/*
 * Copyright (c) 2015, Sony Mobile Communications Inc.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/regmap.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>

/*
 * This driver implements the Qualcomm Shared Memory State Machine, a mechanism
 * for communicating single bit state information to remote processors.
 *
 * The implementation is based on two sections of shared memory; the first
 * holding the state bits and the second holding a matrix of subscription bits.
 *
 * The state bits are structured in entries of 32 bits, each belonging to one
 * system in the SoC. The entry belonging to the local system is considered
 * read-write, while the rest should be considered read-only.
 *
 * The subscription matrix consists of N bitmaps per entry, denoting interest
 * in updates of the entry for each of the N hosts. Upon updating a state bit
 * each host's subscription bitmap should be queried and the remote system
 * should be interrupted if it has requested so.
 *
 * The subscription matrix is laid out in entry-major order:
 *	entry0: [host0 ... hostN]
 *	.
 *	.
 *	entryM: [host0 ... hostN]
 *
 * A third, optional, shared memory region might contain information regarding
 * the number of entries in the state bitmap as well as number of columns in
 * the subscription matrix.
 */
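
/*
 * With the default geometry of 8 entries and 3 hosts, for example, the
 * subscription matrix occupies 8 * 3 u32 words and the bitmap for entry e
 * and host h is found at word e * num_hosts + h; this is the indexing used
 * by the pointer arithmetic throughout the driver below.
 */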

/*
 * Shared memory identifiers, used to acquire handles to respective memory
 * region.
 */
#define SMEM_SMSM_SHARED_STATE		85
#define SMEM_SMSM_CPU_INTR_MASK		333
#define SMEM_SMSM_SIZE_INFO		419

/*
 * Default sizes, in case SMEM_SMSM_SIZE_INFO is not found.
 */
#define SMSM_DEFAULT_NUM_ENTRIES	8
#define SMSM_DEFAULT_NUM_HOSTS		3

struct smsm_entry;
struct smsm_host;

/**
 * struct qcom_smsm - smsm driver context
 * @dev: smsm device pointer
 * @local_host: column in the subscription matrix representing this system
 * @num_hosts: number of columns in the subscription matrix
 * @num_entries: number of entries in the state map and rows in the subscription
 *		matrix
 * @local_state: pointer to the local processor's state bits
 * @subscription: pointer to local processor's row in subscription matrix
 * @state: smem state handle
 * @lock: spinlock for read-modify-write of the outgoing state
 * @entries: context for each of the entries
 * @hosts: context for each of the hosts
 */
struct qcom_smsm {
	struct device *dev;
	u32 local_host;
	u32 num_hosts;
	u32 num_entries;
	u32 *local_state;
	u32 *subscription;
	struct qcom_smem_state *state;
	spinlock_t lock;
	struct smsm_entry *entries;
	struct smsm_host *hosts;
};

/**
 * struct smsm_entry - per remote processor entry context
 * @smsm: back-reference to driver context
 * @domain: IRQ domain for this entry, if representing a remote system
 * @irq_enabled: bitmap of which state bits IRQs are enabled
 * @irq_rising: bitmap tracking if rising bits should be propagated
 * @irq_falling: bitmap tracking if falling bits should be propagated
 * @last_value: snapshot of state bits last time the interrupts were propagated
 * @remote_state: pointer to this entry's state bits
 * @subscription: pointer to a row in the subscription matrix representing this
 *		entry
 */
struct smsm_entry {
	struct qcom_smsm *smsm;
	struct irq_domain *domain;
	DECLARE_BITMAP(irq_enabled, 32);
	DECLARE_BITMAP(irq_rising, 32);
	DECLARE_BITMAP(irq_falling, 32);
	u32 last_value;
	u32 *remote_state;
	u32 *subscription;
};

/**
 * struct smsm_host - representation of a remote host
 * @ipc_regmap: regmap for outgoing interrupt
 * @ipc_offset: offset in @ipc_regmap for outgoing interrupt
 * @ipc_bit: bit in @ipc_regmap + @ipc_offset for outgoing interrupt
 */
struct smsm_host {
	struct regmap *ipc_regmap;
	int ipc_offset;
	int ipc_bit;
};

/**
 * smsm_update_bits() - change bits in outgoing entry and inform subscribers
 * @data: smsm context pointer
 * @mask: mask of bits in the entry to be altered
 * @value: new value for the bits in @mask
 *
 * Used to set and clear the bits in the outgoing/local entry and inform
 * subscribers about the change.
 */
static int smsm_update_bits(void *data, u32 mask, u32 value)
{
	struct qcom_smsm *smsm = data;
	struct smsm_host *hostp;
	unsigned long flags;
	u32 changes;
	u32 host;
	u32 orig;
	u32 val;

	spin_lock_irqsave(&smsm->lock, flags);

	/* Update the entry */
	val = orig = readl(smsm->local_state);
	val &= ~mask;
	val |= value;

	/* Don't signal if we didn't change the value */
	changes = val ^ orig;
	if (!changes) {
		spin_unlock_irqrestore(&smsm->lock, flags);
		goto done;
	}

	/* Write out the new value */
	writel(val, smsm->local_state);
	spin_unlock_irqrestore(&smsm->lock, flags);

	/* Make sure the value update is ordered before any kicks */
	wmb();

	/* Iterate over all hosts to check who wants a kick */
	for (host = 0; host < smsm->num_hosts; host++) {
		hostp = &smsm->hosts[host];

		val = readl(smsm->subscription + host);
		if (val & changes && hostp->ipc_regmap) {
			regmap_write(hostp->ipc_regmap,
				     hostp->ipc_offset,
				     BIT(hostp->ipc_bit));
		}
	}

done:
	return 0;
}

static const struct qcom_smem_state_ops smsm_state_ops = {
	.update_bits = smsm_update_bits,
};

/**
 * smsm_intr() - cascading IRQ handler for SMSM
 * @irq: unused
 * @data: entry related to this IRQ
 *
 * This function cascades an incoming interrupt from a remote system, based on
 * the state bits and configuration.
 */
static irqreturn_t smsm_intr(int irq, void *data)
{
	struct smsm_entry *entry = data;
	unsigned i;
	int irq_pin;
	u32 changed;
	u32 val;

	val = readl(entry->remote_state);
	changed = val ^ entry->last_value;
	entry->last_value = val;
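
	/*
	 * For each state bit with cascading enabled, propagate the interrupt
	 * to the consumer only if the bit actually changed and the observed
	 * edge (rising or falling) matches what was requested via
	 * irq_set_type().
	 */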
	for_each_set_bit(i, entry->irq_enabled, 32) {
		if (!(changed & BIT(i)))
			continue;

		if (val & BIT(i)) {
			if (test_bit(i, entry->irq_rising)) {
				irq_pin = irq_find_mapping(entry->domain, i);
				handle_nested_irq(irq_pin);
			}
		} else {
			if (test_bit(i, entry->irq_falling)) {
				irq_pin = irq_find_mapping(entry->domain, i);
				handle_nested_irq(irq_pin);
			}
		}
	}

	return IRQ_HANDLED;
}

/**
 * smsm_mask_irq() - un-subscribe from cascades of IRQs of a certain status bit
 * @irqd: IRQ handle to be masked
 *
 * This un-subscribes the local CPU from interrupts upon changes to the defined
 * status bit. The bit is also cleared from cascading.
 */
static void smsm_mask_irq(struct irq_data *irqd)
{
	struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);
	struct qcom_smsm *smsm = entry->smsm;
	u32 val;

	if (entry->subscription) {
		val = readl(entry->subscription + smsm->local_host);
		val &= ~BIT(irq);
		writel(val, entry->subscription + smsm->local_host);
	}

	clear_bit(irq, entry->irq_enabled);
}

/**
 * smsm_unmask_irq() - subscribe to cascades of IRQs of a certain status bit
 * @irqd: IRQ handle to be unmasked
 *
 * This subscribes the local CPU to interrupts upon changes to the defined
 * status bit. The bit is also marked for cascading.
 */
static void smsm_unmask_irq(struct irq_data *irqd)
{
	struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);
	struct qcom_smsm *smsm = entry->smsm;
	u32 val;

	set_bit(irq, entry->irq_enabled);

	if (entry->subscription) {
		val = readl(entry->subscription + smsm->local_host);
		val |= BIT(irq);
		writel(val, entry->subscription + smsm->local_host);
	}
}

/**
 * smsm_set_irq_type() - updates the requested IRQ type for the cascading
 * @irqd: consumer interrupt handle
 * @type: requested flags
 */
static int smsm_set_irq_type(struct irq_data *irqd, unsigned int type)
{
	struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	if (!(type & IRQ_TYPE_EDGE_BOTH))
		return -EINVAL;

	if (type & IRQ_TYPE_EDGE_RISING)
		set_bit(irq, entry->irq_rising);
	else
		clear_bit(irq, entry->irq_rising);

	if (type & IRQ_TYPE_EDGE_FALLING)
		set_bit(irq, entry->irq_falling);
	else
		clear_bit(irq, entry->irq_falling);

	return 0;
}

static struct irq_chip smsm_irq_chip = {
	.name           = "smsm",
	.irq_mask       = smsm_mask_irq,
	.irq_unmask     = smsm_unmask_irq,
	.irq_set_type	= smsm_set_irq_type,
};

/**
 * smsm_irq_map() - sets up a mapping for a cascaded IRQ
 * @d: IRQ domain representing an entry
 * @irq: IRQ to set up
 * @hw: unused
 */
static int smsm_irq_map(struct irq_domain *d,
			unsigned int irq,
			irq_hw_number_t hw)
{
	struct smsm_entry *entry = d->host_data;
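
	/*
	 * Cascaded interrupts are demultiplexed from a threaded parent
	 * handler (see smsm_inbound_entry()), so consumers' handlers must be
	 * run as nested threads.
	 */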
	irq_set_chip_and_handler(irq, &smsm_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, entry);
	irq_set_nested_thread(irq, 1);

	return 0;
}

static const struct irq_domain_ops smsm_irq_ops = {
	.map = smsm_irq_map,
	.xlate = irq_domain_xlate_twocell,
};

/**
 * smsm_parse_ipc() - parses a qcom,ipc-%d device tree property
 * @smsm: smsm driver context
 * @host_id: index of the remote host to be resolved
 *
 * Parses device tree to acquire the information needed for sending the
 * outgoing interrupts to a remote host - identified by @host_id.
 */
static int smsm_parse_ipc(struct qcom_smsm *smsm, unsigned host_id)
{
	struct device_node *syscon;
	struct device_node *node = smsm->dev->of_node;
	struct smsm_host *host = &smsm->hosts[host_id];
	char key[16];
	int ret;
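
	/*
	 * The property is expected to hold a phandle to the IPC syscon
	 * followed by the register offset and the bit used to kick this host,
	 * e.g. qcom,ipc-1 = <&apcs 8 13> (illustrative values only; the
	 * actual numbers are SoC specific).
	 */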
	snprintf(key, sizeof(key), "qcom,ipc-%d", host_id);
	syscon = of_parse_phandle(node, key, 0);
	if (!syscon)
		return 0;

	host->ipc_regmap = syscon_node_to_regmap(syscon);
	if (IS_ERR(host->ipc_regmap))
		return PTR_ERR(host->ipc_regmap);

	ret = of_property_read_u32_index(node, key, 1, &host->ipc_offset);
	if (ret < 0) {
		dev_err(smsm->dev, "no offset in %s\n", key);
		return -EINVAL;
	}

	ret = of_property_read_u32_index(node, key, 2, &host->ipc_bit);
	if (ret < 0) {
		dev_err(smsm->dev, "no bit in %s\n", key);
		return -EINVAL;
	}

	return 0;
}

/**
 * smsm_inbound_entry() - parse DT and set up an entry representing a remote system
 * @smsm: smsm driver context
 * @entry: entry context to be set up
 * @node: dt node containing the entry's properties
 */
static int smsm_inbound_entry(struct qcom_smsm *smsm,
			      struct smsm_entry *entry,
			      struct device_node *node)
{
	int ret;
	int irq;

	irq = irq_of_parse_and_map(node, 0);
	if (!irq) {
		dev_err(smsm->dev, "failed to parse smsm interrupt\n");
		return -EINVAL;
	}

	ret = devm_request_threaded_irq(smsm->dev, irq,
					NULL, smsm_intr,
					IRQF_ONESHOT,
					"smsm", (void *)entry);
	if (ret) {
		dev_err(smsm->dev, "failed to request interrupt\n");
		return ret;
	}

	entry->domain = irq_domain_add_linear(node, 32, &smsm_irq_ops, entry);
	if (!entry->domain) {
		dev_err(smsm->dev, "failed to add irq_domain\n");
		return -ENOMEM;
	}

	return 0;
}

/**
 * smsm_get_size_info() - parse the optional memory segment for sizes
 * @smsm: smsm driver context
 *
 * Attempt to acquire the number of hosts and entries from the optional shared
 * memory location. Not being able to find this segment should indicate that
 * we're on an older system where these values were hard coded to
 * SMSM_DEFAULT_NUM_ENTRIES and SMSM_DEFAULT_NUM_HOSTS.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int smsm_get_size_info(struct qcom_smsm *smsm)
{
	size_t size;
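	/* Layout of the optional SMEM_SMSM_SIZE_INFO item in shared memory */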
	struct {
		u32 num_hosts;
		u32 num_entries;
		u32 reserved0;
		u32 reserved1;
	} *info;

	info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SIZE_INFO, &size);
	if (PTR_ERR(info) == -ENOENT || size != sizeof(*info)) {
		dev_warn(smsm->dev, "no smsm size info, using defaults\n");
		smsm->num_entries = SMSM_DEFAULT_NUM_ENTRIES;
		smsm->num_hosts = SMSM_DEFAULT_NUM_HOSTS;
		return 0;
	} else if (IS_ERR(info)) {
		dev_err(smsm->dev, "unable to retrieve smsm size info\n");
		return PTR_ERR(info);
	}

	smsm->num_entries = info->num_entries;
	smsm->num_hosts = info->num_hosts;

	dev_dbg(smsm->dev,
		"found custom size of smsm: %d entries %d hosts\n",
		smsm->num_entries, smsm->num_hosts);

	return 0;
}

static int qcom_smsm_probe(struct platform_device *pdev)
{
	struct device_node *local_node;
	struct device_node *node;
	struct smsm_entry *entry;
	struct qcom_smsm *smsm;
	u32 *intr_mask;
	size_t size;
	u32 *states;
	u32 id;
	int ret;

	smsm = devm_kzalloc(&pdev->dev, sizeof(*smsm), GFP_KERNEL);
	if (!smsm)
		return -ENOMEM;
	smsm->dev = &pdev->dev;
	spin_lock_init(&smsm->lock);

	ret = smsm_get_size_info(smsm);
	if (ret)
		return ret;

	smsm->entries = devm_kcalloc(&pdev->dev,
				     smsm->num_entries,
				     sizeof(struct smsm_entry),
				     GFP_KERNEL);
	if (!smsm->entries)
		return -ENOMEM;

	smsm->hosts = devm_kcalloc(&pdev->dev,
				   smsm->num_hosts,
				   sizeof(struct smsm_host),
				   GFP_KERNEL);
	if (!smsm->hosts)
		return -ENOMEM;

	local_node = of_find_node_with_property(pdev->dev.of_node, "#qcom,smem-state-cells");
	if (!local_node) {
		dev_err(&pdev->dev, "no state entry\n");
		return -EINVAL;
	}

	of_property_read_u32(pdev->dev.of_node,
			     "qcom,local-host",
			     &smsm->local_host);

	/* Parse the host properties */
	for (id = 0; id < smsm->num_hosts; id++) {
		ret = smsm_parse_ipc(smsm, id);
		if (ret < 0)
			return ret;
	}

	/* Acquire the main SMSM state vector */
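	/*
	 * -EEXIST from the allocation only means that the item already exists
	 * in shared memory (e.g. allocated by another host), which is fine;
	 * it is merely looked up afterwards.
	 */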
	ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE,
			      smsm->num_entries * sizeof(u32));
	if (ret < 0 && ret != -EEXIST) {
		dev_err(&pdev->dev, "unable to allocate shared state entry\n");
		return ret;
	}

	states = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE, NULL);
	if (IS_ERR(states)) {
		dev_err(&pdev->dev, "Unable to acquire shared state entry\n");
		return PTR_ERR(states);
	}

	/* Acquire the list of interrupt mask vectors */
	size = smsm->num_entries * smsm->num_hosts * sizeof(u32);
	ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, size);
	if (ret < 0 && ret != -EEXIST) {
		dev_err(&pdev->dev, "unable to allocate smsm interrupt mask\n");
		return ret;
	}

	intr_mask = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, NULL);
	if (IS_ERR(intr_mask)) {
		dev_err(&pdev->dev, "unable to acquire shared memory interrupt mask\n");
		return PTR_ERR(intr_mask);
	}

	/* Setup the reference to the local state bits */
	smsm->local_state = states + smsm->local_host;
	smsm->subscription = intr_mask + smsm->local_host * smsm->num_hosts;

	/* Register the outgoing state */
	smsm->state = qcom_smem_state_register(local_node, &smsm_state_ops, smsm);
	if (IS_ERR(smsm->state)) {
		dev_err(smsm->dev, "failed to register qcom_smem_state\n");
		return PTR_ERR(smsm->state);
	}

	/* Register handlers for remote processor entries of interest. */
	for_each_available_child_of_node(pdev->dev.of_node, node) {
		if (!of_property_read_bool(node, "interrupt-controller"))
			continue;

		ret = of_property_read_u32(node, "reg", &id);
		if (ret || id >= smsm->num_entries) {
			dev_err(&pdev->dev, "invalid reg of entry\n");
			if (!ret)
				ret = -EINVAL;
			goto unwind_interfaces;
		}
		entry = &smsm->entries[id];

		entry->smsm = smsm;
		entry->remote_state = states + id;

		/* Setup subscription pointers and unsubscribe from any kicks */
		entry->subscription = intr_mask + id * smsm->num_hosts;
		writel(0, entry->subscription + smsm->local_host);

		ret = smsm_inbound_entry(smsm, entry, node);
		if (ret < 0)
			goto unwind_interfaces;
	}

	platform_set_drvdata(pdev, smsm);

	return 0;

unwind_interfaces:
	for (id = 0; id < smsm->num_entries; id++)
		if (smsm->entries[id].domain)
			irq_domain_remove(smsm->entries[id].domain);

	qcom_smem_state_unregister(smsm->state);

	return ret;
}

static int qcom_smsm_remove(struct platform_device *pdev)
{
	struct qcom_smsm *smsm = platform_get_drvdata(pdev);
	unsigned id;

	for (id = 0; id < smsm->num_entries; id++)
		if (smsm->entries[id].domain)
			irq_domain_remove(smsm->entries[id].domain);

	qcom_smem_state_unregister(smsm->state);

	return 0;
}

static const struct of_device_id qcom_smsm_of_match[] = {
	{ .compatible = "qcom,smsm" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smsm_of_match);

static struct platform_driver qcom_smsm_driver = {
	.probe = qcom_smsm_probe,
	.remove = qcom_smsm_remove,
	.driver = {
		.name = "qcom-smsm",
		.of_match_table = qcom_smsm_of_match,
	},
};
module_platform_driver(qcom_smsm_driver);

MODULE_DESCRIPTION("Qualcomm Shared Memory State Machine driver");
MODULE_LICENSE("GPL v2");