/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/slimbus/slimbus.h>
#include <mach/sps.h>
#include "slim-msm.h"

int msm_slim_rx_enqueue(struct msm_slim_ctrl *dev, u32 *buf, u8 len)
{
        spin_lock(&dev->rx_lock);
        if ((dev->tail + 1) % MSM_CONCUR_MSG == dev->head) {
                spin_unlock(&dev->rx_lock);
                dev_err(dev->dev, "RX QUEUE full!");
                return -EXFULL;
        }
        memcpy((u8 *)dev->rx_msgs[dev->tail], (u8 *)buf, len);
        dev->tail = (dev->tail + 1) % MSM_CONCUR_MSG;
        spin_unlock(&dev->rx_lock);
        return 0;
}

int msm_slim_rx_dequeue(struct msm_slim_ctrl *dev, u8 *buf)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->rx_lock, flags);
        if (dev->tail == dev->head) {
                spin_unlock_irqrestore(&dev->rx_lock, flags);
                return -ENODATA;
        }
        /* Each RX slot holds one maximum-sized (40-byte) message */
        memcpy(buf, (u8 *)dev->rx_msgs[dev->head], 40);
        dev->head = (dev->head + 1) % MSM_CONCUR_MSG;
        spin_unlock_irqrestore(&dev->rx_lock, flags);
        return 0;
}
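
/*
 * Usage sketch (hypothetical caller, editor's illustration): because the
 * dequeue above always copies a full 40-byte slot, the caller's buffer must
 * be at least 40 bytes (ten words):
 *
 *        u32 rx_buf[10];
 *
 *        while (msm_slim_rx_dequeue(dev, (u8 *)rx_buf) != -ENODATA)
 *                process_rx_msg(rx_buf);  (process_rx_msg() is hypothetical)
 */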

int msm_slim_get_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
        int ref = 0;
        int ret = pm_runtime_get_sync(dev->dev);

        if (ret >= 0) {
                ref = atomic_read(&dev->dev->power.usage_count);
                if (ref <= 0) {
                        SLIM_WARN(dev, "reference count -ve:%d", ref);
                        ret = -ENODEV;
                }
        }
        return ret;
#else
        return -ENODEV;
#endif
}

void msm_slim_put_ctrl(struct msm_slim_ctrl *dev)
{
#ifdef CONFIG_PM_RUNTIME
        int ref;

        pm_runtime_mark_last_busy(dev->dev);
        ref = atomic_read(&dev->dev->power.usage_count);
        if (ref <= 0)
                SLIM_WARN(dev, "reference count mismatch:%d", ref);
        else
                pm_runtime_put_sync(dev->dev);
#endif
}

#if defined(CONFIG_SND_SOC_ES705)
void msm_slim_es705_func(struct slim_device *gen0_client)
{
        struct msm_slim_ctrl *dev = slim_get_ctrldata(gen0_client->ctrl);

        msm_slim_get_ctrl(dev);
        msm_slim_put_ctrl(dev);
}
EXPORT_SYMBOL(msm_slim_es705_func);
#endif

#if defined(CONFIG_SND_SOC_ES325_ATLANTIC)
void msm_slim_vote_func(struct slim_device *gen0_client)
{
        struct msm_slim_ctrl *dev = slim_get_ctrldata(gen0_client->ctrl);

        pr_info("%s()", __func__);
        msm_slim_get_ctrl(dev);
        msm_slim_put_ctrl(dev);
}
EXPORT_SYMBOL(msm_slim_vote_func);
#endif

irqreturn_t msm_slim_port_irq_handler(struct msm_slim_ctrl *dev, u32 pstat)
{
        int i;
        u32 int_en = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
                                                dev->ver));
        /*
         * This is a different port interrupt than the one we enabled;
         * ignore it. This can happen if overflow/underflow is reported
         * after the interrupt was disabled because the client had no
         * buffers available.
         */
        if ((pstat & int_en) == 0)
                return IRQ_HANDLED;
        for (i = dev->port_b; i < MSM_SLIM_NPORTS; i++) {
                if (pstat & (1 << i)) {
                        u32 val = readl_relaxed(PGD_PORT(PGD_PORT_STATn,
                                                        i, dev->ver));
                        if (val & MSM_PORT_OVERFLOW) {
                                dev->ctrl.ports[i - dev->port_b].err =
                                                SLIM_P_OVERFLOW;
                        } else if (val & MSM_PORT_UNDERFLOW) {
                                dev->ctrl.ports[i - dev->port_b].err =
                                                SLIM_P_UNDERFLOW;
                        }
                }
        }
        /*
         * Disable the port interrupt here. It is re-enabled when more
         * buffers are provided for this port.
         */
        writel_relaxed((int_en & (~pstat)),
                        PGD_THIS_EE(PGD_PORT_INT_EN_EEn, dev->ver));
        /* Clear the port interrupts */
        writel_relaxed(pstat, PGD_THIS_EE(PGD_PORT_INT_CL_EEn, dev->ver));
        SLIM_INFO(dev, "disabled overflow/underflow for port 0x%x", pstat);
        /*
         * Guarantee that the port interrupt-clearing writes go through
         * before exiting the ISR.
         */
        mb();
        return IRQ_HANDLED;
}
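
/*
 * Editor's note (illustration): pstat is indexed by hardware port number,
 * so an overflow on pipe-port 18 arrives as BIT(18). The (int_en & ~pstat)
 * write above masks only that port and leaves every other enabled port's
 * bit intact; msm_slim_port_xfer() below sets the bit again once the
 * client queues a fresh buffer.
 */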

int msm_slim_init_endpoint(struct msm_slim_ctrl *dev, struct msm_slim_endp *ep)
{
        int ret;
        struct sps_pipe *endpoint;
        struct sps_connect *config = &ep->config;

        /* Allocate the endpoint */
        endpoint = sps_alloc_endpoint();
        if (!endpoint) {
                dev_err(dev->dev, "sps_alloc_endpoint failed\n");
                return -ENOMEM;
        }

        /* Get the default connection configuration for an endpoint */
        ret = sps_get_config(endpoint, config);
        if (ret) {
                dev_err(dev->dev, "sps_get_config failed 0x%x\n", ret);
                goto sps_config_failed;
        }

        ep->sps = endpoint;
        return 0;

sps_config_failed:
        sps_free_endpoint(endpoint);
        return ret;
}

void msm_slim_free_endpoint(struct msm_slim_endp *ep)
{
        sps_free_endpoint(ep->sps);
        ep->sps = NULL;
}

int msm_slim_sps_mem_alloc(
        struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem, u32 len)
{
        dma_addr_t phys;

        mem->size = len;
        mem->min_size = 0;
        mem->base = dma_alloc_coherent(dev->dev, mem->size, &phys, GFP_KERNEL);
        if (!mem->base) {
                dev_err(dev->dev, "dma_alloc_coherent(%d) failed\n", len);
                return -ENOMEM;
        }
        mem->phys_base = phys;
        memset(mem->base, 0x00, mem->size);
        return 0;
}

void
msm_slim_sps_mem_free(struct msm_slim_ctrl *dev, struct sps_mem_buffer *mem)
{
        dma_free_coherent(dev->dev, mem->size, mem->base, mem->phys_base);
        mem->size = 0;
        mem->base = NULL;
        mem->phys_base = 0;
}
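
/*
 * Usage sketch (editor's illustration): the two helpers above pair a
 * coherent DMA allocation with its release, e.g. for a descriptor FIFO
 * sized for MSM_SLIM_DESC_NUM iovecs:
 *
 *        struct sps_mem_buffer descr;
 *        int ret = msm_slim_sps_mem_alloc(dev, &descr,
 *                        MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
 *        if (!ret) {
 *                ... hand descr to sps_connect() via config->desc ...
 *                msm_slim_sps_mem_free(dev, &descr);
 *        }
 */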

void msm_hw_set_port(struct msm_slim_ctrl *dev, u8 pn)
{
        u32 set_cfg = DEF_WATERMARK | DEF_ALIGN | DEF_PACK | ENABLE_PORT;

        writel_relaxed(set_cfg, PGD_PORT(PGD_PORT_CFGn, pn, dev->ver));
        writel_relaxed(DEF_BLKSZ, PGD_PORT(PGD_PORT_BLKn, pn, dev->ver));
        writel_relaxed(DEF_TRANSZ, PGD_PORT(PGD_PORT_TRANn, pn, dev->ver));
        /* Make sure that port registers are updated before returning */
        mb();
}

static void msm_slim_disconn_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
{
        struct msm_slim_endp *endpoint = &dev->pipes[pn];
        struct sps_register_event sps_event;

        writel_relaxed(0, PGD_PORT(PGD_PORT_CFGn, (pn + dev->port_b),
                                        dev->ver));
        /* Make sure the port register is updated */
        mb();
        memset(&sps_event, 0, sizeof(sps_event));
        sps_register_event(endpoint->sps, &sps_event);
        sps_disconnect(endpoint->sps);
        dev->pipes[pn].connected = false;
}

int msm_slim_connect_pipe_port(struct msm_slim_ctrl *dev, u8 pn)
{
        struct msm_slim_endp *endpoint = &dev->pipes[pn];
        struct sps_connect *cfg = &endpoint->config;
        u32 stat;
        int ret = sps_get_config(dev->pipes[pn].sps, cfg);

        if (ret) {
                dev_err(dev->dev, "sps pipe-port get config error%x\n", ret);
                return ret;
        }
        cfg->options = SPS_O_DESC_DONE | SPS_O_ERROR |
                        SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;
        if (dev->pipes[pn].connected &&
                        dev->ctrl.ports[pn].state == SLIM_P_CFG) {
                return -EISCONN;
        } else if (dev->pipes[pn].connected) {
                writel_relaxed(0, PGD_PORT(PGD_PORT_CFGn, (pn + dev->port_b),
                                                dev->ver));
                /* Make sure the port-disable write goes through */
                mb();
                /* Is the pipe already connected in the desired direction? */
                if ((dev->ctrl.ports[pn].flow == SLIM_SRC &&
                                cfg->mode == SPS_MODE_DEST) ||
                        (dev->ctrl.ports[pn].flow == SLIM_SINK &&
                                cfg->mode == SPS_MODE_SRC)) {
                        msm_hw_set_port(dev, pn + dev->port_b);
                        return 0;
                }
                msm_slim_disconn_pipe_port(dev, pn);
        }
        stat = readl_relaxed(PGD_PORT(PGD_PORT_STATn, (pn + dev->port_b),
                                        dev->ver));
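        /*
         * Editor's note: bits [11:4] of PGD_PORT_STATn are assumed to carry
         * the BAM pipe number backing this port; the shift/mask below
         * extracts that field (e.g. stat = 0x150 yields pipe index 21).
         */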
        if (dev->ctrl.ports[pn].flow == SLIM_SRC) {
                cfg->destination = dev->bam.hdl;
                cfg->source = SPS_DEV_HANDLE_MEM;
                cfg->dest_pipe_index = ((stat & (0xFF << 4)) >> 4);
                cfg->src_pipe_index = 0;
                dev_dbg(dev->dev, "flow src:pipe num:%d",
                                        cfg->dest_pipe_index);
                cfg->mode = SPS_MODE_DEST;
        } else {
                cfg->source = dev->bam.hdl;
                cfg->destination = SPS_DEV_HANDLE_MEM;
                cfg->src_pipe_index = ((stat & (0xFF << 4)) >> 4);
                cfg->dest_pipe_index = 0;
                dev_dbg(dev->dev, "flow dest:pipe num:%d",
                                        cfg->src_pipe_index);
                cfg->mode = SPS_MODE_SRC;
        }
        /* Space for descriptor FIFOs */
        ret = msm_slim_sps_mem_alloc(dev, &cfg->desc,
                        MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
        if (ret)
                pr_err("mem alloc for descr failed:%d", ret);
        else
                ret = sps_connect(dev->pipes[pn].sps, cfg);
        if (!ret) {
                dev->pipes[pn].connected = true;
                msm_hw_set_port(dev, pn + dev->port_b);
        }
        return ret;
}

int msm_alloc_port(struct slim_controller *ctrl, u8 pn)
{
        struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
        struct msm_slim_endp *endpoint;
        int ret = 0;

        if (ctrl->ports[pn].req == SLIM_REQ_HALF_DUP ||
                ctrl->ports[pn].req == SLIM_REQ_MULTI_CH)
                return -EPROTONOSUPPORT;
        if (pn >= (MSM_SLIM_NPORTS - dev->port_b))
                return -ENODEV;
        endpoint = &dev->pipes[pn];
        ret = msm_slim_init_endpoint(dev, endpoint);
        dev_dbg(dev->dev, "endpoint init returned 0x%x\n", ret);
        return ret;
}

void msm_dealloc_port(struct slim_controller *ctrl, u8 pn)
{
        struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
        struct msm_slim_endp *endpoint;

        if (pn >= (MSM_SLIM_NPORTS - dev->port_b))
                return;
        endpoint = &dev->pipes[pn];
        if (dev->pipes[pn].connected)
                msm_slim_disconn_pipe_port(dev, pn);
        if (endpoint->sps) {
                struct sps_connect *config = &endpoint->config;

                msm_slim_free_endpoint(endpoint);
                msm_slim_sps_mem_free(dev, &config->desc);
        }
}

enum slim_port_err msm_slim_port_xfer_status(struct slim_controller *ctr,
                                u8 pn, phys_addr_t *done_buf, u32 *done_len)
{
        struct msm_slim_ctrl *dev = slim_get_ctrldata(ctr);
        struct sps_iovec sio;
        int ret;

        if (done_len)
                *done_len = 0;
        if (done_buf)
                *done_buf = 0;
        if (!dev->pipes[pn].connected)
                return SLIM_P_DISCONNECT;
        ret = sps_get_iovec(dev->pipes[pn].sps, &sio);
        if (!ret) {
                if (done_len)
                        *done_len = sio.size;
                if (done_buf)
                        *done_buf = (phys_addr_t)sio.addr;
        }
        dev_dbg(dev->dev, "get iovec returned %d\n", ret);
        return SLIM_P_INPROGRESS;
}

static void msm_slim_port_cb(struct sps_event_notify *ev)
{
        struct completion *comp = ev->data.transfer.user;
        struct sps_iovec *iovec = &ev->data.transfer.iovec;

        if (ev->event_id == SPS_EVENT_DESC_DONE) {
                pr_debug("desc done iovec = (0x%x 0x%x 0x%x)\n",
                                iovec->addr, iovec->size, iovec->flags);
        } else {
                pr_err("%s: ERR event %d\n", __func__, ev->event_id);
        }
        if (comp)
                complete(comp);
}

int msm_slim_port_xfer(struct slim_controller *ctrl, u8 pn, phys_addr_t iobuf,
                        u32 len, struct completion *comp)
{
        struct sps_register_event sreg;
        int ret;
        struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);

        if (pn >= 7)
                return -ENODEV;
        sreg.options = (SPS_EVENT_DESC_DONE | SPS_EVENT_ERROR);
        sreg.mode = SPS_TRIGGER_WAIT;
        sreg.xfer_done = NULL;
        sreg.callback = msm_slim_port_cb;
        sreg.user = NULL;
        ret = sps_register_event(dev->pipes[pn].sps, &sreg);
        if (ret) {
                dev_dbg(dev->dev, "sps register event error:%x\n", ret);
                return ret;
        }
        ret = sps_transfer_one(dev->pipes[pn].sps, iobuf, len, comp,
                                SPS_IOVEC_FLAG_INT);
        dev_dbg(dev->dev, "sps submit xfer error code:%x\n", ret);
        if (!ret) {
                /* Enable port interrupts */
                u32 int_port = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_EN_EEn,
                                                dev->ver));
                if (!(int_port & (1 << (dev->port_b + pn))))
                        writel_relaxed((int_port | (1 << (dev->port_b + pn))),
                                PGD_THIS_EE(PGD_PORT_INT_EN_EEn, dev->ver));
                /* Make sure port registers are updated before returning */
                mb();
        }
        return ret;
}
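
/*
 * Usage sketch (hypothetical caller, editor's illustration): queue one DMA
 * buffer on port pn and wait for the descriptor-done callback to fire:
 *
 *        DECLARE_COMPLETION_ONSTACK(done);
 *        phys_addr_t done_buf;
 *        u32 done_len;
 *
 *        if (!msm_slim_port_xfer(ctrl, pn, phys, len, &done)) {
 *                wait_for_completion(&done);
 *                msm_slim_port_xfer_status(ctrl, pn, &done_buf, &done_len);
 *        }
 */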

/* Queue up a TX message buffer */
static int msm_slim_post_tx_msgq(struct msm_slim_ctrl *dev, u8 *buf, int len)
{
        int ret;
        struct msm_slim_endp *endpoint = &dev->tx_msgq;
        struct sps_mem_buffer *mem = &endpoint->buf;
        struct sps_pipe *pipe = endpoint->sps;
        int ix = (buf - (u8 *)mem->base);
        phys_addr_t phys_addr = mem->phys_base + ix;

        for (ret = 0; ret < ((len + 3) >> 2); ret++)
                pr_debug("BAM TX buf[%d]:0x%x", ret, ((u32 *)buf)[ret]);
        /* Round the length up to a whole number of words */
        ret = sps_transfer_one(pipe, phys_addr, ((len + 3) & 0xFC), NULL,
                                SPS_IOVEC_FLAG_EOT);
        if (ret)
                dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);
        return ret;
}

void msm_slim_tx_msg_return(struct msm_slim_ctrl *dev)
{
        struct msm_slim_endp *endpoint = &dev->tx_msgq;
        struct sps_mem_buffer *mem = &endpoint->buf;
        struct sps_pipe *pipe = endpoint->sps;
        struct sps_iovec iovec;
        int idx, ret = 0;

        if (dev->use_tx_msgqs != MSM_MSGQ_ENABLED) {
                /* Only one buffer is used; non-blocking writes are not possible */
                if (dev->wr_comp[0]) {
                        struct completion *comp = dev->wr_comp[0];

                        dev->wr_comp[0] = NULL;
                        complete(comp);
                }
                return;
        }
        while (!ret) {
                ret = sps_get_iovec(pipe, &iovec);
                if (ret || iovec.addr == 0) {
                        if (ret)
                                pr_err("SLIM TX get IOVEC failed:%d", ret);
                        return;
                }
                idx = (int)((iovec.addr - mem->phys_base) / SLIM_MSGQ_BUF_LEN);
                if (idx < MSM_TX_BUFS && dev->wr_comp[idx]) {
                        struct completion *comp = dev->wr_comp[idx];

                        dev->wr_comp[idx] = NULL;
                        complete(comp);
                }
                /* Reclaim all packets that were delivered out of order */
                if (idx != dev->tx_head)
                        pr_err("SLIM OUT OF ORDER TX:idx:%d, head:%d", idx,
                                dev->tx_head);
                while (idx == dev->tx_head) {
                        dev->tx_head = (dev->tx_head + 1) % MSM_TX_BUFS;
                        idx++;
                        /* Bound-check idx (not wrapped) before reading wr_comp */
                        if (dev->tx_head == dev->tx_tail ||
                                idx >= MSM_TX_BUFS ||
                                dev->wr_comp[idx] != NULL)
                                break;
                }
        }
}
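
/*
 * Editor's note (worked example): TX buffers sit SLIM_MSGQ_BUF_LEN bytes
 * apart in one coherent allocation, so a returned iovec.addr of
 * mem->phys_base + 4 * SLIM_MSGQ_BUF_LEN maps back to slot idx == 4, whose
 * blocked writer (if any) is completed via dev->wr_comp[4] above.
 */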

static u32 *msm_slim_modify_tx_buf(struct msm_slim_ctrl *dev,
                                        struct completion *comp)
{
        struct msm_slim_endp *endpoint = &dev->tx_msgq;
        struct sps_mem_buffer *mem = &endpoint->buf;
        u32 *retbuf = NULL;

        if ((dev->tx_tail + 1) % MSM_TX_BUFS == dev->tx_head)
                return NULL;
        retbuf = (u32 *)((u8 *)mem->base +
                        (dev->tx_tail * SLIM_MSGQ_BUF_LEN));
        dev->wr_comp[dev->tx_tail] = comp;
        dev->tx_tail = (dev->tx_tail + 1) % MSM_TX_BUFS;
        return retbuf;
}

u32 *msm_slim_manage_tx_msgq(struct msm_slim_ctrl *dev, bool getbuf,
                                struct completion *comp)
{
        int ret = 0;
        int retries = 0;
        u32 *retbuf = NULL;

        mutex_lock(&dev->tx_buf_lock);
        if (!getbuf) {
                msm_slim_tx_msg_return(dev);
                mutex_unlock(&dev->tx_buf_lock);
                return NULL;
        }
        retbuf = msm_slim_modify_tx_buf(dev, comp);
        if (retbuf) {
                mutex_unlock(&dev->tx_buf_lock);
                return retbuf;
        }
        do {
                msm_slim_tx_msg_return(dev);
                retbuf = msm_slim_modify_tx_buf(dev, comp);
                if (!retbuf)
                        ret = -EAGAIN;
                else {
                        if (retries > 0)
                                SLIM_INFO(dev, "SLIM TX retrieved:%d retries",
                                                retries);
                        mutex_unlock(&dev->tx_buf_lock);
                        return retbuf;
                }
                /*
                 * The superframe size varies with the clock gear, and the HW
                 * consumes at least one message per superframe when healthy.
                 * With INIT_MX_RETRIES, wait between 3 and 10 superframes
                 * before concluding the HW cannot process descriptors.
                 */
                usleep_range(100, 250);
                retries++;
        } while (ret && (retries < INIT_MX_RETRIES));
        mutex_unlock(&dev->tx_buf_lock);
        return NULL;
}

int msm_send_msg_buf(struct msm_slim_ctrl *dev, u32 *buf, u8 len, u32 tx_reg)
{
        if (dev->use_tx_msgqs != MSM_MSGQ_ENABLED) {
                int i;

                for (i = 0; i < (len + 3) >> 2; i++) {
                        dev_dbg(dev->dev, "AHB TX data:0x%x\n", buf[i]);
                        writel_relaxed(buf[i], dev->base + tx_reg + (i * 4));
                }
                /* Guarantee that the message is sent before returning */
                mb();
                return 0;
        }
        return msm_slim_post_tx_msgq(dev, (u8 *)buf, len);
}

u32 *msm_get_msg_buf(struct msm_slim_ctrl *dev, int len,
                        struct completion *comp)
{
        /*
         * Currently we block a transaction until the current one completes.
         * In case we need multiple transactions, use message Qs.
         */
        if (dev->use_tx_msgqs != MSM_MSGQ_ENABLED) {
                dev->wr_comp[0] = comp;
                return dev->tx_buf;
        }
        return msm_slim_manage_tx_msgq(dev, true, comp);
}
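
/*
 * Usage sketch (editor's illustration): message TX is a two-step
 * get-then-send sequence; in message-queue mode the buffer comes from the
 * circular TX pool, otherwise dev->tx_buf is reused for a blocking write
 * (tx_reg below stands for the controller's TX register offset, assumed
 * defined elsewhere):
 *
 *        DECLARE_COMPLETION_ONSTACK(tx_done);
 *        u32 *wbuf = msm_get_msg_buf(dev, len, &tx_done);
 *
 *        if (wbuf) {
 *                ... fill wbuf[] with the SLIMbus message words ...
 *                msm_send_msg_buf(dev, wbuf, len, tx_reg);
 *                wait_for_completion(&tx_done);
 *        }
 */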

static void
msm_slim_rx_msgq_event(struct msm_slim_ctrl *dev, struct sps_event_notify *ev)
{
        u32 *buf = ev->data.transfer.user;
        struct sps_iovec *iovec = &ev->data.transfer.iovec;

        /*
         * Note: the virtual address needs to be offset by the same index
         * as the physical address, or just pass in the actual virtual
         * address if the sps_mem_buffer is not needed. If a completion is
         * used, the virtual address won't be available and will need to be
         * calculated from the offset of the physical address.
         */
        if (ev->event_id == SPS_EVENT_DESC_DONE) {
                pr_debug("buf = 0x%p, data = 0x%x\n", buf, *buf);
                pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
                                iovec->addr, iovec->size, iovec->flags);
        } else {
                dev_err(dev->dev, "%s: unknown event %d\n",
                                __func__, ev->event_id);
        }
}

static void msm_slim_rx_msgq_cb(struct sps_event_notify *notify)
{
        struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)notify->user;

        msm_slim_rx_msgq_event(dev, notify);
}

/* Queue up an RX message buffer */
static int msm_slim_post_rx_msgq(struct msm_slim_ctrl *dev, int ix)
{
        int ret;
        u32 flags = SPS_IOVEC_FLAG_INT;
        struct msm_slim_endp *endpoint = &dev->rx_msgq;
        struct sps_mem_buffer *mem = &endpoint->buf;
        struct sps_pipe *pipe = endpoint->sps;

        /* RX message queue buffers are 4 bytes in length */
        u8 *virt_addr = mem->base + (4 * ix);
        phys_addr_t phys_addr = mem->phys_base + (4 * ix);

        pr_debug("index:%d, virt:0x%p\n", ix, virt_addr);
        ret = sps_transfer_one(pipe, phys_addr, 4, virt_addr, flags);
        if (ret)
                dev_err(dev->dev, "transfer_one() failed 0x%x, %d\n", ret, ix);
        return ret;
}

int msm_slim_rx_msgq_get(struct msm_slim_ctrl *dev, u32 *data, int offset)
{
        struct msm_slim_endp *endpoint = &dev->rx_msgq;
        struct sps_mem_buffer *mem = &endpoint->buf;
        struct sps_pipe *pipe = endpoint->sps;
        struct sps_iovec iovec;
        int index;
        int ret;

        ret = sps_get_iovec(pipe, &iovec);
        if (ret) {
                dev_err(dev->dev, "sps_get_iovec() failed 0x%x\n", ret);
                goto err_exit;
        }
        pr_debug("iovec = (0x%x 0x%x 0x%x)\n",
                        iovec.addr, iovec.size, iovec.flags);
        BUG_ON(iovec.addr < mem->phys_base);
        BUG_ON(iovec.addr >= mem->phys_base + mem->size);

        /* Calculate the buffer index */
        index = (iovec.addr - mem->phys_base) / 4;
        *(data + offset) = *((u32 *)mem->base + index);
        pr_debug("buf = 0x%p, data = 0x%x\n", (u32 *)mem->base + index, *data);

        /* Add the buffer back to the queue */
        (void)msm_slim_post_rx_msgq(dev, index);

err_exit:
        return ret;
}
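
/*
 * Usage sketch (editor's illustration): RX message-queue descriptors are
 * one word each, so a caller assembles an n-word message by calling
 * msm_slim_rx_msgq_get() with an increasing offset (n_words would come
 * from the message header):
 *
 *        u32 msg[10];
 *        int w;
 *
 *        for (w = 0; w < n_words; w++)
 *                if (msm_slim_rx_msgq_get(dev, msg, w))
 *                        break;
 */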

int msm_slim_connect_endp(struct msm_slim_ctrl *dev,
                                struct msm_slim_endp *endpoint,
                                struct completion *notify)
{
        int i, ret;
        struct sps_register_event sps_error_event; /* SPS_ERROR */
        struct sps_register_event sps_descr_event; /* DESCR_DONE */
        struct sps_connect *config = &endpoint->config;

        ret = sps_connect(endpoint->sps, config);
        if (ret) {
                dev_err(dev->dev, "sps_connect failed 0x%x\n", ret);
                return ret;
        }

        memset(&sps_descr_event, 0x00, sizeof(sps_descr_event));
        if (notify) {
                sps_descr_event.mode = SPS_TRIGGER_CALLBACK;
                sps_descr_event.options = SPS_O_DESC_DONE;
                sps_descr_event.user = (void *)dev;
                sps_descr_event.xfer_done = notify;
                ret = sps_register_event(endpoint->sps, &sps_descr_event);
                if (ret) {
                        dev_err(dev->dev, "sps_register_event failed 0x%x\n",
                                                                        ret);
                        goto sps_reg_event_failed;
                }
        }

        /* Register a callback for errors */
        memset(&sps_error_event, 0x00, sizeof(sps_error_event));
        sps_error_event.mode = SPS_TRIGGER_CALLBACK;
        sps_error_event.options = SPS_O_ERROR;
        sps_error_event.user = (void *)dev;
        sps_error_event.callback = msm_slim_rx_msgq_cb;
        ret = sps_register_event(endpoint->sps, &sps_error_event);
        if (ret) {
                dev_err(dev->dev, "sps_register_event failed 0x%x\n", ret);
                goto sps_reg_event_failed;
        }

        /*
         * Call transfer_one for each 4-byte buffer.
         * Use (buf->size / 4) - 1 for the number of buffers to post.
         */
        if (endpoint == &dev->rx_msgq) {
                /* Set up the transfers */
                for (i = 0; i < (MSM_SLIM_DESC_NUM - 1); i++) {
                        ret = msm_slim_post_rx_msgq(dev, i);
                        if (ret) {
                                dev_err(dev->dev,
                                        "post_rx_msgq() failed 0x%x\n", ret);
                                goto sps_transfer_failed;
                        }
                }
                dev->use_rx_msgqs = MSM_MSGQ_ENABLED;
        } else {
                dev->tx_tail = 0;
                dev->tx_head = 0;
                dev->use_tx_msgqs = MSM_MSGQ_ENABLED;
        }
        return 0;

sps_transfer_failed:
        memset(&sps_error_event, 0x00, sizeof(sps_error_event));
        sps_register_event(endpoint->sps, &sps_error_event);
sps_reg_event_failed:
        sps_disconnect(endpoint->sps);
        return ret;
}

static int msm_slim_init_rx_msgq(struct msm_slim_ctrl *dev, u32 pipe_reg)
{
        int ret;
        u32 pipe_offset;
        struct msm_slim_endp *endpoint = &dev->rx_msgq;
        struct sps_connect *config = &endpoint->config;
        struct sps_mem_buffer *descr = &config->desc;
        struct sps_mem_buffer *mem = &endpoint->buf;
        struct completion *notify = &dev->rx_msgq_notify;

        init_completion(notify);
        if (dev->use_rx_msgqs == MSM_MSGQ_DISABLED)
                return 0;

        /* Allocate the endpoint */
        ret = msm_slim_init_endpoint(dev, endpoint);
        if (ret) {
                dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
                goto sps_init_endpoint_failed;
        }

        /* Get the pipe indices for the message queues */
        pipe_offset = (readl_relaxed(dev->base + pipe_reg) & 0xfc) >> 2;
        dev_dbg(dev->dev, "Message queue pipe offset %d\n", pipe_offset);

        config->mode = SPS_MODE_SRC;
        config->source = dev->bam.hdl;
        config->destination = SPS_DEV_HANDLE_MEM;
        config->src_pipe_index = pipe_offset;
        config->options = SPS_O_DESC_DONE | SPS_O_ERROR |
                                SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

        /* Allocate memory for the FIFO descriptors */
        ret = msm_slim_sps_mem_alloc(dev, descr,
                                MSM_SLIM_DESC_NUM * sizeof(struct sps_iovec));
        if (ret) {
                dev_err(dev->dev, "unable to allocate SPS descriptors\n");
                goto alloc_descr_failed;
        }

        /* Allocate memory for the message buffers: N descriptors, 4-byte messages */
        ret = msm_slim_sps_mem_alloc(dev, mem, MSM_SLIM_DESC_NUM * 4);
        if (ret) {
                dev_err(dev->dev, "dma_alloc_coherent failed\n");
                goto alloc_buffer_failed;
        }

        ret = msm_slim_connect_endp(dev, endpoint, notify);
        if (!ret)
                return 0;

        msm_slim_sps_mem_free(dev, mem);
alloc_buffer_failed:
        msm_slim_sps_mem_free(dev, descr);
alloc_descr_failed:
        msm_slim_free_endpoint(endpoint);
sps_init_endpoint_failed:
        dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
        return ret;
}

static int msm_slim_init_tx_msgq(struct msm_slim_ctrl *dev, u32 pipe_reg)
{
        int ret;
        u32 pipe_offset;
        struct msm_slim_endp *endpoint = &dev->tx_msgq;
        struct sps_connect *config = &endpoint->config;
        struct sps_mem_buffer *descr = &config->desc;
        struct sps_mem_buffer *mem = &endpoint->buf;

        if (dev->use_tx_msgqs == MSM_MSGQ_DISABLED)
                return 0;

        /* Allocate the endpoint */
        ret = msm_slim_init_endpoint(dev, endpoint);
        if (ret) {
                dev_err(dev->dev, "init_endpoint failed 0x%x\n", ret);
                goto sps_init_endpoint_failed;
        }

        /* Get the pipe indices for the message queues */
        pipe_offset = (readl_relaxed(dev->base + pipe_reg) & 0xfc) >> 2;
        pipe_offset += 1;
        dev_dbg(dev->dev, "TX message queue pipe offset %d\n", pipe_offset);

        config->mode = SPS_MODE_DEST;
        config->source = SPS_DEV_HANDLE_MEM;
        config->destination = dev->bam.hdl;
        config->dest_pipe_index = pipe_offset;
        config->src_pipe_index = 0;
        config->options = SPS_O_ERROR | SPS_O_NO_Q |
                                SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

        /* The descriptor FIFO and the TX buffer are circular queues */
        /* Allocate memory for the FIFO descriptors */
        ret = msm_slim_sps_mem_alloc(dev, descr,
                                (MSM_TX_BUFS + 1) * sizeof(struct sps_iovec));
        if (ret) {
                dev_err(dev->dev, "unable to allocate SPS descriptors\n");
                goto alloc_descr_failed;
        }

        /* Allocate the TX buffer from which descriptors are created */
        ret = msm_slim_sps_mem_alloc(dev, mem, ((MSM_TX_BUFS + 1) *
                                SLIM_MSGQ_BUF_LEN));
        if (ret) {
                dev_err(dev->dev, "dma_alloc_coherent failed\n");
                goto alloc_buffer_failed;
        }
        ret = msm_slim_connect_endp(dev, endpoint, NULL);
        if (!ret)
                return 0;

        msm_slim_sps_mem_free(dev, mem);
alloc_buffer_failed:
        msm_slim_sps_mem_free(dev, descr);
alloc_descr_failed:
        msm_slim_free_endpoint(endpoint);
sps_init_endpoint_failed:
        dev->use_tx_msgqs = MSM_MSGQ_DISABLED;
        return ret;
}

/* Registers the BAM h/w resource with the SPS driver and initializes msgq endpoints */
int msm_slim_sps_init(struct msm_slim_ctrl *dev, struct resource *bam_mem,
                        u32 pipe_reg, bool remote)
{
        int i, ret;
        u32 bam_handle;
        struct sps_bam_props bam_props = {0};
        static struct sps_bam_sec_config_props sec_props = {
                .ees = {
                        [0] = {         /* LPASS */
                                .vmid = 0,
                                .pipe_mask = 0xFFFF98,
                        },
                        [1] = {         /* Krait Apps */
                                .vmid = 1,
                                .pipe_mask = 0x3F000007,
                        },
                        [2] = {         /* Modem */
                                .vmid = 2,
                                .pipe_mask = 0x00000060,
                        },
                },
        };

        if (dev->bam.hdl) {
                bam_handle = dev->bam.hdl;
                goto init_msgq;
        }
        bam_props.ee = dev->ee;
        bam_props.virt_addr = dev->bam.base;
        bam_props.phys_addr = bam_mem->start;
        bam_props.irq = dev->bam.irq;
        if (!remote) {
                bam_props.manage = SPS_BAM_MGR_LOCAL;
                bam_props.sec_config = SPS_BAM_SEC_DO_CONFIG;
        } else {
                bam_props.manage = SPS_BAM_MGR_DEVICE_REMOTE |
                                        SPS_BAM_MGR_MULTI_EE;
                bam_props.sec_config = SPS_BAM_SEC_DO_NOT_CONFIG;
        }
        bam_props.summing_threshold = MSM_SLIM_PERF_SUMM_THRESHOLD;
        bam_props.p_sec_config_props = &sec_props;
        bam_props.options = SPS_O_DESC_DONE | SPS_O_ERROR |
                                SPS_O_ACK_TRANSFERS | SPS_O_AUTO_ENABLE;

        /* Override the Apps channel pipes if specified in platform data or DT */
        if (dev->pdata.apps_pipes)
                sec_props.ees[dev->ee].pipe_mask = dev->pdata.apps_pipes;

        /* The first 7 bits are for message queues */
        for (i = 7; i < 32; i++) {
                /* Find the first data pipe owned by Apps */
                if ((sec_props.ees[dev->ee].pipe_mask >> i) & 0x1)
                        break;
        }
        dev->port_b = i - 7;

        /* Register the BAM device with the SPS driver */
        ret = sps_register_bam_device(&bam_props, &bam_handle);
        if (ret) {
                dev_err(dev->dev, "disabling BAM: reg-bam failed 0x%x\n", ret);
                dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
                dev->use_tx_msgqs = MSM_MSGQ_DISABLED;
                return ret;
        }
        dev->bam.hdl = bam_handle;
        dev_dbg(dev->dev, "SLIM BAM registered, handle = 0x%x\n", bam_handle);

init_msgq:
        ret = msm_slim_init_rx_msgq(dev, pipe_reg);
        if (ret)
                dev_err(dev->dev, "msm_slim_init_rx_msgq failed 0x%x\n", ret);
        if (ret && bam_handle)
                dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
        ret = msm_slim_init_tx_msgq(dev, pipe_reg);
        if (ret)
                dev_err(dev->dev, "msm_slim_init_tx_msgq failed 0x%x\n", ret);
        if (ret && bam_handle)
                dev->use_tx_msgqs = MSM_MSGQ_DISABLED;
        if (dev->use_tx_msgqs == MSM_MSGQ_DISABLED &&
                dev->use_rx_msgqs == MSM_MSGQ_DISABLED && bam_handle) {
                sps_deregister_bam_device(bam_handle);
                dev->bam.hdl = 0L;
        }
        return ret;
}
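
/*
 * Editor's note (worked example): with the default Krait Apps mask
 * 0x3F000007, bits 0-2 cover the message-queue pipes and the first data
 * pipe owned by Apps is BAM pipe 24; since the first seven pipes are
 * reserved for message queues, dev->port_b = 24 - 7 = 17, i.e. pipe-port 0
 * of this controller maps onto PGD port 17.
 */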

void msm_slim_disconnect_endp(struct msm_slim_ctrl *dev,
                                struct msm_slim_endp *endpoint,
                                enum msm_slim_msgq *msgq_flag)
{
        if (*msgq_flag == MSM_MSGQ_ENABLED) {
                sps_disconnect(endpoint->sps);
                *msgq_flag = MSM_MSGQ_RESET;
        }
}

static void msm_slim_remove_ep(struct msm_slim_ctrl *dev,
                                struct msm_slim_endp *endpoint,
                                enum msm_slim_msgq *msgq_flag)
{
        struct sps_connect *config = &endpoint->config;
        struct sps_mem_buffer *descr = &config->desc;
        struct sps_mem_buffer *mem = &endpoint->buf;
        struct sps_register_event sps_event;

        memset(&sps_event, 0x00, sizeof(sps_event));
        msm_slim_sps_mem_free(dev, mem);
        sps_register_event(endpoint->sps, &sps_event);
        if (*msgq_flag == MSM_MSGQ_ENABLED) {
                msm_slim_disconnect_endp(dev, endpoint, msgq_flag);
                msm_slim_free_endpoint(endpoint);
        }
        msm_slim_sps_mem_free(dev, descr);
}

void msm_slim_sps_exit(struct msm_slim_ctrl *dev, bool dereg)
{
        int i;

        if (dev->use_rx_msgqs >= MSM_MSGQ_ENABLED)
                msm_slim_remove_ep(dev, &dev->rx_msgq, &dev->use_rx_msgqs);
        if (dev->use_tx_msgqs >= MSM_MSGQ_ENABLED)
                msm_slim_remove_ep(dev, &dev->tx_msgq, &dev->use_tx_msgqs);
        for (i = dev->port_b; i < MSM_SLIM_NPORTS; i++) {
                if (dev->pipes[i - dev->port_b].connected)
                        msm_slim_disconn_pipe_port(dev, i - dev->port_b);
        }
        if (dereg) {
                for (i = dev->port_b; i < MSM_SLIM_NPORTS; i++) {
                        if (dev->pipes[i - dev->port_b].connected)
                                msm_dealloc_port(&dev->ctrl,
                                                i - dev->port_b);
                }
                sps_deregister_bam_device(dev->bam.hdl);
                dev->bam.hdl = 0L;
        }
}

/* SLIMbus QMI messaging */
#define SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01 0x0020
#define SLIMBUS_QMI_SELECT_INSTANCE_RESP_V01 0x0020
#define SLIMBUS_QMI_POWER_REQ_V01 0x0021
#define SLIMBUS_QMI_POWER_RESP_V01 0x0021
#define SLIMBUS_QMI_CHECK_FRAMER_STATUS_REQ 0x0022
#define SLIMBUS_QMI_CHECK_FRAMER_STATUS_RESP 0x0022

#define SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN 7
#define SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN 7
#define SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN 14
#define SLIMBUS_QMI_SELECT_INSTANCE_RESP_MAX_MSG_LEN 7
#define SLIMBUS_QMI_CHECK_FRAMER_STAT_RESP_MAX_MSG_LEN 7

enum slimbus_mode_enum_type_v01 {
        /* To force a 32-bit signed enum. Do not change or use. */
        SLIMBUS_MODE_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
        SLIMBUS_MODE_SATELLITE_V01 = 1,
        SLIMBUS_MODE_MASTER_V01 = 2,
        SLIMBUS_MODE_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
};

enum slimbus_pm_enum_type_v01 {
        /* To force a 32-bit signed enum. Do not change or use. */
        SLIMBUS_PM_ENUM_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
        SLIMBUS_PM_INACTIVE_V01 = 1,
        SLIMBUS_PM_ACTIVE_V01 = 2,
        SLIMBUS_PM_ENUM_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
};

struct slimbus_select_inst_req_msg_v01 {
        /* Mandatory: hardware instance selection */
        uint32_t instance;

        /* Optional: mode request operation */
        uint8_t mode_valid; /* Must be set to true if mode is being passed */
        enum slimbus_mode_enum_type_v01 mode;
};

struct slimbus_select_inst_resp_msg_v01 {
        /* Mandatory: result code */
        struct qmi_response_type_v01 resp;
};

struct slimbus_power_req_msg_v01 {
        /* Mandatory: power request operation */
        enum slimbus_pm_enum_type_v01 pm_req;
};

struct slimbus_power_resp_msg_v01 {
        /* Mandatory: result code */
        struct qmi_response_type_v01 resp;
};

struct slimbus_chkfrm_resp_msg {
        /* Mandatory: result code */
        struct qmi_response_type_v01 resp;
};

static struct elem_info slimbus_select_inst_req_msg_v01_ei[] = {
        {
                .data_type = QMI_UNSIGNED_4_BYTE,
                .elem_len = 1,
                .elem_size = sizeof(uint32_t),
                .is_array = NO_ARRAY,
                .tlv_type = 0x01,
                .offset = offsetof(struct slimbus_select_inst_req_msg_v01,
                                        instance),
                .ei_array = NULL,
        },
        {
                .data_type = QMI_OPT_FLAG,
                .elem_len = 1,
                .elem_size = sizeof(uint8_t),
                .is_array = NO_ARRAY,
                .tlv_type = 0x10,
                .offset = offsetof(struct slimbus_select_inst_req_msg_v01,
                                        mode_valid),
                .ei_array = NULL,
        },
        {
                .data_type = QMI_UNSIGNED_4_BYTE,
                .elem_len = 1,
                .elem_size = sizeof(enum slimbus_mode_enum_type_v01),
                .is_array = NO_ARRAY,
                .tlv_type = 0x10,
                .offset = offsetof(struct slimbus_select_inst_req_msg_v01,
                                        mode),
                .ei_array = NULL,
        },
        {
                .data_type = QMI_EOTI,
                .elem_len = 0,
                .elem_size = 0,
                .is_array = NO_ARRAY,
                .tlv_type = 0x00,
                .offset = 0,
                .ei_array = NULL,
        },
};

static struct elem_info slimbus_select_inst_resp_msg_v01_ei[] = {
        {
                .data_type = QMI_STRUCT,
                .elem_len = 1,
                .elem_size = sizeof(struct qmi_response_type_v01),
                .is_array = NO_ARRAY,
                .tlv_type = 0x02,
                .offset = offsetof(struct slimbus_select_inst_resp_msg_v01,
                                        resp),
                .ei_array = get_qmi_response_type_v01_ei(),
        },
        {
                .data_type = QMI_EOTI,
                .elem_len = 0,
                .elem_size = 0,
                .is_array = NO_ARRAY,
                .tlv_type = 0x00,
                .offset = 0,
                .ei_array = NULL,
        },
};

static struct elem_info slimbus_power_req_msg_v01_ei[] = {
        {
                .data_type = QMI_UNSIGNED_4_BYTE,
                .elem_len = 1,
                .elem_size = sizeof(enum slimbus_pm_enum_type_v01),
                .is_array = NO_ARRAY,
                .tlv_type = 0x01,
                .offset = offsetof(struct slimbus_power_req_msg_v01, pm_req),
                .ei_array = NULL,
        },
        {
                .data_type = QMI_EOTI,
                .elem_len = 0,
                .elem_size = 0,
                .is_array = NO_ARRAY,
                .tlv_type = 0x00,
                .offset = 0,
                .ei_array = NULL,
        },
};

static struct elem_info slimbus_power_resp_msg_v01_ei[] = {
        {
                .data_type = QMI_STRUCT,
                .elem_len = 1,
                .elem_size = sizeof(struct qmi_response_type_v01),
                .is_array = NO_ARRAY,
                .tlv_type = 0x02,
                .offset = offsetof(struct slimbus_power_resp_msg_v01, resp),
                .ei_array = get_qmi_response_type_v01_ei(),
        },
        {
                .data_type = QMI_EOTI,
                .elem_len = 0,
                .elem_size = 0,
                .is_array = NO_ARRAY,
                .tlv_type = 0x00,
                .offset = 0,
                .ei_array = NULL,
        },
};

static struct elem_info slimbus_chkfrm_resp_msg_v01_ei[] = {
        {
                .data_type = QMI_STRUCT,
                .elem_len = 1,
                .elem_size = sizeof(struct qmi_response_type_v01),
                .is_array = NO_ARRAY,
                .tlv_type = 0x02,
                .offset = offsetof(struct slimbus_chkfrm_resp_msg, resp),
                .ei_array = get_qmi_response_type_v01_ei(),
        },
        {
                .data_type = QMI_EOTI,
                .elem_len = 0,
                .elem_size = 0,
                .is_array = NO_ARRAY,
                .tlv_type = 0x00,
                .offset = 0,
                .ei_array = NULL,
        },
};

static void msm_slim_qmi_recv_msg(struct kthread_work *work)
{
        int rc;
        struct msm_slim_qmi *qmi =
                container_of(work, struct msm_slim_qmi, kwork);

        rc = qmi_recv_msg(qmi->handle);
        if (rc < 0)
                pr_err("%s: Error receiving QMI message\n", __func__);
}

static void msm_slim_qmi_notify(struct qmi_handle *handle,
                                enum qmi_event_type event, void *notify_priv)
{
        struct msm_slim_ctrl *dev = notify_priv;
        struct msm_slim_qmi *qmi = &dev->qmi;

        switch (event) {
        case QMI_RECV_MSG:
                queue_kthread_work(&qmi->kworker, &qmi->kwork);
                break;
        default:
                break;
        }
}

static const char *get_qmi_error(struct qmi_response_type_v01 *r)
{
        if (r->result == QMI_RESULT_SUCCESS_V01 || r->error == QMI_ERR_NONE_V01)
                return "No Error";
        else if (r->error == QMI_ERR_NO_MEMORY_V01)
                return "Out of Memory";
        else if (r->error == QMI_ERR_INTERNAL_V01)
                return "Unexpected error occurred";
        else if (r->error == QMI_ERR_INCOMPATIBLE_STATE_V01)
                return "Slimbus s/w already configured to a different mode";
        else if (r->error == QMI_ERR_INVALID_ID_V01)
                return "Slimbus hardware instance is not valid";
        else
                return "Unknown error";
}

static int msm_slim_qmi_send_select_inst_req(struct msm_slim_ctrl *dev,
                                struct slimbus_select_inst_req_msg_v01 *req)
{
        struct slimbus_select_inst_resp_msg_v01 resp = { { 0, 0 } };
        struct msg_desc req_desc, resp_desc;
        int rc;

        req_desc.msg_id = SLIMBUS_QMI_SELECT_INSTANCE_REQ_V01;
        req_desc.max_msg_len = SLIMBUS_QMI_SELECT_INSTANCE_REQ_MAX_MSG_LEN;
        req_desc.ei_array = slimbus_select_inst_req_msg_v01_ei;

        resp_desc.msg_id = SLIMBUS_QMI_SELECT_INSTANCE_RESP_V01;
        resp_desc.max_msg_len = SLIMBUS_QMI_SELECT_INSTANCE_RESP_MAX_MSG_LEN;
        resp_desc.ei_array = slimbus_select_inst_resp_msg_v01_ei;

        rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, req, sizeof(*req),
                                &resp_desc, &resp, sizeof(resp), 5000);
        if (rc < 0) {
                SLIM_ERR(dev, "%s: QMI send req failed %d\n", __func__, rc);
                return rc;
        }

        /* Check the response */
        if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
                SLIM_ERR(dev, "%s: QMI request failed 0x%x (%s)\n", __func__,
                                resp.resp.result, get_qmi_error(&resp.resp));
                return -EREMOTEIO;
        }
        return 0;
}

static int msm_slim_qmi_send_power_request(struct msm_slim_ctrl *dev,
                                struct slimbus_power_req_msg_v01 *req)
{
        struct slimbus_power_resp_msg_v01 resp = { { 0, 0 } };
        struct msg_desc req_desc, resp_desc;
        int rc;

        req_desc.msg_id = SLIMBUS_QMI_POWER_REQ_V01;
        req_desc.max_msg_len = SLIMBUS_QMI_POWER_REQ_MAX_MSG_LEN;
        req_desc.ei_array = slimbus_power_req_msg_v01_ei;

        resp_desc.msg_id = SLIMBUS_QMI_POWER_RESP_V01;
        resp_desc.max_msg_len = SLIMBUS_QMI_POWER_RESP_MAX_MSG_LEN;
        resp_desc.ei_array = slimbus_power_resp_msg_v01_ei;

        rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, req, sizeof(*req),
                                &resp_desc, &resp, sizeof(resp), 5000);
        if (rc < 0) {
                SLIM_ERR(dev, "%s: QMI send req failed %d\n", __func__, rc);
                return rc;
        }

        /* Check the response */
        if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
                SLIM_ERR(dev, "%s: QMI request failed 0x%x (%s)\n", __func__,
                                resp.resp.result, get_qmi_error(&resp.resp));
                return -EREMOTEIO;
        }
        return 0;
}

int msm_slim_qmi_init(struct msm_slim_ctrl *dev, bool apps_is_master)
{
        int rc = 0;
        struct qmi_handle *handle;
        struct slimbus_select_inst_req_msg_v01 req;

        init_kthread_worker(&dev->qmi.kworker);

        dev->qmi.task = kthread_run(kthread_worker_fn,
                        &dev->qmi.kworker, "msm_slim_qmi_clnt%d", dev->ctrl.nr);
        if (IS_ERR(dev->qmi.task)) {
                pr_err("%s: Failed to create QMI client kthread\n", __func__);
                return -ENOMEM;
        }

        init_kthread_work(&dev->qmi.kwork, msm_slim_qmi_recv_msg);

        handle = qmi_handle_create(msm_slim_qmi_notify, dev);
        if (!handle) {
                rc = -ENOMEM;
                pr_err("%s: QMI client handle alloc failed\n", __func__);
                goto qmi_handle_create_failed;
        }

        rc = qmi_connect_to_service(handle, SLIMBUS_QMI_SVC_ID,
                                        SLIMBUS_QMI_SVC_V1,
                                        SLIMBUS_QMI_INS_ID);
        if (rc < 0) {
                SLIM_ERR(dev, "%s: QMI server not found\n", __func__);
                goto qmi_connect_to_service_failed;
        }

        /* Instance is 0-based */
        req.instance = dev->ctrl.nr - 1;
        req.mode_valid = 1;

        /* Mode indicates the role of the ADSP */
        if (apps_is_master)
                req.mode = SLIMBUS_MODE_SATELLITE_V01;
        else
                req.mode = SLIMBUS_MODE_MASTER_V01;

        dev->qmi.handle = handle;

        rc = msm_slim_qmi_send_select_inst_req(dev, &req);
        if (rc) {
                pr_err("%s: failed to select h/w instance\n", __func__);
                goto qmi_select_instance_failed;
        }
        return 0;

qmi_select_instance_failed:
        dev->qmi.handle = NULL;
qmi_connect_to_service_failed:
        qmi_handle_destroy(handle);
qmi_handle_create_failed:
        flush_kthread_worker(&dev->qmi.kworker);
        kthread_stop(dev->qmi.task);
        dev->qmi.task = NULL;
        return rc;
}

void msm_slim_qmi_exit(struct msm_slim_ctrl *dev)
{
        qmi_handle_destroy(dev->qmi.handle);
        flush_kthread_worker(&dev->qmi.kworker);
        kthread_stop(dev->qmi.task);
        dev->qmi.task = NULL;
        dev->qmi.handle = NULL;
}

int msm_slim_qmi_power_request(struct msm_slim_ctrl *dev, bool active)
{
        struct slimbus_power_req_msg_v01 req;

        if (active)
                req.pm_req = SLIMBUS_PM_ACTIVE_V01;
        else
                req.pm_req = SLIMBUS_PM_INACTIVE_V01;

        return msm_slim_qmi_send_power_request(dev, &req);
}
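
/*
 * Usage sketch (editor's illustration, assumed pairing): a caller would
 * typically vote the bus active before use and inactive when idle, e.g.
 * from its runtime-PM resume/suspend paths:
 *
 *        msm_slim_qmi_power_request(dev, true);   (resume path)
 *        msm_slim_qmi_power_request(dev, false);  (suspend path)
 */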

int msm_slim_qmi_check_framer_request(struct msm_slim_ctrl *dev)
{
        struct slimbus_chkfrm_resp_msg resp = { { 0, 0 } };
        struct msg_desc req_desc, resp_desc;
        int rc;

        req_desc.msg_id = SLIMBUS_QMI_CHECK_FRAMER_STATUS_REQ;
        req_desc.max_msg_len = 0;
        req_desc.ei_array = NULL;

        resp_desc.msg_id = SLIMBUS_QMI_CHECK_FRAMER_STATUS_RESP;
        resp_desc.max_msg_len = SLIMBUS_QMI_CHECK_FRAMER_STAT_RESP_MAX_MSG_LEN;
        resp_desc.ei_array = slimbus_chkfrm_resp_msg_v01_ei;

        rc = qmi_send_req_wait(dev->qmi.handle, &req_desc, NULL, 0,
                                &resp_desc, &resp, sizeof(resp), 5000);
        if (rc < 0) {
                SLIM_ERR(dev, "%s: QMI send req failed %d\n", __func__, rc);
                return rc;
        }

        /* Check the response */
        if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
                SLIM_ERR(dev, "%s: QMI request failed 0x%x (%s)\n",
                        __func__, resp.resp.result, get_qmi_error(&resp.resp));
                return -EREMOTEIO;
        }
        return 0;
}