slim-msm-ctrl.c
  1. /* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. */
  12. #include <linux/irq.h>
  13. #include <linux/kernel.h>
  14. #include <linux/init.h>
  15. #include <linux/slab.h>
  16. #include <linux/io.h>
  17. #include <linux/interrupt.h>
  18. #include <linux/platform_device.h>
  19. #include <linux/slimbus/slimbus.h>
  20. #include <linux/delay.h>
  21. #include <linux/kthread.h>
  22. #include <linux/clk.h>
  23. #include <linux/pm_runtime.h>
  24. #include <linux/of.h>
  25. #include <linux/of_slimbus.h>
  26. #include <mach/sps.h>
  27. #include <mach/qdsp6v2/apr.h>
  28. #include "slim-msm.h"
  29. #define MSM_SLIM_NAME "msm_slim_ctrl"
  30. #define SLIM_ROOT_FREQ 24576000
  31. #define QC_MSM_DEVS 5
/* Manager (MGR) component register offsets, relative to dev->base */
enum mgr_reg {
	MGR_CFG		= 0x200,	/* manager configuration/enable */
	MGR_STATUS	= 0x204,	/* manager status (dumped on TX NACK) */
	MGR_RX_MSGQ_CFG	= 0x208,	/* RX message-queue configuration */
	MGR_INT_EN	= 0x210,	/* interrupt enable */
	MGR_INT_STAT	= 0x214,	/* interrupt status (read in ISR) */
	MGR_INT_CLR	= 0x218,	/* interrupt clear: write the bit to ack */
	MGR_TX_MSG	= 0x230,	/* TX message window */
	MGR_RX_MSG	= 0x270,	/* RX message window (read word by word) */
	MGR_IE_STAT	= 0x2F0,	/* information-element status */
	MGR_VE_STAT	= 0x300,	/* value-element status */
};
/* Bit fields of the MGR_CFG register */
enum msg_cfg {
	MGR_CFG_ENABLE		= 1,		/* enable the manager block */
	MGR_CFG_RX_MSGQ_EN	= 1 << 1,	/* enable RX message queue */
	MGR_CFG_TX_MSGQ_EN_HIGH	= 1 << 2,	/* enable high-priority TX queue */
	MGR_CFG_TX_MSGQ_EN_LOW	= 1 << 3,	/* enable low-priority TX queue */
};
/* Message queue types (indices for the BAM message queues) */
enum msm_slim_msgq_type {
	MSGQ_RX		= 0,	/* receive queue */
	MSGQ_TX_LOW	= 1,	/* low-priority transmit queue */
	MSGQ_TX_HIGH	= 2,	/* high-priority transmit queue */
};
/* Framer (FRM) component register offsets, relative to dev->base */
enum frm_reg {
	FRM_CFG		= 0x400,	/* framer configuration */
	FRM_STAT	= 0x404,	/* framer status (dumped on TX NACK) */
	FRM_INT_EN	= 0x410,	/* interrupt enable */
	FRM_INT_STAT	= 0x414,	/* interrupt status */
	FRM_INT_CLR	= 0x418,	/* interrupt clear */
	FRM_WAKEUP	= 0x41C,	/* written with 1 to wake from clock pause */
	FRM_CLKCTL_DONE	= 0x420,	/* clock-control done */
	FRM_IE_STAT	= 0x430,	/* information-element status */
	FRM_VE_STAT	= 0x440,	/* value-element status */
};
/* Interface (INTF) component register offsets, relative to dev->base */
enum intf_reg {
	INTF_CFG	= 0x600,	/* interface configuration */
	INTF_STAT	= 0x604,	/* interface status (dumped on TX NACK) */
	INTF_INT_EN	= 0x610,	/* interrupt enable */
	INTF_INT_STAT	= 0x614,	/* interrupt status */
	INTF_INT_CLR	= 0x618,	/* interrupt clear */
	INTF_IE_STAT	= 0x630,	/* information-element status */
	INTF_VE_STAT	= 0x640,	/* value-element status */
};
/* MGR interrupt bits, shared by MGR_INT_STAT/MGR_INT_EN/MGR_INT_CLR */
enum mgr_intr {
	MGR_INT_RECFG_DONE	= 1 << 24,	/* reconfiguration sequence applied */
	MGR_INT_TX_NACKED_2	= 1 << 25,	/* TX message was NACKed on the bus */
	MGR_INT_MSG_BUF_CONTE	= 1 << 26,
	MGR_INT_RX_MSG_RCVD	= 1 << 30,	/* RX message available in MGR_RX_MSG */
	/*
	 * NOTE(review): 1 << 31 overflows signed int (UB in ISO C);
	 * kernel convention would be 1u << 31 — confirm before changing,
	 * since enum constant type would change too.
	 */
	MGR_INT_TX_MSG_SENT	= 1 << 31,	/* TX message fully sent */
};
/*
 * FRM_CFG fields; values are not powers of two, so these are presumably
 * bit *positions* within FRM_CFG rather than masks — TODO confirm against
 * the framer programming sequence (not visible in this file chunk).
 */
enum frm_cfg {
	FRM_ACTIVE	= 1,
	CLK_GEAR	= 7,
	ROOT_FREQ	= 11,
	REF_CLK_GEAR	= 15,
	INTR_WAKE	= 19,
};
  93. static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev);
  94. static int msm_sat_enqueue(struct msm_slim_sat *sat, u32 *buf, u8 len)
  95. {
  96. struct msm_slim_ctrl *dev = sat->dev;
  97. unsigned long flags;
  98. spin_lock_irqsave(&sat->lock, flags);
  99. if ((sat->stail + 1) % SAT_CONCUR_MSG == sat->shead) {
  100. spin_unlock_irqrestore(&sat->lock, flags);
  101. dev_err(dev->dev, "SAT QUEUE full!");
  102. return -EXFULL;
  103. }
  104. memcpy(sat->sat_msgs[sat->stail], (u8 *)buf, len);
  105. sat->stail = (sat->stail + 1) % SAT_CONCUR_MSG;
  106. spin_unlock_irqrestore(&sat->lock, flags);
  107. return 0;
  108. }
  109. static int msm_sat_dequeue(struct msm_slim_sat *sat, u8 *buf)
  110. {
  111. unsigned long flags;
  112. spin_lock_irqsave(&sat->lock, flags);
  113. if (sat->stail == sat->shead) {
  114. spin_unlock_irqrestore(&sat->lock, flags);
  115. return -ENODATA;
  116. }
  117. memcpy(buf, sat->sat_msgs[sat->shead], 40);
  118. sat->shead = (sat->shead + 1) % SAT_CONCUR_MSG;
  119. spin_unlock_irqrestore(&sat->lock, flags);
  120. return 0;
  121. }
  122. static void msm_get_eaddr(u8 *e_addr, u32 *buffer)
  123. {
  124. e_addr[0] = (buffer[1] >> 24) & 0xff;
  125. e_addr[1] = (buffer[1] >> 16) & 0xff;
  126. e_addr[2] = (buffer[1] >> 8) & 0xff;
  127. e_addr[3] = buffer[1] & 0xff;
  128. e_addr[4] = (buffer[0] >> 24) & 0xff;
  129. e_addr[5] = (buffer[0] >> 16) & 0xff;
  130. }
  131. static bool msm_is_sat_dev(u8 *e_addr)
  132. {
  133. if (e_addr[5] == QC_MFGID_LSB && e_addr[4] == QC_MFGID_MSB &&
  134. e_addr[2] != QC_CHIPID_SL &&
  135. (e_addr[1] == QC_DEVID_SAT1 || e_addr[1] == QC_DEVID_SAT2))
  136. return true;
  137. return false;
  138. }
  139. static struct msm_slim_sat *addr_to_sat(struct msm_slim_ctrl *dev, u8 laddr)
  140. {
  141. struct msm_slim_sat *sat = NULL;
  142. int i = 0;
  143. while (!sat && i < dev->nsats) {
  144. if (laddr == dev->satd[i]->satcl.laddr)
  145. sat = dev->satd[i];
  146. i++;
  147. }
  148. return sat;
  149. }
/*
 * msm_slim_interrupt() - top-level ISR for the MSM SLIMbus manager.
 * @irq: interrupt line (unused directly).
 * @d: cookie registered with request_irq; the struct msm_slim_ctrl.
 *
 * Handles, in order:
 *  1. TX completion: on MSG_SENT just acks; on NACK dumps manager/framer/
 *     interface status registers and latches dev->err = -EIO. Either way
 *     the TX message queue is kicked via msm_slim_manage_tx_msgq().
 *  2. RX message: decodes the 5-bit length, message type (mt) and message
 *     code (mc) from the first word and dispatches: user-referred messages
 *     go to the owning satellite's workqueue; core REPORT_PRESENT/ABSENT
 *     and REPLY messages complete dev->rx_msgq_notify; REPORT_INFORMATION
 *     and unknown messages are logged.
 *  3. Reconfiguration-done: completes dev->reconf.
 * Pending port interrupts are finally delegated to
 * msm_slim_port_irq_handler(). Each handled source writes its bit to
 * MGR_INT_CLR followed by mb() so the clear is posted before any
 * completion/queueing that may wake a waiter.
 */
static irqreturn_t msm_slim_interrupt(int irq, void *d)
{
	struct msm_slim_ctrl *dev = d;
	u32 pstat;
	u32 stat = readl_relaxed(dev->base + MGR_INT_STAT);
	if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2) {
		if (stat & MGR_INT_TX_MSG_SENT)
			writel_relaxed(MGR_INT_TX_MSG_SENT,
					dev->base + MGR_INT_CLR);
		else {
			/* NACK path: snapshot all status registers before
			 * clearing, so the dump reflects the failure state.
			 */
			u32 mgr_stat = readl_relaxed(dev->base + MGR_STATUS);
			u32 mgr_ie_stat = readl_relaxed(dev->base +
						MGR_IE_STAT);
			u32 frm_stat = readl_relaxed(dev->base + FRM_STAT);
			u32 frm_cfg = readl_relaxed(dev->base + FRM_CFG);
			u32 frm_intr_stat = readl_relaxed(dev->base +
						FRM_INT_STAT);
			u32 frm_ie_stat = readl_relaxed(dev->base +
						FRM_IE_STAT);
			u32 intf_stat = readl_relaxed(dev->base + INTF_STAT);
			u32 intf_intr_stat = readl_relaxed(dev->base +
						INTF_INT_STAT);
			u32 intf_ie_stat = readl_relaxed(dev->base +
						INTF_IE_STAT);
			writel_relaxed(MGR_INT_TX_NACKED_2,
					dev->base + MGR_INT_CLR);
			pr_err("TX Nack MGR dump:int_stat:0x%x, mgr_stat:0x%x",
					stat, mgr_stat);
			pr_err("TX Nack MGR dump:ie_stat:0x%x", mgr_ie_stat);
			pr_err("TX Nack FRM dump:int_stat:0x%x, frm_stat:0x%x",
					frm_intr_stat, frm_stat);
			pr_err("TX Nack FRM dump:frm_cfg:0x%x, ie_stat:0x%x",
					frm_cfg, frm_ie_stat);
			pr_err("TX Nack INTF dump:intr_st:0x%x, intf_stat:0x%x",
					intf_intr_stat, intf_stat);
			pr_err("TX Nack INTF dump:ie_stat:0x%x", intf_ie_stat);
			/* picked up by the waiter in msm_xfer_msg() */
			dev->err = -EIO;
		}
		/*
		 * Guarantee that interrupt clear bit write goes through before
		 * signalling completion/exiting ISR
		 */
		mb();
		msm_slim_manage_tx_msgq(dev, false, NULL);
	}
	if (stat & MGR_INT_RX_MSG_RCVD) {
		u32 rx_buf[10];
		u32 mc, mt;
		u8 len, i;
		/* word 0 carries the 5-bit byte length of the message */
		rx_buf[0] = readl_relaxed(dev->base + MGR_RX_MSG);
		len = rx_buf[0] & 0x1F;
		/* read the remaining ceil(len/4) - 1 words */
		for (i = 1; i < ((len + 3) >> 2); i++) {
			rx_buf[i] = readl_relaxed(dev->base + MGR_RX_MSG +
						(4 * i));
			dev_dbg(dev->dev, "reading data: %x\n", rx_buf[i]);
		}
		mt = (rx_buf[0] >> 5) & 0x7;
		mc = (rx_buf[0] >> 8) & 0xff;
		dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
		if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
			/* user-referred messages belong to a satellite */
			u8 laddr = (u8)((rx_buf[0] >> 16) & 0xFF);
			struct msm_slim_sat *sat = addr_to_sat(dev, laddr);
			if (sat)
				msm_sat_enqueue(sat, rx_buf, len);
			else
				dev_err(dev->dev, "unknown sat:%d message",
						laddr);
			writel_relaxed(MGR_INT_RX_MSG_RCVD,
					dev->base + MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through before
			 * queuing work
			 */
			mb();
			if (sat)
				queue_work(sat->wq, &sat->wd);
		} else if (mt == SLIM_MSG_MT_CORE &&
				mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 e_addr[6];
			msm_get_eaddr(e_addr, rx_buf);
			/* full message is handed to the RX queue; the
			 * non-ISR consumer does the laddr assignment
			 */
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
					MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mt == SLIM_MSG_MT_CORE &&
				mc == SLIM_MSG_MC_REPORT_ABSENT) {
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
					MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			msm_slim_rx_enqueue(dev, rx_buf, len);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
					MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before signalling completion
			 */
			mb();
			complete(&dev->rx_msgq_notify);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			/* log the reported information element and bitmask */
			u8 *buf = (u8 *)rx_buf;
			u8 l_addr = buf[2];
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
					l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
						i, buf[i+5]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
					MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		} else {
			dev_err(dev->dev, "Unexpected MC,%x MT:%x, len:%d",
					mc, mt, len);
			for (i = 0; i < ((len + 3) >> 2); i++)
				dev_err(dev->dev, "error msg: %x", rx_buf[i]);
			writel_relaxed(MGR_INT_RX_MSG_RCVD, dev->base +
					MGR_INT_CLR);
			/*
			 * Guarantee that CLR bit write goes through
			 * before exiting
			 */
			mb();
		}
	}
	if (stat & MGR_INT_RECFG_DONE) {
		writel_relaxed(MGR_INT_RECFG_DONE, dev->base + MGR_INT_CLR);
		/*
		 * Guarantee that CLR bit write goes through
		 * before exiting ISR
		 */
		mb();
		complete(&dev->reconf);
	}
	/* port interrupts are handled by the shared slim-msm helper */
	pstat = readl_relaxed(PGD_THIS_EE(PGD_PORT_INT_ST_EEn, dev->ver));
	if (pstat != 0) {
		return msm_slim_port_irq_handler(dev, pstat);
	}
	return IRQ_HANDLED;
}
/*
 * msm_xfer_msg() - assemble and transmit one SLIMbus message transaction.
 * @ctrl: slimbus controller (framework handle; dev recovered via ctrldata).
 * @txn: transaction descriptor (mt/mc, destination, payload, tid, etc.).
 *
 * Votes for runtime PM ("messaging" vote) unless this is part of a clock
 * pause sequence, serializes on dev->tx_lock, builds the message header
 * and payload into a TX buffer, sends it, and waits up to 1s for the TX
 * completion signalled by the ISR. Special-cases:
 *  - BEGIN_RECONFIGURATION: drains a pending reconfiguration and takes a
 *    separate "data channel" PM vote when slots are in use.
 *  - CONNECT_SOURCE/SINK / DISCONNECT_PORT addressed to 0xFF: rerouted to
 *    the ported-generic device (dev->pgdla); disconnect only disables the
 *    local port and returns without a bus transaction.
 *  - RECONFIGURE_NOW with the clock-pause flag: additionally waits for
 *    reconfiguration-done, then gates the clock and IRQ. (No PM vote was
 *    taken for clock-pause messages, so the early -ETIMEDOUT return
 *    without msm_slim_put_ctrl() is balanced.)
 *
 * Returns 0 or dev->err on completion, -EBUSY when suspended, -ETIMEDOUT
 * on timeout, -EPROTONOSUPPORT for enumeration-address destinations.
 */
static int msm_xfer_msg(struct slim_controller *ctrl, struct slim_msg_txn *txn)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
	u32 *pbuf;
	u8 *puc;
	int timeout;
	int msgv = -1;
	u8 la = txn->la;
	u8 mc = (u8)(txn->mc & 0xFF);
	/*
	 * Voting for runtime PM: Slimbus has 2 possible use cases:
	 * 1. messaging
	 * 2. Data channels
	 * Messaging case goes through messaging slots and data channels
	 * use their own slots
	 * This "get" votes for messaging bandwidth
	 */
	if (!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG))
		msgv = msm_slim_get_ctrl(dev);
	if (msgv >= 0)
		dev->state = MSM_CTRL_AWAKE;
	mutex_lock(&dev->tx_lock);
	if (dev->state == MSM_CTRL_ASLEEP ||
		((!(txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
		dev->state == MSM_CTRL_IDLE)) {
		dev_err(dev->dev, "runtime or system PM suspended state");
		mutex_unlock(&dev->tx_lock);
		if (msgv >= 0)
			msm_slim_put_ctrl(dev);
		return -EBUSY;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION) {
		/* only one reconfiguration may be in flight at a time */
		if (dev->reconf_busy) {
			wait_for_completion(&dev->reconf);
			dev->reconf_busy = false;
		}
		/* This "get" votes for data channels */
		if (dev->ctrl.sched.usedslots != 0 &&
			!dev->chan_active) {
			int chv = msm_slim_get_ctrl(dev);
			if (chv >= 0)
				dev->chan_active = true;
		}
	}
	txn->rl--;
	pbuf = msm_get_msg_buf(dev, txn->rl, &done);
	dev->err = 0;
	if (txn->dt == SLIM_MSG_DEST_ENUMADDR) {
		mutex_unlock(&dev->tx_lock);
		if (msgv >= 0)
			msm_slim_put_ctrl(dev);
		return -EPROTONOSUPPORT;
	}
	/* port connect/disconnect aimed at 0xFF targets the PGD device */
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		mc == SLIM_MSG_MC_CONNECT_SINK ||
		mc == SLIM_MSG_MC_DISCONNECT_PORT))
		la = dev->pgdla;
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 0, la);
	else
		*pbuf = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt, mc, 1, la);
	/* payload begins after the 3-byte (logical) or 2-byte header */
	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR)
		puc = ((u8 *)pbuf) + 3;
	else
		puc = ((u8 *)pbuf) + 2;
	if (txn->rbuf)
		*(puc++) = txn->tid;
	/* value/information-element transactions carry a 2-byte EC field */
	if ((txn->mt == SLIM_MSG_MT_CORE) &&
		((mc >= SLIM_MSG_MC_REQUEST_INFORMATION &&
		mc <= SLIM_MSG_MC_REPORT_INFORMATION) ||
		(mc >= SLIM_MSG_MC_REQUEST_VALUE &&
		mc <= SLIM_MSG_MC_CHANGE_VALUE))) {
		*(puc++) = (txn->ec & 0xFF);
		*(puc++) = (txn->ec >> 8)&0xFF;
	}
	if (txn->wbuf)
		memcpy(puc, txn->wbuf, txn->len);
	if (txn->mt == SLIM_MSG_MT_CORE && txn->la == 0xFF &&
		(mc == SLIM_MSG_MC_CONNECT_SOURCE ||
		mc == SLIM_MSG_MC_CONNECT_SINK ||
		mc == SLIM_MSG_MC_DISCONNECT_PORT)) {
		if (mc != SLIM_MSG_MC_DISCONNECT_PORT)
			dev->err = msm_slim_connect_pipe_port(dev, *puc);
		else {
			/*
			 * Remove channel disconnects master-side ports from
			 * channel. No need to send that again on the bus
			 * Only disable port
			 */
			writel_relaxed(0, PGD_PORT(PGD_PORT_CFGn,
					(*puc + dev->port_b), dev->ver));
			mutex_unlock(&dev->tx_lock);
			if (msgv >= 0)
				msm_slim_put_ctrl(dev);
			return 0;
		}
		if (dev->err) {
			dev_err(dev->dev, "pipe-port connect err:%d", dev->err);
			mutex_unlock(&dev->tx_lock);
			if (msgv >= 0)
				msm_slim_put_ctrl(dev);
			return dev->err;
		}
		/* translate client port number to hardware port number */
		*(puc) = *(puc) + dev->port_b;
	}
	if (txn->mt == SLIM_MSG_MT_CORE &&
		mc == SLIM_MSG_MC_BEGIN_RECONFIGURATION)
		dev->reconf_busy = true;
	msm_send_msg_buf(dev, pbuf, txn->rl, MGR_TX_MSG);
	timeout = wait_for_completion_timeout(&done, HZ);
	if (mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
		/* clock-pause variant: wait for reconfig-done, then gate */
		if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
					SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
				timeout) {
			timeout = wait_for_completion_timeout(&dev->reconf, HZ);
			dev->reconf_busy = false;
			if (timeout) {
				clk_disable_unprepare(dev->rclk);
				disable_irq(dev->irq);
			}
		}
		if ((txn->mc == (SLIM_MSG_MC_RECONFIGURE_NOW |
					SLIM_MSG_CLK_PAUSE_SEQ_FLG)) &&
				!timeout) {
			dev->reconf_busy = false;
			dev_err(dev->dev, "clock pause failed");
			mutex_unlock(&dev->tx_lock);
			return -ETIMEDOUT;
		}
		if (txn->mt == SLIM_MSG_MT_CORE &&
			txn->mc == SLIM_MSG_MC_RECONFIGURE_NOW) {
			/* drop the data-channel PM vote once no slots used */
			if (dev->ctrl.sched.usedslots == 0 &&
				dev->chan_active) {
				dev->chan_active = false;
				msm_slim_put_ctrl(dev);
			}
		}
	}
	mutex_unlock(&dev->tx_lock);
	if (msgv >= 0)
		msm_slim_put_ctrl(dev);
	if (!timeout)
		dev_err(dev->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
				txn->mt);
	return timeout ? dev->err : -ETIMEDOUT;
}
  456. static void msm_slim_wait_retry(struct msm_slim_ctrl *dev)
  457. {
  458. int msec_per_frm = 0;
  459. int sfr_per_sec;
  460. /* Wait for 1 superframe, or default time and then retry */
  461. sfr_per_sec = dev->framer.superfreq /
  462. (1 << (SLIM_MAX_CLK_GEAR - dev->ctrl.clkgear));
  463. if (sfr_per_sec)
  464. msec_per_frm = MSEC_PER_SEC / sfr_per_sec;
  465. if (msec_per_frm < DEF_RETRY_MS)
  466. msec_per_frm = DEF_RETRY_MS;
  467. msleep(msec_per_frm);
  468. }
  469. static int msm_set_laddr(struct slim_controller *ctrl, const u8 *ea,
  470. u8 elen, u8 laddr)
  471. {
  472. struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
  473. struct completion done;
  474. int timeout, ret, retries = 0;
  475. u32 *buf;
  476. retry_laddr:
  477. init_completion(&done);
  478. mutex_lock(&dev->tx_lock);
  479. buf = msm_get_msg_buf(dev, 9, &done);
  480. if (buf == NULL)
  481. return -ENOMEM;
  482. buf[0] = SLIM_MSG_ASM_FIRST_WORD(9, SLIM_MSG_MT_CORE,
  483. SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
  484. SLIM_MSG_DEST_LOGICALADDR,
  485. ea[5] | ea[4] << 8);
  486. buf[1] = ea[3] | (ea[2] << 8) | (ea[1] << 16) | (ea[0] << 24);
  487. buf[2] = laddr;
  488. ret = msm_send_msg_buf(dev, buf, 9, MGR_TX_MSG);
  489. timeout = wait_for_completion_timeout(&done, HZ);
  490. if (!timeout)
  491. dev->err = -ETIMEDOUT;
  492. if (dev->err) {
  493. ret = dev->err;
  494. dev->err = 0;
  495. }
  496. mutex_unlock(&dev->tx_lock);
  497. if (ret) {
  498. pr_err("set LADDR:0x%x failed:ret:%d, retrying", laddr, ret);
  499. if (retries < INIT_MX_RETRIES) {
  500. msm_slim_wait_retry(dev);
  501. retries++;
  502. goto retry_laddr;
  503. } else {
  504. pr_err("set LADDR failed after retrying:ret:%d", ret);
  505. }
  506. }
  507. return ret;
  508. }
  509. static int msm_clk_pause_wakeup(struct slim_controller *ctrl)
  510. {
  511. struct msm_slim_ctrl *dev = slim_get_ctrldata(ctrl);
  512. enable_irq(dev->irq);
  513. clk_prepare_enable(dev->rclk);
  514. writel_relaxed(1, dev->base + FRM_WAKEUP);
  515. /* Make sure framer wakeup write goes through before exiting function */
  516. mb();
  517. /*
  518. * Workaround: Currently, slave is reporting lost-sync messages
  519. * after slimbus comes out of clock pause.
  520. * Transaction with slave fail before slave reports that message
  521. * Give some time for that report to come
  522. * Slimbus wakes up in clock gear 10 at 24.576MHz. With each superframe
  523. * being 250 usecs, we wait for 20 superframes here to ensure
  524. * we get the message
  525. */
  526. usleep_range(5000, 5000);
  527. return 0;
  528. }
/*
 * msm_sat_define_ch() - handle a satellite channel define/activate/control
 * request.
 * @sat: requesting satellite.
 * @buf: raw request message bytes.
 * @len: message length in bytes (5-bit, so at most 31).
 * @mc: user message code (SLIM_USR_MC_CHAN_CTRL, _DEFINE_CHAN or
 *      _DEF_ACT_CHAN).
 *
 * CHAN_CTRL: applies the requested operation (bits 7:6 of buf[3]) to the
 * first listed channel (grouped channels follow automatically) and counts
 * pending remove/define requests per channel for reconciliation at
 * RECONFIG_NOW time.
 *
 * DEFINE/DEF_ACT: channel numbers start at buf[8]; each is either matched
 * to an existing satellite channel slot or allocated a new one (bounded by
 * MSM_MAX_SATCH), then the group is defined from the decoded properties
 * and, for DEF_ACT, activated. Returns 0 or a negative errno.
 */
static int msm_sat_define_ch(struct msm_slim_sat *sat, u8 *buf, u8 len, u8 mc)
{
	struct msm_slim_ctrl *dev = sat->dev;
	enum slim_ch_control oper;
	int i;
	int ret = 0;
	if (mc == SLIM_USR_MC_CHAN_CTRL) {
		/* locate the (first) channel this control op refers to */
		for (i = 0; i < sat->nsatch; i++) {
			if (buf[5] == sat->satch[i].chan)
				break;
		}
		if (i >= sat->nsatch)
			return -ENOTCONN;
		oper = ((buf[3] & 0xC0) >> 6);
		/* part of grp. activating/removing 1 will take care of rest */
		ret = slim_control_ch(&sat->satcl, sat->satch[i].chanh, oper,
					false);
		if (!ret) {
			/* count pending req_rem/req_def per listed channel */
			for (i = 5; i < len; i++) {
				int j;
				for (j = 0; j < sat->nsatch; j++) {
					if (buf[i] == sat->satch[j].chan) {
						if (oper == SLIM_CH_REMOVE)
							sat->satch[j].req_rem++;
						else
							sat->satch[j].req_def++;
						break;
					}
				}
			}
		}
	} else {
		u16 chh[40];
		struct slim_ch prop;
		u32 exp;
		u16 *grph = NULL;
		u8 coeff, cc;
		u8 prrate = buf[6];
		if (len <= 8)
			return -EINVAL;
		/* resolve or allocate a satellite channel slot per entry */
		for (i = 8; i < len; i++) {
			int j = 0;
			for (j = 0; j < sat->nsatch; j++) {
				if (sat->satch[j].chan == buf[i]) {
					chh[i - 8] = sat->satch[j].chanh;
					break;
				}
			}
			if (j < sat->nsatch) {
				/* known channel: re-query to bump refcount
				 * (handle discarded into dummy)
				 */
				u16 dummy;
				ret = slim_query_ch(&sat->satcl, buf[i],
							&dummy);
				if (ret)
					return ret;
				if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
					sat->satch[j].req_def++;
				/* First channel in group from satellite */
				if (i == 8)
					grph = &sat->satch[j].chanh;
				continue;
			}
			/* new channel: j == sat->nsatch indexes a free slot */
			if (sat->nsatch >= MSM_MAX_SATCH)
				return -EXFULL;
			ret = slim_query_ch(&sat->satcl, buf[i], &chh[i - 8]);
			if (ret)
				return ret;
			sat->satch[j].chan = buf[i];
			sat->satch[j].chanh = chh[i - 8];
			if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
				sat->satch[j].req_def++;
			if (i == 8)
				grph = &sat->satch[j].chanh;
			sat->nsatch++;
		}
		/* decode channel properties from the request bytes */
		prop.dataf = (enum slim_ch_dataf)((buf[3] & 0xE0) >> 5);
		/*
		 * NOTE(review): mask 0xC0 with shift >> 5 yields only even
		 * values (0/2/4/6); a 2-bit field would normally use >> 6 —
		 * confirm intended auxf encoding before changing.
		 */
		prop.auxf = (enum slim_ch_auxf)((buf[4] & 0xC0) >> 5);
		prop.baser = SLIM_RATE_4000HZ;	/* overwritten just below */
		if (prrate & 0x8)
			prop.baser = SLIM_RATE_11025HZ;
		else
			prop.baser = SLIM_RATE_4000HZ;
		prop.prot = (enum slim_ch_proto)(buf[5] & 0x0F);
		prop.sampleszbits = (buf[4] & 0x1F)*SLIM_CL_PER_SL;
		exp = (u32)((buf[5] & 0xF0) >> 4);
		coeff = (buf[4] & 0x20) >> 5;
		cc = (coeff ? 3 : 1);
		prop.ratem = cc * (1 << exp);
		/* i == len here: >1 channel means define as a group */
		if (i > 9)
			ret = slim_define_ch(&sat->satcl, &prop, chh, len - 8,
					true, &chh[0]);
		else
			ret = slim_define_ch(&sat->satcl, &prop,
					chh, 1, true, &chh[0]);
		dev_dbg(dev->dev, "define sat grp returned:%d", ret);
		if (ret)
			return ret;
		else if (grph)
			*grph = chh[0];
		/* part of group so activating 1 will take care of rest */
		if (mc == SLIM_USR_MC_DEF_ACT_CHAN)
			ret = slim_control_ch(&sat->satcl,
					chh[0],
					SLIM_CH_ACTIVATE, false);
	}
	return ret;
}
/*
 * msm_slim_rxwq() - process one message from the controller RX queue.
 * @dev: controller whose RX queue is drained (one message per call).
 *
 * Non-ISR counterpart to the RX path of msm_slim_interrupt(). Handles:
 *  - REPORT_PRESENT: assigns a logical address; records the PGD device's
 *    address; enables runtime PM once the last fixed MSM device appears;
 *    and for satellite devices allocates/looks up the satellite and queues
 *    its work item.
 *  - REPLY_INFORMATION/REPLY_VALUE: forwards the payload to the framework
 *    by transaction ID.
 *  - REPORT_INFORMATION / unknown: logged.
 */
static void msm_slim_rxwq(struct msm_slim_ctrl *dev)
{
	u8 buf[40];
	u8 mc, mt, len;
	int i, ret;
	if ((msm_slim_rx_dequeue(dev, (u8 *)buf)) != -ENODATA) {
		len = buf[0] & 0x1F;
		mt = (buf[0] >> 5) & 0x7;
		mc = buf[1];
		if (mt == SLIM_MSG_MT_CORE &&
			mc == SLIM_MSG_MC_REPORT_PRESENT) {
			u8 laddr;
			u8 e_addr[6];
			/* enumeration address arrives byte-reversed */
			for (i = 0; i < 6; i++)
				e_addr[i] = buf[7-i];
			ret = slim_assign_laddr(&dev->ctrl, e_addr, 6, &laddr,
						false);
			/* Is this Qualcomm ported generic device? */
			if (!ret && e_addr[5] == QC_MFGID_LSB &&
				e_addr[4] == QC_MFGID_MSB &&
				e_addr[1] == QC_DEVID_PGD &&
				e_addr[2] != QC_CHIPID_SL)
				dev->pgdla = laddr;
			/* last expected fixed device: runtime PM can start */
			if (!ret && !pm_runtime_enabled(dev->dev) &&
				laddr == (QC_MSM_DEVS - 1))
				pm_runtime_enable(dev->dev);
			if (!ret && msm_is_sat_dev(e_addr)) {
				struct msm_slim_sat *sat = addr_to_sat(dev,
								laddr);
				if (!sat)
					sat = msm_slim_alloc_sat(dev);
				if (!sat)
					return;
				sat->satcl.laddr = laddr;
				msm_sat_enqueue(sat, (u32 *)buf, len);
				queue_work(sat->wq, &sat->wd);
			}
			if (ret)
				pr_err("assign laddr failed, error:%d", ret);
		} else if (mc == SLIM_MSG_MC_REPLY_INFORMATION ||
				mc == SLIM_MSG_MC_REPLY_VALUE) {
			/* payload starts after 4 header bytes; buf[3] = tid */
			u8 tid = buf[3];
			dev_dbg(dev->dev, "tid:%d, len:%d\n", tid, len - 4);
			slim_msg_response(&dev->ctrl, &buf[4], tid,
						len - 4);
			pm_runtime_mark_last_busy(dev->dev);
		} else if (mc == SLIM_MSG_MC_REPORT_INFORMATION) {
			u8 l_addr = buf[2];
			u16 ele = (u16)buf[4] << 4;
			ele |= ((buf[3] & 0xf0) >> 4);
			dev_err(dev->dev, "Slim-dev:%d report inf element:0x%x",
					l_addr, ele);
			for (i = 0; i < len - 5; i++)
				dev_err(dev->dev, "offset:0x%x:bit mask:%x",
						i, buf[i+5]);
		} else {
			dev_err(dev->dev, "unexpected message:mc:%x, mt:%x",
					mc, mt);
			for (i = 0; i < len; i++)
				dev_err(dev->dev, "error msg: %x", buf[i]);
		}
	} else
		dev_err(dev->dev, "rxwq called and no dequeue");
}
  699. static void slim_sat_rxprocess(struct work_struct *work)
  700. {
  701. struct msm_slim_sat *sat = container_of(work, struct msm_slim_sat, wd);
  702. struct msm_slim_ctrl *dev = sat->dev;
  703. u8 buf[40];
  704. while ((msm_sat_dequeue(sat, buf)) != -ENODATA) {
  705. struct slim_msg_txn txn;
  706. u8 len, mc, mt;
  707. u32 bw_sl;
  708. int ret = 0;
  709. int satv = -1;
  710. bool gen_ack = false;
  711. u8 tid;
  712. u8 wbuf[8];
  713. int i, retries = 0;
  714. txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
  715. txn.dt = SLIM_MSG_DEST_LOGICALADDR;
  716. txn.ec = 0;
  717. txn.rbuf = NULL;
  718. txn.la = sat->satcl.laddr;
  719. /* satellite handling */
  720. len = buf[0] & 0x1F;
  721. mc = buf[1];
  722. mt = (buf[0] >> 5) & 0x7;
  723. if (mt == SLIM_MSG_MT_CORE &&
  724. mc == SLIM_MSG_MC_REPORT_PRESENT) {
  725. u8 e_addr[6];
  726. for (i = 0; i < 6; i++)
  727. e_addr[i] = buf[7-i];
  728. if (pm_runtime_enabled(dev->dev)) {
  729. satv = msm_slim_get_ctrl(dev);
  730. if (satv >= 0)
  731. sat->pending_capability = true;
  732. }
  733. /*
  734. * Since capability message is already sent, present
  735. * message will indicate subsystem hosting this
  736. * satellite has restarted.
  737. * Remove all active channels of this satellite
  738. * when this is detected
  739. */
  740. if (sat->sent_capability) {
  741. for (i = 0; i < sat->nsatch; i++) {
  742. if (sat->satch[i].reconf) {
  743. pr_err("SSR, sat:%d, rm ch:%d",
  744. sat->satcl.laddr,
  745. sat->satch[i].chan);
  746. slim_control_ch(&sat->satcl,
  747. sat->satch[i].chanh,
  748. SLIM_CH_REMOVE, true);
  749. slim_dealloc_ch(&sat->satcl,
  750. sat->satch[i].chanh);
  751. sat->satch[i].reconf = false;
  752. }
  753. }
  754. }
  755. } else if (mt != SLIM_MSG_MT_CORE &&
  756. mc != SLIM_MSG_MC_REPORT_PRESENT) {
  757. satv = msm_slim_get_ctrl(dev);
  758. }
  759. switch (mc) {
  760. case SLIM_MSG_MC_REPORT_PRESENT:
  761. /* Remove runtime_pm vote once satellite acks */
  762. if (mt != SLIM_MSG_MT_CORE) {
  763. if (pm_runtime_enabled(dev->dev) &&
  764. sat->pending_capability) {
  765. msm_slim_put_ctrl(dev);
  766. sat->pending_capability = false;
  767. }
  768. continue;
  769. }
  770. /* send a Manager capability msg */
  771. if (sat->sent_capability) {
  772. if (mt == SLIM_MSG_MT_CORE)
  773. goto send_capability;
  774. else
  775. continue;
  776. }
  777. ret = slim_add_device(&dev->ctrl, &sat->satcl);
  778. if (ret) {
  779. dev_err(dev->dev,
  780. "Satellite-init failed");
  781. continue;
  782. }
  783. /* Satellite-channels */
  784. sat->satch = kzalloc(MSM_MAX_SATCH *
  785. sizeof(struct msm_sat_chan),
  786. GFP_KERNEL);
  787. send_capability:
  788. txn.mc = SLIM_USR_MC_MASTER_CAPABILITY;
  789. txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
  790. txn.la = sat->satcl.laddr;
  791. txn.rl = 8;
  792. wbuf[0] = SAT_MAGIC_LSB;
  793. wbuf[1] = SAT_MAGIC_MSB;
  794. wbuf[2] = SAT_MSG_VER;
  795. wbuf[3] = SAT_MSG_PROT;
  796. txn.wbuf = wbuf;
  797. txn.len = 4;
  798. ret = msm_xfer_msg(&dev->ctrl, &txn);
  799. if (ret) {
  800. pr_err("capability for:0x%x fail:%d, retry:%d",
  801. sat->satcl.laddr, ret, retries);
  802. if (retries < INIT_MX_RETRIES) {
  803. msm_slim_wait_retry(dev);
  804. retries++;
  805. goto send_capability;
  806. } else {
  807. pr_err("failed after all retries:%d",
  808. ret);
  809. }
  810. } else {
  811. sat->sent_capability = true;
  812. }
  813. break;
  814. case SLIM_USR_MC_ADDR_QUERY:
  815. memcpy(&wbuf[1], &buf[4], 6);
  816. ret = slim_get_logical_addr(&sat->satcl,
  817. &wbuf[1], 6, &wbuf[7]);
  818. if (ret)
  819. memset(&wbuf[1], 0, 6);
  820. wbuf[0] = buf[3];
  821. txn.mc = SLIM_USR_MC_ADDR_REPLY;
  822. txn.rl = 12;
  823. txn.len = 8;
  824. txn.wbuf = wbuf;
  825. msm_xfer_msg(&dev->ctrl, &txn);
  826. break;
  827. case SLIM_USR_MC_DEFINE_CHAN:
  828. case SLIM_USR_MC_DEF_ACT_CHAN:
  829. case SLIM_USR_MC_CHAN_CTRL:
  830. if (mc != SLIM_USR_MC_CHAN_CTRL)
  831. tid = buf[7];
  832. else
  833. tid = buf[4];
  834. gen_ack = true;
  835. ret = msm_sat_define_ch(sat, buf, len, mc);
  836. if (ret) {
  837. dev_err(dev->dev,
  838. "SAT define_ch returned:%d",
  839. ret);
  840. }
  841. if (!sat->pending_reconf) {
  842. int chv = msm_slim_get_ctrl(dev);
  843. if (chv >= 0)
  844. sat->pending_reconf = true;
  845. }
  846. break;
  847. case SLIM_USR_MC_RECONFIG_NOW:
  848. tid = buf[3];
  849. gen_ack = true;
  850. ret = slim_reconfigure_now(&sat->satcl);
  851. for (i = 0; i < sat->nsatch; i++) {
  852. struct msm_sat_chan *sch = &sat->satch[i];
  853. if (sch->req_rem && sch->reconf) {
  854. if (!ret) {
  855. slim_dealloc_ch(&sat->satcl,
  856. sch->chanh);
  857. sch->reconf = false;
  858. }
  859. sch->req_rem--;
  860. } else if (sch->req_def) {
  861. if (ret)
  862. slim_dealloc_ch(&sat->satcl,
  863. sch->chanh);
  864. else
  865. sch->reconf = true;
  866. sch->req_def--;
  867. }
  868. }
  869. if (sat->pending_reconf) {
  870. msm_slim_put_ctrl(dev);
  871. sat->pending_reconf = false;
  872. }
  873. break;
  874. case SLIM_USR_MC_REQ_BW:
  875. /* what we get is in SLOTS */
  876. bw_sl = (u32)buf[4] << 3 |
  877. ((buf[3] & 0xE0) >> 5);
  878. sat->satcl.pending_msgsl = bw_sl;
  879. tid = buf[5];
  880. gen_ack = true;
  881. break;
  882. case SLIM_USR_MC_CONNECT_SRC:
  883. case SLIM_USR_MC_CONNECT_SINK:
  884. if (mc == SLIM_USR_MC_CONNECT_SRC)
  885. txn.mc = SLIM_MSG_MC_CONNECT_SOURCE;
  886. else
  887. txn.mc = SLIM_MSG_MC_CONNECT_SINK;
  888. wbuf[0] = buf[4] & 0x1F;
  889. wbuf[1] = buf[5];
  890. tid = buf[6];
  891. txn.la = buf[3];
  892. txn.mt = SLIM_MSG_MT_CORE;
  893. txn.rl = 6;
  894. txn.len = 2;
  895. txn.wbuf = wbuf;
  896. gen_ack = true;
  897. ret = msm_xfer_msg(&dev->ctrl, &txn);
  898. break;
  899. case SLIM_USR_MC_DISCONNECT_PORT:
  900. txn.mc = SLIM_MSG_MC_DISCONNECT_PORT;
  901. wbuf[0] = buf[4] & 0x1F;
  902. tid = buf[5];
  903. txn.la = buf[3];
  904. txn.rl = 5;
  905. txn.len = 1;
  906. txn.mt = SLIM_MSG_MT_CORE;
  907. txn.wbuf = wbuf;
  908. gen_ack = true;
  909. ret = msm_xfer_msg(&dev->ctrl, &txn);
  910. break;
  911. case SLIM_MSG_MC_REPORT_ABSENT:
  912. dev_info(dev->dev, "Received Report Absent Message\n");
  913. break;
  914. default:
  915. break;
  916. }
  917. if (!gen_ack) {
  918. if (mc != SLIM_MSG_MC_REPORT_PRESENT && satv >= 0)
  919. msm_slim_put_ctrl(dev);
  920. continue;
  921. }
  922. wbuf[0] = tid;
  923. if (!ret)
  924. wbuf[1] = MSM_SAT_SUCCSS;
  925. else
  926. wbuf[1] = 0;
  927. txn.mc = SLIM_USR_MC_GENERIC_ACK;
  928. txn.la = sat->satcl.laddr;
  929. txn.rl = 6;
  930. txn.len = 2;
  931. txn.wbuf = wbuf;
  932. txn.mt = SLIM_MSG_MT_SRC_REFERRED_USER;
  933. msm_xfer_msg(&dev->ctrl, &txn);
  934. if (satv >= 0)
  935. msm_slim_put_ctrl(dev);
  936. }
  937. }
  938. static struct msm_slim_sat *msm_slim_alloc_sat(struct msm_slim_ctrl *dev)
  939. {
  940. struct msm_slim_sat *sat;
  941. char *name;
  942. if (dev->nsats >= MSM_MAX_NSATS)
  943. return NULL;
  944. sat = kzalloc(sizeof(struct msm_slim_sat), GFP_KERNEL);
  945. if (!sat) {
  946. dev_err(dev->dev, "no memory for satellite");
  947. return NULL;
  948. }
  949. name = kzalloc(SLIMBUS_NAME_SIZE, GFP_KERNEL);
  950. if (!name) {
  951. dev_err(dev->dev, "no memory for satellite name");
  952. kfree(sat);
  953. return NULL;
  954. }
  955. dev->satd[dev->nsats] = sat;
  956. sat->dev = dev;
  957. snprintf(name, SLIMBUS_NAME_SIZE, "msm_sat%d", dev->nsats);
  958. sat->satcl.name = name;
  959. spin_lock_init(&sat->lock);
  960. INIT_WORK(&sat->wd, slim_sat_rxprocess);
  961. sat->wq = create_singlethread_workqueue(sat->satcl.name);
  962. if (!sat->wq) {
  963. kfree(name);
  964. kfree(sat);
  965. return NULL;
  966. }
  967. /*
  968. * Both sats will be allocated from RX thread and RX thread will
  969. * process messages sequentially. No synchronization necessary
  970. */
  971. dev->nsats++;
  972. return sat;
  973. }
/*
 * Kernel thread that drains the hardware RX message queue.
 *
 * Each completion on rx_msgq_notify signals that message data is
 * available. Messages are reassembled one 32-bit word at a time
 * (index counts words of the current message). Once a full message
 * has been collected, it is routed either to a satellite workqueue
 * (for SRC/DEST_REFERRED_USER messages whose logical address matches
 * a known satellite) or to the generic RX path.
 *
 * Returns 0 when the thread is stopped via kthread_stop().
 */
static int msm_slim_rx_msgq_thread(void *data)
{
	struct msm_slim_ctrl *dev = (struct msm_slim_ctrl *)data;
	struct completion *notify = &dev->rx_msgq_notify;
	struct msm_slim_sat *sat = NULL;
	u32 mc = 0;
	u32 mt = 0;
	u32 buffer[10];
	int index = 0;		/* word index within the current message */
	u8 msg_len = 0;		/* byte length from the message header */
	int ret;
	dev_dbg(dev->dev, "rx thread started");
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		ret = wait_for_completion_interruptible(notify);
		if (ret)
			dev_err(dev->dev, "rx thread wait error:%d", ret);
		/* 1 irq notification per message */
		if (dev->use_rx_msgqs != MSM_MSGQ_ENABLED) {
			/* message queues unavailable: use register-based RX */
			msm_slim_rxwq(dev);
			continue;
		}
		ret = msm_slim_rx_msgq_get(dev, buffer, index);
		if (ret) {
			dev_err(dev->dev, "rx_msgq_get() failed 0x%x\n", ret);
			continue;
		}
		pr_debug("message[%d] = 0x%x\n", index, *buffer);
		/* Decide if we use generic RX or satellite RX */
		if (index++ == 0) {
			/* First word: decode length, msg type and msg code */
			msg_len = *buffer & 0x1F;
			pr_debug("Start of new message, len = %d\n", msg_len);
			mt = (buffer[0] >> 5) & 0x7;
			mc = (buffer[0] >> 8) & 0xff;
			dev_dbg(dev->dev, "MC: %x, MT: %x\n", mc, mt);
			if (mt == SLIM_MSG_MT_DEST_REFERRED_USER ||
				mt == SLIM_MSG_MT_SRC_REFERRED_USER) {
				u8 laddr;
				laddr = (u8)((buffer[0] >> 16) & 0xff);
				/* NULL if laddr is not a known satellite */
				sat = addr_to_sat(dev, laddr);
			}
		}
		/* Message complete once collected words cover msg_len bytes */
		if ((index * 4) >= msg_len) {
			index = 0;
			if (sat) {
				msm_sat_enqueue(sat, buffer, msg_len);
				queue_work(sat->wq, &sat->wd);
				sat = NULL;
			} else {
				msm_slim_rx_enqueue(dev, buffer, msg_len);
				msm_slim_rxwq(dev);
			}
		}
	}
	return 0;
}
  1030. static void msm_slim_prg_slew(struct platform_device *pdev,
  1031. struct msm_slim_ctrl *dev)
  1032. {
  1033. struct resource *slew_io;
  1034. void __iomem *slew_reg;
  1035. /* SLEW RATE register for this slimbus */
  1036. dev->slew_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
  1037. "slimbus_slew_reg");
  1038. if (!dev->slew_mem) {
  1039. dev_dbg(&pdev->dev, "no slimbus slew resource\n");
  1040. return;
  1041. }
  1042. slew_io = request_mem_region(dev->slew_mem->start,
  1043. resource_size(dev->slew_mem), pdev->name);
  1044. if (!slew_io) {
  1045. dev_dbg(&pdev->dev, "slimbus-slew mem claimed\n");
  1046. dev->slew_mem = NULL;
  1047. return;
  1048. }
  1049. slew_reg = ioremap(dev->slew_mem->start, resource_size(dev->slew_mem));
  1050. if (!slew_reg) {
  1051. dev_dbg(dev->dev, "slew register mapping failed");
  1052. release_mem_region(dev->slew_mem->start,
  1053. resource_size(dev->slew_mem));
  1054. dev->slew_mem = NULL;
  1055. return;
  1056. }
  1057. writel_relaxed(1, slew_reg);
  1058. /* Make sure slimbus-slew rate enabling goes through */
  1059. wmb();
  1060. iounmap(slew_reg);
  1061. }
  1062. static int __devinit msm_slim_probe(struct platform_device *pdev)
  1063. {
  1064. struct msm_slim_ctrl *dev;
  1065. int ret;
  1066. enum apr_subsys_state q6_state;
  1067. struct resource *bam_mem, *bam_io;
  1068. struct resource *slim_mem, *slim_io;
  1069. struct resource *irq, *bam_irq;
  1070. bool rxreg_access = false;
  1071. q6_state = apr_get_q6_state();
  1072. if (q6_state == APR_SUBSYS_DOWN) {
  1073. dev_dbg(&pdev->dev, "defering %s, adsp_state %d\n", __func__,
  1074. q6_state);
  1075. return -EPROBE_DEFER;
  1076. } else
  1077. dev_dbg(&pdev->dev, "adsp is ready\n");
  1078. slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
  1079. "slimbus_physical");
  1080. if (!slim_mem) {
  1081. dev_err(&pdev->dev, "no slimbus physical memory resource\n");
  1082. return -ENODEV;
  1083. }
  1084. slim_io = request_mem_region(slim_mem->start, resource_size(slim_mem),
  1085. pdev->name);
  1086. if (!slim_io) {
  1087. dev_err(&pdev->dev, "slimbus memory already claimed\n");
  1088. return -EBUSY;
  1089. }
  1090. bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
  1091. "slimbus_bam_physical");
  1092. if (!bam_mem) {
  1093. dev_err(&pdev->dev, "no slimbus BAM memory resource\n");
  1094. ret = -ENODEV;
  1095. goto err_get_res_bam_failed;
  1096. }
  1097. bam_io = request_mem_region(bam_mem->start, resource_size(bam_mem),
  1098. pdev->name);
  1099. if (!bam_io) {
  1100. release_mem_region(slim_mem->start, resource_size(slim_mem));
  1101. dev_err(&pdev->dev, "slimbus BAM memory already claimed\n");
  1102. ret = -EBUSY;
  1103. goto err_get_res_bam_failed;
  1104. }
  1105. irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
  1106. "slimbus_irq");
  1107. if (!irq) {
  1108. dev_err(&pdev->dev, "no slimbus IRQ resource\n");
  1109. ret = -ENODEV;
  1110. goto err_get_res_failed;
  1111. }
  1112. bam_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
  1113. "slimbus_bam_irq");
  1114. if (!bam_irq) {
  1115. dev_err(&pdev->dev, "no slimbus BAM IRQ resource\n");
  1116. ret = -ENODEV;
  1117. goto err_get_res_failed;
  1118. }
  1119. dev = kzalloc(sizeof(struct msm_slim_ctrl), GFP_KERNEL);
  1120. if (!dev) {
  1121. dev_err(&pdev->dev, "no memory for MSM slimbus controller\n");
  1122. ret = -ENOMEM;
  1123. goto err_get_res_failed;
  1124. }
  1125. dev->wr_comp = kzalloc(sizeof(struct completion *) * MSM_TX_BUFS,
  1126. GFP_KERNEL);
  1127. if (!dev->wr_comp)
  1128. return -ENOMEM;
  1129. dev->dev = &pdev->dev;
  1130. platform_set_drvdata(pdev, dev);
  1131. slim_set_ctrldata(&dev->ctrl, dev);
  1132. dev->base = ioremap(slim_mem->start, resource_size(slim_mem));
  1133. if (!dev->base) {
  1134. dev_err(&pdev->dev, "IOremap failed\n");
  1135. ret = -ENOMEM;
  1136. goto err_ioremap_failed;
  1137. }
  1138. dev->bam.base = ioremap(bam_mem->start, resource_size(bam_mem));
  1139. if (!dev->bam.base) {
  1140. dev_err(&pdev->dev, "BAM IOremap failed\n");
  1141. ret = -ENOMEM;
  1142. goto err_ioremap_bam_failed;
  1143. }
  1144. if (pdev->dev.of_node) {
  1145. ret = of_property_read_u32(pdev->dev.of_node, "cell-index",
  1146. &dev->ctrl.nr);
  1147. if (ret) {
  1148. dev_err(&pdev->dev, "Cell index not specified:%d", ret);
  1149. goto err_of_init_failed;
  1150. }
  1151. rxreg_access = of_property_read_bool(pdev->dev.of_node,
  1152. "qcom,rxreg-access");
  1153. /* Optional properties */
  1154. ret = of_property_read_u32(pdev->dev.of_node,
  1155. "qcom,min-clk-gear", &dev->ctrl.min_cg);
  1156. ret = of_property_read_u32(pdev->dev.of_node,
  1157. "qcom,max-clk-gear", &dev->ctrl.max_cg);
  1158. pr_debug("min_cg:%d, max_cg:%d, rxreg: %d", dev->ctrl.min_cg,
  1159. dev->ctrl.max_cg, rxreg_access);
  1160. } else {
  1161. dev->ctrl.nr = pdev->id;
  1162. }
  1163. dev->ctrl.nchans = MSM_SLIM_NCHANS;
  1164. dev->ctrl.nports = MSM_SLIM_NPORTS;
  1165. dev->ctrl.set_laddr = msm_set_laddr;
  1166. dev->ctrl.xfer_msg = msm_xfer_msg;
  1167. dev->ctrl.wakeup = msm_clk_pause_wakeup;
  1168. dev->ctrl.alloc_port = msm_alloc_port;
  1169. dev->ctrl.dealloc_port = msm_dealloc_port;
  1170. dev->ctrl.port_xfer = msm_slim_port_xfer;
  1171. dev->ctrl.port_xfer_status = msm_slim_port_xfer_status;
  1172. /* Reserve some messaging BW for satellite-apps driver communication */
  1173. dev->ctrl.sched.pending_msgsl = 30;
  1174. init_completion(&dev->reconf);
  1175. mutex_init(&dev->tx_lock);
  1176. spin_lock_init(&dev->rx_lock);
  1177. dev->ee = 1;
  1178. if (rxreg_access)
  1179. dev->use_rx_msgqs = MSM_MSGQ_DISABLED;
  1180. else
  1181. dev->use_rx_msgqs = MSM_MSGQ_RESET;
  1182. dev->irq = irq->start;
  1183. dev->bam.irq = bam_irq->start;
  1184. dev->hclk = clk_get(dev->dev, "iface_clk");
  1185. if (IS_ERR(dev->hclk))
  1186. dev->hclk = NULL;
  1187. else
  1188. clk_prepare_enable(dev->hclk);
  1189. ret = msm_slim_sps_init(dev, bam_mem, MGR_STATUS, false);
  1190. if (ret != 0) {
  1191. dev_err(dev->dev, "error SPS init\n");
  1192. goto err_sps_init_failed;
  1193. }
  1194. /* Fire up the Rx message queue thread */
  1195. dev->rx_msgq_thread = kthread_run(msm_slim_rx_msgq_thread, dev,
  1196. MSM_SLIM_NAME "_rx_msgq_thread");
  1197. if (IS_ERR(dev->rx_msgq_thread)) {
  1198. ret = PTR_ERR(dev->rx_msgq_thread);
  1199. dev_err(dev->dev, "Failed to start Rx message queue thread\n");
  1200. goto err_thread_create_failed;
  1201. }
  1202. dev->framer.rootfreq = SLIM_ROOT_FREQ >> 3;
  1203. dev->framer.superfreq =
  1204. dev->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
  1205. dev->ctrl.a_framer = &dev->framer;
  1206. dev->ctrl.clkgear = SLIM_MAX_CLK_GEAR;
  1207. dev->ctrl.dev.parent = &pdev->dev;
  1208. dev->ctrl.dev.of_node = pdev->dev.of_node;
  1209. ret = request_threaded_irq(dev->irq, NULL, msm_slim_interrupt,
  1210. IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
  1211. "msm_slim_irq", dev);
  1212. if (ret) {
  1213. dev_err(&pdev->dev, "request IRQ failed\n");
  1214. goto err_request_irq_failed;
  1215. }
  1216. msm_slim_prg_slew(pdev, dev);
  1217. /* Register with framework before enabling frame, clock */
  1218. ret = slim_add_numbered_controller(&dev->ctrl);
  1219. if (ret) {
  1220. dev_err(dev->dev, "error adding controller\n");
  1221. goto err_ctrl_failed;
  1222. }
  1223. dev->rclk = clk_get(dev->dev, "core_clk");
  1224. if (!dev->rclk) {
  1225. dev_err(dev->dev, "slimbus clock not found");
  1226. goto err_clk_get_failed;
  1227. }
  1228. clk_set_rate(dev->rclk, SLIM_ROOT_FREQ);
  1229. clk_prepare_enable(dev->rclk);
  1230. dev->ver = readl_relaxed(dev->base);
  1231. /* Version info in 16 MSbits */
  1232. dev->ver >>= 16;
  1233. /* Component register initialization */
  1234. writel_relaxed(1, dev->base + CFG_PORT(COMP_CFG, dev->ver));
  1235. writel_relaxed((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
  1236. dev->base + CFG_PORT(COMP_TRUST_CFG, dev->ver));
  1237. /*
  1238. * Manager register initialization
  1239. * If RX msg Q is used, disable RX_MSG_RCVD interrupt
  1240. */
  1241. if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED)
  1242. writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
  1243. MGR_INT_MSG_BUF_CONTE | /* MGR_INT_RX_MSG_RCVD | */
  1244. MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
  1245. else
  1246. writel_relaxed((MGR_INT_RECFG_DONE | MGR_INT_TX_NACKED_2 |
  1247. MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
  1248. MGR_INT_TX_MSG_SENT), dev->base + MGR_INT_EN);
  1249. writel_relaxed(1, dev->base + MGR_CFG);
  1250. /*
  1251. * Framer registers are beyond 1K memory region after Manager and/or
  1252. * component registers. Make sure those writes are ordered
  1253. * before framer register writes
  1254. */
  1255. wmb();
  1256. /* Framer register initialization */
  1257. writel_relaxed((1 << INTR_WAKE) | (0xA << REF_CLK_GEAR) |
  1258. (0xA << CLK_GEAR) | (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
  1259. dev->base + FRM_CFG);
  1260. /*
  1261. * Make sure that framer wake-up and enabling writes go through
  1262. * before any other component is enabled. Framer is responsible for
  1263. * clocking the bus and enabling framer first will ensure that other
  1264. * devices can report presence when they are enabled
  1265. */
  1266. mb();
  1267. /* Enable RX msg Q */
  1268. if (dev->use_rx_msgqs == MSM_MSGQ_ENABLED)
  1269. writel_relaxed(MGR_CFG_ENABLE | MGR_CFG_RX_MSGQ_EN,
  1270. dev->base + MGR_CFG);
  1271. else
  1272. writel_relaxed(MGR_CFG_ENABLE, dev->base + MGR_CFG);
  1273. /*
  1274. * Make sure that manager-enable is written through before interface
  1275. * device is enabled
  1276. */
  1277. mb();
  1278. writel_relaxed(1, dev->base + INTF_CFG);
  1279. /*
  1280. * Make sure that interface-enable is written through before enabling
  1281. * ported generic device inside MSM manager
  1282. */
  1283. mb();
  1284. writel_relaxed(1, dev->base + CFG_PORT(PGD_CFG, dev->ver));
  1285. writel_relaxed(0x3F<<17, dev->base + CFG_PORT(PGD_OWN_EEn, dev->ver) +
  1286. (4 * dev->ee));
  1287. /*
  1288. * Make sure that ported generic device is enabled and port-EE settings
  1289. * are written through before finally enabling the component
  1290. */
  1291. mb();
  1292. writel_relaxed(1, dev->base + CFG_PORT(COMP_CFG, dev->ver));
  1293. /*
  1294. * Make sure that all writes have gone through before exiting this
  1295. * function
  1296. */
  1297. mb();
  1298. /* Add devices registered with board-info now that controller is up */
  1299. slim_ctrl_add_boarddevs(&dev->ctrl);
  1300. if (pdev->dev.of_node)
  1301. of_register_slim_devices(&dev->ctrl);
  1302. pm_runtime_use_autosuspend(&pdev->dev);
  1303. pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_SLIM_AUTOSUSPEND);
  1304. pm_runtime_set_active(&pdev->dev);
  1305. dev_dbg(dev->dev, "MSM SB controller is up!\n");
  1306. return 0;
  1307. err_ctrl_failed:
  1308. writel_relaxed(0, dev->base + CFG_PORT(COMP_CFG, dev->ver));
  1309. err_clk_get_failed:
  1310. kfree(dev->satd);
  1311. err_request_irq_failed:
  1312. kthread_stop(dev->rx_msgq_thread);
  1313. err_thread_create_failed:
  1314. msm_slim_sps_exit(dev, true);
  1315. err_sps_init_failed:
  1316. if (dev->hclk) {
  1317. clk_disable_unprepare(dev->hclk);
  1318. clk_put(dev->hclk);
  1319. }
  1320. err_of_init_failed:
  1321. iounmap(dev->bam.base);
  1322. err_ioremap_bam_failed:
  1323. iounmap(dev->base);
  1324. err_ioremap_failed:
  1325. kfree(dev->wr_comp);
  1326. kfree(dev);
  1327. err_get_res_failed:
  1328. release_mem_region(bam_mem->start, resource_size(bam_mem));
  1329. err_get_res_bam_failed:
  1330. release_mem_region(slim_mem->start, resource_size(slim_mem));
  1331. return ret;
  1332. }
/*
 * Tear down the controller: release all satellite devices and their
 * channels, disable runtime PM, unregister from the SLIMbus core and
 * free clocks, IRQ, BAM/SPS resources and MMIO regions in (roughly)
 * reverse order of probe.
 */
static int __devexit msm_slim_remove(struct platform_device *pdev)
{
	struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
	struct resource *bam_mem;
	struct resource *slim_mem;
	struct resource *slew_mem = dev->slew_mem;
	int i;
	/* Release every satellite allocated by the RX thread */
	for (i = 0; i < dev->nsats; i++) {
		struct msm_slim_sat *sat = dev->satd[i];
		int j;
		/* Drop any data channels still held by this satellite */
		for (j = 0; j < sat->nsatch; j++)
			slim_dealloc_ch(&sat->satcl, sat->satch[j].chanh);
		slim_remove_device(&sat->satcl);
		kfree(sat->satch);
		destroy_workqueue(sat->wq);
		kfree(sat->satcl.name);
		kfree(sat);
	}
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	free_irq(dev->irq, dev);
	slim_del_controller(&dev->ctrl);
	/* rclk is expected to be disabled already via runtime suspend */
	clk_put(dev->rclk);
	if (dev->hclk)
		clk_put(dev->hclk);
	msm_slim_sps_exit(dev, true);
	kthread_stop(dev->rx_msgq_thread);
	iounmap(dev->bam.base);
	iounmap(dev->base);
	kfree(dev->wr_comp);
	kfree(dev);
	/* Regions were requested by name in probe; look them up again */
	bam_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"slimbus_bam_physical");
	if (bam_mem)
		release_mem_region(bam_mem->start, resource_size(bam_mem));
	if (slew_mem)
		release_mem_region(slew_mem->start, resource_size(slew_mem));
	slim_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"slimbus_physical");
	if (slim_mem)
		release_mem_region(slim_mem->start, resource_size(slim_mem));
	return 0;
}
  1376. #ifdef CONFIG_PM_RUNTIME
  1377. static int msm_slim_runtime_idle(struct device *device)
  1378. {
  1379. struct platform_device *pdev = to_platform_device(device);
  1380. struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
  1381. if (dev->state == MSM_CTRL_AWAKE)
  1382. dev->state = MSM_CTRL_IDLE;
  1383. dev_dbg(device, "pm_runtime: idle...\n");
  1384. pm_request_autosuspend(device);
  1385. return -EAGAIN;
  1386. }
  1387. #endif
/*
 * If CONFIG_PM_RUNTIME is not defined, these two functions serve as
 * helpers called from system suspend/resume. They are therefore
 * guarded by CONFIG_PM_SLEEP rather than CONFIG_PM_RUNTIME.
 */
  1393. #ifdef CONFIG_PM_SLEEP
  1394. static int msm_slim_runtime_suspend(struct device *device)
  1395. {
  1396. struct platform_device *pdev = to_platform_device(device);
  1397. struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
  1398. int ret;
  1399. dev_dbg(device, "pm_runtime: suspending...\n");
  1400. ret = slim_ctrl_clk_pause(&dev->ctrl, false, SLIM_CLK_UNSPECIFIED);
  1401. if (ret) {
  1402. dev_err(device, "clk pause not entered:%d", ret);
  1403. dev->state = MSM_CTRL_AWAKE;
  1404. } else {
  1405. dev->state = MSM_CTRL_ASLEEP;
  1406. }
  1407. return ret;
  1408. }
  1409. static int msm_slim_runtime_resume(struct device *device)
  1410. {
  1411. struct platform_device *pdev = to_platform_device(device);
  1412. struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
  1413. int ret = 0;
  1414. dev_dbg(device, "pm_runtime: resuming...\n");
  1415. if (dev->state == MSM_CTRL_ASLEEP)
  1416. ret = slim_ctrl_clk_pause(&dev->ctrl, true, 0);
  1417. if (ret) {
  1418. dev_err(device, "clk pause not exited:%d", ret);
  1419. dev->state = MSM_CTRL_ASLEEP;
  1420. } else {
  1421. dev->state = MSM_CTRL_AWAKE;
  1422. }
  1423. return ret;
  1424. }
  1425. static int msm_slim_suspend(struct device *dev)
  1426. {
  1427. int ret = -EBUSY;
  1428. struct platform_device *pdev = to_platform_device(dev);
  1429. struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
  1430. if (!pm_runtime_enabled(dev) ||
  1431. (!pm_runtime_suspended(dev) &&
  1432. cdev->state == MSM_CTRL_IDLE)) {
  1433. dev_dbg(dev, "system suspend");
  1434. ret = msm_slim_runtime_suspend(dev);
  1435. if (!ret) {
  1436. if (cdev->hclk)
  1437. clk_disable_unprepare(cdev->hclk);
  1438. }
  1439. }
  1440. if (ret == -EBUSY) {
  1441. /*
  1442. * If the clock pause failed due to active channels, there is
  1443. * a possibility that some audio stream is active during suspend
  1444. * We dont want to return suspend failure in that case so that
  1445. * display and relevant components can still go to suspend.
  1446. * If there is some other error, then it should be passed-on
  1447. * to system level suspend
  1448. */
  1449. ret = 0;
  1450. }
  1451. return ret;
  1452. }
  1453. static int msm_slim_resume(struct device *dev)
  1454. {
  1455. /* If runtime_pm is enabled, this resume shouldn't do anything */
  1456. if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
  1457. struct platform_device *pdev = to_platform_device(dev);
  1458. struct msm_slim_ctrl *cdev = platform_get_drvdata(pdev);
  1459. int ret;
  1460. dev_dbg(dev, "system resume");
  1461. if (cdev->hclk)
  1462. clk_prepare_enable(cdev->hclk);
  1463. ret = msm_slim_runtime_resume(dev);
  1464. if (!ret) {
  1465. pm_runtime_mark_last_busy(dev);
  1466. pm_request_autosuspend(dev);
  1467. }
  1468. return ret;
  1469. }
  1470. return 0;
  1471. }
  1472. #endif /* CONFIG_PM_SLEEP */
/*
 * PM operations: system sleep goes through msm_slim_suspend/resume
 * (which fall back to the runtime handlers when runtime PM is not in
 * charge); runtime PM uses the clock-pause based handlers directly.
 */
static const struct dev_pm_ops msm_slim_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(
		msm_slim_suspend,
		msm_slim_resume
	)
	SET_RUNTIME_PM_OPS(
		msm_slim_runtime_suspend,
		msm_slim_runtime_resume,
		msm_slim_runtime_idle
	)
};
  1484. static struct of_device_id msm_slim_dt_match[] = {
  1485. {
  1486. .compatible = "qcom,slim-msm",
  1487. },
  1488. {}
  1489. };
  1490. static struct platform_driver msm_slim_driver = {
  1491. .probe = msm_slim_probe,
  1492. .remove = msm_slim_remove,
  1493. .driver = {
  1494. .name = MSM_SLIM_NAME,
  1495. .owner = THIS_MODULE,
  1496. .pm = &msm_slim_dev_pm_ops,
  1497. .of_match_table = msm_slim_dt_match,
  1498. },
  1499. };
/* Register the platform driver early in boot (subsystem initcall) */
static int msm_slim_init(void)
{
	return platform_driver_register(&msm_slim_driver);
}
subsys_initcall(msm_slim_init);
/* Module teardown: unregister the platform driver */
static void msm_slim_exit(void)
{
	platform_driver_unregister(&msm_slim_driver);
}
module_exit(msm_slim_exit);
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.1");
MODULE_DESCRIPTION("MSM Slimbus controller");
MODULE_ALIAS("platform:msm-slim");