/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/rbtree.h>
#include <mach/socinfo.h>
#include <mach/msm_smd.h>
#include <mach/rpm-smd.h>
#define CREATE_TRACE_POINTS
#include <mach/trace_rpm_smd.h>
#include "rpm-notifier.h"

/* Debug Definitions */
enum {
	MSM_RPM_LOG_REQUEST_PRETTY	= BIT(0),
	MSM_RPM_LOG_REQUEST_RAW		= BIT(1),
	MSM_RPM_LOG_REQUEST_SHOW_MSG_ID	= BIT(2),
};

static int msm_rpm_debug_mask;
module_param_named(
	debug_mask, msm_rpm_debug_mask, int, S_IRUGO | S_IWUSR
);
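
/*
 * debug_mask selects how msm_rpm_log_request() logs outgoing requests:
 * pretty-printed key/value pairs, raw bytes, or both, optionally tagged
 * with the message id (see the MSM_RPM_LOG_REQUEST_* flags above).
 */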

struct msm_rpm_driver_data {
	const char *ch_name;
	uint32_t ch_type;
	smd_channel_t *ch_info;
	struct work_struct work;
	spinlock_t smd_lock_write;
	spinlock_t smd_lock_read;
	struct completion smd_open;
	struct completion remote_open;
};

#define DEFAULT_BUFFER_SIZE 256
#define DEBUG_PRINT_BUFFER_SIZE 512
#define MAX_SLEEP_BUFFER 128
#define SMD_CHANNEL_NOTIF_TIMEOUT 5000
#define GFP_FLAG(noirq) (noirq ? GFP_ATOMIC : GFP_KERNEL)
#define INV_RSC "resource does not exist"
#define ERR "err\0"
#define MAX_ERR_BUFFER_SIZE 128
#define INIT_ERROR 1

static ATOMIC_NOTIFIER_HEAD(msm_rpm_sleep_notifier);
static bool standalone;

int msm_rpm_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&msm_rpm_sleep_notifier, nb);
}

int msm_rpm_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&msm_rpm_sleep_notifier, nb);
}

static struct workqueue_struct *msm_rpm_smd_wq;

enum {
	MSM_RPM_MSG_REQUEST_TYPE = 0,
	MSM_RPM_MSG_TYPE_NR,
};

static const uint32_t msm_rpm_request_service[MSM_RPM_MSG_TYPE_NR] = {
	0x716572, /* 'req\0' */
};

/* The order of fields matters and reflects the order expected by the RPM. */
struct rpm_request_header {
	uint32_t service_type;
	uint32_t request_len;
};

struct rpm_message_header {
	uint32_t msg_id;
	enum msm_rpm_set set;
	uint32_t resource_type;
	uint32_t resource_id;
	uint32_t data_len;
};

struct kvp {
	unsigned int k;
	unsigned int s;
};

struct msm_rpm_kvp_data {
	uint32_t key;
	uint32_t nbytes; /* number of bytes */
	uint8_t *value;
	bool valid;
};

struct slp_buf {
	struct rb_node node;
	char ubuf[MAX_SLEEP_BUFFER];
	char *buf;
	bool valid;
};
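
/*
 * Sleep-set requests are cached in an rb-tree keyed by (resource type,
 * resource id) and are only written to the RPM when
 * msm_rpm_flush_requests() runs on the way into power collapse. A node's
 * 'valid' flag marks data that changed since the last flush and therefore
 * still needs to be sent.
 */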
static struct rb_root tr_root = RB_ROOT;
static int msm_rpm_send_smd_buffer(char *buf, uint32_t size, bool noirq);
static uint32_t msm_rpm_get_next_msg_id(void);

static inline unsigned int get_rsc_type(char *buf)
{
	struct rpm_message_header *h;
	h = (struct rpm_message_header *)
		(buf + sizeof(struct rpm_request_header));
	return h->resource_type;
}

static inline unsigned int get_rsc_id(char *buf)
{
	struct rpm_message_header *h;
	h = (struct rpm_message_header *)
		(buf + sizeof(struct rpm_request_header));
	return h->resource_id;
}

#define get_data_len(buf) \
	(((struct rpm_message_header *) \
	  (buf + sizeof(struct rpm_request_header)))->data_len)

#define get_req_len(buf) \
	(((struct rpm_request_header *)(buf))->request_len)

#define get_msg_id(buf) \
	(((struct rpm_message_header *) \
	  (buf + sizeof(struct rpm_request_header)))->msg_id)

static inline int get_buf_len(char *buf)
{
	return get_req_len(buf) + sizeof(struct rpm_request_header);
}

static inline struct kvp *get_first_kvp(char *buf)
{
	return (struct kvp *)(buf + sizeof(struct rpm_request_header)
			+ sizeof(struct rpm_message_header));
}

static inline struct kvp *get_next_kvp(struct kvp *k)
{
	return (struct kvp *)((void *)k + sizeof(*k) + k->s);
}

static inline void *get_data(struct kvp *k)
{
	return (void *)k + sizeof(*k);
}

static void delete_kvp(char *msg, struct kvp *d)
{
	struct kvp *n;
	int dec;
	uint32_t size;

	n = get_next_kvp(d);
	dec = (void *)n - (void *)d;
	size = get_data_len(msg) - ((void *)n - (void *)get_first_kvp(msg));

	memcpy((void *)d, (void *)n, size);

	get_data_len(msg) -= dec;
	get_req_len(msg) -= dec;
}

static inline void update_kvp_data(struct kvp *dest, struct kvp *src)
{
	memcpy(get_data(dest), get_data(src), src->s);
}

static void add_kvp(char *buf, struct kvp *n)
{
	uint32_t inc = sizeof(*n) + n->s;
	BUG_ON((get_req_len(buf) + inc) > MAX_SLEEP_BUFFER);

	memcpy(buf + get_buf_len(buf), n, inc);

	get_data_len(buf) += inc;
	get_req_len(buf) += inc;
}

static struct slp_buf *tr_search(struct rb_root *root, char *slp)
{
	unsigned int type = get_rsc_type(slp);
	unsigned int id = get_rsc_id(slp);
	struct rb_node *node = root->rb_node;

	while (node) {
		struct slp_buf *cur = rb_entry(node, struct slp_buf, node);
		unsigned int ctype = get_rsc_type(cur->buf);
		unsigned int cid = get_rsc_id(cur->buf);

		if (type < ctype)
			node = node->rb_left;
		else if (type > ctype)
			node = node->rb_right;
		else if (id < cid)
			node = node->rb_left;
		else if (id > cid)
			node = node->rb_right;
		else
			return cur;
	}
	return NULL;
}

static int tr_insert(struct rb_root *root, struct slp_buf *slp)
{
	unsigned int type = get_rsc_type(slp->buf);
	unsigned int id = get_rsc_id(slp->buf);
	struct rb_node **node = &(root->rb_node), *parent = NULL;

	while (*node) {
		struct slp_buf *curr = rb_entry(*node, struct slp_buf, node);
		unsigned int ctype = get_rsc_type(curr->buf);
		unsigned int cid = get_rsc_id(curr->buf);

		parent = *node;

		if (type < ctype)
			node = &((*node)->rb_left);
		else if (type > ctype)
			node = &((*node)->rb_right);
		else if (id < cid)
			node = &((*node)->rb_left);
		else if (id > cid)
			node = &((*node)->rb_right);
		else
			return -EINVAL;
	}

	rb_link_node(&slp->node, parent, node);
	rb_insert_color(&slp->node, root);
	slp->valid = true;
	return 0;
}

#define for_each_kvp(buf, k) \
	for (k = (struct kvp *)get_first_kvp(buf); \
	     ((void *)k - (void *)get_first_kvp(buf)) < get_data_len(buf); \
	     k = get_next_kvp(k))
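
/*
 * Layout of a marshalled request buffer, as assumed by the accessors
 * above (offsets in bytes; the KVP offset of 28 assumes the 4-byte enum
 * used on this platform):
 *
 *	0	struct rpm_request_header  (service_type, request_len)
 *	8	struct rpm_message_header  (msg_id, set, resource_type,
 *					    resource_id, data_len)
 *	28	KVP entries, each: key (4), size (4), then 'size' bytes of
 *		payload, repeated for data_len bytes in total
 */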
static void tr_update(struct slp_buf *s, char *buf)
{
	struct kvp *e, *n;

	for_each_kvp(buf, n) {
		bool found = false;
		for_each_kvp(s->buf, e) {
			if (n->k == e->k) {
				found = true;
				if (n->s == e->s) {
					void *e_data = get_data(e);
					void *n_data = get_data(n);
					if (memcmp(e_data, n_data, n->s)) {
						update_kvp_data(e, n);
						s->valid = true;
					}
				} else {
					delete_kvp(s->buf, e);
					add_kvp(s->buf, n);
					s->valid = true;
				}
				break;
			}
		}
		if (!found) {
			add_kvp(s->buf, n);
			s->valid = true;
		}
	}
}

int msm_rpm_smd_buffer_request(char *buf, uint32_t size, gfp_t flag)
{
	struct slp_buf *slp;
	static DEFINE_SPINLOCK(slp_buffer_lock);
	unsigned long flags;

	if (size > MAX_SLEEP_BUFFER)
		return -ENOMEM;

	spin_lock_irqsave(&slp_buffer_lock, flags);
	slp = tr_search(&tr_root, buf);

	if (!slp) {
		slp = kzalloc(sizeof(struct slp_buf), GFP_ATOMIC);
		if (!slp) {
			spin_unlock_irqrestore(&slp_buffer_lock, flags);
			return -ENOMEM;
		}
		slp->buf = PTR_ALIGN(&slp->ubuf[0], sizeof(u32));
		memcpy(slp->buf, buf, size);
		if (tr_insert(&tr_root, slp))
			pr_err("%s(): Error updating sleep request\n",
					__func__);
	} else {
		/* Handle unsent requests */
		tr_update(slp, buf);
	}

	spin_unlock_irqrestore(&slp_buffer_lock, flags);

	return 0;
}

static void msm_rpm_print_sleep_buffer(struct slp_buf *s)
{
	char buf[DEBUG_PRINT_BUFFER_SIZE] = {0};
	int pos;
	int buflen = DEBUG_PRINT_BUFFER_SIZE;
	char ch[5] = {0};
	u32 type;
	struct kvp *e;

	if (!s)
		return;

	if (!s->valid)
		return;

	type = get_rsc_type(s->buf);
	memcpy(ch, &type, sizeof(u32));

	pos = scnprintf(buf, buflen,
			"Sleep request type = 0x%08x(%s)",
			get_rsc_type(s->buf), ch);
	pos += scnprintf(buf + pos, buflen - pos, " id = 0x%x",
			get_rsc_id(s->buf));

	for_each_kvp(s->buf, e) {
		uint32_t i;
		char *data = get_data(e);

		memcpy(ch, &e->k, sizeof(u32));

		pos += scnprintf(buf + pos, buflen - pos,
				"\n\t\tkey = 0x%08x(%s)",
				e->k, ch);
		pos += scnprintf(buf + pos, buflen - pos,
				" sz= %d data =", e->s);

		for (i = 0; i < e->s; i++)
			pos += scnprintf(buf + pos, buflen - pos,
					" 0x%02X", data[i]);
	}
	pos += scnprintf(buf + pos, buflen - pos, "\n");
	printk("%s", buf);
}
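
/*
 * Writes out every cached sleep-set entry that changed since the last
 * flush. Called on the way into power collapse (see msm_rpm_enter_sleep()),
 * with ACK handling deliberately deferred until wakeup to keep sleep-entry
 * latency down.
 */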
static int msm_rpm_flush_requests(bool print)
{
	struct rb_node *t;
	int ret;

	for (t = rb_first(&tr_root); t; t = rb_next(t)) {
		struct slp_buf *s = rb_entry(t, struct slp_buf, node);

		if (!s->valid)
			continue;

		if (print)
			msm_rpm_print_sleep_buffer(s);

		get_msg_id(s->buf) = msm_rpm_get_next_msg_id();
		ret = msm_rpm_send_smd_buffer(s->buf,
				get_buf_len(s->buf), true);
		/* By not adding the message to a wait list we can reduce
		 * the latency involved in waiting for an ACK from the RPM.
		 * The ACK messages will be processed when we wake up from
		 * sleep, but that processing should be minimal.
		 * msm_rpm_wait_for_ack_noirq(get_msg_id(s->buf));
		 */

		WARN_ON(ret != get_buf_len(s->buf));
		trace_rpm_send_message(true, MSM_RPM_CTX_SLEEP_SET,
				get_rsc_type(s->buf),
				get_rsc_id(s->buf),
				get_msg_id(s->buf));

		s->valid = false;
	}
	return 0;
}

static atomic_t msm_rpm_msg_id = ATOMIC_INIT(0);

static struct msm_rpm_driver_data msm_rpm_data;

struct msm_rpm_request {
	struct rpm_request_header req_hdr;
	struct rpm_message_header msg_hdr;
	struct msm_rpm_kvp_data *kvp;
	uint32_t num_elements;
	uint32_t write_idx;
	uint8_t *buf;
	uint32_t numbytes;
};

/*
 * Data related to message acknowledgement
 */
LIST_HEAD(msm_rpm_wait_list);

struct msm_rpm_wait_data {
	struct list_head list;
	uint32_t msg_id;
	bool ack_recd;
	int errno;
	struct completion ack;
};
DEFINE_SPINLOCK(msm_rpm_list_lock);

struct msm_rpm_ack_msg {
	uint32_t req;
	uint32_t req_len;
	uint32_t rsc_id;
	uint32_t msg_len;
	uint32_t id_ack;
};
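
/*
 * ACK packets from the RPM begin with struct msm_rpm_ack_msg. For a NACK,
 * the header is followed by the "err" marker and an error string;
 * msm_rpm_get_error_from_ack() below maps a zero-length remainder to
 * success and the "resource does not exist" string to -EINVAL.
 */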
LIST_HEAD(msm_rpm_ack_list);

static DECLARE_COMPLETION(data_ready);

static void msm_rpm_notify_sleep_chain(struct rpm_message_header *hdr,
		struct msm_rpm_kvp_data *kvp)
{
	struct msm_rpm_notifier_data notif;

	notif.rsc_type = hdr->resource_type;
	notif.rsc_id = hdr->resource_id;
	notif.key = kvp->key;
	notif.size = kvp->nbytes;
	notif.value = kvp->value;
	atomic_notifier_call_chain(&msm_rpm_sleep_notifier, 0, &notif);
}

static int msm_rpm_add_kvp_data_common(struct msm_rpm_request *handle,
		uint32_t key, const uint8_t *data, int size, bool noirq)
{
	uint32_t i;
	uint32_t data_size, msg_size;

	if (!handle) {
		pr_err("%s(): Invalid handle\n", __func__);
		return -EINVAL;
	}

	if (size < 0)
		return -EINVAL;

	data_size = ALIGN(size, SZ_4);
	msg_size = data_size + sizeof(struct rpm_request_header);

	for (i = 0; i < handle->write_idx; i++) {
		if (handle->kvp[i].key != key)
			continue;
		if (handle->kvp[i].nbytes != data_size) {
			kfree(handle->kvp[i].value);
			handle->kvp[i].value = NULL;
		} else {
			if (!memcmp(handle->kvp[i].value, data, data_size))
				return 0;
		}
		break;
	}

	if (i >= handle->num_elements) {
		pr_err("%s(): Number of resources exceeds max allocated\n",
				__func__);
		return -ENOMEM;
	}

	if (i == handle->write_idx)
		handle->write_idx++;

	if (!handle->kvp[i].value) {
		handle->kvp[i].value = kzalloc(data_size, GFP_FLAG(noirq));
		if (!handle->kvp[i].value) {
			pr_err("%s(): Failed malloc\n", __func__);
			return -ENOMEM;
		}
	} else {
		/* We enter the else case if the key already exists but the
		 * data doesn't match, in which case the old data should be
		 * zeroed out.
		 */
		memset(handle->kvp[i].value, 0, data_size);
	}

	if (!handle->kvp[i].valid)
		handle->msg_hdr.data_len += msg_size;
	else
		handle->msg_hdr.data_len += (data_size - handle->kvp[i].nbytes);

	handle->kvp[i].nbytes = data_size;
	handle->kvp[i].key = key;
	memcpy(handle->kvp[i].value, data, size);
	handle->kvp[i].valid = true;

	return 0;
}

static struct msm_rpm_request *msm_rpm_create_request_common(
		enum msm_rpm_set set, uint32_t rsc_type, uint32_t rsc_id,
		int num_elements, bool noirq)
{
	struct msm_rpm_request *cdata;

	cdata = kzalloc(sizeof(struct msm_rpm_request),
			GFP_FLAG(noirq));

	if (!cdata) {
		printk(KERN_INFO "%s(): Cannot allocate memory for client data\n",
				__func__);
		goto cdata_alloc_fail;
	}

	cdata->msg_hdr.set = set;
	cdata->msg_hdr.resource_type = rsc_type;
	cdata->msg_hdr.resource_id = rsc_id;
	cdata->msg_hdr.data_len = 0;

	cdata->num_elements = num_elements;
	cdata->write_idx = 0;

	cdata->kvp = kzalloc(sizeof(struct msm_rpm_kvp_data) * num_elements,
			GFP_FLAG(noirq));

	if (!cdata->kvp) {
		pr_warn("%s(): Cannot allocate memory for key value data\n",
				__func__);
		goto kvp_alloc_fail;
	}

	cdata->buf = kzalloc(DEFAULT_BUFFER_SIZE, GFP_FLAG(noirq));

	if (!cdata->buf)
		goto buf_alloc_fail;

	cdata->numbytes = DEFAULT_BUFFER_SIZE;
	return cdata;

buf_alloc_fail:
	kfree(cdata->kvp);
kvp_alloc_fail:
	kfree(cdata);
cdata_alloc_fail:
	return NULL;
}

void msm_rpm_free_request(struct msm_rpm_request *handle)
{
	int i;

	if (!handle)
		return;
	for (i = 0; i < handle->num_elements; i++)
		kfree(handle->kvp[i].value);
	kfree(handle->kvp);
	kfree(handle->buf);
	kfree(handle);
}
EXPORT_SYMBOL(msm_rpm_free_request);

struct msm_rpm_request *msm_rpm_create_request(
		enum msm_rpm_set set, uint32_t rsc_type,
		uint32_t rsc_id, int num_elements)
{
	return msm_rpm_create_request_common(set, rsc_type, rsc_id,
			num_elements, false);
}
EXPORT_SYMBOL(msm_rpm_create_request);

struct msm_rpm_request *msm_rpm_create_request_noirq(
		enum msm_rpm_set set, uint32_t rsc_type,
		uint32_t rsc_id, int num_elements)
{
	return msm_rpm_create_request_common(set, rsc_type, rsc_id,
			num_elements, true);
}
EXPORT_SYMBOL(msm_rpm_create_request_noirq);

int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
		uint32_t key, const uint8_t *data, int size)
{
	return msm_rpm_add_kvp_data_common(handle, key, data, size, false);
}
EXPORT_SYMBOL(msm_rpm_add_kvp_data);

int msm_rpm_add_kvp_data_noirq(struct msm_rpm_request *handle,
		uint32_t key, const uint8_t *data, int size)
{
	return msm_rpm_add_kvp_data_common(handle, key, data, size, true);
}
EXPORT_SYMBOL(msm_rpm_add_kvp_data_noirq);

/* Runs in interrupt context */
static void msm_rpm_notify(void *data, unsigned event)
{
	struct msm_rpm_driver_data *pdata = (struct msm_rpm_driver_data *)data;
	BUG_ON(!pdata);

	if (!(pdata->ch_info))
		return;

	switch (event) {
	case SMD_EVENT_DATA:
		complete(&data_ready);
		break;
	case SMD_EVENT_OPEN:
		complete(&pdata->smd_open);
		break;
	case SMD_EVENT_CLOSE:
	case SMD_EVENT_STATUS:
	case SMD_EVENT_REOPEN_READY:
		break;
	default:
		pr_info("Unknown SMD event\n");
	}
}

bool msm_rpm_waiting_for_ack(void)
{
	bool ret;
	unsigned long flags;

	spin_lock_irqsave(&msm_rpm_list_lock, flags);
	ret = list_empty(&msm_rpm_wait_list);
	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);

	return !ret;
}

static struct msm_rpm_wait_data *msm_rpm_get_entry_from_msg_id(uint32_t msg_id)
{
	struct list_head *ptr;
	struct msm_rpm_wait_data *elem = NULL;
	unsigned long flags;

	spin_lock_irqsave(&msm_rpm_list_lock, flags);

	list_for_each(ptr, &msm_rpm_wait_list) {
		elem = list_entry(ptr, struct msm_rpm_wait_data, list);
		if (elem && (elem->msg_id == msg_id))
			break;
		elem = NULL;
	}
	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
	return elem;
}

static uint32_t msm_rpm_get_next_msg_id(void)
{
	uint32_t id;

	/*
	 * A message id of 0 is used by the driver to indicate an error
	 * condition. The RPM driver uses an id of 1 to indicate unsent data
	 * when the data sent over hasn't been modified. This isn't an error
	 * scenario, and wait-for-ack returns success when the message id
	 * is 1.
	 */
	do {
		id = atomic_inc_return(&msm_rpm_msg_id);
	} while ((id == 0) || (id == 1) || msm_rpm_get_entry_from_msg_id(id));

	return id;
}

static int msm_rpm_add_wait_list(uint32_t msg_id)
{
	unsigned long flags;
	struct msm_rpm_wait_data *data =
		kzalloc(sizeof(struct msm_rpm_wait_data), GFP_ATOMIC);

	if (!data)
		return -ENOMEM;

	init_completion(&data->ack);
	data->ack_recd = false;
	data->msg_id = msg_id;
	data->errno = INIT_ERROR;
	spin_lock_irqsave(&msm_rpm_list_lock, flags);
	list_add(&data->list, &msm_rpm_wait_list);
	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);

	return 0;
}

static void msm_rpm_free_list_entry(struct msm_rpm_wait_data *elem)
{
	unsigned long flags;

	spin_lock_irqsave(&msm_rpm_list_lock, flags);
	list_del(&elem->list);
	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
	kfree(elem);
}

static void msm_rpm_process_ack(uint32_t msg_id, int errno)
{
	struct list_head *ptr;
	struct msm_rpm_wait_data *elem = NULL;
	unsigned long flags;

	spin_lock_irqsave(&msm_rpm_list_lock, flags);

	list_for_each(ptr, &msm_rpm_wait_list) {
		elem = list_entry(ptr, struct msm_rpm_wait_data, list);
		if (elem && (elem->msg_id == msg_id)) {
			elem->errno = errno;
			elem->ack_recd = true;
			complete(&elem->ack);
			break;
		}
		elem = NULL;
	}
	/* Special case where the sleep driver doesn't wait for ACKs. This
	 * decreases the latency involved in entering RPM-assisted power
	 * collapse.
	 */
	if (!elem)
		trace_rpm_ack_recd(0, msg_id);

	spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
}

struct msm_rpm_kvp_packet {
	uint32_t id;
	uint32_t len;
	uint32_t val;
};

static inline uint32_t msm_rpm_get_msg_id_from_ack(uint8_t *buf)
{
	return ((struct msm_rpm_ack_msg *)buf)->id_ack;
}

static inline int msm_rpm_get_error_from_ack(uint8_t *buf)
{
	uint8_t *tmp;
	uint32_t req_len = ((struct msm_rpm_ack_msg *)buf)->req_len;
	int rc = -ENODEV;

	req_len -= sizeof(struct msm_rpm_ack_msg);
	req_len += 2 * sizeof(uint32_t);
	if (!req_len)
		return 0;

	tmp = buf + sizeof(struct msm_rpm_ack_msg);

	BUG_ON(memcmp(tmp, ERR, sizeof(uint32_t)));

	tmp += 2 * sizeof(uint32_t);

	if (!(memcmp(tmp, INV_RSC, min(req_len, sizeof(INV_RSC)) - 1))) {
		pr_err("%s(): RPM NACK Unsupported resource\n", __func__);
		rc = -EINVAL;
	} else {
		pr_err("%s(): RPM NACK Invalid header\n", __func__);
	}

	return rc;
}

static int msm_rpm_read_smd_data(char *buf)
{
	int pkt_sz;
	int bytes_read = 0;

	pkt_sz = smd_cur_packet_size(msm_rpm_data.ch_info);

	if (!pkt_sz)
		return -EAGAIN;

	BUG_ON(pkt_sz > MAX_ERR_BUFFER_SIZE);

	if (pkt_sz != smd_read_avail(msm_rpm_data.ch_info))
		return -EAGAIN;

	do {
		int len;

		len = smd_read(msm_rpm_data.ch_info, buf + bytes_read, pkt_sz);
		pkt_sz -= len;
		bytes_read += len;

	} while (pkt_sz > 0);

	BUG_ON(pkt_sz < 0);

	return 0;
}

static void msm_rpm_smd_work(struct work_struct *work)
{
	uint32_t msg_id;
	int errno;
	char buf[MAX_ERR_BUFFER_SIZE] = {0};

	while (1) {
		wait_for_completion_interruptible(&data_ready);

		spin_lock(&msm_rpm_data.smd_lock_read);
		while (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
			if (msm_rpm_read_smd_data(buf))
				break;
			msg_id = msm_rpm_get_msg_id_from_ack(buf);
			errno = msm_rpm_get_error_from_ack(buf);
			msm_rpm_process_ack(msg_id, errno);
		}
		spin_unlock(&msm_rpm_data.smd_lock_read);
	}
}

static void msm_rpm_log_request(struct msm_rpm_request *cdata)
{
	char buf[DEBUG_PRINT_BUFFER_SIZE];
	size_t buflen = DEBUG_PRINT_BUFFER_SIZE;
	char name[5];
	u32 value;
	uint32_t i;
	int j, prev_valid;
	int valid_count = 0;
	int pos = 0;

	name[4] = 0;

	for (i = 0; i < cdata->write_idx; i++)
		if (cdata->kvp[i].valid)
			valid_count++;

	pos += scnprintf(buf + pos, buflen - pos, "%sRPM req: ", KERN_INFO);
	if (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_SHOW_MSG_ID)
		pos += scnprintf(buf + pos, buflen - pos, "msg_id=%u, ",
				cdata->msg_hdr.msg_id);
	pos += scnprintf(buf + pos, buflen - pos, "s=%s",
		(cdata->msg_hdr.set == MSM_RPM_CTX_ACTIVE_SET ? "act" : "slp"));

	if ((msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_PRETTY)
	    && (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_RAW)) {
		/* Both pretty and raw formatting */
		memcpy(name, &cdata->msg_hdr.resource_type, sizeof(uint32_t));
		pos += scnprintf(buf + pos, buflen - pos,
				", rsc_type=0x%08X (%s), rsc_id=%u; ",
				cdata->msg_hdr.resource_type, name,
				cdata->msg_hdr.resource_id);

		for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
			if (!cdata->kvp[i].valid)
				continue;

			memcpy(name, &cdata->kvp[i].key, sizeof(uint32_t));
			pos += scnprintf(buf + pos, buflen - pos,
					"[key=0x%08X (%s), value=%s",
					cdata->kvp[i].key, name,
					(cdata->kvp[i].nbytes ? "0x" : "null"));

			for (j = 0; j < cdata->kvp[i].nbytes; j++)
				pos += scnprintf(buf + pos, buflen - pos,
						"%02X ",
						cdata->kvp[i].value[j]);

			if (cdata->kvp[i].nbytes)
				pos += scnprintf(buf + pos, buflen - pos, "(");

			for (j = 0; j < cdata->kvp[i].nbytes; j += 4) {
				value = 0;
				memcpy(&value, &cdata->kvp[i].value[j],
					min(sizeof(uint32_t),
						cdata->kvp[i].nbytes - j));
				pos += scnprintf(buf + pos, buflen - pos, "%u",
						value);
				if (j + 4 < cdata->kvp[i].nbytes)
					pos += scnprintf(buf + pos,
							buflen - pos, " ");
			}

			if (cdata->kvp[i].nbytes)
				pos += scnprintf(buf + pos, buflen - pos, ")");

			pos += scnprintf(buf + pos, buflen - pos, "]");

			if (prev_valid + 1 < valid_count)
				pos += scnprintf(buf + pos, buflen - pos, ", ");
			prev_valid++;
		}
	} else if (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_PRETTY) {
		/* Pretty formatting only */
		memcpy(name, &cdata->msg_hdr.resource_type, sizeof(uint32_t));
		pos += scnprintf(buf + pos, buflen - pos, " %s %u; ", name,
				cdata->msg_hdr.resource_id);

		for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
			if (!cdata->kvp[i].valid)
				continue;

			memcpy(name, &cdata->kvp[i].key, sizeof(uint32_t));
			pos += scnprintf(buf + pos, buflen - pos, "%s=%s",
				name, (cdata->kvp[i].nbytes ? "" : "null"));

			for (j = 0; j < cdata->kvp[i].nbytes; j += 4) {
				value = 0;
				memcpy(&value, &cdata->kvp[i].value[j],
					min(sizeof(uint32_t),
						cdata->kvp[i].nbytes - j));
				pos += scnprintf(buf + pos, buflen - pos, "%u",
						value);
				if (j + 4 < cdata->kvp[i].nbytes)
					pos += scnprintf(buf + pos,
							buflen - pos, " ");
			}

			if (prev_valid + 1 < valid_count)
				pos += scnprintf(buf + pos, buflen - pos, ", ");
			prev_valid++;
		}
	} else {
		/* Raw formatting only */
		pos += scnprintf(buf + pos, buflen - pos,
				", rsc_type=0x%08X, rsc_id=%u; ",
				cdata->msg_hdr.resource_type,
				cdata->msg_hdr.resource_id);

		for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
			if (!cdata->kvp[i].valid)
				continue;

			pos += scnprintf(buf + pos, buflen - pos,
					"[key=0x%08X, value=%s",
					cdata->kvp[i].key,
					(cdata->kvp[i].nbytes ? "0x" : "null"));
			for (j = 0; j < cdata->kvp[i].nbytes; j++) {
				pos += scnprintf(buf + pos, buflen - pos,
						"%02X",
						cdata->kvp[i].value[j]);
				if (j + 1 < cdata->kvp[i].nbytes)
					pos += scnprintf(buf + pos,
							buflen - pos, " ");
			}
			pos += scnprintf(buf + pos, buflen - pos, "]");

			if (prev_valid + 1 < valid_count)
				pos += scnprintf(buf + pos, buflen - pos, ", ");
			prev_valid++;
		}
	}

	pos += scnprintf(buf + pos, buflen - pos, "\n");
	printk("%s", buf);
}

static int msm_rpm_send_smd_buffer(char *buf, uint32_t size, bool noirq)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);

	while ((ret = smd_write_avail(msm_rpm_data.ch_info)) < size) {
		if (ret < 0)
			break;
		if (!noirq) {
			spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write,
					flags);
			cpu_relax();
			spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
		} else
			udelay(5);
	}

	if (ret < 0) {
		pr_err("%s(): SMD not initialized\n", __func__);
		spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
		return ret;
	}

	ret = smd_write(msm_rpm_data.ch_info, buf, size);
	spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
	return ret;
}

static int msm_rpm_send_data(struct msm_rpm_request *cdata,
		int msg_type, bool noirq)
{
	uint8_t *tmpbuff;
	int ret;
	uint32_t i;
	uint32_t msg_size;
	int req_hdr_sz, msg_hdr_sz;

	if (!cdata->msg_hdr.data_len)
		return 1;

	req_hdr_sz = sizeof(cdata->req_hdr);
	msg_hdr_sz = sizeof(cdata->msg_hdr);

	cdata->req_hdr.service_type = msm_rpm_request_service[msg_type];

	cdata->req_hdr.request_len = cdata->msg_hdr.data_len + msg_hdr_sz;
	msg_size = cdata->req_hdr.request_len + req_hdr_sz;

	/* populate data_len */
	if (msg_size > cdata->numbytes) {
		kfree(cdata->buf);
		cdata->numbytes = msg_size;
		cdata->buf = kzalloc(msg_size, GFP_FLAG(noirq));
	}

	if (!cdata->buf) {
		pr_err("%s(): Failed malloc\n", __func__);
		return 0;
	}

	tmpbuff = cdata->buf;

	tmpbuff += req_hdr_sz + msg_hdr_sz;

	for (i = 0; (i < cdata->write_idx); i++) {
		/* Sanity check */
		BUG_ON((tmpbuff - cdata->buf) > cdata->numbytes);

		if (!cdata->kvp[i].valid)
			continue;

		memcpy(tmpbuff, &cdata->kvp[i].key, sizeof(uint32_t));
		tmpbuff += sizeof(uint32_t);

		memcpy(tmpbuff, &cdata->kvp[i].nbytes, sizeof(uint32_t));
		tmpbuff += sizeof(uint32_t);

		memcpy(tmpbuff, cdata->kvp[i].value, cdata->kvp[i].nbytes);
		tmpbuff += cdata->kvp[i].nbytes;

		if (cdata->msg_hdr.set == MSM_RPM_CTX_SLEEP_SET)
			msm_rpm_notify_sleep_chain(&cdata->msg_hdr,
					&cdata->kvp[i]);
	}

	memcpy(cdata->buf, &cdata->req_hdr, req_hdr_sz + msg_hdr_sz);

	if ((cdata->msg_hdr.set == MSM_RPM_CTX_SLEEP_SET) &&
			!msm_rpm_smd_buffer_request(cdata->buf, msg_size,
				GFP_FLAG(noirq)))
		return 1;

	cdata->msg_hdr.msg_id = msm_rpm_get_next_msg_id();

	memcpy(cdata->buf + req_hdr_sz, &cdata->msg_hdr, msg_hdr_sz);

	if (msm_rpm_debug_mask
	    & (MSM_RPM_LOG_REQUEST_PRETTY | MSM_RPM_LOG_REQUEST_RAW))
		msm_rpm_log_request(cdata);

	if (standalone) {
		for (i = 0; (i < cdata->write_idx); i++)
			cdata->kvp[i].valid = false;

		cdata->msg_hdr.data_len = 0;
		ret = cdata->msg_hdr.msg_id;

		return ret;
	}

	msm_rpm_add_wait_list(cdata->msg_hdr.msg_id);

	ret = msm_rpm_send_smd_buffer(&cdata->buf[0], msg_size, noirq);

	if (ret == msg_size) {
		trace_rpm_send_message(noirq, cdata->msg_hdr.set,
				cdata->msg_hdr.resource_type,
				cdata->msg_hdr.resource_id,
				cdata->msg_hdr.msg_id);
		for (i = 0; (i < cdata->write_idx); i++)
			cdata->kvp[i].valid = false;
		cdata->msg_hdr.data_len = 0;
		ret = cdata->msg_hdr.msg_id;
	} else if (ret < msg_size) {
		struct msm_rpm_wait_data *rc;

		pr_err("Failed to write data msg_size:%d ret:%d\n",
				msg_size, ret);
		ret = 0;
		rc = msm_rpm_get_entry_from_msg_id(cdata->msg_hdr.msg_id);
		if (rc)
			msm_rpm_free_list_entry(rc);
	}
	return ret;
}

int msm_rpm_send_request(struct msm_rpm_request *handle)
{
	int ret;
	static DEFINE_MUTEX(send_mtx);

	mutex_lock(&send_mtx);
	ret = msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, false);
	mutex_unlock(&send_mtx);

	return ret;
}
EXPORT_SYMBOL(msm_rpm_send_request);

int msm_rpm_send_request_noirq(struct msm_rpm_request *handle)
{
	return msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, true);
}
EXPORT_SYMBOL(msm_rpm_send_request_noirq);

int msm_rpm_wait_for_ack(uint32_t msg_id)
{
	struct msm_rpm_wait_data *elem;
	int rc = 0;

	if (!msg_id) {
		pr_err("%s(): Invalid msg id\n", __func__);
		return -ENOMEM;
	}

	if (msg_id == 1)
		return rc;

	if (standalone)
		return rc;

	elem = msm_rpm_get_entry_from_msg_id(msg_id);
	if (!elem)
		return rc;

	wait_for_completion(&elem->ack);
	trace_rpm_ack_recd(0, msg_id);

	rc = elem->errno;
	msm_rpm_free_list_entry(elem);

	return rc;
}
EXPORT_SYMBOL(msm_rpm_wait_for_ack);

int msm_rpm_wait_for_ack_noirq(uint32_t msg_id)
{
	struct msm_rpm_wait_data *elem;
	unsigned long flags;
	int rc = 0;
	uint32_t id = 0;

	if (!msg_id) {
		pr_err("%s(): Invalid msg id\n", __func__);
		return -ENOMEM;
	}

	if (msg_id == 1)
		return 0;

	if (standalone)
		return 0;

	spin_lock_irqsave(&msm_rpm_data.smd_lock_read, flags);

	elem = msm_rpm_get_entry_from_msg_id(msg_id);

	if (!elem)
		/* Should this be a BUG?
		 * Is it ok for another thread to read the msg?
		 */
		goto wait_ack_cleanup;

	if (elem->errno != INIT_ERROR) {
		rc = elem->errno;
		msm_rpm_free_list_entry(elem);
		goto wait_ack_cleanup;
	}

	while (id != msg_id) {
		if (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
			int errno;
			char buf[MAX_ERR_BUFFER_SIZE] = {};

			msm_rpm_read_smd_data(buf);
			id = msm_rpm_get_msg_id_from_ack(buf);
			errno = msm_rpm_get_error_from_ack(buf);
			msm_rpm_process_ack(id, errno);
		}
	}

	rc = elem->errno;
	trace_rpm_ack_recd(1, msg_id);

	msm_rpm_free_list_entry(elem);
wait_ack_cleanup:
	spin_unlock_irqrestore(&msm_rpm_data.smd_lock_read, flags);

	if (smd_is_pkt_avail(msm_rpm_data.ch_info))
		complete(&data_ready);
	return rc;
}
EXPORT_SYMBOL(msm_rpm_wait_for_ack_noirq);

int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
{
	int i, rc;
	struct msm_rpm_request *req =
		msm_rpm_create_request(set, rsc_type, rsc_id, nelems);

	if (!req)
		return -ENOMEM;

	for (i = 0; i < nelems; i++) {
		rc = msm_rpm_add_kvp_data(req, kvp[i].key,
				kvp[i].data, kvp[i].length);
		if (rc)
			goto bail;
	}

	rc = msm_rpm_wait_for_ack(msm_rpm_send_request(req));
bail:
	msm_rpm_free_request(req);
	return rc;
}
EXPORT_SYMBOL(msm_rpm_send_message);

int msm_rpm_send_message_noirq(enum msm_rpm_set set, uint32_t rsc_type,
		uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
{
	int i, rc;
	struct msm_rpm_request *req =
		msm_rpm_create_request_noirq(set, rsc_type, rsc_id, nelems);

	if (!req)
		return -ENOMEM;

	for (i = 0; i < nelems; i++) {
		rc = msm_rpm_add_kvp_data_noirq(req, kvp[i].key,
				kvp[i].data, kvp[i].length);
		if (rc)
			goto bail;
	}

	rc = msm_rpm_wait_for_ack_noirq(msm_rpm_send_request_noirq(req));
bail:
	msm_rpm_free_request(req);
	return rc;
}
EXPORT_SYMBOL(msm_rpm_send_message_noirq);
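
/*
 * Typical client usage, either through the one-shot wrappers above or the
 * underlying handle API. The resource type, id, key, and value below are
 * illustrative placeholders, not real RPM resources:
 *
 *	struct msm_rpm_kvp kvp = {
 *		.key = 0x12345678,
 *		.data = (uint8_t *)&value,
 *		.length = sizeof(value),
 *	};
 *	int rc = msm_rpm_send_message(MSM_RPM_CTX_ACTIVE_SET,
 *			rsc_type, rsc_id, &kvp, 1);
 *
 * which is equivalent to msm_rpm_create_request() followed by
 * msm_rpm_add_kvp_data(), msm_rpm_send_request(), and
 * msm_rpm_wait_for_ack(), with msm_rpm_free_request() on completion.
 */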

/**
 * During power collapse, the RPM driver disables the SMD interrupts to make
 * sure that the interrupt doesn't wake us from sleep.
 */
int msm_rpm_enter_sleep(bool print, const struct cpumask *cpumask)
{
	if (standalone)
		return 0;

	msm_rpm_flush_requests(print);

	return smd_mask_receive_interrupt(msm_rpm_data.ch_info, true, cpumask);
}
EXPORT_SYMBOL(msm_rpm_enter_sleep);

/**
 * When the system resumes from power collapse, the SMD interrupt disabled by
 * the enter function has to be re-enabled to continue processing SMD
 * messages.
 */
void msm_rpm_exit_sleep(void)
{
	if (standalone)
		return;

	smd_mask_receive_interrupt(msm_rpm_data.ch_info, false, NULL);
}
EXPORT_SYMBOL(msm_rpm_exit_sleep);

static int __devinit msm_rpm_smd_remote_probe(struct platform_device *pdev)
{
	if (pdev && pdev->id == msm_rpm_data.ch_type)
		complete(&msm_rpm_data.remote_open);
	return 0;
}

static struct platform_driver msm_rpm_smd_remote_driver = {
	.probe = msm_rpm_smd_remote_probe,
	.driver = {
		.owner = THIS_MODULE,
	},
};

static int __devinit msm_rpm_dev_probe(struct platform_device *pdev)
{
	char *key = NULL;
	int ret;

	key = "rpm-channel-name";
	ret = of_property_read_string(pdev->dev.of_node, key,
			&msm_rpm_data.ch_name);
	if (ret)
		goto fail;

	key = "rpm-channel-type";
	ret = of_property_read_u32(pdev->dev.of_node, key,
			&msm_rpm_data.ch_type);
	if (ret)
		goto fail;

	key = "rpm-standalone";
	standalone = of_property_read_bool(pdev->dev.of_node, key);

	msm_rpm_smd_remote_driver.driver.name = msm_rpm_data.ch_name;
	init_completion(&msm_rpm_data.remote_open);
	init_completion(&msm_rpm_data.smd_open);
	spin_lock_init(&msm_rpm_data.smd_lock_write);
	spin_lock_init(&msm_rpm_data.smd_lock_read);
	INIT_WORK(&msm_rpm_data.work, msm_rpm_smd_work);

	ret = platform_driver_register(&msm_rpm_smd_remote_driver);
	if (ret < 0)
		goto fail_driver;

	ret = wait_for_completion_timeout(&msm_rpm_data.remote_open,
			msecs_to_jiffies(SMD_CHANNEL_NOTIF_TIMEOUT));

	if (!ret || smd_named_open_on_edge(msm_rpm_data.ch_name,
				msm_rpm_data.ch_type,
				&msm_rpm_data.ch_info,
				&msm_rpm_data,
				msm_rpm_notify)) {
		pr_info("Cannot open RPM channel %s %d\n", msm_rpm_data.ch_name,
				msm_rpm_data.ch_type);

		BUG_ON(!standalone);
		complete(&msm_rpm_data.smd_open);
	} else {
		/*
		 * Override DT's suggestion to try standalone, since we have
		 * an SMD channel.
		 */
		standalone = false;
	}

	wait_for_completion(&msm_rpm_data.smd_open);

	smd_disable_read_intr(msm_rpm_data.ch_info);

	if (!standalone) {
		msm_rpm_smd_wq = alloc_workqueue("rpm-smd",
				WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
		if (!msm_rpm_smd_wq)
			return -EINVAL;
		queue_work(msm_rpm_smd_wq, &msm_rpm_data.work);
	}

	of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);

	if (standalone)
		pr_info("%s(): RPM running in standalone mode\n", __func__);

	return 0;
fail:
	pr_err("%s(): Failed to read node: %s, key=%s\n", __func__,
			pdev->dev.of_node->full_name, key);
	return -EINVAL;
fail_driver:
	return ret;
}

static struct of_device_id msm_rpm_match_table[] = {
	{.compatible = "qcom,rpm-smd"},
	{},
};

static struct platform_driver msm_rpm_device_driver __refdata = {
	.probe = msm_rpm_dev_probe,
	.driver = {
		.name = "rpm-smd",
		.owner = THIS_MODULE,
		.of_match_table = msm_rpm_match_table,
	},
};

int __init msm_rpm_driver_init(void)
{
	static bool registered;

	if (registered)
		return 0;
	registered = true;

	return platform_driver_register(&msm_rpm_device_driver);
}
EXPORT_SYMBOL(msm_rpm_driver_init);
late_initcall(msm_rpm_driver_init);