rdbg.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151
  1. /*
  2. * Copyright (c) 2013, The Linux Foundation. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 and
  6. * only version 2 as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. *
  13. */
  14. #include <linux/types.h>
  15. #include <linux/cdev.h>
  16. #include <linux/gfp.h>
  17. #include <linux/device.h>
  18. #include <linux/fs.h>
  19. #include <linux/slab.h>
  20. #include <linux/module.h>
  21. #include <linux/completion.h>
  22. #include <linux/of_gpio.h>
  23. #include <linux/mutex.h>
  24. #include <mach/msm_smsm.h>
  25. #include <linux/uaccess.h>
  26. #include <asm/system.h>
/* Number of SMP2P processor slots handled by this driver. */
#define SMP2P_NUM_PROCS 8
/* Shared-memory protocol version written into the SMQ header. */
#define SM_VERSION 1
/* Fixed size in bytes of one data block in the shared-memory queue. */
#define SM_BLOCKSIZE 128

/* Magic values marking SMQ control structures as initialized. */
#define SMQ_MAGIC_INIT 0xFF00FF00
#define SMQ_MAGIC_PRODUCER (SMQ_MAGIC_INIT | 0x1)
#define SMQ_MAGIC_CONSUMER (SMQ_MAGIC_INIT | 0x2)

/* Internal SMQ status codes (negative values are failures). */
enum SMQ_STATUS {
	SMQ_SUCCESS = 0,	/* operation completed */
	SMQ_ENOMEMORY = -1,	/* allocation failed / no free blocks */
	SMQ_EBADPARM = -2,	/* bad argument or corrupt queue index */
	SMQ_UNDERFLOW = -3,	/* queue empty or peer not initialized */
	SMQ_OVERFLOW = -4
};

/* Role of an smq instance for one direction of the channel. */
enum smq_type {
	PRODUCER = 1,
	CONSUMER = 2,
	INVALID = 3
};
/*
 * Producer-private map of data-block usage. map[i] holds the remaining
 * run length of an allocation starting at block i (stamped by
 * smq_blockmap_get as n, n-1, ..., 1), or 0 when block i is free.
 */
struct smq_block_map {
	uint32_t index_read;	/* next index to scan for free blocks */
	uint32_t num_blocks;	/* usable entries in map[] */
	uint8_t *map;		/* one byte per block, kcalloc'd in ctor */
};

/*
 * One control-ring node describing a message as a run of data blocks.
 * Lives in shared memory, hence packed.
 */
struct smq_node {
	uint16_t index_block;	/* first data block of the message */
	uint16_t num_blocks;	/* number of blocks occupied */
} __attribute__ ((__packed__));

/* Shared-memory header: protocol versions of both endpoints. */
struct smq_hdr {
	uint8_t producer_version;
	uint8_t consumer_version;
} __attribute__ ((__packed__));

/* Producer-owned control state (read-only for the consumer). */
struct smq_out_state {
	uint32_t init;				/* SMQ_MAGIC_PRODUCER once ready */
	uint32_t index_check_queue_for_reset;	/* reset generation counter */
	uint32_t index_sent_write;		/* next sent[] slot to write */
	uint32_t index_free_read;		/* next free[] slot to reclaim */
} __attribute__ ((__packed__));

/* Producer control area: state followed by the sent-node ring
 * (sent[1] is a variable-length trailer inside shared memory). */
struct smq_out {
	struct smq_out_state s;
	struct smq_node sent[1];
};

/* Consumer-owned control state (read-only for the producer). */
struct smq_in_state {
	uint32_t init;				/* SMQ_MAGIC_CONSUMER once ready */
	uint32_t index_check_queue_for_reset_ack; /* acked reset generation */
	uint32_t index_sent_read;		/* next sent[] slot to read */
	uint32_t index_free_write;		/* next free[] slot to publish */
} __attribute__ ((__packed__));

/* Consumer control area: state followed by the free-node ring. */
struct smq_in {
	struct smq_in_state s;
	struct smq_node free[1];
};

/*
 * One direction of the shared-memory channel. hdr/blocks/out/in all
 * point into the same SMEM region, laid out by smq_ctor().
 */
struct smq {
	struct smq_hdr *hdr;
	struct smq_out *out;
	struct smq_in *in;
	uint8_t *blocks;		/* start of the data-block array */
	uint32_t num_blocks;
	struct mutex *lock;		/* set only for PRODUCER queues */
	uint32_t initialized;		/* SMQ_MAGIC_INIT once constructed */
	struct smq_block_map block_map;	/* producer-only allocation map */
	enum smq_type type;
};
/* Base GPIO and IRQ numbers for one smp2p interrupt direction. */
struct gpio_info {
	int gpio_base_id;
	int irq_base_id;
};

/* Per-minor (per remote processor) runtime state. */
struct rdbg_data {
	struct device *device;
	struct completion work;		/* completed by the incoming IRQ */
	struct gpio_info in;		/* interrupt from the subsystem */
	struct gpio_info out;		/* interrupt towards the subsystem */
	bool device_initialized;
	int gpio_out_offset;		/* rotates 0..31; see send_interrupt_to_subsystem */
	bool device_opened;
	void *smem_addr;		/* base of the shared SMEM region */
	size_t smem_size;
	struct smq producer_smrb;	/* AP -> subsystem queue */
	struct smq consumer_smrb;	/* subsystem -> AP queue */
	struct mutex write_mutex;	/* serializes producer-side access */
};

/* Driver-wide state: char device plus the per-processor data array. */
struct rdbg_device {
	struct cdev cdev;
	struct class *class;
	dev_t dev_no;
	int num_devices;
	struct rdbg_data *rdbg_data;
};
  114. static struct rdbg_device g_rdbg_instance = {
  115. { {0} },
  116. NULL,
  117. 0,
  118. SMP2P_NUM_PROCS,
  119. NULL
  120. };
/* Static description of each remote processor's debug SMEM buffer. */
struct processor_specific_info {
	char *name;			/* node name; NULL = unsupported slot */
	unsigned int smem_buffer_addr;	/* id passed to smem_alloc(); 0 = none */
	size_t smem_buffer_size;	/* bytes; 0 disables the device */
};

/* Indexed by device minor number (one entry per SMP2P processor). */
static struct processor_specific_info proc_info[SMP2P_NUM_PROCS] = {
	{0},					/*APPS*/
	{"rdbg_modem", 0, 0},			/*MODEM*/
	{"rdbg_adsp", SMEM_LC_DEBUGGER, 16*1024},	/*ADSP*/
	{0},					/*SMP2P_RESERVED_PROC_1*/
	{"rdbg_wcnss", 0, 0},			/*WCNSS*/
	{0},					/*SMP2P_RESERVED_PROC_2*/
	{0},					/*SMP2P_POWER_PROC*/
	{0}					/*SMP2P_REMOTE_MOCK_PROC*/
};
/*
 * Allocate n contiguous free blocks from the producer's block map.
 *
 * Scans circularly starting at block_map->index_read. On success the
 * starting block index is written to *block_index and the map entries
 * of the run are stamped with a countdown (n, n-1, ..., 1) so that
 * smq_blockmap_put() can clear the whole run given its first entry.
 * Returns SMQ_SUCCESS, or SMQ_ENOMEMORY when no suitable run exists.
 */
static int smq_blockmap_get(struct smq_block_map *block_map,
	uint32_t *block_index, uint32_t n)
{
	uint32_t start;
	uint32_t mark = 0;
	uint32_t found = 0;
	uint32_t i = 0;

	start = block_map->index_read;

	if (n == 1) {
		/* Fast path: any single free block will do. */
		do {
			if (!block_map->map[block_map->index_read]) {
				*block_index = block_map->index_read;
				block_map->map[block_map->index_read] = 1;
				block_map->index_read++;
				block_map->index_read %= block_map->num_blocks;
				return SMQ_SUCCESS;
			}
			block_map->index_read++;
		} while (start != (block_map->index_read %=
			block_map->num_blocks));
	} else {
		/* 'mark' tracks the start of the current candidate run;
		 * it is reset to num_blocks (an impossible index) when
		 * the run is broken by an allocated block. */
		mark = block_map->num_blocks;

		do {
			if (!block_map->map[block_map->index_read]) {
				if (mark > block_map->index_read) {
					mark = block_map->index_read;
					start = block_map->index_read;
					found = 0;
				}
				found++;
				if (found == n) {
					*block_index = mark;
					/* Stamp the countdown run lengths. */
					for (i = 0; i < n; i++)
						block_map->map[mark + i] =
							(uint8_t)(n - i);
					block_map->index_read += block_map->map
						[block_map->index_read] - 1;
					return SMQ_SUCCESS;
				}
			} else {
				found = 0;
				/* Jump past the allocated run using the
				 * stored countdown value. */
				block_map->index_read += block_map->map
					[block_map->index_read] - 1;
				mark = block_map->num_blocks;
			}
			block_map->index_read++;
		} while (start != (block_map->index_read %=
			block_map->num_blocks));
	}

	return SMQ_ENOMEMORY;
}
  187. static void smq_blockmap_put(struct smq_block_map *block_map, uint32_t i)
  188. {
  189. uint32_t num_blocks = block_map->map[i];
  190. while (num_blocks--) {
  191. block_map->map[i] = 0;
  192. i++;
  193. }
  194. }
  195. static int smq_blockmap_reset(struct smq_block_map *block_map)
  196. {
  197. if (!block_map->map)
  198. return SMQ_ENOMEMORY;
  199. memset(block_map->map, 0 , block_map->num_blocks + 1);
  200. block_map->index_read = 0;
  201. return SMQ_SUCCESS;
  202. }
  203. static int smq_blockmap_ctor(struct smq_block_map *block_map,
  204. uint32_t num_blocks)
  205. {
  206. if (num_blocks <= 1)
  207. return SMQ_ENOMEMORY;
  208. block_map->map = kcalloc(num_blocks, sizeof(uint8_t), GFP_KERNEL);
  209. if (!block_map->map)
  210. return SMQ_ENOMEMORY;
  211. block_map->num_blocks = num_blocks - 1;
  212. smq_blockmap_reset(block_map);
  213. return SMQ_SUCCESS;
  214. }
  215. static void smq_blockmap_dtor(struct smq_block_map *block_map)
  216. {
  217. kfree(block_map->map);
  218. block_map->map = NULL;
  219. }
/*
 * Return a received data block to the remote producer by publishing
 * its index on the consumer's free-node ring. 'data' must point into
 * smq->blocks, as handed out by smq_receive().
 * Returns SMQ_SUCCESS, SMQ_UNDERFLOW when the remote producer is not
 * initialized, or SMQ_EBADPARM when 'data' is out of range.
 */
static int smq_free(struct smq *smq, void *data)
{
	struct smq_node node;
	uint32_t index_block;
	int err = SMQ_SUCCESS;

	if (smq->lock)
		mutex_lock(smq->lock);

	/* Nothing to do if the remote side never came up. */
	if ((SM_VERSION != smq->hdr->producer_version) &&
		(SMQ_MAGIC_PRODUCER != smq->out->s.init)) {
		err = SMQ_UNDERFLOW;
		goto bail;
	}

	index_block = ((uint8_t *)data - smq->blocks) / SM_BLOCKSIZE;
	if (index_block >= smq->num_blocks) {
		err = SMQ_EBADPARM;
		goto bail;
	}

	node.index_block = (uint16_t)index_block;
	node.num_blocks = 0;
	/* Publish the freed node, then advance the ring's write index. */
	*((struct smq_node *)(smq->in->free + smq->in->
		s.index_free_write)) = node;

	smq->in->s.index_free_write = (smq->in->s.index_free_write + 1)
		% smq->num_blocks;

bail:
	if (smq->lock)
		mutex_unlock(smq->lock);
	return err;
}
/*
 * Fetch the next message published by the remote producer.
 * On success, *pp points at the message's first data block, *pnsize
 * holds its capacity in bytes (whole blocks), and *pbmore reports
 * whether further messages are already queued.
 * Returns SMQ_UNDERFLOW when the producer is uninitialized or the
 * queue is empty, SMQ_EBADPARM on a corrupt node index.
 */
static int smq_receive(struct smq *smq, void **pp, int *pnsize, int *pbmore)
{
	struct smq_node *node;
	int err = SMQ_SUCCESS;
	int more = 0;

	if ((SM_VERSION != smq->hdr->producer_version) &&
		(SMQ_MAGIC_PRODUCER != smq->out->s.init))
		return SMQ_UNDERFLOW;

	/* Ring is empty when the read index catches the write index. */
	if (smq->in->s.index_sent_read == smq->out->s.index_sent_write) {
		err = SMQ_UNDERFLOW;
		goto bail;
	}

	node = (struct smq_node *)(smq->out->sent + smq->in->s.index_sent_read);
	if (node->index_block >= smq->num_blocks) {
		err = SMQ_EBADPARM;
		goto bail;
	}

	smq->in->s.index_sent_read = (smq->in->s.index_sent_read + 1)
		% smq->num_blocks;

	*pp = smq->blocks + (node->index_block * SM_BLOCKSIZE);
	*pnsize = SM_BLOCKSIZE * node->num_blocks;

	/* Read barrier before re-checking for more data written by the
	 * remote side. */
	rmb();
	if (smq->in->s.index_sent_read != smq->out->s.index_sent_write)
		more = 1;

bail:
	*pbmore = more;
	return err;
}
/*
 * Copy 'nsize' bytes from the user buffer 'pcb' into freshly
 * allocated shared-memory blocks and publish the message on the sent
 * ring. Before allocating, reclaims any blocks the consumer has
 * returned on the free ring (only when the consumer is up and no
 * reset handshake is pending). Called with smq->lock held for the
 * duration.
 * Returns SMQ_SUCCESS, an SMQ_* error, or the nonzero byte count
 * left over by a faulting copy_from_user().
 */
static int smq_alloc_send(struct smq *smq, const uint8_t *pcb, int nsize)
{
	void *pv = 0;
	int num_blocks;
	uint32_t index_block = 0;
	int err = SMQ_SUCCESS;
	struct smq_node *node = NULL;

	mutex_lock(smq->lock);

	if ((SMQ_MAGIC_CONSUMER == smq->in->s.init) &&
		(SM_VERSION == smq->hdr->consumer_version)) {
		if (smq->out->s.index_check_queue_for_reset ==
			smq->in->s.index_check_queue_for_reset_ack) {
			/* Drain the free ring, giving the blocks back to
			 * the block map for reuse. */
			while (smq->out->s.index_free_read !=
				smq->in->s.index_free_write) {
				node = (struct smq_node *)(
					smq->in->free +
					smq->out->s.index_free_read);
				if (node->index_block >= smq->num_blocks) {
					err = SMQ_EBADPARM;
					goto bail;
				}

				smq->out->s.index_free_read =
					(smq->out->s.index_free_read + 1)
					% smq->num_blocks;

				smq_blockmap_put(&smq->block_map,
					node->index_block);
				rmb();
			}
		}
	}

	/* Round the payload up to whole SM_BLOCKSIZE blocks. */
	num_blocks = ALIGN(nsize, SM_BLOCKSIZE)/SM_BLOCKSIZE;
	err = smq_blockmap_get(&smq->block_map, &index_block, num_blocks);
	if (SMQ_SUCCESS != err)
		goto bail;

	pv = smq->blocks + (SM_BLOCKSIZE * index_block);

	err = copy_from_user((void *)pv, (void *)pcb, nsize);
	if (0 != err)
		goto bail;

	/* Publish the message: first block index and length, then
	 * advance the sent ring's write index. */
	((struct smq_node *)(smq->out->sent +
		smq->out->s.index_sent_write))->index_block
		= (uint16_t)index_block;
	((struct smq_node *)(smq->out->sent +
		smq->out->s.index_sent_write))->num_blocks
		= (uint16_t)num_blocks;

	smq->out->s.index_sent_write = (smq->out->s.index_sent_write + 1)
		% smq->num_blocks;

bail:
	if (SMQ_SUCCESS != err) {
		/* pv non-NULL means blocks were allocated; return them. */
		if (pv)
			smq_blockmap_put(&smq->block_map, index_block);
	}
	mutex_unlock(smq->lock);
	return err;
}
/*
 * Producer-side half of the queue-reset handshake. When the consumer
 * advertises a new reset generation ('reset_num'), adopt it:
 * invalidate every sent node, wipe the block map and rewind the
 * producer's ring indexes. No-op for non-producer queues.
 * Returns 1 if a reset was performed, 0 otherwise.
 */
static int smq_reset_producer_queue_internal(struct smq *smq,
	uint32_t reset_num)
{
	int retval = 0;
	uint32_t i;

	if (PRODUCER != smq->type)
		goto bail;

	mutex_lock(smq->lock);
	if (smq->out->s.index_check_queue_for_reset != reset_num) {
		smq->out->s.index_check_queue_for_reset = reset_num;
		/* 0xFFFF marks a sent node as invalid/unused. */
		for (i = 0; i < smq->num_blocks; i++)
			(smq->out->sent + i)->index_block = 0xFFFF;

		smq_blockmap_reset(&smq->block_map);
		smq->out->s.index_sent_write = 0;
		smq->out->s.index_free_read = 0;
		retval = 1;
	}
	mutex_unlock(smq->lock);

bail:
	return retval;
}
/*
 * Consumer-side reset check. If the remote producer bumped its reset
 * generation counter (e.g. after a subsystem restart), acknowledge
 * it, invalidate the free-node ring, rewind the consumer indexes and
 * mirror the reset onto our own producer queue.
 * Returns the result of the producer-side reset (1/0), or 0 when no
 * reset was pending or the peer is not initialized.
 */
static int smq_check_queue_reset(struct smq *p_cons, struct smq *p_prod)
{
	int retval = 0;
	uint32_t reset_num, i;

	if ((CONSUMER != p_cons->type) ||
		(SMQ_MAGIC_PRODUCER != p_cons->out->s.init) ||
		(SM_VERSION != p_cons->hdr->producer_version))
		goto bail;

	reset_num = p_cons->out->s.index_check_queue_for_reset;
	if (p_cons->in->s.index_check_queue_for_reset_ack != reset_num) {
		p_cons->in->s.index_check_queue_for_reset_ack = reset_num;
		/* 0xFFFF marks a free node as invalid/unused. */
		for (i = 0; i < p_cons->num_blocks; i++)
			(p_cons->in->free + i)->index_block = 0xFFFF;

		p_cons->in->s.index_sent_read = 0;
		p_cons->in->s.index_free_write = 0;

		retval = smq_reset_producer_queue_internal(p_prod, reset_num);
	}

bail:
	return retval;
}
  371. static int check_subsystem_debug_enabled(void *base_addr, int size)
  372. {
  373. int num_blocks;
  374. uint8_t *pb_orig;
  375. uint8_t *pb;
  376. struct smq smq;
  377. int err = 0;
  378. pb = pb_orig = (uint8_t *)base_addr;
  379. pb += sizeof(struct smq_hdr);
  380. pb = PTR_ALIGN(pb, 8);
  381. size -= pb - (uint8_t *)pb_orig;
  382. num_blocks = (int)((size - sizeof(struct smq_out_state) -
  383. sizeof(struct smq_in_state))/(SM_BLOCKSIZE +
  384. sizeof(struct smq_node) * 2));
  385. if (0 >= num_blocks) {
  386. err = SMQ_EBADPARM;
  387. goto bail;
  388. }
  389. pb += num_blocks * SM_BLOCKSIZE;
  390. smq.out = (struct smq_out *)pb;
  391. pb += sizeof(struct smq_out_state) + (num_blocks *
  392. sizeof(struct smq_node));
  393. smq.in = (struct smq_in *)pb;
  394. if (SMQ_MAGIC_CONSUMER != smq.in->s.init) {
  395. pr_err("%s, smq in consumer not initialized", __func__);
  396. err = -ECOMM;
  397. }
  398. bail:
  399. return err;
  400. }
  401. static void smq_dtor(struct smq *smq)
  402. {
  403. if (SMQ_MAGIC_INIT == smq->initialized) {
  404. switch (smq->type) {
  405. case PRODUCER:
  406. smq->out->s.init = 0;
  407. smq_blockmap_dtor(&smq->block_map);
  408. break;
  409. case CONSUMER:
  410. smq->in->s.init = 0;
  411. break;
  412. default:
  413. case INVALID:
  414. break;
  415. }
  416. smq->initialized = 0;
  417. }
  418. }
  419. /*
  420. * The shared memory is used as a circular ring buffer in each direction.
  421. * Thus we have a bi-directional shared memory channel between the AP
  422. * and a subsystem. We call this SMQ. Each memory channel contains a header,
  423. * data and a control mechanism that is used to synchronize read and write
  424. * of data between the AP and the remote subsystem.
  425. *
  426. * Overall SMQ memory view:
  427. *
  428. * +------------------------------------------------+
  429. * | SMEM buffer |
  430. * |-----------------------+------------------------|
  431. * |Producer: LA | Producer: Remote |
  432. * |Consumer: Remote | subsystem |
  433. * | subsystem | Consumer: LA |
  434. * | | |
  435. * | Producer| Consumer|
  436. * +-----------------------+------------------------+
  437. * | |
  438. * | |
  439. * | +--------------------------------------+
  440. * | |
  441. * | |
  442. * v v
  443. * +--------------------------------------------------------------+
  444. * | Header | Data | Control |
  445. * +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+
  446. * | | b | b | b | | S |n |n | | S |n |n | |
  447. * | Producer | l | l | l | | M |o |o | | M |o |o | |
  448. * | Ver | o | o | o | | Q |d |d | | Q |d |d | |
  449. * |-----------| c | c | c | ... | |e |e | ... | |e |e | ... |
  450. * | | k | k | k | | O | | | | I | | | |
  451. * | Consumer | | | | | u |0 |1 | | n |0 |1 | |
  452. * | Ver | 0 | 1 | 2 | | t | | | | | | | |
  453. * +-----------+---+---+---+-----+----+--+--+-----+---+--+--+-----+
  454. * | |
  455. * + |
  456. * |
  457. * +------------------------+
  458. * |
  459. * v
  460. * +----+----+----+----+
  461. * | SMQ Nodes |
  462. * |----|----|----|----|
  463. * Node # | 0 | 1 | 2 | ...|
  464. * |----|----|----|----|
  465. * Starting Block Index # | 0 | 3 | 8 | ...|
  466. * |----|----|----|----|
  467. * # of blocks | 3 | 5 | 1 | ...|
  468. * +----+----+----+----+
  469. *
  470. * Header: Contains version numbers for software compatibility to ensure
  471. * that both producers and consumers on the AP and subsystems know how to
  472. * read from and write to the queue.
  473. * Both the producer and consumer versions are 1.
  474. * +---------+-------------------+
  475. * | Size | Field |
  476. * +---------+-------------------+
  477. * | 1 byte | Producer Version |
  478. * +---------+-------------------+
  479. * | 1 byte | Consumer Version |
  480. * +---------+-------------------+
  481. *
  482. * Data: The data portion contains multiple blocks [0..N] of a fixed size.
  483. * The block size SM_BLOCKSIZE is fixed to 128 bytes for header version #1.
  484. * Payload sent from the debug agent app is split (if necessary) and placed
  485. * in these blocks. The first data block is placed at the next 8 byte aligned
  486. * address after the header.
  487. *
  488. * The number of blocks for a given SMEM allocation is derived as follows:
  489. * Number of Blocks = ((Total Size - Alignment - Size of Header
  490. * - Size of SMQIn - Size of SMQOut)/(SM_BLOCKSIZE))
  491. *
  492. * The producer maintains a private block map of each of these blocks to
  493. * determine which of these blocks in the queue is available and which are free.
  494. *
  495. * Control:
  496. * The control portion contains a list of nodes [0..N] where N is number
  497. * of available data blocks. Each node identifies the data
 * block indexes that contain a particular debug message to be transferred,
  499. * and the number of blocks it took to hold the contents of the message.
  500. *
  501. * Each node has the following structure:
  502. * +---------+-------------------+
  503. * | Size | Field |
  504. * +---------+-------------------+
 * | 2 bytes |Starting Block Index|
  506. * +---------+-------------------+
  507. * | 2 bytes |Number of Blocks |
  508. * +---------+-------------------+
  509. *
  510. * The producer and the consumer update different parts of the control channel
  511. * (SMQOut / SMQIn) respectively. Each of these control data structures contains
  512. * information about the last node that was written / read, and the actual nodes
  513. * that were written/read.
  514. *
  515. * SMQOut Structure (R/W by producer, R by consumer):
  516. * +---------+-------------------+
  517. * | Size | Field |
  518. * +---------+-------------------+
  519. * | 4 bytes | Magic Init Number |
  520. * +---------+-------------------+
  521. * | 4 bytes | Reset |
  522. * +---------+-------------------+
  523. * | 4 bytes | Last Sent Index |
  524. * +---------+-------------------+
  525. * | 4 bytes | Index Free Read |
  526. * +---------+-------------------+
  527. *
  528. * SMQIn Structure (R/W by consumer, R by producer):
  529. * +---------+-------------------+
  530. * | Size | Field |
  531. * +---------+-------------------+
  532. * | 4 bytes | Magic Init Number |
  533. * +---------+-------------------+
  534. * | 4 bytes | Reset ACK |
  535. * +---------+-------------------+
  536. * | 4 bytes | Last Read Index |
  537. * +---------+-------------------+
  538. * | 4 bytes | Index Free Write |
  539. * +---------+-------------------+
  540. *
  541. * Magic Init Number:
  542. * Both SMQ Out and SMQ In initialize this field with a predefined magic
  543. * number so as to make sure that both the consumer and producer blocks
  544. * have fully initialized and have valid data in the shared memory control area.
  545. * Producer Magic #: 0xFF00FF01
  546. * Consumer Magic #: 0xFF00FF02
  547. */
/*
 * Construct one direction of the SMQ channel inside the SMEM region
 * at base_addr (see the layout description above): header, 8-byte
 * aligned data blocks, producer control area (smq_out), consumer
 * control area (smq_in). A producer also builds its private block map
 * and bumps (or starts) the reset generation counter; a consumer acks
 * the producer's current generation when one exists. 'lock_ptr' is
 * retained only for PRODUCER queues.
 * Returns SMQ_SUCCESS; SMQ_EBADPARM on bad arguments or double init;
 * SMQ_ENOMEMORY when the region cannot hold at least one block.
 */
static int smq_ctor(struct smq *smq, void *base_addr, int size,
	enum smq_type type, struct mutex *lock_ptr)
{
	int num_blocks;
	uint8_t *pb_orig;
	uint8_t *pb;
	uint32_t i;
	int err;

	if (SMQ_MAGIC_INIT == smq->initialized) {
		err = SMQ_EBADPARM;
		goto bail;
	}

	if (!base_addr || !size) {
		err = SMQ_EBADPARM;
		goto bail;
	}

	if (type == PRODUCER)
		smq->lock = lock_ptr;

	pb_orig = (uint8_t *)base_addr;
	smq->hdr = (struct smq_hdr *)pb_orig;
	pb = pb_orig;
	pb += sizeof(struct smq_hdr);
	/* First data block starts at the next 8-byte boundary. */
	pb = PTR_ALIGN(pb, 8);
	size -= pb - (uint8_t *)pb_orig;
	/* Each block costs SM_BLOCKSIZE data bytes plus one sent node
	 * and one free node in the control areas. */
	num_blocks = (int)((size - sizeof(struct smq_out_state) -
		sizeof(struct smq_in_state))/(SM_BLOCKSIZE +
		sizeof(struct smq_node) * 2));
	if (0 >= num_blocks) {
		err = SMQ_ENOMEMORY;
		goto bail;
	}

	smq->blocks = pb;
	smq->num_blocks = num_blocks;
	pb += num_blocks * SM_BLOCKSIZE;
	smq->out = (struct smq_out *)pb;
	pb += sizeof(struct smq_out_state) + (num_blocks *
		sizeof(struct smq_node));
	smq->in = (struct smq_in *)pb;
	smq->type = type;

	if (PRODUCER == type) {
		smq->hdr->producer_version = SM_VERSION;
		/* 0xFFFF marks a node as invalid/unused. */
		for (i = 0; i < smq->num_blocks; i++)
			(smq->out->sent + i)->index_block = 0xFFFF;

		err = smq_blockmap_ctor(&smq->block_map, smq->num_blocks);
		if (SMQ_SUCCESS != err)
			goto bail;

		smq->out->s.index_sent_write = 0;
		smq->out->s.index_free_read = 0;
		/* If the producer area survived (e.g. a restart), bump
		 * the reset generation so the consumer re-syncs. */
		if (smq->out->s.init == SMQ_MAGIC_PRODUCER) {
			smq->out->s.index_check_queue_for_reset += 1;
		} else {
			smq->out->s.index_check_queue_for_reset = 1;
			smq->out->s.init = SMQ_MAGIC_PRODUCER;
		}
	} else {
		smq->hdr->consumer_version = SM_VERSION;
		for (i = 0; i < smq->num_blocks; i++)
			(smq->in->free + i)->index_block = 0xFFFF;

		smq->in->s.index_sent_read = 0;
		smq->in->s.index_free_write = 0;
		/* Ack the producer's reset generation if it is already
		 * up; otherwise start from zero. */
		if (smq->out->s.init == SMQ_MAGIC_PRODUCER) {
			smq->in->s.index_check_queue_for_reset_ack =
				smq->out->s.index_check_queue_for_reset;
		} else {
			smq->in->s.index_check_queue_for_reset_ack = 0;
		}

		smq->in->s.init = SMQ_MAGIC_CONSUMER;
	}

	smq->initialized = SMQ_MAGIC_INIT;
	err = SMQ_SUCCESS;

bail:
	return err;
}
  621. static void send_interrupt_to_subsystem(struct rdbg_data *rdbgdata)
  622. {
  623. int offset = rdbgdata->gpio_out_offset;
  624. int val = 1 ^ gpio_get_value(rdbgdata->out.gpio_base_id + offset);
  625. gpio_set_value(rdbgdata->out.gpio_base_id + offset, val);
  626. rdbgdata->gpio_out_offset = (offset + 1) % 32;
  627. dev_dbg(rdbgdata->device, "%s: sent interrupt %d to subsystem",
  628. __func__, val);
  629. }
  630. static irqreturn_t on_interrupt_from(int irq, void *ptr)
  631. {
  632. struct rdbg_data *rdbgdata = (struct rdbg_data *) ptr;
  633. dev_dbg(rdbgdata->device, "%s: Received interrupt %d from subsystem",
  634. __func__, irq);
  635. complete(&(rdbgdata->work));
  636. return IRQ_HANDLED;
  637. }
  638. static int initialize_smq(struct rdbg_data *rdbgdata)
  639. {
  640. int err = 0;
  641. if (smq_ctor(&(rdbgdata->producer_smrb), (void *)(rdbgdata->smem_addr),
  642. ((rdbgdata->smem_size)/2), PRODUCER, &rdbgdata->write_mutex)) {
  643. dev_err(rdbgdata->device, "%s: smq producer allocation failed",
  644. __func__);
  645. err = -ENOMEM;
  646. goto bail;
  647. }
  648. if (smq_ctor(&(rdbgdata->consumer_smrb), (void *)((uint32_t)
  649. (rdbgdata->smem_addr) + ((rdbgdata->smem_size)/2)),
  650. ((rdbgdata->smem_size)/2), CONSUMER, NULL)) {
  651. dev_err(rdbgdata->device, "%s: smq conmsumer allocation failed",
  652. __func__);
  653. err = -ENOMEM;
  654. }
  655. bail:
  656. return err;
  657. }
/*
 * open() handler: allocate the subsystem's SMEM region, verify the
 * remote side has debug enabled, register the incoming smp2p IRQ and
 * construct both shared-memory queues. The per-minor rdbg_data is
 * stashed in filp->private_data for read/write/release.
 * Errors unwind in reverse order via the goto chain below.
 */
static int rdbg_open(struct inode *inode, struct file *filp)
{
	int device_id = -1;
	struct rdbg_device *device = &g_rdbg_instance;
	struct rdbg_data *rdbgdata = NULL;
	int err = 0;

	if (!inode || !device->rdbg_data) {
		pr_err("Memory not allocated yet");
		err = -ENODEV;
		goto bail;
	}

	/* The minor number selects the remote processor (proc_info index). */
	device_id = MINOR(inode->i_rdev);
	rdbgdata = &device->rdbg_data[device_id];

	if (rdbgdata->device_opened) {
		dev_err(rdbgdata->device, "%s: Device already opened",
			__func__);
		err = -EEXIST;
		goto bail;
	}

	rdbgdata->smem_size = proc_info[device_id].smem_buffer_size;
	if (!rdbgdata->smem_size) {
		dev_err(rdbgdata->device, "%s: smem not initialized", __func__);
		err = -ENOMEM;
		goto bail;
	}

	rdbgdata->smem_addr = smem_alloc(proc_info[device_id].smem_buffer_addr,
		rdbgdata->smem_size);
	if (!rdbgdata->smem_addr) {
		dev_err(rdbgdata->device, "%s: Could not allocate smem memory",
			__func__);
		err = -ENOMEM;
		goto bail;
	}
	dev_dbg(rdbgdata->device, "%s: SMEM address=0x%x smem_size=%d",
		__func__, (unsigned int)rdbgdata->smem_addr,
		rdbgdata->smem_size);

	/* The second half of the region belongs to the remote consumer;
	 * its magic tells us whether the subsystem enabled debugging. */
	if (check_subsystem_debug_enabled(rdbgdata->smem_addr,
		rdbgdata->smem_size/2)) {
		dev_err(rdbgdata->device, "%s: Subsystem %s is not debug enabled",
			__func__, proc_info[device_id].name);
		err = -ECOMM;
		goto bail;
	}

	init_completion(&rdbgdata->work);

	/* Both edges: the remote toggles its GPIO to signal us. */
	err = request_irq(rdbgdata->in.irq_base_id, on_interrupt_from,
			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
			proc_info[device_id].name,
			(void *)&device->rdbg_data[device_id]);
	if (err) {
		dev_err(rdbgdata->device,
			"%s: Failed to register interrupt.Err=%d,irqid=%d.",
			__func__, err, rdbgdata->in.irq_base_id);
		goto irq_bail;
	}

	/* Wake capability is best effort; failure is not fatal. */
	err = enable_irq_wake(rdbgdata->in.irq_base_id);
	if (err < 0) {
		dev_dbg(rdbgdata->device, "enable_irq_wake() failed with err=%d",
			err);
		err = 0;
	}

	mutex_init(&rdbgdata->write_mutex);

	err = initialize_smq(rdbgdata);
	if (err) {
		dev_err(rdbgdata->device, "Error initializing smq. Err=%d",
			err);
		goto smq_bail;
	}

	rdbgdata->device_opened = 1;

	filp->private_data = (void *)rdbgdata;

	return 0;

smq_bail:
	smq_dtor(&(rdbgdata->producer_smrb));
	smq_dtor(&(rdbgdata->consumer_smrb));
	mutex_destroy(&rdbgdata->write_mutex);
irq_bail:
	free_irq(rdbgdata->in.irq_base_id, (void *)
		&device->rdbg_data[device_id]);
bail:
	return err;
}
/*
 * release() handler: undo everything rdbg_open() set up for this
 * minor — wake any blocked reader, release the IRQ, destroy both
 * queues and the write mutex. Returns 0, or -ENODEV when the device
 * data was never allocated.
 */
static int rdbg_release(struct inode *inode, struct file *filp)
{
	int device_id = -1;
	struct rdbg_device *rdbgdevice = &g_rdbg_instance;
	struct rdbg_data *rdbgdata = NULL;
	int err = 0;

	if (!inode || !rdbgdevice->rdbg_data) {
		pr_err("Memory not allocated yet");
		err = -ENODEV;
		goto bail;
	}

	device_id = MINOR(inode->i_rdev);
	rdbgdata = &rdbgdevice->rdbg_data[device_id];

	if (rdbgdata->device_opened == 1) {
		dev_dbg(rdbgdata->device, "%s: Destroying %s.", __func__,
			proc_info[device_id].name);
		rdbgdata->device_opened = 0;
		/* Unblock a reader stuck in wait_for_completion. */
		complete(&(rdbgdata->work));
		free_irq(rdbgdata->in.irq_base_id, (void *)
			&rdbgdevice->rdbg_data[device_id]);
		if (rdbgdevice->rdbg_data[device_id].producer_smrb.initialized)
			smq_dtor(&(rdbgdevice->rdbg_data[device_id].
				producer_smrb));
		if (rdbgdevice->rdbg_data[device_id].consumer_smrb.initialized)
			smq_dtor(&(rdbgdevice->rdbg_data[device_id].
				consumer_smrb));
		mutex_destroy(&rdbgdata->write_mutex);
	}

	filp->private_data = NULL;

bail:
	return err;
}
  770. static ssize_t rdbg_read(struct file *filp, char __user *buf, size_t size,
  771. loff_t *offset)
  772. {
  773. int err = 0;
  774. struct rdbg_data *rdbgdata = filp->private_data;
  775. void *p_sent_buffer = NULL;
  776. int nsize = 0;
  777. int more = 0;
  778. if (!rdbgdata) {
  779. pr_err("Invalid argument");
  780. err = -EINVAL;
  781. goto bail;
  782. }
  783. dev_dbg(rdbgdata->device, "%s: In receive", __func__);
  784. err = wait_for_completion_interruptible(&(rdbgdata->work));
  785. if (err) {
  786. dev_err(rdbgdata->device, "%s: Error in wait", __func__);
  787. goto bail;
  788. }
  789. smq_check_queue_reset(&(rdbgdata->consumer_smrb),
  790. &(rdbgdata->producer_smrb));
  791. if (SMQ_SUCCESS != smq_receive(&(rdbgdata->consumer_smrb),
  792. &p_sent_buffer, &nsize, &more)) {
  793. dev_err(rdbgdata->device, "%s: Error in smq_recv(). Err code = %d",
  794. __func__, err);
  795. err = -ENODATA;
  796. goto bail;
  797. }
  798. size = ((size < nsize) ? size : nsize);
  799. err = copy_to_user(buf, p_sent_buffer, size);
  800. if (err != 0) {
  801. dev_err(rdbgdata->device, "%s: Error in copy_to_user(). Err code = %d",
  802. __func__, err);
  803. err = -ENODATA;
  804. goto bail;
  805. }
  806. smq_free(&(rdbgdata->consumer_smrb), p_sent_buffer);
  807. err = size;
  808. dev_dbg(rdbgdata->device, "%s: Read data to buffer with address 0x%x",
  809. __func__, (unsigned int) buf);
  810. bail:
  811. dev_dbg(rdbgdata->device, "%s: Returning from receive", __func__);
  812. return err;
  813. }
  814. static ssize_t rdbg_write(struct file *filp, const char __user *buf,
  815. size_t size, loff_t *offset)
  816. {
  817. int err = 0;
  818. struct rdbg_data *rdbgdata = filp->private_data;
  819. if (!rdbgdata) {
  820. pr_err("Invalid argument");
  821. err = -EINVAL;
  822. goto bail;
  823. }
  824. if (smq_alloc_send(&(rdbgdata->producer_smrb), buf, size)) {
  825. dev_err(rdbgdata->device, "%s, Error sending", __func__);
  826. err = -ECOMM;
  827. goto bail;
  828. }
  829. send_interrupt_to_subsystem(rdbgdata);
  830. err = size;
  831. bail:
  832. return err;
  833. }
/* File operations for the /dev/<proc> remote-debugger character devices. */
static const struct file_operations rdbg_fops = {
	.open = rdbg_open,
	.read = rdbg_read,
	.write = rdbg_write,
	.release = rdbg_release,
};
  840. static int register_smp2p(char *node_name, struct gpio_info *gpio_info_ptr)
  841. {
  842. struct device_node *node = NULL;
  843. int cnt = 0;
  844. int id = 0;
  845. node = of_find_compatible_node(NULL, NULL, node_name);
  846. if (node) {
  847. cnt = of_gpio_count(node);
  848. if (cnt && gpio_info_ptr) {
  849. id = of_get_gpio(node, 0);
  850. gpio_info_ptr->gpio_base_id = id;
  851. gpio_info_ptr->irq_base_id = gpio_to_irq(id);
  852. return 0;
  853. }
  854. }
  855. return -EINVAL;
  856. }
  857. static int __init rdbg_init(void)
  858. {
  859. int err = 0;
  860. struct rdbg_device *rdbgdevice = &g_rdbg_instance;
  861. int minor = 0;
  862. int major = 0;
  863. int minor_nodes_created = 0;
  864. char *rdbg_compatible_string = "qcom,smp2pgpio_client_rdbg_";
  865. int max_len = strlen(rdbg_compatible_string) + strlen("xx_out");
  866. char *node_name = kcalloc(max_len, sizeof(char), GFP_KERNEL);
  867. if (!node_name) {
  868. pr_err("Not enough memory");
  869. err = -ENOMEM;
  870. goto bail;
  871. }
  872. if (rdbgdevice->num_devices < 1 ||
  873. rdbgdevice->num_devices > SMP2P_NUM_PROCS) {
  874. pr_err("rgdb: invalid num_devices");
  875. err = -EDOM;
  876. goto name_bail;
  877. }
  878. rdbgdevice->rdbg_data = kcalloc(rdbgdevice->num_devices,
  879. sizeof(struct rdbg_data), GFP_KERNEL);
  880. if (!rdbgdevice->rdbg_data) {
  881. pr_err("Not enough memory for rdbg devices");
  882. err = -ENOMEM;
  883. goto name_bail;
  884. }
  885. err = alloc_chrdev_region(&rdbgdevice->dev_no, 0,
  886. rdbgdevice->num_devices, "rdbgctl");
  887. if (err) {
  888. pr_err("Error in alloc_chrdev_region.");
  889. goto data_bail;
  890. }
  891. major = MAJOR(rdbgdevice->dev_no);
  892. cdev_init(&rdbgdevice->cdev, &rdbg_fops);
  893. rdbgdevice->cdev.owner = THIS_MODULE;
  894. err = cdev_add(&rdbgdevice->cdev, MKDEV(major, 0),
  895. rdbgdevice->num_devices);
  896. if (err) {
  897. pr_err("Error in cdev_add");
  898. goto chrdev_bail;
  899. }
  900. rdbgdevice->class = class_create(THIS_MODULE, "rdbg");
  901. if (IS_ERR(rdbgdevice->class)) {
  902. err = PTR_ERR(rdbgdevice->class);
  903. pr_err("Error in class_create");
  904. goto cdev_bail;
  905. }
  906. for (minor = 0; minor < rdbgdevice->num_devices; minor++) {
  907. if (!proc_info[minor].name)
  908. continue;
  909. if (snprintf(node_name, max_len, "%s%d_in",
  910. rdbg_compatible_string, minor) <= 0) {
  911. pr_err("Error in snprintf");
  912. err = -ENOMEM;
  913. goto device_bail;
  914. }
  915. if (register_smp2p(node_name,
  916. &rdbgdevice->rdbg_data[minor].in)) {
  917. pr_debug("No incoming device tree entry found for %s",
  918. proc_info[minor].name);
  919. continue;
  920. }
  921. if (snprintf(node_name, max_len, "%s%d_out",
  922. rdbg_compatible_string, minor) <= 0) {
  923. pr_err("Error in snprintf");
  924. err = -ENOMEM;
  925. goto device_bail;
  926. }
  927. if (register_smp2p(node_name,
  928. &rdbgdevice->rdbg_data[minor].out)) {
  929. pr_err("No outgoing device tree entry found for %s",
  930. proc_info[minor].name);
  931. err = -EINVAL;
  932. goto device_bail;
  933. }
  934. rdbgdevice->rdbg_data[minor].device = device_create(
  935. rdbgdevice->class, NULL, MKDEV(major, minor),
  936. NULL, "%s", proc_info[minor].name);
  937. if (IS_ERR(rdbgdevice->rdbg_data[minor].device)) {
  938. err = PTR_ERR(rdbgdevice->rdbg_data[minor].device);
  939. pr_err("Error in device_create");
  940. goto device_bail;
  941. }
  942. rdbgdevice->rdbg_data[minor].device_initialized = 1;
  943. minor_nodes_created++;
  944. dev_dbg(rdbgdevice->rdbg_data[minor].device,
  945. "%s: created /dev/%s c %d %d'", __func__,
  946. proc_info[minor].name, major, minor);
  947. }
  948. if (!minor_nodes_created) {
  949. pr_err("No device tree entries found");
  950. err = -EINVAL;
  951. goto class_bail;
  952. }
  953. goto name_bail;
  954. device_bail:
  955. for (--minor; minor >= 0; minor--) {
  956. if (rdbgdevice->rdbg_data[minor].device_initialized)
  957. device_destroy(rdbgdevice->class,
  958. MKDEV(MAJOR(rdbgdevice->dev_no), minor));
  959. }
  960. class_bail:
  961. class_destroy(rdbgdevice->class);
  962. cdev_bail:
  963. cdev_del(&rdbgdevice->cdev);
  964. chrdev_bail:
  965. unregister_chrdev_region(rdbgdevice->dev_no, rdbgdevice->num_devices);
  966. data_bail:
  967. kfree(rdbgdevice->rdbg_data);
  968. name_bail:
  969. kfree(node_name);
  970. bail:
  971. return err;
  972. }
  973. static void __exit rdbg_exit(void)
  974. {
  975. struct rdbg_device *rdbgdevice = &g_rdbg_instance;
  976. int minor;
  977. for (minor = 0; minor < rdbgdevice->num_devices; minor++) {
  978. if (rdbgdevice->rdbg_data[minor].device_initialized) {
  979. device_destroy(rdbgdevice->class,
  980. MKDEV(MAJOR(rdbgdevice->dev_no), minor));
  981. }
  982. }
  983. class_destroy(rdbgdevice->class);
  984. cdev_del(&rdbgdevice->cdev);
  985. unregister_chrdev_region(rdbgdevice->dev_no, 1);
  986. kfree(rdbgdevice->rdbg_data);
  987. }
/* Module entry/exit points and metadata. */
module_init(rdbg_init);
module_exit(rdbg_exit);
MODULE_DESCRIPTION("rdbg module");
MODULE_LICENSE("GPL v2");