diag_masks.c

/* Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/diagchar.h>
#include <linux/kmemleak.h>
#include <linux/workqueue.h>
#include "diagchar.h"
#include "diagfwd_cntl.h"
#include "diag_masks.h"

int diag_event_num_bytes;

#define DIAG_CTRL_MASK_INVALID		0
#define DIAG_CTRL_MASK_ALL_DISABLED	1
#define DIAG_CTRL_MASK_ALL_ENABLED	2
#define DIAG_CTRL_MASK_VALID		3

#define ALL_EQUIP_ID		100
#define ALL_SSID		-1

#define FEATURE_MASK_LEN_BYTES	2

struct mask_info {
	int equip_id;
	int num_items;
	int index;
};

#define CREATE_MSG_MASK_TBL_ROW(XX)					\
do {									\
	*(int *)(msg_mask_tbl_ptr) = MSG_SSID_ ## XX;			\
	msg_mask_tbl_ptr += 4;						\
	*(int *)(msg_mask_tbl_ptr) = MSG_SSID_ ## XX ## _LAST;		\
	msg_mask_tbl_ptr += 4;						\
	/* mimic the last entry as actual_last while creation */	\
	*(int *)(msg_mask_tbl_ptr) = MSG_SSID_ ## XX ## _LAST;		\
	msg_mask_tbl_ptr += 4;						\
	/* increment by MAX_SSID_PER_RANGE cells */			\
	msg_mask_tbl_ptr += MAX_SSID_PER_RANGE * sizeof(int);		\
} while (0)

static void diag_print_mask_table(void)
{
/* Enable this to print mask table when updated */
#ifdef MASK_DEBUG
	int first, last, actual_last;
	uint8_t *ptr = driver->msg_masks;
	int i = 0;

	pr_info("diag: F3 message mask table\n");
	while (*(uint32_t *)(ptr + 4)) {
		first = *(uint32_t *)ptr;
		ptr += 4;
		last = *(uint32_t *)ptr;
		ptr += 4;
		actual_last = *(uint32_t *)ptr;
		ptr += 4;
		pr_info("diag: SSID %d, %d - %d\n", first, last, actual_last);
		for (i = 0; i <= actual_last - first; i++)
			pr_info("diag: MASK:%x\n", *((uint32_t *)ptr + i));
		ptr += MAX_SSID_PER_RANGE * 4;
	}
#endif
}
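
/*
 * Populate the message mask table in driver->msg_masks: one row per SSID
 * range, laid out as first, last, actual_last followed by
 * MAX_SSID_PER_RANGE 32-bit mask words (see CREATE_MSG_MASK_TBL_ROW).
 */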
void diag_create_msg_mask_table(void)
{
	uint8_t *msg_mask_tbl_ptr = driver->msg_masks;

	CREATE_MSG_MASK_TBL_ROW(0);
	CREATE_MSG_MASK_TBL_ROW(1);
	CREATE_MSG_MASK_TBL_ROW(2);
	CREATE_MSG_MASK_TBL_ROW(3);
	CREATE_MSG_MASK_TBL_ROW(4);
	CREATE_MSG_MASK_TBL_ROW(5);
	CREATE_MSG_MASK_TBL_ROW(6);
	CREATE_MSG_MASK_TBL_ROW(7);
	CREATE_MSG_MASK_TBL_ROW(8);
	CREATE_MSG_MASK_TBL_ROW(9);
	CREATE_MSG_MASK_TBL_ROW(10);
	CREATE_MSG_MASK_TBL_ROW(11);
	CREATE_MSG_MASK_TBL_ROW(12);
	CREATE_MSG_MASK_TBL_ROW(13);
	CREATE_MSG_MASK_TBL_ROW(14);
	CREATE_MSG_MASK_TBL_ROW(15);
	CREATE_MSG_MASK_TBL_ROW(16);
	CREATE_MSG_MASK_TBL_ROW(17);
	CREATE_MSG_MASK_TBL_ROW(18);
	CREATE_MSG_MASK_TBL_ROW(19);
	CREATE_MSG_MASK_TBL_ROW(20);
	CREATE_MSG_MASK_TBL_ROW(21);
	CREATE_MSG_MASK_TBL_ROW(22);
	CREATE_MSG_MASK_TBL_ROW(23);
}
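
/*
 * Set every mask word in every SSID range to rt_mask and mark the message
 * mask status as all-enabled or all-disabled accordingly.
 */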
static void diag_set_msg_mask(int rt_mask)
{
	int first_ssid, last_ssid, i;
	uint8_t *parse_ptr, *ptr = driver->msg_masks;

	mutex_lock(&driver->diagchar_mutex);
	driver->msg_status = rt_mask ? DIAG_CTRL_MASK_ALL_ENABLED :
					DIAG_CTRL_MASK_ALL_DISABLED;
	while (*(uint32_t *)(ptr + 4)) {
		first_ssid = *(uint32_t *)ptr;
		ptr += 8; /* increment by 8 to skip 'last' */
		last_ssid = *(uint32_t *)ptr;
		ptr += 4;
		parse_ptr = ptr;
		pr_debug("diag: updating range %d %d\n", first_ssid, last_ssid);
		for (i = 0; i < last_ssid - first_ssid + 1; i++) {
			*(int *)parse_ptr = rt_mask;
			parse_ptr += 4;
		}
		ptr += MAX_SSID_PER_RANGE * 4;
	}
	mutex_unlock(&driver->diagchar_mutex);
}

static void diag_update_msg_mask(int start, int end, uint8_t *buf)
{
	int found = 0, first, last, actual_last;
	uint8_t *actual_last_ptr;
	uint8_t *ptr = driver->msg_masks;
	uint8_t *ptr_buffer_start = &(*(driver->msg_masks));
	uint8_t *ptr_buffer_end = &(*(driver->msg_masks)) + MSG_MASK_SIZE;
	uint32_t copy_len = (end - start + 1) * sizeof(int);

	mutex_lock(&driver->diagchar_mutex);
	/* First SSID can be zero : So check that last is non-zero */
	while (*(uint32_t *)(ptr + 4)) {
		first = *(uint32_t *)ptr;
		ptr += 4;
		last = *(uint32_t *)ptr;
		ptr += 4;
		actual_last = *(uint32_t *)ptr;
		actual_last_ptr = ptr;
		ptr += 4;
		if (start >= first && start <= actual_last) {
			ptr += (start - first) * 4;
			if (end > actual_last) {
				pr_info("diag: ssid range mismatch\n");
				actual_last = end;
				*(uint32_t *)(actual_last_ptr) = end;
			}
			if (actual_last - first >= MAX_SSID_PER_RANGE) {
				pr_err("diag: In %s, truncating ssid range, %d-%d to max allowed: %d",
				       __func__, first, actual_last,
				       MAX_SSID_PER_RANGE);
				copy_len = MAX_SSID_PER_RANGE;
				actual_last = first + MAX_SSID_PER_RANGE;
				*(uint32_t *)actual_last_ptr = actual_last;
			}
			if (CHK_OVERFLOW(ptr_buffer_start, ptr, ptr_buffer_end,
					 copy_len)) {
				pr_debug("diag: update ssid start %d, end %d\n",
					 start, end);
				memcpy(ptr, buf, copy_len);
			} else
				pr_alert("diag: Not enough space MSG_MASK\n");
			found = 1;
			break;
		} else {
			ptr += MAX_SSID_PER_RANGE * 4;
		}
	}
	/* Entry was not found - add new table */
	if (!found) {
		if (CHK_OVERFLOW(ptr_buffer_start, ptr, ptr_buffer_end,
				 8 + ((end - start) + 1) * 4)) {
			memcpy(ptr, &(start), 4);
			ptr += 4;
			memcpy(ptr, &(end), 4);
			ptr += 4;
			memcpy(ptr, &(end), 4); /* create actual_last entry */
			ptr += 4;
			pr_debug("diag: adding NEW ssid start %d, end %d\n",
				 start, end);
			memcpy(ptr, buf, ((end - start) + 1) * 4);
		} else
			pr_alert("diag: Not enough buffer space for MSG_MASK\n");
	}
	driver->msg_status = DIAG_CTRL_MASK_VALID;
	mutex_unlock(&driver->diagchar_mutex);
	diag_print_mask_table();
}
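
/* Enable (0xFF) or clear (0x00) the entire event mask in one shot. */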
void diag_toggle_event_mask(int toggle)
{
	uint8_t *ptr = driver->event_masks;

	mutex_lock(&driver->diagchar_mutex);
	if (toggle) {
		driver->event_status = DIAG_CTRL_MASK_ALL_ENABLED;
		memset(ptr, 0xFF, EVENT_MASK_SIZE);
	} else {
		driver->event_status = DIAG_CTRL_MASK_ALL_DISABLED;
		memset(ptr, 0, EVENT_MASK_SIZE);
	}
	mutex_unlock(&driver->diagchar_mutex);
}

static void diag_update_event_mask(uint8_t *buf, int num_bytes)
{
	uint8_t *ptr = driver->event_masks;
	uint8_t *temp = buf + 2;

	mutex_lock(&driver->diagchar_mutex);
	if (CHK_OVERFLOW(ptr, ptr, ptr + EVENT_MASK_SIZE, num_bytes)) {
		memcpy(ptr, temp, num_bytes);
		driver->event_status = DIAG_CTRL_MASK_VALID;
	} else {
		pr_err("diag: In %s, not enough buffer space\n", __func__);
	}
	mutex_unlock(&driver->diagchar_mutex);
}

static void diag_disable_log_mask(void)
{
	int i = 0;
	struct diag_log_mask_t *log_item = NULL;

	mutex_lock(&driver->log_mask_mutex);
	log_item = (struct diag_log_mask_t *)driver->log_masks;
	for (i = 0; i < MAX_EQUIP_ID; i++, log_item++)
		memset(log_item->ptr, 0, MAX_ITEMS_PER_EQUIP_ID);
	driver->log_status = DIAG_CTRL_MASK_ALL_DISABLED;
	mutex_unlock(&driver->log_mask_mutex);
}

static int copy_log_mask_equip(int equip_id, uint8_t *buf)
{
	int i, ret = 0;
	uint8_t *temp = buf;
	struct diag_log_mask_t *log_item = NULL;
	uint32_t mask_size = 0;

	if (!buf)
		return ret;
	log_item = (struct diag_log_mask_t *)driver->log_masks;
	for (i = 0; i < MAX_EQUIP_ID; i++, log_item++) {
		if (log_item->equip_id != equip_id)
			continue;
		*(int *)temp = log_item->equip_id;
		temp += sizeof(int);
		*(int *)(temp) = log_item->num_items;
		temp += sizeof(int);
		mask_size = LOG_ITEMS_TO_SIZE(log_item->num_items);
		if (mask_size > MAX_ITEMS_PER_EQUIP_ID) {
			pr_err("diag: Invalid length: %d in %s, permissible: %d",
			       mask_size, __func__, MAX_ITEMS_PER_EQUIP_ID);
			break;
		}
		if (mask_size > 0) {
			memcpy(temp, log_item->ptr, mask_size);
			/*
			 * Return the total number of bytes copied = size of
			 * equip_id (int) + size of num_items (int) + mask_size
			 */
			ret = (2 * sizeof(int)) + mask_size;
		}
		break;
	}
	return ret;
}

static void diag_update_log_mask(int equip_id, uint8_t *buf, int num_items)
{
	int i = 0;
	struct diag_log_mask_t *log_item = NULL;
	uint32_t mask_size = 0;

	mutex_lock(&driver->log_mask_mutex);
	driver->log_status = DIAG_CTRL_MASK_INVALID;
	if (!buf || (equip_id < 0 || equip_id >= MAX_EQUIP_ID) ||
						num_items < 1) {
		pr_err("diag: Invalid params in %s, buf: %p equip_id: %d, num_items: %d\n",
		       __func__, buf, equip_id, num_items);
		mutex_unlock(&driver->log_mask_mutex);
		return;
	}
	mask_size = LOG_ITEMS_TO_SIZE(num_items);
	if (mask_size > MAX_ITEMS_PER_EQUIP_ID) {
		pr_err("diag: In %s, Invalid mask_size %d\n", __func__,
		       mask_size);
		mutex_unlock(&driver->log_mask_mutex);
		return;
	}
	log_item = (struct diag_log_mask_t *)driver->log_masks;
	for (i = 0; i < MAX_EQUIP_ID; i++, log_item++) {
		if (log_item->equip_id != equip_id)
			continue;
		/* Found the equip id */
		log_item->num_items = num_items;
		if (mask_size > 0)
			memcpy(log_item->ptr, buf, mask_size);
		driver->log_status = DIAG_CTRL_MASK_VALID;
		break;
	}
	mutex_unlock(&driver->log_mask_mutex);
}
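
/*
 * Work function for diag_notify_update_smd_work: pushes the feature,
 * message, log and event masks to the peripheral, plus the real-time mode
 * setting when the channel has just been opened.
 */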
void diag_mask_update_fn(struct work_struct *work)
{
	struct diag_smd_info *smd_info = container_of(work,
					struct diag_smd_info,
					diag_notify_update_smd_work);
	if (!smd_info) {
		pr_err("diag: In %s, smd info is null, cannot update masks for the peripheral\n",
		       __func__);
		return;
	}

	diag_send_feature_mask_update(smd_info);
	diag_send_msg_mask_update(smd_info, ALL_SSID, ALL_SSID,
				  smd_info->peripheral);
	diag_send_log_mask_update(smd_info, ALL_EQUIP_ID);
	diag_send_event_mask_update(smd_info, diag_event_num_bytes);

	if (smd_info->notify_context == SMD_EVENT_OPEN)
		diag_send_diag_mode_update_by_smd(smd_info,
						  driver->real_time_mode);

	smd_info->notify_context = 0;
}
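
/*
 * Send a log mask control packet (DIAG_CTRL_MSG_LOG_MASK) to the
 * peripheral for the given equip ID, or for every equip ID when
 * equip_id == ALL_EQUIP_ID.
 */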
void diag_send_log_mask_update(struct diag_smd_info *smd_info, int equip_id)
{
	void *buf = driver->buf_log_mask_update;
	struct diag_log_mask_t *log_item = NULL;
	struct diag_ctrl_log_mask ctrl_pkt;
	uint32_t log_mask_size = 0;
	int wr_size = -ENOMEM, retry_count = 0;
	int i, header_size, send_once = 0;

	if (!smd_info) {
		pr_err("diag: In %s, null smd info pointer\n",
		       __func__);
		return;
	}

	header_size = sizeof(struct diag_ctrl_log_mask);
	log_item = (struct diag_log_mask_t *)driver->log_masks;
	mutex_lock(&driver->diag_cntl_mutex);
	for (i = 0; i < MAX_EQUIP_ID; i++, log_item++) {
		if (equip_id != i && equip_id != ALL_EQUIP_ID)
			continue;
		log_mask_size = LOG_ITEMS_TO_SIZE(log_item->num_items);
		ctrl_pkt.cmd_type = DIAG_CTRL_MSG_LOG_MASK;
		ctrl_pkt.data_len = 11 + log_mask_size;
		ctrl_pkt.stream_id = 1;
		ctrl_pkt.status = driver->log_status;
		switch (driver->log_status) {
		case DIAG_CTRL_MASK_ALL_DISABLED:
			ctrl_pkt.equip_id = 0;
			ctrl_pkt.num_items = 0;
			ctrl_pkt.log_mask_size = 0;
			send_once = 1;
			break;
		case DIAG_CTRL_MASK_ALL_ENABLED:
			ctrl_pkt.equip_id = 0;
			ctrl_pkt.num_items = 0;
			ctrl_pkt.log_mask_size = 0;
			send_once = 1;
			break;
		case DIAG_CTRL_MASK_VALID:
			ctrl_pkt.equip_id = i;
			ctrl_pkt.num_items = log_item->num_items;
			ctrl_pkt.log_mask_size = log_mask_size;
			send_once = 0;
			break;
		default:
			pr_err("diag: In %s, invalid status %d", __func__,
			       driver->log_status);
			mutex_unlock(&driver->diag_cntl_mutex);
			return;
		}
		memcpy(buf, &ctrl_pkt, header_size);
		if (log_mask_size > 0) {
			memcpy(buf + header_size, log_item->ptr,
			       log_mask_size);
		}
		if (smd_info->ch) {
			while (retry_count < 3) {
				mutex_lock(&smd_info->smd_ch_mutex);
				wr_size = smd_write(smd_info->ch, buf,
						header_size + log_mask_size);
				mutex_unlock(&smd_info->smd_ch_mutex);
				if (wr_size == -ENOMEM) {
					retry_count++;
					usleep_range(10000, 10100);
				} else
					break;
			}
			if (wr_size != header_size + log_mask_size)
				pr_err("diag: log mask update failed %d, tried %d",
				       wr_size, header_size + log_mask_size);
			else
				pr_debug("diag: updated log equip ID %d, len %d\n",
					 i, log_mask_size);
		} else
			pr_err("diag: ch not valid for log update\n");
		if (send_once)
			break;
	}
	mutex_unlock(&driver->diag_cntl_mutex);
}

void diag_send_event_mask_update(struct diag_smd_info *smd_info, int num_bytes)
{
	void *buf = driver->buf_event_mask_update;
	int header_size = sizeof(struct diag_ctrl_event_mask);
	int wr_size = -ENOMEM, retry_count = 0;

	if (!smd_info) {
		pr_err("diag: In %s, null smd info pointer\n",
		       __func__);
		return;
	}

	mutex_lock(&driver->diag_cntl_mutex);
	if (num_bytes == 0) {
		pr_debug("diag: event mask not set yet, so no update\n");
		mutex_unlock(&driver->diag_cntl_mutex);
		return;
	}
	/* send event mask update */
	driver->event_mask->cmd_type = DIAG_CTRL_MSG_EVENT_MASK;
	driver->event_mask->data_len = 7 + num_bytes;
	driver->event_mask->stream_id = 1; /* 2, if dual stream */
	driver->event_mask->status = driver->event_status;
	switch (driver->event_status) {
	case DIAG_CTRL_MASK_ALL_DISABLED:
		driver->event_mask->event_config = 0;
		driver->event_mask->event_mask_size = 0;
		break;
	case DIAG_CTRL_MASK_ALL_ENABLED:
		driver->event_mask->event_config = 1;
		driver->event_mask->event_mask_size = 0;
		break;
	case DIAG_CTRL_MASK_VALID:
		driver->event_mask->event_config = 1;
		driver->event_mask->event_mask_size = num_bytes;
		memcpy(buf + header_size, driver->event_masks, num_bytes);
		break;
	default:
		/* Event status is not set yet or the buffer is corrupted */
		pr_err("diag: In %s, invalid status %d", __func__,
		       driver->event_status);
		driver->event_mask->status = DIAG_CTRL_MASK_INVALID;
	}
	if (driver->event_mask->status == DIAG_CTRL_MASK_INVALID) {
		mutex_unlock(&driver->diag_cntl_mutex);
		return;
	}
	memcpy(buf, driver->event_mask, header_size);
	if (smd_info->ch) {
		while (retry_count < 3) {
			mutex_lock(&smd_info->smd_ch_mutex);
			wr_size = smd_write(smd_info->ch, buf,
					    header_size + num_bytes);
			mutex_unlock(&smd_info->smd_ch_mutex);
			if (wr_size == -ENOMEM) {
				retry_count++;
				usleep_range(10000, 10100);
			} else
				break;
		}
		if (wr_size != header_size + num_bytes)
			pr_err("diag: error writing event mask %d, tried %d\n",
			       wr_size, header_size + num_bytes);
	} else
		pr_err("diag: ch not valid for event update\n");
	mutex_unlock(&driver->diag_cntl_mutex);
}
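
/*
 * Send an F3 message mask control packet (DIAG_CTRL_MSG_F3_MASK) to the
 * peripheral for every SSID range that covers the updated range, or for
 * all ranges when updated_ssid_first == ALL_SSID.
 */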
void diag_send_msg_mask_update(struct diag_smd_info *smd_info,
			       int updated_ssid_first, int updated_ssid_last,
			       int proc)
{
	void *buf = driver->buf_msg_mask_update;
	int first, last, actual_last, size = -ENOMEM, retry_count = 0;
	int header_size = sizeof(struct diag_ctrl_msg_mask);
	uint8_t *ptr = driver->msg_masks;

	if (!smd_info) {
		pr_err("diag: In %s, null smd info pointer\n",
		       __func__);
		return;
	}

	mutex_lock(&driver->diag_cntl_mutex);
	while (*(uint32_t *)(ptr + 4)) {
		first = *(uint32_t *)ptr;
		ptr += 4;
		last = *(uint32_t *)ptr;
		ptr += 4;
		actual_last = *(uint32_t *)ptr;
		ptr += 4;
		if (!((updated_ssid_first >= first && updated_ssid_last <=
			actual_last) || (updated_ssid_first == ALL_SSID))) {
			ptr += MAX_SSID_PER_RANGE * 4;
			continue;
		}
		/* send f3 mask update */
		driver->msg_mask->cmd_type = DIAG_CTRL_MSG_F3_MASK;
		driver->msg_mask->status = driver->msg_status;
		switch (driver->msg_status) {
		case DIAG_CTRL_MASK_ALL_DISABLED:
			driver->msg_mask->msg_mask_size = 0;
			break;
		case DIAG_CTRL_MASK_ALL_ENABLED:
			driver->msg_mask->msg_mask_size = 1;
			memcpy(buf + header_size, ptr,
			       4 * (driver->msg_mask->msg_mask_size));
			break;
		case DIAG_CTRL_MASK_VALID:
			driver->msg_mask->msg_mask_size = actual_last -
							  first + 1;
			/* Limit the msg_mask_size to MAX_SSID_PER_RANGE */
			if (driver->msg_mask->msg_mask_size >
						MAX_SSID_PER_RANGE) {
				pr_err("diag: in %s, Invalid msg mask size %d, max: %d",
				       __func__,
				       driver->msg_mask->msg_mask_size,
				       MAX_SSID_PER_RANGE);
				driver->msg_mask->msg_mask_size =
							MAX_SSID_PER_RANGE;
			}
			memcpy(buf + header_size, ptr,
			       4 * (driver->msg_mask->msg_mask_size));
			break;
		default:
			/* Msg status is not set or the buffer is corrupted */
			pr_err("diag: In %s, invalid status %d", __func__,
			       driver->msg_status);
			driver->msg_mask->status = DIAG_CTRL_MASK_INVALID;
		}
		if (driver->msg_mask->status == DIAG_CTRL_MASK_INVALID) {
			mutex_unlock(&driver->diag_cntl_mutex);
			return;
		}
		driver->msg_mask->data_len = 11 +
				4 * (driver->msg_mask->msg_mask_size);
		driver->msg_mask->stream_id = 1; /* 2, if dual stream */
		driver->msg_mask->msg_mode = 0; /* Legacy mode */
		driver->msg_mask->ssid_first = first;
		driver->msg_mask->ssid_last = actual_last;
		memcpy(buf, driver->msg_mask, header_size);
		if (smd_info->ch) {
			while (retry_count < 3) {
				mutex_lock(&smd_info->smd_ch_mutex);
				size = smd_write(smd_info->ch, buf, header_size
					+ 4 * (driver->msg_mask->msg_mask_size));
				mutex_unlock(&smd_info->smd_ch_mutex);
				if (size == -ENOMEM) {
					retry_count++;
					usleep_range(10000, 10100);
				} else
					break;
			}
			if (size != header_size +
				    4 * (driver->msg_mask->msg_mask_size))
				pr_err("diag: proc %d, msg mask update fail %d, tried %d\n",
				       proc, size, (header_size +
				       4 * (driver->msg_mask->msg_mask_size)));
			else
				pr_debug("diag: sending mask update for ssid first %d, last %d on PROC %d\n",
					 first, actual_last, proc);
		} else
			pr_err("diag: proc %d, ch invalid msg mask update\n",
			       proc);
		ptr += MAX_SSID_PER_RANGE * 4;
	}
	mutex_unlock(&driver->diag_cntl_mutex);
}
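
/*
 * Advertise the apps feature mask (DIAG_CTRL_MSG_FEATURE) to the
 * peripheral over the given SMD channel.
 */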
void diag_send_feature_mask_update(struct diag_smd_info *smd_info)
{
	void *buf = driver->buf_feature_mask_update;
	int header_size = sizeof(struct diag_ctrl_feature_mask);
	int wr_size = -ENOMEM, retry_count = 0;
	uint8_t feature_bytes[FEATURE_MASK_LEN_BYTES] = {0, 0};
	int total_len = 0;

	if (!smd_info) {
		pr_err("diag: In %s, null smd info pointer\n",
		       __func__);
		return;
	}

	if (!smd_info->ch) {
		pr_err("diag: In %s, smd channel not open for peripheral: %d, type: %d\n",
		       __func__, smd_info->peripheral, smd_info->type);
		return;
	}

	mutex_lock(&driver->diag_cntl_mutex);
	/* send feature mask update */
	driver->feature_mask->ctrl_pkt_id = DIAG_CTRL_MSG_FEATURE;
	driver->feature_mask->ctrl_pkt_data_len = 4 + FEATURE_MASK_LEN_BYTES;
	driver->feature_mask->feature_mask_len = FEATURE_MASK_LEN_BYTES;
	memcpy(buf, driver->feature_mask, header_size);
	feature_bytes[0] |= F_DIAG_INT_FEATURE_MASK;
	feature_bytes[0] |= F_DIAG_LOG_ON_DEMAND_RSP_ON_MASTER;
	feature_bytes[0] |= driver->supports_separate_cmdrsp ?
				F_DIAG_REQ_RSP_CHANNEL : 0;
	feature_bytes[0] |= driver->supports_apps_hdlc_encoding ?
				F_DIAG_HDLC_ENCODE_IN_APPS_MASK : 0;
	feature_bytes[1] |= F_DIAG_OVER_STM;
	memcpy(buf + header_size, &feature_bytes, FEATURE_MASK_LEN_BYTES);
	total_len = header_size + FEATURE_MASK_LEN_BYTES;
	while (retry_count < 3) {
		mutex_lock(&smd_info->smd_ch_mutex);
		wr_size = smd_write(smd_info->ch, buf, total_len);
		mutex_unlock(&smd_info->smd_ch_mutex);
		if (wr_size == -ENOMEM) {
			retry_count++;
			/*
			 * The smd channel is full. Delay while smd
			 * processes existing data and memory becomes
			 * available. The delay of 10000 us was
			 * determined empirically as the best value
			 * to use.
			 */
			usleep_range(10000, 10100);
		} else
			break;
	}
	if (wr_size != total_len)
		pr_err("diag: In %s, peripheral %d fail feature update, size: %d, tried: %d",
		       __func__, smd_info->peripheral, wr_size, total_len);
	mutex_unlock(&driver->diag_cntl_mutex);
}
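
/*
 * Handle mask-related command packets from the apps diag client:
 * 0x73 (set/get/disable log masks), 0x7d (get/set message masks),
 * 0x82 and 0x60 (set/toggle event masks) and 0x78 (log on demand).
 */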
int diag_process_apps_masks(unsigned char *buf, int len)
{
	int packet_type = 1;
	int i;
	int ssid_first, ssid_last, ssid_range;
	int rt_mask, rt_first_ssid, rt_last_ssid, rt_mask_size;
	uint8_t *rt_mask_ptr;
	int equip_id, copy_len;
#if defined(CONFIG_DIAG_OVER_USB)
	int payload_length;
#endif

	/* Set log masks */
	if (*buf == 0x73 && *(int *)(buf + 4) == 3) {
		buf += 8;
		diag_update_log_mask(*(int *)buf, buf + 8, *(int *)(buf + 4));
		diag_update_userspace_clients(LOG_MASKS_TYPE);
#if defined(CONFIG_DIAG_OVER_USB)
		if (chk_apps_only()) {
			driver->apps_rsp_buf[0] = 0x73;
			*(int *)(driver->apps_rsp_buf + 4) = 0x3; /* op. ID */
			*(int *)(driver->apps_rsp_buf + 8) = 0x0; /* success */
			payload_length = 8 +
				LOG_ITEMS_TO_SIZE(*(int *)(buf + 4));
			if (payload_length > APPS_BUF_SIZE - 12) {
				pr_err("diag: log masks: buffer overflow\n");
				return -EIO;
			}
			for (i = 0; i < payload_length; i++)
				*(int *)(driver->apps_rsp_buf + 12 + i) =
								*(buf + i);
			for (i = 0; i < NUM_SMD_CONTROL_CHANNELS; i++) {
				if (driver->smd_cntl[i].ch)
					diag_send_log_mask_update(
						&driver->smd_cntl[i],
						*(int *)buf);
			}
			encode_rsp_and_send(12 + payload_length - 1);
			return 0;
		}
#endif
	} /* Get log masks */
	else if (*buf == 0x73 && *(int *)(buf + 4) == 4) {
#if defined(CONFIG_DIAG_OVER_USB)
		if (!(driver->smd_data[MODEM_DATA].ch) &&
						chk_apps_only()) {
			equip_id = *(int *)(buf + 8);
			driver->apps_rsp_buf[0] = 0x73;
			driver->apps_rsp_buf[1] = 0x0;
			driver->apps_rsp_buf[2] = 0x0;
			driver->apps_rsp_buf[3] = 0x0;
			*(int *)(driver->apps_rsp_buf + 4) = 0x4;
			copy_len = copy_log_mask_equip(equip_id,
						driver->apps_rsp_buf + 12);
			*(int *)(driver->apps_rsp_buf + 8) =
						(copy_len == 0) ? 1 : 0;
			encode_rsp_and_send(12 + copy_len);
			return 0;
		}
#endif
	} /* Disable log masks */
	else if (*buf == 0x73 && *(int *)(buf + 4) == 0) {
		/* Disable mask for each log code */
		diag_disable_log_mask();
		diag_update_userspace_clients(LOG_MASKS_TYPE);
#if defined(CONFIG_DIAG_OVER_USB)
		if (chk_apps_only()) {
			driver->apps_rsp_buf[0] = 0x73;
			driver->apps_rsp_buf[1] = 0x0;
			driver->apps_rsp_buf[2] = 0x0;
			driver->apps_rsp_buf[3] = 0x0;
			*(int *)(driver->apps_rsp_buf + 4) = 0x0;
			*(int *)(driver->apps_rsp_buf + 8) = 0x0; /* status */
			for (i = 0; i < NUM_SMD_CONTROL_CHANNELS; i++) {
				if (driver->smd_cntl[i].ch)
					diag_send_log_mask_update(
						&driver->smd_cntl[i],
						ALL_EQUIP_ID);
			}
			encode_rsp_and_send(11);
			return 0;
		}
#endif
	} /* Get runtime message mask */
	else if ((*buf == 0x7d) && (*(buf + 1) == 0x3)) {
		ssid_first = *(uint16_t *)(buf + 2);
		ssid_last = *(uint16_t *)(buf + 4);
#if defined(CONFIG_DIAG_OVER_USB)
		if (!(driver->smd_data[MODEM_DATA].ch) &&
						chk_apps_only()) {
			driver->apps_rsp_buf[0] = 0x7d;
			driver->apps_rsp_buf[1] = 0x3;
			*(uint16_t *)(driver->apps_rsp_buf + 2) = ssid_first;
			*(uint16_t *)(driver->apps_rsp_buf + 4) = ssid_last;
			driver->apps_rsp_buf[6] = 0x1; /* Success Status */
			driver->apps_rsp_buf[7] = 0x0;
			rt_mask_ptr = driver->msg_masks;
			while (*(uint32_t *)(rt_mask_ptr + 4)) {
				rt_first_ssid = *(uint32_t *)rt_mask_ptr;
				rt_mask_ptr += 8; /* +8 to skip 'last' */
				rt_last_ssid = *(uint32_t *)rt_mask_ptr;
				rt_mask_ptr += 4;
				if (ssid_first == rt_first_ssid && ssid_last ==
								rt_last_ssid) {
					rt_mask_size = 4 * (rt_last_ssid -
							    rt_first_ssid + 1);
					if (rt_mask_size > APPS_BUF_SIZE - 8) {
						pr_err("diag: rt masks: buffer overflow\n");
						return -EIO;
					}
					memcpy(driver->apps_rsp_buf + 8,
					       rt_mask_ptr, rt_mask_size);
					encode_rsp_and_send(8 + rt_mask_size - 1);
					return 0;
				}
				rt_mask_ptr += MAX_SSID_PER_RANGE * 4;
			}
		}
#endif
	} /* Set runtime message mask */
	else if ((*buf == 0x7d) && (*(buf + 1) == 0x4)) {
		ssid_first = *(uint16_t *)(buf + 2);
		ssid_last = *(uint16_t *)(buf + 4);
		if (ssid_last < ssid_first) {
			pr_err("diag: Invalid msg mask ssid values, first: %d, last: %d\n",
			       ssid_first, ssid_last);
			return -EIO;
		}
		ssid_range = 4 * (ssid_last - ssid_first + 1);
		if (ssid_range > APPS_BUF_SIZE - 8) {
			pr_err("diag: Not enough space for message mask, ssid_range: %d\n",
			       ssid_range);
			return -EIO;
		}
		pr_debug("diag: received mask update for ssid_first = %d, ssid_last = %d",
			 ssid_first, ssid_last);
		diag_update_msg_mask(ssid_first, ssid_last, buf + 8);
		diag_update_userspace_clients(MSG_MASKS_TYPE);
#if defined(CONFIG_DIAG_OVER_USB)
		if (chk_apps_only()) {
			for (i = 0; i < 8 + ssid_range; i++)
				*(driver->apps_rsp_buf + i) = *(buf + i);
			*(driver->apps_rsp_buf + 6) = 0x1;
			for (i = 0; i < NUM_SMD_CONTROL_CHANNELS; i++) {
				if (driver->smd_cntl[i].ch)
					diag_send_msg_mask_update(
						&driver->smd_cntl[i],
						ssid_first, ssid_last,
						driver->smd_cntl[i].peripheral);
			}
			encode_rsp_and_send(8 + ssid_range - 1);
			return 0;
		}
#endif
	} /* Set ALL runtime message mask */
	else if ((*buf == 0x7d) && (*(buf + 1) == 0x5)) {
		rt_mask = *(int *)(buf + 4);
		diag_set_msg_mask(rt_mask);
		diag_update_userspace_clients(MSG_MASKS_TYPE);
#if defined(CONFIG_DIAG_OVER_USB)
		if (chk_apps_only()) {
			driver->apps_rsp_buf[0] = 0x7d; /* cmd_code */
			driver->apps_rsp_buf[1] = 0x5; /* set subcommand */
			driver->apps_rsp_buf[2] = 1; /* success */
			driver->apps_rsp_buf[3] = 0; /* rsvd */
			*(int *)(driver->apps_rsp_buf + 4) = rt_mask;
			/* send msg mask update to peripheral */
			for (i = 0; i < NUM_SMD_CONTROL_CHANNELS; i++) {
				if (driver->smd_cntl[i].ch)
					diag_send_msg_mask_update(
						&driver->smd_cntl[i],
						ALL_SSID, ALL_SSID,
						driver->smd_cntl[i].peripheral);
			}
			encode_rsp_and_send(7);
			return 0;
		}
#endif
	} else if (*buf == 0x82) { /* event mask change */
		buf += 4;
		diag_event_num_bytes = (*(uint16_t *)buf) / 8 + 1;
		diag_update_event_mask(buf, diag_event_num_bytes);
		diag_update_userspace_clients(EVENT_MASKS_TYPE);
#if defined(CONFIG_DIAG_OVER_USB)
		if (chk_apps_only()) {
			driver->apps_rsp_buf[0] = 0x82;
			driver->apps_rsp_buf[1] = 0x0;
			*(uint16_t *)(driver->apps_rsp_buf + 2) = 0x0;
			*(uint16_t *)(driver->apps_rsp_buf + 4) =
							EVENT_LAST_ID + 1;
			memcpy(driver->apps_rsp_buf + 6, driver->event_masks,
			       EVENT_LAST_ID / 8 + 1);
			for (i = 0; i < NUM_SMD_CONTROL_CHANNELS; i++) {
				if (driver->smd_cntl[i].ch)
					diag_send_event_mask_update(
						&driver->smd_cntl[i],
						diag_event_num_bytes);
			}
			encode_rsp_and_send(6 + EVENT_LAST_ID / 8);
			return 0;
		}
#endif
	} else if (*buf == 0x60) { /* toggle event reporting */
		diag_toggle_event_mask(*(buf + 1));
		diag_update_userspace_clients(EVENT_MASKS_TYPE);
#if defined(CONFIG_DIAG_OVER_USB)
		if (chk_apps_only()) {
			driver->apps_rsp_buf[0] = 0x60;
			driver->apps_rsp_buf[1] = 0x0;
			driver->apps_rsp_buf[2] = 0x0;
			for (i = 0; i < NUM_SMD_CONTROL_CHANNELS; i++) {
				if (driver->smd_cntl[i].ch)
					diag_send_event_mask_update(
						&driver->smd_cntl[i],
						diag_event_num_bytes);
			}
			encode_rsp_and_send(2);
			return 0;
		}
#endif
	} else if (*buf == 0x78) { /* log on demand */
		if (!(driver->smd_cntl[MODEM_DATA].ch) ||
				(driver->log_on_demand_support)) {
			driver->apps_rsp_buf[0] = 0x78;
			/* Copy log code received */
			*(uint16_t *)(driver->apps_rsp_buf + 1) =
					*(uint16_t *)(buf + 1);
			driver->apps_rsp_buf[3] = 0x1; /* Unknown */
			encode_rsp_and_send(3);
		}
	}
	return packet_type;
}

static void diag_log_mask_init(void)
{
	struct diag_log_mask_t *log_item = NULL;
	uint8_t i;

	mutex_init(&driver->log_mask_mutex);
	log_item = (struct diag_log_mask_t *)driver->log_masks;
	for (i = 0; i < MAX_EQUIP_ID; i++, log_item++) {
		log_item->equip_id = i;
		log_item->num_items = LOG_GET_ITEM_NUM(log_code_last_tbl[i]);
	}
}

void diag_masks_init(void)
{
	driver->event_status = DIAG_CTRL_MASK_INVALID;
	driver->msg_status = DIAG_CTRL_MASK_INVALID;
	driver->log_status = DIAG_CTRL_MASK_INVALID;

	if (driver->event_mask == NULL) {
		driver->event_mask = kzalloc(sizeof(
				struct diag_ctrl_event_mask), GFP_KERNEL);
		if (driver->event_mask == NULL)
			goto err;
		kmemleak_not_leak(driver->event_mask);
	}
	if (driver->msg_mask == NULL) {
		driver->msg_mask = kzalloc(sizeof(
				struct diag_ctrl_msg_mask), GFP_KERNEL);
		if (driver->msg_mask == NULL)
			goto err;
		kmemleak_not_leak(driver->msg_mask);
	}
	if (driver->log_mask == NULL) {
		driver->log_mask = kzalloc(sizeof(
				struct diag_ctrl_log_mask), GFP_KERNEL);
		if (driver->log_mask == NULL)
			goto err;
		kmemleak_not_leak(driver->log_mask);
	}
	if (driver->buf_msg_mask_update == NULL) {
		driver->buf_msg_mask_update = kzalloc(APPS_BUF_SIZE,
						      GFP_KERNEL);
		if (driver->buf_msg_mask_update == NULL)
			goto err;
		kmemleak_not_leak(driver->buf_msg_mask_update);
	}
	if (driver->buf_log_mask_update == NULL) {
		driver->buf_log_mask_update = kzalloc(APPS_BUF_SIZE,
						      GFP_KERNEL);
		if (driver->buf_log_mask_update == NULL)
			goto err;
		kmemleak_not_leak(driver->buf_log_mask_update);
	}
	if (driver->buf_event_mask_update == NULL) {
		driver->buf_event_mask_update = kzalloc(APPS_BUF_SIZE,
							GFP_KERNEL);
		if (driver->buf_event_mask_update == NULL)
			goto err;
		kmemleak_not_leak(driver->buf_event_mask_update);
	}
	if (driver->msg_masks == NULL) {
		driver->msg_masks = kzalloc(MSG_MASK_SIZE, GFP_KERNEL);
		if (driver->msg_masks == NULL)
			goto err;
		kmemleak_not_leak(driver->msg_masks);
	}
	if (driver->buf_feature_mask_update == NULL) {
		driver->buf_feature_mask_update = kzalloc(sizeof(
					struct diag_ctrl_feature_mask) +
					FEATURE_MASK_LEN_BYTES, GFP_KERNEL);
		if (driver->buf_feature_mask_update == NULL)
			goto err;
		kmemleak_not_leak(driver->buf_feature_mask_update);
	}
	if (driver->feature_mask == NULL) {
		driver->feature_mask = kzalloc(sizeof(
				struct diag_ctrl_feature_mask), GFP_KERNEL);
		if (driver->feature_mask == NULL)
			goto err;
		kmemleak_not_leak(driver->feature_mask);
	}

	diag_create_msg_mask_table();
	diag_event_num_bytes = 0;

	if (driver->log_masks == NULL) {
		driver->log_masks = kzalloc(LOG_MASK_SIZE, GFP_KERNEL);
		if (driver->log_masks == NULL)
			goto err;
		kmemleak_not_leak(driver->log_masks);
	}
	diag_log_mask_init();
	if (driver->event_masks == NULL) {
		driver->event_masks = kzalloc(EVENT_MASK_SIZE, GFP_KERNEL);
		if (driver->event_masks == NULL)
			goto err;
		kmemleak_not_leak(driver->event_masks);
	}
	return;
err:
	pr_err("diag: Could not initialize diag mask buffers");
	kfree(driver->event_mask);
	kfree(driver->log_mask);
	kfree(driver->msg_mask);
	kfree(driver->msg_masks);
	kfree(driver->log_masks);
	kfree(driver->event_masks);
	kfree(driver->feature_mask);
	kfree(driver->buf_feature_mask_update);
}

void diag_masks_exit(void)
{
	kfree(driver->event_mask);
	kfree(driver->log_mask);
	kfree(driver->msg_mask);
	kfree(driver->msg_masks);
	kfree(driver->log_masks);
	kfree(driver->event_masks);
	kfree(driver->feature_mask);
	kfree(driver->buf_feature_mask_update);
}