diag_dci.c

/* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/diagchar.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeup.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/reboot.h>
#include <asm/current.h>
#include <mach/restart.h>
#ifdef CONFIG_DIAG_OVER_USB
#include <mach/usbdiag.h>
#endif
#include "diagchar_hdlc.h"
#include "diagmem.h"
#include "diagchar.h"
#include "diagfwd.h"
#include "diagfwd_cntl.h"
#include "diag_dci.h"

static struct timer_list dci_drain_timer;
static int dci_timer_in_progress;
static struct work_struct dci_data_drain_work;

unsigned int dci_max_reg = 100;
unsigned int dci_max_clients = 10;
unsigned char dci_cumulative_log_mask[DCI_LOG_MASK_SIZE];
unsigned char dci_cumulative_event_mask[DCI_EVENT_MASK_SIZE];
struct mutex dci_log_mask_mutex;
struct mutex dci_event_mask_mutex;
struct mutex dci_health_mutex;
spinlock_t ws_lock;
unsigned long ws_lock_flags;

/* Number of milliseconds anticipated to process the DCI data */
#define DCI_WAKEUP_TIMEOUT 1
#define DCI_CAN_ADD_BUF_TO_LIST(buf) \
    (buf && buf->data && !buf->in_busy && buf->data_len > 0)
#ifdef CONFIG_DEBUG_FS
struct diag_dci_data_info *dci_data_smd;
struct mutex dci_stat_mutex;

void diag_dci_smd_record_info(int read_bytes, uint8_t ch_type,
                              uint8_t peripheral)
{
    static int curr_dci_data_smd;
    static unsigned long iteration;
    struct diag_dci_data_info *temp_data = dci_data_smd;

    if (!temp_data)
        return;
    mutex_lock(&dci_stat_mutex);
    if (curr_dci_data_smd == DIAG_DCI_DEBUG_CNT)
        curr_dci_data_smd = 0;
    temp_data += curr_dci_data_smd;
    temp_data->iteration = iteration + 1;
    temp_data->data_size = read_bytes;
    temp_data->peripheral = peripheral;
    temp_data->ch_type = ch_type;
    diag_get_timestamp(temp_data->time_stamp);
    curr_dci_data_smd++;
    iteration++;
    mutex_unlock(&dci_stat_mutex);
}
#else
void diag_dci_smd_record_info(int read_bytes, uint8_t ch_type,
                              uint8_t peripheral) { }
#endif
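
/*
 * Timer callback for the periodic drain. It only queues
 * dci_data_drain_work_fn() on the DCI workqueue: the actual flush takes
 * mutexes, so it must run in process context rather than in the timer's
 * atomic context.
 */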
static void dci_drain_data(unsigned long data)
{
    queue_work(driver->diag_dci_wq, &dci_data_drain_work);
}

static void dci_check_drain_timer(void)
{
    if (!dci_timer_in_progress) {
        dci_timer_in_progress = 1;
        mod_timer(&dci_drain_timer, jiffies + msecs_to_jiffies(500));
    }
}

static int diag_dci_init_buffer(struct diag_dci_buffer_t *buffer, int type)
{
    if (!buffer || buffer->data)
        return -EINVAL;

    switch (type) {
    case DCI_BUF_PRIMARY:
        buffer->data = kzalloc(IN_BUF_SIZE, GFP_KERNEL);
        if (!buffer->data)
            return -ENOMEM;
        buffer->capacity = IN_BUF_SIZE;
        break;
    case DCI_BUF_SECONDARY:
        buffer->data = NULL;
        buffer->capacity = IN_BUF_SIZE;
        break;
    case DCI_BUF_CMD:
        buffer->data = kzalloc(PKT_SIZE, GFP_KERNEL);
        if (!buffer->data)
            return -ENOMEM;
        buffer->capacity = PKT_SIZE;
        break;
    default:
        pr_err("diag: In %s, unknown type %d", __func__, type);
        return -EINVAL;
    }

    buffer->data_len = 0;
    buffer->in_busy = 0;
    buffer->buf_type = type;
    mutex_init(&buffer->data_mutex);
    return 0;
}

static inline int diag_dci_check_buffer(struct diag_dci_buffer_t *buf, int len)
{
    if (!buf)
        return -EINVAL;
    /* Return 1 if the buffer is not busy and can hold new data */
    if ((buf->data_len + len < buf->capacity) && !buf->in_busy)
        return 1;
    return 0;
}

static void dci_add_buffer_to_list(struct diag_dci_client_tbl *client,
                                   struct diag_dci_buffer_t *buf)
{
    if (!buf || !client || !buf->data)
        return;
    if (buf->in_list || buf->data_len == 0)
        return;
    mutex_lock(&client->write_buf_mutex);
    list_add_tail(&buf->buf_track, &client->list_write_buf);
    mutex_lock(&buf->data_mutex);
    buf->in_busy = 1;
    buf->in_list = 1;
    mutex_unlock(&buf->data_mutex);
    mutex_unlock(&client->write_buf_mutex);
}
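
/*
 * Select a buffer with room for len more bytes for this client and data
 * source. Preference order: the current buffer if it still fits, then the
 * preallocated primary buffer, and finally a fresh secondary buffer whose
 * payload comes from the DCI memory pool. On success buf_curr points at
 * the chosen buffer; on failure a negative errno is returned.
 */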
static int diag_dci_get_buffer(struct diag_dci_client_tbl *client,
                               int data_source, int len)
{
    struct diag_dci_buffer_t *buf_primary = NULL;
    struct diag_dci_buffer_t *buf_temp = NULL;
    struct diag_dci_buffer_t *curr = NULL;

    if (!client)
        return -EINVAL;
    if (len < 0 || len > IN_BUF_SIZE)
        return -EINVAL;

    curr = client->buffers[data_source].buf_curr;
    buf_primary = client->buffers[data_source].buf_primary;

    if (curr && diag_dci_check_buffer(curr, len) == 1)
        return 0;

    dci_add_buffer_to_list(client, curr);
    client->buffers[data_source].buf_curr = NULL;

    if (diag_dci_check_buffer(buf_primary, len) == 1) {
        client->buffers[data_source].buf_curr = buf_primary;
        return 0;
    }

    buf_temp = kzalloc(sizeof(struct diag_dci_buffer_t), GFP_KERNEL);
    if (!buf_temp)
        return -EIO;

    if (!diag_dci_init_buffer(buf_temp, DCI_BUF_SECONDARY)) {
        buf_temp->data = diagmem_alloc(driver, driver->itemsize_dci,
                                       POOL_TYPE_DCI);
        if (!buf_temp->data) {
            kfree(buf_temp);
            buf_temp = NULL;
            return -ENOMEM;
        }
        client->buffers[data_source].buf_curr = buf_temp;
        return 0;
    }

    kfree(buf_temp);
    buf_temp = NULL;
    return -EIO;
}
void diag_dci_wakeup_clients(void)
{
    struct list_head *start, *temp;
    struct diag_dci_client_tbl *entry = NULL;

    list_for_each_safe(start, temp, &driver->dci_client_list) {
        entry = list_entry(start, struct diag_dci_client_tbl, track);
        /*
         * Don't wake up the client when there is no pending buffer to
         * write or when it is writing to user space
         */
        if (!list_empty(&entry->list_write_buf) && !entry->in_service) {
            mutex_lock(&entry->write_buf_mutex);
            entry->in_service = 1;
            mutex_unlock(&entry->write_buf_mutex);
            diag_update_sleeping_process(entry->client->tgid,
                                         DCI_DATA_TYPE);
        }
    }
}

void dci_data_drain_work_fn(struct work_struct *work)
{
    int i;
    struct list_head *start, *temp;
    struct diag_dci_client_tbl *entry = NULL;
    struct diag_dci_buf_peripheral_t *proc_buf = NULL;
    struct diag_dci_buffer_t *buf_temp = NULL;

    list_for_each_safe(start, temp, &driver->dci_client_list) {
        entry = list_entry(start, struct diag_dci_client_tbl, track);
        for (i = 0; i < NUM_DCI_PROC; i++) {
            proc_buf = &entry->buffers[i];
            buf_temp = proc_buf->buf_primary;
            if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp))
                dci_add_buffer_to_list(entry, buf_temp);
            buf_temp = proc_buf->buf_cmd;
            if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp))
                dci_add_buffer_to_list(entry, buf_temp);
            buf_temp = proc_buf->buf_curr;
            if (DCI_CAN_ADD_BUF_TO_LIST(buf_temp)) {
                dci_add_buffer_to_list(entry, buf_temp);
                mutex_lock(&proc_buf->buf_mutex);
                proc_buf->buf_curr = NULL;
                mutex_unlock(&proc_buf->buf_mutex);
            }
        }
        if (!list_empty(&entry->list_write_buf) && !entry->in_service) {
            mutex_lock(&entry->write_buf_mutex);
            entry->in_service = 1;
            mutex_unlock(&entry->write_buf_mutex);
            diag_update_sleeping_process(entry->client->tgid,
                                         DCI_DATA_TYPE);
        }
    }
    dci_timer_in_progress = 0;
}

/* Process the data read from apps userspace client */
void diag_process_apps_dci_read_data(int data_type, void *buf, int recd_bytes)
{
    uint8_t cmd_code;

    if (!buf) {
        pr_err_ratelimited("diag: In %s, Null buf pointer\n", __func__);
        return;
    }
    if (data_type != DATA_TYPE_DCI_LOG && data_type != DATA_TYPE_DCI_EVENT
        && data_type != DCI_PKT_TYPE) {
        pr_err("diag: In %s, unsupported data_type: 0x%x\n",
               __func__, (unsigned int)data_type);
        return;
    }

    cmd_code = *(uint8_t *)buf;
    switch (cmd_code) {
    case LOG_CMD_CODE:
        extract_dci_log(buf, recd_bytes, APPS_DATA);
        break;
    case EVENT_CMD_CODE:
        extract_dci_events(buf, recd_bytes, APPS_DATA);
        break;
    case DCI_PKT_RSP_CODE:
    case DCI_DELAYED_RSP_CODE:
        extract_dci_pkt_rsp(buf, recd_bytes, APPS_DATA, NULL);
        break;
    default:
        pr_err("diag: In %s, unsupported command code: 0x%x, not log or event\n",
               __func__, cmd_code);
        return;
    }

    /* wake up all sleeping DCI clients which have some data */
    diag_dci_wakeup_clients();
    dci_check_drain_timer();
}

/* Process the data read from the smd dci channel */
int diag_process_smd_dci_read_data(struct diag_smd_info *smd_info, void *buf,
                                   int recd_bytes)
{
    int read_bytes, dci_pkt_len;
    uint8_t recv_pkt_cmd_code;

    /*
     * Release wakeup source when there are no more clients to
     * process DCI data
     */
    if (driver->num_dci_client == 0) {
        diag_dci_try_deactivate_wakeup_source();
        return 0;
    }

    diag_dci_smd_record_info(recd_bytes, (uint8_t)smd_info->type,
                             (uint8_t)smd_info->peripheral);
    /* Each SMD read can have multiple DCI packets */
    read_bytes = 0;
    while (read_bytes < recd_bytes) {
        /* read actual length of dci pkt */
        dci_pkt_len = *(uint16_t *)(buf + 2);
        /* Check that the current packet fits within the remaining
         * bytes in the received buffer, including space for the
         * Start byte (1), Version byte (1), length bytes (2)
         * and End byte (1)
         */
        if ((dci_pkt_len + 5) > (recd_bytes - read_bytes)) {
            pr_err("diag: Invalid length in %s, len: %d, dci_pkt_len: %d",
                   __func__, recd_bytes, dci_pkt_len);
            diag_dci_try_deactivate_wakeup_source();
            return 0;
        }
        /* process one dci packet */
        pr_debug("diag: dci: peripheral = %d bytes read = %d, single dci pkt len = %d\n",
                 smd_info->peripheral, read_bytes, dci_pkt_len);
        /* print_hex_dump(KERN_DEBUG, "Single DCI packet :",
           DUMP_PREFIX_ADDRESS, 16, 1, buf, 5 + dci_pkt_len, 1); */
        recv_pkt_cmd_code = *(uint8_t *)(buf + 4);
        if (recv_pkt_cmd_code == LOG_CMD_CODE) {
            /* Don't include the 4 bytes for command code */
            extract_dci_log(buf + 4, recd_bytes - 4,
                            smd_info->peripheral);
        } else if (recv_pkt_cmd_code == EVENT_CMD_CODE) {
            /* Don't include the 4 bytes for command code */
            extract_dci_events(buf + 4, recd_bytes - 4,
                               smd_info->peripheral);
        } else
            extract_dci_pkt_rsp(buf + 4, dci_pkt_len,
                                smd_info->peripheral, smd_info);
        read_bytes += 5 + dci_pkt_len;
        buf += 5 + dci_pkt_len; /* advance to next DCI pkt */
    }
    /* wake up all sleeping DCI clients which have some data */
    diag_dci_wakeup_clients();
    dci_check_drain_timer();
    diag_dci_try_deactivate_wakeup_source();
    return 0;
}

static inline struct diag_dci_client_tbl *__diag_dci_get_client_entry(
    int client_id)
{
    struct list_head *start, *temp;
    struct diag_dci_client_tbl *entry = NULL;
    struct pid *pid_struct = NULL;
    struct task_struct *task_s = NULL;

    list_for_each_safe(start, temp, &driver->dci_client_list) {
        entry = list_entry(start, struct diag_dci_client_tbl, track);
        pid_struct = find_get_pid(entry->tgid);
        if (!pid_struct) {
            pr_err("diag: valid pid doesn't exist for pid = %d\n",
                   entry->tgid);
            continue;
        }
        task_s = get_pid_task(pid_struct, PIDTYPE_PID);
        if (!task_s) {
            pr_err("diag: valid task doesn't exist for pid = %d\n",
                   entry->tgid);
            continue;
        }
        if (task_s == entry->client)
            if (entry->client->tgid == client_id)
                return entry;
    }
    return NULL;
}
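
/*
 * The per-client DCI log mask is laid out as one 514-byte block per
 * equipment ID: one equip_id byte, one dirty byte, then 512 bytes of
 * item bits. The two helpers below test whether a given log code or
 * event ID is enabled in a client's mask.
 */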
static inline int __diag_dci_query_log_mask(struct diag_dci_client_tbl *entry,
                                            uint16_t log_code)
{
    uint16_t item_num;
    uint8_t equip_id, *log_mask_ptr, byte_mask;
    int byte_index, offset;

    if (!entry) {
        pr_err("diag: In %s, invalid client entry\n", __func__);
        return 0;
    }

    equip_id = LOG_GET_EQUIP_ID(log_code);
    item_num = LOG_GET_ITEM_NUM(log_code);
    byte_index = item_num / 8 + 2;
    byte_mask = 0x01 << (item_num % 8);
    offset = equip_id * 514;

    if (offset + byte_index > DCI_LOG_MASK_SIZE) {
        pr_err("diag: In %s, invalid offset: %d, log_code: %d, byte_index: %d\n",
               __func__, offset, log_code, byte_index);
        return 0;
    }

    log_mask_ptr = entry->dci_log_mask;
    log_mask_ptr = log_mask_ptr + offset + byte_index;
    return ((*log_mask_ptr & byte_mask) == byte_mask) ? 1 : 0;
}

static inline int __diag_dci_query_event_mask(struct diag_dci_client_tbl *entry,
                                              uint16_t event_id)
{
    uint8_t *event_mask_ptr, byte_mask;
    int byte_index, bit_index;

    if (!entry) {
        pr_err("diag: In %s, invalid client entry\n", __func__);
        return 0;
    }

    byte_index = event_id / 8;
    bit_index = event_id % 8;
    byte_mask = 0x1 << bit_index;

    if (byte_index > DCI_EVENT_MASK_SIZE) {
        pr_err("diag: In %s, invalid, event_id: %d, byte_index: %d\n",
               __func__, event_id, byte_index);
        return 0;
    }

    event_mask_ptr = entry->dci_event_mask;
    event_mask_ptr = event_mask_ptr + byte_index;
    return ((*event_mask_ptr & byte_mask) == byte_mask) ? 1 : 0;
}

static int diag_dci_filter_commands(struct diag_pkt_header_t *header)
{
    if (!header)
        return -ENOMEM;

    switch (header->cmd_code) {
    case 0x7d: /* Msg Mask Configuration */
    case 0x73: /* Log Mask Configuration */
    case 0x81: /* Event Mask Configuration */
    case 0x82: /* Event Mask Change */
    case 0x60: /* Event Mask Toggle */
        return 1;
    }

    if (header->cmd_code == 0x4b && header->subsys_id == 0x12) {
        switch (header->subsys_cmd_code) {
        case 0x60: /* Extended Event Mask Config */
        case 0x61: /* Extended Msg Mask Config */
        case 0x62: /* Extended Log Mask Config */
        case 0x20C: /* Set current Preset ID */
        case 0x20D: /* Get current Preset ID */
            return 1;
        }
    }
    return 0;
}
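
/*
 * Each DCI request is tracked in driver->dci_req_list under a unique,
 * monotonically increasing tag. The tag travels with the request to the
 * peripheral and comes back in the response, which is how the response
 * is matched to the originating client (pid) and its user-supplied uid.
 */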
static struct dci_pkt_req_entry_t *diag_register_dci_transaction(int uid)
{
    struct dci_pkt_req_entry_t *entry = NULL;

    entry = kzalloc(sizeof(struct dci_pkt_req_entry_t), GFP_KERNEL);
    if (!entry)
        return NULL;

    mutex_lock(&driver->dci_mutex);
    driver->dci_tag++;
    entry->pid = current->tgid;
    entry->uid = uid;
    entry->tag = driver->dci_tag;
    list_add_tail(&entry->track, &driver->dci_req_list);
    mutex_unlock(&driver->dci_mutex);

    return entry;
}

static struct dci_pkt_req_entry_t *diag_dci_get_request_entry(int tag)
{
    struct list_head *start, *temp;
    struct dci_pkt_req_entry_t *entry = NULL;

    list_for_each_safe(start, temp, &driver->dci_req_list) {
        entry = list_entry(start, struct dci_pkt_req_entry_t, track);
        if (entry->tag == tag)
            return entry;
    }
    return NULL;
}
static int diag_dci_remove_req_entry(unsigned char *buf, int len,
                                     struct dci_pkt_req_entry_t *entry)
{
    uint16_t rsp_count = 0, delayed_rsp_id = 0;

    if (!buf || len <= 0 || !entry) {
        pr_err("diag: In %s, invalid input buf: %p, len: %d, entry: %p\n",
               __func__, buf, len, entry);
        return -EIO;
    }

    /* It is an immediate response, delete it from the table */
    if (*buf != 0x80) {
        list_del(&entry->track);
        kfree(entry);
        return 1;
    }

    /* It is a delayed response. Check if the length is valid */
    if (len < MIN_DELAYED_RSP_LEN) {
        pr_err("diag: Invalid delayed rsp packet length %d\n", len);
        return -EINVAL;
    }

    /*
     * If the delayed response id field (uint16_t at byte 8) is 0 then
     * there is only one response and we can remove the request entry.
     */
    delayed_rsp_id = *(uint16_t *)(buf + 8);
    if (delayed_rsp_id == 0) {
        list_del(&entry->track);
        kfree(entry);
        return 1;
    }
    /*
     * Check the response count field (uint16_t at byte 10). The request
     * entry can be deleted if it is the last response in the sequence.
     * It is the last response in the sequence if the response count
     * is 1 or if the sign bit gets dropped.
     */
    rsp_count = *(uint16_t *)(buf + 10);
    if (rsp_count > 0 && rsp_count < 0x1000) {
        list_del(&entry->track);
        kfree(entry);
        return 1;
    }
    return 0;
}
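
/*
 * Parse a DCI packet response (immediate or delayed): recover the tag,
 * look up the originating request, and copy the payload into the owning
 * client's command buffer behind a diag_dci_pkt_rsp_header_t. Command
 * responses are queued for userspace immediately instead of waiting for
 * the periodic drain.
 */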
void extract_dci_pkt_rsp(unsigned char *buf, int len, int data_source,
                         struct diag_smd_info *smd_info)
{
    int tag, curr_client_pid = 0;
    struct diag_dci_client_tbl *entry = NULL;
    void *temp_buf = NULL;
    uint8_t dci_cmd_code, cmd_code_len, delete_flag = 0;
    uint32_t rsp_len = 0;
    struct diag_dci_buffer_t *rsp_buf = NULL;
    struct dci_pkt_req_entry_t *req_entry = NULL;
    unsigned char *temp = buf;
    int save_req_uid = 0;
    struct diag_dci_pkt_rsp_header_t pkt_rsp_header;

    if (!buf) {
        pr_err("diag: Invalid pointer in %s\n", __func__);
        return;
    }

    dci_cmd_code = *(uint8_t *)(temp);
    if (dci_cmd_code == DCI_PKT_RSP_CODE) {
        cmd_code_len = sizeof(uint8_t);
    } else if (dci_cmd_code == DCI_DELAYED_RSP_CODE) {
        cmd_code_len = sizeof(uint32_t);
    } else {
        pr_err("diag: In %s, invalid command code %d\n", __func__,
               dci_cmd_code);
        return;
    }
    temp += cmd_code_len;
    tag = *(int *)temp;
    temp += sizeof(int);
    /*
     * The size of the response is (total length) - (length of the
     * command code + the tag (int))
     */
    rsp_len = len - (cmd_code_len + sizeof(int));
    /*
     * Check if the length embedded in the packet is correct.
     * Include the start (1), version (1), length (2) and the end
     * (1) bytes while checking. Total = 5 bytes
     */
    if ((rsp_len == 0) || (rsp_len > (len - 5))) {
        pr_err("diag: Invalid length in %s, len: %d, rsp_len: %d",
               __func__, len, rsp_len);
        return;
    }

    req_entry = diag_dci_get_request_entry(tag);
    if (!req_entry) {
        pr_err("diag: No matching PID for DCI data\n");
        return;
    }
    curr_client_pid = req_entry->pid;
    save_req_uid = req_entry->uid;

    /* Remove the headers and send only the response to this function */
    mutex_lock(&driver->dci_mutex);
    delete_flag = diag_dci_remove_req_entry(temp, rsp_len, req_entry);
    if (delete_flag < 0) {
        mutex_unlock(&driver->dci_mutex);
        return;
    }
    mutex_unlock(&driver->dci_mutex);

    entry = __diag_dci_get_client_entry(curr_client_pid);
    if (!entry) {
        pr_err("diag: In %s, couldn't find entry\n", __func__);
        return;
    }

    rsp_buf = entry->buffers[data_source].buf_cmd;

    mutex_lock(&rsp_buf->data_mutex);
    /*
     * Check if we can fit the data in the rsp buffer. The total length of
     * the rsp is the rsp length (write_len) + DCI_PKT_RSP_TYPE header (int)
     * + field for length (int) + delete_flag (uint8_t)
     */
    if ((rsp_buf->data_len + 9 + rsp_len) > rsp_buf->capacity) {
        pr_alert("diag: create capacity for pkt rsp\n");
        rsp_buf->capacity += 9 + rsp_len;
        temp_buf = krealloc(rsp_buf->data, rsp_buf->capacity,
                            GFP_KERNEL);
        if (!temp_buf) {
            pr_err("diag: DCI realloc failed\n");
            mutex_unlock(&rsp_buf->data_mutex);
            return;
        } else {
            rsp_buf->data = temp_buf;
        }
    }

    /* Fill in packet response header information */
    pkt_rsp_header.type = DCI_PKT_RSP_TYPE;
    /* Packet Length = Response Length + Length of uid field (int) */
    pkt_rsp_header.length = rsp_len + sizeof(int);
    pkt_rsp_header.delete_flag = delete_flag;
    pkt_rsp_header.uid = save_req_uid;
    memcpy(rsp_buf->data, &pkt_rsp_header,
           sizeof(struct diag_dci_pkt_rsp_header_t));
    rsp_buf->data_len += sizeof(struct diag_dci_pkt_rsp_header_t);
    memcpy(rsp_buf->data + rsp_buf->data_len, temp, rsp_len);
    rsp_buf->data_len += rsp_len;
    rsp_buf->data_source = data_source;
    if (smd_info)
        smd_info->in_busy_1 = 1;
    mutex_unlock(&rsp_buf->data_mutex);

    /*
     * Add directly to the list for writing responses to the
     * userspace as these shouldn't be buffered and shouldn't wait
     * for log and event buffers to be full
     */
    dci_add_buffer_to_list(entry, rsp_buf);
}

static void copy_dci_event(unsigned char *buf, int len,
                           struct diag_dci_client_tbl *client, int data_source)
{
    struct diag_dci_buffer_t *data_buffer = NULL;
    struct diag_dci_buf_peripheral_t *proc_buf = NULL;
    int err = 0, total_len = 0;

    if (!buf || !client) {
        pr_err("diag: Invalid pointers in %s", __func__);
        return;
    }

    total_len = sizeof(int) + len;

    proc_buf = &client->buffers[data_source];
    mutex_lock(&proc_buf->buf_mutex);
    mutex_lock(&proc_buf->health_mutex);
    err = diag_dci_get_buffer(client, data_source, total_len);
    if (err) {
        if (err == -ENOMEM)
            proc_buf->health.dropped_events++;
        else
            pr_err("diag: In %s, invalid packet\n", __func__);
        mutex_unlock(&proc_buf->health_mutex);
        mutex_unlock(&proc_buf->buf_mutex);
        return;
    }

    data_buffer = proc_buf->buf_curr;

    proc_buf->health.received_events++;
    mutex_unlock(&proc_buf->health_mutex);
    mutex_unlock(&proc_buf->buf_mutex);

    mutex_lock(&data_buffer->data_mutex);
    *(int *)(data_buffer->data + data_buffer->data_len) = DCI_EVENT_TYPE;
    data_buffer->data_len += sizeof(int);
    memcpy(data_buffer->data + data_buffer->data_len, buf, len);
    data_buffer->data_len += len;
    data_buffer->data_source = data_source;
    mutex_unlock(&data_buffer->data_mutex);
}
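
/*
 * Walk an incoming event report. Each record starts with a 16-bit
 * event_id_packet: bits 0-11 hold the event ID, bits 13-14 encode the
 * payload length (3 means an explicit length byte follows), and bit 15
 * set means only the two low bytes of the timestamp are present. Every
 * event is rebuilt with a full 8-byte timestamp and copied to each
 * client whose event mask has the ID enabled.
 */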
void extract_dci_events(unsigned char *buf, int len, int data_source)
{
    uint16_t event_id, event_id_packet, length, temp_len;
    uint8_t payload_len, payload_len_field;
    uint8_t timestamp[8], timestamp_len;
    unsigned char event_data[MAX_EVENT_SIZE];
    unsigned int total_event_len;
    struct list_head *start, *temp;
    struct diag_dci_client_tbl *entry = NULL;

    if (!buf) {
        pr_err("diag: In %s buffer is NULL\n", __func__);
        return;
    }
    /*
     * 1 byte for event code and 2 bytes for the length field.
     */
    if (len < 3) {
        pr_err("diag: In %s invalid len: %d\n", __func__, len);
        return;
    }
    length = *(uint16_t *)(buf + 1); /* total length of event series */
    if ((length == 0) || (len != (length + 3))) {
        pr_err("diag: Incoming dci event length: %d is invalid\n",
               length);
        return;
    }
    /*
     * Move directly to the start of the event series.
     * The event parsing should happen from start of event
     * series till the end.
     */
    temp_len = 3;
    while (temp_len < (length - 1)) {
        event_id_packet = *(uint16_t *)(buf + temp_len);
        event_id = event_id_packet & 0x0FFF; /* extract 12 bits */
        if (event_id_packet & 0x8000) {
            /* The packet carries only the two least significant
             * bytes of the timestamp
             */
            timestamp_len = 2;
        } else {
            /* The packet has the full timestamp. The first event
             * will always have a full timestamp. Save it in the
             * timestamp buffer and use it for subsequent events if
             * necessary.
             */
            timestamp_len = 8;
            if ((temp_len + timestamp_len + 2) <= len)
                memcpy(timestamp, buf + temp_len + 2,
                       timestamp_len);
            else {
                pr_err("diag: Invalid length in %s, len: %d, temp_len: %d",
                       __func__, len, temp_len);
                return;
            }
        }
        /* 13th and 14th bit represent the payload length */
        if (((event_id_packet & 0x6000) >> 13) == 3) {
            payload_len_field = 1;
            if ((temp_len + timestamp_len + 3) <= len) {
                payload_len = *(uint8_t *)
                    (buf + temp_len + 2 + timestamp_len);
            } else {
                pr_err("diag: Invalid length in %s, len: %d, temp_len: %d",
                       __func__, len, temp_len);
                return;
            }
            if ((payload_len < (MAX_EVENT_SIZE - 13)) &&
                ((temp_len + timestamp_len + payload_len + 3) <= len)) {
                /*
                 * Copy the payload length and the payload
                 * after skipping temp_len bytes for already
                 * parsed packet, timestamp_len for timestamp
                 * buffer, 2 bytes for event_id_packet.
                 */
                memcpy(event_data + 12, buf + temp_len + 2 +
                       timestamp_len, 1);
                memcpy(event_data + 13, buf + temp_len + 2 +
                       timestamp_len + 1, payload_len);
            } else {
                pr_err("diag: event > %d, payload_len = %d, temp_len = %d\n",
                       (MAX_EVENT_SIZE - 13), payload_len, temp_len);
                return;
            }
        } else {
            payload_len_field = 0;
            payload_len = (event_id_packet & 0x6000) >> 13;
            /*
             * Copy the payload after skipping temp_len bytes
             * for already parsed packet, timestamp_len for
             * timestamp buffer, 2 bytes for event_id_packet.
             */
            if ((payload_len < (MAX_EVENT_SIZE - 12)) &&
                ((temp_len + timestamp_len + payload_len + 2) <= len))
                memcpy(event_data + 12, buf + temp_len + 2 +
                       timestamp_len, payload_len);
            else {
                pr_err("diag: event > %d, payload_len = %d, temp_len = %d\n",
                       (MAX_EVENT_SIZE - 12), payload_len, temp_len);
                return;
            }
        }
        /* Before copying the data to userspace, check if we are still
         * within the buffer limit. This is an error case, don't count
         * it towards the health statistics.
         *
         * Here, the offset of 2 bytes (uint16_t) is for the
         * event_id_packet length
         */
        temp_len += sizeof(uint16_t) + timestamp_len +
            payload_len_field + payload_len;
        if (temp_len > len) {
            pr_err("diag: Invalid length in %s, len: %d, read: %d",
                   __func__, len, temp_len);
            return;
        }
        /* 2 bytes for the event id & timestamp len is hard coded to 8,
           as individual events have full timestamp */
        *(uint16_t *)(event_data) = 10 +
            payload_len_field + payload_len;
        *(uint16_t *)(event_data + 2) = event_id_packet & 0x7FFF;
        memcpy(event_data + 4, timestamp, 8);
        /* 2 bytes for the event length field which is added to
           the event data */
        total_event_len = 2 + 10 + payload_len_field + payload_len;
        /* parse through event mask tbl of each client and check mask */
        list_for_each_safe(start, temp, &driver->dci_client_list) {
            entry = list_entry(start, struct diag_dci_client_tbl,
                               track);
            if (__diag_dci_query_event_mask(entry, event_id)) {
                /* copy to client buffer */
                copy_dci_event(event_data, total_event_len,
                               entry, data_source);
            }
        }
    }
}

static void copy_dci_log(unsigned char *buf, int len,
                         struct diag_dci_client_tbl *client, int data_source)
{
    uint16_t log_length = 0;
    struct diag_dci_buffer_t *data_buffer = NULL;
    struct diag_dci_buf_peripheral_t *proc_buf = NULL;
    int err = 0, total_len = 0;

    if (!buf || !client) {
        pr_err("diag: Invalid pointers in %s", __func__);
        return;
    }

    log_length = *(uint16_t *)(buf + 2);
    if (log_length > USHRT_MAX - 4) {
        pr_err("diag: Integer overflow in %s, log_len: %d",
               __func__, log_length);
        return;
    }
    total_len = sizeof(int) + log_length;

    /* Check if we are within the len. The check should include the
     * first 4 bytes for the Log code (2) and the length bytes (2)
     */
    if ((log_length + sizeof(uint16_t) + 2) > len) {
        pr_err("diag: Invalid length in %s, log_len: %d, len: %d",
               __func__, log_length, len);
        return;
    }

    proc_buf = &client->buffers[data_source];
    mutex_lock(&proc_buf->buf_mutex);
    mutex_lock(&proc_buf->health_mutex);
    err = diag_dci_get_buffer(client, data_source, total_len);
    if (err) {
        if (err == -ENOMEM)
            proc_buf->health.dropped_logs++;
        else
            pr_err("diag: In %s, invalid packet\n", __func__);
        mutex_unlock(&proc_buf->health_mutex);
        mutex_unlock(&proc_buf->buf_mutex);
        return;
    }

    data_buffer = proc_buf->buf_curr;

    proc_buf->health.received_logs++;
    mutex_unlock(&proc_buf->health_mutex);
    mutex_unlock(&proc_buf->buf_mutex);

    mutex_lock(&data_buffer->data_mutex);
    if (!data_buffer->data) {
        mutex_unlock(&data_buffer->data_mutex);
        return;
    }
    *(int *)(data_buffer->data + data_buffer->data_len) = DCI_LOG_TYPE;
    data_buffer->data_len += sizeof(int);
    memcpy(data_buffer->data + data_buffer->data_len, buf + sizeof(int),
           log_length);
    data_buffer->data_len += log_length;
    data_buffer->data_source = data_source;
    mutex_unlock(&data_buffer->data_mutex);
}

void extract_dci_log(unsigned char *buf, int len, int data_source)
{
    uint16_t log_code, read_bytes = 0;
    struct list_head *start, *temp;
    struct diag_dci_client_tbl *entry = NULL;

    if (!buf) {
        pr_err("diag: In %s buffer is NULL\n", __func__);
        return;
    }
    /*
     * The first eight bytes of the incoming log packet contain the
     * Command code (2), the length of the packet (2), the length
     * of the log (2) and the log code (2)
     */
    if (len < 8) {
        pr_err("diag: In %s invalid len: %d\n", __func__, len);
        return;
    }

    log_code = *(uint16_t *)(buf + 6);
    read_bytes += sizeof(uint16_t) + 6;

    /* parse through log mask table of each client and check mask */
    list_for_each_safe(start, temp, &driver->dci_client_list) {
        entry = list_entry(start, struct diag_dci_client_tbl, track);
        if (__diag_dci_query_log_mask(entry, log_code)) {
            pr_debug("\t log code %x needed by client %d",
                     log_code, entry->client->tgid);
            /* copy to client buffer */
            copy_dci_log(buf, len, entry, data_source);
        }
    }
}
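
/*
 * Propagate per-client DCI log mask changes: collect the dirty byte of
 * each 514-byte equipment-ID block across all clients, fold the dirty
 * bits into the cumulative mask, then push the updated log and event
 * masks to userspace clients and to the peripherals.
 */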
void diag_update_smd_dci_work_fn(struct work_struct *work)
{
    struct diag_smd_info *smd_info = container_of(work,
                                                  struct diag_smd_info,
                                                  diag_notify_update_smd_work);
    int i, j;
    char dirty_bits[16];
    uint8_t *client_log_mask_ptr;
    uint8_t *log_mask_ptr;
    int ret;
    struct list_head *start, *temp;
    struct diag_dci_client_tbl *entry = NULL;

    /* Update apps and peripheral(s) with the dci log and event masks */
    memset(dirty_bits, 0, 16 * sizeof(uint8_t));
    /*
     * From each log entry used by each client, determine
     * which log entries in the cumulative logs need
     * to be updated on the peripheral.
     */
    list_for_each_safe(start, temp, &driver->dci_client_list) {
        entry = list_entry(start, struct diag_dci_client_tbl, track);
        client_log_mask_ptr = entry->dci_log_mask;
        for (j = 0; j < 16; j++) {
            if (*(client_log_mask_ptr + 1))
                dirty_bits[j] = 1;
            client_log_mask_ptr += 514;
        }
    }

    mutex_lock(&dci_log_mask_mutex);
    /* Update the appropriate dirty bits in the cumulative mask */
    log_mask_ptr = dci_cumulative_log_mask;
    for (i = 0; i < 16; i++) {
        if (dirty_bits[i])
            *(log_mask_ptr + 1) = dirty_bits[i];
        log_mask_ptr += 514;
    }
    mutex_unlock(&dci_log_mask_mutex);

    /* Send updated mask to userspace clients */
    diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
    /* Send updated log mask to peripherals */
    ret = diag_send_dci_log_mask();
    /* Send updated event mask to userspace clients */
    diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
    /* Send updated event mask to peripheral */
    ret = diag_send_dci_event_mask();

    smd_info->notify_context = 0;
}

void diag_dci_notify_client(int peripheral_mask, int data)
{
    int stat;
    struct siginfo info;
    struct list_head *start, *temp;
    struct diag_dci_client_tbl *entry = NULL;

    memset(&info, 0, sizeof(struct siginfo));
    info.si_code = SI_QUEUE;
    info.si_int = (peripheral_mask | data);

    /* Notify the DCI process that the peripheral DCI Channel is up */
    list_for_each_safe(start, temp, &driver->dci_client_list) {
        entry = list_entry(start, struct diag_dci_client_tbl, track);
        if (entry->client_info.notification_list & peripheral_mask) {
            info.si_signo = entry->client_info.signal_type;
            stat = send_sig_info(entry->client_info.signal_type,
                                 &info, entry->client);
            if (stat)
                pr_err("diag: Err sending dci signal to client, signal data: 0x%x, stat: %d\n",
                       info.si_int, stat);
        }
    }
}
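
/*
 * Frame a DCI request for transport: start byte, version, 16-bit length,
 * DCI_PKT_RSP_CODE, the transaction tag, the payload and the end byte.
 * Commands registered by the apps processor are looped back locally;
 * everything else is written to the owning peripheral's DCI channel.
 */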
static int diag_send_dci_pkt(struct diag_master_table entry,
                             unsigned char *buf, int len, int tag)
{
    int i, status = DIAG_DCI_NO_ERROR;
    unsigned int read_len = 0;
    /* The first 4 bytes are the uid tag and the next four bytes are
       the minimum packet length of a request packet */
    if (len < DCI_PKT_REQ_MIN_LEN) {
        pr_err("diag: dci: Invalid pkt len %d in %s\n", len, __func__);
        return -EIO;
    }
    if (len > APPS_BUF_SIZE - 10) {
        pr_err("diag: dci: Invalid payload length in %s\n", __func__);
        return -EIO;
    }

    /* remove UID from user space pkt before sending to peripheral */
    buf = buf + sizeof(int);
    read_len += sizeof(int);
    len = len - sizeof(int);
    mutex_lock(&driver->dci_mutex);
    /* prepare DCI packet */
    driver->apps_dci_buf[0] = CONTROL_CHAR; /* start */
    driver->apps_dci_buf[1] = 1; /* version */
    *(uint16_t *)(driver->apps_dci_buf + 2) = len + 4 + 1; /* length */
    driver->apps_dci_buf[4] = DCI_PKT_RSP_CODE;
    *(int *)(driver->apps_dci_buf + 5) = tag;
    for (i = 0; i < len; i++)
        driver->apps_dci_buf[i + 9] = *(buf + i);
    read_len += len;
    driver->apps_dci_buf[9 + len] = CONTROL_CHAR; /* end */
    if ((read_len + 9) >= USER_SPACE_DATA) {
        pr_err("diag: dci: Invalid length while forming dci pkt in %s",
               __func__);
        mutex_unlock(&driver->dci_mutex);
        return -EIO;
    }

    /* This command is registered locally on the Apps */
    if (entry.client_id == APPS_DATA) {
        driver->dci_pkt_length = len + 10;
        diag_update_pkt_buffer(driver->apps_dci_buf, DCI_PKT_TYPE);
        diag_update_sleeping_process(entry.process_id, DCI_PKT_TYPE);
        mutex_unlock(&driver->dci_mutex);
        return DIAG_DCI_NO_ERROR;
    }

    for (i = 0; i < NUM_SMD_DCI_CHANNELS; i++)
        if (entry.client_id == i) {
            status = 1;
            break;
        }

    if (status) {
        status = diag_dci_write_proc(entry.client_id,
                                     DIAG_DATA_TYPE,
                                     driver->apps_dci_buf,
                                     len + 10);
    } else {
        pr_err("diag: Cannot send packet to peripheral %d",
               entry.client_id);
        status = DIAG_DCI_SEND_DATA_FAIL;
    }
    mutex_unlock(&driver->dci_mutex);
    return status;
}
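
/*
 * Handle DCI requests that the apps processor answers itself rather than
 * forwarding: download-mode entry, version/extended-build polls,
 * log-on-demand and a few DIAG subsystem commands. The response is
 * assembled in apps_dci_buf behind a diag_dci_pkt_header_t and fed back
 * through diag_process_apps_dci_read_data().
 */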
static int diag_dci_process_apps_pkt(struct diag_pkt_header_t *pkt_header,
                                     unsigned char *req_buf, int tag)
{
    uint8_t cmd_code, subsys_id, i, goto_download = 0;
    uint8_t header_len = sizeof(struct diag_dci_pkt_header_t);
    uint16_t ss_cmd_code;
    uint32_t write_len = 0;
    unsigned char *dest_buf = driver->apps_dci_buf;
    unsigned char *payload_ptr = driver->apps_dci_buf + header_len;
    struct diag_dci_pkt_header_t dci_header;

    if (!pkt_header || !req_buf || tag < 0)
        return -EIO;

    cmd_code = pkt_header->cmd_code;
    subsys_id = pkt_header->subsys_id;
    ss_cmd_code = pkt_header->subsys_cmd_code;

    if (cmd_code == DIAG_CMD_DOWNLOAD) {
        *payload_ptr = DIAG_CMD_DOWNLOAD;
        write_len = sizeof(uint8_t);
        goto_download = 1;
        goto fill_buffer;
    } else if (cmd_code == DIAG_CMD_VERSION) {
        if (chk_polling_response()) {
            for (i = 0; i < 55; i++, write_len++, payload_ptr++)
                *(payload_ptr) = 0;
            goto fill_buffer;
        }
    } else if (cmd_code == DIAG_CMD_EXT_BUILD) {
        if (chk_polling_response()) {
            *payload_ptr = DIAG_CMD_EXT_BUILD;
            write_len = sizeof(uint8_t);
            payload_ptr += sizeof(uint8_t);
            for (i = 0; i < 8; i++, write_len++, payload_ptr++)
                *(payload_ptr) = 0;
            *(int *)(payload_ptr) = chk_config_get_id();
            write_len += sizeof(int);
            goto fill_buffer;
        }
    } else if (cmd_code == DIAG_CMD_LOG_ON_DMND) {
        if (driver->log_on_demand_support) {
            *payload_ptr = DIAG_CMD_LOG_ON_DMND;
            write_len = sizeof(uint8_t);
            payload_ptr += sizeof(uint8_t);
            *(uint16_t *)(payload_ptr) = *(uint16_t *)(req_buf + 1);
            write_len += sizeof(uint16_t);
            payload_ptr += sizeof(uint16_t);
            *payload_ptr = 0x1; /* Unknown */
            write_len += sizeof(uint8_t);
            goto fill_buffer;
        }
    } else if (cmd_code != DIAG_CMD_DIAG_SUBSYS) {
        return DIAG_DCI_TABLE_ERR;
    }

    if (subsys_id == DIAG_SS_DIAG) {
        if (ss_cmd_code == DIAG_DIAG_MAX_PKT_SZ) {
            memcpy(payload_ptr, pkt_header,
                   sizeof(struct diag_pkt_header_t));
            write_len = sizeof(struct diag_pkt_header_t);
            *(uint32_t *)(payload_ptr + write_len) = PKT_SIZE;
            write_len += sizeof(uint32_t);
        } else if (ss_cmd_code == DIAG_DIAG_STM) {
            write_len = diag_process_stm_cmd(req_buf, payload_ptr);
        }
    } else if (subsys_id == DIAG_SS_PARAMS) {
        if (ss_cmd_code == DIAG_DIAG_POLL) {
            if (chk_polling_response()) {
                memcpy(payload_ptr, pkt_header,
                       sizeof(struct diag_pkt_header_t));
                write_len = sizeof(struct diag_pkt_header_t);
                payload_ptr += write_len;
                for (i = 0; i < 12; i++, write_len++) {
                    *(payload_ptr) = 0;
                    payload_ptr++;
                }
            }
        } else if (ss_cmd_code == DIAG_DEL_RSP_WRAP) {
            memcpy(payload_ptr, pkt_header,
                   sizeof(struct diag_pkt_header_t));
            write_len = sizeof(struct diag_pkt_header_t);
            *(int *)(payload_ptr + write_len) = wrap_enabled;
            write_len += sizeof(int);
        } else if (ss_cmd_code == DIAG_DEL_RSP_WRAP_CNT) {
            wrap_enabled = true;
            memcpy(payload_ptr, pkt_header,
                   sizeof(struct diag_pkt_header_t));
            write_len = sizeof(struct diag_pkt_header_t);
            *(uint16_t *)(payload_ptr + write_len) = wrap_count;
            write_len += sizeof(uint16_t);
        }
    }

fill_buffer:
    if (write_len > 0) {
        /* Check if we are within the range of the buffer */
        if (write_len + header_len > PKT_SIZE) {
            pr_err("diag: In %s, invalid length %d\n", __func__,
                   write_len + header_len);
            return -ENOMEM;
        }
        dci_header.start = CONTROL_CHAR;
        dci_header.version = 1;
        /*
         * Length of the rsp pkt = actual data len + pkt rsp code
         * (uint8_t) + tag (int)
         */
        dci_header.len = write_len + sizeof(uint8_t) + sizeof(int);
        dci_header.pkt_code = DCI_PKT_RSP_CODE;
        dci_header.tag = tag;
        driver->in_busy_dcipktdata = 1;
        memcpy(dest_buf, &dci_header, header_len);
        diag_process_apps_dci_read_data(DCI_PKT_TYPE, dest_buf + 4,
                                        dci_header.len);
        driver->in_busy_dcipktdata = 0;

        if (goto_download) {
            /*
             * Sleep for some time so that the response reaches the
             * client. The value 5000 was chosen empirically as an
             * optimum time for the response to reach the client.
             */
            usleep_range(5000, 5100);
            /* call download API */
            msm_set_restart_mode(RESTART_DLOAD);
            pr_alert("diag: download mode set, Rebooting SoC..\n");
            kernel_restart(NULL);
        }
        return DIAG_DCI_NO_ERROR;
    }
    return DIAG_DCI_TABLE_ERR;
}
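
/*
 * Entry point for a DCI packet request from userspace: validate the
 * length, reject mask-configuration commands, register a transaction for
 * the request UID, try the apps-only handlers first, and otherwise route
 * the request through the command registration table.
 */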
static int diag_process_dci_pkt_rsp(unsigned char *buf, int len)
{
    int req_uid, ret = DIAG_DCI_TABLE_ERR, i;
    struct diag_pkt_header_t *header = NULL;
    unsigned char *temp = buf;
    unsigned char *req_buf = NULL;
    uint8_t retry_count = 0, max_retries = 3, found = 0;
    uint32_t read_len = 0;
    struct diag_master_table entry;
    struct dci_pkt_req_entry_t *req_entry = NULL;

    if (!buf)
        return -EIO;

    if (len < DCI_PKT_REQ_MIN_LEN || len > USER_SPACE_DATA) {
        pr_err("diag: dci: Invalid length %d len in %s", len, __func__);
        return -EIO;
    }

    req_uid = *(int *)temp; /* UID of the request */
    temp += sizeof(int);
    req_buf = temp; /* Start of the Request */
    header = (struct diag_pkt_header_t *)temp;
    temp += sizeof(struct diag_pkt_header_t);
    read_len = sizeof(int) + sizeof(struct diag_pkt_header_t);
    if (read_len >= USER_SPACE_DATA) {
        pr_err("diag: dci: Invalid length in %s\n", __func__);
        return -EIO;
    }

    /* Check if the command is allowed on DCI */
    if (diag_dci_filter_commands(header)) {
        pr_debug("diag: command not supported %d %d %d",
                 header->cmd_code, header->subsys_id,
                 header->subsys_cmd_code);
        return DIAG_DCI_SEND_DATA_FAIL;
    }

    /*
     * Previous packet is yet to be consumed by the client. Wait
     * till the buffer is free.
     */
    while (retry_count < max_retries) {
        retry_count++;
        if (driver->in_busy_dcipktdata)
            usleep_range(10000, 10100);
        else
            break;
    }
    /* The buffer is still busy */
    if (driver->in_busy_dcipktdata) {
        pr_err("diag: In %s, apps dci buffer is still busy. Dropping packet\n",
               __func__);
        return -EAGAIN;
    }

    /* Register this new DCI packet */
    req_entry = diag_register_dci_transaction(req_uid);
    if (!req_entry) {
        pr_alert("diag: registering new DCI transaction failed\n");
        return DIAG_DCI_NO_REG;
    }

    /* Check if it is a dedicated Apps command */
    ret = diag_dci_process_apps_pkt(header, req_buf, req_entry->tag);
    if (ret == DIAG_DCI_NO_ERROR || ret < 0)
        return ret;

    /* Check the registration table for command entries */
    for (i = 0; i < diag_max_reg && !found; i++) {
        entry = driver->table[i];
        if (entry.process_id == NO_PROCESS)
            continue;
        if (entry.cmd_code == header->cmd_code &&
            entry.subsys_id == header->subsys_id &&
            entry.cmd_code_lo <= header->subsys_cmd_code &&
            entry.cmd_code_hi >= header->subsys_cmd_code) {
            ret = diag_send_dci_pkt(entry, buf, len,
                                    req_entry->tag);
            found = 1;
        } else if (entry.cmd_code == 255 && header->cmd_code == 75) {
            if (entry.subsys_id == header->subsys_id &&
                entry.cmd_code_lo <= header->subsys_cmd_code &&
                entry.cmd_code_hi >= header->subsys_cmd_code) {
                ret = diag_send_dci_pkt(entry, buf, len,
                                        req_entry->tag);
                found = 1;
            }
        } else if (entry.cmd_code == 255 && entry.subsys_id == 255) {
            if (entry.cmd_code_lo <= header->cmd_code &&
                entry.cmd_code_hi >= header->cmd_code) {
                /*
                 * If it is a Mode reset command, make sure it is
                 * registered on the Apps Processor
                 */
                if (entry.cmd_code_lo == MODE_CMD &&
                    entry.cmd_code_hi == MODE_CMD &&
                    header->subsys_id == RESET_ID) {
                    if (entry.client_id != APPS_DATA)
                        continue;
                }
                ret = diag_send_dci_pkt(entry, buf, len,
                                        req_entry->tag);
                found = 1;
            }
        }
    }
    return ret;
}
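
/*
 * Dispatch a DCI transaction from userspace based on its leading int: a
 * positive value is a packet request/response UID, DCI_LOG_TYPE carries a
 * log mask change and DCI_EVENT_TYPE an event mask change. Mask payloads
 * are set_mask (4 bytes) and num_codes (4 bytes), followed by the list of
 * 2-byte log codes or 4-byte event IDs.
 */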
int diag_process_dci_transaction(unsigned char *buf, int len)
{
    unsigned char *temp = buf;
    uint16_t log_code, item_num;
    int ret = -1, found = 0;
    int count, set_mask, num_codes, bit_index, event_id, offset = 0;
    unsigned int byte_index, read_len = 0;
    uint8_t equip_id, *log_mask_ptr, *head_log_mask_ptr, byte_mask;
    uint8_t *event_mask_ptr;
    struct diag_dci_client_tbl *dci_entry = NULL;

    if (!temp || len < sizeof(int)) {
        pr_err("diag: Invalid input in %s\n", __func__);
        return -EINVAL;
    }

    /* This is Pkt request/response transaction */
    if (*(int *)temp > 0) {
        return diag_process_dci_pkt_rsp(buf, len);
    } else if (*(int *)temp == DCI_LOG_TYPE) {
        /* Minimum length of a log mask config is 12 + 2 bytes for
           at least one log code to be set or reset */
  1244. if (len < DCI_LOG_CON_MIN_LEN || len > USER_SPACE_DATA) {
  1245. pr_err("diag: dci: Invalid length in %s\n", __func__);
  1246. return -EIO;
  1247. }
		/* find client table entry */
		dci_entry = diag_dci_get_client_entry();
		if (!dci_entry) {
			pr_err("diag: In %s, invalid client\n", __func__);
			return ret;
		}
		/* Extract each log code and put it in the client table */
		temp += sizeof(int);
		read_len += sizeof(int);
		set_mask = *(int *)temp;
		temp += sizeof(int);
		read_len += sizeof(int);
		num_codes = *(int *)temp;
		temp += sizeof(int);
		read_len += sizeof(int);

		if (num_codes == 0 || (num_codes >= (USER_SPACE_DATA - 8)/2)) {
			pr_err("diag: dci: Invalid number of log codes %d\n",
			       num_codes);
			return -EIO;
		}

		head_log_mask_ptr = dci_entry->dci_log_mask;
		if (!head_log_mask_ptr) {
			pr_err("diag: dci: Invalid log mask pointer in %s\n",
			       __func__);
			return -ENOMEM;
		}
		pr_debug("diag: head of dci log mask %p\n", head_log_mask_ptr);
		count = 0; /* iterator for extracting log codes */
		while (count < num_codes) {
			if (read_len + sizeof(uint16_t) > len) {
				pr_err("diag: dci: Invalid length for log type in %s\n",
				       __func__);
				return -EIO;
			}
			log_code = *(uint16_t *)temp;
			equip_id = LOG_GET_EQUIP_ID(log_code);
			item_num = LOG_GET_ITEM_NUM(log_code);
			byte_index = item_num/8 + 2;
			if (byte_index >= (DCI_MAX_ITEMS_PER_LOG_CODE+2)) {
				pr_err("diag: dci: Log type, invalid byte index\n");
				return ret;
			}
			byte_mask = 0x01 << (item_num % 8);
			/*
			 * Parse through the log mask table and find the
			 * relevant range
			 */
			log_mask_ptr = head_log_mask_ptr;
			found = 0;
			offset = 0;
			while (log_mask_ptr && (offset < DCI_LOG_MASK_SIZE)) {
				if (*log_mask_ptr == equip_id) {
					found = 1;
					pr_debug("diag: found equip id = %x at %p\n",
						 equip_id, log_mask_ptr);
					break;
				} else {
					pr_debug("diag: did not find equip id = %x at %p\n",
						 equip_id, log_mask_ptr);
					log_mask_ptr += 514;
					offset += 514;
				}
			}
			if (!found) {
				pr_err("diag: dci equip id not found\n");
				return ret;
			}
			*(log_mask_ptr+1) = 1; /* set the dirty byte */
			log_mask_ptr = log_mask_ptr + byte_index;
			if (set_mask)
				*log_mask_ptr |= byte_mask;
			else
				*log_mask_ptr &= ~byte_mask;
			/* add to cumulative mask */
			update_dci_cumulative_log_mask(offset, byte_index,
						       byte_mask);
			temp += 2;
			read_len += 2;
			count++;
			ret = DIAG_DCI_NO_ERROR;
		}
		/* send updated mask to userspace clients */
		diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
		/* send updated mask to peripherals */
		ret = diag_send_dci_log_mask();
	} else if (*(int *)temp == DCI_EVENT_TYPE) {
		/*
		 * Minimum length of an event mask config is 12 + 4 bytes for
		 * at least one event id to be set or reset.
		 */
		if (len < DCI_EVENT_CON_MIN_LEN || len > USER_SPACE_DATA) {
			pr_err("diag: dci: Invalid length in %s\n", __func__);
			return -EIO;
		}
		/* find client table entry */
		dci_entry = diag_dci_get_client_entry();
		if (!dci_entry) {
			pr_err("diag: In %s, invalid client\n", __func__);
			return ret;
		}
		/* Extract each event id and put it in the client table */
		temp += sizeof(int);
		read_len += sizeof(int);
		set_mask = *(int *)temp;
		temp += sizeof(int);
		read_len += sizeof(int);
		num_codes = *(int *)temp;
		temp += sizeof(int);
		read_len += sizeof(int);

		/*
		 * Check for a positive number of event ids. Also, the number
		 * of event ids should fit in the buffer along with set_mask
		 * and num_codes, which are 4 bytes each.
		 */
		if (num_codes == 0 || (num_codes >= (USER_SPACE_DATA - 8)/2)) {
			pr_err("diag: dci: Invalid number of event ids %d\n",
			       num_codes);
			return -EIO;
		}

		event_mask_ptr = dci_entry->dci_event_mask;
		if (!event_mask_ptr) {
			pr_err("diag: dci: Invalid event mask pointer in %s\n",
			       __func__);
			return -ENOMEM;
		}
		pr_debug("diag: head of dci event mask %p\n", event_mask_ptr);
		count = 0; /* iterator for extracting event ids */
		while (count < num_codes) {
			if (read_len + sizeof(int) > len) {
				pr_err("diag: dci: Invalid length for event type in %s\n",
				       __func__);
				return -EIO;
			}
			event_id = *(int *)temp;
			byte_index = event_id/8;
			if (byte_index >= DCI_EVENT_MASK_SIZE) {
				pr_err("diag: dci: Event type, invalid byte index\n");
				return ret;
			}
			bit_index = event_id % 8;
			byte_mask = 0x1 << bit_index;
			/*
			 * Parse through the event mask table and set the
			 * relevant byte & bit combination
			 */
			if (set_mask)
				*(event_mask_ptr + byte_index) |= byte_mask;
			else
				*(event_mask_ptr + byte_index) &= ~byte_mask;
			/* add to cumulative mask */
			update_dci_cumulative_event_mask(byte_index, byte_mask);
			temp += sizeof(int);
			read_len += sizeof(int);
			count++;
			ret = DIAG_DCI_NO_ERROR;
		}
		/* send updated mask to userspace clients */
		diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
		/* send updated mask to peripherals */
		ret = diag_send_dci_event_mask();
	} else {
		pr_alert("diag: Incorrect DCI transaction\n");
	}
	return ret;
}
struct diag_dci_client_tbl *diag_dci_get_client_entry()
{
	return __diag_dci_get_client_entry(current->tgid);
}
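
/*
 * Recompute one byte of the cumulative event mask: a bit is kept set
 * if at least one registered client still has it set, and cleared
 * otherwise. offset here is the byte index into the event mask.
 */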
void update_dci_cumulative_event_mask(int offset, uint8_t byte_mask)
{
	uint8_t *event_mask_ptr;
	uint8_t *update_ptr = dci_cumulative_event_mask;
	struct list_head *start, *temp;
	struct diag_dci_client_tbl *entry = NULL;
	bool is_set = false;

	mutex_lock(&dci_event_mask_mutex);
	update_ptr += offset;
	list_for_each_safe(start, temp, &driver->dci_client_list) {
		entry = list_entry(start, struct diag_dci_client_tbl, track);
		event_mask_ptr = entry->dci_event_mask;
		event_mask_ptr += offset;
		if ((*event_mask_ptr & byte_mask) == byte_mask) {
			is_set = true;
			/* break even if one client has the event mask set */
			break;
		}
	}
	if (is_set == false)
		*update_ptr &= ~byte_mask;
	else
		*update_ptr |= byte_mask;
	mutex_unlock(&dci_event_mask_mutex);
}
void diag_dci_invalidate_cumulative_event_mask()
{
	int i = 0;
	struct list_head *start, *temp;
	struct diag_dci_client_tbl *entry = NULL;
	uint8_t *update_ptr, *event_mask_ptr;
	update_ptr = dci_cumulative_event_mask;

	mutex_lock(&dci_event_mask_mutex);
	/*
	 * Rebuild the cumulative mask from scratch; without clearing it
	 * first, bits belonging to clients that have since deregistered
	 * would never be dropped.
	 */
	create_dci_event_mask_tbl(update_ptr);
	list_for_each_safe(start, temp, &driver->dci_client_list) {
		entry = list_entry(start, struct diag_dci_client_tbl, track);
		event_mask_ptr = entry->dci_event_mask;
		for (i = 0; i < DCI_EVENT_MASK_SIZE; i++)
			*(update_ptr+i) |= *(event_mask_ptr+i);
	}
	mutex_unlock(&dci_event_mask_mutex);
}
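
/*
 * Send the cumulative event mask to the peripherals over the control
 * channel. The payload is the diag_ctrl_event_mask header followed by
 * DCI_EVENT_MASK_SIZE bytes of mask; event_config is set to 1 only
 * when at least one event bit is enabled.
 */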
int diag_send_dci_event_mask()
{
	void *buf = driver->buf_event_mask_update;
	int header_size = sizeof(struct diag_ctrl_event_mask);
	int ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR, i;

	mutex_lock(&driver->diag_cntl_mutex);
	/* send event mask update */
	driver->event_mask->cmd_type = DIAG_CTRL_MSG_EVENT_MASK;
	driver->event_mask->data_len = 7 + DCI_EVENT_MASK_SIZE;
	driver->event_mask->stream_id = DCI_MASK_STREAM;
	driver->event_mask->status = 3; /* status for valid mask */
	driver->event_mask->event_config = 0; /* event config */
	driver->event_mask->event_mask_size = DCI_EVENT_MASK_SIZE;
	for (i = 0; i < DCI_EVENT_MASK_SIZE; i++) {
		if (dci_cumulative_event_mask[i] != 0) {
			driver->event_mask->event_config = 1;
			break;
		}
	}
	memcpy(buf, driver->event_mask, header_size);
	memcpy(buf+header_size, dci_cumulative_event_mask, DCI_EVENT_MASK_SIZE);
	for (i = 0; i < NUM_SMD_DCI_CHANNELS; i++) {
		/*
		 * Don't send to a peripheral if its regular channel
		 * is down. It may also mean that the peripheral doesn't
		 * support DCI.
		 */
		if (!driver->smd_dci[i].ch)
			continue;
		err = diag_dci_write_proc(i, DIAG_CNTL_TYPE, buf,
					  header_size + DCI_EVENT_MASK_SIZE);
		if (err != DIAG_DCI_NO_ERROR)
			ret = DIAG_DCI_SEND_DATA_FAIL;
	}
	mutex_unlock(&driver->diag_cntl_mutex);
	return ret;
}
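
/*
 * The DCI log mask is laid out as 16 ranges of 514 bytes each: one
 * equipment id byte, one dirty byte and 512 bytes of mask. offset
 * selects the range and byte_index the byte within it. As with the
 * event mask, a bit stays set in the cumulative mask only while some
 * client still has it set.
 */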
void update_dci_cumulative_log_mask(int offset, unsigned int byte_index,
				    uint8_t byte_mask)
{
	int i;
	uint8_t *update_ptr = dci_cumulative_log_mask;
	uint8_t *log_mask_ptr;
	bool is_set = false;
	struct list_head *start, *temp;
	struct diag_dci_client_tbl *entry = NULL;

	mutex_lock(&dci_log_mask_mutex);
	*update_ptr = 0;
	/* set the equipment IDs */
	for (i = 0; i < 16; i++)
		*(update_ptr + (i*514)) = i;

	update_ptr += offset;
	/* update the dirty bit */
	*(update_ptr+1) = 1;
	update_ptr = update_ptr + byte_index;
	list_for_each_safe(start, temp, &driver->dci_client_list) {
		entry = list_entry(start, struct diag_dci_client_tbl, track);
		log_mask_ptr = entry->dci_log_mask;
		log_mask_ptr = log_mask_ptr + offset + byte_index;
		if ((*log_mask_ptr & byte_mask) == byte_mask) {
			is_set = true;
			/* break even if one client has the log mask set */
			break;
		}
	}
	if (is_set == false)
		*update_ptr &= ~byte_mask;
	else
		*update_ptr |= byte_mask;
	mutex_unlock(&dci_log_mask_mutex);
}
void diag_dci_invalidate_cumulative_log_mask()
{
	int i = 0;
	struct list_head *start, *temp;
	struct diag_dci_client_tbl *entry = NULL;
	uint8_t *update_ptr, *log_mask_ptr;
	update_ptr = dci_cumulative_log_mask;

	mutex_lock(&dci_log_mask_mutex);
	/* reset the table before re-aggregating, as in the event case */
	create_dci_log_mask_tbl(update_ptr);
	list_for_each_safe(start, temp, &driver->dci_client_list) {
		entry = list_entry(start, struct diag_dci_client_tbl, track);
		log_mask_ptr = entry->dci_log_mask;
		for (i = 0; i < DCI_LOG_MASK_SIZE; i++)
			*(update_ptr+i) |= *(log_mask_ptr+i);
	}
	mutex_unlock(&dci_log_mask_mutex);
}
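
/*
 * Push the cumulative log mask to the peripherals, one equipment id
 * range per control packet. A range is only written to a channel when
 * its dirty byte is set, and the dirty byte is cleared once every
 * valid channel has accepted the update.
 */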
int diag_send_dci_log_mask()
{
	void *buf = driver->buf_log_mask_update;
	int header_size = sizeof(struct diag_ctrl_log_mask);
	uint8_t *log_mask_ptr = dci_cumulative_log_mask;
	int i, j, ret = DIAG_DCI_NO_ERROR, err = DIAG_DCI_NO_ERROR;
	int updated;

	mutex_lock(&driver->diag_cntl_mutex);
	for (i = 0; i < 16; i++) {
		updated = 1;
		driver->log_mask->cmd_type = DIAG_CTRL_MSG_LOG_MASK;
		driver->log_mask->num_items = 512;
		driver->log_mask->data_len = 11 + 512;
		driver->log_mask->stream_id = DCI_MASK_STREAM;
		driver->log_mask->status = 3; /* status for valid mask */
		driver->log_mask->equip_id = *log_mask_ptr;
		driver->log_mask->log_mask_size = 512;
		memcpy(buf, driver->log_mask, header_size);
		memcpy(buf+header_size, log_mask_ptr+2, 512);
		/* send only if the dirty byte is set and the channel is valid */
		for (j = 0; j < NUM_SMD_DCI_CHANNELS; j++) {
			/*
			 * Don't send to a peripheral if its regular channel
			 * is down. It may also mean that the peripheral
			 * doesn't support DCI.
			 */
			if (!driver->smd_dci[j].ch)
				continue;
			if (!(*(log_mask_ptr+1)))
				continue;
			err = diag_dci_write_proc(j, DIAG_CNTL_TYPE, buf,
						  header_size +
						  DCI_MAX_ITEMS_PER_LOG_CODE);
			if (err != DIAG_DCI_NO_ERROR) {
				updated = 0;
				ret = DIAG_DCI_SEND_DATA_FAIL;
			}
		}
		if (updated)
			*(log_mask_ptr+1) = 0; /* clear dirty byte */
		log_mask_ptr += 514;
	}
	mutex_unlock(&driver->diag_cntl_mutex);
	return ret;
}
void create_dci_log_mask_tbl(unsigned char *tbl_buf)
{
	uint8_t i;

	if (!tbl_buf)
		return;
	/* create a hard coded table for the log mask with 16 categories */
	for (i = 0; i < 16; i++) {
		*(uint8_t *)tbl_buf = i;
		pr_debug("diag: put value %x at %p\n", i, tbl_buf);
		memset(tbl_buf+1, 0, 513); /* clear dirty byte and mask */
		tbl_buf += 514;
	}
}
void create_dci_event_mask_tbl(unsigned char *tbl_buf)
{
	if (!tbl_buf)
		return;
	memset(tbl_buf, 0, DCI_EVENT_MASK_SIZE);
}
static int diag_dci_probe(struct platform_device *pdev)
{
	int err = 0;
	int index;

	if (pdev->id == SMD_APPS_MODEM) {
		index = MODEM_DATA;
		err = smd_named_open_on_edge("DIAG_2",
					     SMD_APPS_MODEM,
					     &driver->smd_dci[index].ch,
					     &driver->smd_dci[index],
					     diag_smd_notify);
		driver->smd_dci[index].ch_save = driver->smd_dci[index].ch;
		if (err)
			pr_err("diag: In %s, cannot open DCI Modem port, Id = %d, err: %d\n",
			       __func__, pdev->id, err);
	}
	return err;
}

static int diag_dci_cmd_probe(struct platform_device *pdev)
{
	int err = 0;
	int index;

	if (pdev->id == SMD_APPS_MODEM) {
		index = MODEM_DATA;
		err = smd_named_open_on_edge("DIAG_2_CMD",
					     pdev->id,
					     &driver->smd_dci_cmd[index].ch,
					     &driver->smd_dci_cmd[index],
					     diag_smd_notify);
		driver->smd_dci_cmd[index].ch_save =
					driver->smd_dci_cmd[index].ch;
		if (err)
			pr_err("diag: In %s, cannot open DCI Modem CMD port, Id = %d, err: %d\n",
			       __func__, pdev->id, err);
	}
	return err;
}

static int diag_dci_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: suspending...\n");
	return 0;
}

static int diag_dci_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "pm_runtime: resuming...\n");
	return 0;
}

static const struct dev_pm_ops diag_dci_dev_pm_ops = {
	.runtime_suspend = diag_dci_runtime_suspend,
	.runtime_resume = diag_dci_runtime_resume,
};

struct platform_driver msm_diag_dci_driver = {
	.probe = diag_dci_probe,
	.driver = {
		.name = "DIAG_2",
		.owner = THIS_MODULE,
		.pm = &diag_dci_dev_pm_ops,
	},
};

struct platform_driver msm_diag_dci_cmd_driver = {
	.probe = diag_dci_cmd_probe,
	.driver = {
		.name = "DIAG_2_CMD",
		.owner = THIS_MODULE,
		.pm = &diag_dci_dev_pm_ops,
	},
};
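
/*
 * One-time initialization: set up the DCI mutexes, SMD channels,
 * request/client lists and the drain timer, then register the
 * platform drivers. Note the mixed conventions sharing one variable:
 * diag_smd_constructor() appears to return non-zero on success, while
 * platform_driver_register() returns 0 on success.
 */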
int diag_dci_init(void)
{
	int success = 0;
	int i;

	driver->dci_tag = 0;
	driver->dci_client_id = 0;
	driver->num_dci_client = 0;
	mutex_init(&driver->dci_mutex);
	mutex_init(&dci_log_mask_mutex);
	mutex_init(&dci_event_mask_mutex);
	mutex_init(&dci_health_mutex);
	spin_lock_init(&ws_lock);
	for (i = 0; i < NUM_SMD_DCI_CHANNELS; i++) {
		success = diag_smd_constructor(&driver->smd_dci[i], i,
					       SMD_DCI_TYPE);
		if (!success)
			goto err;
	}
	if (driver->supports_separate_cmdrsp) {
		for (i = 0; i < NUM_SMD_DCI_CMD_CHANNELS; i++) {
			success = diag_smd_constructor(&driver->smd_dci_cmd[i],
						       i, SMD_DCI_CMD_TYPE);
			if (!success)
				goto err;
		}
	}
	if (driver->apps_dci_buf == NULL) {
		driver->apps_dci_buf = kzalloc(APPS_BUF_SIZE, GFP_KERNEL);
		if (driver->apps_dci_buf == NULL)
			goto err;
	}
	INIT_LIST_HEAD(&driver->dci_client_list);
	INIT_LIST_HEAD(&driver->dci_req_list);
	driver->diag_dci_wq = create_singlethread_workqueue("diag_dci_wq");
	INIT_WORK(&dci_data_drain_work, dci_data_drain_work_fn);
	success = platform_driver_register(&msm_diag_dci_driver);
	if (success) {
		pr_err("diag: Could not register DCI driver\n");
		goto err;
	}
	if (driver->supports_separate_cmdrsp) {
		success = platform_driver_register(&msm_diag_dci_cmd_driver);
		if (success) {
			pr_err("diag: Could not register DCI cmd driver\n");
			goto err;
		}
	}
	setup_timer(&dci_drain_timer, dci_drain_data, 0);
	return DIAG_DCI_NO_ERROR;
err:
	pr_err("diag: Could not initialize diag DCI buffers\n");
	kfree(driver->apps_dci_buf);
	driver->apps_dci_buf = NULL; /* avoid a dangling pointer */
	for (i = 0; i < NUM_SMD_DCI_CHANNELS; i++)
		diag_smd_destructor(&driver->smd_dci[i]);
	if (driver->supports_separate_cmdrsp)
		for (i = 0; i < NUM_SMD_DCI_CMD_CHANNELS; i++)
			diag_smd_destructor(&driver->smd_dci_cmd[i]);
	if (driver->diag_dci_wq)
		destroy_workqueue(driver->diag_dci_wq);
	mutex_destroy(&driver->dci_mutex);
	mutex_destroy(&dci_log_mask_mutex);
	mutex_destroy(&dci_event_mask_mutex);
	mutex_destroy(&dci_health_mutex);
	return DIAG_DCI_NO_REG;
}
void diag_dci_exit(void)
{
	int i;

	for (i = 0; i < NUM_SMD_DCI_CHANNELS; i++)
		diag_smd_destructor(&driver->smd_dci[i]);
	platform_driver_unregister(&msm_diag_dci_driver);
	if (driver->supports_separate_cmdrsp) {
		for (i = 0; i < NUM_SMD_DCI_CMD_CHANNELS; i++)
			diag_smd_destructor(&driver->smd_dci_cmd[i]);
		platform_driver_unregister(&msm_diag_dci_cmd_driver);
	}
	kfree(driver->apps_dci_buf);
	mutex_destroy(&driver->dci_mutex);
	mutex_destroy(&dci_log_mask_mutex);
	mutex_destroy(&dci_event_mask_mutex);
	mutex_destroy(&dci_health_mutex);
	destroy_workqueue(driver->diag_dci_wq);
}
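
/*
 * Reset the calling client's log mask, then rebuild the cumulative
 * log mask from the remaining client masks and fan the result out to
 * userspace clients and peripherals.
 */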
int diag_dci_clear_log_mask()
{
	int j, k, err = DIAG_DCI_NO_ERROR;
	uint8_t *log_mask_ptr, *update_ptr;
	struct list_head *start, *temp;
	struct diag_dci_client_tbl *entry = NULL;

	entry = diag_dci_get_client_entry();
	if (!entry) {
		pr_err("diag: In %s, invalid client entry\n", __func__);
		return DIAG_DCI_TABLE_ERR;
	}
	mutex_lock(&dci_log_mask_mutex);
	create_dci_log_mask_tbl(entry->dci_log_mask);
	memset(dci_cumulative_log_mask, 0x0, DCI_LOG_MASK_SIZE);
	list_for_each_safe(start, temp, &driver->dci_client_list) {
		entry = list_entry(start, struct diag_dci_client_tbl, track);
		update_ptr = dci_cumulative_log_mask;
		log_mask_ptr = entry->dci_log_mask;
		for (j = 0; j < 16; j++) {
			*update_ptr = j;
			*(update_ptr + 1) = 1;
			update_ptr += 2;
			log_mask_ptr += 2;
			/*
			 * Each range carries 512 mask bytes after the equip
			 * id and dirty bytes; iterating 513 bytes here would
			 * walk into the next range and overrun the table.
			 */
			for (k = 0; k < 512; k++) {
				*update_ptr |= *log_mask_ptr;
				update_ptr++;
				log_mask_ptr++;
			}
		}
	}
	mutex_unlock(&dci_log_mask_mutex);
	/* send updated mask to userspace clients */
	diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
	/* send updated mask to peripherals */
	err = diag_send_dci_log_mask();
	return err;
}
int diag_dci_clear_event_mask()
{
	int j, err = DIAG_DCI_NO_ERROR;
	uint8_t *event_mask_ptr, *update_ptr;
	struct list_head *start, *temp;
	struct diag_dci_client_tbl *entry = NULL;

	entry = diag_dci_get_client_entry();
	if (!entry) {
		pr_err("diag: In %s, invalid client entry\n", __func__);
		return DIAG_DCI_TABLE_ERR;
	}
	mutex_lock(&dci_event_mask_mutex);
	memset(entry->dci_event_mask, 0x0, DCI_EVENT_MASK_SIZE);
	memset(dci_cumulative_event_mask, 0x0, DCI_EVENT_MASK_SIZE);
	update_ptr = dci_cumulative_event_mask;
	list_for_each_safe(start, temp, &driver->dci_client_list) {
		entry = list_entry(start, struct diag_dci_client_tbl, track);
		event_mask_ptr = entry->dci_event_mask;
		for (j = 0; j < DCI_EVENT_MASK_SIZE; j++)
			*(update_ptr + j) |= *(event_mask_ptr + j);
	}
	mutex_unlock(&dci_event_mask_mutex);
	/* send updated mask to userspace clients */
	diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
	/* send updated mask to peripherals */
	err = diag_send_dci_event_mask();
	return err;
}
int diag_dci_query_log_mask(uint16_t log_code)
{
	return __diag_dci_query_log_mask(diag_dci_get_client_entry(),
					 log_code);
}

int diag_dci_query_event_mask(uint16_t event_id)
{
	return __diag_dci_query_event_mask(diag_dci_get_client_entry(),
					   event_id);
}
uint8_t diag_dci_get_cumulative_real_time()
{
	uint8_t real_time = MODE_NONREALTIME;
	struct list_head *start, *temp;
	struct diag_dci_client_tbl *entry = NULL;

	list_for_each_safe(start, temp, &driver->dci_client_list) {
		entry = list_entry(start, struct diag_dci_client_tbl, track);
		if (entry->real_time == MODE_REALTIME) {
			real_time = MODE_REALTIME;
			break;
		}
	}
	return real_time;
}
int diag_dci_set_real_time(uint8_t real_time)
{
	struct diag_dci_client_tbl *entry = NULL;

	entry = diag_dci_get_client_entry();
	if (!entry) {
		pr_err("diag: In %s, invalid client entry\n", __func__);
		return 0;
	}
	entry->real_time = real_time;
	return 1;
}

void diag_dci_try_activate_wakeup_source()
{
	spin_lock_irqsave(&ws_lock, ws_lock_flags);
	pm_wakeup_event(driver->diag_dev, DCI_WAKEUP_TIMEOUT);
	pm_stay_awake(driver->diag_dev);
	spin_unlock_irqrestore(&ws_lock, ws_lock_flags);
}

void diag_dci_try_deactivate_wakeup_source()
{
	spin_lock_irqsave(&ws_lock, ws_lock_flags);
	pm_relax(driver->diag_dev);
	spin_unlock_irqrestore(&ws_lock, ws_lock_flags);
}
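
/*
 * Register the calling process as a DCI client: allocate its table
 * entry, per-client log/event masks and per-peripheral buffers, add
 * it to the client list and vote the DCI processor up for the first
 * client. Returns the new client id, or DIAG_DCI_NO_REG on failure.
 */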
int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry)
{
	int i, err = 0;
	struct diag_dci_client_tbl *new_entry = NULL;
	struct diag_dci_buf_peripheral_t *proc_buf = NULL;

	if (!reg_entry)
		return DIAG_DCI_NO_REG;
	if (driver->dci_state == DIAG_DCI_NO_REG)
		return DIAG_DCI_NO_REG;
	if (driver->num_dci_client >= MAX_DCI_CLIENTS)
		return DIAG_DCI_NO_REG;
	new_entry = kzalloc(sizeof(struct diag_dci_client_tbl), GFP_KERNEL);
	if (new_entry == NULL) {
		pr_err("diag: unable to alloc memory\n");
		return DIAG_DCI_NO_REG;
	}

	mutex_lock(&driver->dci_mutex);
	if (!(driver->num_dci_client)) {
		for (i = 0; i < NUM_SMD_DCI_CHANNELS; i++)
			driver->smd_dci[i].in_busy_1 = 0;
		if (driver->supports_separate_cmdrsp)
			for (i = 0; i < NUM_SMD_DCI_CMD_CHANNELS; i++)
				driver->smd_dci_cmd[i].in_busy_1 = 0;
	}
	new_entry->client = current;
	new_entry->client_info.notification_list =
				reg_entry->notification_list;
	new_entry->client_info.signal_type =
				reg_entry->signal_type;
	new_entry->real_time = MODE_REALTIME;
	new_entry->in_service = 0;
	INIT_LIST_HEAD(&new_entry->list_write_buf);
	mutex_init(&new_entry->write_buf_mutex);
	new_entry->dci_log_mask = kzalloc(DCI_LOG_MASK_SIZE, GFP_KERNEL);
	if (!new_entry->dci_log_mask) {
		pr_err("diag: Unable to create log mask for client, %d\n",
		       driver->dci_client_id);
		goto fail_alloc;
	}
	create_dci_log_mask_tbl(new_entry->dci_log_mask);
	new_entry->dci_event_mask = kzalloc(DCI_EVENT_MASK_SIZE, GFP_KERNEL);
	if (!new_entry->dci_event_mask) {
		pr_err("diag: Unable to create event mask for client, %d\n",
		       driver->dci_client_id);
		goto fail_alloc;
	}
	create_dci_event_mask_tbl(new_entry->dci_event_mask);
	for (i = 0; i < NUM_DCI_PROC; i++) {
		proc_buf = &new_entry->buffers[i];
		mutex_init(&proc_buf->health_mutex);
		mutex_init(&proc_buf->buf_mutex);
		proc_buf->health.dropped_events = 0;
		proc_buf->health.dropped_logs = 0;
		proc_buf->health.received_events = 0;
		proc_buf->health.received_logs = 0;
		proc_buf->buf_primary = kzalloc(
					sizeof(struct diag_dci_buffer_t),
					GFP_KERNEL);
		if (!proc_buf->buf_primary)
			goto fail_alloc;
		proc_buf->buf_cmd = kzalloc(sizeof(struct diag_dci_buffer_t),
					    GFP_KERNEL);
		if (!proc_buf->buf_cmd)
			goto fail_alloc;
		err = diag_dci_init_buffer(proc_buf->buf_primary,
					   DCI_BUF_PRIMARY);
		if (err)
			goto fail_alloc;
		err = diag_dci_init_buffer(proc_buf->buf_cmd, DCI_BUF_CMD);
		if (err)
			goto fail_alloc;
		proc_buf->buf_curr = proc_buf->buf_primary;
	}
	list_add_tail(&new_entry->track, &driver->dci_client_list);
	driver->dci_client_id++;
	new_entry->client_info.client_id = driver->dci_client_id;
	reg_entry->client_id = driver->dci_client_id;
	driver->num_dci_client++;
	if (driver->num_dci_client == 1)
		diag_update_proc_vote(DIAG_PROC_DCI, VOTE_UP);
	queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
	mutex_unlock(&driver->dci_mutex);
	return driver->dci_client_id;
fail_alloc:
	if (new_entry) {
		for (i = 0; i < NUM_DCI_PROC; i++) {
			proc_buf = &new_entry->buffers[i];
			mutex_destroy(&proc_buf->health_mutex);
			/*
			 * The buffers may only be partially allocated at
			 * this point; check each pointer before touching
			 * its data mutex or payload.
			 */
			if (proc_buf->buf_primary) {
				mutex_destroy(
					&proc_buf->buf_primary->data_mutex);
				kfree(proc_buf->buf_primary->data);
			}
			kfree(proc_buf->buf_primary);
			if (proc_buf->buf_cmd) {
				mutex_destroy(&proc_buf->buf_cmd->data_mutex);
				kfree(proc_buf->buf_cmd->data);
			}
			kfree(proc_buf->buf_cmd);
		}
		kfree(new_entry->dci_event_mask);
		kfree(new_entry->dci_log_mask);
	}
	kfree(new_entry);
	/* no reference on current was taken above, so none is dropped here */
	mutex_unlock(&driver->dci_mutex);
	return DIAG_DCI_NO_REG;
}
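
/*
 * Tear down the calling client: unlink it from the client list, clear
 * its masks, resend the rebuilt cumulative masks, flush any pending
 * write buffers and release the per-peripheral buffers before
 * re-evaluating the DCI real-time vote.
 */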
int diag_dci_deinit_client()
{
	int ret = DIAG_DCI_NO_ERROR, real_time = MODE_REALTIME, i, peripheral;
	struct diag_dci_buf_peripheral_t *proc_buf = NULL;
	struct diag_dci_client_tbl *entry = diag_dci_get_client_entry();
	struct diag_dci_buffer_t *buf_entry, *temp;
	struct list_head *start, *req_temp;
	struct dci_pkt_req_entry_t *req_entry = NULL;
	struct diag_smd_info *smd_info = NULL;

	if (!entry)
		return DIAG_DCI_NOT_SUPPORTED;
	mutex_lock(&driver->dci_mutex);
	/*
	 * Remove the entry from the list before freeing the buffers
	 * to ensure that we don't have any invalid access.
	 */
	list_del(&entry->track);
	driver->num_dci_client--;
	/*
	 * Clear the client's log and event masks, update the cumulative
	 * masks and send the masks to the peripherals. Each mask type is
	 * paired with its own send routine.
	 */
	kfree(entry->dci_log_mask);
	diag_dci_invalidate_cumulative_log_mask();
	diag_update_userspace_clients(DCI_LOG_MASKS_TYPE);
	ret = diag_send_dci_log_mask();
	if (ret != DIAG_DCI_NO_ERROR) {
		mutex_unlock(&driver->dci_mutex);
		return ret;
	}
	kfree(entry->dci_event_mask);
	diag_dci_invalidate_cumulative_event_mask();
	diag_update_userspace_clients(DCI_EVENT_MASKS_TYPE);
	ret = diag_send_dci_event_mask();
	if (ret != DIAG_DCI_NO_ERROR) {
		mutex_unlock(&driver->dci_mutex);
		return ret;
	}
	list_for_each_safe(start, req_temp, &driver->dci_req_list) {
		req_entry = list_entry(start, struct dci_pkt_req_entry_t,
				       track);
		if (req_entry->pid == current->tgid) {
			list_del(&req_entry->track);
			kfree(req_entry);
		}
	}

	/* Clean up any buffer that is pending write */
	mutex_lock(&entry->write_buf_mutex);
	list_for_each_entry_safe(buf_entry, temp, &entry->list_write_buf,
				 buf_track) {
		list_del(&buf_entry->buf_track);
		if (buf_entry->buf_type == DCI_BUF_SECONDARY) {
			mutex_lock(&buf_entry->data_mutex);
			diagmem_free(driver, buf_entry->data, POOL_TYPE_DCI);
			buf_entry->data = NULL;
			mutex_unlock(&buf_entry->data_mutex);
			kfree(buf_entry);
		} else if (buf_entry->buf_type == DCI_BUF_CMD) {
			peripheral = buf_entry->data_source;
			if (peripheral == APPS_DATA)
				continue;
			mutex_lock(&buf_entry->data_mutex);
			smd_info = driver->separate_cmdrsp[peripheral] ?
					&driver->smd_dci_cmd[peripheral] :
					&driver->smd_dci[peripheral];
			smd_info->in_busy_1 = 0;
			mutex_unlock(&buf_entry->data_mutex);
		}
		diag_dci_try_deactivate_wakeup_source();
	}
	mutex_unlock(&entry->write_buf_mutex);

	for (i = 0; i < NUM_DCI_PROC; i++) {
		proc_buf = &entry->buffers[i];
		buf_entry = proc_buf->buf_curr;
		mutex_lock(&proc_buf->buf_mutex);
		/* Clean up any active secondary buffer from the mempool */
		if (buf_entry && buf_entry->buf_type == DCI_BUF_SECONDARY) {
			mutex_lock(&buf_entry->data_mutex);
			diagmem_free(driver, buf_entry->data, POOL_TYPE_DCI);
			buf_entry->data = NULL;
			mutex_unlock(&buf_entry->data_mutex);
			mutex_destroy(&buf_entry->data_mutex);
			kfree(buf_entry);
		}
		mutex_lock(&proc_buf->buf_primary->data_mutex);
		kfree(proc_buf->buf_primary->data);
		mutex_unlock(&proc_buf->buf_primary->data_mutex);
		mutex_lock(&proc_buf->buf_cmd->data_mutex);
		kfree(proc_buf->buf_cmd->data);
		mutex_unlock(&proc_buf->buf_cmd->data_mutex);
		mutex_destroy(&proc_buf->health_mutex);
		mutex_destroy(&proc_buf->buf_primary->data_mutex);
		mutex_destroy(&proc_buf->buf_cmd->data_mutex);
		kfree(proc_buf->buf_primary);
		kfree(proc_buf->buf_cmd);
		mutex_unlock(&proc_buf->buf_mutex);
	}
	mutex_destroy(&entry->write_buf_mutex);
	kfree(entry);

	if (driver->num_dci_client == 0) {
		diag_update_proc_vote(DIAG_PROC_DCI, VOTE_DOWN);
	} else {
		real_time = diag_dci_get_cumulative_real_time();
		diag_update_real_time_vote(DIAG_PROC_DCI, real_time);
	}
	queue_work(driver->diag_real_time_wq, &driver->diag_real_time_work);
	mutex_unlock(&driver->dci_mutex);
	return DIAG_DCI_NO_ERROR;
}
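
/*
 * Write a DCI data or control packet to a peripheral's SMD channel,
 * retrying up to three times with a short sleep between attempts when
 * the channel cannot take the full payload.
 */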
int diag_dci_write_proc(int peripheral, int pkt_type, char *buf, int len)
{
	struct diag_smd_info *smd_info = NULL;
	int wr_size = 0, retry = 0, err = -EAGAIN, timer = 0;

	/* valid peripheral indices are 0..NUM_SMD_DCI_CHANNELS-1 */
	if (!buf || peripheral < 0 || peripheral >= NUM_SMD_DCI_CHANNELS ||
	    len < 0) {
		pr_err("diag: In %s, invalid data 0x%p, peripheral: %d, len: %d\n",
		       __func__, buf, peripheral, len);
		return -EINVAL;
	}

	if (pkt_type == DIAG_DATA_TYPE) {
		if (peripheral < NUM_SMD_DCI_CMD_CHANNELS)
			smd_info = &driver->smd_dci_cmd[peripheral];
		/*
		 * This peripheral doesn't support a separate channel for
		 * command responses.
		 */
		if (!smd_info)
			smd_info = &driver->smd_dci[peripheral];
	} else if (pkt_type == DIAG_CNTL_TYPE) {
		smd_info = &driver->smd_cntl[peripheral];
	} else {
		pr_err("diag: Invalid DCI pkt type in %s\n", __func__);
		return -EINVAL;
	}
	if (!smd_info || !smd_info->ch)
		return -EINVAL;

	while (retry < 3) {
		mutex_lock(&smd_info->smd_ch_mutex);
		wr_size = smd_write(smd_info->ch, buf, len);
		if (wr_size == len) {
			pr_debug("diag: successfully wrote pkt_type %d of len %d to %d in trial %d\n",
				 pkt_type, len, peripheral, (retry+1));
			err = DIAG_DCI_NO_ERROR;
			mutex_unlock(&smd_info->smd_ch_mutex);
			break;
		}
		pr_debug("diag: cannot write pkt_type %d of len %d to %d in trial %d\n",
			 pkt_type, len, peripheral, (retry+1));
		retry++;
		mutex_unlock(&smd_info->smd_ch_mutex);
		/*
		 * Sleep for some time before retrying. The delay of 2000 us
		 * was determined empirically as the best value to use.
		 */
		for (timer = 0; timer < 5; timer++)
			usleep(2000);
	}
	return err;
}
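
/*
 * Copy the health statistics (dropped/received logs and events) for a
 * single peripheral, or the sum across all of them when proc is
 * ALL_PROC, into the caller's structure; reset_status additionally
 * zeroes the counters under the health mutex.
 */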
int diag_dci_copy_health_stats(struct diag_dci_health_stats *stats, int proc)
{
	struct diag_dci_client_tbl *entry = NULL;
	struct diag_dci_health_t *health = NULL;
	int i;

	if (!stats)
		return -EINVAL;
	if (proc < ALL_PROC || proc > APPS_DATA)
		return -EINVAL;
	entry = diag_dci_get_client_entry();
	if (!entry)
		return DIAG_DCI_NOT_SUPPORTED;

	stats->stats.dropped_logs = 0;
	stats->stats.dropped_events = 0;
	stats->stats.received_logs = 0;
	stats->stats.received_events = 0;

	if (proc != ALL_PROC) {
		health = &entry->buffers[proc].health;
		stats->stats.dropped_logs = health->dropped_logs;
		stats->stats.dropped_events = health->dropped_events;
		stats->stats.received_logs = health->received_logs;
		stats->stats.received_events = health->received_events;
		if (stats->reset_status) {
			mutex_lock(&entry->buffers[proc].health_mutex);
			health->dropped_logs = 0;
			health->dropped_events = 0;
			health->received_logs = 0;
			health->received_events = 0;
			mutex_unlock(&entry->buffers[proc].health_mutex);
		}
		return DIAG_DCI_NO_ERROR;
	}

	for (i = 0; i < NUM_DCI_PROC; i++) {
		health = &entry->buffers[i].health;
		stats->stats.dropped_logs += health->dropped_logs;
		stats->stats.dropped_events += health->dropped_events;
		stats->stats.received_logs += health->received_logs;
		stats->stats.received_events += health->received_events;
		if (stats->reset_status) {
			mutex_lock(&entry->buffers[i].health_mutex);
			health->dropped_logs = 0;
			health->dropped_events = 0;
			health->received_logs = 0;
			health->received_events = 0;
			mutex_unlock(&entry->buffers[i].health_mutex);
		}
	}
	return DIAG_DCI_NO_ERROR;
}