/* drivers/tty/smux_test.c
 *
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/termios.h>
#include <linux/sched.h>
#include <linux/smux.h>
#include <mach/subsystem_restart.h>
#include "smux_private.h"

#define DEBUG_BUFMAX 4096

#define RED_ZONE_SIZE 16
#define RED_ZONE_PRE_CH 0xAB
#define RED_ZONE_POS_CH 0xBA

#define SMUX_REMOTE_INACTIVITY_TIME_MS 50
#define SMUX_REMOTE_DELAY_TIME_MS 250
/**
 * Unit test assertion for logging test cases.
 *
 * @a lval
 * @b rval
 * @cmp comparison operator
 *
 * If (@a cmp @b) is not true, the assertion fails: the function and line
 * number of the failure are logged along with the values of @a and @b,
 * @failed is set, and the macro issues a break, so it must be used inside
 * a loop (typically the enclosing "while (!failed)" loop).
 *
 * Assumes that the following local variables exist:
 * @buf - buffer to write failure message to
 * @i - number of bytes written to buffer
 * @max - maximum size of the buffer
 * @failed - set to true if test fails
 */
#define UT_ASSERT_INT(a, cmp, b) \
{ \
	int a_tmp = (a); \
	int b_tmp = (b); \
	if (!((a_tmp)cmp(b_tmp))) { \
		i += scnprintf(buf + i, max - i, \
			"%s:%d Fail: " #a "(%d) " #cmp " " #b "(%d)\n", \
				__func__, __LINE__, \
				a_tmp, b_tmp); \
		failed = 1; \
		break; \
	} \
}

#define UT_ASSERT_PTR(a, cmp, b) \
{ \
	void *a_tmp = (a); \
	void *b_tmp = (b); \
	if (!((a_tmp)cmp(b_tmp))) { \
		i += scnprintf(buf + i, max - i, \
			"%s:%d Fail: " #a "(%p) " #cmp " " #b "(%p)\n", \
				__func__, __LINE__, \
				a_tmp, b_tmp); \
		failed = 1; \
		break; \
	} \
}

#define UT_ASSERT_UINT(a, cmp, b) \
{ \
	unsigned a_tmp = (a); \
	unsigned b_tmp = (b); \
	if (!((a_tmp)cmp(b_tmp))) { \
		i += scnprintf(buf + i, max - i, \
			"%s:%d Fail: " #a "(%u) " #cmp " " #b "(%u)\n", \
				__func__, __LINE__, \
				a_tmp, b_tmp); \
		failed = 1; \
		break; \
	} \
}
/**
 * In-range unit test assertion for test cases.
 *
 * @a lval
 * @minv Minimum value
 * @maxv Maximum value
 *
 * Assertion fails if @a is outside the inclusive range [@minv, @maxv],
 * i.e. if (@a < @minv) or (@a > @maxv). In the failure case, the macro
 * logs the function and line number where the error occurred along
 * with the values of @a, @minv, and @maxv.
 *
 * Assumes that the following local variables exist:
 * @buf - buffer to write failure message to
 * @i - number of bytes written to buffer
 * @max - maximum size of the buffer
 * @failed - set to true if test fails
 */
#define UT_ASSERT_INT_IN_RANGE(a, minv, maxv) \
{ \
	int a_tmp = (a); \
	int minv_tmp = (minv); \
	int maxv_tmp = (maxv); \
	if (((a_tmp) < (minv_tmp)) || ((a_tmp) > (maxv_tmp))) { \
		i += scnprintf(buf + i, max - i, \
			"%s:%d Fail: " #a "(%d) < " #minv "(%d) or " \
				#a "(%d) > " #maxv "(%d)\n", \
				__func__, __LINE__, \
				a_tmp, minv_tmp, a_tmp, maxv_tmp); \
		failed = 1; \
		break; \
	} \
}
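
/*
 * Illustrative sketch (not part of the driver): how the UT_ASSERT_*
 * macros are meant to be used. The hypothetical test below shows the
 * local-variable contract (@buf, @i, @max, @failed) and the enclosing
 * single-pass "while (!failed)" loop that the break inside the macros
 * relies on.
 *
 *	static int example_test(char *buf, int max)
 *	{
 *		int i = 0;
 *		int failed = 0;
 *
 *		while (!failed) {
 *			UT_ASSERT_INT(1 + 1, ==, 2);
 *			UT_ASSERT_INT_IN_RANGE(5, 1, 10);
 *			break;
 *		}
 *		if (failed)
 *			i += scnprintf(buf + i, max - i, "\tFailed\n");
 *		return i;
 *	}
 */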
static unsigned char test_array[] = {1, 1, 2, 3, 5, 8, 13, 21, 34, 55,
					89, 144, 233};

/* when 1, forces failure of get_rx_buffer_mock function */
static int get_rx_buffer_mock_fail;

/* Used for mapping local to remote TIOCM signals */
struct tiocm_test_vector {
	uint32_t input;
	uint32_t set_old;
	uint32_t set_new;
	uint32_t clr_old;
};

/**
 * Allocates a new buffer for SMUX for every call.
 */
static int get_rx_buffer(void *priv, void **pkt_priv, void **buffer, int size)
{
	void *rx_buf;

	rx_buf = kmalloc(size, GFP_KERNEL);
	*pkt_priv = (void *)0x1234;
	*buffer = rx_buf;

	return 0;
}

/* Test vector for packet tests. */
struct test_vector {
	const char *data;
	const unsigned len;
};

/* Mock object metadata for SMUX_READ_DONE event */
struct mock_read_event {
	struct list_head list;
	struct smux_meta_read meta;
};

/* Mock object metadata for SMUX_WRITE_DONE event */
struct mock_write_event {
	struct list_head list;
	struct smux_meta_write meta;
};

/* Mock object metadata for get_rx_buffer failure event */
struct mock_get_rx_buff_event {
	struct list_head list;
	int size;
	unsigned long jiffies;
};

/* Mock object for all SMUX callback events */
struct smux_mock_callback {
	int cb_count;
	struct completion cb_completion;
	spinlock_t lock;

	/* status changes */
	int event_connected;
	int event_disconnected;
	int event_disconnected_ssr;
	int event_low_wm;
	int event_high_wm;
	int event_rx_retry_high_wm;
	int event_rx_retry_low_wm;
	int event_local_closed;
	int event_remote_closed;

	/* TIOCM changes */
	int event_tiocm;
	struct smux_meta_tiocm tiocm_meta;

	/* read event data */
	int event_read_done;
	int event_read_failed;
	struct list_head read_events;

	/* read retry data */
	int get_rx_buff_retry_count;
	struct list_head get_rx_buff_retry_events;

	/* write event data */
	int event_write_done;
	int event_write_failed;
	struct list_head write_events;
};

static int get_rx_buffer_mock(void *priv, void **pkt_priv,
		void **buffer, int size);

/**
 * Initialize mock callback data. Only call once.
 *
 * @cb Mock callback data
 */
static void mock_cb_data_init(struct smux_mock_callback *cb)
{
	init_completion(&cb->cb_completion);
	spin_lock_init(&cb->lock);
	INIT_LIST_HEAD(&cb->read_events);
	INIT_LIST_HEAD(&cb->get_rx_buff_retry_events);
	INIT_LIST_HEAD(&cb->write_events);
}
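
/*
 * Usage sketch (illustrative, mirroring the pattern used by the tests
 * below): each test keeps a static smux_mock_callback, initializes it
 * once with mock_cb_data_init(), and calls mock_cb_data_reset() before
 * and after the test body:
 *
 *	static struct smux_mock_callback cb_data;
 *	static int cb_initialized;
 *
 *	if (!cb_initialized)
 *		mock_cb_data_init(&cb_data);
 *	mock_cb_data_reset(&cb_data);
 *	... run test, passing &cb_data as priv to msm_smux_open() ...
 *	mock_cb_data_reset(&cb_data);
 */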
/**
 * Reset mock callback data to default values.
 *
 * @cb Mock callback data
 *
 * All packets are freed and counters reset to zero.
 */
static void mock_cb_data_reset(struct smux_mock_callback *cb)
{
	cb->cb_count = 0;
	INIT_COMPLETION(cb->cb_completion);
	cb->event_connected = 0;
	cb->event_disconnected = 0;
	cb->event_disconnected_ssr = 0;
	cb->event_low_wm = 0;
	cb->event_high_wm = 0;
	cb->event_rx_retry_high_wm = 0;
	cb->event_rx_retry_low_wm = 0;
	cb->event_local_closed = 0;
	cb->event_remote_closed = 0;

	cb->event_tiocm = 0;
	cb->tiocm_meta.tiocm_old = 0;
	cb->tiocm_meta.tiocm_new = 0;

	cb->event_read_done = 0;
	cb->event_read_failed = 0;
	while (!list_empty(&cb->read_events)) {
		struct mock_read_event *meta;
		meta = list_first_entry(&cb->read_events,
				struct mock_read_event,
				list);
		kfree(meta->meta.buffer);
		list_del(&meta->list);
		kfree(meta);
	}

	cb->get_rx_buff_retry_count = 0;
	while (!list_empty(&cb->get_rx_buff_retry_events)) {
		struct mock_get_rx_buff_event *meta;
		meta = list_first_entry(&cb->get_rx_buff_retry_events,
				struct mock_get_rx_buff_event,
				list);
		list_del(&meta->list);
		kfree(meta);
	}

	cb->event_write_done = 0;
	cb->event_write_failed = 0;
	while (!list_empty(&cb->write_events)) {
		struct mock_write_event *meta;
		meta = list_first_entry(&cb->write_events,
				struct mock_write_event,
				list);
		list_del(&meta->list);
		kfree(meta);
	}
}

/**
 * Dump the values of the mock callback data for debug purposes.
 *
 * @cb Mock callback data
 * @buf Print buffer
 * @max Maximum number of characters to print
 *
 * @returns Number of characters added to buffer
 */
static int mock_cb_data_print(const struct smux_mock_callback *cb,
		char *buf, int max)
{
	int i = 0;

	i += scnprintf(buf + i, max - i,
		"\tcb_count=%d\n"
		"\tcb_completion.done=%d\n"
		"\tevent_connected=%d\n"
		"\tevent_disconnected=%d\n"
		"\tevent_disconnected_ssr=%d\n"
		"\tevent_low_wm=%d\n"
		"\tevent_high_wm=%d\n"
		"\tevent_rx_retry_high_wm=%d\n"
		"\tevent_rx_retry_low_wm=%d\n"
		"\tevent_local_closed=%d\n"
		"\tevent_remote_closed=%d\n"
		"\tevent_tiocm=%d\n"
		"\tevent_read_done=%d\n"
		"\tevent_read_failed=%d\n"
		"\tread_events empty=%d\n"
		"\tget_rx_retry=%d\n"
		"\tget_rx_retry_events empty=%d\n"
		"\tevent_write_done=%d\n"
		"\tevent_write_failed=%d\n"
		"\twrite_events empty=%d\n",
		cb->cb_count,
		cb->cb_completion.done,
		cb->event_connected,
		cb->event_disconnected,
		cb->event_disconnected_ssr,
		cb->event_low_wm,
		cb->event_high_wm,
		cb->event_rx_retry_high_wm,
		cb->event_rx_retry_low_wm,
		cb->event_local_closed,
		cb->event_remote_closed,
		cb->event_tiocm,
		cb->event_read_done,
		cb->event_read_failed,
		list_empty(&cb->read_events),
		cb->get_rx_buff_retry_count,
		list_empty(&cb->get_rx_buff_retry_events),
		cb->event_write_done,
		cb->event_write_failed,
		list_empty(&cb->write_events)
		);

	return i;
}
/**
 * Mock object event callback. Used to log events for analysis in the
 * unit tests.
 */
static void smux_mock_cb(void *priv, int event, const void *metadata)
{
	struct smux_mock_callback *cb_data_ptr;
	struct mock_write_event *write_event_meta;
	struct mock_read_event *read_event_meta;
	unsigned long flags;

	cb_data_ptr = (struct smux_mock_callback *)priv;
	if (cb_data_ptr == NULL) {
		pr_err("%s: invalid private data\n", __func__);
		return;
	}

	switch (event) {
	case SMUX_CONNECTED:
		spin_lock_irqsave(&cb_data_ptr->lock, flags);
		++cb_data_ptr->event_connected;
		spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
		break;

	case SMUX_DISCONNECTED:
		spin_lock_irqsave(&cb_data_ptr->lock, flags);
		++cb_data_ptr->event_disconnected;
		cb_data_ptr->event_disconnected_ssr =
			((struct smux_meta_disconnected *)metadata)->is_ssr;
		spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
		break;

	case SMUX_READ_DONE:
		read_event_meta = kmalloc(sizeof(struct mock_read_event),
						GFP_KERNEL);
		spin_lock_irqsave(&cb_data_ptr->lock, flags);
		++cb_data_ptr->event_read_done;
		if (read_event_meta) {
			read_event_meta->meta =
				*(struct smux_meta_read *)metadata;
			list_add_tail(&read_event_meta->list,
					&cb_data_ptr->read_events);
		}
		spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
		break;

	case SMUX_READ_FAIL:
		read_event_meta = kmalloc(sizeof(struct mock_read_event),
						GFP_KERNEL);
		spin_lock_irqsave(&cb_data_ptr->lock, flags);
		++cb_data_ptr->event_read_failed;
		if (read_event_meta) {
			if (metadata)
				read_event_meta->meta =
					*(struct smux_meta_read *)metadata;
			else
				memset(&read_event_meta->meta, 0x0,
						sizeof(struct smux_meta_read));
			list_add_tail(&read_event_meta->list,
					&cb_data_ptr->read_events);
		}
		spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
		break;

	case SMUX_WRITE_DONE:
		write_event_meta = kmalloc(sizeof(struct mock_write_event),
						GFP_KERNEL);
		spin_lock_irqsave(&cb_data_ptr->lock, flags);
		++cb_data_ptr->event_write_done;
		if (write_event_meta) {
			write_event_meta->meta =
				*(struct smux_meta_write *)metadata;
			list_add_tail(&write_event_meta->list,
					&cb_data_ptr->write_events);
		}
		spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
		break;

	case SMUX_WRITE_FAIL:
		write_event_meta = kmalloc(sizeof(struct mock_write_event),
						GFP_KERNEL);
		spin_lock_irqsave(&cb_data_ptr->lock, flags);
		++cb_data_ptr->event_write_failed;
		if (write_event_meta) {
			write_event_meta->meta =
				*(struct smux_meta_write *)metadata;
			list_add_tail(&write_event_meta->list,
					&cb_data_ptr->write_events);
		}
		spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
		break;

	case SMUX_LOW_WM_HIT:
		spin_lock_irqsave(&cb_data_ptr->lock, flags);
		++cb_data_ptr->event_low_wm;
		spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
		break;

	case SMUX_HIGH_WM_HIT:
		spin_lock_irqsave(&cb_data_ptr->lock, flags);
		++cb_data_ptr->event_high_wm;
		spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
		break;

	case SMUX_RX_RETRY_HIGH_WM_HIT:
		spin_lock_irqsave(&cb_data_ptr->lock, flags);
		++cb_data_ptr->event_rx_retry_high_wm;
		spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
		break;

	case SMUX_RX_RETRY_LOW_WM_HIT:
		spin_lock_irqsave(&cb_data_ptr->lock, flags);
		++cb_data_ptr->event_rx_retry_low_wm;
		spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
		break;

	case SMUX_TIOCM_UPDATE:
		spin_lock_irqsave(&cb_data_ptr->lock, flags);
		++cb_data_ptr->event_tiocm;
		cb_data_ptr->tiocm_meta = *(struct smux_meta_tiocm *)metadata;
		spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
		break;

	case SMUX_LOCAL_CLOSED:
		spin_lock_irqsave(&cb_data_ptr->lock, flags);
		++cb_data_ptr->event_local_closed;
		cb_data_ptr->event_disconnected_ssr =
			((struct smux_meta_disconnected *)metadata)->is_ssr;
		spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
		break;

	case SMUX_REMOTE_CLOSED:
		spin_lock_irqsave(&cb_data_ptr->lock, flags);
		++cb_data_ptr->event_remote_closed;
		cb_data_ptr->event_disconnected_ssr =
			((struct smux_meta_disconnected *)metadata)->is_ssr;
		spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
		break;

	default:
		pr_err("%s: unknown event %d\n", __func__, event);
	};

	spin_lock_irqsave(&cb_data_ptr->lock, flags);
	++cb_data_ptr->cb_count;
	complete(&cb_data_ptr->cb_completion);
	spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
}
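
/*
 * Illustrative sketch (not part of the driver): every callback above
 * completes cb_completion and increments cb_count, so the tests below
 * synchronize with asynchronous events by waiting on the completion and
 * then checking the event counters, e.g.:
 *
 *	ret = msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
 *				get_rx_buffer);
 *	UT_ASSERT_INT(ret, ==, 0);
 *	UT_ASSERT_INT(
 *		(int)wait_for_completion_timeout(
 *			&cb_data.cb_completion, HZ), >, 0);
 *	UT_ASSERT_INT(cb_data.event_connected, ==, 1);
 *	mock_cb_data_reset(&cb_data);
 */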
/**
 * Test Read/write usage.
 *
 * @buf Output buffer for failure/status messages
 * @max Size of @buf
 * @vectors Test vector data (must end with NULL item)
 * @name Name of the test case for failure messages
 *
 * Perform a sanity test consisting of opening a port, writing test packet(s),
 * reading the response(s), and closing the port.
 *
 * The port should already be configured to use either local or remote
 * loopback.
 */
static int smux_ut_basic_core(char *buf, int max,
		const struct test_vector *vectors,
		const char *name)
{
	int i = 0;
	int failed = 0;
	static struct smux_mock_callback cb_data;
	static int cb_initialized;
	int ret;

	if (!cb_initialized)
		mock_cb_data_init(&cb_data);

	mock_cb_data_reset(&cb_data);
	while (!failed) {
		struct mock_write_event *write_event;
		struct mock_read_event *read_event;

		/* open port */
		ret = msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
					get_rx_buffer);
		UT_ASSERT_INT(ret, ==, 0);
		UT_ASSERT_INT(
			(int)wait_for_completion_timeout(
				&cb_data.cb_completion, HZ), >, 0);
		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
		UT_ASSERT_INT(cb_data.event_connected, ==, 1);
		mock_cb_data_reset(&cb_data);

		/* write, read, and verify the test vector data */
		for (; vectors->data != NULL; ++vectors) {
			const char *test_data = vectors->data;
			const unsigned test_len = vectors->len;
			unsigned long long start_t;
			unsigned long long end_t;
			unsigned long long val;
			unsigned long rem;

			i += scnprintf(buf + i, max - i,
					"Writing vector %p len %d: ",
					test_data, test_len);

			/* write data */
			start_t = sched_clock();
			ret = msm_smux_write(SMUX_TEST_LCID, (void *)0xCAFEFACE,
						test_data, test_len);
			UT_ASSERT_INT(ret, ==, 0);
			UT_ASSERT_INT(
				(int)wait_for_completion_timeout(
					&cb_data.cb_completion, HZ), >, 0);

			/* wait for write and echo'd read to complete */
			INIT_COMPLETION(cb_data.cb_completion);
			if (cb_data.cb_count < 2)
				UT_ASSERT_INT(
					(int)wait_for_completion_timeout(
						&cb_data.cb_completion, HZ),
					>, 0);
			end_t = sched_clock();

			UT_ASSERT_INT(cb_data.cb_count, >=, 1);
			UT_ASSERT_INT(cb_data.event_write_done, ==, 1);
			UT_ASSERT_INT(list_empty(&cb_data.write_events), ==, 0);

			write_event = list_first_entry(&cb_data.write_events,
					struct mock_write_event, list);
			UT_ASSERT_PTR(write_event->meta.pkt_priv, ==,
					(void *)0xCAFEFACE);
			UT_ASSERT_PTR(write_event->meta.buffer, ==,
					(void *)test_data);
			UT_ASSERT_INT(write_event->meta.len, ==, test_len);

			/* verify read event */
			UT_ASSERT_INT(cb_data.event_read_done, ==, 1);
			UT_ASSERT_INT(list_empty(&cb_data.read_events), ==, 0);
			read_event = list_first_entry(&cb_data.read_events,
					struct mock_read_event, list);
			UT_ASSERT_PTR(read_event->meta.pkt_priv, ==,
					(void *)0x1234);
			UT_ASSERT_PTR(read_event->meta.buffer, !=, NULL);

			if (read_event->meta.len != test_len ||
				memcmp(read_event->meta.buffer,
						test_data, test_len)) {
				/* data mismatch */
				char linebuff[80];

				hex_dump_to_buffer(test_data, test_len,
					16, 1, linebuff, sizeof(linebuff), 1);
				i += scnprintf(buf + i, max - i,
						"Failed\nExpected:\n%s\n\n", linebuff);

				hex_dump_to_buffer(read_event->meta.buffer,
					read_event->meta.len,
					16, 1, linebuff, sizeof(linebuff), 1);
				i += scnprintf(buf + i, max - i,
						"Failed\nActual:\n%s\n", linebuff);
				failed = 1;
				break;
			}

			/* calculate throughput stats */
			val = end_t - start_t;
			rem = do_div(val, 1000);
			i += scnprintf(buf + i, max - i,
					"OK - %u us",
					(unsigned int)val);

			val = 1000000000LL * 2 * test_len;
			rem = do_div(val, end_t - start_t);
			i += scnprintf(buf + i, max - i,
					" (%u kB/sec)\n", (unsigned int)val);
			mock_cb_data_reset(&cb_data);
		}

		/* close port */
		ret = msm_smux_close(SMUX_TEST_LCID);
		UT_ASSERT_INT(ret, ==, 0);
		while (cb_data.cb_count < 3) {
			UT_ASSERT_INT(
				(int)wait_for_completion_timeout(
					&cb_data.cb_completion, HZ),
				>, 0);
			INIT_COMPLETION(cb_data.cb_completion);
		}
		UT_ASSERT_INT(cb_data.cb_count, ==, 3);
		UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
		UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
		UT_ASSERT_INT(cb_data.event_local_closed, ==, 1);
		UT_ASSERT_INT(cb_data.event_remote_closed, ==, 1);
		break;
	}

	if (!failed) {
		i += scnprintf(buf + i, max - i, "\tOK\n");
	} else {
		pr_err("%s: Failed\n", name);
		i += scnprintf(buf + i, max - i, "\tFailed\n");
		i += mock_cb_data_print(&cb_data, buf + i, max - i);
		msm_smux_close(SMUX_TEST_LCID);
	}

	mock_cb_data_reset(&cb_data);
	return i;
}
/**
 * Verify Basic Local Loopback Support
 *
 * Perform a sanity test consisting of opening a port in local loopback
 * mode and writing a packet and reading the echo'd packet back.
 */
static int smux_ut_basic(char *buf, int max)
{
	const struct test_vector test_data[] = {
		{"hello\0world\n", sizeof("hello\0world\n")},
		{0, 0},
	};
	int i = 0;
	int failed = 0;
	int ret;

	i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
	while (!failed) {
		/* enable loopback mode */
		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
				SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
		UT_ASSERT_INT(ret, ==, 0);

		i += smux_ut_basic_core(buf + i, max - i, test_data, __func__);
		break;
	}

	if (failed) {
		pr_err("%s: Failed\n", __func__);
		i += scnprintf(buf + i, max - i, "\tFailed\n");
	}
	return i;
}

/**
 * Verify Basic Remote Loopback Support
 *
 * Perform a sanity test consisting of opening a port in remote loopback
 * mode and writing a packet and reading the echo'd packet back.
 */
static int smux_ut_remote_basic(char *buf, int max)
{
	const struct test_vector test_data[] = {
		{"hello\0world\n", sizeof("hello\0world\n")},
		{0, 0},
	};
	int i = 0;
	int failed = 0;
	int ret;

	i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
	while (!failed) {
		/* enable remote mode */
		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
				SMUX_CH_OPTION_REMOTE_LOOPBACK, 0);
		UT_ASSERT_INT(ret, ==, 0);

		i += smux_ut_basic_core(buf + i, max - i, test_data, __func__);
		break;
	}

	if (failed) {
		pr_err("%s: Failed\n", __func__);
		i += scnprintf(buf + i, max - i, "\tFailed\n");
	}
	return i;
}

/**
 * Verify Basic Subsystem Restart Support
 *
 * Run a basic loopback test followed by a subsystem restart and then another
 * loopback test.
 */
static int smux_ut_ssr_remote_basic(char *buf, int max)
{
	const struct test_vector test_data[] = {
		{"hello\0world\n", sizeof("hello\0world\n")},
		{0, 0},
	};
	int i = 0;
	int failed = 0;
	int retry_count = 0;
	int ret;

	i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
	while (!failed) {
		/* enable remote mode */
		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
				SMUX_CH_OPTION_REMOTE_LOOPBACK, 0);
		UT_ASSERT_INT(ret, ==, 0);

		i += smux_ut_basic_core(buf + i, max - i, test_data, __func__);

		subsystem_restart("external_modem");
		do {
			msleep(500);
			++retry_count;
			UT_ASSERT_INT(retry_count, <, 20);
		} while (!smux_remote_is_active() && !failed);

		i += smux_ut_basic_core(buf + i, max - i, test_data, __func__);
		break;
	}

	if (failed) {
		pr_err("%s: Failed\n", __func__);
		i += scnprintf(buf + i, max - i, "\tFailed\n");
	}
	return i;
}
/**
 * Verify Subsystem Restart Support During Port Open
 */
static int smux_ut_ssr_remote_open(char *buf, int max)
{
	static struct smux_mock_callback cb_data;
	static int cb_initialized;
	int ret;
	int retry_count;
	int i = 0;
	int failed = 0;

	i += scnprintf(buf + i, max - i, "Running %s\n", __func__);

	if (!cb_initialized)
		mock_cb_data_init(&cb_data);

	mock_cb_data_reset(&cb_data);
	while (!failed) {
		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
				SMUX_CH_OPTION_REMOTE_LOOPBACK, 0);
		UT_ASSERT_INT(ret, ==, 0);

		/* open port */
		ret = msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
					get_rx_buffer);
		UT_ASSERT_INT(ret, ==, 0);
		UT_ASSERT_INT(
			(int)wait_for_completion_timeout(
				&cb_data.cb_completion, HZ), >, 0);
		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
		UT_ASSERT_INT(cb_data.event_connected, ==, 1);
		mock_cb_data_reset(&cb_data);

		/* restart modem */
		subsystem_restart("external_modem");

		/* verify SSR events */
		UT_ASSERT_INT(ret, ==, 0);
		while (cb_data.cb_count < 3) {
			UT_ASSERT_INT(
				(int)wait_for_completion_timeout(
					&cb_data.cb_completion, 10*HZ),
				>, 0);
			INIT_COMPLETION(cb_data.cb_completion);
		}
		UT_ASSERT_INT(cb_data.cb_count, ==, 3);
		UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
		UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 1);
		UT_ASSERT_INT(cb_data.event_local_closed, ==, 1);
		UT_ASSERT_INT(cb_data.event_remote_closed, ==, 1);
		mock_cb_data_reset(&cb_data);

		/* close port */
		ret = msm_smux_close(SMUX_TEST_LCID);
		UT_ASSERT_INT(ret, ==, 0);

		/* wait for remote side to finish booting */
		retry_count = 0;
		do {
			msleep(500);
			++retry_count;
			UT_ASSERT_INT(retry_count, <, 20);
		} while (!smux_remote_is_active() && !failed);
		break;
	}

	if (!failed) {
		i += scnprintf(buf + i, max - i, "\tOK\n");
	} else {
		pr_err("%s: Failed\n", __func__);
		i += scnprintf(buf + i, max - i, "\tFailed\n");
		i += mock_cb_data_print(&cb_data, buf + i, max - i);
		msm_smux_close(SMUX_TEST_LCID);
	}

	mock_cb_data_reset(&cb_data);
	return i;
}
/**
 * Verify that a pending get_rx_buffer callback retry does not livelock
 * SSR, i.e. that SSR completes without waiting for all RX buffer
 * retries to time out.
 *
 * @buf Buffer for status message
 * @max Size of buffer
 *
 * @returns Number of bytes written to @buf
 */
static int smux_ut_ssr_remote_rx_buff_retry(char *buf, int max)
{
	static struct smux_mock_callback cb_data;
	static int cb_initialized;
	int i = 0;
	int failed = 0;
	int retry_count;
	int ret;

	i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
	pr_err("%s", buf);

	if (!cb_initialized)
		mock_cb_data_init(&cb_data);

	mock_cb_data_reset(&cb_data);
	while (!failed) {
		/* open port for loopback */
		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
				SMUX_CH_OPTION_REMOTE_LOOPBACK,
				0);
		UT_ASSERT_INT(ret, ==, 0);

		ret = msm_smux_open(SMUX_TEST_LCID, &cb_data,
					smux_mock_cb, get_rx_buffer_mock);
		UT_ASSERT_INT(ret, ==, 0);
		UT_ASSERT_INT(
			(int)wait_for_completion_timeout(
				&cb_data.cb_completion, HZ), >, 0);
		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
		UT_ASSERT_INT(cb_data.event_connected, ==, 1);
		mock_cb_data_reset(&cb_data);

		/* Queue up an RX buffer retry */
		get_rx_buffer_mock_fail = 1;
		ret = msm_smux_write(SMUX_TEST_LCID, (void *)1,
					test_array, sizeof(test_array));
		UT_ASSERT_INT(ret, ==, 0);
		while (!cb_data.get_rx_buff_retry_count) {
			UT_ASSERT_INT(
				(int)wait_for_completion_timeout(
					&cb_data.cb_completion, HZ),
				>, 0);
			INIT_COMPLETION(cb_data.cb_completion);
		}
		if (failed)
			break;
		mock_cb_data_reset(&cb_data);

		/* trigger SSR */
		subsystem_restart("external_modem");

		/* verify SSR completed */
		retry_count = 0;
		while (cb_data.event_disconnected_ssr == 0) {
			(void)wait_for_completion_timeout(
				&cb_data.cb_completion, HZ);
			INIT_COMPLETION(cb_data.cb_completion);
			++retry_count;
			UT_ASSERT_INT(retry_count, <, 10);
		}
		if (failed)
			break;
		UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
		UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 1);
		UT_ASSERT_INT(cb_data.event_local_closed, ==, 1);
		UT_ASSERT_INT(cb_data.event_remote_closed, ==, 1);
		mock_cb_data_reset(&cb_data);

		/* close port */
		ret = msm_smux_close(SMUX_TEST_LCID);
		UT_ASSERT_INT(ret, ==, 0);

		/* wait for remote side to finish booting */
		retry_count = 0;
		do {
			msleep(500);
			++retry_count;
			UT_ASSERT_INT(retry_count, <, 20);
		} while (!smux_remote_is_active() && !failed);
		break;
	}

	if (!failed) {
		i += scnprintf(buf + i, max - i, "\tOK\n");
	} else {
		pr_err("%s: Failed\n", __func__);
		i += scnprintf(buf + i, max - i, "\tFailed\n");
		i += mock_cb_data_print(&cb_data, buf + i, max - i);
		msm_smux_close(SMUX_TEST_LCID);
	}

	mock_cb_data_reset(&cb_data);
	return i;
}
/**
 * Fill test pattern into provided buffer including an optional
 * redzone before and after the buffer.
 *
 *	buf  ---------
 *	      redzone
 *	     --------- <- returned pointer
 *	        data
 *	     --------- <- returned pointer + len
 *	      redzone
 *	     ---------
 *
 * @buf Pointer to the buffer of size len or len+2*RED_ZONE_SIZE (redzone)
 * @len Length of the *data* buffer (excluding the extra redzone buffers)
 * @redzone If true, adds redzone data
 *
 * @returns pointer to buffer (buf + RED_ZONE_SIZE if redzone enabled)
 */
static uint8_t *test_pattern_fill(char *buf, int len, int redzone)
{
	char *buf_ptr;
	uint8_t ch;

	if (redzone) {
		memset(buf, RED_ZONE_PRE_CH, RED_ZONE_SIZE);
		buf += RED_ZONE_SIZE;
		memset(buf + len, RED_ZONE_POS_CH, RED_ZONE_SIZE);
	}

	for (ch = 0, buf_ptr = buf; len > 0; --len, ++ch)
		*buf_ptr++ = (char)ch;

	return buf;
}
/**
 * Verify test pattern generated by test_pattern_fill.
 *
 * @buf_ptr Pointer to buffer pointer
 * @len Length of the *data* buffer (excluding redzone bytes)
 * @redzone If true, verifies redzone and adjusts *buf_ptr
 * @errmsg Buffer for error message
 * @errmsg_max Size of error message buffer
 *
 * @returns 0 for success; length of error message otherwise
 */
static unsigned test_pattern_verify(char **buf_ptr, int len, int redzone,
					char *errmsg, int errmsg_max)
{
	int n;
	int i = 0;
	char linebuff[80];
	char *zone_ptr;

	if (redzone) {
		*buf_ptr -= RED_ZONE_SIZE;
		zone_ptr = *buf_ptr;

		/* verify prefix redzone */
		for (n = 0; n < RED_ZONE_SIZE; ++n) {
			if (zone_ptr[n] != RED_ZONE_PRE_CH) {
				hex_dump_to_buffer(zone_ptr, RED_ZONE_SIZE,
						RED_ZONE_SIZE, 1, linebuff,
						sizeof(linebuff), 1);
				i += scnprintf(errmsg + i, errmsg_max - i,
						"Pre-redzone violation: %s\n",
						linebuff);
				break;
			}
		}

		/* verify postfix redzone */
		zone_ptr = *buf_ptr + RED_ZONE_SIZE + len;
		for (n = 0; n < RED_ZONE_SIZE; ++n) {
			if (zone_ptr[n] != RED_ZONE_POS_CH) {
				hex_dump_to_buffer(zone_ptr, RED_ZONE_SIZE,
						RED_ZONE_SIZE, 1, linebuff,
						sizeof(linebuff), 1);
				i += scnprintf(errmsg + i, errmsg_max - i,
						"Post-redzone violation: %s\n",
						linebuff);
				break;
			}
		}
	}
	return i;
}
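
/*
 * Illustrative pairing sketch (not part of the driver): a redzone buffer
 * must be allocated with room for both guard regions, filled with
 * test_pattern_fill(), and later checked with test_pattern_verify(),
 * which also restores the original allocation pointer before kfree():
 *
 *	char *data = kmalloc(len + 2 * RED_ZONE_SIZE, GFP_KERNEL);
 *	data = (char *)test_pattern_fill(data, len, 1);
 *	... use data[0..len-1] ...
 *	i += test_pattern_verify(&data, len, 1, buf + i, max - i);
 *	kfree(data);
 */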
/**
 * Write multiple packets of ascending size and verify that each packet
 * is received correctly.
 *
 * @buf Buffer for status message
 * @max Size of buffer
 * @name Name of the test for error reporting
 *
 * @returns Number of bytes written to @buf
 *
 * Requires that the port already be opened and that loopback mode is
 * configured correctly (if required).
 */
static int smux_ut_loopback_big_pkt(char *buf, int max, const char *name)
{
	struct test_vector test_data[] = {
		{0, 64},
		{0, 128},
		{0, 256},
		{0, 512},
		{0, 1024},
		{0, 1500},
		{0, 2048},
		{0, 4096},
		{0, 0},
	};
	int i = 0;
	int failed = 0;
	struct test_vector *tv;

	/* generate test data */
	for (tv = test_data; tv->len > 0; ++tv) {
		tv->data = kmalloc(tv->len + 2 * RED_ZONE_SIZE, GFP_KERNEL);
		if (!tv->data) {
			i += scnprintf(buf + i, max - i,
					"%s: Unable to allocate %d bytes\n",
					__func__, tv->len);
			failed = 1;
			goto out;
		}
		tv->data = test_pattern_fill((uint8_t *)tv->data, tv->len, 1);
	}

	/* run test */
	i += scnprintf(buf + i, max - i, "Running %s\n", name);
	while (!failed) {
		i += smux_ut_basic_core(buf + i, max - i, test_data, name);
		break;
	}

out:
	if (failed) {
		pr_err("%s: Failed\n", name);
		i += scnprintf(buf + i, max - i, "\tFailed\n");
	}

	for (tv = test_data; tv->len > 0; ++tv) {
		if (tv->data) {
			i += test_pattern_verify((char **)&tv->data,
						tv->len, 1, buf + i, max - i);
			kfree(tv->data);
		}
	}

	return i;
}
/**
 * Verify Large-packet Local Loopback Support.
 *
 * @buf Buffer for status message
 * @max Size of buffer
 *
 * @returns Number of bytes written to @buf
 *
 * Open the port in local loopback mode, write multiple packets of
 * ascending size, and verify that each packet is received correctly.
 */
static int smux_ut_local_big_pkt(char *buf, int max)
{
	int i = 0;
	int ret;

	ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
			SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);

	if (ret == 0) {
		smux_byte_loopback = SMUX_TEST_LCID;
		i += smux_ut_loopback_big_pkt(buf, max, __func__);
		smux_byte_loopback = 0;
	} else {
		i += scnprintf(buf + i, max - i,
				"%s: Unable to set loopback mode\n",
				__func__);
	}

	return i;
}
/**
 * Verify Large-packet Remote Loopback Support.
 *
 * @buf Buffer for status message
 * @max Size of buffer
 *
 * @returns Number of bytes written to @buf
 *
 * Open the port in remote loopback mode, write multiple packets of
 * ascending size, and verify that each packet is received correctly.
 */
static int smux_ut_remote_big_pkt(char *buf, int max)
{
	int i = 0;
	int ret;

	ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
			SMUX_CH_OPTION_REMOTE_LOOPBACK, 0);
	if (ret == 0) {
		i += smux_ut_loopback_big_pkt(buf, max, __func__);
	} else {
		i += scnprintf(buf + i, max - i,
				"%s: Unable to set loopback mode\n",
				__func__);
	}

	return i;
}

/**
 * Run a large packet test for throughput metrics.
 *
 * Repeatedly send a packet for 100 iterations to get throughput metrics.
 */
static int smux_ut_remote_throughput(char *buf, int max)
{
	struct test_vector test_data[] = {
		{0, 1500},
		{0, 0},
	};
	int failed = 0;
	int i = 0;
	int loop = 0;
	struct test_vector *tv;
	int ret;

	/* generate test data */
	for (tv = test_data; tv->len > 0; ++tv) {
		tv->data = kmalloc(tv->len, GFP_KERNEL);
		if (!tv->data) {
			i += scnprintf(buf + i, max - i,
					"%s: Unable to allocate %d bytes\n",
					__func__, tv->len);
			failed = 1;
			goto out;
		}
		test_pattern_fill((uint8_t *)tv->data, tv->len, 0);
	}

	/* run test */
	i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
	while (!failed && loop < 100) {
		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
				SMUX_CH_OPTION_REMOTE_LOOPBACK, 0);
		UT_ASSERT_INT(ret, ==, 0);
		i += smux_ut_basic_core(buf + i, max - i, test_data, __func__);
		++loop;
	}

out:
	if (failed) {
		pr_err("%s: Failed\n", __func__);
		i += scnprintf(buf + i, max - i, "\tFailed\n");
	}

	for (tv = test_data; tv->len > 0; ++tv)
		kfree(tv->data);

	return i;
}
/**
 * Verify set and get operations for each TIOCM bit.
 *
 * @buf Buffer for status message
 * @max Size of buffer
 * @name Name of the test for error reporting
 *
 * @returns Number of bytes written to @buf
 */
static int smux_ut_tiocm(char *buf, int max, const char *name)
{
	static struct smux_mock_callback cb_data;
	static int cb_initialized;
	static const struct tiocm_test_vector tiocm_vectors[] = {
		/* bit to set, set old, set new, clear old */
		{TIOCM_DTR, TIOCM_DTR, TIOCM_DTR | TIOCM_DSR, TIOCM_DSR},
		{TIOCM_RTS, TIOCM_RTS, TIOCM_RTS | TIOCM_CTS, TIOCM_CTS},
		{TIOCM_RI, 0x0, TIOCM_RI, TIOCM_RI},
		{TIOCM_CD, 0x0, TIOCM_CD, TIOCM_CD},
	};
	int i = 0;
	int failed = 0;
	int n;
	int ret;

	i += scnprintf(buf + i, max - i, "Running %s\n", name);

	if (!cb_initialized)
		mock_cb_data_init(&cb_data);

	mock_cb_data_reset(&cb_data);
	while (!failed) {
		/* open port */
		ret = msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
					get_rx_buffer);
		UT_ASSERT_INT(ret, ==, 0);
		UT_ASSERT_INT(
			(int)wait_for_completion_timeout(
				&cb_data.cb_completion, HZ), >, 0);
		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
		UT_ASSERT_INT(cb_data.event_connected, ==, 1);
		mock_cb_data_reset(&cb_data);

		/* set and clear each TIOCM bit */
		for (n = 0; n < ARRAY_SIZE(tiocm_vectors) && !failed; ++n) {
			/* set signal and verify */
			ret = msm_smux_tiocm_set(SMUX_TEST_LCID,
						tiocm_vectors[n].input, 0x0);
			UT_ASSERT_INT(ret, ==, 0);
			UT_ASSERT_INT(
				(int)wait_for_completion_timeout(
					&cb_data.cb_completion, HZ), >, 0);
			UT_ASSERT_INT(cb_data.cb_count, ==, 1);
			UT_ASSERT_INT(cb_data.event_tiocm, ==, 1);
			UT_ASSERT_INT(cb_data.tiocm_meta.tiocm_old, ==,
					tiocm_vectors[n].set_old);
			UT_ASSERT_INT(cb_data.tiocm_meta.tiocm_new, ==,
					tiocm_vectors[n].set_new);
			mock_cb_data_reset(&cb_data);

			/* clear signal and verify */
			ret = msm_smux_tiocm_set(SMUX_TEST_LCID, 0x0,
						tiocm_vectors[n].input);
			UT_ASSERT_INT(ret, ==, 0);
			UT_ASSERT_INT(
				(int)wait_for_completion_timeout(
					&cb_data.cb_completion, HZ),
				>, 0);
			UT_ASSERT_INT(cb_data.cb_count, ==, 1);
			UT_ASSERT_INT(cb_data.event_tiocm, ==, 1);
			UT_ASSERT_INT(cb_data.tiocm_meta.tiocm_old, ==,
					tiocm_vectors[n].clr_old);
			UT_ASSERT_INT(cb_data.tiocm_meta.tiocm_new, ==, 0x0);
			mock_cb_data_reset(&cb_data);
		}
		if (failed)
			break;

		/* close port */
		ret = msm_smux_close(SMUX_TEST_LCID);
		UT_ASSERT_INT(ret, ==, 0);
		while (cb_data.cb_count < 3) {
			UT_ASSERT_INT(
				(int)wait_for_completion_timeout(
					&cb_data.cb_completion, HZ),
				>, 0);
			INIT_COMPLETION(cb_data.cb_completion);
		}
		UT_ASSERT_INT(cb_data.cb_count, ==, 3);
		UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
		UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
		UT_ASSERT_INT(cb_data.event_local_closed, ==, 1);
		UT_ASSERT_INT(cb_data.event_remote_closed, ==, 1);
		break;
	}

	if (!failed) {
		i += scnprintf(buf + i, max - i, "\tOK\n");
	} else {
		pr_err("%s: Failed\n", name);
		i += scnprintf(buf + i, max - i, "\tFailed\n");
		i += mock_cb_data_print(&cb_data, buf + i, max - i);
		msm_smux_close(SMUX_TEST_LCID);
	}

	mock_cb_data_reset(&cb_data);
	return i;
}
/**
 * Verify TIOCM Status Bits for local loopback.
 *
 * @buf Buffer for status message
 * @max Size of buffer
 *
 * @returns Number of bytes written to @buf
 */
static int smux_ut_local_tiocm(char *buf, int max)
{
	int i = 0;
	int ret;

	ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
			SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);

	if (ret == 0) {
		smux_byte_loopback = SMUX_TEST_LCID;
		i += smux_ut_tiocm(buf, max, __func__);
		smux_byte_loopback = 0;
	} else {
		i += scnprintf(buf + i, max - i,
				"%s: Unable to set loopback mode\n",
				__func__);
	}

	return i;
}

/**
 * Verify TIOCM Status Bits for remote loopback.
 *
 * @buf Buffer for status message
 * @max Size of buffer
 *
 * @returns Number of bytes written to @buf
 */
static int smux_ut_remote_tiocm(char *buf, int max)
{
	int i = 0;
	int ret;

	ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
			SMUX_CH_OPTION_REMOTE_LOOPBACK, 0);
	if (ret == 0) {
		i += smux_ut_tiocm(buf, max, __func__);
	} else {
		i += scnprintf(buf + i, max - i,
				"%s: Unable to set loopback mode\n",
				__func__);
	}

	return i;
}
/**
 * Verify High/Low Watermark notifications.
 *
 * @buf Buffer for status message
 * @max Size of buffer
 *
 * @returns Number of bytes written to @buf
 */
static int smux_ut_local_wm(char *buf, int max)
{
	static struct smux_mock_callback cb_data;
	static int cb_initialized;
	int i = 0;
	int failed = 0;
	int ret;

	i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
	pr_err("%s", buf);

	if (!cb_initialized)
		mock_cb_data_init(&cb_data);

	mock_cb_data_reset(&cb_data);
	smux_byte_loopback = SMUX_TEST_LCID;
	while (!failed) {
		/* open port for loopback with TX disabled */
		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
				SMUX_CH_OPTION_LOCAL_LOOPBACK
				| SMUX_CH_OPTION_REMOTE_TX_STOP,
				0);
		UT_ASSERT_INT(ret, ==, 0);

		ret = msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
					get_rx_buffer);
		UT_ASSERT_INT(ret, ==, 0);
		UT_ASSERT_INT(
			(int)wait_for_completion_timeout(
				&cb_data.cb_completion, HZ), >, 0);
		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
		UT_ASSERT_INT(cb_data.event_connected, ==, 1);
		mock_cb_data_reset(&cb_data);

		/* transmit 4 packets and verify high-watermark notification */
		ret = 0;
		ret |= msm_smux_write(SMUX_TEST_LCID, (void *)1,
					test_array, sizeof(test_array));
		ret |= msm_smux_write(SMUX_TEST_LCID, (void *)2,
					test_array, sizeof(test_array));
		ret |= msm_smux_write(SMUX_TEST_LCID, (void *)3,
					test_array, sizeof(test_array));
		UT_ASSERT_INT(ret, ==, 0);
		UT_ASSERT_INT(cb_data.cb_count, ==, 0);
		UT_ASSERT_INT(cb_data.event_high_wm, ==, 0);

		ret = msm_smux_write(SMUX_TEST_LCID, (void *)4,
					test_array, sizeof(test_array));
		UT_ASSERT_INT(ret, ==, 0);
		UT_ASSERT_INT(
			(int)wait_for_completion_timeout(
				&cb_data.cb_completion, HZ),
			>, 0);
		UT_ASSERT_INT(cb_data.event_high_wm, ==, 1);
		UT_ASSERT_INT(cb_data.event_low_wm, ==, 0);
		mock_cb_data_reset(&cb_data);

		/* exceed watermark and verify failure return value */
		ret = msm_smux_write(SMUX_TEST_LCID, (void *)5,
					test_array, sizeof(test_array));
		UT_ASSERT_INT(ret, ==, -EAGAIN);

		/* re-enable TX and verify low-watermark notification */
		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
				0, SMUX_CH_OPTION_REMOTE_TX_STOP);
		UT_ASSERT_INT(ret, ==, 0);
		while (cb_data.cb_count < 9) {
			UT_ASSERT_INT(
				(int)wait_for_completion_timeout(
					&cb_data.cb_completion, HZ),
				>, 0);
			INIT_COMPLETION(cb_data.cb_completion);
		}
		if (failed)
			break;

		UT_ASSERT_INT(cb_data.event_high_wm, ==, 0);
		UT_ASSERT_INT(cb_data.event_low_wm, ==, 1);
		UT_ASSERT_INT(cb_data.event_write_done, ==, 4);
		mock_cb_data_reset(&cb_data);

		/* close port */
		ret = msm_smux_close(SMUX_TEST_LCID);
		UT_ASSERT_INT(ret, ==, 0);
		while (cb_data.cb_count < 3) {
			UT_ASSERT_INT(
				(int)wait_for_completion_timeout(
					&cb_data.cb_completion, HZ),
				>, 0);
			INIT_COMPLETION(cb_data.cb_completion);
		}
		UT_ASSERT_INT(cb_data.cb_count, ==, 3);
		UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
		UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
		UT_ASSERT_INT(cb_data.event_local_closed, ==, 1);
		UT_ASSERT_INT(cb_data.event_remote_closed, ==, 1);
		break;
	}

	if (!failed) {
		i += scnprintf(buf + i, max - i, "\tOK\n");
	} else {
		pr_err("%s: Failed\n", __func__);
		i += scnprintf(buf + i, max - i, "\tFailed\n");
		i += mock_cb_data_print(&cb_data, buf + i, max - i);
		msm_smux_close(SMUX_TEST_LCID);
	}

	smux_byte_loopback = 0;
	mock_cb_data_reset(&cb_data);
	return i;
}
/**
 * Verify smuxld_receive_buf regular and error processing.
 *
 * @buf Buffer for status message
 * @max Size of buffer
 *
 * @returns Number of bytes written to @buf
 */
static int smux_ut_local_smuxld_receive_buf(char *buf, int max)
{
	static struct smux_mock_callback cb_data;
	static int cb_initialized;
	struct mock_read_event *meta;
	int i = 0;
	int failed = 0;
	int ret;
	char data[] = {SMUX_UT_ECHO_REQ,
		SMUX_UT_ECHO_REQ, SMUX_UT_ECHO_REQ,
	};
	char flags[] = {0x0, 0x1, 0x0,};

	i += scnprintf(buf + i, max - i, "Running %s\n", __func__);

	if (!cb_initialized)
		mock_cb_data_init(&cb_data);

	mock_cb_data_reset(&cb_data);
	smux_byte_loopback = SMUX_TEST_LCID;
	while (!failed) {
		/* open port for loopback */
		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
				SMUX_CH_OPTION_LOCAL_LOOPBACK, 0);
		UT_ASSERT_INT(ret, ==, 0);

		ret = msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
					get_rx_buffer);
		UT_ASSERT_INT(ret, ==, 0);
		UT_ASSERT_INT(
			(int)wait_for_completion_timeout(
				&cb_data.cb_completion, HZ), >, 0);
		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
		UT_ASSERT_INT(cb_data.event_connected, ==, 1);
		mock_cb_data_reset(&cb_data);

		/*
		 * Verify RX error processing by sending 3 echo requests:
		 *     one OK, one fail, and a final OK
		 *
		 * The parsing framework should process the requests
		 * and send us three BYTE command packets with
		 * ECHO ACK FAIL and ECHO ACK OK characters.
		 */
		smuxld_receive_buf(0, data, flags, sizeof(data));

		/* verify response characters */
		do {
			UT_ASSERT_INT(
				(int)wait_for_completion_timeout(
					&cb_data.cb_completion, HZ), >, 0);
			INIT_COMPLETION(cb_data.cb_completion);
		} while (cb_data.cb_count < 3);
		UT_ASSERT_INT(cb_data.cb_count, ==, 3);
		UT_ASSERT_INT(cb_data.event_read_done, ==, 3);

		meta = list_first_entry(&cb_data.read_events,
				struct mock_read_event, list);
		UT_ASSERT_INT((int)meta->meta.pkt_priv, ==,
				SMUX_UT_ECHO_ACK_OK);
		list_del(&meta->list);

		meta = list_first_entry(&cb_data.read_events,
				struct mock_read_event, list);
		UT_ASSERT_INT((int)meta->meta.pkt_priv, ==,
				SMUX_UT_ECHO_ACK_FAIL);
		list_del(&meta->list);

		meta = list_first_entry(&cb_data.read_events,
				struct mock_read_event, list);
		UT_ASSERT_INT((int)meta->meta.pkt_priv, ==,
				SMUX_UT_ECHO_ACK_OK);
		list_del(&meta->list);
		mock_cb_data_reset(&cb_data);

		/* close port */
		ret = msm_smux_close(SMUX_TEST_LCID);
		UT_ASSERT_INT(ret, ==, 0);
		while (cb_data.cb_count < 3) {
			UT_ASSERT_INT(
				(int)wait_for_completion_timeout(
					&cb_data.cb_completion, HZ),
				>, 0);
			INIT_COMPLETION(cb_data.cb_completion);
		}
		UT_ASSERT_INT(cb_data.cb_count, ==, 3);
		UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
		UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
		UT_ASSERT_INT(cb_data.event_local_closed, ==, 1);
		UT_ASSERT_INT(cb_data.event_remote_closed, ==, 1);
		break;
	}

	if (!failed) {
		i += scnprintf(buf + i, max - i, "\tOK\n");
	} else {
		pr_err("%s: Failed\n", __func__);
		i += scnprintf(buf + i, max - i, "\tFailed\n");
		i += mock_cb_data_print(&cb_data, buf + i, max - i);
		msm_smux_close(SMUX_TEST_LCID);
	}

	smux_byte_loopback = 0;
	mock_cb_data_reset(&cb_data);
	return i;
}

/**
 * Allocates a new buffer or returns a failure based upon the
 * global @get_rx_buffer_mock_fail.
 */
static int get_rx_buffer_mock(void *priv, void **pkt_priv,
		void **buffer, int size)
{
	void *rx_buf;
	unsigned long flags;
	struct smux_mock_callback *cb_ptr;

	cb_ptr = (struct smux_mock_callback *)priv;
	if (!cb_ptr) {
		pr_err("%s: no callback data\n", __func__);
		return -ENXIO;
	}

	if (get_rx_buffer_mock_fail) {
		/* force failure and log failure event */
		struct mock_get_rx_buff_event *meta;

		meta = kmalloc(sizeof(struct mock_get_rx_buff_event),
				GFP_KERNEL);
		if (!meta) {
			pr_err("%s: unable to allocate metadata\n", __func__);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&meta->list);
		meta->size = size;
		meta->jiffies = jiffies;

		spin_lock_irqsave(&cb_ptr->lock, flags);
		++cb_ptr->get_rx_buff_retry_count;
		list_add_tail(&meta->list, &cb_ptr->get_rx_buff_retry_events);
		++cb_ptr->cb_count;
		complete(&cb_ptr->cb_completion);
		spin_unlock_irqrestore(&cb_ptr->lock, flags);
		return -EAGAIN;
	} else {
		rx_buf = kmalloc(size, GFP_KERNEL);
		if (!rx_buf) {
			pr_err("%s: unable to allocate rx buffer\n",
					__func__);
			return -ENOMEM;
		}
		*pkt_priv = (void *)0x1234;
		*buffer = rx_buf;
		return 0;
	}
}
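
/*
 * Usage note (illustrative): the retry tests below register this mock as
 * the RX buffer allocator via
 *	msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
 *			get_rx_buffer_mock);
 * While @get_rx_buffer_mock_fail is set, every allocation request returns
 * -EAGAIN, which the SMUX core is expected to retry with the exponential
 * back-off exercised in smux_ut_local_get_rx_buff_retry().
 */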

/**
 * Verify get_rx_buffer callback retry.
 *
 * @buf Buffer for status message
 * @max Size of buffer
 *
 * @returns Number of bytes written to @buf
 */
static int smux_ut_local_get_rx_buff_retry(char *buf, int max)
{
	static struct smux_mock_callback cb_data;
	static int cb_initialized;
	int i = 0;
	int failed = 0;
	char try_two[] = "try 2";
	int ret;
	unsigned long start_j;
	struct mock_get_rx_buff_event *event;
	struct mock_read_event *read_event;
	int try;

	i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
	pr_err("%s", buf);

	if (!cb_initialized)
		mock_cb_data_init(&cb_data);

	mock_cb_data_reset(&cb_data);
	smux_byte_loopback = SMUX_TEST_LCID;
	while (!failed) {
		/* open port for loopback */
		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
				SMUX_CH_OPTION_LOCAL_LOOPBACK,
				SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP);
		UT_ASSERT_INT(ret, ==, 0);

		ret = msm_smux_open(SMUX_TEST_LCID, &cb_data,
				smux_mock_cb, get_rx_buffer_mock);
		UT_ASSERT_INT(ret, ==, 0);
		UT_ASSERT_INT(
			(int)wait_for_completion_timeout(
				&cb_data.cb_completion, HZ), >, 0);
		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
		UT_ASSERT_INT(cb_data.event_connected, ==, 1);
		mock_cb_data_reset(&cb_data);

		/*
		 * Force get_rx_buffer failure for a single RX packet.
		 *
		 * The get_rx_buffer calls should follow an exponential
		 * back-off with a maximum delay of 1024 ms, after which
		 * we will get a failure notification.
		 *
		 *	Try	Post Delay (ms)
		 *	 0	    -
		 *	 1	    1
		 *	 2	    2
		 *	 3	    4
		 *	 4	    8
		 *	 5	   16
		 *	 6	   32
		 *	 7	   64
		 *	 8	  128
		 *	 9	  256
		 *	10	  512
		 *	11	 1024
		 *	12	 Fail
		 *
		 * All times are limited by the precision of the timer
		 * framework, so ranges are used in the test verification.
		 * (An illustrative helper for this delay formula is
		 * sketched after this function.)
		 */
		get_rx_buffer_mock_fail = 1;
		start_j = jiffies;
		ret = msm_smux_write(SMUX_TEST_LCID, (void *)1,
					test_array, sizeof(test_array));
		UT_ASSERT_INT(ret, ==, 0);
		ret = msm_smux_write(SMUX_TEST_LCID, (void *)2,
					try_two, sizeof(try_two));
		UT_ASSERT_INT(ret, ==, 0);

		/* wait for RX failure event */
		while (cb_data.event_read_failed == 0) {
			UT_ASSERT_INT(
				(int)wait_for_completion_timeout(
					&cb_data.cb_completion, 2*HZ),
				>, 0);
			INIT_COMPLETION(cb_data.cb_completion);
		}
		if (failed)
			break;

		/* verify retry attempts */
		UT_ASSERT_INT(cb_data.get_rx_buff_retry_count, ==, 12);
		event = list_first_entry(&cb_data.get_rx_buff_retry_events,
				struct mock_get_rx_buff_event, list);
		pr_err("%s: event->jiffies = %d (ms)\n", __func__,
				jiffies_to_msecs(event->jiffies - start_j));
		UT_ASSERT_INT_IN_RANGE(
				jiffies_to_msecs(event->jiffies - start_j),
				0, 0 + 20);
		start_j = event->jiffies;

		event = list_first_entry(&event->list,
				struct mock_get_rx_buff_event, list);
		pr_err("%s: event->jiffies = %d (ms)\n", __func__,
				jiffies_to_msecs(event->jiffies - start_j));
		UT_ASSERT_INT_IN_RANGE(
				jiffies_to_msecs(event->jiffies - start_j),
				1, 1 + 20);
		start_j = event->jiffies;

		event = list_first_entry(&event->list,
				struct mock_get_rx_buff_event, list);
		pr_err("%s: event->jiffies = %d (ms)\n", __func__,
				jiffies_to_msecs(event->jiffies - start_j));
		UT_ASSERT_INT_IN_RANGE(
				jiffies_to_msecs(event->jiffies - start_j),
				2, 2 + 20);
		start_j = event->jiffies;

		event = list_first_entry(&event->list,
				struct mock_get_rx_buff_event, list);
		pr_err("%s: event->jiffies = %d (ms)\n", __func__,
				jiffies_to_msecs(event->jiffies - start_j));
		UT_ASSERT_INT_IN_RANGE(
				jiffies_to_msecs(event->jiffies - start_j),
				4, 4 + 20);
		start_j = event->jiffies;

		event = list_first_entry(&event->list,
				struct mock_get_rx_buff_event, list);
		pr_err("%s: event->jiffies = %d (ms)\n", __func__,
				jiffies_to_msecs(event->jiffies - start_j));
		UT_ASSERT_INT_IN_RANGE(
				jiffies_to_msecs(event->jiffies - start_j),
				8, 8 + 20);
		start_j = event->jiffies;

		event = list_first_entry(&event->list,
				struct mock_get_rx_buff_event, list);
		pr_err("%s: event->jiffies = %d (ms)\n", __func__,
				jiffies_to_msecs(event->jiffies - start_j));
		UT_ASSERT_INT_IN_RANGE(
				jiffies_to_msecs(event->jiffies - start_j),
				16, 16 + 20);
		start_j = event->jiffies;

		event = list_first_entry(&event->list,
				struct mock_get_rx_buff_event, list);
		pr_err("%s: event->jiffies = %d (ms)\n", __func__,
				jiffies_to_msecs(event->jiffies - start_j));
		UT_ASSERT_INT_IN_RANGE(
				jiffies_to_msecs(event->jiffies - start_j),
				32 - 20, 32 + 20);
		start_j = event->jiffies;

		event = list_first_entry(&event->list,
				struct mock_get_rx_buff_event, list);
		pr_err("%s: event->jiffies = %d (ms)\n", __func__,
				jiffies_to_msecs(event->jiffies - start_j));
		UT_ASSERT_INT_IN_RANGE(
				jiffies_to_msecs(event->jiffies - start_j),
				64 - 20, 64 + 20);
		start_j = event->jiffies;

		event = list_first_entry(&event->list,
				struct mock_get_rx_buff_event, list);
		pr_err("%s: event->jiffies = %d (ms)\n", __func__,
				jiffies_to_msecs(event->jiffies - start_j));
		UT_ASSERT_INT_IN_RANGE(
				jiffies_to_msecs(event->jiffies - start_j),
				128 - 20, 128 + 20);
		start_j = event->jiffies;

		event = list_first_entry(&event->list,
				struct mock_get_rx_buff_event, list);
		pr_err("%s: event->jiffies = %d (ms)\n", __func__,
				jiffies_to_msecs(event->jiffies - start_j));
		UT_ASSERT_INT_IN_RANGE(
				jiffies_to_msecs(event->jiffies - start_j),
				256 - 20, 256 + 20);
		start_j = event->jiffies;

		event = list_first_entry(&event->list,
				struct mock_get_rx_buff_event, list);
		pr_err("%s: event->jiffies = %d (ms)\n", __func__,
				jiffies_to_msecs(event->jiffies - start_j));
		UT_ASSERT_INT_IN_RANGE(
				jiffies_to_msecs(event->jiffies - start_j),
				512 - 20, 512 + 20);
		start_j = event->jiffies;

		event = list_first_entry(&event->list,
				struct mock_get_rx_buff_event, list);
		pr_err("%s: event->jiffies = %d (ms)\n", __func__,
				jiffies_to_msecs(event->jiffies - start_j));
		UT_ASSERT_INT_IN_RANGE(
				jiffies_to_msecs(event->jiffies - start_j),
				1024 - 20, 1024 + 20);
		mock_cb_data_reset(&cb_data);

		/* verify 2nd pending RX packet goes through */
		get_rx_buffer_mock_fail = 0;
		INIT_COMPLETION(cb_data.cb_completion);
		if (cb_data.event_read_done == 0)
			UT_ASSERT_INT(
				(int)wait_for_completion_timeout(
					&cb_data.cb_completion, HZ),
				>, 0);
		UT_ASSERT_INT(cb_data.event_read_done, ==, 1);
		UT_ASSERT_INT(list_empty(&cb_data.read_events), ==, 0);
		read_event = list_first_entry(&cb_data.read_events,
				struct mock_read_event, list);
		UT_ASSERT_PTR(read_event->meta.pkt_priv, ==, (void *)0x1234);
		UT_ASSERT_PTR(read_event->meta.buffer, !=, NULL);
		UT_ASSERT_INT(0, ==, memcmp(read_event->meta.buffer, try_two,
					sizeof(try_two)));
		mock_cb_data_reset(&cb_data);

		/* Test maximum retry queue size */
		get_rx_buffer_mock_fail = 1;
		for (try = 0; try < (SMUX_RX_RETRY_MAX_PKTS + 1); ++try) {
			mock_cb_data_reset(&cb_data);
			ret = msm_smux_write(SMUX_TEST_LCID, (void *)1,
					test_array, sizeof(test_array));
			UT_ASSERT_INT(ret, ==, 0);
			UT_ASSERT_INT(
				(int)wait_for_completion_timeout(
					&cb_data.cb_completion, HZ),
				>, 0);
		}
		/*
		 * Should have SMUX_RX_RETRY_MAX_PKTS successful RX packets
		 * and 1 failed packet.
		 */
		while (cb_data.event_read_failed == 0) {
			UT_ASSERT_INT(
				(int)wait_for_completion_timeout(
					&cb_data.cb_completion, 2*HZ),
				>, 0);
			INIT_COMPLETION(cb_data.cb_completion);
		}
		if (failed)
			break;

		get_rx_buffer_mock_fail = 0;
		while (cb_data.event_read_done < SMUX_RX_RETRY_MAX_PKTS) {
			UT_ASSERT_INT(
				(int)wait_for_completion_timeout(
					&cb_data.cb_completion, 2*HZ),
				>, 0);
			INIT_COMPLETION(cb_data.cb_completion);
		}
		if (failed)
			break;
		UT_ASSERT_INT(1, ==, cb_data.event_read_failed);
		UT_ASSERT_INT(SMUX_RX_RETRY_MAX_PKTS, ==,
				cb_data.event_read_done);
		mock_cb_data_reset(&cb_data);

		/* close port */
		ret = msm_smux_close(SMUX_TEST_LCID);
		UT_ASSERT_INT(ret, ==, 0);
		while (cb_data.cb_count < 3) {
			UT_ASSERT_INT(
				(int)wait_for_completion_timeout(
					&cb_data.cb_completion, HZ),
				>, 0);
			INIT_COMPLETION(cb_data.cb_completion);
		}
		UT_ASSERT_INT(cb_data.cb_count, ==, 3);
		UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
		UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
		UT_ASSERT_INT(cb_data.event_local_closed, ==, 1);
		UT_ASSERT_INT(cb_data.event_remote_closed, ==, 1);
		break;
	}

	if (!failed) {
		i += scnprintf(buf + i, max - i, "\tOK\n");
	} else {
		pr_err("%s: Failed\n", __func__);
		i += scnprintf(buf + i, max - i, "\tFailed\n");
		i += mock_cb_data_print(&cb_data, buf + i, max - i);
		msm_smux_close(SMUX_TEST_LCID);
	}

	smux_byte_loopback = 0;
	mock_cb_data_reset(&cb_data);
	return i;
}
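
/*
 * Illustrative sketch only (hypothetical helper, not used by the tests):
 * the expected post-delay in milliseconds for get_rx_buffer retry attempt
 * @attempt, following the power-of-two back-off table documented in
 * smux_ut_local_get_rx_buff_retry() above.  Valid for attempts 0..11;
 * attempt 12 results in a failure notification rather than another delay.
 */
static inline unsigned int smux_ut_expected_retry_delay_ms(
		unsigned int attempt)
{
	/* no delay before the first attempt, then 1, 2, 4, ... 1024 ms */
	return attempt ? (1U << (attempt - 1)) : 0;
}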

/**
 * Verify get_rx_buffer callback retry for auto-rx flow control.
 *
 * @buf Buffer for status message
 * @max Size of buffer
 *
 * @returns Number of bytes written to @buf
 */
static int smux_ut_local_get_rx_buff_retry_auto(char *buf, int max)
{
	static struct smux_mock_callback cb_data;
	static int cb_initialized;
	int i = 0;
	int failed = 0;
	int ret;
	int try;
	int try_rx_retry_wm;

	i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
	pr_err("%s", buf);

	if (!cb_initialized)
		mock_cb_data_init(&cb_data);

	mock_cb_data_reset(&cb_data);
	smux_byte_loopback = SMUX_TEST_LCID;
	while (!failed) {
		/* open port for loopback */
		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
				SMUX_CH_OPTION_LOCAL_LOOPBACK
				| SMUX_CH_OPTION_AUTO_REMOTE_TX_STOP,
				0);
		UT_ASSERT_INT(ret, ==, 0);

		ret = msm_smux_open(SMUX_TEST_LCID, &cb_data,
				smux_mock_cb, get_rx_buffer_mock);
		UT_ASSERT_INT(ret, ==, 0);
		UT_ASSERT_INT(
			(int)wait_for_completion_timeout(
				&cb_data.cb_completion, HZ), >, 0);
		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
		UT_ASSERT_INT(cb_data.event_connected, ==, 1);
		mock_cb_data_reset(&cb_data);

		/* Test high rx-retry watermark */
		get_rx_buffer_mock_fail = 1;
		try_rx_retry_wm = 0;
		for (try = 0; try < SMUX_RX_RETRY_MAX_PKTS; ++try) {
			pr_err("%s: try %d\n", __func__, try);
			ret = msm_smux_write(SMUX_TEST_LCID, (void *)1,
					test_array, sizeof(test_array));
			UT_ASSERT_INT(ret, ==, 0);
			if (failed)
				break;

			if (!try_rx_retry_wm &&
					cb_data.event_rx_retry_high_wm) {
				/* RX high watermark hit */
				try_rx_retry_wm = try + 1;
				break;
			}

			while (cb_data.event_write_done <= try) {
				UT_ASSERT_INT(
					(int)wait_for_completion_timeout(
						&cb_data.cb_completion, HZ),
					>, 0);
				INIT_COMPLETION(cb_data.cb_completion);
			}
			if (failed)
				break;
		}
		if (failed)
			break;

		/* RX retry high watermark should have been set */
		UT_ASSERT_INT(cb_data.event_rx_retry_high_wm, ==, 1);
		UT_ASSERT_INT(try_rx_retry_wm, ==, SMUX_RX_WM_HIGH);

		/*
		 * Disable RX buffer allocation failures and wait for
		 * SMUX_RX_WM_HIGH successful packets.
		 */
		get_rx_buffer_mock_fail = 0;
		while (cb_data.event_read_done < SMUX_RX_WM_HIGH) {
			UT_ASSERT_INT(
				(int)wait_for_completion_timeout(
					&cb_data.cb_completion, 2*HZ),
				>, 0);
			INIT_COMPLETION(cb_data.cb_completion);
		}
		if (failed)
			break;
		UT_ASSERT_INT(0, ==, cb_data.event_read_failed);
		UT_ASSERT_INT(SMUX_RX_WM_HIGH, ==,
				cb_data.event_read_done);
		UT_ASSERT_INT(cb_data.event_rx_retry_low_wm, ==, 1);
		mock_cb_data_reset(&cb_data);

		/* close port */
		ret = msm_smux_close(SMUX_TEST_LCID);
		UT_ASSERT_INT(ret, ==, 0);
		while (cb_data.cb_count < 3) {
			UT_ASSERT_INT(
				(int)wait_for_completion_timeout(
					&cb_data.cb_completion, HZ),
				>, 0);
			INIT_COMPLETION(cb_data.cb_completion);
		}
		UT_ASSERT_INT(cb_data.cb_count, ==, 3);
		UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
		UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
		UT_ASSERT_INT(cb_data.event_local_closed, ==, 1);
		UT_ASSERT_INT(cb_data.event_remote_closed, ==, 1);
		break;
	}

	if (!failed) {
		i += scnprintf(buf + i, max - i, "\tOK\n");
	} else {
		pr_err("%s: Failed\n", __func__);
		i += scnprintf(buf + i, max - i, "\tFailed\n");
		i += mock_cb_data_print(&cb_data, buf + i, max - i);
		msm_smux_close(SMUX_TEST_LCID);
	}

	smux_byte_loopback = 0;
	mock_cb_data_reset(&cb_data);
	return i;
}

/**
 * Verify remote flow control (remote TX stop).
 *
 * @buf Buffer for status message
 * @max Size of buffer
 *
 * @returns Number of bytes written to @buf
 */
static int smux_ut_remote_tx_stop(char *buf, int max)
{
	static struct smux_mock_callback cb_data;
	static int cb_initialized;
	int i = 0;
	int failed = 0;
	int ret;

	i += scnprintf(buf + i, max - i, "Running %s\n", __func__);
	pr_err("%s", buf);

	if (!cb_initialized)
		mock_cb_data_init(&cb_data);

	mock_cb_data_reset(&cb_data);
	while (!failed) {
		/* open port for remote loopback */
		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
				SMUX_CH_OPTION_REMOTE_LOOPBACK, 0);
		UT_ASSERT_INT(ret, ==, 0);

		ret = msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
					get_rx_buffer);
		UT_ASSERT_INT(ret, ==, 0);
		UT_ASSERT_INT(
			(int)wait_for_completion_timeout(
				&cb_data.cb_completion, HZ), >, 0);
		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
		UT_ASSERT_INT(cb_data.event_connected, ==, 1);
		mock_cb_data_reset(&cb_data);

		/* send 1 packet and verify response */
		ret = msm_smux_write(SMUX_TEST_LCID, (void *)1,
					test_array, sizeof(test_array));
		UT_ASSERT_INT(ret, ==, 0);
		UT_ASSERT_INT(
			(int)wait_for_completion_timeout(
				&cb_data.cb_completion, HZ),
			>, 0);
		UT_ASSERT_INT(cb_data.event_write_done, ==, 1);
		INIT_COMPLETION(cb_data.cb_completion);

		if (!cb_data.event_read_done) {
			UT_ASSERT_INT(
				(int)wait_for_completion_timeout(
					&cb_data.cb_completion, HZ),
				>, 0);
		}
		UT_ASSERT_INT(cb_data.event_read_done, ==, 1);
		mock_cb_data_reset(&cb_data);

		/* enable flow control */
		UT_ASSERT_INT(smux_lch[SMUX_TEST_LCID].tx_flow_control, ==, 0);
		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
				SMUX_CH_OPTION_REMOTE_TX_STOP, 0);
		UT_ASSERT_INT(ret, ==, 0);

		/* wait for remote echo and clear our local tx_flow_control */
		msleep(500);
		UT_ASSERT_INT(smux_lch[SMUX_TEST_LCID].tx_flow_control, ==, 1);
		smux_lch[SMUX_TEST_LCID].tx_flow_control = 0;

		/* Send 1 packet and verify no response */
		ret = msm_smux_write(SMUX_TEST_LCID, (void *)2,
					test_array, sizeof(test_array));
		UT_ASSERT_INT(ret, ==, 0);
		UT_ASSERT_INT(
			(int)wait_for_completion_timeout(
				&cb_data.cb_completion, HZ),
			>, 0);
		INIT_COMPLETION(cb_data.cb_completion);
		UT_ASSERT_INT(cb_data.event_write_done, ==, 1);
		UT_ASSERT_INT(cb_data.event_read_done, ==, 0);
		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
		UT_ASSERT_INT(
			(int)wait_for_completion_timeout(
				&cb_data.cb_completion, 1*HZ),
			==, 0);
		UT_ASSERT_INT(cb_data.event_read_done, ==, 0);
		mock_cb_data_reset(&cb_data);

		/* disable flow control and verify response is received */
		UT_ASSERT_INT(cb_data.event_read_done, ==, 0);
		ret = msm_smux_set_ch_option(SMUX_TEST_LCID,
				0, SMUX_CH_OPTION_REMOTE_TX_STOP);
		UT_ASSERT_INT(ret, ==, 0);
		UT_ASSERT_INT(
			(int)wait_for_completion_timeout(
				&cb_data.cb_completion, HZ),
			>, 0);
		UT_ASSERT_INT(cb_data.event_read_done, ==, 1);
		mock_cb_data_reset(&cb_data);

		/* close port */
		ret = msm_smux_close(SMUX_TEST_LCID);
		UT_ASSERT_INT(ret, ==, 0);
		while (cb_data.cb_count < 3) {
			UT_ASSERT_INT(
				(int)wait_for_completion_timeout(
					&cb_data.cb_completion, HZ),
				>, 0);
			INIT_COMPLETION(cb_data.cb_completion);
		}
		UT_ASSERT_INT(cb_data.cb_count, ==, 3);
		UT_ASSERT_INT(cb_data.event_disconnected, ==, 1);
		UT_ASSERT_INT(cb_data.event_disconnected_ssr, ==, 0);
		UT_ASSERT_INT(cb_data.event_local_closed, ==, 1);
		UT_ASSERT_INT(cb_data.event_remote_closed, ==, 1);
		break;
	}

	if (!failed) {
		i += scnprintf(buf + i, max - i, "\tOK\n");
	} else {
		pr_err("%s: Failed\n", __func__);
		i += scnprintf(buf + i, max - i, "\tFailed\n");
		i += mock_cb_data_print(&cb_data, buf + i, max - i);
		msm_smux_set_ch_option(SMUX_TEST_LCID,
				0, SMUX_CH_OPTION_REMOTE_TX_STOP);
		msm_smux_close(SMUX_TEST_LCID);
	}

	mock_cb_data_reset(&cb_data);
	return i;
}

/**
 * Verify remote-initiated wakeup.
 *
 * @buf Output buffer for failure/status messages
 * @max Size of @buf
 *
 * @returns Number of bytes written to @buf
 */
static int smux_ut_remote_initiated_wakeup(char *buf, int max)
{
	int i = 0;
	int failed = 0;
	static struct smux_mock_callback cb_data;
	static int cb_initialized;
	int ret;

	if (!cb_initialized)
		mock_cb_data_init(&cb_data);

	smux_set_loopback_data_reply_delay(SMUX_REMOTE_DELAY_TIME_MS);
	mock_cb_data_reset(&cb_data);
	do {
		unsigned long start_j;
		unsigned transfer_time;
		unsigned lwakeups_start;
		unsigned rwakeups_start;
		unsigned lwakeups_end;
		unsigned rwakeups_end;
		unsigned lwakeup_delta;
		unsigned rwakeup_delta;

		/* open port */
		ret = msm_smux_open(SMUX_TEST_LCID, &cb_data, smux_mock_cb,
					get_rx_buffer);
		UT_ASSERT_INT(ret, ==, 0);
		UT_ASSERT_INT(
			(int)wait_for_completion_timeout(
				&cb_data.cb_completion, HZ), >, 0);
		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
		UT_ASSERT_INT(cb_data.event_connected, ==, 1);
		mock_cb_data_reset(&cb_data);

		/* do local wakeup test and send echo packet */
		msleep(SMUX_REMOTE_INACTIVITY_TIME_MS);
		smux_get_wakeup_counts(&lwakeups_start, &rwakeups_start);
		ret = msm_smux_write(SMUX_TEST_LCID, (void *)0x12345678,
					"Hello", 5);
		UT_ASSERT_INT(ret, ==, 0);
		UT_ASSERT_INT(
			(int)wait_for_completion_timeout(
				&cb_data.cb_completion, HZ), >, 0);
		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
		UT_ASSERT_INT(cb_data.event_write_done, ==, 1);
		mock_cb_data_reset(&cb_data);

		/* verify local initiated wakeup */
		smux_get_wakeup_counts(&lwakeups_end, &rwakeups_end);
		if (lwakeups_end > lwakeups_start)
			i += scnprintf(buf + i, max - i,
					"\tGood - have Apps-initiated wakeup\n");
		else
			i += scnprintf(buf + i, max - i,
					"\tBad - no Apps-initiated wakeup\n");

		/* verify remote wakeup and echo response */
		smux_get_wakeup_counts(&lwakeups_start, &rwakeups_start);
		start_j = jiffies;
		INIT_COMPLETION(cb_data.cb_completion);
		if (!cb_data.event_read_done)
			UT_ASSERT_INT(
				(int)wait_for_completion_timeout(
					&cb_data.cb_completion,
					msecs_to_jiffies(
						SMUX_REMOTE_DELAY_TIME_MS * 2)),
				>, 0);
		transfer_time = (unsigned)jiffies_to_msecs(jiffies - start_j);
		UT_ASSERT_INT(cb_data.event_read_done, ==, 1);
		UT_ASSERT_INT_IN_RANGE(transfer_time,
				SMUX_REMOTE_DELAY_TIME_MS -
				SMUX_REMOTE_INACTIVITY_TIME_MS,
				SMUX_REMOTE_DELAY_TIME_MS +
				SMUX_REMOTE_INACTIVITY_TIME_MS);
		smux_get_wakeup_counts(&lwakeups_end, &rwakeups_end);
		lwakeup_delta = lwakeups_end - lwakeups_start;
		rwakeup_delta = rwakeups_end - rwakeups_start;
		if (rwakeup_delta && lwakeup_delta) {
			i += scnprintf(buf + i, max - i,
					"\tBoth local and remote wakeup - re-run test (transfer time %d ms)\n",
					transfer_time);
			failed = 1;
			break;
		} else if (lwakeup_delta) {
			i += scnprintf(buf + i, max - i,
					"\tLocal wakeup only (transfer time %d ms) - FAIL\n",
					transfer_time);
			failed = 1;
			break;
		} else {
			i += scnprintf(buf + i, max - i,
					"\tRemote wakeup verified (transfer time %d ms) - OK\n",
					transfer_time);
		}
	} while (0);

	if (!failed) {
		i += scnprintf(buf + i, max - i, "\tOK\n");
	} else {
		pr_err("%s: Failed\n", __func__);
		i += scnprintf(buf + i, max - i, "\tFailed\n");
		i += mock_cb_data_print(&cb_data, buf + i, max - i);
	}

	mock_cb_data_reset(&cb_data);
	msm_smux_close(SMUX_TEST_LCID);
	wait_for_completion_timeout(&cb_data.cb_completion, HZ);
	mock_cb_data_reset(&cb_data);
	smux_set_loopback_data_reply_delay(0);
	return i;
}

static char debug_buffer[DEBUG_BUFMAX];

static ssize_t debug_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	int (*fill)(char *buf, int max) = file->private_data;
	int bsize;

	if (*ppos != 0)
		return 0;

	bsize = fill(debug_buffer, DEBUG_BUFMAX);
	return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
}

static int debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static const struct file_operations debug_ops = {
	.read = debug_read,
	.open = debug_open,
};
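
/*
 * Note: each read of a test file invokes the fill() function stored in
 * file->private_data (see debug_read() above), so "cat"-ing a file under
 * the n_smux_test debugfs directory re-runs the corresponding unit test
 * and returns its status report.
 */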

static void debug_create(const char *name, mode_t mode,
				struct dentry *dent,
				int (*fill)(char *buf, int max))
{
	debugfs_create_file(name, mode, dent, fill, &debug_ops);
}

static int __init smux_debugfs_init(void)
{
	struct dentry *dent;

	dent = debugfs_create_dir("n_smux_test", 0);
	if (IS_ERR(dent))
		return PTR_ERR(dent);

	/*
	 * Add unit test entries.
	 *
	 * The idea with unit tests is that you can run all of them
	 * from the adb shell:
	 *	adb shell
	 *	cat ut*
	 *
	 * If particular tests fail, you can then repeatedly run just the
	 * failing tests as you debug and resolve the problem.
	 */
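	/*
	 * For example, a single test can be run with (assuming debugfs is
	 * mounted at /sys/kernel/debug):
	 *	adb shell cat /sys/kernel/debug/n_smux_test/ut_local_basic
	 */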
	debug_create("ut_local_basic", 0444, dent, smux_ut_basic);
	debug_create("ut_remote_basic", 0444, dent, smux_ut_remote_basic);
	debug_create("ut_local_big_pkt", 0444, dent, smux_ut_local_big_pkt);
	debug_create("ut_remote_big_pkt", 0444, dent, smux_ut_remote_big_pkt);
	debug_create("ut_local_tiocm", 0444, dent, smux_ut_local_tiocm);
	debug_create("ut_remote_tiocm", 0444, dent, smux_ut_remote_tiocm);
	debug_create("ut_local_wm", 0444, dent, smux_ut_local_wm);
	debug_create("ut_local_smuxld_receive_buf", 0444, dent,
			smux_ut_local_smuxld_receive_buf);
	debug_create("ut_local_get_rx_buff_retry", 0444, dent,
			smux_ut_local_get_rx_buff_retry);
	debug_create("ut_local_get_rx_buff_retry_auto", 0444, dent,
			smux_ut_local_get_rx_buff_retry_auto);
	debug_create("ut_ssr_remote_basic", 0444, dent,
			smux_ut_ssr_remote_basic);
	debug_create("ut_ssr_remote_open", 0444, dent,
			smux_ut_ssr_remote_open);
	debug_create("ut_ssr_remote_rx_buff_retry", 0444, dent,
			smux_ut_ssr_remote_rx_buff_retry);
	debug_create("ut_remote_tx_stop", 0444, dent,
			smux_ut_remote_tx_stop);
	debug_create("ut_remote_throughput", 0444, dent,
			smux_ut_remote_throughput);
	debug_create("ut_remote_initiated_wakeup", 0444, dent,
			smux_ut_remote_initiated_wakeup);

	return 0;
}
late_initcall(smux_debugfs_init);