mmc_block_test.c 89 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
732783279328032813282328332843285328632873288
  1. /* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. *
  12. */
  13. /* MMC block test */
  14. #include <linux/module.h>
  15. #include <linux/blkdev.h>
  16. #include <linux/debugfs.h>
  17. #include <linux/mmc/card.h>
  18. #include <linux/mmc/host.h>
  19. #include <linux/delay.h>
  20. #include <linux/test-iosched.h>
  21. #include "queue.h"
  22. #include <linux/mmc/mmc.h>
/* Module-wide constants and helper macros for the MMC block tests. */
#define MODULE_NAME "mmc_block_test"
#define TEST_MAX_SECTOR_RANGE (600*1024*1024) /* 600 MB */
#define TEST_MAX_BIOS_PER_REQ 128
/* Bit 30 of the CMD23 argument marks a packed command */
#define CMD23_PACKED_BIT (1 << 30)
/* Large primes used to scramble the pseudo-random test seed */
#define LARGE_PRIME_1 1103515367
#define LARGE_PRIME_2 35757
/* Field masks for word 0 of the packed write command header */
#define PACKED_HDR_VER_MASK 0x000000FF
#define PACKED_HDR_RW_MASK 0x0000FF00
#define PACKED_HDR_NUM_REQS_MASK 0x00FF0000
#define PACKED_HDR_BITS_16_TO_29_SET 0x3FFF0000
#define SECTOR_SIZE 512
#define NUM_OF_SECTORS_PER_BIO ((BIO_U32_SIZE * 4) / SECTOR_SIZE)
#define BIO_TO_SECTOR(x) (x * NUM_OF_SECTORS_PER_BIO)
/* the desired long test size to be read */
#define LONG_READ_TEST_MAX_NUM_BYTES (50*1024*1024) /* 50MB */
/* the minimum amount of requests that will be created */
#define LONG_WRITE_TEST_MIN_NUM_REQS 200 /* 100MB */
/* request queue limitation is 128 requests, and we leave 10 spare requests */
#define TEST_MAX_REQUESTS 118
#define LONG_READ_TEST_MAX_NUM_REQS (LONG_READ_TEST_MAX_NUM_BYTES / \
		(TEST_MAX_BIOS_PER_REQ * sizeof(int) * BIO_U32_SIZE))
/* this doesn't allow the test requests num to be greater than the maximum */
#define LONG_READ_TEST_ACTUAL_NUM_REQS \
		((TEST_MAX_REQUESTS < LONG_READ_TEST_MAX_NUM_REQS) ? \
		TEST_MAX_REQUESTS : LONG_READ_TEST_MAX_NUM_REQS)
#define MB_MSEC_RATIO_APPROXIMATION ((1024 * 1024) / 1000)
/* actual number of bytes in test */
#define LONG_READ_NUM_BYTES (LONG_READ_TEST_ACTUAL_NUM_REQS * \
		(TEST_MAX_BIOS_PER_REQ * sizeof(int) * BIO_U32_SIZE))
/* actual number of MiB in test multiplied by 10, for single digit precision*/
#define BYTE_TO_MB_x_10(x) ((x * 10) / (1024 * 1024))
/* extract integer value */
#define LONG_TEST_SIZE_INTEGER(x) (BYTE_TO_MB_x_10(x) / 10)
/* and calculate the MiB value fraction */
#define LONG_TEST_SIZE_FRACTION(x) (BYTE_TO_MB_x_10(x) - \
		(LONG_TEST_SIZE_INTEGER(x) * 10))
#define LONG_WRITE_TEST_SLEEP_TIME_MS 5
/* Logging helpers that prefix every message with the module name */
#define test_pr_debug(fmt, args...) pr_debug("%s: "fmt"\n", MODULE_NAME, args)
#define test_pr_info(fmt, args...) pr_info("%s: "fmt"\n", MODULE_NAME, args)
#define test_pr_err(fmt, args...) pr_err("%s: "fmt"\n", MODULE_NAME, args)
#define SANITIZE_TEST_TIMEOUT 240000
#define NEW_REQ_TEST_SLEEP_TIME 1
#define NEW_REQ_TEST_NUM_BIOS 64
#define TEST_REQUEST_NUM_OF_BIOS 3
/* True when the observed BKOPs statistics differ from the expected values */
#define CHECK_BKOPS_STATS(stats, exp_bkops, exp_hpi, exp_suspend) \
		((stats.bkops != exp_bkops) || \
		(stats.hpi != exp_hpi) || \
		(stats.suspend != exp_suspend))
#define BKOPS_TEST_TIMEOUT 60000
/* Whether a testcase runs with random or fixed request/bio counts */
enum is_random {
	NON_RANDOM_TEST,
	RANDOM_TEST,
};
/*
 * All testcases implemented by this module, partitioned into groups.
 * Each group is bracketed by *_MIN_TESTCASE / *_MAX_TESTCASE markers so
 * a testcase's group membership can be determined by a range check.
 */
enum mmc_block_test_testcases {
	/* Start of send write packing test group */
	SEND_WRITE_PACKING_MIN_TESTCASE,
	TEST_STOP_DUE_TO_READ = SEND_WRITE_PACKING_MIN_TESTCASE,
	TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS,
	TEST_STOP_DUE_TO_FLUSH,
	TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS,
	TEST_STOP_DUE_TO_EMPTY_QUEUE,
	TEST_STOP_DUE_TO_MAX_REQ_NUM,
	TEST_STOP_DUE_TO_THRESHOLD,
	SEND_WRITE_PACKING_MAX_TESTCASE = TEST_STOP_DUE_TO_THRESHOLD,

	/* Start of err check test group */
	ERR_CHECK_MIN_TESTCASE,
	TEST_RET_ABORT = ERR_CHECK_MIN_TESTCASE,
	TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS,
	TEST_RET_PARTIAL_FOLLOWED_BY_ABORT,
	TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS,
	TEST_RET_PARTIAL_MAX_FAIL_IDX,
	TEST_RET_RETRY,
	TEST_RET_CMD_ERR,
	TEST_RET_DATA_ERR,
	ERR_CHECK_MAX_TESTCASE = TEST_RET_DATA_ERR,

	/* Start of send invalid test group */
	INVALID_CMD_MIN_TESTCASE,
	TEST_HDR_INVALID_VERSION = INVALID_CMD_MIN_TESTCASE,
	TEST_HDR_WRONG_WRITE_CODE,
	TEST_HDR_INVALID_RW_CODE,
	TEST_HDR_DIFFERENT_ADDRESSES,
	TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL,
	TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL,
	TEST_HDR_CMD23_PACKED_BIT_SET,
	TEST_CMD23_MAX_PACKED_WRITES,
	TEST_CMD23_ZERO_PACKED_WRITES,
	TEST_CMD23_PACKED_BIT_UNSET,
	TEST_CMD23_REL_WR_BIT_SET,
	TEST_CMD23_BITS_16TO29_SET,
	TEST_CMD23_HDR_BLK_NOT_IN_COUNT,
	INVALID_CMD_MAX_TESTCASE = TEST_CMD23_HDR_BLK_NOT_IN_COUNT,

	/*
	 * Start of packing control test group.
	 * in these next testcases the abbreviation FB = followed by
	 */
	PACKING_CONTROL_MIN_TESTCASE,
	TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ =
				PACKING_CONTROL_MIN_TESTCASE,
	TEST_PACKING_EXP_N_OVER_TRIGGER,
	TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ,
	TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N,
	TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER,
	TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS,
	TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS,
	TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER,
	TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER,
	TEST_PACK_MIX_PACKED_NO_PACKED_PACKED,
	TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED,
	PACKING_CONTROL_MAX_TESTCASE = TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED,

	/* Standalone testcase, not part of a bracketed group */
	TEST_WRITE_DISCARD_SANITIZE_READ,

	/* Start of bkops test group */
	BKOPS_MIN_TESTCASE,
	BKOPS_DELAYED_WORK_LEVEL_1 = BKOPS_MIN_TESTCASE,
	BKOPS_DELAYED_WORK_LEVEL_1_HPI,
	BKOPS_CANCEL_DELAYED_WORK,
	BKOPS_URGENT_LEVEL_2,
	BKOPS_URGENT_LEVEL_2_TWO_REQS,
	BKOPS_URGENT_LEVEL_3,
	BKOPS_MAX_TESTCASE = BKOPS_URGENT_LEVEL_3,

	/* Ungrouped long-sequential and notification testcases */
	TEST_LONG_SEQUENTIAL_READ,
	TEST_LONG_SEQUENTIAL_WRITE,
	TEST_NEW_REQ_NOTIFICATION,
};
/* Test group identifiers; stored in mmc_block_test_data.test_group */
enum mmc_block_test_group {
	TEST_NO_GROUP,
	TEST_GENERAL_GROUP,
	TEST_SEND_WRITE_PACKING_GROUP,
	TEST_ERR_CHECK_GROUP,
	TEST_SEND_INVALID_GROUP,
	TEST_PACKING_CONTROL_GROUP,
	TEST_BKOPS_GROUP,
	TEST_NEW_NOTIFICATION_GROUP,
};
/* Progress stages of a BKOPs testcase (see mmc_block_test_data.bkops_stage) */
enum bkops_test_stages {
	BKOPS_STAGE_1,
	BKOPS_STAGE_2,
	BKOPS_STAGE_3,
	BKOPS_STAGE_4,
};
/* debugfs dentries, one per user-triggerable test entry point */
struct mmc_block_test_debug {
	struct dentry *send_write_packing_test;
	struct dentry *err_check_test;
	struct dentry *send_invalid_packed_test;
	struct dentry *random_test_seed;
	struct dentry *packing_control_test;
	struct dentry *discard_sanitize_test;
	struct dentry *bkops_test;
	struct dentry *long_sequential_read_test;
	struct dentry *long_sequential_write_test;
	struct dentry *new_req_notification_test;
};
/* Aggregated state shared by all testcases in this module */
struct mmc_block_test_data {
	/* The number of write requests that the test will issue */
	int num_requests;
	/* The expected write packing statistics for the current test */
	struct mmc_wr_pack_stats exp_packed_stats;
	/*
	 * A user-defined seed for random choices of number of bios written in
	 * a request, and of number of requests issued in a test
	 * This field is randomly updated after each use
	 */
	unsigned int random_test_seed;
	/* A retry counter used in err_check tests */
	int err_check_counter;
	/* Can be one of the values of enum test_group */
	enum mmc_block_test_group test_group;
	/*
	 * Indicates if the current testcase is running with random values of
	 * num_requests and num_bios (in each request)
	 */
	int is_random;
	/* Data structure for debugfs dentrys */
	struct mmc_block_test_debug debug;
	/*
	 * Data structure containing individual test information, including
	 * self-defined specific data
	 */
	struct test_info test_info;
	/* mmc block device test */
	struct blk_dev_test_type bdt;
	/* Current BKOPs test stage */
	enum bkops_test_stages bkops_stage;
	/* A wait queue for BKOPs tests */
	wait_queue_head_t bkops_wait_q;
	/* A counter for the number of test requests completed */
	unsigned int completed_req_count;
};

/* Module-wide test context; allocation site is elsewhere in this file */
static struct mmc_block_test_data *mbtd;
  211. void print_mmc_packing_stats(struct mmc_card *card)
  212. {
  213. int i;
  214. int max_num_of_packed_reqs = 0;
  215. if ((!card) || (!card->wr_pack_stats.packing_events))
  216. return;
  217. max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
  218. spin_lock(&card->wr_pack_stats.lock);
  219. pr_info("%s: write packing statistics:\n",
  220. mmc_hostname(card->host));
  221. for (i = 1 ; i <= max_num_of_packed_reqs ; ++i) {
  222. if (card->wr_pack_stats.packing_events[i] != 0)
  223. pr_info("%s: Packed %d reqs - %d times\n",
  224. mmc_hostname(card->host), i,
  225. card->wr_pack_stats.packing_events[i]);
  226. }
  227. pr_info("%s: stopped packing due to the following reasons:\n",
  228. mmc_hostname(card->host));
  229. if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS])
  230. pr_info("%s: %d times: exceedmax num of segments\n",
  231. mmc_hostname(card->host),
  232. card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
  233. if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS])
  234. pr_info("%s: %d times: exceeding the max num of sectors\n",
  235. mmc_hostname(card->host),
  236. card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS]);
  237. if (card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR])
  238. pr_info("%s: %d times: wrong data direction\n",
  239. mmc_hostname(card->host),
  240. card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR]);
  241. if (card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD])
  242. pr_info("%s: %d times: flush or discard\n",
  243. mmc_hostname(card->host),
  244. card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
  245. if (card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE])
  246. pr_info("%s: %d times: empty queue\n",
  247. mmc_hostname(card->host),
  248. card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE]);
  249. if (card->wr_pack_stats.pack_stop_reason[REL_WRITE])
  250. pr_info("%s: %d times: rel write\n",
  251. mmc_hostname(card->host),
  252. card->wr_pack_stats.pack_stop_reason[REL_WRITE]);
  253. if (card->wr_pack_stats.pack_stop_reason[THRESHOLD])
  254. pr_info("%s: %d times: Threshold\n",
  255. mmc_hostname(card->host),
  256. card->wr_pack_stats.pack_stop_reason[THRESHOLD]);
  257. spin_unlock(&card->wr_pack_stats.lock);
  258. }
  259. /*
  260. * A callback assigned to the packed_test_fn field.
  261. * Called from block layer in mmc_blk_packed_hdr_wrq_prep.
  262. * Here we alter the packed header or CMD23 in order to send an invalid
  263. * packed command to the card.
  264. */
  265. static void test_invalid_packed_cmd(struct request_queue *q,
  266. struct mmc_queue_req *mqrq)
  267. {
  268. struct mmc_queue *mq = q->queuedata;
  269. u32 *packed_cmd_hdr = mqrq->packed_cmd_hdr;
  270. struct request *req = mqrq->req;
  271. struct request *second_rq;
  272. struct test_request *test_rq;
  273. struct mmc_blk_request *brq = &mqrq->brq;
  274. int num_requests;
  275. int max_packed_reqs;
  276. if (!mq) {
  277. test_pr_err("%s: NULL mq", __func__);
  278. return;
  279. }
  280. test_rq = (struct test_request *)req->elv.priv[0];
  281. if (!test_rq) {
  282. test_pr_err("%s: NULL test_rq", __func__);
  283. return;
  284. }
  285. max_packed_reqs = mq->card->ext_csd.max_packed_writes;
  286. switch (mbtd->test_info.testcase) {
  287. case TEST_HDR_INVALID_VERSION:
  288. test_pr_info("%s: set invalid header version", __func__);
  289. /* Put 0 in header version field (1 byte, offset 0 in header) */
  290. packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_VER_MASK;
  291. break;
  292. case TEST_HDR_WRONG_WRITE_CODE:
  293. test_pr_info("%s: wrong write code", __func__);
  294. /* Set R/W field with R value (1 byte, offset 1 in header) */
  295. packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK;
  296. packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000100;
  297. break;
  298. case TEST_HDR_INVALID_RW_CODE:
  299. test_pr_info("%s: invalid r/w code", __func__);
  300. /* Set R/W field with invalid value */
  301. packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK;
  302. packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000400;
  303. break;
  304. case TEST_HDR_DIFFERENT_ADDRESSES:
  305. test_pr_info("%s: different addresses", __func__);
  306. second_rq = list_entry(req->queuelist.next, struct request,
  307. queuelist);
  308. test_pr_info("%s: test_rq->sector=%ld, second_rq->sector=%ld",
  309. __func__, (long)req->__sector,
  310. (long)second_rq->__sector);
  311. /*
  312. * Put start sector of second write request in the first write
  313. * request's cmd25 argument in the packed header
  314. */
  315. packed_cmd_hdr[3] = second_rq->__sector;
  316. break;
  317. case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
  318. test_pr_info("%s: request num smaller than actual" , __func__);
  319. num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK)
  320. >> 16;
  321. /* num of entries is decremented by 1 */
  322. num_requests = (num_requests - 1) << 16;
  323. /*
  324. * Set number of requests field in packed write header to be
  325. * smaller than the actual number (1 byte, offset 2 in header)
  326. */
  327. packed_cmd_hdr[0] = (packed_cmd_hdr[0] &
  328. ~PACKED_HDR_NUM_REQS_MASK) + num_requests;
  329. break;
  330. case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
  331. test_pr_info("%s: request num larger than actual" , __func__);
  332. num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK)
  333. >> 16;
  334. /* num of entries is incremented by 1 */
  335. num_requests = (num_requests + 1) << 16;
  336. /*
  337. * Set number of requests field in packed write header to be
  338. * larger than the actual number (1 byte, offset 2 in header).
  339. */
  340. packed_cmd_hdr[0] = (packed_cmd_hdr[0] &
  341. ~PACKED_HDR_NUM_REQS_MASK) + num_requests;
  342. break;
  343. case TEST_HDR_CMD23_PACKED_BIT_SET:
  344. test_pr_info("%s: header CMD23 packed bit set" , __func__);
  345. /*
  346. * Set packed bit (bit 30) in cmd23 argument of first and second
  347. * write requests in packed write header.
  348. * These are located at bytes 2 and 4 in packed write header
  349. */
  350. packed_cmd_hdr[2] = packed_cmd_hdr[2] | CMD23_PACKED_BIT;
  351. packed_cmd_hdr[4] = packed_cmd_hdr[4] | CMD23_PACKED_BIT;
  352. break;
  353. case TEST_CMD23_MAX_PACKED_WRITES:
  354. test_pr_info("%s: CMD23 request num > max_packed_reqs",
  355. __func__);
  356. /*
  357. * Set the individual packed cmd23 request num to
  358. * max_packed_reqs + 1
  359. */
  360. brq->sbc.arg = MMC_CMD23_ARG_PACKED | (max_packed_reqs + 1);
  361. break;
  362. case TEST_CMD23_ZERO_PACKED_WRITES:
  363. test_pr_info("%s: CMD23 request num = 0", __func__);
  364. /* Set the individual packed cmd23 request num to zero */
  365. brq->sbc.arg = MMC_CMD23_ARG_PACKED;
  366. break;
  367. case TEST_CMD23_PACKED_BIT_UNSET:
  368. test_pr_info("%s: CMD23 packed bit unset", __func__);
  369. /*
  370. * Set the individual packed cmd23 packed bit to 0,
  371. * although there is a packed write request
  372. */
  373. brq->sbc.arg &= ~CMD23_PACKED_BIT;
  374. break;
  375. case TEST_CMD23_REL_WR_BIT_SET:
  376. test_pr_info("%s: CMD23 REL WR bit set", __func__);
  377. /* Set the individual packed cmd23 reliable write bit */
  378. brq->sbc.arg = MMC_CMD23_ARG_PACKED | MMC_CMD23_ARG_REL_WR;
  379. break;
  380. case TEST_CMD23_BITS_16TO29_SET:
  381. test_pr_info("%s: CMD23 bits [16-29] set", __func__);
  382. brq->sbc.arg = MMC_CMD23_ARG_PACKED |
  383. PACKED_HDR_BITS_16_TO_29_SET;
  384. break;
  385. case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
  386. test_pr_info("%s: CMD23 hdr not in block count", __func__);
  387. brq->sbc.arg = MMC_CMD23_ARG_PACKED |
  388. ((rq_data_dir(req) == READ) ? 0 : mqrq->packed_blocks);
  389. break;
  390. default:
  391. test_pr_err("%s: unexpected testcase %d",
  392. __func__, mbtd->test_info.testcase);
  393. break;
  394. }
  395. }
  396. /*
  397. * A callback assigned to the err_check_fn field of the mmc_request by the
  398. * MMC/card/block layer.
  399. * Called upon request completion by the MMC/core layer.
  400. * Here we emulate an error return value from the card.
  401. */
  402. static int test_err_check(struct mmc_card *card, struct mmc_async_req *areq)
  403. {
  404. struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
  405. mmc_active);
  406. struct request_queue *req_q = test_iosched_get_req_queue();
  407. struct mmc_queue *mq;
  408. int max_packed_reqs;
  409. int ret = 0;
  410. struct mmc_blk_request *brq;
  411. if (req_q)
  412. mq = req_q->queuedata;
  413. else {
  414. test_pr_err("%s: NULL request_queue", __func__);
  415. return 0;
  416. }
  417. if (!mq) {
  418. test_pr_err("%s: %s: NULL mq", __func__,
  419. mmc_hostname(card->host));
  420. return 0;
  421. }
  422. max_packed_reqs = mq->card->ext_csd.max_packed_writes;
  423. if (!mq_rq) {
  424. test_pr_err("%s: %s: NULL mq_rq", __func__,
  425. mmc_hostname(card->host));
  426. return 0;
  427. }
  428. brq = &mq_rq->brq;
  429. switch (mbtd->test_info.testcase) {
  430. case TEST_RET_ABORT:
  431. test_pr_info("%s: return abort", __func__);
  432. ret = MMC_BLK_ABORT;
  433. break;
  434. case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
  435. test_pr_info("%s: return partial followed by success",
  436. __func__);
  437. /*
  438. * Since in this testcase num_requests is always >= 2,
  439. * we can be sure that packed_fail_idx is always >= 1
  440. */
  441. mq_rq->packed_fail_idx = (mbtd->num_requests / 2);
  442. test_pr_info("%s: packed_fail_idx = %d"
  443. , __func__, mq_rq->packed_fail_idx);
  444. mq->err_check_fn = NULL;
  445. ret = MMC_BLK_PARTIAL;
  446. break;
  447. case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
  448. if (!mbtd->err_check_counter) {
  449. test_pr_info("%s: return partial followed by abort",
  450. __func__);
  451. mbtd->err_check_counter++;
  452. /*
  453. * Since in this testcase num_requests is always >= 3,
  454. * we have that packed_fail_idx is always >= 1
  455. */
  456. mq_rq->packed_fail_idx = (mbtd->num_requests / 2);
  457. test_pr_info("%s: packed_fail_idx = %d"
  458. , __func__, mq_rq->packed_fail_idx);
  459. ret = MMC_BLK_PARTIAL;
  460. break;
  461. }
  462. mbtd->err_check_counter = 0;
  463. mq->err_check_fn = NULL;
  464. ret = MMC_BLK_ABORT;
  465. break;
  466. case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
  467. test_pr_info("%s: return partial multiple until success",
  468. __func__);
  469. if (++mbtd->err_check_counter >= (mbtd->num_requests)) {
  470. mq->err_check_fn = NULL;
  471. mbtd->err_check_counter = 0;
  472. ret = MMC_BLK_PARTIAL;
  473. break;
  474. }
  475. mq_rq->packed_fail_idx = 1;
  476. ret = MMC_BLK_PARTIAL;
  477. break;
  478. case TEST_RET_PARTIAL_MAX_FAIL_IDX:
  479. test_pr_info("%s: return partial max fail_idx", __func__);
  480. mq_rq->packed_fail_idx = max_packed_reqs - 1;
  481. mq->err_check_fn = NULL;
  482. ret = MMC_BLK_PARTIAL;
  483. break;
  484. case TEST_RET_RETRY:
  485. test_pr_info("%s: return retry", __func__);
  486. ret = MMC_BLK_RETRY;
  487. break;
  488. case TEST_RET_CMD_ERR:
  489. test_pr_info("%s: return cmd err", __func__);
  490. ret = MMC_BLK_CMD_ERR;
  491. break;
  492. case TEST_RET_DATA_ERR:
  493. test_pr_info("%s: return data err", __func__);
  494. ret = MMC_BLK_DATA_ERR;
  495. break;
  496. case BKOPS_URGENT_LEVEL_2:
  497. case BKOPS_URGENT_LEVEL_3:
  498. case BKOPS_URGENT_LEVEL_2_TWO_REQS:
  499. if (mbtd->err_check_counter++ == 0) {
  500. test_pr_info("%s: simulate an exception from the card",
  501. __func__);
  502. brq->cmd.resp[0] |= R1_EXCEPTION_EVENT;
  503. }
  504. mq->err_check_fn = NULL;
  505. break;
  506. default:
  507. test_pr_err("%s: unexpected testcase %d",
  508. __func__, mbtd->test_info.testcase);
  509. }
  510. return ret;
  511. }
  512. /*
  513. * This is a specific implementation for the get_test_case_str_fn function
  514. * pointer in the test_info data structure. Given a valid test_data instance,
  515. * the function returns a string resembling the test name, based on the testcase
  516. */
  517. static char *get_test_case_str(struct test_data *td)
  518. {
  519. if (!td) {
  520. test_pr_err("%s: NULL td", __func__);
  521. return NULL;
  522. }
  523. switch (td->test_info.testcase) {
  524. case TEST_STOP_DUE_TO_FLUSH:
  525. return "\"stop due to flush\"";
  526. case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
  527. return "\"stop due to flush after max-1 reqs\"";
  528. case TEST_STOP_DUE_TO_READ:
  529. return "\"stop due to read\"";
  530. case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
  531. return "\"stop due to read after max-1 reqs\"";
  532. case TEST_STOP_DUE_TO_EMPTY_QUEUE:
  533. return "\"stop due to empty queue\"";
  534. case TEST_STOP_DUE_TO_MAX_REQ_NUM:
  535. return "\"stop due to max req num\"";
  536. case TEST_STOP_DUE_TO_THRESHOLD:
  537. return "\"stop due to exceeding threshold\"";
  538. case TEST_RET_ABORT:
  539. return "\"err_check return abort\"";
  540. case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
  541. return "\"err_check return partial followed by success\"";
  542. case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
  543. return "\"err_check return partial followed by abort\"";
  544. case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
  545. return "\"err_check return partial multiple until success\"";
  546. case TEST_RET_PARTIAL_MAX_FAIL_IDX:
  547. return "\"err_check return partial max fail index\"";
  548. case TEST_RET_RETRY:
  549. return "\"err_check return retry\"";
  550. case TEST_RET_CMD_ERR:
  551. return "\"err_check return cmd error\"";
  552. case TEST_RET_DATA_ERR:
  553. return "\"err_check return data error\"";
  554. case TEST_HDR_INVALID_VERSION:
  555. return "\"invalid - wrong header version\"";
  556. case TEST_HDR_WRONG_WRITE_CODE:
  557. return "\"invalid - wrong write code\"";
  558. case TEST_HDR_INVALID_RW_CODE:
  559. return "\"invalid - wrong R/W code\"";
  560. case TEST_HDR_DIFFERENT_ADDRESSES:
  561. return "\"invalid - header different addresses\"";
  562. case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
  563. return "\"invalid - header req num smaller than actual\"";
  564. case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
  565. return "\"invalid - header req num larger than actual\"";
  566. case TEST_HDR_CMD23_PACKED_BIT_SET:
  567. return "\"invalid - header cmd23 packed bit set\"";
  568. case TEST_CMD23_MAX_PACKED_WRITES:
  569. return "\"invalid - cmd23 max packed writes\"";
  570. case TEST_CMD23_ZERO_PACKED_WRITES:
  571. return "\"invalid - cmd23 zero packed writes\"";
  572. case TEST_CMD23_PACKED_BIT_UNSET:
  573. return "\"invalid - cmd23 packed bit unset\"";
  574. case TEST_CMD23_REL_WR_BIT_SET:
  575. return "\"invalid - cmd23 rel wr bit set\"";
  576. case TEST_CMD23_BITS_16TO29_SET:
  577. return "\"invalid - cmd23 bits [16-29] set\"";
  578. case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
  579. return "\"invalid - cmd23 header block not in count\"";
  580. case TEST_PACKING_EXP_N_OVER_TRIGGER:
  581. return "\"packing control - pack n\"";
  582. case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
  583. return "\"packing control - pack n followed by read\"";
  584. case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
  585. return "\"packing control - pack n followed by flush\"";
  586. case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
  587. return "\"packing control - pack one followed by read\"";
  588. case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER:
  589. return "\"packing control - pack threshold\"";
  590. case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
  591. return "\"packing control - no packing\"";
  592. case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
  593. return "\"packing control - no packing, trigger requests\"";
  594. case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
  595. return "\"packing control - no pack, trigger-read-trigger\"";
  596. case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
  597. return "\"packing control- no pack, trigger-flush-trigger\"";
  598. case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
  599. return "\"packing control - mix: pack -> no pack -> pack\"";
  600. case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
  601. return "\"packing control - mix: no pack->pack->no pack\"";
  602. case TEST_WRITE_DISCARD_SANITIZE_READ:
  603. return "\"write, discard, sanitize\"";
  604. case BKOPS_DELAYED_WORK_LEVEL_1:
  605. return "\"delayed work BKOPS level 1\"";
  606. case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
  607. return "\"delayed work BKOPS level 1 with HPI\"";
  608. case BKOPS_CANCEL_DELAYED_WORK:
  609. return "\"cancel delayed BKOPS work\"";
  610. case BKOPS_URGENT_LEVEL_2:
  611. return "\"urgent BKOPS level 2\"";
  612. case BKOPS_URGENT_LEVEL_2_TWO_REQS:
  613. return "\"urgent BKOPS level 2, followed by a request\"";
  614. case BKOPS_URGENT_LEVEL_3:
  615. return "\"urgent BKOPS level 3\"";
  616. case TEST_LONG_SEQUENTIAL_READ:
  617. return "\"long sequential read\"";
  618. case TEST_LONG_SEQUENTIAL_WRITE:
  619. return "\"long sequential write\"";
  620. case TEST_NEW_REQ_NOTIFICATION:
  621. return "\"new request notification test\"";
  622. default:
  623. return " Unknown testcase";
  624. }
  625. return NULL;
  626. }
  627. /*
  628. * Compare individual testcase's statistics to the expected statistics:
  629. * Compare stop reason and number of packing events
  630. */
  631. static int check_wr_packing_statistics(struct test_data *td)
  632. {
  633. struct mmc_wr_pack_stats *mmc_packed_stats;
  634. struct mmc_queue *mq = td->req_q->queuedata;
  635. int max_packed_reqs = mq->card->ext_csd.max_packed_writes;
  636. int i;
  637. struct mmc_card *card = mq->card;
  638. struct mmc_wr_pack_stats expected_stats;
  639. int *stop_reason;
  640. int ret = 0;
  641. if (!mq) {
  642. test_pr_err("%s: NULL mq", __func__);
  643. return -EINVAL;
  644. }
  645. expected_stats = mbtd->exp_packed_stats;
  646. mmc_packed_stats = mmc_blk_get_packed_statistics(card);
  647. if (!mmc_packed_stats) {
  648. test_pr_err("%s: NULL mmc_packed_stats", __func__);
  649. return -EINVAL;
  650. }
  651. if (!mmc_packed_stats->packing_events) {
  652. test_pr_err("%s: NULL packing_events", __func__);
  653. return -EINVAL;
  654. }
  655. spin_lock(&mmc_packed_stats->lock);
  656. if (!mmc_packed_stats->enabled) {
  657. test_pr_err("%s write packing statistics are not enabled",
  658. __func__);
  659. ret = -EINVAL;
  660. goto exit_err;
  661. }
  662. stop_reason = mmc_packed_stats->pack_stop_reason;
  663. for (i = 1; i <= max_packed_reqs; ++i) {
  664. if (mmc_packed_stats->packing_events[i] !=
  665. expected_stats.packing_events[i]) {
  666. test_pr_err(
  667. "%s: Wrong pack stats in index %d, got %d, expected %d",
  668. __func__, i, mmc_packed_stats->packing_events[i],
  669. expected_stats.packing_events[i]);
  670. if (td->fs_wr_reqs_during_test)
  671. goto cancel_round;
  672. ret = -EINVAL;
  673. goto exit_err;
  674. }
  675. }
  676. if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SEGMENTS] !=
  677. expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]) {
  678. test_pr_err(
  679. "%s: Wrong pack stop reason EXCEEDS_SEGMENTS %d, expected %d",
  680. __func__, stop_reason[EXCEEDS_SEGMENTS],
  681. expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
  682. if (td->fs_wr_reqs_during_test)
  683. goto cancel_round;
  684. ret = -EINVAL;
  685. goto exit_err;
  686. }
  687. if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SECTORS] !=
  688. expected_stats.pack_stop_reason[EXCEEDS_SECTORS]) {
  689. test_pr_err(
  690. "%s: Wrong pack stop reason EXCEEDS_SECTORS %d, expected %d",
  691. __func__, stop_reason[EXCEEDS_SECTORS],
  692. expected_stats.pack_stop_reason[EXCEEDS_SECTORS]);
  693. if (td->fs_wr_reqs_during_test)
  694. goto cancel_round;
  695. ret = -EINVAL;
  696. goto exit_err;
  697. }
  698. if (mmc_packed_stats->pack_stop_reason[WRONG_DATA_DIR] !=
  699. expected_stats.pack_stop_reason[WRONG_DATA_DIR]) {
  700. test_pr_err(
  701. "%s: Wrong pack stop reason WRONG_DATA_DIR %d, expected %d",
  702. __func__, stop_reason[WRONG_DATA_DIR],
  703. expected_stats.pack_stop_reason[WRONG_DATA_DIR]);
  704. if (td->fs_wr_reqs_during_test)
  705. goto cancel_round;
  706. ret = -EINVAL;
  707. goto exit_err;
  708. }
  709. if (mmc_packed_stats->pack_stop_reason[FLUSH_OR_DISCARD] !=
  710. expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]) {
  711. test_pr_err(
  712. "%s: Wrong pack stop reason FLUSH_OR_DISCARD %d, expected %d",
  713. __func__, stop_reason[FLUSH_OR_DISCARD],
  714. expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
  715. if (td->fs_wr_reqs_during_test)
  716. goto cancel_round;
  717. ret = -EINVAL;
  718. goto exit_err;
  719. }
  720. if (mmc_packed_stats->pack_stop_reason[EMPTY_QUEUE] !=
  721. expected_stats.pack_stop_reason[EMPTY_QUEUE]) {
  722. test_pr_err(
  723. "%s: Wrong pack stop reason EMPTY_QUEUE %d, expected %d",
  724. __func__, stop_reason[EMPTY_QUEUE],
  725. expected_stats.pack_stop_reason[EMPTY_QUEUE]);
  726. if (td->fs_wr_reqs_during_test)
  727. goto cancel_round;
  728. ret = -EINVAL;
  729. goto exit_err;
  730. }
  731. if (mmc_packed_stats->pack_stop_reason[REL_WRITE] !=
  732. expected_stats.pack_stop_reason[REL_WRITE]) {
  733. test_pr_err(
  734. "%s: Wrong pack stop reason REL_WRITE %d, expected %d",
  735. __func__, stop_reason[REL_WRITE],
  736. expected_stats.pack_stop_reason[REL_WRITE]);
  737. if (td->fs_wr_reqs_during_test)
  738. goto cancel_round;
  739. ret = -EINVAL;
  740. goto exit_err;
  741. }
  742. exit_err:
  743. spin_unlock(&mmc_packed_stats->lock);
  744. if (ret && mmc_packed_stats->enabled)
  745. print_mmc_packing_stats(card);
  746. return ret;
  747. cancel_round:
  748. spin_unlock(&mmc_packed_stats->lock);
  749. test_iosched_set_ignore_round(true);
  750. return 0;
  751. }
  752. /*
  753. * Pseudo-randomly choose a seed based on the last seed, and update it in
754. * seed_number, then return seed_number (mod max_val), or min_val.
  755. */
  756. static unsigned int pseudo_random_seed(unsigned int *seed_number,
  757. unsigned int min_val,
  758. unsigned int max_val)
  759. {
  760. int ret = 0;
  761. if (!seed_number)
  762. return 0;
  763. *seed_number = ((unsigned int)(((unsigned long)*seed_number *
  764. (unsigned long)LARGE_PRIME_1) + LARGE_PRIME_2));
  765. ret = (unsigned int)((*seed_number) % max_val);
  766. return (ret > min_val ? ret : min_val);
  767. }
  768. /*
  769. * Given a pseudo-random seed, find a pseudo-random num_of_bios.
  770. * Make sure that num_of_bios is not larger than TEST_MAX_SECTOR_RANGE
  771. */
  772. static void pseudo_rnd_num_of_bios(unsigned int *num_bios_seed,
  773. unsigned int *num_of_bios)
  774. {
  775. do {
  776. *num_of_bios = pseudo_random_seed(num_bios_seed, 1,
  777. TEST_MAX_BIOS_PER_REQ);
  778. if (!(*num_of_bios))
  779. *num_of_bios = 1;
  780. } while ((*num_of_bios) * BIO_U32_SIZE * 4 > TEST_MAX_SECTOR_RANGE);
  781. }
  782. /* Add a single read request to the given td's request queue */
  783. static int prepare_request_add_read(struct test_data *td)
  784. {
  785. int ret;
  786. int start_sec;
  787. if (td)
  788. start_sec = td->start_sector;
  789. else {
  790. test_pr_err("%s: NULL td", __func__);
  791. return 0;
  792. }
  793. test_pr_info("%s: Adding a read request, first req_id=%d", __func__,
  794. td->wr_rd_next_req_id);
  795. ret = test_iosched_add_wr_rd_test_req(0, READ, start_sec, 2,
  796. TEST_PATTERN_5A, NULL);
  797. if (ret) {
  798. test_pr_err("%s: failed to add a read request", __func__);
  799. return ret;
  800. }
  801. return 0;
  802. }
  803. /* Add a single flush request to the given td's request queue */
  804. static int prepare_request_add_flush(struct test_data *td)
  805. {
  806. int ret;
  807. if (!td) {
  808. test_pr_err("%s: NULL td", __func__);
  809. return 0;
  810. }
  811. test_pr_info("%s: Adding a flush request, first req_id=%d", __func__,
  812. td->unique_next_req_id);
  813. ret = test_iosched_add_unique_test_req(0, REQ_UNIQUE_FLUSH,
  814. 0, 0, NULL);
  815. if (ret) {
  816. test_pr_err("%s: failed to add a flush request", __func__);
  817. return ret;
  818. }
  819. return ret;
  820. }
  821. /*
822. * Add num_requests write requests to the given td's request queue.
  823. * If random test mode is chosen we pseudo-randomly choose the number of bios
  824. * for each write request, otherwise add between 1 to 5 bio per request.
  825. */
  826. static int prepare_request_add_write_reqs(struct test_data *td,
  827. int num_requests, int is_err_expected,
  828. int is_random)
  829. {
  830. int i;
  831. unsigned int start_sec;
  832. int num_bios;
  833. int ret = 0;
  834. unsigned int *bio_seed = &mbtd->random_test_seed;
  835. if (td)
  836. start_sec = td->start_sector;
  837. else {
  838. test_pr_err("%s: NULL td", __func__);
  839. return ret;
  840. }
  841. test_pr_info("%s: Adding %d write requests, first req_id=%d", __func__,
  842. num_requests, td->wr_rd_next_req_id);
  843. for (i = 1 ; i <= num_requests ; i++) {
  844. start_sec =
  845. td->start_sector + sizeof(int) *
  846. BIO_U32_SIZE * td->num_of_write_bios;
  847. if (is_random)
  848. pseudo_rnd_num_of_bios(bio_seed, &num_bios);
  849. else
  850. /*
  851. * For the non-random case, give num_bios a value
  852. * between 1 and 5, to keep a small number of BIOs
  853. */
  854. num_bios = (i%5)+1;
  855. ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE,
  856. start_sec, num_bios, TEST_PATTERN_5A, NULL);
  857. if (ret) {
  858. test_pr_err("%s: failed to add a write request",
  859. __func__);
  860. return ret;
  861. }
  862. }
  863. return 0;
  864. }
  865. /*
  866. * Prepare the write, read and flush requests for a generic packed commands
  867. * testcase
  868. */
  869. static int prepare_packed_requests(struct test_data *td, int is_err_expected,
  870. int num_requests, int is_random)
  871. {
  872. int ret = 0;
  873. struct mmc_queue *mq;
  874. int max_packed_reqs;
  875. struct request_queue *req_q;
  876. if (!td) {
  877. pr_err("%s: NULL td", __func__);
  878. return -EINVAL;
  879. }
  880. req_q = td->req_q;
  881. if (!req_q) {
  882. pr_err("%s: NULL request queue", __func__);
  883. return -EINVAL;
  884. }
  885. mq = req_q->queuedata;
  886. if (!mq) {
  887. test_pr_err("%s: NULL mq", __func__);
  888. return -EINVAL;
  889. }
  890. max_packed_reqs = mq->card->ext_csd.max_packed_writes;
  891. if (mbtd->random_test_seed <= 0) {
  892. mbtd->random_test_seed =
  893. (unsigned int)(get_jiffies_64() & 0xFFFF);
  894. test_pr_info("%s: got seed from jiffies %d",
  895. __func__, mbtd->random_test_seed);
  896. }
  897. ret = prepare_request_add_write_reqs(td, num_requests, is_err_expected,
  898. is_random);
  899. if (ret)
  900. return ret;
  901. /* Avoid memory corruption in upcoming stats set */
  902. if (td->test_info.testcase == TEST_STOP_DUE_TO_THRESHOLD)
  903. num_requests--;
  904. memset((void *)mbtd->exp_packed_stats.pack_stop_reason, 0,
  905. sizeof(mbtd->exp_packed_stats.pack_stop_reason));
  906. memset(mbtd->exp_packed_stats.packing_events, 0,
  907. (max_packed_reqs + 1) * sizeof(u32));
  908. if (num_requests <= max_packed_reqs)
  909. mbtd->exp_packed_stats.packing_events[num_requests] = 1;
  910. switch (td->test_info.testcase) {
  911. case TEST_STOP_DUE_TO_FLUSH:
  912. case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
  913. ret = prepare_request_add_flush(td);
  914. if (ret)
  915. return ret;
  916. mbtd->exp_packed_stats.pack_stop_reason[FLUSH_OR_DISCARD] = 1;
  917. break;
  918. case TEST_STOP_DUE_TO_READ:
  919. case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
  920. ret = prepare_request_add_read(td);
  921. if (ret)
  922. return ret;
  923. mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
  924. break;
  925. case TEST_STOP_DUE_TO_THRESHOLD:
  926. mbtd->exp_packed_stats.packing_events[num_requests] = 1;
  927. mbtd->exp_packed_stats.packing_events[1] = 1;
  928. mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1;
  929. mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
  930. break;
  931. case TEST_STOP_DUE_TO_MAX_REQ_NUM:
  932. case TEST_RET_PARTIAL_MAX_FAIL_IDX:
  933. mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1;
  934. break;
  935. default:
  936. mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
  937. }
  938. mbtd->num_requests = num_requests;
  939. return 0;
  940. }
/*
 * Prepare the write, read and flush requests for the packing control
 * testcases, and fill mbtd->exp_packed_stats with the statistics each
 * testcase is expected to produce.
 */
static int prepare_packed_control_tests_requests(struct test_data *td,
		int is_err_expected, int num_requests, int is_random)
{
	int ret = 0;
	struct mmc_queue *mq;
	int max_packed_reqs;
	int temp_num_req = num_requests;
	struct request_queue *req_q;
	int test_packed_trigger;
	int num_packed_reqs;

	if (!td) {
		test_pr_err("%s: NULL td\n", __func__);
		return -EINVAL;
	}
	req_q = td->req_q;
	if (!req_q) {
		test_pr_err("%s: NULL request queue\n", __func__);
		return -EINVAL;
	}
	mq = req_q->queuedata;
	if (!mq) {
		test_pr_err("%s: NULL mq", __func__);
		return -EINVAL;
	}
	max_packed_reqs = mq->card->ext_csd.max_packed_writes;
	/* Number of write reqs after which the driver starts packing */
	test_packed_trigger = mq->num_wr_reqs_to_start_packing;
	/* Requests beyond the trigger are the ones expected to be packed */
	num_packed_reqs = num_requests - test_packed_trigger;
	if (mbtd->random_test_seed == 0) {
		mbtd->random_test_seed =
			(unsigned int)(get_jiffies_64() & 0xFFFF);
		test_pr_info("%s: got seed from jiffies %d",
			     __func__, mbtd->random_test_seed);
	}
	if (td->test_info.testcase ==
			TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED) {
		/* First phase issues trigger-1 reqs (no packing expected);
		 * the full count is restored after they are queued. */
		temp_num_req = num_requests;
		num_requests = test_packed_trigger - 1;
	}
	/* Verify that the packing is disabled before starting the test */
	mq->wr_packing_enabled = false;
	mq->num_of_potential_packed_wr_reqs = 0;
	if (td->test_info.testcase == TEST_PACK_MIX_PACKED_NO_PACKED_PACKED) {
		/* Start this testcase with packing already enabled */
		mq->num_of_potential_packed_wr_reqs = test_packed_trigger + 1;
		mq->wr_packing_enabled = true;
		num_requests = test_packed_trigger + 2;
	}
	ret = prepare_request_add_write_reqs(td, num_requests, is_err_expected,
					     is_random);
	if (ret)
		goto exit;
	if (td->test_info.testcase == TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED)
		num_requests = temp_num_req;
	memset((void *)mbtd->exp_packed_stats.pack_stop_reason, 0,
	       sizeof(mbtd->exp_packed_stats.pack_stop_reason));
	memset(mbtd->exp_packed_stats.packing_events, 0,
	       (max_packed_reqs + 1) * sizeof(u32));
	/* Testcase-specific follow-up requests and expected statistics */
	switch (td->test_info.testcase) {
	case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
	case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
		/* A read breaks the packed batch (wrong data direction) */
		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;
		mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
		/* Flush splits the run into two packed batches */
		ret = prepare_request_add_flush(td);
		if (ret)
			goto exit;
		ret = prepare_request_add_write_reqs(td, num_packed_reqs,
						     is_err_expected,
						     is_random);
		if (ret)
			goto exit;
		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
		mbtd->exp_packed_stats.pack_stop_reason[FLUSH_OR_DISCARD] = 1;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 2;
		break;
	case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;
		ret = prepare_request_add_write_reqs(td, test_packed_trigger,
						     is_err_expected,
						     is_random);
		if (ret)
			goto exit;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
		ret = prepare_request_add_flush(td);
		if (ret)
			goto exit;
		ret = prepare_request_add_write_reqs(td, test_packed_trigger,
						     is_err_expected,
						     is_random);
		if (ret)
			goto exit;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
		/* pack -> (read + trigger-1 writes, no pack) -> pack again */
		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;
		ret = prepare_request_add_write_reqs(td, test_packed_trigger-1,
						     is_err_expected,
						     is_random);
		if (ret)
			goto exit;
		ret = prepare_request_add_write_reqs(td, num_requests,
						     is_err_expected,
						     is_random);
		if (ret)
			goto exit;
		mbtd->exp_packed_stats.packing_events[num_requests] = 1;
		mbtd->exp_packed_stats.packing_events[num_requests-1] = 1;
		mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
		break;
	case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
		/* no pack -> packed batch -> no pack */
		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;
		ret = prepare_request_add_write_reqs(td, num_requests,
						     is_err_expected,
						     is_random);
		if (ret)
			goto exit;
		ret = prepare_request_add_read(td);
		if (ret)
			goto exit;
		ret = prepare_request_add_write_reqs(td, test_packed_trigger-1,
						     is_err_expected,
						     is_random);
		if (ret)
			goto exit;
		mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
		break;
	case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
	case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
		/* Below/at the trigger: no packing, no extra requests */
		break;
	default:
		BUG_ON(num_packed_reqs < 0);
		mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
		mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
	}
	mbtd->num_requests = num_requests;

exit:
	return ret;
}
  1089. /*
  1090. * Prepare requests for the TEST_RET_PARTIAL_FOLLOWED_BY_ABORT testcase.
  1091. * In this testcase we have mixed error expectations from different
  1092. * write requests, hence the special prepare function.
  1093. */
  1094. static int prepare_partial_followed_by_abort(struct test_data *td,
  1095. int num_requests)
  1096. {
  1097. int i, start_address;
  1098. int is_err_expected = 0;
  1099. int ret = 0;
  1100. struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
  1101. int max_packed_reqs;
  1102. if (!mq) {
  1103. test_pr_err("%s: NULL mq", __func__);
  1104. return -EINVAL;
  1105. }
  1106. max_packed_reqs = mq->card->ext_csd.max_packed_writes;
  1107. for (i = 1; i <= num_requests; i++) {
  1108. if (i > (num_requests / 2))
  1109. is_err_expected = 1;
  1110. start_address = td->start_sector +
  1111. sizeof(int) * BIO_U32_SIZE * td->num_of_write_bios;
  1112. ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE,
  1113. start_address, (i % 5) + 1, TEST_PATTERN_5A,
  1114. NULL);
  1115. if (ret) {
  1116. test_pr_err("%s: failed to add a write request",
  1117. __func__);
  1118. return ret;
  1119. }
  1120. }
  1121. memset((void *)&mbtd->exp_packed_stats.pack_stop_reason, 0,
  1122. sizeof(mbtd->exp_packed_stats.pack_stop_reason));
  1123. memset(mbtd->exp_packed_stats.packing_events, 0,
  1124. (max_packed_reqs + 1) * sizeof(u32));
  1125. mbtd->exp_packed_stats.packing_events[num_requests] = 1;
  1126. mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
  1127. mbtd->num_requests = num_requests;
  1128. return ret;
  1129. }
  1130. /*
  1131. * Get number of write requests for current testcase. If random test mode was
  1132. * chosen, pseudo-randomly choose the number of requests, otherwise set to
  1133. * two less than the packing threshold.
  1134. */
  1135. static int get_num_requests(struct test_data *td)
  1136. {
  1137. int *seed = &mbtd->random_test_seed;
  1138. struct request_queue *req_q;
  1139. struct mmc_queue *mq;
  1140. int max_num_requests;
  1141. int num_requests;
  1142. int min_num_requests = 2;
  1143. int is_random = mbtd->is_random;
  1144. int max_for_double;
  1145. int test_packed_trigger;
  1146. req_q = test_iosched_get_req_queue();
  1147. if (req_q)
  1148. mq = req_q->queuedata;
  1149. else {
  1150. test_pr_err("%s: NULL request queue", __func__);
  1151. return 0;
  1152. }
  1153. if (!mq) {
  1154. test_pr_err("%s: NULL mq", __func__);
  1155. return -EINVAL;
  1156. }
  1157. max_num_requests = mq->card->ext_csd.max_packed_writes;
  1158. num_requests = max_num_requests - 2;
  1159. test_packed_trigger = mq->num_wr_reqs_to_start_packing;
  1160. /*
  1161. * Here max_for_double is intended for packed control testcases
  1162. * in which we issue many write requests. It's purpose is to prevent
  1163. * exceeding max number of req_queue requests.
  1164. */
  1165. max_for_double = max_num_requests - 10;
  1166. if (td->test_info.testcase ==
  1167. TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
  1168. /* Don't expect packing, so issue up to trigger-1 reqs */
  1169. num_requests = test_packed_trigger - 1;
  1170. if (is_random) {
  1171. if (td->test_info.testcase ==
  1172. TEST_RET_PARTIAL_FOLLOWED_BY_ABORT)
  1173. /*
  1174. * Here we don't want num_requests to be less than 1
  1175. * as a consequence of division by 2.
  1176. */
  1177. min_num_requests = 3;
  1178. if (td->test_info.testcase ==
  1179. TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
  1180. /* Don't expect packing, so issue up to trigger reqs */
  1181. max_num_requests = test_packed_trigger;
  1182. num_requests = pseudo_random_seed(seed, min_num_requests,
  1183. max_num_requests - 1);
  1184. }
  1185. if (td->test_info.testcase ==
  1186. TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
  1187. num_requests -= test_packed_trigger;
  1188. if (td->test_info.testcase == TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N)
  1189. num_requests =
  1190. num_requests > max_for_double ? max_for_double : num_requests;
  1191. if (mbtd->test_group == TEST_PACKING_CONTROL_GROUP)
  1192. num_requests += test_packed_trigger;
  1193. if (td->test_info.testcase == TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS)
  1194. num_requests = test_packed_trigger;
  1195. return num_requests;
  1196. }
  1197. static int prepare_long_read_test_requests(struct test_data *td)
  1198. {
  1199. int ret;
  1200. int start_sec;
  1201. int j;
  1202. if (td)
  1203. start_sec = td->start_sector;
  1204. else {
  1205. test_pr_err("%s: NULL td\n", __func__);
  1206. return -EINVAL;
  1207. }
  1208. test_pr_info("%s: Adding %d read requests, first req_id=%d", __func__,
  1209. LONG_READ_TEST_ACTUAL_NUM_REQS, td->wr_rd_next_req_id);
  1210. for (j = 0; j < LONG_READ_TEST_ACTUAL_NUM_REQS; j++) {
  1211. ret = test_iosched_add_wr_rd_test_req(0, READ,
  1212. start_sec,
  1213. TEST_MAX_BIOS_PER_REQ,
  1214. TEST_NO_PATTERN, NULL);
  1215. if (ret) {
  1216. test_pr_err("%s: failed to add a read request, err = %d"
  1217. , __func__, ret);
  1218. return ret;
  1219. }
  1220. start_sec +=
  1221. (TEST_MAX_BIOS_PER_REQ * sizeof(int) * BIO_U32_SIZE);
  1222. }
  1223. return 0;
  1224. }
  1225. /*
  1226. * An implementation for the prepare_test_fn pointer in the test_info
  1227. * data structure. According to the testcase we add the right number of requests
  1228. * and decide if an error is expected or not.
  1229. */
  1230. static int prepare_test(struct test_data *td)
  1231. {
  1232. struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
  1233. int max_num_requests;
  1234. int num_requests = 0;
  1235. int ret = 0;
  1236. int is_random = mbtd->is_random;
  1237. int test_packed_trigger = mq->num_wr_reqs_to_start_packing;
  1238. if (!mq) {
  1239. test_pr_err("%s: NULL mq", __func__);
  1240. return -EINVAL;
  1241. }
  1242. max_num_requests = mq->card->ext_csd.max_packed_writes;
  1243. if (is_random && mbtd->random_test_seed == 0) {
  1244. mbtd->random_test_seed =
  1245. (unsigned int)(get_jiffies_64() & 0xFFFF);
  1246. test_pr_info("%s: got seed from jiffies %d",
  1247. __func__, mbtd->random_test_seed);
  1248. }
  1249. num_requests = get_num_requests(td);
  1250. if (mbtd->test_group == TEST_SEND_INVALID_GROUP)
  1251. mq->packed_test_fn =
  1252. test_invalid_packed_cmd;
  1253. if (mbtd->test_group == TEST_ERR_CHECK_GROUP)
  1254. mq->err_check_fn = test_err_check;
  1255. switch (td->test_info.testcase) {
  1256. case TEST_STOP_DUE_TO_FLUSH:
  1257. case TEST_STOP_DUE_TO_READ:
  1258. case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
  1259. case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
  1260. case TEST_STOP_DUE_TO_EMPTY_QUEUE:
  1261. case TEST_CMD23_PACKED_BIT_UNSET:
  1262. ret = prepare_packed_requests(td, 0, num_requests, is_random);
  1263. break;
  1264. case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
  1265. case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
  1266. ret = prepare_packed_requests(td, 0, max_num_requests - 1,
  1267. is_random);
  1268. break;
  1269. case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
  1270. ret = prepare_partial_followed_by_abort(td, num_requests);
  1271. break;
  1272. case TEST_STOP_DUE_TO_MAX_REQ_NUM:
  1273. case TEST_RET_PARTIAL_MAX_FAIL_IDX:
  1274. ret = prepare_packed_requests(td, 0, max_num_requests,
  1275. is_random);
  1276. break;
  1277. case TEST_STOP_DUE_TO_THRESHOLD:
  1278. ret = prepare_packed_requests(td, 0, max_num_requests + 1,
  1279. is_random);
  1280. break;
  1281. case TEST_RET_ABORT:
  1282. case TEST_RET_RETRY:
  1283. case TEST_RET_CMD_ERR:
  1284. case TEST_RET_DATA_ERR:
  1285. case TEST_HDR_INVALID_VERSION:
  1286. case TEST_HDR_WRONG_WRITE_CODE:
  1287. case TEST_HDR_INVALID_RW_CODE:
  1288. case TEST_HDR_DIFFERENT_ADDRESSES:
  1289. case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
  1290. case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
  1291. case TEST_CMD23_MAX_PACKED_WRITES:
  1292. case TEST_CMD23_ZERO_PACKED_WRITES:
  1293. case TEST_CMD23_REL_WR_BIT_SET:
  1294. case TEST_CMD23_BITS_16TO29_SET:
  1295. case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
  1296. case TEST_HDR_CMD23_PACKED_BIT_SET:
  1297. ret = prepare_packed_requests(td, 1, num_requests, is_random);
  1298. break;
  1299. case TEST_PACKING_EXP_N_OVER_TRIGGER:
  1300. case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
  1301. case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
  1302. case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
  1303. case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
  1304. case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
  1305. ret = prepare_packed_control_tests_requests(td, 0, num_requests,
  1306. is_random);
  1307. break;
  1308. case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER:
  1309. ret = prepare_packed_control_tests_requests(td, 0,
  1310. max_num_requests, is_random);
  1311. break;
  1312. case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
  1313. ret = prepare_packed_control_tests_requests(td, 0,
  1314. test_packed_trigger + 1,
  1315. is_random);
  1316. break;
  1317. case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
  1318. ret = prepare_packed_control_tests_requests(td, 0, num_requests,
  1319. is_random);
  1320. break;
  1321. case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
  1322. case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
  1323. ret = prepare_packed_control_tests_requests(td, 0,
  1324. test_packed_trigger, is_random);
  1325. break;
  1326. case TEST_LONG_SEQUENTIAL_WRITE:
  1327. case TEST_LONG_SEQUENTIAL_READ:
  1328. ret = prepare_long_read_test_requests(td);
  1329. break;
  1330. default:
  1331. test_pr_info("%s: Invalid test case...", __func__);
  1332. ret = -EINVAL;
  1333. }
  1334. return ret;
  1335. }
  1336. static int run_packed_test(struct test_data *td)
  1337. {
  1338. struct mmc_queue *mq;
  1339. struct request_queue *req_q;
  1340. if (!td) {
  1341. pr_err("%s: NULL td", __func__);
  1342. return -EINVAL;
  1343. }
  1344. req_q = td->req_q;
  1345. if (!req_q) {
  1346. pr_err("%s: NULL request queue", __func__);
  1347. return -EINVAL;
  1348. }
  1349. mq = req_q->queuedata;
  1350. if (!mq) {
  1351. test_pr_err("%s: NULL mq", __func__);
  1352. return -EINVAL;
  1353. }
  1354. mmc_blk_init_packed_statistics(mq->card);
  1355. if (td->test_info.testcase != TEST_PACK_MIX_PACKED_NO_PACKED_PACKED) {
  1356. /*
  1357. * Verify that the packing is disabled before starting the
  1358. * test
  1359. */
  1360. mq->wr_packing_enabled = false;
  1361. mq->num_of_potential_packed_wr_reqs = 0;
  1362. }
  1363. __blk_run_queue(td->req_q);
  1364. return 0;
  1365. }
  1366. /*
  1367. * An implementation for the post_test_fn in the test_info data structure.
  1368. * In our case we just reset the function pointers in the mmc_queue in order for
  1369. * the FS to be able to dispatch it's requests correctly after the test is
  1370. * finished.
  1371. */
  1372. static int post_test(struct test_data *td)
  1373. {
  1374. struct mmc_queue *mq;
  1375. if (!td)
  1376. return -EINVAL;
  1377. mq = td->req_q->queuedata;
  1378. if (!mq) {
  1379. test_pr_err("%s: NULL mq", __func__);
  1380. return -EINVAL;
  1381. }
  1382. mq->packed_test_fn = NULL;
  1383. mq->err_check_fn = NULL;
  1384. return 0;
  1385. }
  1386. /*
  1387. * This function checks, based on the current test's test_group, that the
  1388. * packed commands capability and control are set right. In addition, we check
  1389. * if the card supports the packed command feature.
  1390. */
  1391. static int validate_packed_commands_settings(void)
  1392. {
  1393. struct request_queue *req_q;
  1394. struct mmc_queue *mq;
  1395. int max_num_requests;
  1396. struct mmc_host *host;
  1397. req_q = test_iosched_get_req_queue();
  1398. if (!req_q) {
  1399. test_pr_err("%s: test_iosched_get_req_queue failed", __func__);
  1400. test_iosched_set_test_result(TEST_FAILED);
  1401. return -EINVAL;
  1402. }
  1403. mq = req_q->queuedata;
  1404. if (!mq) {
  1405. test_pr_err("%s: NULL mq", __func__);
  1406. return -EINVAL;
  1407. }
  1408. max_num_requests = mq->card->ext_csd.max_packed_writes;
  1409. host = mq->card->host;
  1410. if (!(host->caps2 && MMC_CAP2_PACKED_WR)) {
  1411. test_pr_err("%s: Packed Write capability disabled, exit test",
  1412. __func__);
  1413. test_iosched_set_test_result(TEST_NOT_SUPPORTED);
  1414. return -EINVAL;
  1415. }
  1416. if (max_num_requests == 0) {
  1417. test_pr_err(
  1418. "%s: no write packing support, ext_csd.max_packed_writes=%d",
  1419. __func__, mq->card->ext_csd.max_packed_writes);
  1420. test_iosched_set_test_result(TEST_NOT_SUPPORTED);
  1421. return -EINVAL;
  1422. }
  1423. test_pr_info("%s: max number of packed requests supported is %d ",
  1424. __func__, max_num_requests);
  1425. switch (mbtd->test_group) {
  1426. case TEST_SEND_WRITE_PACKING_GROUP:
  1427. case TEST_ERR_CHECK_GROUP:
  1428. case TEST_SEND_INVALID_GROUP:
  1429. /* disable the packing control */
  1430. host->caps2 &= ~MMC_CAP2_PACKED_WR_CONTROL;
  1431. break;
  1432. case TEST_PACKING_CONTROL_GROUP:
  1433. host->caps2 |= MMC_CAP2_PACKED_WR_CONTROL;
  1434. break;
  1435. default:
  1436. break;
  1437. }
  1438. return 0;
  1439. }
  1440. static void pseudo_rnd_sector_and_size(unsigned int *seed,
  1441. unsigned int min_start_sector,
  1442. unsigned int *start_sector,
  1443. unsigned int *num_of_bios)
  1444. {
  1445. unsigned int max_sec = min_start_sector + TEST_MAX_SECTOR_RANGE;
  1446. do {
  1447. *start_sector = pseudo_random_seed(seed,
  1448. 1, max_sec);
  1449. *num_of_bios = pseudo_random_seed(seed,
  1450. 1, TEST_MAX_BIOS_PER_REQ);
  1451. if (!(*num_of_bios))
  1452. *num_of_bios = 1;
  1453. } while ((*start_sector < min_start_sector) ||
  1454. (*start_sector + (*num_of_bios * BIO_U32_SIZE * 4)) > max_sec);
  1455. }
/* sanitize test functions */

/*
 * Queue a large batch of DISCARD requests (nearly a full request queue)
 * followed by one SANITIZE request, so the sanitize operation has
 * discarded regions to act on.  Start sectors and sizes are pseudo-random,
 * seeded either from the user-supplied seed or from jiffies.
 *
 * NOTE(review): 'total_bios' is static, so the logged total accumulates
 * across invocations of this test — confirm that is intentional.
 * NOTE(review): return values of test_iosched_add_unique_test_req() are
 * ignored; a failed enqueue goes unnoticed.
 */
static int prepare_write_discard_sanitize_read(struct test_data *td)
{
	unsigned int start_sector;
	unsigned int num_of_bios = 0;
	static unsigned int total_bios;
	unsigned int *num_bios_seed;
	int i = 0;

	/* derive a seed from jiffies when the user did not provide one */
	if (mbtd->random_test_seed == 0) {
		mbtd->random_test_seed =
			(unsigned int)(get_jiffies_64() & 0xFFFF);
		test_pr_info("%s: got seed from jiffies %d",
			     __func__, mbtd->random_test_seed);
	}
	num_bios_seed = &mbtd->random_test_seed;

	do {
		pseudo_rnd_sector_and_size(num_bios_seed, td->start_sector,
					   &start_sector, &num_of_bios);

		/* DISCARD */
		total_bios += num_of_bios;
		test_pr_info("%s: discard req: id=%d, startSec=%d, NumBios=%d",
			     __func__, td->unique_next_req_id, start_sector,
			     num_of_bios);
		test_iosched_add_unique_test_req(0, REQ_UNIQUE_DISCARD,
				start_sector, BIO_TO_SECTOR(num_of_bios),
				NULL);

	} while (++i < (BLKDEV_MAX_RQ-10));

	test_pr_info("%s: total discard bios = %d", __func__, total_bios);

	test_pr_info("%s: add sanitize req", __func__);
	test_iosched_add_unique_test_req(0, REQ_UNIQUE_SANITIZE, 0, 0, NULL);

	return 0;
}
  1488. /*
  1489. * Post test operations for BKOPs test
  1490. * Disable the BKOPs statistics and clear the feature flags
  1491. */
  1492. static int bkops_post_test(struct test_data *td)
  1493. {
  1494. struct request_queue *q = td->req_q;
  1495. struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
  1496. struct mmc_card *card = mq->card;
  1497. mmc_card_clr_doing_bkops(mq->card);
  1498. card->ext_csd.raw_bkops_status = 0;
  1499. spin_lock(&card->bkops_info.bkops_stats.lock);
  1500. card->bkops_info.bkops_stats.enabled = false;
  1501. spin_unlock(&card->bkops_info.bkops_stats.lock);
  1502. return 0;
  1503. }
/*
 * Verify the BKOPs statistics gathered during the test match the
 * expectations of the test case that just ran: which severity-level
 * counter was bumped, whether an HPI was issued, and whether the host
 * suspended.
 *
 * Returns 0 on success (or when the round is deliberately ignored due to
 * timing / interfering FS writes), -EINVAL on mismatch or unknown case.
 */
static int check_bkops_result(struct test_data *td)
{
	struct request_queue *q = td->req_q;
	struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_bkops_stats *bkops_stat;

	if (!card)
		goto fail;

	bkops_stat = &card->bkops_info.bkops_stats;

	test_pr_info("%s: Test results: bkops:(%d,%d,%d) hpi:%d, suspend:%d",
			__func__,
			bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX],
			bkops_stat->bkops_level[BKOPS_SEVERITY_2_INDEX],
			bkops_stat->bkops_level[BKOPS_SEVERITY_3_INDEX],
			bkops_stat->hpi,
			bkops_stat->suspend);

	switch (mbtd->test_info.testcase) {
	case BKOPS_DELAYED_WORK_LEVEL_1:
		/* expect one level-1 BKOPs plus a suspend, no HPI */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 1) &&
		    (bkops_stat->suspend == 1) &&
		    (bkops_stat->hpi == 0))
			goto exit;
		else
			goto fail;
		break;
	case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
		/* expect one level-1 BKOPs interrupted by HPI, no suspend */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 1) &&
		    (bkops_stat->suspend == 0) &&
		    (bkops_stat->hpi == 1))
			goto exit;
		/* this might happen due to timing issues */
		else if
		((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 0) &&
		    (bkops_stat->suspend == 0) &&
		    (bkops_stat->hpi == 0))
			goto ignore;
		else
			goto fail;
		break;
	case BKOPS_CANCEL_DELAYED_WORK:
		/* the delayed work was cancelled: nothing should be counted */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_1_INDEX] == 0) &&
		    (bkops_stat->bkops_level[BKOPS_SEVERITY_2_INDEX] == 0) &&
		    (bkops_stat->bkops_level[BKOPS_SEVERITY_3_INDEX] == 0) &&
		    (bkops_stat->suspend == 0) &&
		    (bkops_stat->hpi == 0))
			goto exit;
		else
			goto fail;
	case BKOPS_URGENT_LEVEL_2:
	case BKOPS_URGENT_LEVEL_2_TWO_REQS:
		/* urgent level-2: one level-2 BKOPs, no HPI, no suspend */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_2_INDEX] == 1) &&
		    (bkops_stat->suspend == 0) &&
		    (bkops_stat->hpi == 0))
			goto exit;
		else
			goto fail;
	case BKOPS_URGENT_LEVEL_3:
		/* urgent level-3: one level-3 BKOPs, no HPI, no suspend */
		if ((bkops_stat->bkops_level[BKOPS_SEVERITY_3_INDEX] == 1) &&
		    (bkops_stat->suspend == 0) &&
		    (bkops_stat->hpi == 0))
			goto exit;
		else
			goto fail;
	default:
		return -EINVAL;
	}

exit:
	return 0;
ignore:
	/* statistics inconclusive (timing) — don't count this round */
	test_iosched_set_ignore_round(true);
	return 0;
fail:
	/* interfering FS writes invalidate the round rather than fail it */
	if (td->fs_wr_reqs_during_test) {
		test_pr_info("%s: wr reqs during test, cancel the round",
			     __func__);
		test_iosched_set_ignore_round(true);
		return 0;
	}

	test_pr_info("%s: BKOPs statistics are not as expected, test failed",
		     __func__);
	return -EINVAL;
}
  1589. static void bkops_end_io_final_fn(struct request *rq, int err)
  1590. {
  1591. struct test_request *test_rq =
  1592. (struct test_request *)rq->elv.priv[0];
  1593. BUG_ON(!test_rq);
  1594. test_rq->req_completed = 1;
  1595. test_rq->req_result = err;
  1596. test_pr_info("%s: request %d completed, err=%d",
  1597. __func__, test_rq->req_id, err);
  1598. mbtd->bkops_stage = BKOPS_STAGE_4;
  1599. wake_up(&mbtd->bkops_wait_q);
  1600. }
  1601. static void bkops_end_io_fn(struct request *rq, int err)
  1602. {
  1603. struct test_request *test_rq =
  1604. (struct test_request *)rq->elv.priv[0];
  1605. BUG_ON(!test_rq);
  1606. test_rq->req_completed = 1;
  1607. test_rq->req_result = err;
  1608. test_pr_info("%s: request %d completed, err=%d",
  1609. __func__, test_rq->req_id, err);
  1610. mbtd->bkops_stage = BKOPS_STAGE_2;
  1611. wake_up(&mbtd->bkops_wait_q);
  1612. }
  1613. static int prepare_bkops(struct test_data *td)
  1614. {
  1615. int ret = 0;
  1616. struct request_queue *q = td->req_q;
  1617. struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
  1618. struct mmc_card *card = mq->card;
  1619. struct mmc_bkops_stats *bkops_stat;
  1620. if (!card)
  1621. return -EINVAL;
  1622. bkops_stat = &card->bkops_info.bkops_stats;
  1623. if (!card->ext_csd.bkops_en) {
  1624. test_pr_err("%s: BKOPS is not enabled by card or host)",
  1625. __func__);
  1626. return -ENOTSUPP;
  1627. }
  1628. if (mmc_card_doing_bkops(card)) {
  1629. test_pr_err("%s: BKOPS in progress, try later", __func__);
  1630. return -EAGAIN;
  1631. }
  1632. mmc_blk_init_bkops_statistics(card);
  1633. if ((mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_2) ||
  1634. (mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_2_TWO_REQS) ||
  1635. (mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_3))
  1636. mq->err_check_fn = test_err_check;
  1637. mbtd->err_check_counter = 0;
  1638. return ret;
  1639. }
/*
 * Drive one BKOPs test case.  Each case forges a raw BKOPs status on the
 * card, dispatches test write requests, and synchronizes with the end_io
 * callbacks through mbtd->bkops_stage / bkops_wait_q.  The exact ordering
 * of status writes, queue runs and waits is what each case is testing —
 * do not reorder.
 *
 * Returns 0 on success, -EINVAL on request-allocation failure or an
 * unknown test case.
 */
static int run_bkops(struct test_data *td)
{
	int ret = 0;
	struct request_queue *q = td->req_q;
	struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_bkops_stats *bkops_stat;

	if (!card)
		return -EINVAL;

	bkops_stat = &card->bkops_info.bkops_stats;

	switch (mbtd->test_info.testcase) {
	case BKOPS_DELAYED_WORK_LEVEL_1:
		/* level-1 status: delayed work should run and host suspend */
		bkops_stat->ignore_card_bkops_status = true;
		card->ext_csd.raw_bkops_status = 1;
		card->bkops_info.sectors_changed =
			card->bkops_info.min_sectors_to_queue_delayed_work + 1;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		__blk_run_queue(q);
		/* this long sleep makes sure the host starts bkops and
		   also, gets into suspend */
		msleep(10000);

		bkops_stat->ignore_card_bkops_status = false;
		card->ext_csd.raw_bkops_status = 0;

		test_iosched_mark_test_completion();
		break;
	case BKOPS_DELAYED_WORK_LEVEL_1_HPI:
		/* let the delayed work start, then interrupt it with a write
		 * (expected to trigger HPI) */
		bkops_stat->ignore_card_bkops_status = true;
		card->ext_csd.raw_bkops_status = 1;
		card->bkops_info.sectors_changed =
			card->bkops_info.min_sectors_to_queue_delayed_work + 1;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		__blk_run_queue(q);
		msleep(card->bkops_info.delay_ms);

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				bkops_end_io_final_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
					__func__);
			ret = -EINVAL;
			break;
		}

		__blk_run_queue(q);
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_4);
		bkops_stat->ignore_card_bkops_status = false;

		test_iosched_mark_test_completion();
		break;
	case BKOPS_CANCEL_DELAYED_WORK:
		/* issue a write before the delayed work fires, cancelling it */
		bkops_stat->ignore_card_bkops_status = true;
		card->ext_csd.raw_bkops_status = 1;
		card->bkops_info.sectors_changed =
			card->bkops_info.min_sectors_to_queue_delayed_work + 1;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		__blk_run_queue(q);

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				bkops_end_io_final_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
					__func__);
			ret = -EINVAL;
			break;
		}

		__blk_run_queue(q);
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_4);
		bkops_stat->ignore_card_bkops_status = false;

		test_iosched_mark_test_completion();
		break;
	case BKOPS_URGENT_LEVEL_2:
	case BKOPS_URGENT_LEVEL_3:
		/* urgent status (level 2 or 3): the first write should be
		 * interrupted by BKOPs (stage 2), then a final write completes
		 * the test (stage 4) */
		bkops_stat->ignore_card_bkops_status = true;
		if (mbtd->test_info.testcase == BKOPS_URGENT_LEVEL_2)
			card->ext_csd.raw_bkops_status = 2;
		else
			card->ext_csd.raw_bkops_status = 3;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				bkops_end_io_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
					__func__);
			ret = -EINVAL;
			break;
		}

		__blk_run_queue(q);
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_2);
		card->ext_csd.raw_bkops_status = 0;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				bkops_end_io_final_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
					__func__);
			ret = -EINVAL;
			break;
		}

		__blk_run_queue(q);
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_4);
		bkops_stat->ignore_card_bkops_status = false;

		test_iosched_mark_test_completion();
		break;
	case BKOPS_URGENT_LEVEL_2_TWO_REQS:
		/* same as level 2, but two queued writes (packing disabled) */
		mq->wr_packing_enabled = false;
		bkops_stat->ignore_card_bkops_status = true;
		card->ext_csd.raw_bkops_status = 2;
		mbtd->bkops_stage = BKOPS_STAGE_1;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				NULL);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
					__func__);
			ret = -EINVAL;
			break;
		}

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				bkops_end_io_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
					__func__);
			ret = -EINVAL;
			break;
		}

		__blk_run_queue(q);
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_2);
		card->ext_csd.raw_bkops_status = 0;

		ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector,
				TEST_REQUEST_NUM_OF_BIOS,
				TEST_PATTERN_5A,
				bkops_end_io_final_fn);
		if (ret) {
			test_pr_err("%s: failed to add a write request",
					__func__);
			ret = -EINVAL;
			break;
		}

		__blk_run_queue(q);
		wait_event(mbtd->bkops_wait_q,
			   mbtd->bkops_stage == BKOPS_STAGE_4);
		bkops_stat->ignore_card_bkops_status = false;

		test_iosched_mark_test_completion();
		break;
	default:
		test_pr_err("%s: wrong testcase: %d", __func__,
			    mbtd->test_info.testcase);
		ret = -EINVAL;
	}
	return ret;
}
  1809. /*
  1810. * new_req_post_test() - Do post test operations for
  1811. * new_req_notification test: disable the statistics and clear
  1812. * the feature flags.
  1813. * @td The test_data for the new_req test that has
  1814. * ended.
  1815. */
  1816. static int new_req_post_test(struct test_data *td)
  1817. {
  1818. struct mmc_queue *mq;
  1819. if (!td || !td->req_q)
  1820. goto exit;
  1821. mq = (struct mmc_queue *)td->req_q->queuedata;
  1822. if (!mq || !mq->card)
  1823. goto exit;
  1824. test_pr_info("Completed %d requests",
  1825. mbtd->completed_req_count);
  1826. exit:
  1827. return 0;
  1828. }
  1829. /*
  1830. * check_new_req_result() - Print out the number of completed
  1831. * requests. Assigned to the check_test_result_fn pointer,
  1832. * therefore the name.
  1833. * @td The test_data for the new_req test that has
  1834. * ended.
  1835. */
  1836. static int check_new_req_result(struct test_data *td)
  1837. {
  1838. test_pr_info("%s: Test results: Completed %d requests",
  1839. __func__, mbtd->completed_req_count);
  1840. return 0;
  1841. }
  1842. /*
  1843. * new_req_free_end_io_fn() - Remove request from queuelist and
  1844. * free request's allocated memory. Used as a call-back
  1845. * assigned to end_io member in request struct.
  1846. * @rq The request to be freed
  1847. * @err Unused
  1848. */
  1849. static void new_req_free_end_io_fn(struct request *rq, int err)
  1850. {
  1851. struct test_request *test_rq =
  1852. (struct test_request *)rq->elv.priv[0];
  1853. struct test_data *ptd = test_get_test_data();
  1854. BUG_ON(!test_rq);
  1855. spin_lock_irq(&ptd->lock);
  1856. list_del_init(&test_rq->queuelist);
  1857. ptd->dispatched_count--;
  1858. spin_unlock_irq(&ptd->lock);
  1859. __blk_put_request(ptd->req_q, test_rq->rq);
  1860. kfree(test_rq->bios_buffer);
  1861. kfree(test_rq);
  1862. mbtd->completed_req_count++;
  1863. }
  1864. static int prepare_new_req(struct test_data *td)
  1865. {
  1866. struct request_queue *q = td->req_q;
  1867. struct mmc_queue *mq = (struct mmc_queue *)q->queuedata;
  1868. mmc_blk_init_packed_statistics(mq->card);
  1869. mbtd->completed_req_count = 0;
  1870. return 0;
  1871. }
  1872. static int run_new_req(struct test_data *ptd)
  1873. {
  1874. int ret = 0;
  1875. int i;
  1876. unsigned int requests_count = 2;
  1877. unsigned int bio_num;
  1878. struct test_request *test_rq = NULL;
  1879. while (1) {
  1880. for (i = 0; i < requests_count; i++) {
  1881. bio_num = TEST_MAX_BIOS_PER_REQ;
  1882. test_rq = test_iosched_create_test_req(0, READ,
  1883. ptd->start_sector,
  1884. bio_num, TEST_PATTERN_5A,
  1885. new_req_free_end_io_fn);
  1886. if (test_rq) {
  1887. spin_lock_irq(ptd->req_q->queue_lock);
  1888. list_add_tail(&test_rq->queuelist,
  1889. &ptd->test_queue);
  1890. ptd->test_count++;
  1891. spin_unlock_irq(ptd->req_q->queue_lock);
  1892. } else {
  1893. test_pr_err("%s: failed to create read request",
  1894. __func__);
  1895. ret = -ENODEV;
  1896. break;
  1897. }
  1898. }
  1899. __blk_run_queue(ptd->req_q);
  1900. /* wait while a mmc layer will send all requests in test_queue*/
  1901. while (!list_empty(&ptd->test_queue))
  1902. msleep(NEW_REQ_TEST_SLEEP_TIME);
  1903. /* test finish criteria */
  1904. if (mbtd->completed_req_count > 1000) {
  1905. if (ptd->dispatched_count)
  1906. continue;
  1907. else
  1908. break;
  1909. }
  1910. for (i = 0; i < requests_count; i++) {
  1911. bio_num = NEW_REQ_TEST_NUM_BIOS;
  1912. test_rq = test_iosched_create_test_req(0, READ,
  1913. ptd->start_sector,
  1914. bio_num, TEST_PATTERN_5A,
  1915. new_req_free_end_io_fn);
  1916. if (test_rq) {
  1917. spin_lock_irq(ptd->req_q->queue_lock);
  1918. list_add_tail(&test_rq->queuelist,
  1919. &ptd->test_queue);
  1920. ptd->test_count++;
  1921. spin_unlock_irq(ptd->req_q->queue_lock);
  1922. } else {
  1923. test_pr_err("%s: failed to create read request",
  1924. __func__);
  1925. ret = -ENODEV;
  1926. break;
  1927. }
  1928. }
  1929. __blk_run_queue(ptd->req_q);
  1930. }
  1931. test_iosched_mark_test_completion();
  1932. test_pr_info("%s: EXIT: %d code", __func__, ret);
  1933. return ret;
  1934. }
/*
 * Set to 1 on every open of a test node so its help text is returned on
 * the first read only (makes `cat` terminate after one read).
 */
static bool message_repeat;

/* debugfs open hook shared by all the test nodes below */
static int test_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	message_repeat = 1;
	return 0;
}
/* send_packing TEST */

/*
 * debugfs write handler that runs the send_write_packing test group.
 * The user string is parsed as the number of cycles (default 1); each
 * cycle runs every test case in the group twice — once with random
 * request sizes/addresses and once deterministic.
 *
 * NOTE(review): 'buf' is a __user pointer passed straight to sscanf()
 * without copy_from_user() — verify against debugfs conventions.
 *
 * Always returns 'count' so the write appears to succeed.
 */
static ssize_t send_write_packing_test_write(struct file *file,
				const char __user *buf,
				size_t count,
				loff_t *ppos)
{
	int ret = 0;
	int i = 0;
	int number = -1;	/* requested number of cycles */
	int j = 0;

	test_pr_info("%s: -- send_write_packing TEST --", __func__);

	sscanf(buf, "%d", &number);

	if (number <= 0)
		number = 1;

	mbtd->test_group = TEST_SEND_WRITE_PACKING_GROUP;

	/* bail out quietly when packed writes are unsupported/misconfigured */
	if (validate_packed_commands_settings())
		return count;

	if (mbtd->random_test_seed > 0)
		test_pr_info("%s: Test seed: %d", __func__,
			     mbtd->random_test_seed);

	memset(&mbtd->test_info, 0, sizeof(struct test_info));

	mbtd->test_info.data = mbtd;
	mbtd->test_info.prepare_test_fn = prepare_test;
	mbtd->test_info.run_test_fn = run_packed_test;
	mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
	mbtd->test_info.get_test_case_str_fn = get_test_case_str;
	mbtd->test_info.post_test_fn = post_test;

	for (i = 0; i < number; ++i) {
		test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
		test_pr_info("%s: ====================", __func__);

		for (j = SEND_WRITE_PACKING_MIN_TESTCASE;
		      j <= SEND_WRITE_PACKING_MAX_TESTCASE; j++) {

			mbtd->test_info.testcase = j;
			mbtd->is_random = RANDOM_TEST;
			ret = test_iosched_start_test(&mbtd->test_info);
			if (ret)
				break;
			/* Allow FS requests to be dispatched */
			msleep(1000);
			mbtd->test_info.testcase = j;
			mbtd->is_random = NON_RANDOM_TEST;
			ret = test_iosched_start_test(&mbtd->test_info);
			if (ret)
				break;
			/* Allow FS requests to be dispatched */
			msleep(1000);
		}
	}

	test_pr_info("%s: Completed all the test cases.", __func__);

	return count;
}
/*
 * debugfs read handler: emits the help/description text for the
 * send_write_packing test.  'message_repeat' makes the text appear once
 * per open so `cat` terminates.
 *
 * NOTE(review): 'buffer' is a __user pointer written via
 * memset()/snprintf() instead of copy_to_user() — confirm this matches
 * this kernel's debugfs expectations.
 */
static ssize_t send_write_packing_test_read(struct file *file,
				char __user *buffer,
				size_t count,
				loff_t *offset)
{
	if (!access_ok(VERIFY_WRITE, buffer, count))
		return count;

	memset((void *)buffer, 0, count);

	snprintf(buffer, count,
		 "\nsend_write_packing_test\n"
		 "=========\n"
		 "Description:\n"
		 "This test checks the following scenarios\n"
		 "- Pack due to FLUSH message\n"
		 "- Pack due to FLUSH after threshold writes\n"
		 "- Pack due to READ message\n"
		 "- Pack due to READ after threshold writes\n"
		 "- Pack due to empty queue\n"
		 "- Pack due to threshold writes\n"
		 "- Pack due to one over threshold writes\n");

	if (message_repeat == 1) {
		message_repeat = 0;
		return strnlen(buffer, count);
	} else {
		return 0;
	}
}
/* debugfs file operations for the send_write_packing test node */
const struct file_operations send_write_packing_test_ops = {
	.open = test_open,
	.write = send_write_packing_test_write,
	.read = send_write_packing_test_read,
};
/* err_check TEST */

/*
 * debugfs write handler that runs the err_check test group.  Parses the
 * cycle count from the user string (default 1) and runs every err_check
 * test case in random and deterministic mode each cycle, stopping a cycle
 * on the first failure.
 *
 * NOTE(review): 'buf' is a __user pointer passed straight to sscanf()
 * without copy_from_user() — verify against debugfs conventions.
 *
 * Always returns 'count'.
 */
static ssize_t err_check_test_write(struct file *file,
				const char __user *buf,
				size_t count,
				loff_t *ppos)
{
	int ret = 0;
	int i = 0;
	int number = -1;	/* requested number of cycles */
	int j = 0;

	test_pr_info("%s: -- err_check TEST --", __func__);

	sscanf(buf, "%d", &number);

	if (number <= 0)
		number = 1;

	mbtd->test_group = TEST_ERR_CHECK_GROUP;

	/* bail out quietly when packed writes are unsupported/misconfigured */
	if (validate_packed_commands_settings())
		return count;

	if (mbtd->random_test_seed > 0)
		test_pr_info("%s: Test seed: %d", __func__,
			     mbtd->random_test_seed);

	memset(&mbtd->test_info, 0, sizeof(struct test_info));

	mbtd->test_info.data = mbtd;
	mbtd->test_info.prepare_test_fn = prepare_test;
	mbtd->test_info.run_test_fn = run_packed_test;
	mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
	mbtd->test_info.get_test_case_str_fn = get_test_case_str;
	mbtd->test_info.post_test_fn = post_test;

	for (i = 0; i < number; ++i) {
		test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
		test_pr_info("%s: ====================", __func__);

		for (j = ERR_CHECK_MIN_TESTCASE;
		      j <= ERR_CHECK_MAX_TESTCASE ; j++) {
			mbtd->test_info.testcase = j;
			mbtd->is_random = RANDOM_TEST;
			ret = test_iosched_start_test(&mbtd->test_info);
			if (ret)
				break;
			/* Allow FS requests to be dispatched */
			msleep(1000);
			mbtd->test_info.testcase = j;
			mbtd->is_random = NON_RANDOM_TEST;
			ret = test_iosched_start_test(&mbtd->test_info);
			if (ret)
				break;
			/* Allow FS requests to be dispatched */
			msleep(1000);
		}
	}

	test_pr_info("%s: Completed all the test cases.", __func__);

	return count;
}
/*
 * debugfs read handler: emits the help/description text for the
 * err_check test.  'message_repeat' makes the text appear once per open.
 *
 * NOTE(review): 'buffer' is a __user pointer written via
 * memset()/snprintf() instead of copy_to_user() — confirm this matches
 * this kernel's debugfs expectations.
 */
static ssize_t err_check_test_read(struct file *file,
				char __user *buffer,
				size_t count,
				loff_t *offset)
{
	if (!access_ok(VERIFY_WRITE, buffer, count))
		return count;

	memset((void *)buffer, 0, count);

	snprintf(buffer, count,
		 "\nerr_check_TEST\n"
		 "=========\n"
		 "Description:\n"
		 "This test checks the following scenarios\n"
		 "- Return ABORT\n"
		 "- Return PARTIAL followed by success\n"
		 "- Return PARTIAL followed by abort\n"
		 "- Return PARTIAL multiple times until success\n"
		 "- Return PARTIAL with fail index = threshold\n"
		 "- Return RETRY\n"
		 "- Return CMD_ERR\n"
		 "- Return DATA_ERR\n");

	if (message_repeat == 1) {
		message_repeat = 0;
		return strnlen(buffer, count);
	} else {
		return 0;
	}
}
/* debugfs file operations for the err_check test node */
const struct file_operations err_check_test_ops = {
	.open = test_open,
	.write = err_check_test_write,
	.read = err_check_test_read,
};
/* send_invalid_packed TEST */

/*
 * debugfs write handler that runs the send_invalid_packed test group.
 * Unlike the other groups, failures here do not stop the run: they are
 * counted and reported at the end (each case sends a deliberately
 * malformed packed command, so individual outcomes are tallied).
 *
 * NOTE(review): 'buf' is a __user pointer passed straight to sscanf()
 * without copy_from_user() — verify against debugfs conventions.
 *
 * Always returns 'count'.
 */
static ssize_t send_invalid_packed_test_write(struct file *file,
				const char __user *buf,
				size_t count,
				loff_t *ppos)
{
	int ret = 0;
	int i = 0;
	int number = -1;	/* requested number of cycles */
	int j = 0;
	int num_of_failures = 0;

	test_pr_info("%s: -- send_invalid_packed TEST --", __func__);

	sscanf(buf, "%d", &number);

	if (number <= 0)
		number = 1;

	mbtd->test_group = TEST_SEND_INVALID_GROUP;

	/* bail out quietly when packed writes are unsupported/misconfigured */
	if (validate_packed_commands_settings())
		return count;

	if (mbtd->random_test_seed > 0)
		test_pr_info("%s: Test seed: %d", __func__,
			     mbtd->random_test_seed);

	memset(&mbtd->test_info, 0, sizeof(struct test_info));

	mbtd->test_info.data = mbtd;
	mbtd->test_info.prepare_test_fn = prepare_test;
	mbtd->test_info.run_test_fn = run_packed_test;
	mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
	mbtd->test_info.get_test_case_str_fn = get_test_case_str;
	mbtd->test_info.post_test_fn = post_test;

	for (i = 0; i < number; ++i) {
		test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
		test_pr_info("%s: ====================", __func__);

		for (j = INVALID_CMD_MIN_TESTCASE;
		      j <= INVALID_CMD_MAX_TESTCASE ; j++) {
			mbtd->test_info.testcase = j;
			mbtd->is_random = RANDOM_TEST;
			ret = test_iosched_start_test(&mbtd->test_info);
			if (ret)
				num_of_failures++;
			/* Allow FS requests to be dispatched */
			msleep(1000);

			mbtd->test_info.testcase = j;
			mbtd->is_random = NON_RANDOM_TEST;
			ret = test_iosched_start_test(&mbtd->test_info);
			if (ret)
				num_of_failures++;
			/* Allow FS requests to be dispatched */
			msleep(1000);
		}
	}

	test_pr_info("%s: Completed all the test cases.", __func__);

	if (num_of_failures > 0) {
		test_iosched_set_test_result(TEST_FAILED);
		test_pr_err(
			"There were %d failures during the test, TEST FAILED",
			num_of_failures);
	}

	return count;
}
/*
 * debugfs read handler: emits the help/description text for the
 * send_invalid_packed test.  'message_repeat' makes the text appear once
 * per open.
 *
 * NOTE(review): 'buffer' is a __user pointer written via
 * memset()/snprintf() instead of copy_to_user() — confirm this matches
 * this kernel's debugfs expectations.
 */
static ssize_t send_invalid_packed_test_read(struct file *file,
				char __user *buffer,
				size_t count,
				loff_t *offset)
{
	if (!access_ok(VERIFY_WRITE, buffer, count))
		return count;

	memset((void *)buffer, 0, count);

	snprintf(buffer, count,
		 "\nsend_invalid_packed_TEST\n"
		 "=========\n"
		 "Description:\n"
		 "This test checks the following scenarios\n"
		 "- Send an invalid header version\n"
		 "- Send the wrong write code\n"
		 "- Send an invalid R/W code\n"
		 "- Send wrong start address in header\n"
		 "- Send header with block_count smaller than actual\n"
		 "- Send header with block_count larger than actual\n"
		 "- Send header CMD23 packed bit set\n"
		 "- Send CMD23 with block count over threshold\n"
		 "- Send CMD23 with block_count equals zero\n"
		 "- Send CMD23 packed bit unset\n"
		 "- Send CMD23 reliable write bit set\n"
		 "- Send CMD23 bits [16-29] set\n"
		 "- Send CMD23 header block not in block_count\n");

	if (message_repeat == 1) {
		message_repeat = 0;
		return strnlen(buffer, count);
	} else {
		return 0;
	}
}
/* debugfs file operations for the send_invalid_packed test node */
const struct file_operations send_invalid_packed_test_ops = {
	.open = test_open,
	.write = send_invalid_packed_test_write,
	.read = send_invalid_packed_test_read,
};
  2205. /* packing_control TEST */
  2206. static ssize_t write_packing_control_test_write(struct file *file,
  2207. const char __user *buf,
  2208. size_t count,
  2209. loff_t *ppos)
  2210. {
  2211. int ret = 0;
  2212. int i = 0;
  2213. int number = -1;
  2214. int j = 0;
  2215. struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
  2216. int max_num_requests = mq->card->ext_csd.max_packed_writes;
  2217. int test_successful = 1;
  2218. test_pr_info("%s: -- write_packing_control TEST --", __func__);
  2219. sscanf(buf, "%d", &number);
  2220. if (number <= 0)
  2221. number = 1;
  2222. test_pr_info("%s: max_num_requests = %d ", __func__,
  2223. max_num_requests);
  2224. memset(&mbtd->test_info, 0, sizeof(struct test_info));
  2225. mbtd->test_group = TEST_PACKING_CONTROL_GROUP;
  2226. if (validate_packed_commands_settings())
  2227. return count;
  2228. mbtd->test_info.data = mbtd;
  2229. mbtd->test_info.prepare_test_fn = prepare_test;
  2230. mbtd->test_info.run_test_fn = run_packed_test;
  2231. mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
  2232. mbtd->test_info.get_test_case_str_fn = get_test_case_str;
  2233. for (i = 0; i < number; ++i) {
  2234. test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
  2235. test_pr_info("%s: ====================", __func__);
  2236. for (j = PACKING_CONTROL_MIN_TESTCASE;
  2237. j <= PACKING_CONTROL_MAX_TESTCASE; j++) {
  2238. test_successful = 1;
  2239. mbtd->test_info.testcase = j;
  2240. mbtd->is_random = RANDOM_TEST;
  2241. ret = test_iosched_start_test(&mbtd->test_info);
  2242. if (ret) {
  2243. test_successful = 0;
  2244. break;
  2245. }
  2246. /* Allow FS requests to be dispatched */
  2247. msleep(1000);
  2248. mbtd->test_info.testcase = j;
  2249. mbtd->is_random = NON_RANDOM_TEST;
  2250. ret = test_iosched_start_test(&mbtd->test_info);
  2251. if (ret) {
  2252. test_successful = 0;
  2253. break;
  2254. }
  2255. /* Allow FS requests to be dispatched */
  2256. msleep(1000);
  2257. }
  2258. if (!test_successful)
  2259. break;
  2260. }
  2261. test_pr_info("%s: Completed all the test cases.", __func__);
  2262. return count;
  2263. }
  2264. static ssize_t write_packing_control_test_read(struct file *file,
  2265. char __user *buffer,
  2266. size_t count,
  2267. loff_t *offset)
  2268. {
  2269. if (!access_ok(VERIFY_WRITE, buffer, count))
  2270. return count;
  2271. memset((void *)buffer, 0, count);
  2272. snprintf(buffer, count,
  2273. "\nwrite_packing_control_test\n"
  2274. "=========\n"
  2275. "Description:\n"
  2276. "This test checks the following scenarios\n"
  2277. "- Packing expected - one over trigger\n"
  2278. "- Packing expected - N over trigger\n"
  2279. "- Packing expected - N over trigger followed by read\n"
  2280. "- Packing expected - N over trigger followed by flush\n"
  2281. "- Packing expected - threshold over trigger FB by flush\n"
  2282. "- Packing not expected - less than trigger\n"
  2283. "- Packing not expected - trigger requests\n"
  2284. "- Packing not expected - trigger, read, trigger\n"
  2285. "- Mixed state - packing -> no packing -> packing\n"
  2286. "- Mixed state - no packing -> packing -> no packing\n");
  2287. if (message_repeat == 1) {
  2288. message_repeat = 0;
  2289. return strnlen(buffer, count);
  2290. } else {
  2291. return 0;
  2292. }
  2293. }
/* debugfs hooks for the write packing control test node */
const struct file_operations write_packing_control_test_ops = {
	.open = test_open,
	.write = write_packing_control_test_write,
	.read = write_packing_control_test_read,
};
  2299. static ssize_t write_discard_sanitize_test_write(struct file *file,
  2300. const char __user *buf,
  2301. size_t count,
  2302. loff_t *ppos)
  2303. {
  2304. int ret = 0;
  2305. int i = 0;
  2306. int number = -1;
  2307. sscanf(buf, "%d", &number);
  2308. if (number <= 0)
  2309. number = 1;
  2310. test_pr_info("%s: -- write_discard_sanitize TEST --\n", __func__);
  2311. memset(&mbtd->test_info, 0, sizeof(struct test_info));
  2312. mbtd->test_group = TEST_GENERAL_GROUP;
  2313. mbtd->test_info.data = mbtd;
  2314. mbtd->test_info.prepare_test_fn = prepare_write_discard_sanitize_read;
  2315. mbtd->test_info.get_test_case_str_fn = get_test_case_str;
  2316. mbtd->test_info.timeout_msec = SANITIZE_TEST_TIMEOUT;
  2317. for (i = 0 ; i < number ; ++i) {
  2318. test_pr_info("%s: Cycle # %d / %d\n", __func__, i+1, number);
  2319. test_pr_info("%s: ===================", __func__);
  2320. mbtd->test_info.testcase = TEST_WRITE_DISCARD_SANITIZE_READ;
  2321. ret = test_iosched_start_test(&mbtd->test_info);
  2322. if (ret)
  2323. break;
  2324. }
  2325. return count;
  2326. }
/* debugfs hooks for the write/discard/sanitize test node (write-only) */
const struct file_operations write_discard_sanitize_test_ops = {
	.open = test_open,
	.write = write_discard_sanitize_test_write,
};
  2331. static ssize_t bkops_test_write(struct file *file,
  2332. const char __user *buf,
  2333. size_t count,
  2334. loff_t *ppos)
  2335. {
  2336. int ret = 0;
  2337. int i = 0, j;
  2338. int number = -1;
  2339. test_pr_info("%s: -- bkops_test TEST --", __func__);
  2340. sscanf(buf, "%d", &number);
  2341. if (number <= 0)
  2342. number = 1;
  2343. mbtd->test_group = TEST_BKOPS_GROUP;
  2344. memset(&mbtd->test_info, 0, sizeof(struct test_info));
  2345. mbtd->test_info.data = mbtd;
  2346. mbtd->test_info.prepare_test_fn = prepare_bkops;
  2347. mbtd->test_info.check_test_result_fn = check_bkops_result;
  2348. mbtd->test_info.get_test_case_str_fn = get_test_case_str;
  2349. mbtd->test_info.run_test_fn = run_bkops;
  2350. mbtd->test_info.timeout_msec = BKOPS_TEST_TIMEOUT;
  2351. mbtd->test_info.post_test_fn = bkops_post_test;
  2352. for (i = 0 ; i < number ; ++i) {
  2353. test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
  2354. test_pr_info("%s: ===================", __func__);
  2355. for (j = BKOPS_MIN_TESTCASE ;
  2356. j <= BKOPS_MAX_TESTCASE ; j++) {
  2357. mbtd->test_info.testcase = j;
  2358. ret = test_iosched_start_test(&mbtd->test_info);
  2359. if (ret)
  2360. break;
  2361. }
  2362. }
  2363. test_pr_info("%s: Completed all the test cases.", __func__);
  2364. return count;
  2365. }
  2366. static ssize_t bkops_test_read(struct file *file,
  2367. char __user *buffer,
  2368. size_t count,
  2369. loff_t *offset)
  2370. {
  2371. if (!access_ok(VERIFY_WRITE, buffer, count))
  2372. return count;
  2373. memset((void *)buffer, 0, count);
  2374. snprintf(buffer, count,
  2375. "\nbkops_test\n========================\n"
  2376. "Description:\n"
  2377. "This test simulates BKOPS status from card\n"
  2378. "and verifies that:\n"
  2379. " - Starting BKOPS delayed work, level 1\n"
  2380. " - Starting BKOPS delayed work, level 1, with HPI\n"
  2381. " - Cancel starting BKOPS delayed work, "
  2382. " when a request is received\n"
  2383. " - Starting BKOPS urgent, level 2,3\n"
  2384. " - Starting BKOPS urgent with 2 requests\n");
  2385. return strnlen(buffer, count);
  2386. }
/* debugfs hooks for the BKOPS test node */
const struct file_operations bkops_test_ops = {
	.open = test_open,
	.write = bkops_test_write,
	.read = bkops_test_read,
};
  2392. static ssize_t long_sequential_read_test_write(struct file *file,
  2393. const char __user *buf,
  2394. size_t count,
  2395. loff_t *ppos)
  2396. {
  2397. int ret = 0;
  2398. int i = 0;
  2399. int number = -1;
  2400. unsigned long mtime, integer, fraction;
  2401. test_pr_info("%s: -- Long Sequential Read TEST --", __func__);
  2402. sscanf(buf, "%d", &number);
  2403. if (number <= 0)
  2404. number = 1;
  2405. memset(&mbtd->test_info, 0, sizeof(struct test_info));
  2406. mbtd->test_group = TEST_GENERAL_GROUP;
  2407. mbtd->test_info.data = mbtd;
  2408. mbtd->test_info.prepare_test_fn = prepare_test;
  2409. mbtd->test_info.get_test_case_str_fn = get_test_case_str;
  2410. for (i = 0 ; i < number ; ++i) {
  2411. test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
  2412. test_pr_info("%s: ====================", __func__);
  2413. mbtd->test_info.testcase = TEST_LONG_SEQUENTIAL_READ;
  2414. mbtd->is_random = NON_RANDOM_TEST;
  2415. ret = test_iosched_start_test(&mbtd->test_info);
  2416. if (ret)
  2417. break;
  2418. mtime = ktime_to_ms(mbtd->test_info.test_duration);
  2419. test_pr_info("%s: time is %lu msec, size is %u.%u MiB",
  2420. __func__, mtime,
  2421. LONG_TEST_SIZE_INTEGER(LONG_READ_NUM_BYTES),
  2422. LONG_TEST_SIZE_FRACTION(LONG_READ_NUM_BYTES));
  2423. /* we first multiply in order not to lose precision */
  2424. mtime *= MB_MSEC_RATIO_APPROXIMATION;
  2425. /* divide values to get a MiB/sec integer value with one
  2426. digit of precision. Multiply by 10 for one digit precision
  2427. */
  2428. fraction = integer = (LONG_READ_NUM_BYTES * 10) / mtime;
  2429. integer /= 10;
  2430. /* and calculate the MiB value fraction */
  2431. fraction -= integer * 10;
  2432. test_pr_info("%s: Throughput: %lu.%lu MiB/sec\n"
  2433. , __func__, integer, fraction);
  2434. /* Allow FS requests to be dispatched */
  2435. msleep(1000);
  2436. }
  2437. return count;
  2438. }
  2439. static ssize_t long_sequential_read_test_read(struct file *file,
  2440. char __user *buffer,
  2441. size_t count,
  2442. loff_t *offset)
  2443. {
  2444. if (!access_ok(VERIFY_WRITE, buffer, count))
  2445. return count;
  2446. memset((void *)buffer, 0, count);
  2447. snprintf(buffer, count,
  2448. "\nlong_sequential_read_test\n"
  2449. "=========\n"
  2450. "Description:\n"
  2451. "This test runs the following scenarios\n"
  2452. "- Long Sequential Read Test: this test measures read "
  2453. "throughput at the driver level by sequentially reading many "
  2454. "large requests.\n");
  2455. if (message_repeat == 1) {
  2456. message_repeat = 0;
  2457. return strnlen(buffer, count);
  2458. } else
  2459. return 0;
  2460. }
/* debugfs hooks for the long sequential read test node */
const struct file_operations long_sequential_read_test_ops = {
	.open = test_open,
	.write = long_sequential_read_test_write,
	.read = long_sequential_read_test_read,
};
/*
 * Completion callback for long-sequential-write test requests.
 *
 * Unlinks the finished test request and releases the underlying block
 * request under ptd->lock, frees the test bookkeeping, bumps the
 * module-wide completion counter (polled by run_long_seq_write()), and
 * lets the test core check for overall completion.
 *
 * NOTE(review): mbtd->completed_req_count is incremented outside the
 * spinlock; presumably end_io callbacks are serialized here - confirm.
 */
static void long_seq_write_free_end_io_fn(struct request *rq, int err)
{
	struct test_request *test_rq =
		(struct test_request *)rq->elv.priv[0];
	struct test_data *ptd = test_get_test_data();

	BUG_ON(!test_rq);

	spin_lock_irq(&ptd->lock);
	list_del_init(&test_rq->queuelist);
	ptd->dispatched_count--;
	__blk_put_request(ptd->req_q, test_rq->rq);
	spin_unlock_irq(&ptd->lock);

	kfree(test_rq->bios_buffer);
	kfree(test_rq);
	mbtd->completed_req_count++;

	check_test_completion();
}
/*
 * run_long_seq_write() - issue a long stream of sequential write requests.
 *
 * Queues batches of write requests (each with TEST_MAX_BIOS_PER_REQ bios
 * of pattern 0x5A) and runs the queue, repeating until at least
 * LONG_WRITE_TEST_MIN_NUM_REQS requests have completed, as counted by
 * long_seq_write_free_end_io_fn().
 *
 * Return: 0 on success, or the error from the last failed request add.
 */
static int run_long_seq_write(struct test_data *td)
{
	int ret = 0;
	int i;
	/* batch size: half the request pool, leaving a safety margin */
	int num_requests = TEST_MAX_REQUESTS / 2;

	td->test_count = 0;
	mbtd->completed_req_count = 0;

	test_pr_info("%s: Adding at least %d write requests, first req_id=%d",
		     __func__, LONG_WRITE_TEST_MIN_NUM_REQS,
		     td->wr_rd_next_req_id);

	do {
		for (i = 0; i < num_requests; i++) {
			/*
			 * since our requests come from a pool containing 128
			 * requests, we don't want to exhaust this quantity,
			 * therefore we add up to num_requests (which
			 * includes a safety margin) and then call the mmc layer
			 * to fetch them
			 */
			if (td->test_count > num_requests)
				break;

			ret = test_iosched_add_wr_rd_test_req(0, WRITE,
				td->start_sector, TEST_MAX_BIOS_PER_REQ,
				TEST_PATTERN_5A,
				long_seq_write_free_end_io_fn);
			if (ret) {
				test_pr_err("%s: failed to create write request"
					    , __func__);
				break;
			}
		}

		__blk_run_queue(td->req_q);
	} while (mbtd->completed_req_count < LONG_WRITE_TEST_MIN_NUM_REQS);

	test_pr_info("%s: completed %d requests", __func__,
		     mbtd->completed_req_count);

	return ret;
}
  2519. static ssize_t long_sequential_write_test_write(struct file *file,
  2520. const char __user *buf,
  2521. size_t count,
  2522. loff_t *ppos)
  2523. {
  2524. int ret = 0;
  2525. int i = 0;
  2526. int number = -1;
  2527. unsigned long mtime, integer, fraction, byte_count;
  2528. test_pr_info("%s: -- Long Sequential Write TEST --", __func__);
  2529. sscanf(buf, "%d", &number);
  2530. if (number <= 0)
  2531. number = 1;
  2532. memset(&mbtd->test_info, 0, sizeof(struct test_info));
  2533. mbtd->test_group = TEST_GENERAL_GROUP;
  2534. mbtd->test_info.data = mbtd;
  2535. mbtd->test_info.get_test_case_str_fn = get_test_case_str;
  2536. mbtd->test_info.run_test_fn = run_long_seq_write;
  2537. for (i = 0 ; i < number ; ++i) {
  2538. test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
  2539. test_pr_info("%s: ====================", __func__);
  2540. integer = 0;
  2541. fraction = 0;
  2542. mbtd->test_info.test_byte_count = 0;
  2543. mbtd->test_info.testcase = TEST_LONG_SEQUENTIAL_WRITE;
  2544. mbtd->is_random = NON_RANDOM_TEST;
  2545. ret = test_iosched_start_test(&mbtd->test_info);
  2546. if (ret)
  2547. break;
  2548. mtime = ktime_to_ms(mbtd->test_info.test_duration);
  2549. byte_count = mbtd->test_info.test_byte_count;
  2550. test_pr_info("%s: time is %lu msec, size is %lu.%lu MiB",
  2551. __func__, mtime, LONG_TEST_SIZE_INTEGER(byte_count),
  2552. LONG_TEST_SIZE_FRACTION(byte_count));
  2553. /* we first multiply in order not to lose precision */
  2554. mtime *= MB_MSEC_RATIO_APPROXIMATION;
  2555. /* divide values to get a MiB/sec integer value with one
  2556. digit of precision
  2557. */
  2558. fraction = integer = (byte_count * 10) / mtime;
  2559. integer /= 10;
  2560. /* and calculate the MiB value fraction */
  2561. fraction -= integer * 10;
  2562. test_pr_info("%s: Throughput: %lu.%lu MiB/sec\n",
  2563. __func__, integer, fraction);
  2564. /* Allow FS requests to be dispatched */
  2565. msleep(1000);
  2566. }
  2567. return count;
  2568. }
  2569. static ssize_t long_sequential_write_test_read(struct file *file,
  2570. char __user *buffer,
  2571. size_t count,
  2572. loff_t *offset)
  2573. {
  2574. if (!access_ok(VERIFY_WRITE, buffer, count))
  2575. return count;
  2576. memset((void *)buffer, 0, count);
  2577. snprintf(buffer, count,
  2578. "\nlong_sequential_write_test\n"
  2579. "=========\n"
  2580. "Description:\n"
  2581. "This test runs the following scenarios\n"
  2582. "- Long Sequential Write Test: this test measures write "
  2583. "throughput at the driver level by sequentially writing many "
  2584. "large requests\n");
  2585. if (message_repeat == 1) {
  2586. message_repeat = 0;
  2587. return strnlen(buffer, count);
  2588. } else
  2589. return 0;
  2590. }
/* debugfs hooks for the long sequential write test node */
const struct file_operations long_sequential_write_test_ops = {
	.open = test_open,
	.write = long_sequential_write_test_write,
	.read = long_sequential_write_test_read,
};
/*
 * new_req_notification_test_write() - debugfs write; run the
 * TEST_NEW_REQ_NOTIFICATION case @number times (default 1), stopping on
 * the first failure.
 */
static ssize_t new_req_notification_test_write(struct file *file,
					       const char __user *buf,
					       size_t count,
					       loff_t *ppos)
{
	int ret = 0;
	int i = 0;
	int number = -1;

	test_pr_info("%s: -- new_req_notification TEST --", __func__);

	sscanf(buf, "%d", &number);

	if (number <= 0)
		number = 1;

	mbtd->test_group = TEST_NEW_NOTIFICATION_GROUP;

	memset(&mbtd->test_info, 0, sizeof(struct test_info));

	mbtd->test_info.data = mbtd;
	mbtd->test_info.prepare_test_fn = prepare_new_req;
	mbtd->test_info.check_test_result_fn = check_new_req_result;
	mbtd->test_info.get_test_case_str_fn = get_test_case_str;
	mbtd->test_info.run_test_fn = run_new_req;
	mbtd->test_info.timeout_msec = 10 * 60 * 1000; /* 10 min (was mislabeled "1 min") */
	mbtd->test_info.post_test_fn = new_req_post_test;

	for (i = 0 ; i < number ; ++i) {
		test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
		test_pr_info("%s: ===================", __func__);
		test_pr_info("%s: start test case TEST_NEW_REQ_NOTIFICATION",
			     __func__);
		mbtd->test_info.testcase = TEST_NEW_REQ_NOTIFICATION;
		ret = test_iosched_start_test(&mbtd->test_info);
		if (ret) {
			test_pr_info("%s: break from new_req tests loop",
				     __func__);
			break;
		}
	}
	return count;
}
  2632. static ssize_t new_req_notification_test_read(struct file *file,
  2633. char __user *buffer,
  2634. size_t count,
  2635. loff_t *offset)
  2636. {
  2637. if (!access_ok(VERIFY_WRITE, buffer, count))
  2638. return count;
  2639. memset((void *)buffer, 0, count);
  2640. snprintf(buffer, count,
  2641. "\nnew_req_notification_test\n========================\n"
  2642. "Description:\n"
  2643. "This test checks following scenarious\n"
  2644. "- new request arrives after a NULL request was sent to the "
  2645. "mmc_queue,\n"
  2646. "which is waiting for completion of a former request\n");
  2647. return strnlen(buffer, count);
  2648. }
/* debugfs hooks for the new-request notification test node */
const struct file_operations new_req_notification_test_ops = {
	.open = test_open,
	.write = new_req_notification_test_write,
	.read = new_req_notification_test_read,
};
/*
 * Remove every debugfs entry created by mmc_block_test_debugfs_init().
 * debugfs_remove(NULL) is a no-op, so a partially initialized set of
 * entries is safe to pass here.
 */
static void mmc_block_test_debugfs_cleanup(void)
{
	debugfs_remove(mbtd->debug.random_test_seed);
	debugfs_remove(mbtd->debug.send_write_packing_test);
	debugfs_remove(mbtd->debug.err_check_test);
	debugfs_remove(mbtd->debug.send_invalid_packed_test);
	debugfs_remove(mbtd->debug.packing_control_test);
	debugfs_remove(mbtd->debug.discard_sanitize_test);
	debugfs_remove(mbtd->debug.bkops_test);
	debugfs_remove(mbtd->debug.long_sequential_read_test);
	debugfs_remove(mbtd->debug.long_sequential_write_test);
	debugfs_remove(mbtd->debug.new_req_notification_test);
}
  2667. static int mmc_block_test_debugfs_init(void)
  2668. {
  2669. struct dentry *utils_root, *tests_root;
  2670. utils_root = test_iosched_get_debugfs_utils_root();
  2671. tests_root = test_iosched_get_debugfs_tests_root();
  2672. if (!utils_root || !tests_root)
  2673. return -EINVAL;
  2674. mbtd->debug.random_test_seed = debugfs_create_u32(
  2675. "random_test_seed",
  2676. S_IRUGO | S_IWUGO,
  2677. utils_root,
  2678. &mbtd->random_test_seed);
  2679. if (!mbtd->debug.random_test_seed)
  2680. goto err_nomem;
  2681. mbtd->debug.send_write_packing_test =
  2682. debugfs_create_file("send_write_packing_test",
  2683. S_IRUGO | S_IWUGO,
  2684. tests_root,
  2685. NULL,
  2686. &send_write_packing_test_ops);
  2687. if (!mbtd->debug.send_write_packing_test)
  2688. goto err_nomem;
  2689. mbtd->debug.err_check_test =
  2690. debugfs_create_file("err_check_test",
  2691. S_IRUGO | S_IWUGO,
  2692. tests_root,
  2693. NULL,
  2694. &err_check_test_ops);
  2695. if (!mbtd->debug.err_check_test)
  2696. goto err_nomem;
  2697. mbtd->debug.send_invalid_packed_test =
  2698. debugfs_create_file("send_invalid_packed_test",
  2699. S_IRUGO | S_IWUGO,
  2700. tests_root,
  2701. NULL,
  2702. &send_invalid_packed_test_ops);
  2703. if (!mbtd->debug.send_invalid_packed_test)
  2704. goto err_nomem;
  2705. mbtd->debug.packing_control_test = debugfs_create_file(
  2706. "packing_control_test",
  2707. S_IRUGO | S_IWUGO,
  2708. tests_root,
  2709. NULL,
  2710. &write_packing_control_test_ops);
  2711. if (!mbtd->debug.packing_control_test)
  2712. goto err_nomem;
  2713. mbtd->debug.discard_sanitize_test =
  2714. debugfs_create_file("write_discard_sanitize_test",
  2715. S_IRUGO | S_IWUGO,
  2716. tests_root,
  2717. NULL,
  2718. &write_discard_sanitize_test_ops);
  2719. if (!mbtd->debug.discard_sanitize_test) {
  2720. mmc_block_test_debugfs_cleanup();
  2721. return -ENOMEM;
  2722. }
  2723. mbtd->debug.bkops_test =
  2724. debugfs_create_file("bkops_test",
  2725. S_IRUGO | S_IWUGO,
  2726. tests_root,
  2727. NULL,
  2728. &bkops_test_ops);
  2729. mbtd->debug.new_req_notification_test =
  2730. debugfs_create_file("new_req_notification_test",
  2731. S_IRUGO | S_IWUGO,
  2732. tests_root,
  2733. NULL,
  2734. &new_req_notification_test_ops);
  2735. if (!mbtd->debug.new_req_notification_test)
  2736. goto err_nomem;
  2737. if (!mbtd->debug.bkops_test)
  2738. goto err_nomem;
  2739. mbtd->debug.long_sequential_read_test = debugfs_create_file(
  2740. "long_sequential_read_test",
  2741. S_IRUGO | S_IWUGO,
  2742. tests_root,
  2743. NULL,
  2744. &long_sequential_read_test_ops);
  2745. if (!mbtd->debug.long_sequential_read_test)
  2746. goto err_nomem;
  2747. mbtd->debug.long_sequential_write_test = debugfs_create_file(
  2748. "long_sequential_write_test",
  2749. S_IRUGO | S_IWUGO,
  2750. tests_root,
  2751. NULL,
  2752. &long_sequential_write_test_ops);
  2753. if (!mbtd->debug.long_sequential_write_test)
  2754. goto err_nomem;
  2755. return 0;
  2756. err_nomem:
  2757. mmc_block_test_debugfs_cleanup();
  2758. return -ENOMEM;
  2759. }
  2760. static void mmc_block_test_probe(void)
  2761. {
  2762. struct request_queue *q = test_iosched_get_req_queue();
  2763. struct mmc_queue *mq;
  2764. int max_packed_reqs;
  2765. if (!q) {
  2766. test_pr_err("%s: NULL request queue", __func__);
  2767. return;
  2768. }
  2769. mq = q->queuedata;
  2770. if (!mq) {
  2771. test_pr_err("%s: NULL mq", __func__);
  2772. return;
  2773. }
  2774. max_packed_reqs = mq->card->ext_csd.max_packed_writes;
  2775. mbtd->exp_packed_stats.packing_events =
  2776. kzalloc((max_packed_reqs + 1) *
  2777. sizeof(*mbtd->exp_packed_stats.packing_events),
  2778. GFP_KERNEL);
  2779. mmc_block_test_debugfs_init();
  2780. }
/* exit_fn callback for the test-iosched framework: tear down debugfs. */
static void mmc_block_test_remove(void)
{
	mmc_block_test_debugfs_cleanup();
}
  2785. static int __init mmc_block_test_init(void)
  2786. {
  2787. mbtd = kzalloc(sizeof(struct mmc_block_test_data), GFP_KERNEL);
  2788. if (!mbtd) {
  2789. test_pr_err("%s: failed to allocate mmc_block_test_data",
  2790. __func__);
  2791. return -ENODEV;
  2792. }
  2793. init_waitqueue_head(&mbtd->bkops_wait_q);
  2794. mbtd->bdt.init_fn = mmc_block_test_probe;
  2795. mbtd->bdt.exit_fn = mmc_block_test_remove;
  2796. INIT_LIST_HEAD(&mbtd->bdt.list);
  2797. test_iosched_register(&mbtd->bdt);
  2798. return 0;
  2799. }
/* Module exit: unregister from the framework and free the global data. */
static void __exit mmc_block_test_exit(void)
{
	test_iosched_unregister(&mbtd->bdt);
	kfree(mbtd);
}
/* Module entry/exit registration and metadata. */
module_init(mmc_block_test_init);
module_exit(mmc_block_test_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MMC block test");