debug.c

  1. /*
  2. * This file is part of UBIFS.
  3. *
  4. * Copyright (C) 2006-2008 Nokia Corporation
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License version 2 as published by
  8. * the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful, but WITHOUT
  11. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  13. * more details.
  14. *
  15. * You should have received a copy of the GNU General Public License along with
  16. * this program; if not, write to the Free Software Foundation, Inc., 51
  17. * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  18. *
  19. * Authors: Artem Bityutskiy (Битюцкий Артём)
  20. * Adrian Hunter
  21. */
  22. /*
  23. * This file implements most of the debugging stuff which is compiled in only
  24. * when it is enabled. But some debugging check functions are implemented in
  25. * the corresponding subsystems, because they are closely related and utilize
  26. * various local functions of those subsystems.
  27. */
  28. #define UBIFS_DBG_PRESERVE_UBI
  29. #include "ubifs.h"
  30. #include <linux/module.h>
  31. #include <linux/moduleparam.h>
  32. #include <linux/debugfs.h>
  33. #include <linux/math64.h>
  34. #ifdef CONFIG_UBIFS_FS_DEBUG
  35. DEFINE_SPINLOCK(dbg_lock);
  36. static char dbg_key_buf0[128];
  37. static char dbg_key_buf1[128];
  38. unsigned int ubifs_chk_flags;
  39. unsigned int ubifs_tst_flags;
  40. module_param_named(debug_chks, ubifs_chk_flags, uint, S_IRUGO | S_IWUSR);
  41. module_param_named(debug_tsts, ubifs_tst_flags, uint, S_IRUGO | S_IWUSR);
  42. MODULE_PARM_DESC(debug_chks, "Debug check flags");
  43. MODULE_PARM_DESC(debug_tsts, "Debug special test flags");
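/*
 * Note: both debug_chks and debug_tsts are writable module parameters
 * (S_IRUGO | S_IWUSR), so besides being set at module load time, e.g.
 * "modprobe ubifs debug_chks=3", they can normally also be changed at
 * runtime through sysfs, e.g.:
 *
 *	echo 3 > /sys/module/ubifs/parameters/debug_chks
 */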
  44. static const char *get_key_fmt(int fmt)
  45. {
  46. switch (fmt) {
  47. case UBIFS_SIMPLE_KEY_FMT:
  48. return "simple";
  49. default:
  50. return "unknown/invalid format";
  51. }
  52. }
  53. static const char *get_key_hash(int hash)
  54. {
  55. switch (hash) {
  56. case UBIFS_KEY_HASH_R5:
  57. return "R5";
  58. case UBIFS_KEY_HASH_TEST:
  59. return "test";
  60. default:
  61. return "unknown/invalid name hash";
  62. }
  63. }
  64. static const char *get_key_type(int type)
  65. {
  66. switch (type) {
  67. case UBIFS_INO_KEY:
  68. return "inode";
  69. case UBIFS_DENT_KEY:
  70. return "direntry";
  71. case UBIFS_XENT_KEY:
  72. return "xentry";
  73. case UBIFS_DATA_KEY:
  74. return "data";
  75. case UBIFS_TRUN_KEY:
  76. return "truncate";
  77. default:
  78. return "unknown/invalid key";
  79. }
  80. }
  81. static void sprintf_key(const struct ubifs_info *c, const union ubifs_key *key,
  82. char *buffer)
  83. {
  84. char *p = buffer;
  85. int type = key_type(c, key);
  86. if (c->key_fmt == UBIFS_SIMPLE_KEY_FMT) {
  87. switch (type) {
  88. case UBIFS_INO_KEY:
  89. sprintf(p, "(%lu, %s)", (unsigned long)key_inum(c, key),
  90. get_key_type(type));
  91. break;
  92. case UBIFS_DENT_KEY:
  93. case UBIFS_XENT_KEY:
  94. sprintf(p, "(%lu, %s, %#08x)",
  95. (unsigned long)key_inum(c, key),
  96. get_key_type(type), key_hash(c, key));
  97. break;
  98. case UBIFS_DATA_KEY:
  99. sprintf(p, "(%lu, %s, %u)",
  100. (unsigned long)key_inum(c, key),
  101. get_key_type(type), key_block(c, key));
  102. break;
  103. case UBIFS_TRUN_KEY:
  104. sprintf(p, "(%lu, %s)",
  105. (unsigned long)key_inum(c, key),
  106. get_key_type(type));
  107. break;
  108. default:
  109. sprintf(p, "(bad key type: %#08x, %#08x)",
  110. key->u32[0], key->u32[1]);
  111. }
  112. } else
  113. sprintf(p, "bad key format %d", c->key_fmt);
  114. }
  115. const char *dbg_key_str0(const struct ubifs_info *c, const union ubifs_key *key)
  116. {
  117. /* dbg_lock must be held */
  118. sprintf_key(c, key, dbg_key_buf0);
  119. return dbg_key_buf0;
  120. }
  121. const char *dbg_key_str1(const struct ubifs_info *c, const union ubifs_key *key)
  122. {
  123. /* dbg_lock must be held */
  124. sprintf_key(c, key, dbg_key_buf1);
  125. return dbg_key_buf1;
  126. }
  127. const char *dbg_ntype(int type)
  128. {
  129. switch (type) {
  130. case UBIFS_PAD_NODE:
  131. return "padding node";
  132. case UBIFS_SB_NODE:
  133. return "superblock node";
  134. case UBIFS_MST_NODE:
  135. return "master node";
  136. case UBIFS_REF_NODE:
  137. return "reference node";
  138. case UBIFS_INO_NODE:
  139. return "inode node";
  140. case UBIFS_DENT_NODE:
  141. return "direntry node";
  142. case UBIFS_XENT_NODE:
  143. return "xentry node";
  144. case UBIFS_DATA_NODE:
  145. return "data node";
  146. case UBIFS_TRUN_NODE:
  147. return "truncate node";
  148. case UBIFS_IDX_NODE:
  149. return "indexing node";
  150. case UBIFS_CS_NODE:
  151. return "commit start node";
  152. case UBIFS_ORPH_NODE:
  153. return "orphan node";
  154. default:
  155. return "unknown node";
  156. }
  157. }
  158. static const char *dbg_gtype(int type)
  159. {
  160. switch (type) {
  161. case UBIFS_NO_NODE_GROUP:
  162. return "no node group";
  163. case UBIFS_IN_NODE_GROUP:
  164. return "in node group";
  165. case UBIFS_LAST_OF_NODE_GROUP:
  166. return "last of node group";
  167. default:
  168. return "unknown";
  169. }
  170. }
  171. const char *dbg_cstate(int cmt_state)
  172. {
  173. switch (cmt_state) {
  174. case COMMIT_RESTING:
  175. return "commit resting";
  176. case COMMIT_BACKGROUND:
  177. return "background commit requested";
  178. case COMMIT_REQUIRED:
  179. return "commit required";
  180. case COMMIT_RUNNING_BACKGROUND:
  181. return "BACKGROUND commit running";
  182. case COMMIT_RUNNING_REQUIRED:
  183. return "commit running and required";
  184. case COMMIT_BROKEN:
  185. return "broken commit";
  186. default:
  187. return "unknown commit state";
  188. }
  189. }
  190. const char *dbg_jhead(int jhead)
  191. {
  192. switch (jhead) {
  193. case GCHD:
  194. return "0 (GC)";
  195. case BASEHD:
  196. return "1 (base)";
  197. case DATAHD:
  198. return "2 (data)";
  199. default:
  200. return "unknown journal head";
  201. }
  202. }
  203. static void dump_ch(const struct ubifs_ch *ch)
  204. {
  205. printk(KERN_DEBUG "\tmagic %#x\n", le32_to_cpu(ch->magic));
  206. printk(KERN_DEBUG "\tcrc %#x\n", le32_to_cpu(ch->crc));
  207. printk(KERN_DEBUG "\tnode_type %d (%s)\n", ch->node_type,
  208. dbg_ntype(ch->node_type));
  209. printk(KERN_DEBUG "\tgroup_type %d (%s)\n", ch->group_type,
  210. dbg_gtype(ch->group_type));
  211. printk(KERN_DEBUG "\tsqnum %llu\n",
  212. (unsigned long long)le64_to_cpu(ch->sqnum));
  213. printk(KERN_DEBUG "\tlen %u\n", le32_to_cpu(ch->len));
  214. }
  215. void dbg_dump_inode(const struct ubifs_info *c, const struct inode *inode)
  216. {
  217. const struct ubifs_inode *ui = ubifs_inode(inode);
  218. printk(KERN_DEBUG "Dump in-memory inode:");
  219. printk(KERN_DEBUG "\tinode %lu\n", inode->i_ino);
  220. printk(KERN_DEBUG "\tsize %llu\n",
  221. (unsigned long long)i_size_read(inode));
  222. printk(KERN_DEBUG "\tnlink %u\n", inode->i_nlink);
  223. printk(KERN_DEBUG "\tuid %u\n", (unsigned int)inode->i_uid);
  224. printk(KERN_DEBUG "\tgid %u\n", (unsigned int)inode->i_gid);
  225. printk(KERN_DEBUG "\tatime %u.%u\n",
  226. (unsigned int)inode->i_atime.tv_sec,
  227. (unsigned int)inode->i_atime.tv_nsec);
  228. printk(KERN_DEBUG "\tmtime %u.%u\n",
  229. (unsigned int)inode->i_mtime.tv_sec,
  230. (unsigned int)inode->i_mtime.tv_nsec);
  231. printk(KERN_DEBUG "\tctime %u.%u\n",
  232. (unsigned int)inode->i_ctime.tv_sec,
  233. (unsigned int)inode->i_ctime.tv_nsec);
  234. printk(KERN_DEBUG "\tcreat_sqnum %llu\n", ui->creat_sqnum);
  235. printk(KERN_DEBUG "\txattr_size %u\n", ui->xattr_size);
  236. printk(KERN_DEBUG "\txattr_cnt %u\n", ui->xattr_cnt);
  237. printk(KERN_DEBUG "\txattr_names %u\n", ui->xattr_names);
  238. printk(KERN_DEBUG "\tdirty %u\n", ui->dirty);
  239. printk(KERN_DEBUG "\txattr %u\n", ui->xattr);
  240. printk(KERN_DEBUG "\tbulk_read %u\n", ui->xattr);
  241. printk(KERN_DEBUG "\tsynced_i_size %llu\n",
  242. (unsigned long long)ui->synced_i_size);
  243. printk(KERN_DEBUG "\tui_size %llu\n",
  244. (unsigned long long)ui->ui_size);
  245. printk(KERN_DEBUG "\tflags %d\n", ui->flags);
  246. printk(KERN_DEBUG "\tcompr_type %d\n", ui->compr_type);
  247. printk(KERN_DEBUG "\tlast_page_read %lu\n", ui->last_page_read);
  248. printk(KERN_DEBUG "\tread_in_a_row %lu\n", ui->read_in_a_row);
  249. printk(KERN_DEBUG "\tdata_len %d\n", ui->data_len);
  250. }
  251. void dbg_dump_node(const struct ubifs_info *c, const void *node)
  252. {
  253. int i, n;
  254. union ubifs_key key;
  255. const struct ubifs_ch *ch = node;
  256. if (dbg_failure_mode)
  257. return;
  258. /* If the magic is incorrect, just hexdump the first bytes */
  259. if (le32_to_cpu(ch->magic) != UBIFS_NODE_MAGIC) {
  260. printk(KERN_DEBUG "Not a node, first %zu bytes:", UBIFS_CH_SZ);
  261. print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
  262. (void *)node, UBIFS_CH_SZ, 1);
  263. return;
  264. }
  265. spin_lock(&dbg_lock);
  266. dump_ch(node);
  267. switch (ch->node_type) {
  268. case UBIFS_PAD_NODE:
  269. {
  270. const struct ubifs_pad_node *pad = node;
  271. printk(KERN_DEBUG "\tpad_len %u\n",
  272. le32_to_cpu(pad->pad_len));
  273. break;
  274. }
  275. case UBIFS_SB_NODE:
  276. {
  277. const struct ubifs_sb_node *sup = node;
  278. unsigned int sup_flags = le32_to_cpu(sup->flags);
  279. printk(KERN_DEBUG "\tkey_hash %d (%s)\n",
  280. (int)sup->key_hash, get_key_hash(sup->key_hash));
  281. printk(KERN_DEBUG "\tkey_fmt %d (%s)\n",
  282. (int)sup->key_fmt, get_key_fmt(sup->key_fmt));
  283. printk(KERN_DEBUG "\tflags %#x\n", sup_flags);
  284. printk(KERN_DEBUG "\t big_lpt %u\n",
  285. !!(sup_flags & UBIFS_FLG_BIGLPT));
  286. printk(KERN_DEBUG "\t space_fixup %u\n",
  287. !!(sup_flags & UBIFS_FLG_SPACE_FIXUP));
  288. printk(KERN_DEBUG "\tmin_io_size %u\n",
  289. le32_to_cpu(sup->min_io_size));
  290. printk(KERN_DEBUG "\tleb_size %u\n",
  291. le32_to_cpu(sup->leb_size));
  292. printk(KERN_DEBUG "\tleb_cnt %u\n",
  293. le32_to_cpu(sup->leb_cnt));
  294. printk(KERN_DEBUG "\tmax_leb_cnt %u\n",
  295. le32_to_cpu(sup->max_leb_cnt));
  296. printk(KERN_DEBUG "\tmax_bud_bytes %llu\n",
  297. (unsigned long long)le64_to_cpu(sup->max_bud_bytes));
  298. printk(KERN_DEBUG "\tlog_lebs %u\n",
  299. le32_to_cpu(sup->log_lebs));
  300. printk(KERN_DEBUG "\tlpt_lebs %u\n",
  301. le32_to_cpu(sup->lpt_lebs));
  302. printk(KERN_DEBUG "\torph_lebs %u\n",
  303. le32_to_cpu(sup->orph_lebs));
  304. printk(KERN_DEBUG "\tjhead_cnt %u\n",
  305. le32_to_cpu(sup->jhead_cnt));
  306. printk(KERN_DEBUG "\tfanout %u\n",
  307. le32_to_cpu(sup->fanout));
  308. printk(KERN_DEBUG "\tlsave_cnt %u\n",
  309. le32_to_cpu(sup->lsave_cnt));
  310. printk(KERN_DEBUG "\tdefault_compr %u\n",
  311. (int)le16_to_cpu(sup->default_compr));
  312. printk(KERN_DEBUG "\trp_size %llu\n",
  313. (unsigned long long)le64_to_cpu(sup->rp_size));
  314. printk(KERN_DEBUG "\trp_uid %u\n",
  315. le32_to_cpu(sup->rp_uid));
  316. printk(KERN_DEBUG "\trp_gid %u\n",
  317. le32_to_cpu(sup->rp_gid));
  318. printk(KERN_DEBUG "\tfmt_version %u\n",
  319. le32_to_cpu(sup->fmt_version));
  320. printk(KERN_DEBUG "\ttime_gran %u\n",
  321. le32_to_cpu(sup->time_gran));
  322. printk(KERN_DEBUG "\tUUID %pUB\n",
  323. sup->uuid);
  324. break;
  325. }
  326. case UBIFS_MST_NODE:
  327. {
  328. const struct ubifs_mst_node *mst = node;
  329. printk(KERN_DEBUG "\thighest_inum %llu\n",
  330. (unsigned long long)le64_to_cpu(mst->highest_inum));
  331. printk(KERN_DEBUG "\tcommit number %llu\n",
  332. (unsigned long long)le64_to_cpu(mst->cmt_no));
  333. printk(KERN_DEBUG "\tflags %#x\n",
  334. le32_to_cpu(mst->flags));
  335. printk(KERN_DEBUG "\tlog_lnum %u\n",
  336. le32_to_cpu(mst->log_lnum));
  337. printk(KERN_DEBUG "\troot_lnum %u\n",
  338. le32_to_cpu(mst->root_lnum));
  339. printk(KERN_DEBUG "\troot_offs %u\n",
  340. le32_to_cpu(mst->root_offs));
  341. printk(KERN_DEBUG "\troot_len %u\n",
  342. le32_to_cpu(mst->root_len));
  343. printk(KERN_DEBUG "\tgc_lnum %u\n",
  344. le32_to_cpu(mst->gc_lnum));
  345. printk(KERN_DEBUG "\tihead_lnum %u\n",
  346. le32_to_cpu(mst->ihead_lnum));
  347. printk(KERN_DEBUG "\tihead_offs %u\n",
  348. le32_to_cpu(mst->ihead_offs));
  349. printk(KERN_DEBUG "\tindex_size %llu\n",
  350. (unsigned long long)le64_to_cpu(mst->index_size));
  351. printk(KERN_DEBUG "\tlpt_lnum %u\n",
  352. le32_to_cpu(mst->lpt_lnum));
  353. printk(KERN_DEBUG "\tlpt_offs %u\n",
  354. le32_to_cpu(mst->lpt_offs));
  355. printk(KERN_DEBUG "\tnhead_lnum %u\n",
  356. le32_to_cpu(mst->nhead_lnum));
  357. printk(KERN_DEBUG "\tnhead_offs %u\n",
  358. le32_to_cpu(mst->nhead_offs));
  359. printk(KERN_DEBUG "\tltab_lnum %u\n",
  360. le32_to_cpu(mst->ltab_lnum));
  361. printk(KERN_DEBUG "\tltab_offs %u\n",
  362. le32_to_cpu(mst->ltab_offs));
  363. printk(KERN_DEBUG "\tlsave_lnum %u\n",
  364. le32_to_cpu(mst->lsave_lnum));
  365. printk(KERN_DEBUG "\tlsave_offs %u\n",
  366. le32_to_cpu(mst->lsave_offs));
  367. printk(KERN_DEBUG "\tlscan_lnum %u\n",
  368. le32_to_cpu(mst->lscan_lnum));
  369. printk(KERN_DEBUG "\tleb_cnt %u\n",
  370. le32_to_cpu(mst->leb_cnt));
  371. printk(KERN_DEBUG "\tempty_lebs %u\n",
  372. le32_to_cpu(mst->empty_lebs));
  373. printk(KERN_DEBUG "\tidx_lebs %u\n",
  374. le32_to_cpu(mst->idx_lebs));
  375. printk(KERN_DEBUG "\ttotal_free %llu\n",
  376. (unsigned long long)le64_to_cpu(mst->total_free));
  377. printk(KERN_DEBUG "\ttotal_dirty %llu\n",
  378. (unsigned long long)le64_to_cpu(mst->total_dirty));
  379. printk(KERN_DEBUG "\ttotal_used %llu\n",
  380. (unsigned long long)le64_to_cpu(mst->total_used));
  381. printk(KERN_DEBUG "\ttotal_dead %llu\n",
  382. (unsigned long long)le64_to_cpu(mst->total_dead));
  383. printk(KERN_DEBUG "\ttotal_dark %llu\n",
  384. (unsigned long long)le64_to_cpu(mst->total_dark));
  385. break;
  386. }
  387. case UBIFS_REF_NODE:
  388. {
  389. const struct ubifs_ref_node *ref = node;
  390. printk(KERN_DEBUG "\tlnum %u\n",
  391. le32_to_cpu(ref->lnum));
  392. printk(KERN_DEBUG "\toffs %u\n",
  393. le32_to_cpu(ref->offs));
  394. printk(KERN_DEBUG "\tjhead %u\n",
  395. le32_to_cpu(ref->jhead));
  396. break;
  397. }
  398. case UBIFS_INO_NODE:
  399. {
  400. const struct ubifs_ino_node *ino = node;
  401. key_read(c, &ino->key, &key);
  402. printk(KERN_DEBUG "\tkey %s\n", DBGKEY(&key));
  403. printk(KERN_DEBUG "\tcreat_sqnum %llu\n",
  404. (unsigned long long)le64_to_cpu(ino->creat_sqnum));
  405. printk(KERN_DEBUG "\tsize %llu\n",
  406. (unsigned long long)le64_to_cpu(ino->size));
  407. printk(KERN_DEBUG "\tnlink %u\n",
  408. le32_to_cpu(ino->nlink));
  409. printk(KERN_DEBUG "\tatime %lld.%u\n",
  410. (long long)le64_to_cpu(ino->atime_sec),
  411. le32_to_cpu(ino->atime_nsec));
  412. printk(KERN_DEBUG "\tmtime %lld.%u\n",
  413. (long long)le64_to_cpu(ino->mtime_sec),
  414. le32_to_cpu(ino->mtime_nsec));
  415. printk(KERN_DEBUG "\tctime %lld.%u\n",
  416. (long long)le64_to_cpu(ino->ctime_sec),
  417. le32_to_cpu(ino->ctime_nsec));
  418. printk(KERN_DEBUG "\tuid %u\n",
  419. le32_to_cpu(ino->uid));
  420. printk(KERN_DEBUG "\tgid %u\n",
  421. le32_to_cpu(ino->gid));
  422. printk(KERN_DEBUG "\tmode %u\n",
  423. le32_to_cpu(ino->mode));
  424. printk(KERN_DEBUG "\tflags %#x\n",
  425. le32_to_cpu(ino->flags));
  426. printk(KERN_DEBUG "\txattr_cnt %u\n",
  427. le32_to_cpu(ino->xattr_cnt));
  428. printk(KERN_DEBUG "\txattr_size %u\n",
  429. le32_to_cpu(ino->xattr_size));
  430. printk(KERN_DEBUG "\txattr_names %u\n",
  431. le32_to_cpu(ino->xattr_names));
  432. printk(KERN_DEBUG "\tcompr_type %#x\n",
  433. (int)le16_to_cpu(ino->compr_type));
  434. printk(KERN_DEBUG "\tdata len %u\n",
  435. le32_to_cpu(ino->data_len));
  436. break;
  437. }
  438. case UBIFS_DENT_NODE:
  439. case UBIFS_XENT_NODE:
  440. {
  441. const struct ubifs_dent_node *dent = node;
  442. int nlen = le16_to_cpu(dent->nlen);
  443. key_read(c, &dent->key, &key);
  444. printk(KERN_DEBUG "\tkey %s\n", DBGKEY(&key));
  445. printk(KERN_DEBUG "\tinum %llu\n",
  446. (unsigned long long)le64_to_cpu(dent->inum));
  447. printk(KERN_DEBUG "\ttype %d\n", (int)dent->type);
  448. printk(KERN_DEBUG "\tnlen %d\n", nlen);
  449. printk(KERN_DEBUG "\tname ");
  450. if (nlen > UBIFS_MAX_NLEN)
  451. printk(KERN_DEBUG "(bad name length, not printing, "
  452. "bad or corrupted node)");
  453. else {
  454. for (i = 0; i < nlen && dent->name[i]; i++)
  455. printk(KERN_CONT "%c", dent->name[i]);
  456. }
  457. printk(KERN_CONT "\n");
  458. break;
  459. }
  460. case UBIFS_DATA_NODE:
  461. {
  462. const struct ubifs_data_node *dn = node;
  463. int dlen = le32_to_cpu(ch->len) - UBIFS_DATA_NODE_SZ;
  464. key_read(c, &dn->key, &key);
  465. printk(KERN_DEBUG "\tkey %s\n", DBGKEY(&key));
  466. printk(KERN_DEBUG "\tsize %u\n",
  467. le32_to_cpu(dn->size));
  468. printk(KERN_DEBUG "\tcompr_typ %d\n",
  469. (int)le16_to_cpu(dn->compr_type));
  470. printk(KERN_DEBUG "\tdata size %d\n",
  471. dlen);
  472. printk(KERN_DEBUG "\tdata:\n");
  473. print_hex_dump(KERN_DEBUG, "\t", DUMP_PREFIX_OFFSET, 32, 1,
  474. (void *)&dn->data, dlen, 0);
  475. break;
  476. }
  477. case UBIFS_TRUN_NODE:
  478. {
  479. const struct ubifs_trun_node *trun = node;
  480. printk(KERN_DEBUG "\tinum %u\n",
  481. le32_to_cpu(trun->inum));
  482. printk(KERN_DEBUG "\told_size %llu\n",
  483. (unsigned long long)le64_to_cpu(trun->old_size));
  484. printk(KERN_DEBUG "\tnew_size %llu\n",
  485. (unsigned long long)le64_to_cpu(trun->new_size));
  486. break;
  487. }
  488. case UBIFS_IDX_NODE:
  489. {
  490. const struct ubifs_idx_node *idx = node;
  491. n = le16_to_cpu(idx->child_cnt);
  492. printk(KERN_DEBUG "\tchild_cnt %d\n", n);
  493. printk(KERN_DEBUG "\tlevel %d\n",
  494. (int)le16_to_cpu(idx->level));
  495. printk(KERN_DEBUG "\tBranches:\n");
  496. for (i = 0; i < n && i < c->fanout - 1; i++) {
  497. const struct ubifs_branch *br;
  498. br = ubifs_idx_branch(c, idx, i);
  499. key_read(c, &br->key, &key);
  500. printk(KERN_DEBUG "\t%d: LEB %d:%d len %d key %s\n",
  501. i, le32_to_cpu(br->lnum), le32_to_cpu(br->offs),
  502. le32_to_cpu(br->len), DBGKEY(&key));
  503. }
  504. break;
  505. }
  506. case UBIFS_CS_NODE:
  507. break;
  508. case UBIFS_ORPH_NODE:
  509. {
  510. const struct ubifs_orph_node *orph = node;
  511. printk(KERN_DEBUG "\tcommit number %llu\n",
  512. (unsigned long long)
  513. le64_to_cpu(orph->cmt_no) & LLONG_MAX);
  514. printk(KERN_DEBUG "\tlast node flag %llu\n",
  515. (unsigned long long)(le64_to_cpu(orph->cmt_no)) >> 63);
  516. n = (le32_to_cpu(ch->len) - UBIFS_ORPH_NODE_SZ) >> 3;
  517. printk(KERN_DEBUG "\t%d orphan inode numbers:\n", n);
  518. for (i = 0; i < n; i++)
  519. printk(KERN_DEBUG "\t ino %llu\n",
  520. (unsigned long long)le64_to_cpu(orph->inos[i]));
  521. break;
  522. }
  523. default:
  524. printk(KERN_DEBUG "node type %d was not recognized\n",
  525. (int)ch->node_type);
  526. }
  527. spin_unlock(&dbg_lock);
  528. }
  529. void dbg_dump_budget_req(const struct ubifs_budget_req *req)
  530. {
  531. spin_lock(&dbg_lock);
  532. printk(KERN_DEBUG "Budgeting request: new_ino %d, dirtied_ino %d\n",
  533. req->new_ino, req->dirtied_ino);
  534. printk(KERN_DEBUG "\tnew_ino_d %d, dirtied_ino_d %d\n",
  535. req->new_ino_d, req->dirtied_ino_d);
  536. printk(KERN_DEBUG "\tnew_page %d, dirtied_page %d\n",
  537. req->new_page, req->dirtied_page);
  538. printk(KERN_DEBUG "\tnew_dent %d, mod_dent %d\n",
  539. req->new_dent, req->mod_dent);
  540. printk(KERN_DEBUG "\tidx_growth %d\n", req->idx_growth);
  541. printk(KERN_DEBUG "\tdata_growth %d dd_growth %d\n",
  542. req->data_growth, req->dd_growth);
  543. spin_unlock(&dbg_lock);
  544. }
  545. void dbg_dump_lstats(const struct ubifs_lp_stats *lst)
  546. {
  547. spin_lock(&dbg_lock);
  548. printk(KERN_DEBUG "(pid %d) Lprops statistics: empty_lebs %d, "
  549. "idx_lebs %d\n", current->pid, lst->empty_lebs, lst->idx_lebs);
  550. printk(KERN_DEBUG "\ttaken_empty_lebs %d, total_free %lld, "
  551. "total_dirty %lld\n", lst->taken_empty_lebs, lst->total_free,
  552. lst->total_dirty);
  553. printk(KERN_DEBUG "\ttotal_used %lld, total_dark %lld, "
  554. "total_dead %lld\n", lst->total_used, lst->total_dark,
  555. lst->total_dead);
  556. spin_unlock(&dbg_lock);
  557. }
  558. void dbg_dump_budg(struct ubifs_info *c, const struct ubifs_budg_info *bi)
  559. {
  560. int i;
  561. struct rb_node *rb;
  562. struct ubifs_bud *bud;
  563. struct ubifs_gced_idx_leb *idx_gc;
  564. long long available, outstanding, free;
  565. spin_lock(&c->space_lock);
  566. spin_lock(&dbg_lock);
  567. printk(KERN_DEBUG "(pid %d) Budgeting info: data budget sum %lld, "
  568. "total budget sum %lld\n", current->pid,
  569. bi->data_growth + bi->dd_growth,
  570. bi->data_growth + bi->dd_growth + bi->idx_growth);
  571. printk(KERN_DEBUG "\tbudg_data_growth %lld, budg_dd_growth %lld, "
  572. "budg_idx_growth %lld\n", bi->data_growth, bi->dd_growth,
  573. bi->idx_growth);
  574. printk(KERN_DEBUG "\tmin_idx_lebs %d, old_idx_sz %llu, "
  575. "uncommitted_idx %lld\n", bi->min_idx_lebs, bi->old_idx_sz,
  576. bi->uncommitted_idx);
  577. printk(KERN_DEBUG "\tpage_budget %d, inode_budget %d, dent_budget %d\n",
  578. bi->page_budget, bi->inode_budget, bi->dent_budget);
  579. printk(KERN_DEBUG "\tnospace %u, nospace_rp %u\n",
  580. bi->nospace, bi->nospace_rp);
  581. printk(KERN_DEBUG "\tdark_wm %d, dead_wm %d, max_idx_node_sz %d\n",
  582. c->dark_wm, c->dead_wm, c->max_idx_node_sz);
  583. if (bi != &c->bi)
  584. /*
  585. * If we are dumping saved budgeting data, do not print
  586. * additional information which is about the current state, not
  587. * the old one which corresponded to the saved budgeting data.
  588. */
  589. goto out_unlock;
  590. printk(KERN_DEBUG "\tfreeable_cnt %d, calc_idx_sz %lld, idx_gc_cnt %d\n",
  591. c->freeable_cnt, c->calc_idx_sz, c->idx_gc_cnt);
  592. printk(KERN_DEBUG "\tdirty_pg_cnt %ld, dirty_zn_cnt %ld, "
  593. "clean_zn_cnt %ld\n", atomic_long_read(&c->dirty_pg_cnt),
  594. atomic_long_read(&c->dirty_zn_cnt),
  595. atomic_long_read(&c->clean_zn_cnt));
  596. printk(KERN_DEBUG "\tgc_lnum %d, ihead_lnum %d\n",
  597. c->gc_lnum, c->ihead_lnum);
  598. /* If we are in R/O mode, journal heads do not exist */
  599. if (c->jheads)
  600. for (i = 0; i < c->jhead_cnt; i++)
  601. printk(KERN_DEBUG "\tjhead %s\t LEB %d\n",
  602. dbg_jhead(c->jheads[i].wbuf.jhead),
  603. c->jheads[i].wbuf.lnum);
  604. for (rb = rb_first(&c->buds); rb; rb = rb_next(rb)) {
  605. bud = rb_entry(rb, struct ubifs_bud, rb);
  606. printk(KERN_DEBUG "\tbud LEB %d\n", bud->lnum);
  607. }
  608. list_for_each_entry(bud, &c->old_buds, list)
  609. printk(KERN_DEBUG "\told bud LEB %d\n", bud->lnum);
  610. list_for_each_entry(idx_gc, &c->idx_gc, list)
  611. printk(KERN_DEBUG "\tGC'ed idx LEB %d unmap %d\n",
  612. idx_gc->lnum, idx_gc->unmap);
  613. printk(KERN_DEBUG "\tcommit state %d\n", c->cmt_state);
  614. /* Print budgeting predictions */
  615. available = ubifs_calc_available(c, c->bi.min_idx_lebs);
  616. outstanding = c->bi.data_growth + c->bi.dd_growth;
  617. free = ubifs_get_free_space_nolock(c);
  618. printk(KERN_DEBUG "Budgeting predictions:\n");
  619. printk(KERN_DEBUG "\tavailable: %lld, outstanding %lld, free %lld\n",
  620. available, outstanding, free);
  621. out_unlock:
  622. spin_unlock(&dbg_lock);
  623. spin_unlock(&c->space_lock);
  624. }
  625. void dbg_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp)
  626. {
  627. int i, spc, dark = 0, dead = 0;
  628. struct rb_node *rb;
  629. struct ubifs_bud *bud;
  630. spc = lp->free + lp->dirty;
  631. if (spc < c->dead_wm)
  632. dead = spc;
  633. else
  634. dark = ubifs_calc_dark(c, spc);
  635. if (lp->flags & LPROPS_INDEX)
  636. printk(KERN_DEBUG "LEB %-7d free %-8d dirty %-8d used %-8d "
  637. "free + dirty %-8d flags %#x (", lp->lnum, lp->free,
  638. lp->dirty, c->leb_size - spc, spc, lp->flags);
  639. else
  640. printk(KERN_DEBUG "LEB %-7d free %-8d dirty %-8d used %-8d "
  641. "free + dirty %-8d dark %-4d dead %-4d nodes fit %-3d "
  642. "flags %#-4x (", lp->lnum, lp->free, lp->dirty,
  643. c->leb_size - spc, spc, dark, dead,
  644. (int)(spc / UBIFS_MAX_NODE_SZ), lp->flags);
  645. if (lp->flags & LPROPS_TAKEN) {
  646. if (lp->flags & LPROPS_INDEX)
  647. printk(KERN_CONT "index, taken");
  648. else
  649. printk(KERN_CONT "taken");
  650. } else {
  651. const char *s;
  652. if (lp->flags & LPROPS_INDEX) {
  653. switch (lp->flags & LPROPS_CAT_MASK) {
  654. case LPROPS_DIRTY_IDX:
  655. s = "dirty index";
  656. break;
  657. case LPROPS_FRDI_IDX:
  658. s = "freeable index";
  659. break;
  660. default:
  661. s = "index";
  662. }
  663. } else {
  664. switch (lp->flags & LPROPS_CAT_MASK) {
  665. case LPROPS_UNCAT:
  666. s = "not categorized";
  667. break;
  668. case LPROPS_DIRTY:
  669. s = "dirty";
  670. break;
  671. case LPROPS_FREE:
  672. s = "free";
  673. break;
  674. case LPROPS_EMPTY:
  675. s = "empty";
  676. break;
  677. case LPROPS_FREEABLE:
  678. s = "freeable";
  679. break;
  680. default:
  681. s = NULL;
  682. break;
  683. }
  684. }
  685. printk(KERN_CONT "%s", s);
  686. }
  687. for (rb = rb_first((struct rb_root *)&c->buds); rb; rb = rb_next(rb)) {
  688. bud = rb_entry(rb, struct ubifs_bud, rb);
  689. if (bud->lnum == lp->lnum) {
  690. int head = 0;
  691. for (i = 0; i < c->jhead_cnt; i++) {
  692. /*
  693. * Note, if we are in R/O mode or in the middle
  694. * of mounting/re-mounting, the write-buffers do
  695. * not exist.
  696. */
  697. if (c->jheads &&
  698. lp->lnum == c->jheads[i].wbuf.lnum) {
  699. printk(KERN_CONT ", jhead %s",
  700. dbg_jhead(i));
  701. head = 1;
  702. }
  703. }
  704. if (!head)
  705. printk(KERN_CONT ", bud of jhead %s",
  706. dbg_jhead(bud->jhead));
  707. }
  708. }
  709. if (lp->lnum == c->gc_lnum)
  710. printk(KERN_CONT ", GC LEB");
  711. printk(KERN_CONT ")\n");
  712. }
  713. void dbg_dump_lprops(struct ubifs_info *c)
  714. {
  715. int lnum, err;
  716. struct ubifs_lprops lp;
  717. struct ubifs_lp_stats lst;
  718. printk(KERN_DEBUG "(pid %d) start dumping LEB properties\n",
  719. current->pid);
  720. ubifs_get_lp_stats(c, &lst);
  721. dbg_dump_lstats(&lst);
  722. for (lnum = c->main_first; lnum < c->leb_cnt; lnum++) {
  723. err = ubifs_read_one_lp(c, lnum, &lp);
  724. if (err) {
  725. ubifs_err("cannot read lprops for LEB %d", lnum); continue; }
  726. dbg_dump_lprop(c, &lp);
  727. }
  728. printk(KERN_DEBUG "(pid %d) finish dumping LEB properties\n",
  729. current->pid);
  730. }
  731. void dbg_dump_lpt_info(struct ubifs_info *c)
  732. {
  733. int i;
  734. spin_lock(&dbg_lock);
  735. printk(KERN_DEBUG "(pid %d) dumping LPT information\n", current->pid);
  736. printk(KERN_DEBUG "\tlpt_sz: %lld\n", c->lpt_sz);
  737. printk(KERN_DEBUG "\tpnode_sz: %d\n", c->pnode_sz);
  738. printk(KERN_DEBUG "\tnnode_sz: %d\n", c->nnode_sz);
  739. printk(KERN_DEBUG "\tltab_sz: %d\n", c->ltab_sz);
  740. printk(KERN_DEBUG "\tlsave_sz: %d\n", c->lsave_sz);
  741. printk(KERN_DEBUG "\tbig_lpt: %d\n", c->big_lpt);
  742. printk(KERN_DEBUG "\tlpt_hght: %d\n", c->lpt_hght);
  743. printk(KERN_DEBUG "\tpnode_cnt: %d\n", c->pnode_cnt);
  744. printk(KERN_DEBUG "\tnnode_cnt: %d\n", c->nnode_cnt);
  745. printk(KERN_DEBUG "\tdirty_pn_cnt: %d\n", c->dirty_pn_cnt);
  746. printk(KERN_DEBUG "\tdirty_nn_cnt: %d\n", c->dirty_nn_cnt);
  747. printk(KERN_DEBUG "\tlsave_cnt: %d\n", c->lsave_cnt);
  748. printk(KERN_DEBUG "\tspace_bits: %d\n", c->space_bits);
  749. printk(KERN_DEBUG "\tlpt_lnum_bits: %d\n", c->lpt_lnum_bits);
  750. printk(KERN_DEBUG "\tlpt_offs_bits: %d\n", c->lpt_offs_bits);
  751. printk(KERN_DEBUG "\tlpt_spc_bits: %d\n", c->lpt_spc_bits);
  752. printk(KERN_DEBUG "\tpcnt_bits: %d\n", c->pcnt_bits);
  753. printk(KERN_DEBUG "\tlnum_bits: %d\n", c->lnum_bits);
  754. printk(KERN_DEBUG "\tLPT root is at %d:%d\n", c->lpt_lnum, c->lpt_offs);
  755. printk(KERN_DEBUG "\tLPT head is at %d:%d\n",
  756. c->nhead_lnum, c->nhead_offs);
  757. printk(KERN_DEBUG "\tLPT ltab is at %d:%d\n",
  758. c->ltab_lnum, c->ltab_offs);
  759. if (c->big_lpt)
  760. printk(KERN_DEBUG "\tLPT lsave is at %d:%d\n",
  761. c->lsave_lnum, c->lsave_offs);
  762. for (i = 0; i < c->lpt_lebs; i++)
  763. printk(KERN_DEBUG "\tLPT LEB %d free %d dirty %d tgc %d "
  764. "cmt %d\n", i + c->lpt_first, c->ltab[i].free,
  765. c->ltab[i].dirty, c->ltab[i].tgc, c->ltab[i].cmt);
  766. spin_unlock(&dbg_lock);
  767. }
  768. void dbg_dump_leb(const struct ubifs_info *c, int lnum)
  769. {
  770. struct ubifs_scan_leb *sleb;
  771. struct ubifs_scan_node *snod;
  772. void *buf;
  773. if (dbg_failure_mode)
  774. return;
  775. printk(KERN_DEBUG "(pid %d) start dumping LEB %d\n",
  776. current->pid, lnum);
  777. buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
  778. if (!buf) {
  779. ubifs_err("cannot allocate memory for dumping LEB %d", lnum);
  780. return;
  781. }
  782. sleb = ubifs_scan(c, lnum, 0, buf, 0);
  783. if (IS_ERR(sleb)) {
  784. ubifs_err("scan error %d", (int)PTR_ERR(sleb));
  785. goto out;
  786. }
  787. printk(KERN_DEBUG "LEB %d has %d nodes ending at %d\n", lnum,
  788. sleb->nodes_cnt, sleb->endpt);
  789. list_for_each_entry(snod, &sleb->nodes, list) {
  790. cond_resched();
  791. printk(KERN_DEBUG "Dumping node at LEB %d:%d len %d\n", lnum,
  792. snod->offs, snod->len);
  793. dbg_dump_node(c, snod->node);
  794. }
  795. printk(KERN_DEBUG "(pid %d) finish dumping LEB %d\n",
  796. current->pid, lnum);
  797. ubifs_scan_destroy(sleb);
  798. out:
  799. vfree(buf);
  800. return;
  801. }
  802. void dbg_dump_znode(const struct ubifs_info *c,
  803. const struct ubifs_znode *znode)
  804. {
  805. int n;
  806. const struct ubifs_zbranch *zbr;
  807. spin_lock(&dbg_lock);
  808. if (znode->parent)
  809. zbr = &znode->parent->zbranch[znode->iip];
  810. else
  811. zbr = &c->zroot;
  812. printk(KERN_DEBUG "znode %p, LEB %d:%d len %d parent %p iip %d level %d"
  813. " child_cnt %d flags %lx\n", znode, zbr->lnum, zbr->offs,
  814. zbr->len, znode->parent, znode->iip, znode->level,
  815. znode->child_cnt, znode->flags);
  816. if (znode->child_cnt <= 0 || znode->child_cnt > c->fanout) {
  817. spin_unlock(&dbg_lock);
  818. return;
  819. }
  820. printk(KERN_DEBUG "zbranches:\n");
  821. for (n = 0; n < znode->child_cnt; n++) {
  822. zbr = &znode->zbranch[n];
  823. if (znode->level > 0)
  824. printk(KERN_DEBUG "\t%d: znode %p LEB %d:%d len %d key "
  825. "%s\n", n, zbr->znode, zbr->lnum,
  826. zbr->offs, zbr->len,
  827. DBGKEY(&zbr->key));
  828. else
  829. printk(KERN_DEBUG "\t%d: LNC %p LEB %d:%d len %d key "
  830. "%s\n", n, zbr->znode, zbr->lnum,
  831. zbr->offs, zbr->len,
  832. DBGKEY(&zbr->key));
  833. }
  834. spin_unlock(&dbg_lock);
  835. }
  836. void dbg_dump_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat)
  837. {
  838. int i;
  839. printk(KERN_DEBUG "(pid %d) start dumping heap cat %d (%d elements)\n",
  840. current->pid, cat, heap->cnt);
  841. for (i = 0; i < heap->cnt; i++) {
  842. struct ubifs_lprops *lprops = heap->arr[i];
  843. printk(KERN_DEBUG "\t%d. LEB %d hpos %d free %d dirty %d "
  844. "flags %d\n", i, lprops->lnum, lprops->hpos,
  845. lprops->free, lprops->dirty, lprops->flags);
  846. }
  847. printk(KERN_DEBUG "(pid %d) finish dumping heap\n", current->pid);
  848. }
  849. void dbg_dump_pnode(struct ubifs_info *c, struct ubifs_pnode *pnode,
  850. struct ubifs_nnode *parent, int iip)
  851. {
  852. int i;
  853. printk(KERN_DEBUG "(pid %d) dumping pnode:\n", current->pid);
  854. printk(KERN_DEBUG "\taddress %zx parent %zx cnext %zx\n",
  855. (size_t)pnode, (size_t)parent, (size_t)pnode->cnext);
  856. printk(KERN_DEBUG "\tflags %lu iip %d level %d num %d\n",
  857. pnode->flags, iip, pnode->level, pnode->num);
  858. for (i = 0; i < UBIFS_LPT_FANOUT; i++) {
  859. struct ubifs_lprops *lp = &pnode->lprops[i];
  860. printk(KERN_DEBUG "\t%d: free %d dirty %d flags %d lnum %d\n",
  861. i, lp->free, lp->dirty, lp->flags, lp->lnum);
  862. }
  863. }
  864. void dbg_dump_tnc(struct ubifs_info *c)
  865. {
  866. struct ubifs_znode *znode;
  867. int level;
  868. printk(KERN_DEBUG "\n");
  869. printk(KERN_DEBUG "(pid %d) start dumping TNC tree\n", current->pid);
  870. znode = ubifs_tnc_levelorder_next(c->zroot.znode, NULL);
  871. level = znode->level;
  872. printk(KERN_DEBUG "== Level %d ==\n", level);
  873. while (znode) {
  874. if (level != znode->level) {
  875. level = znode->level;
  876. printk(KERN_DEBUG "== Level %d ==\n", level);
  877. }
  878. dbg_dump_znode(c, znode);
  879. znode = ubifs_tnc_levelorder_next(c->zroot.znode, znode);
  880. }
  881. printk(KERN_DEBUG "(pid %d) finish dumping TNC tree\n", current->pid);
  882. }
  883. static int dump_znode(struct ubifs_info *c, struct ubifs_znode *znode,
  884. void *priv)
  885. {
  886. dbg_dump_znode(c, znode);
  887. return 0;
  888. }
  889. /**
  890. * dbg_dump_index - dump the on-flash index.
  891. * @c: UBIFS file-system description object
  892. *
  893. * This function dumps whole UBIFS indexing B-tree, unlike 'dbg_dump_tnc()'
  894. * which dumps only in-memory znodes and does not read znodes from flash.
  895. */
  896. void dbg_dump_index(struct ubifs_info *c)
  897. {
  898. dbg_walk_index(c, NULL, dump_znode, NULL);
  899. }
  900. /**
  901. * dbg_save_space_info - save information about flash space.
  902. * @c: UBIFS file-system description object
  903. *
  904. * This function saves information about UBIFS free space, dirty space, etc., in
  905. * order to check it later.
  906. */
  907. void dbg_save_space_info(struct ubifs_info *c)
  908. {
  909. struct ubifs_debug_info *d = c->dbg;
  910. int freeable_cnt;
  911. spin_lock(&c->space_lock);
  912. memcpy(&d->saved_lst, &c->lst, sizeof(struct ubifs_lp_stats));
  913. memcpy(&d->saved_bi, &c->bi, sizeof(struct ubifs_budg_info));
  914. d->saved_idx_gc_cnt = c->idx_gc_cnt;
  915. /*
  916. * We use a dirty hack here and zero out @c->freeable_cnt, because it
  917. * affects the free space calculations, and UBIFS might not know about
  918. * all freeable eraseblocks. Indeed, we know about freeable eraseblocks
  919. * only when we read their lprops, and we do this lazily, only when
  920. * needed. So at any given point in time @c->freeable_cnt might not be
  921. * exactly accurate.
  922. *
  923. * Just one example about the issue we hit when we did not zero
  924. * @c->freeable_cnt.
  925. * 1. The file-system is mounted R/O, c->freeable_cnt is %0. We save the
  926. * amount of free space in @d->saved_free
  927. * 2. We re-mount R/W, which makes UBIFS to read the "lsave"
  928. * information from flash, where we cache LEBs from various
  929. * categories ('ubifs_remount_fs()' -> 'ubifs_lpt_init()'
  930. * -> 'lpt_init_wr()' -> 'read_lsave()' -> 'ubifs_lpt_lookup()'
  931. * -> 'ubifs_get_pnode()' -> 'update_cats()'
  932. * -> 'ubifs_add_to_cat()').
  933. * 3. Lsave contains a freeable eraseblock, and @c->freeable_cnt
  934. * becomes %1.
  935. * 4. We calculate the amount of free space when the re-mount is
  936. * finished in 'dbg_check_space_info()' and it does not match
  937. * @d->saved_free.
  938. */
  939. freeable_cnt = c->freeable_cnt;
  940. c->freeable_cnt = 0;
  941. d->saved_free = ubifs_get_free_space_nolock(c);
  942. c->freeable_cnt = freeable_cnt;
  943. spin_unlock(&c->space_lock);
  944. }
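/*
 * Typical usage (a sketch, based on how the checks pair up): call
 * dbg_save_space_info() before an operation which is not expected to change
 * the amount of free space (re-mounting, for example), and then call
 * dbg_check_space_info() below afterwards to verify that the space
 * accounting did not drift.
 */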
  945. /**
  946. * dbg_check_space_info - check flash space information.
  947. * @c: UBIFS file-system description object
  948. *
  949. * This function compares current flash space information with the information
  950. * which was saved when the 'dbg_save_space_info()' function was called.
  951. * Returns zero if the information has not changed, and %-EINVAL if it has
  952. * changed.
  953. */
  954. int dbg_check_space_info(struct ubifs_info *c)
  955. {
  956. struct ubifs_debug_info *d = c->dbg;
  957. struct ubifs_lp_stats lst;
  958. long long free;
  959. int freeable_cnt;
  960. spin_lock(&c->space_lock);
  961. freeable_cnt = c->freeable_cnt;
  962. c->freeable_cnt = 0;
  963. free = ubifs_get_free_space_nolock(c);
  964. c->freeable_cnt = freeable_cnt;
  965. spin_unlock(&c->space_lock);
  966. if (free != d->saved_free) {
  967. ubifs_err("free space changed from %lld to %lld",
  968. d->saved_free, free);
  969. goto out;
  970. }
  971. return 0;
  972. out:
  973. ubifs_msg("saved lprops statistics dump");
  974. dbg_dump_lstats(&d->saved_lst);
  975. ubifs_msg("saved budgeting info dump");
  976. dbg_dump_budg(c, &d->saved_bi);
  977. ubifs_msg("saved idx_gc_cnt %d", d->saved_idx_gc_cnt);
  978. ubifs_msg("current lprops statistics dump");
  979. ubifs_get_lp_stats(c, &lst);
  980. dbg_dump_lstats(&lst);
  981. ubifs_msg("current budgeting info dump");
  982. dbg_dump_budg(c, &c->bi);
  983. dump_stack();
  984. return -EINVAL;
  985. }
  986. /**
  987. * dbg_check_synced_i_size - check synchronized inode size.
  988. * @inode: inode to check
  989. *
  990. * If the inode is clean, the synchronized inode size has to be equal to the
  991. * current inode size. This function has to be called only for locked inodes
  992. * (@i_mutex has to be locked). Returns %0 if the synchronized size is correct, and
  993. * %-EINVAL if not.
  994. */
  995. int dbg_check_synced_i_size(struct inode *inode)
  996. {
  997. int err = 0;
  998. struct ubifs_inode *ui = ubifs_inode(inode);
  999. if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
  1000. return 0;
  1001. if (!S_ISREG(inode->i_mode))
  1002. return 0;
  1003. mutex_lock(&ui->ui_mutex);
  1004. spin_lock(&ui->ui_lock);
  1005. if (ui->ui_size != ui->synced_i_size && !ui->dirty) {
  1006. ubifs_err("ui_size is %lld, synced_i_size is %lld, but inode "
  1007. "is clean", ui->ui_size, ui->synced_i_size);
  1008. ubifs_err("i_ino %lu, i_mode %#x, i_size %lld", inode->i_ino,
  1009. inode->i_mode, i_size_read(inode));
  1010. dbg_dump_stack();
  1011. err = -EINVAL;
  1012. }
  1013. spin_unlock(&ui->ui_lock);
  1014. mutex_unlock(&ui->ui_mutex);
  1015. return err;
  1016. }
  1017. /**
  1018. * dbg_check_dir_size - check directory inode size and link count.
  1019. * @c: UBIFS file-system description object
  1020. * @dir: the directory to calculate size for
  1021. * @size: the result is returned here
  1022. *
  1023. * This function makes sure that directory size and link count are correct.
  1024. * Returns zero in case of success and a negative error code in case of
  1025. * failure.
  1026. *
  1027. * Note, it is a good idea to make sure the @dir->i_mutex is locked before
  1028. * calling this function.
  1029. */
  1030. int dbg_check_dir_size(struct ubifs_info *c, const struct inode *dir)
  1031. {
  1032. unsigned int nlink = 2;
  1033. union ubifs_key key;
  1034. struct ubifs_dent_node *dent, *pdent = NULL;
  1035. struct qstr nm = { .name = NULL };
  1036. loff_t size = UBIFS_INO_NODE_SZ;
  1037. if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
  1038. return 0;
  1039. if (!S_ISDIR(dir->i_mode))
  1040. return 0;
  1041. lowest_dent_key(c, &key, dir->i_ino);
  1042. while (1) {
  1043. int err;
  1044. dent = ubifs_tnc_next_ent(c, &key, &nm);
  1045. if (IS_ERR(dent)) {
  1046. err = PTR_ERR(dent);
  1047. if (err == -ENOENT)
  1048. break;
  1049. return err;
  1050. }
  1051. nm.name = dent->name;
  1052. nm.len = le16_to_cpu(dent->nlen);
  1053. size += CALC_DENT_SIZE(nm.len);
  1054. if (dent->type == UBIFS_ITYPE_DIR)
  1055. nlink += 1;
  1056. kfree(pdent);
  1057. pdent = dent;
  1058. key_read(c, &dent->key, &key);
  1059. }
  1060. kfree(pdent);
  1061. if (i_size_read(dir) != size) {
  1062. ubifs_err("directory inode %lu has size %llu, "
  1063. "but calculated size is %llu", dir->i_ino,
  1064. (unsigned long long)i_size_read(dir),
  1065. (unsigned long long)size);
  1066. dump_stack();
  1067. return -EINVAL;
  1068. }
  1069. if (dir->i_nlink != nlink) {
  1070. ubifs_err("directory inode %lu has nlink %u, but calculated "
  1071. "nlink is %u", dir->i_ino, dir->i_nlink, nlink);
  1072. dump_stack();
  1073. return -EINVAL;
  1074. }
  1075. return 0;
  1076. }
  1077. /**
  1078. * dbg_check_key_order - make sure that colliding keys are properly ordered.
  1079. * @c: UBIFS file-system description object
  1080. * @zbr1: first zbranch
  1081. * @zbr2: following zbranch
  1082. *
  1083. * In the UBIFS indexing B-tree, colliding keys have to be sorted in the binary
  1084. * order of the names of the direntries/xentries which are referred to by the
  1085. * keys. This function reads the direntries/xentries referred to by @zbr1 and
  1086. * @zbr2 and makes sure that the name referred to by @zbr1 is less than the
  1087. * name referred to by @zbr2. Returns zero if this is true, %1 if not,
  1088. * and a negative error code in case of failure.
  1089. */
  1090. static int dbg_check_key_order(struct ubifs_info *c, struct ubifs_zbranch *zbr1,
  1091. struct ubifs_zbranch *zbr2)
  1092. {
  1093. int err, nlen1, nlen2, cmp;
  1094. struct ubifs_dent_node *dent1, *dent2;
  1095. union ubifs_key key;
  1096. ubifs_assert(!keys_cmp(c, &zbr1->key, &zbr2->key));
  1097. dent1 = kmalloc(UBIFS_MAX_DENT_NODE_SZ, GFP_NOFS);
  1098. if (!dent1)
  1099. return -ENOMEM;
  1100. dent2 = kmalloc(UBIFS_MAX_DENT_NODE_SZ, GFP_NOFS);
  1101. if (!dent2) {
  1102. err = -ENOMEM;
  1103. goto out_free;
  1104. }
  1105. err = ubifs_tnc_read_node(c, zbr1, dent1);
  1106. if (err)
  1107. goto out_free;
  1108. err = ubifs_validate_entry(c, dent1);
  1109. if (err)
  1110. goto out_free;
  1111. err = ubifs_tnc_read_node(c, zbr2, dent2);
  1112. if (err)
  1113. goto out_free;
  1114. err = ubifs_validate_entry(c, dent2);
  1115. if (err)
  1116. goto out_free;
  1117. /* Make sure node keys are the same as in zbranch */
  1118. err = 1;
  1119. key_read(c, &dent1->key, &key);
  1120. if (keys_cmp(c, &zbr1->key, &key)) {
  1121. dbg_err("1st entry at %d:%d has key %s", zbr1->lnum,
  1122. zbr1->offs, DBGKEY(&key));
  1123. dbg_err("but it should have key %s according to tnc",
  1124. DBGKEY(&zbr1->key));
  1125. dbg_dump_node(c, dent1);
  1126. goto out_free;
  1127. }
  1128. key_read(c, &dent2->key, &key);
  1129. if (keys_cmp(c, &zbr2->key, &key)) {
  1130. dbg_err("2nd entry at %d:%d has key %s", zbr1->lnum,
  1131. zbr1->offs, DBGKEY(&key));
  1132. dbg_err("but it should have key %s according to tnc",
  1133. DBGKEY(&zbr2->key));
  1134. dbg_dump_node(c, dent2);
  1135. goto out_free;
  1136. }
  1137. nlen1 = le16_to_cpu(dent1->nlen);
  1138. nlen2 = le16_to_cpu(dent2->nlen);
  1139. cmp = memcmp(dent1->name, dent2->name, min_t(int, nlen1, nlen2));
  1140. if (cmp < 0 || (cmp == 0 && nlen1 < nlen2)) {
  1141. err = 0;
  1142. goto out_free;
  1143. }
  1144. if (cmp == 0 && nlen1 == nlen2)
  1145. dbg_err("2 xent/dent nodes with the same name");
  1146. else
  1147. dbg_err("bad order of colliding key %s",
  1148. DBGKEY(&key));
  1149. ubifs_msg("first node at %d:%d\n", zbr1->lnum, zbr1->offs);
  1150. dbg_dump_node(c, dent1);
  1151. ubifs_msg("second node at %d:%d\n", zbr2->lnum, zbr2->offs);
  1152. dbg_dump_node(c, dent2);
  1153. out_free:
  1154. kfree(dent2);
  1155. kfree(dent1);
  1156. return err;
  1157. }
  1158. /**
  1159. * dbg_check_znode - check if znode is all right.
  1160. * @c: UBIFS file-system description object
  1161. * @zbr: zbranch which points to this znode
  1162. *
  1163. * This function makes sure that znode referred to by @zbr is all right.
  1164. * Returns zero if it is, and %-EINVAL if it is not.
  1165. */
  1166. static int dbg_check_znode(struct ubifs_info *c, struct ubifs_zbranch *zbr)
  1167. {
  1168. struct ubifs_znode *znode = zbr->znode;
  1169. struct ubifs_znode *zp = znode->parent;
  1170. int n, err, cmp;
  1171. if (znode->child_cnt <= 0 || znode->child_cnt > c->fanout) {
  1172. err = 1;
  1173. goto out;
  1174. }
  1175. if (znode->level < 0) {
  1176. err = 2;
  1177. goto out;
  1178. }
  1179. if (znode->iip < 0 || znode->iip >= c->fanout) {
  1180. err = 3;
  1181. goto out;
  1182. }
  1183. if (zbr->len == 0)
  1184. /* Only dirty zbranch may have no on-flash nodes */
  1185. if (!ubifs_zn_dirty(znode)) {
  1186. err = 4;
  1187. goto out;
  1188. }
  1189. if (ubifs_zn_dirty(znode)) {
  1190. /*
  1191. * If znode is dirty, its parent has to be dirty as well. The
  1192. * order of the operation is important, so we have to have
  1193. * memory barriers.
  1194. */
  1195. smp_mb();
  1196. if (zp && !ubifs_zn_dirty(zp)) {
  1197. /*
  1198. * The dirty flag is atomic and is cleared outside the
  1199. * TNC mutex, so znode's dirty flag may now have
  1200. * been cleared. The child is always cleared before the
  1201. * parent, so we just need to check again.
  1202. */
  1203. smp_mb();
  1204. if (ubifs_zn_dirty(znode)) {
  1205. err = 5;
  1206. goto out;
  1207. }
  1208. }
  1209. }
  1210. if (zp) {
  1211. const union ubifs_key *min, *max;
  1212. if (znode->level != zp->level - 1) {
  1213. err = 6;
  1214. goto out;
  1215. }
  1216. /* Make sure the 'parent' pointer in our znode is correct */
  1217. err = ubifs_search_zbranch(c, zp, &zbr->key, &n);
  1218. if (!err) {
  1219. /* This zbranch does not exist in the parent */
  1220. err = 7;
  1221. goto out;
  1222. }
  1223. if (znode->iip >= zp->child_cnt) {
  1224. err = 8;
  1225. goto out;
  1226. }
  1227. if (znode->iip != n) {
  1228. /* This may happen only in case of collisions */
  1229. if (keys_cmp(c, &zp->zbranch[n].key,
  1230. &zp->zbranch[znode->iip].key)) {
  1231. err = 9;
  1232. goto out;
  1233. }
  1234. n = znode->iip;
  1235. }
  1236. /*
  1237. * Make sure that the first key in our znode is greater than or
  1238. * equal to the key in the pointing zbranch.
  1239. */
  1240. min = &zbr->key;
  1241. cmp = keys_cmp(c, min, &znode->zbranch[0].key);
  1242. if (cmp == 1) {
  1243. err = 10;
  1244. goto out;
  1245. }
  1246. if (n + 1 < zp->child_cnt) {
  1247. max = &zp->zbranch[n + 1].key;
			/*
			 * Make sure the last key in our znode is less than or
			 * equal to the key in the zbranch which goes after
			 * our pointing zbranch.
			 */
  1253. cmp = keys_cmp(c, max,
  1254. &znode->zbranch[znode->child_cnt - 1].key);
  1255. if (cmp == -1) {
  1256. err = 11;
  1257. goto out;
  1258. }
  1259. }
  1260. } else {
  1261. /* This may only be root znode */
  1262. if (zbr != &c->zroot) {
  1263. err = 12;
  1264. goto out;
  1265. }
  1266. }
	/*
	 * Make sure that each next key is greater than or equal to the
	 * previous one.
	 */
  1271. for (n = 1; n < znode->child_cnt; n++) {
  1272. cmp = keys_cmp(c, &znode->zbranch[n - 1].key,
  1273. &znode->zbranch[n].key);
  1274. if (cmp > 0) {
  1275. err = 13;
  1276. goto out;
  1277. }
  1278. if (cmp == 0) {
  1279. /* This can only be keys with colliding hash */
  1280. if (!is_hash_key(c, &znode->zbranch[n].key)) {
  1281. err = 14;
  1282. goto out;
  1283. }
  1284. if (znode->level != 0 || c->replaying)
  1285. continue;
  1286. /*
  1287. * Colliding keys should follow binary order of
  1288. * corresponding xentry/dentry names.
  1289. */
  1290. err = dbg_check_key_order(c, &znode->zbranch[n - 1],
  1291. &znode->zbranch[n]);
  1292. if (err < 0)
  1293. return err;
  1294. if (err) {
  1295. err = 15;
  1296. goto out;
  1297. }
  1298. }
  1299. }
  1300. for (n = 0; n < znode->child_cnt; n++) {
  1301. if (!znode->zbranch[n].znode &&
  1302. (znode->zbranch[n].lnum == 0 ||
  1303. znode->zbranch[n].len == 0)) {
  1304. err = 16;
  1305. goto out;
  1306. }
  1307. if (znode->zbranch[n].lnum != 0 &&
  1308. znode->zbranch[n].len == 0) {
  1309. err = 17;
  1310. goto out;
  1311. }
  1312. if (znode->zbranch[n].lnum == 0 &&
  1313. znode->zbranch[n].len != 0) {
  1314. err = 18;
  1315. goto out;
  1316. }
  1317. if (znode->zbranch[n].lnum == 0 &&
  1318. znode->zbranch[n].offs != 0) {
  1319. err = 19;
  1320. goto out;
  1321. }
  1322. if (znode->level != 0 && znode->zbranch[n].znode)
  1323. if (znode->zbranch[n].znode->parent != znode) {
  1324. err = 20;
  1325. goto out;
  1326. }
  1327. }
  1328. return 0;
  1329. out:
  1330. ubifs_err("failed, error %d", err);
  1331. ubifs_msg("dump of the znode");
  1332. dbg_dump_znode(c, znode);
  1333. if (zp) {
  1334. ubifs_msg("dump of the parent znode");
  1335. dbg_dump_znode(c, zp);
  1336. }
  1337. dump_stack();
  1338. return -EINVAL;
  1339. }
  1340. /**
  1341. * dbg_check_tnc - check TNC tree.
  1342. * @c: UBIFS file-system description object
  1343. * @extra: do extra checks that are possible at start commit
  1344. *
 * This function traverses the whole TNC tree and checks every znode. Returns
 * zero if everything is all right and %-EINVAL if something is wrong with the
 * TNC.
  1347. */
  1348. int dbg_check_tnc(struct ubifs_info *c, int extra)
  1349. {
  1350. struct ubifs_znode *znode;
  1351. long clean_cnt = 0, dirty_cnt = 0;
  1352. int err, last;
  1353. if (!(ubifs_chk_flags & UBIFS_CHK_TNC))
  1354. return 0;
  1355. ubifs_assert(mutex_is_locked(&c->tnc_mutex));
  1356. if (!c->zroot.znode)
  1357. return 0;
  1358. znode = ubifs_tnc_postorder_first(c->zroot.znode);
  1359. while (1) {
  1360. struct ubifs_znode *prev;
  1361. struct ubifs_zbranch *zbr;
  1362. if (!znode->parent)
  1363. zbr = &c->zroot;
  1364. else
  1365. zbr = &znode->parent->zbranch[znode->iip];
  1366. err = dbg_check_znode(c, zbr);
  1367. if (err)
  1368. return err;
  1369. if (extra) {
  1370. if (ubifs_zn_dirty(znode))
  1371. dirty_cnt += 1;
  1372. else
  1373. clean_cnt += 1;
  1374. }
  1375. prev = znode;
  1376. znode = ubifs_tnc_postorder_next(znode);
  1377. if (!znode)
  1378. break;
  1379. /*
  1380. * If the last key of this znode is equivalent to the first key
  1381. * of the next znode (collision), then check order of the keys.
  1382. */
  1383. last = prev->child_cnt - 1;
  1384. if (prev->level == 0 && znode->level == 0 && !c->replaying &&
  1385. !keys_cmp(c, &prev->zbranch[last].key,
  1386. &znode->zbranch[0].key)) {
  1387. err = dbg_check_key_order(c, &prev->zbranch[last],
  1388. &znode->zbranch[0]);
  1389. if (err < 0)
  1390. return err;
  1391. if (err) {
  1392. ubifs_msg("first znode");
  1393. dbg_dump_znode(c, prev);
  1394. ubifs_msg("second znode");
  1395. dbg_dump_znode(c, znode);
  1396. return -EINVAL;
  1397. }
  1398. }
  1399. }
  1400. if (extra) {
  1401. if (clean_cnt != atomic_long_read(&c->clean_zn_cnt)) {
  1402. ubifs_err("incorrect clean_zn_cnt %ld, calculated %ld",
  1403. atomic_long_read(&c->clean_zn_cnt),
  1404. clean_cnt);
  1405. return -EINVAL;
  1406. }
  1407. if (dirty_cnt != atomic_long_read(&c->dirty_zn_cnt)) {
  1408. ubifs_err("incorrect dirty_zn_cnt %ld, calculated %ld",
  1409. atomic_long_read(&c->dirty_zn_cnt),
  1410. dirty_cnt);
  1411. return -EINVAL;
  1412. }
  1413. }
  1414. return 0;
  1415. }
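/*
 * Usage sketch (illustrative, not taken from actual callers): dbg_check_tnc()
 * asserts that the TNC mutex is held, so a call would look roughly like:
 *
 *	mutex_lock(&c->tnc_mutex);
 *	err = dbg_check_tnc(c, 0);
 *	mutex_unlock(&c->tnc_mutex);
 *
 * Passing a non-zero @extra additionally cross-checks the counted clean and
 * dirty znodes against @c->clean_zn_cnt and @c->dirty_zn_cnt.
 */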
  1416. /**
  1417. * dbg_walk_index - walk the on-flash index.
  1418. * @c: UBIFS file-system description object
  1419. * @leaf_cb: called for each leaf node
  1420. * @znode_cb: called for each indexing node
  1421. * @priv: private data which is passed to callbacks
  1422. *
  1423. * This function walks the UBIFS index and calls the @leaf_cb for each leaf
  1424. * node and @znode_cb for each indexing node. Returns zero in case of success
  1425. * and a negative error code in case of failure.
  1426. *
 * It would be better if this function removed every znode it pulled into the
 * TNC, so that the behavior more closely matched the non-debugging behavior.
  1430. */
  1431. int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb,
  1432. dbg_znode_callback znode_cb, void *priv)
  1433. {
  1434. int err;
  1435. struct ubifs_zbranch *zbr;
  1436. struct ubifs_znode *znode, *child;
  1437. mutex_lock(&c->tnc_mutex);
  1438. /* If the root indexing node is not in TNC - pull it */
  1439. if (!c->zroot.znode) {
  1440. c->zroot.znode = ubifs_load_znode(c, &c->zroot, NULL, 0);
  1441. if (IS_ERR(c->zroot.znode)) {
  1442. err = PTR_ERR(c->zroot.znode);
  1443. c->zroot.znode = NULL;
  1444. goto out_unlock;
  1445. }
  1446. }
  1447. /*
  1448. * We are going to traverse the indexing tree in the postorder manner.
  1449. * Go down and find the leftmost indexing node where we are going to
  1450. * start from.
  1451. */
  1452. znode = c->zroot.znode;
  1453. while (znode->level > 0) {
  1454. zbr = &znode->zbranch[0];
  1455. child = zbr->znode;
  1456. if (!child) {
  1457. child = ubifs_load_znode(c, zbr, znode, 0);
  1458. if (IS_ERR(child)) {
  1459. err = PTR_ERR(child);
  1460. goto out_unlock;
  1461. }
  1462. zbr->znode = child;
  1463. }
  1464. znode = child;
  1465. }
  1466. /* Iterate over all indexing nodes */
  1467. while (1) {
  1468. int idx;
  1469. cond_resched();
  1470. if (znode_cb) {
  1471. err = znode_cb(c, znode, priv);
  1472. if (err) {
  1473. ubifs_err("znode checking function returned "
  1474. "error %d", err);
  1475. dbg_dump_znode(c, znode);
  1476. goto out_dump;
  1477. }
  1478. }
  1479. if (leaf_cb && znode->level == 0) {
  1480. for (idx = 0; idx < znode->child_cnt; idx++) {
  1481. zbr = &znode->zbranch[idx];
  1482. err = leaf_cb(c, zbr, priv);
  1483. if (err) {
  1484. ubifs_err("leaf checking function "
  1485. "returned error %d, for leaf "
  1486. "at LEB %d:%d",
  1487. err, zbr->lnum, zbr->offs);
  1488. goto out_dump;
  1489. }
  1490. }
  1491. }
  1492. if (!znode->parent)
  1493. break;
  1494. idx = znode->iip + 1;
  1495. znode = znode->parent;
  1496. if (idx < znode->child_cnt) {
  1497. /* Switch to the next index in the parent */
  1498. zbr = &znode->zbranch[idx];
  1499. child = zbr->znode;
  1500. if (!child) {
  1501. child = ubifs_load_znode(c, zbr, znode, idx);
  1502. if (IS_ERR(child)) {
  1503. err = PTR_ERR(child);
  1504. goto out_unlock;
  1505. }
  1506. zbr->znode = child;
  1507. }
  1508. znode = child;
  1509. } else
  1510. /*
  1511. * This is the last child, switch to the parent and
  1512. * continue.
  1513. */
  1514. continue;
  1515. /* Go to the lowest leftmost znode in the new sub-tree */
  1516. while (znode->level > 0) {
  1517. zbr = &znode->zbranch[0];
  1518. child = zbr->znode;
  1519. if (!child) {
  1520. child = ubifs_load_znode(c, zbr, znode, 0);
  1521. if (IS_ERR(child)) {
  1522. err = PTR_ERR(child);
  1523. goto out_unlock;
  1524. }
  1525. zbr->znode = child;
  1526. }
  1527. znode = child;
  1528. }
  1529. }
  1530. mutex_unlock(&c->tnc_mutex);
  1531. return 0;
  1532. out_dump:
  1533. if (znode->parent)
  1534. zbr = &znode->parent->zbranch[znode->iip];
  1535. else
  1536. zbr = &c->zroot;
  1537. ubifs_msg("dump of znode at LEB %d:%d", zbr->lnum, zbr->offs);
  1538. dbg_dump_znode(c, znode);
  1539. out_unlock:
  1540. mutex_unlock(&c->tnc_mutex);
  1541. return err;
  1542. }
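/*
 * Usage sketch (hypothetical callback, assuming the dbg_leaf_callback
 * signature used by 'check_leaf()' below): a callback that merely counts leaf
 * nodes could be passed as @leaf_cb, much like 'add_size()' below is passed
 * as @znode_cb:
 *
 *	static int count_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
 *			      void *priv)
 *	{
 *		long long *cnt = priv;
 *
 *		*cnt += 1;
 *		return 0;
 *	}
 *
 * and then:
 *
 *	long long cnt = 0;
 *	err = dbg_walk_index(c, count_leaf, NULL, &cnt);
 */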
  1543. /**
  1544. * add_size - add znode size to partially calculated index size.
  1545. * @c: UBIFS file-system description object
  1546. * @znode: znode to add size for
  1547. * @priv: partially calculated index size
  1548. *
  1549. * This is a helper function for 'dbg_check_idx_size()' which is called for
  1550. * every indexing node and adds its size to the 'long long' variable pointed to
  1551. * by @priv.
  1552. */
  1553. static int add_size(struct ubifs_info *c, struct ubifs_znode *znode, void *priv)
  1554. {
  1555. long long *idx_size = priv;
  1556. int add;
  1557. add = ubifs_idx_node_sz(c, znode->child_cnt);
  1558. add = ALIGN(add, 8);
  1559. *idx_size += add;
  1560. return 0;
  1561. }
  1562. /**
  1563. * dbg_check_idx_size - check index size.
  1564. * @c: UBIFS file-system description object
  1565. * @idx_size: size to check
  1566. *
  1567. * This function walks the UBIFS index, calculates its size and checks that the
  1568. * size is equivalent to @idx_size. Returns zero in case of success and a
  1569. * negative error code in case of failure.
  1570. */
  1571. int dbg_check_idx_size(struct ubifs_info *c, long long idx_size)
  1572. {
  1573. int err;
  1574. long long calc = 0;
  1575. if (!(ubifs_chk_flags & UBIFS_CHK_IDX_SZ))
  1576. return 0;
  1577. err = dbg_walk_index(c, NULL, add_size, &calc);
  1578. if (err) {
  1579. ubifs_err("error %d while walking the index", err);
  1580. return err;
  1581. }
  1582. if (calc != idx_size) {
  1583. ubifs_err("index size check failed: calculated size is %lld, "
  1584. "should be %lld", calc, idx_size);
  1585. dump_stack();
  1586. return -EINVAL;
  1587. }
  1588. return 0;
  1589. }
  1590. /**
  1591. * struct fsck_inode - information about an inode used when checking the file-system.
  1592. * @rb: link in the RB-tree of inodes
  1593. * @inum: inode number
  1594. * @mode: inode type, permissions, etc
  1595. * @nlink: inode link count
  1596. * @xattr_cnt: count of extended attributes
 * @references: how many directory/xattr entries refer to this inode
 *              (calculated while walking the index)
 * @calc_cnt: for directories, the calculated link count
  1600. * @size: inode size (read from on-flash inode)
  1601. * @xattr_sz: summary size of all extended attributes (read from on-flash
  1602. * inode)
 * @calc_sz: for directories, the calculated directory size
 * @calc_xcnt: calculated count of extended attributes
  1605. * @calc_xsz: calculated summary size of all extended attributes
  1606. * @xattr_nms: sum of lengths of all extended attribute names belonging to this
  1607. * inode (read from on-flash inode)
  1608. * @calc_xnms: calculated sum of lengths of all extended attribute names
  1609. */
  1610. struct fsck_inode {
  1611. struct rb_node rb;
  1612. ino_t inum;
  1613. umode_t mode;
  1614. unsigned int nlink;
  1615. unsigned int xattr_cnt;
  1616. int references;
  1617. int calc_cnt;
  1618. long long size;
  1619. unsigned int xattr_sz;
  1620. long long calc_sz;
  1621. long long calc_xcnt;
  1622. long long calc_xsz;
  1623. unsigned int xattr_nms;
  1624. long long calc_xnms;
  1625. };
  1626. /**
  1627. * struct fsck_data - private FS checking information.
  1628. * @inodes: RB-tree of all inodes (contains @struct fsck_inode objects)
  1629. */
  1630. struct fsck_data {
  1631. struct rb_root inodes;
  1632. };
  1633. /**
  1634. * add_inode - add inode information to RB-tree of inodes.
  1635. * @c: UBIFS file-system description object
  1636. * @fsckd: FS checking information
  1637. * @ino: raw UBIFS inode to add
  1638. *
  1639. * This is a helper function for 'check_leaf()' which adds information about
  1640. * inode @ino to the RB-tree of inodes. Returns inode information pointer in
  1641. * case of success and a negative error code in case of failure.
  1642. */
  1643. static struct fsck_inode *add_inode(struct ubifs_info *c,
  1644. struct fsck_data *fsckd,
  1645. struct ubifs_ino_node *ino)
  1646. {
  1647. struct rb_node **p, *parent = NULL;
  1648. struct fsck_inode *fscki;
  1649. ino_t inum = key_inum_flash(c, &ino->key);
  1650. struct inode *inode;
  1651. struct ubifs_inode *ui;
  1652. p = &fsckd->inodes.rb_node;
  1653. while (*p) {
  1654. parent = *p;
  1655. fscki = rb_entry(parent, struct fsck_inode, rb);
  1656. if (inum < fscki->inum)
  1657. p = &(*p)->rb_left;
  1658. else if (inum > fscki->inum)
  1659. p = &(*p)->rb_right;
  1660. else
  1661. return fscki;
  1662. }
  1663. if (inum > c->highest_inum) {
  1664. ubifs_err("too high inode number, max. is %lu",
  1665. (unsigned long)c->highest_inum);
  1666. return ERR_PTR(-EINVAL);
  1667. }
  1668. fscki = kzalloc(sizeof(struct fsck_inode), GFP_NOFS);
  1669. if (!fscki)
  1670. return ERR_PTR(-ENOMEM);
  1671. inode = ilookup(c->vfs_sb, inum);
  1672. fscki->inum = inum;
  1673. /*
  1674. * If the inode is present in the VFS inode cache, use it instead of
  1675. * the on-flash inode which might be out-of-date. E.g., the size might
  1676. * be out-of-date. If we do not do this, the following may happen, for
  1677. * example:
  1678. * 1. A power cut happens
  1679. * 2. We mount the file-system R/O, the replay process fixes up the
	 *    inode size in the VFS cache, but not on-flash.
  1681. * 3. 'check_leaf()' fails because it hits a data node beyond inode
  1682. * size.
  1683. */
  1684. if (!inode) {
  1685. fscki->nlink = le32_to_cpu(ino->nlink);
  1686. fscki->size = le64_to_cpu(ino->size);
  1687. fscki->xattr_cnt = le32_to_cpu(ino->xattr_cnt);
  1688. fscki->xattr_sz = le32_to_cpu(ino->xattr_size);
  1689. fscki->xattr_nms = le32_to_cpu(ino->xattr_names);
  1690. fscki->mode = le32_to_cpu(ino->mode);
  1691. } else {
  1692. ui = ubifs_inode(inode);
  1693. fscki->nlink = inode->i_nlink;
  1694. fscki->size = inode->i_size;
  1695. fscki->xattr_cnt = ui->xattr_cnt;
  1696. fscki->xattr_sz = ui->xattr_size;
  1697. fscki->xattr_nms = ui->xattr_names;
  1698. fscki->mode = inode->i_mode;
  1699. iput(inode);
  1700. }
  1701. if (S_ISDIR(fscki->mode)) {
  1702. fscki->calc_sz = UBIFS_INO_NODE_SZ;
  1703. fscki->calc_cnt = 2;
  1704. }
  1705. rb_link_node(&fscki->rb, parent, p);
  1706. rb_insert_color(&fscki->rb, &fsckd->inodes);
  1707. return fscki;
  1708. }
  1709. /**
  1710. * search_inode - search inode in the RB-tree of inodes.
  1711. * @fsckd: FS checking information
  1712. * @inum: inode number to search
  1713. *
  1714. * This is a helper function for 'check_leaf()' which searches inode @inum in
  1715. * the RB-tree of inodes and returns an inode information pointer or %NULL if
  1716. * the inode was not found.
  1717. */
  1718. static struct fsck_inode *search_inode(struct fsck_data *fsckd, ino_t inum)
  1719. {
  1720. struct rb_node *p;
  1721. struct fsck_inode *fscki;
  1722. p = fsckd->inodes.rb_node;
  1723. while (p) {
  1724. fscki = rb_entry(p, struct fsck_inode, rb);
  1725. if (inum < fscki->inum)
  1726. p = p->rb_left;
  1727. else if (inum > fscki->inum)
  1728. p = p->rb_right;
  1729. else
  1730. return fscki;
  1731. }
  1732. return NULL;
  1733. }
  1734. /**
  1735. * read_add_inode - read inode node and add it to RB-tree of inodes.
  1736. * @c: UBIFS file-system description object
  1737. * @fsckd: FS checking information
  1738. * @inum: inode number to read
  1739. *
  1740. * This is a helper function for 'check_leaf()' which finds inode node @inum in
  1741. * the index, reads it, and adds it to the RB-tree of inodes. Returns inode
  1742. * information pointer in case of success and a negative error code in case of
  1743. * failure.
  1744. */
  1745. static struct fsck_inode *read_add_inode(struct ubifs_info *c,
  1746. struct fsck_data *fsckd, ino_t inum)
  1747. {
  1748. int n, err;
  1749. union ubifs_key key;
  1750. struct ubifs_znode *znode;
  1751. struct ubifs_zbranch *zbr;
  1752. struct ubifs_ino_node *ino;
  1753. struct fsck_inode *fscki;
  1754. fscki = search_inode(fsckd, inum);
  1755. if (fscki)
  1756. return fscki;
  1757. ino_key_init(c, &key, inum);
  1758. err = ubifs_lookup_level0(c, &key, &znode, &n);
  1759. if (!err) {
  1760. ubifs_err("inode %lu not found in index", (unsigned long)inum);
  1761. return ERR_PTR(-ENOENT);
  1762. } else if (err < 0) {
  1763. ubifs_err("error %d while looking up inode %lu",
  1764. err, (unsigned long)inum);
  1765. return ERR_PTR(err);
  1766. }
  1767. zbr = &znode->zbranch[n];
  1768. if (zbr->len < UBIFS_INO_NODE_SZ) {
		ubifs_err("bad inode node %lu, length %d",
			  (unsigned long)inum, zbr->len);
  1771. return ERR_PTR(-EINVAL);
  1772. }
  1773. ino = kmalloc(zbr->len, GFP_NOFS);
  1774. if (!ino)
  1775. return ERR_PTR(-ENOMEM);
  1776. err = ubifs_tnc_read_node(c, zbr, ino);
  1777. if (err) {
  1778. ubifs_err("cannot read inode node at LEB %d:%d, error %d",
  1779. zbr->lnum, zbr->offs, err);
  1780. kfree(ino);
  1781. return ERR_PTR(err);
  1782. }
  1783. fscki = add_inode(c, fsckd, ino);
  1784. kfree(ino);
  1785. if (IS_ERR(fscki)) {
  1786. ubifs_err("error %ld while adding inode %lu node",
  1787. PTR_ERR(fscki), (unsigned long)inum);
  1788. return fscki;
  1789. }
  1790. return fscki;
  1791. }
  1792. /**
  1793. * check_leaf - check leaf node.
  1794. * @c: UBIFS file-system description object
  1795. * @zbr: zbranch of the leaf node to check
  1796. * @priv: FS checking information
  1797. *
  1798. * This is a helper function for 'dbg_check_filesystem()' which is called for
  1799. * every single leaf node while walking the indexing tree. It checks that the
  1800. * leaf node referred from the indexing tree exists, has correct CRC, and does
  1801. * some other basic validation. This function is also responsible for building
  1802. * an RB-tree of inodes - it adds all inodes into the RB-tree. It also
  1803. * calculates reference count, size, etc for each inode in order to later
  1804. * compare them to the information stored inside the inodes and detect possible
  1805. * inconsistencies. Returns zero in case of success and a negative error code
  1806. * in case of failure.
  1807. */
  1808. static int check_leaf(struct ubifs_info *c, struct ubifs_zbranch *zbr,
  1809. void *priv)
  1810. {
  1811. ino_t inum;
  1812. void *node;
  1813. struct ubifs_ch *ch;
  1814. int err, type = key_type(c, &zbr->key);
  1815. struct fsck_inode *fscki;
  1816. if (zbr->len < UBIFS_CH_SZ) {
  1817. ubifs_err("bad leaf length %d (LEB %d:%d)",
  1818. zbr->len, zbr->lnum, zbr->offs);
  1819. return -EINVAL;
  1820. }
  1821. node = kmalloc(zbr->len, GFP_NOFS);
  1822. if (!node)
  1823. return -ENOMEM;
  1824. err = ubifs_tnc_read_node(c, zbr, node);
  1825. if (err) {
  1826. ubifs_err("cannot read leaf node at LEB %d:%d, error %d",
  1827. zbr->lnum, zbr->offs, err);
  1828. goto out_free;
  1829. }
  1830. /* If this is an inode node, add it to RB-tree of inodes */
  1831. if (type == UBIFS_INO_KEY) {
  1832. fscki = add_inode(c, priv, node);
  1833. if (IS_ERR(fscki)) {
  1834. err = PTR_ERR(fscki);
  1835. ubifs_err("error %d while adding inode node", err);
  1836. goto out_dump;
  1837. }
  1838. goto out;
  1839. }
  1840. if (type != UBIFS_DENT_KEY && type != UBIFS_XENT_KEY &&
  1841. type != UBIFS_DATA_KEY) {
  1842. ubifs_err("unexpected node type %d at LEB %d:%d",
  1843. type, zbr->lnum, zbr->offs);
  1844. err = -EINVAL;
  1845. goto out_free;
  1846. }
  1847. ch = node;
  1848. if (le64_to_cpu(ch->sqnum) > c->max_sqnum) {
  1849. ubifs_err("too high sequence number, max. is %llu",
  1850. c->max_sqnum);
  1851. err = -EINVAL;
  1852. goto out_dump;
  1853. }
  1854. if (type == UBIFS_DATA_KEY) {
  1855. long long blk_offs;
  1856. struct ubifs_data_node *dn = node;
  1857. /*
  1858. * Search the inode node this data node belongs to and insert
  1859. * it to the RB-tree of inodes.
  1860. */
  1861. inum = key_inum_flash(c, &dn->key);
  1862. fscki = read_add_inode(c, priv, inum);
  1863. if (IS_ERR(fscki)) {
  1864. err = PTR_ERR(fscki);
  1865. ubifs_err("error %d while processing data node and "
  1866. "trying to find inode node %lu",
  1867. err, (unsigned long)inum);
  1868. goto out_dump;
  1869. }
  1870. /* Make sure the data node is within inode size */
  1871. blk_offs = key_block_flash(c, &dn->key);
  1872. blk_offs <<= UBIFS_BLOCK_SHIFT;
  1873. blk_offs += le32_to_cpu(dn->size);
  1874. if (blk_offs > fscki->size) {
  1875. ubifs_err("data node at LEB %d:%d is not within inode "
  1876. "size %lld", zbr->lnum, zbr->offs,
  1877. fscki->size);
  1878. err = -EINVAL;
  1879. goto out_dump;
  1880. }
  1881. } else {
  1882. int nlen;
  1883. struct ubifs_dent_node *dent = node;
  1884. struct fsck_inode *fscki1;
  1885. err = ubifs_validate_entry(c, dent);
  1886. if (err)
  1887. goto out_dump;
  1888. /*
  1889. * Search the inode node this entry refers to and the parent
  1890. * inode node and insert them to the RB-tree of inodes.
  1891. */
  1892. inum = le64_to_cpu(dent->inum);
  1893. fscki = read_add_inode(c, priv, inum);
  1894. if (IS_ERR(fscki)) {
  1895. err = PTR_ERR(fscki);
  1896. ubifs_err("error %d while processing entry node and "
  1897. "trying to find inode node %lu",
  1898. err, (unsigned long)inum);
  1899. goto out_dump;
  1900. }
		/* Count how many direntries or xentries refer to this inode */
  1902. fscki->references += 1;
  1903. inum = key_inum_flash(c, &dent->key);
  1904. fscki1 = read_add_inode(c, priv, inum);
  1905. if (IS_ERR(fscki1)) {
  1906. err = PTR_ERR(fscki1);
  1907. ubifs_err("error %d while processing entry node and "
  1908. "trying to find parent inode node %lu",
  1909. err, (unsigned long)inum);
  1910. goto out_dump;
  1911. }
  1912. nlen = le16_to_cpu(dent->nlen);
  1913. if (type == UBIFS_XENT_KEY) {
  1914. fscki1->calc_xcnt += 1;
  1915. fscki1->calc_xsz += CALC_DENT_SIZE(nlen);
  1916. fscki1->calc_xsz += CALC_XATTR_BYTES(fscki->size);
  1917. fscki1->calc_xnms += nlen;
  1918. } else {
  1919. fscki1->calc_sz += CALC_DENT_SIZE(nlen);
  1920. if (dent->type == UBIFS_ITYPE_DIR)
  1921. fscki1->calc_cnt += 1;
  1922. }
  1923. }
  1924. out:
  1925. kfree(node);
  1926. return 0;
  1927. out_dump:
  1928. ubifs_msg("dump of node at LEB %d:%d", zbr->lnum, zbr->offs);
  1929. dbg_dump_node(c, node);
  1930. out_free:
  1931. kfree(node);
  1932. return err;
  1933. }
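/*
 * Worked example for the data node check in 'check_leaf()' above (illustrative
 * numbers, assuming the usual 4KiB UBIFS block size, i.e. UBIFS_BLOCK_SHIFT of
 * 12): a data node for block 2 carrying 1000 bytes covers file bytes up to
 * (2 << 12) + 1000 = 9192, so the owning inode size must be at least 9192;
 * anything smaller triggers the "not within inode size" error.
 */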
  1934. /**
  1935. * free_inodes - free RB-tree of inodes.
  1936. * @fsckd: FS checking information
  1937. */
  1938. static void free_inodes(struct fsck_data *fsckd)
  1939. {
  1940. struct rb_node *this = fsckd->inodes.rb_node;
  1941. struct fsck_inode *fscki;
  1942. while (this) {
  1943. if (this->rb_left)
  1944. this = this->rb_left;
  1945. else if (this->rb_right)
  1946. this = this->rb_right;
  1947. else {
  1948. fscki = rb_entry(this, struct fsck_inode, rb);
  1949. this = rb_parent(this);
  1950. if (this) {
  1951. if (this->rb_left == &fscki->rb)
  1952. this->rb_left = NULL;
  1953. else
  1954. this->rb_right = NULL;
  1955. }
  1956. kfree(fscki);
  1957. }
  1958. }
  1959. }
  1960. /**
  1961. * check_inodes - checks all inodes.
  1962. * @c: UBIFS file-system description object
  1963. * @fsckd: FS checking information
  1964. *
  1965. * This is a helper function for 'dbg_check_filesystem()' which walks the
  1966. * RB-tree of inodes after the index scan has been finished, and checks that
  1967. * inode nlink, size, etc are correct. Returns zero if inodes are fine,
  1968. * %-EINVAL if not, and a negative error code in case of failure.
  1969. */
  1970. static int check_inodes(struct ubifs_info *c, struct fsck_data *fsckd)
  1971. {
  1972. int n, err;
  1973. union ubifs_key key;
  1974. struct ubifs_znode *znode;
  1975. struct ubifs_zbranch *zbr;
  1976. struct ubifs_ino_node *ino;
  1977. struct fsck_inode *fscki;
  1978. struct rb_node *this = rb_first(&fsckd->inodes);
  1979. while (this) {
  1980. fscki = rb_entry(this, struct fsck_inode, rb);
  1981. this = rb_next(this);
  1982. if (S_ISDIR(fscki->mode)) {
			/*
			 * Directories have to have exactly one reference
			 * (they cannot have hardlinks), although the root
			 * inode is an exception.
			 */
  1988. if (fscki->inum != UBIFS_ROOT_INO &&
  1989. fscki->references != 1) {
  1990. ubifs_err("directory inode %lu has %d "
  1991. "direntries which refer it, but "
  1992. "should be 1",
  1993. (unsigned long)fscki->inum,
  1994. fscki->references);
  1995. goto out_dump;
  1996. }
  1997. if (fscki->inum == UBIFS_ROOT_INO &&
  1998. fscki->references != 0) {
  1999. ubifs_err("root inode %lu has non-zero (%d) "
  2000. "direntries which refer it",
  2001. (unsigned long)fscki->inum,
  2002. fscki->references);
  2003. goto out_dump;
  2004. }
  2005. if (fscki->calc_sz != fscki->size) {
  2006. ubifs_err("directory inode %lu size is %lld, "
  2007. "but calculated size is %lld",
  2008. (unsigned long)fscki->inum,
  2009. fscki->size, fscki->calc_sz);
  2010. goto out_dump;
  2011. }
  2012. if (fscki->calc_cnt != fscki->nlink) {
  2013. ubifs_err("directory inode %lu nlink is %d, "
  2014. "but calculated nlink is %d",
  2015. (unsigned long)fscki->inum,
  2016. fscki->nlink, fscki->calc_cnt);
  2017. goto out_dump;
  2018. }
  2019. } else {
  2020. if (fscki->references != fscki->nlink) {
  2021. ubifs_err("inode %lu nlink is %d, but "
  2022. "calculated nlink is %d",
  2023. (unsigned long)fscki->inum,
  2024. fscki->nlink, fscki->references);
  2025. goto out_dump;
  2026. }
  2027. }
  2028. if (fscki->xattr_sz != fscki->calc_xsz) {
  2029. ubifs_err("inode %lu has xattr size %u, but "
  2030. "calculated size is %lld",
  2031. (unsigned long)fscki->inum, fscki->xattr_sz,
  2032. fscki->calc_xsz);
  2033. goto out_dump;
  2034. }
  2035. if (fscki->xattr_cnt != fscki->calc_xcnt) {
  2036. ubifs_err("inode %lu has %u xattrs, but "
  2037. "calculated count is %lld",
  2038. (unsigned long)fscki->inum,
  2039. fscki->xattr_cnt, fscki->calc_xcnt);
  2040. goto out_dump;
  2041. }
  2042. if (fscki->xattr_nms != fscki->calc_xnms) {
  2043. ubifs_err("inode %lu has xattr names' size %u, but "
  2044. "calculated names' size is %lld",
  2045. (unsigned long)fscki->inum, fscki->xattr_nms,
  2046. fscki->calc_xnms);
  2047. goto out_dump;
  2048. }
  2049. }
  2050. return 0;
  2051. out_dump:
  2052. /* Read the bad inode and dump it */
  2053. ino_key_init(c, &key, fscki->inum);
  2054. err = ubifs_lookup_level0(c, &key, &znode, &n);
  2055. if (!err) {
  2056. ubifs_err("inode %lu not found in index",
  2057. (unsigned long)fscki->inum);
  2058. return -ENOENT;
  2059. } else if (err < 0) {
  2060. ubifs_err("error %d while looking up inode %lu",
  2061. err, (unsigned long)fscki->inum);
  2062. return err;
  2063. }
  2064. zbr = &znode->zbranch[n];
  2065. ino = kmalloc(zbr->len, GFP_NOFS);
  2066. if (!ino)
  2067. return -ENOMEM;
  2068. err = ubifs_tnc_read_node(c, zbr, ino);
  2069. if (err) {
  2070. ubifs_err("cannot read inode node at LEB %d:%d, error %d",
  2071. zbr->lnum, zbr->offs, err);
  2072. kfree(ino);
  2073. return err;
  2074. }
  2075. ubifs_msg("dump of the inode %lu sitting in LEB %d:%d",
  2076. (unsigned long)fscki->inum, zbr->lnum, zbr->offs);
  2077. dbg_dump_node(c, ino);
  2078. kfree(ino);
  2079. return -EINVAL;
  2080. }
  2081. /**
  2082. * dbg_check_filesystem - check the file-system.
  2083. * @c: UBIFS file-system description object
  2084. *
  2085. * This function checks the file system, namely:
  2086. * o makes sure that all leaf nodes exist and their CRCs are correct;
  2087. * o makes sure inode nlink, size, xattr size/count are correct (for all
  2088. * inodes).
  2089. *
 * The function reads the whole indexing tree and all nodes, so it is pretty
 * heavy-weight. Returns zero if the file-system is consistent, %-EINVAL if
 * not, and a negative error code in case of failure.
  2093. */
  2094. int dbg_check_filesystem(struct ubifs_info *c)
  2095. {
  2096. int err;
  2097. struct fsck_data fsckd;
  2098. if (!(ubifs_chk_flags & UBIFS_CHK_FS))
  2099. return 0;
  2100. fsckd.inodes = RB_ROOT;
  2101. err = dbg_walk_index(c, check_leaf, NULL, &fsckd);
  2102. if (err)
  2103. goto out_free;
  2104. err = check_inodes(c, &fsckd);
  2105. if (err)
  2106. goto out_free;
  2107. free_inodes(&fsckd);
  2108. return 0;
  2109. out_free:
  2110. ubifs_err("file-system check failed with error %d", err);
  2111. dump_stack();
  2112. free_inodes(&fsckd);
  2113. return err;
  2114. }
  2115. /**
  2116. * dbg_check_data_nodes_order - check that list of data nodes is sorted.
  2117. * @c: UBIFS file-system description object
  2118. * @head: the list of nodes ('struct ubifs_scan_node' objects)
  2119. *
  2120. * This function returns zero if the list of data nodes is sorted correctly,
  2121. * and %-EINVAL if not.
  2122. */
  2123. int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head)
  2124. {
  2125. struct list_head *cur;
  2126. struct ubifs_scan_node *sa, *sb;
  2127. if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
  2128. return 0;
  2129. for (cur = head->next; cur->next != head; cur = cur->next) {
  2130. ino_t inuma, inumb;
  2131. uint32_t blka, blkb;
  2132. cond_resched();
  2133. sa = container_of(cur, struct ubifs_scan_node, list);
  2134. sb = container_of(cur->next, struct ubifs_scan_node, list);
  2135. if (sa->type != UBIFS_DATA_NODE) {
  2136. ubifs_err("bad node type %d", sa->type);
  2137. dbg_dump_node(c, sa->node);
  2138. return -EINVAL;
  2139. }
  2140. if (sb->type != UBIFS_DATA_NODE) {
  2141. ubifs_err("bad node type %d", sb->type);
  2142. dbg_dump_node(c, sb->node);
  2143. return -EINVAL;
  2144. }
  2145. inuma = key_inum(c, &sa->key);
  2146. inumb = key_inum(c, &sb->key);
  2147. if (inuma < inumb)
  2148. continue;
  2149. if (inuma > inumb) {
  2150. ubifs_err("larger inum %lu goes before inum %lu",
  2151. (unsigned long)inuma, (unsigned long)inumb);
  2152. goto error_dump;
  2153. }
  2154. blka = key_block(c, &sa->key);
  2155. blkb = key_block(c, &sb->key);
  2156. if (blka > blkb) {
  2157. ubifs_err("larger block %u goes before %u", blka, blkb);
  2158. goto error_dump;
  2159. }
  2160. if (blka == blkb) {
  2161. ubifs_err("two data nodes for the same block");
  2162. goto error_dump;
  2163. }
  2164. }
  2165. return 0;
  2166. error_dump:
  2167. dbg_dump_node(c, sa->node);
  2168. dbg_dump_node(c, sb->node);
  2169. return -EINVAL;
  2170. }
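/*
 * Illustrative note: the order expected by 'dbg_check_data_nodes_order()' is
 * ascending inode number and, within one inode, ascending block number with no
 * duplicates. E.g. (inum 5, block 0), (inum 5, block 1), (inum 6, block 0) is
 * correct, whereas two nodes for (inum 5, block 1) trigger the "two data nodes
 * for the same block" error.
 */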
  2171. /**
 * dbg_check_nondata_nodes_order - check that list of non-data nodes is sorted.
  2173. * @c: UBIFS file-system description object
  2174. * @head: the list of nodes ('struct ubifs_scan_node' objects)
  2175. *
  2176. * This function returns zero if the list of non-data nodes is sorted correctly,
  2177. * and %-EINVAL if not.
  2178. */
  2179. int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head)
  2180. {
  2181. struct list_head *cur;
  2182. struct ubifs_scan_node *sa, *sb;
  2183. if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
  2184. return 0;
  2185. for (cur = head->next; cur->next != head; cur = cur->next) {
  2186. ino_t inuma, inumb;
  2187. uint32_t hasha, hashb;
  2188. cond_resched();
  2189. sa = container_of(cur, struct ubifs_scan_node, list);
  2190. sb = container_of(cur->next, struct ubifs_scan_node, list);
  2191. if (sa->type != UBIFS_INO_NODE && sa->type != UBIFS_DENT_NODE &&
  2192. sa->type != UBIFS_XENT_NODE) {
  2193. ubifs_err("bad node type %d", sa->type);
  2194. dbg_dump_node(c, sa->node);
  2195. return -EINVAL;
  2196. }
		if (sb->type != UBIFS_INO_NODE && sb->type != UBIFS_DENT_NODE &&
		    sb->type != UBIFS_XENT_NODE) {
  2199. ubifs_err("bad node type %d", sb->type);
  2200. dbg_dump_node(c, sb->node);
  2201. return -EINVAL;
  2202. }
  2203. if (sa->type != UBIFS_INO_NODE && sb->type == UBIFS_INO_NODE) {
  2204. ubifs_err("non-inode node goes before inode node");
  2205. goto error_dump;
  2206. }
  2207. if (sa->type == UBIFS_INO_NODE && sb->type != UBIFS_INO_NODE)
  2208. continue;
  2209. if (sa->type == UBIFS_INO_NODE && sb->type == UBIFS_INO_NODE) {
  2210. /* Inode nodes are sorted in descending size order */
  2211. if (sa->len < sb->len) {
  2212. ubifs_err("smaller inode node goes first");
  2213. goto error_dump;
  2214. }
  2215. continue;
  2216. }
  2217. /*
  2218. * This is either a dentry or xentry, which should be sorted in
  2219. * ascending (parent ino, hash) order.
  2220. */
  2221. inuma = key_inum(c, &sa->key);
  2222. inumb = key_inum(c, &sb->key);
  2223. if (inuma < inumb)
  2224. continue;
  2225. if (inuma > inumb) {
  2226. ubifs_err("larger inum %lu goes before inum %lu",
  2227. (unsigned long)inuma, (unsigned long)inumb);
  2228. goto error_dump;
  2229. }
  2230. hasha = key_block(c, &sa->key);
  2231. hashb = key_block(c, &sb->key);
  2232. if (hasha > hashb) {
  2233. ubifs_err("larger hash %u goes before %u",
  2234. hasha, hashb);
  2235. goto error_dump;
  2236. }
  2237. }
  2238. return 0;
  2239. error_dump:
  2240. ubifs_msg("dumping first node");
  2241. dbg_dump_node(c, sa->node);
  2242. ubifs_msg("dumping second node");
  2243. dbg_dump_node(c, sb->node);
  2244. return -EINVAL;
  2246. }
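/*
 * Illustrative note: the order expected by 'dbg_check_nondata_nodes_order()'
 * is all inode nodes first, sorted by descending node length, followed by
 * dentry/xentry nodes sorted by ascending (parent inode number, name hash).
 * E.g. a 200-byte inode node, a 160-byte inode node, then entries with
 * (parent 5, hash 0x10), (parent 5, hash 0x20), (parent 7, hash 0x05) form a
 * correctly sorted list.
 */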
  2247. int dbg_force_in_the_gaps(void)
  2248. {
  2249. if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
  2250. return 0;
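	/* Return 1 roughly once in every 8 calls to force the in-the-gaps method */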
  2251. return !(random32() & 7);
  2252. }
  2253. /* Failure mode for recovery testing */
  2254. #define chance(n, d) (simple_rand() <= (n) * 32768LL / (d))
  2255. struct failure_mode_info {
  2256. struct list_head list;
  2257. struct ubifs_info *c;
  2258. };
  2259. static LIST_HEAD(fmi_list);
  2260. static DEFINE_SPINLOCK(fmi_lock);
  2261. static unsigned int next;
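/*
 * Descriptive note: 'simple_rand()' below is a minimal linear congruential
 * generator (the classic 1103515245/12345 constants used by many C library
 * rand() implementations), lazily seeded from the current PID and returning
 * values in the range 0..32767.
 */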
  2262. static int simple_rand(void)
  2263. {
  2264. if (next == 0)
  2265. next = current->pid;
  2266. next = next * 1103515245 + 12345;
  2267. return (next >> 16) & 32767;
  2268. }
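/*
 * Worked example for the chance() macro above: chance(n, d) succeeds when
 * simple_rand() <= n * 32768 / d. So chance(1, 2) succeeds for values
 * 0..16384 out of 0..32767, i.e. with probability of roughly one half, and
 * chance(19, 20) succeeds roughly 95% of the time.
 */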
  2269. static void failure_mode_init(struct ubifs_info *c)
  2270. {
  2271. struct failure_mode_info *fmi;
  2272. fmi = kmalloc(sizeof(struct failure_mode_info), GFP_NOFS);
  2273. if (!fmi) {
  2274. ubifs_err("Failed to register failure mode - no memory");
  2275. return;
  2276. }
  2277. fmi->c = c;
  2278. spin_lock(&fmi_lock);
  2279. list_add_tail(&fmi->list, &fmi_list);
  2280. spin_unlock(&fmi_lock);
  2281. }
  2282. static void failure_mode_exit(struct ubifs_info *c)
  2283. {
  2284. struct failure_mode_info *fmi, *tmp;
  2285. spin_lock(&fmi_lock);
  2286. list_for_each_entry_safe(fmi, tmp, &fmi_list, list)
  2287. if (fmi->c == c) {
  2288. list_del(&fmi->list);
  2289. kfree(fmi);
  2290. }
  2291. spin_unlock(&fmi_lock);
  2292. }
  2293. static struct ubifs_info *dbg_find_info(struct ubi_volume_desc *desc)
  2294. {
  2295. struct failure_mode_info *fmi;
  2296. spin_lock(&fmi_lock);
  2297. list_for_each_entry(fmi, &fmi_list, list)
  2298. if (fmi->c->ubi == desc) {
  2299. struct ubifs_info *c = fmi->c;
  2300. spin_unlock(&fmi_lock);
  2301. return c;
  2302. }
  2303. spin_unlock(&fmi_lock);
  2304. return NULL;
  2305. }
  2306. static int in_failure_mode(struct ubi_volume_desc *desc)
  2307. {
  2308. struct ubifs_info *c = dbg_find_info(desc);
  2309. if (c && dbg_failure_mode)
  2310. return c->dbg->failure_mode;
  2311. return 0;
  2312. }
  2313. static int do_fail(struct ubi_volume_desc *desc, int lnum, int write)
  2314. {
  2315. struct ubifs_info *c = dbg_find_info(desc);
  2316. struct ubifs_debug_info *d;
  2317. if (!c || !dbg_failure_mode)
  2318. return 0;
  2319. d = c->dbg;
  2320. if (d->failure_mode)
  2321. return 1;
  2322. if (!d->fail_cnt) {
  2323. /* First call - decide delay to failure */
  2324. if (chance(1, 2)) {
  2325. unsigned int delay = 1 << (simple_rand() >> 11);
  2326. if (chance(1, 2)) {
  2327. d->fail_delay = 1;
  2328. d->fail_timeout = jiffies +
  2329. msecs_to_jiffies(delay);
  2330. dbg_rcvry("failing after %ums", delay);
  2331. } else {
  2332. d->fail_delay = 2;
  2333. d->fail_cnt_max = delay;
  2334. dbg_rcvry("failing after %u calls", delay);
  2335. }
  2336. }
  2337. d->fail_cnt += 1;
  2338. }
  2339. /* Determine if failure delay has expired */
  2340. if (d->fail_delay == 1) {
  2341. if (time_before(jiffies, d->fail_timeout))
  2342. return 0;
  2343. } else if (d->fail_delay == 2)
  2344. if (d->fail_cnt++ < d->fail_cnt_max)
  2345. return 0;
  2346. if (lnum == UBIFS_SB_LNUM) {
  2347. if (write) {
  2348. if (chance(1, 2))
  2349. return 0;
  2350. } else if (chance(19, 20))
  2351. return 0;
  2352. dbg_rcvry("failing in super block LEB %d", lnum);
  2353. } else if (lnum == UBIFS_MST_LNUM || lnum == UBIFS_MST_LNUM + 1) {
  2354. if (chance(19, 20))
  2355. return 0;
  2356. dbg_rcvry("failing in master LEB %d", lnum);
  2357. } else if (lnum >= UBIFS_LOG_LNUM && lnum <= c->log_last) {
  2358. if (write) {
  2359. if (chance(99, 100))
  2360. return 0;
  2361. } else if (chance(399, 400))
  2362. return 0;
  2363. dbg_rcvry("failing in log LEB %d", lnum);
  2364. } else if (lnum >= c->lpt_first && lnum <= c->lpt_last) {
  2365. if (write) {
  2366. if (chance(7, 8))
  2367. return 0;
  2368. } else if (chance(19, 20))
  2369. return 0;
  2370. dbg_rcvry("failing in LPT LEB %d", lnum);
  2371. } else if (lnum >= c->orph_first && lnum <= c->orph_last) {
  2372. if (write) {
  2373. if (chance(1, 2))
  2374. return 0;
  2375. } else if (chance(9, 10))
  2376. return 0;
  2377. dbg_rcvry("failing in orphan LEB %d", lnum);
  2378. } else if (lnum == c->ihead_lnum) {
  2379. if (chance(99, 100))
  2380. return 0;
  2381. dbg_rcvry("failing in index head LEB %d", lnum);
  2382. } else if (c->jheads && lnum == c->jheads[GCHD].wbuf.lnum) {
  2383. if (chance(9, 10))
  2384. return 0;
  2385. dbg_rcvry("failing in GC head LEB %d", lnum);
  2386. } else if (write && !RB_EMPTY_ROOT(&c->buds) &&
  2387. !ubifs_search_bud(c, lnum)) {
  2388. if (chance(19, 20))
  2389. return 0;
  2390. dbg_rcvry("failing in non-bud LEB %d", lnum);
  2391. } else if (c->cmt_state == COMMIT_RUNNING_BACKGROUND ||
  2392. c->cmt_state == COMMIT_RUNNING_REQUIRED) {
  2393. if (chance(999, 1000))
  2394. return 0;
  2395. dbg_rcvry("failing in bud LEB %d commit running", lnum);
  2396. } else {
  2397. if (chance(9999, 10000))
  2398. return 0;
  2399. dbg_rcvry("failing in bud LEB %d commit not running", lnum);
  2400. }
  2401. ubifs_err("*** SETTING FAILURE MODE ON (LEB %d) ***", lnum);
  2402. d->failure_mode = 1;
  2403. dump_stack();
  2404. return 1;
  2405. }
  2406. static void cut_data(const void *buf, int len)
  2407. {
  2408. int flen, i;
  2409. unsigned char *p = (void *)buf;
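	/*
	 * Pick a random cut point and fill the rest of the buffer with 0xff
	 * bytes, emulating a write interrupted by a power cut on erased
	 * (all 0xff) flash.
	 */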
  2410. flen = (len * (long long)simple_rand()) >> 15;
  2411. for (i = flen; i < len; i++)
  2412. p[i] = 0xff;
  2413. }
  2414. int dbg_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
  2415. int len, int check)
  2416. {
  2417. if (in_failure_mode(desc))
  2418. return -EROFS;
  2419. return ubi_leb_read(desc, lnum, buf, offset, len, check);
  2420. }
  2421. int dbg_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
  2422. int offset, int len, int dtype)
  2423. {
  2424. int err, failing;
  2425. if (in_failure_mode(desc))
  2426. return -EROFS;
  2427. failing = do_fail(desc, lnum, 1);
  2428. if (failing)
  2429. cut_data(buf, len);
  2430. err = ubi_leb_write(desc, lnum, buf, offset, len, dtype);
  2431. if (err)
  2432. return err;
  2433. if (failing)
  2434. return -EROFS;
  2435. return 0;
  2436. }
  2437. int dbg_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
  2438. int len, int dtype)
  2439. {
  2440. int err;
  2441. if (do_fail(desc, lnum, 1))
  2442. return -EROFS;
  2443. err = ubi_leb_change(desc, lnum, buf, len, dtype);
  2444. if (err)
  2445. return err;
  2446. if (do_fail(desc, lnum, 1))
  2447. return -EROFS;
  2448. return 0;
  2449. }
  2450. int dbg_leb_erase(struct ubi_volume_desc *desc, int lnum)
  2451. {
  2452. int err;
  2453. if (do_fail(desc, lnum, 0))
  2454. return -EROFS;
  2455. err = ubi_leb_erase(desc, lnum);
  2456. if (err)
  2457. return err;
  2458. if (do_fail(desc, lnum, 0))
  2459. return -EROFS;
  2460. return 0;
  2461. }
  2462. int dbg_leb_unmap(struct ubi_volume_desc *desc, int lnum)
  2463. {
  2464. int err;
  2465. if (do_fail(desc, lnum, 0))
  2466. return -EROFS;
  2467. err = ubi_leb_unmap(desc, lnum);
  2468. if (err)
  2469. return err;
  2470. if (do_fail(desc, lnum, 0))
  2471. return -EROFS;
  2472. return 0;
  2473. }
  2474. int dbg_is_mapped(struct ubi_volume_desc *desc, int lnum)
  2475. {
  2476. if (in_failure_mode(desc))
  2477. return -EROFS;
  2478. return ubi_is_mapped(desc, lnum);
  2479. }
  2480. int dbg_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
  2481. {
  2482. int err;
  2483. if (do_fail(desc, lnum, 0))
  2484. return -EROFS;
  2485. err = ubi_leb_map(desc, lnum, dtype);
  2486. if (err)
  2487. return err;
  2488. if (do_fail(desc, lnum, 0))
  2489. return -EROFS;
  2490. return 0;
  2491. }
  2492. /**
  2493. * ubifs_debugging_init - initialize UBIFS debugging.
  2494. * @c: UBIFS file-system description object
  2495. *
  2496. * This function initializes debugging-related data for the file system.
  2497. * Returns zero in case of success and a negative error code in case of
  2498. * failure.
  2499. */
  2500. int ubifs_debugging_init(struct ubifs_info *c)
  2501. {
  2502. c->dbg = kzalloc(sizeof(struct ubifs_debug_info), GFP_KERNEL);
  2503. if (!c->dbg)
  2504. return -ENOMEM;
  2505. failure_mode_init(c);
  2506. return 0;
  2507. }
  2508. /**
  2509. * ubifs_debugging_exit - free debugging data.
  2510. * @c: UBIFS file-system description object
  2511. */
  2512. void ubifs_debugging_exit(struct ubifs_info *c)
  2513. {
  2514. failure_mode_exit(c);
  2515. kfree(c->dbg);
  2516. }
  2517. /*
  2518. * Root directory for UBIFS stuff in debugfs. Contains sub-directories which
  2519. * contain the stuff specific to particular file-system mounts.
  2520. */
  2521. static struct dentry *dfs_rootdir;
  2522. /**
  2523. * dbg_debugfs_init - initialize debugfs file-system.
  2524. *
  2525. * UBIFS uses debugfs file-system to expose various debugging knobs to
  2526. * user-space. This function creates "ubifs" directory in the debugfs
  2527. * file-system. Returns zero in case of success and a negative error code in
  2528. * case of failure.
  2529. */
  2530. int dbg_debugfs_init(void)
  2531. {
  2532. dfs_rootdir = debugfs_create_dir("ubifs", NULL);
  2533. if (IS_ERR(dfs_rootdir)) {
  2534. int err = PTR_ERR(dfs_rootdir);
  2535. ubifs_err("cannot create \"ubifs\" debugfs directory, "
  2536. "error %d\n", err);
  2537. return err;
  2538. }
  2539. return 0;
  2540. }
  2541. /**
  2542. * dbg_debugfs_exit - remove the "ubifs" directory from debugfs file-system.
  2543. */
  2544. void dbg_debugfs_exit(void)
  2545. {
  2546. debugfs_remove(dfs_rootdir);
  2547. }
  2548. static int open_debugfs_file(struct inode *inode, struct file *file)
  2549. {
  2550. file->private_data = inode->i_private;
  2551. return nonseekable_open(inode, file);
  2552. }
  2553. static ssize_t write_debugfs_file(struct file *file, const char __user *buf,
  2554. size_t count, loff_t *ppos)
  2555. {
  2556. struct ubifs_info *c = file->private_data;
  2557. struct ubifs_debug_info *d = c->dbg;
  2558. if (file->f_path.dentry == d->dfs_dump_lprops)
  2559. dbg_dump_lprops(c);
  2560. else if (file->f_path.dentry == d->dfs_dump_budg)
  2561. dbg_dump_budg(c, &c->bi);
  2562. else if (file->f_path.dentry == d->dfs_dump_tnc) {
  2563. mutex_lock(&c->tnc_mutex);
  2564. dbg_dump_tnc(c);
  2565. mutex_unlock(&c->tnc_mutex);
  2566. } else
  2567. return -EINVAL;
  2568. return count;
  2569. }
  2570. static const struct file_operations dfs_fops = {
  2571. .open = open_debugfs_file,
  2572. .write = write_debugfs_file,
  2573. .owner = THIS_MODULE,
  2574. .llseek = no_llseek,
  2575. };
  2576. /**
  2577. * dbg_debugfs_init_fs - initialize debugfs for UBIFS instance.
  2578. * @c: UBIFS file-system description object
  2579. *
  2580. * This function creates all debugfs files for this instance of UBIFS. Returns
  2581. * zero in case of success and a negative error code in case of failure.
  2582. *
  2583. * Note, the only reason we have not merged this function with the
  2584. * 'ubifs_debugging_init()' function is because it is better to initialize
 * debugfs interfaces at the very end of the mount process, and remove them at
 * the very beginning of the un-mount process.
  2587. */
  2588. int dbg_debugfs_init_fs(struct ubifs_info *c)
  2589. {
  2590. int err;
  2591. const char *fname;
  2592. struct dentry *dent;
  2593. struct ubifs_debug_info *d = c->dbg;
  2594. sprintf(d->dfs_dir_name, "ubi%d_%d", c->vi.ubi_num, c->vi.vol_id);
  2595. fname = d->dfs_dir_name;
  2596. dent = debugfs_create_dir(fname, dfs_rootdir);
  2597. if (IS_ERR_OR_NULL(dent))
  2598. goto out;
  2599. d->dfs_dir = dent;
  2600. fname = "dump_lprops";
  2601. dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops);
  2602. if (IS_ERR_OR_NULL(dent))
  2603. goto out_remove;
  2604. d->dfs_dump_lprops = dent;
  2605. fname = "dump_budg";
  2606. dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops);
  2607. if (IS_ERR_OR_NULL(dent))
  2608. goto out_remove;
  2609. d->dfs_dump_budg = dent;
  2610. fname = "dump_tnc";
  2611. dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, c, &dfs_fops);
  2612. if (IS_ERR_OR_NULL(dent))
  2613. goto out_remove;
  2614. d->dfs_dump_tnc = dent;
  2615. return 0;
  2616. out_remove:
  2617. debugfs_remove_recursive(d->dfs_dir);
  2618. out:
  2619. err = dent ? PTR_ERR(dent) : -ENODEV;
	ubifs_err("cannot create \"%s\" debugfs file or directory, error %d\n",
		  fname, err);
  2622. return err;
  2623. }
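/*
 * Illustrative note: with debugfs mounted in its usual location, the files
 * created above appear under /sys/kernel/debug/ubifs/ubiX_Y/, e.g. writing
 * anything to /sys/kernel/debug/ubifs/ubi0_0/dump_tnc triggers a TNC dump for
 * that mounted file-system.
 */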
  2624. /**
  2625. * dbg_debugfs_exit_fs - remove all debugfs files.
  2626. * @c: UBIFS file-system description object
  2627. */
  2628. void dbg_debugfs_exit_fs(struct ubifs_info *c)
  2629. {
  2630. debugfs_remove_recursive(c->dbg->dfs_dir);
  2631. }
  2632. #endif /* CONFIG_UBIFS_FS_DEBUG */