  1. /*
  2. * linux/fs/ext2/super.c
  3. *
  4. * Copyright (C) 1992, 1993, 1994, 1995
  5. * Remy Card (card@masi.ibp.fr)
  6. * Laboratoire MASI - Institut Blaise Pascal
  7. * Universite Pierre et Marie Curie (Paris VI)
  8. *
  9. * from
  10. *
  11. * linux/fs/minix/inode.c
  12. *
  13. * Copyright (C) 1991, 1992 Linus Torvalds
  14. *
  15. * Big-endian to little-endian byte-swapping/bitmaps by
  16. * David S. Miller (davem@caip.rutgers.edu), 1995
  17. */
  18. #include <linux/module.h>
  19. #include <linux/string.h>
  20. #include <linux/fs.h>
  21. #include <linux/slab.h>
  22. #include <linux/init.h>
  23. #include <linux/blkdev.h>
  24. #include <linux/parser.h>
  25. #include <linux/random.h>
  26. #include <linux/buffer_head.h>
  27. #include <linux/exportfs.h>
  28. #include <linux/vfs.h>
  29. #include <linux/seq_file.h>
  30. #include <linux/mount.h>
  31. #include <linux/log2.h>
  32. #include <linux/quotaops.h>
  33. #include <asm/uaccess.h>
  34. #include "ext2.h"
  35. #include "xattr.h"
  36. #include "acl.h"
  37. #include "xip.h"
  38. static void ext2_sync_super(struct super_block *sb,
  39. struct ext2_super_block *es, int wait);
  40. static int ext2_remount (struct super_block * sb, int * flags, char * data);
  41. static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf);
  42. static int ext2_sync_fs(struct super_block *sb, int wait);
/*
 * Report a filesystem error.
 *
 * On a read-write mount the EXT2_ERROR_FS flag is recorded in both the
 * in-core mount state and the on-disk superblock (under sbi->s_lock),
 * and the superblock is written out synchronously so the error survives
 * a crash and e2fsck will notice it.  The message itself is printed via
 * the %pV va_format mechanism.  Afterwards the configured errors=
 * policy is applied: panic, or remount read-only; errors=continue just
 * falls through.
 */
void ext2_error(struct super_block *sb, const char *function,
		const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	struct ext2_sb_info *sbi = EXT2_SB(sb);
	struct ext2_super_block *es = sbi->s_es;

	if (!(sb->s_flags & MS_RDONLY)) {
		spin_lock(&sbi->s_lock);
		sbi->s_mount_state |= EXT2_ERROR_FS;
		es->s_state |= cpu_to_le16(EXT2_ERROR_FS);
		spin_unlock(&sbi->s_lock);
		/* wait==1: make sure the error state reaches the disk */
		ext2_sync_super(sb, es, 1);
	}

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_CRIT "EXT2-fs (%s): error: %s: %pV\n",
	       sb->s_id, function, &vaf);
	va_end(args);

	if (test_opt(sb, ERRORS_PANIC))
		panic("EXT2-fs: panic from previous error\n");
	if (test_opt(sb, ERRORS_RO)) {
		ext2_msg(sb, KERN_CRIT,
			"error: remounting filesystem read-only");
		sb->s_flags |= MS_RDONLY;
	}
}
  71. void ext2_msg(struct super_block *sb, const char *prefix,
  72. const char *fmt, ...)
  73. {
  74. struct va_format vaf;
  75. va_list args;
  76. va_start(args, fmt);
  77. vaf.fmt = fmt;
  78. vaf.va = &args;
  79. printk("%sEXT2-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
  80. va_end(args);
  81. }
/*
 * Raise an EXT2_GOOD_OLD_REV superblock to EXT2_DYNAMIC_REV so that
 * feature flags can be stored.  No-op if the revision is already
 * dynamic or newer.  This must be called with sbi->s_lock held.
 */
void ext2_update_dynamic_rev(struct super_block *sb)
{
	struct ext2_super_block *es = EXT2_SB(sb)->s_es;

	if (le32_to_cpu(es->s_rev_level) > EXT2_GOOD_OLD_REV)
		return;

	ext2_msg(sb, KERN_WARNING,
		     "warning: updating to rev %d because of "
		     "new feature flag, running e2fsck is recommended",
		     EXT2_DYNAMIC_REV);

	/* rev 0 used fixed values for these; make them explicit on disk */
	es->s_first_ino = cpu_to_le32(EXT2_GOOD_OLD_FIRST_INO);
	es->s_inode_size = cpu_to_le16(EXT2_GOOD_OLD_INODE_SIZE);
	es->s_rev_level = cpu_to_le32(EXT2_DYNAMIC_REV);
	/* leave es->s_feature_*compat flags alone */
	/* es->s_uuid will be set by e2fsck if empty */

	/*
	 * The rest of the superblock fields should be zero, and if not it
	 * means they are likely already in use, so leave them alone.  We
	 * can leave it up to e2fsck to clean up any inconsistencies there.
	 */
}
  105. static void ext2_put_super (struct super_block * sb)
  106. {
  107. int db_count;
  108. int i;
  109. struct ext2_sb_info *sbi = EXT2_SB(sb);
  110. dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
  111. if (sb->s_dirt)
  112. ext2_write_super(sb);
  113. ext2_xattr_put_super(sb);
  114. if (!(sb->s_flags & MS_RDONLY)) {
  115. struct ext2_super_block *es = sbi->s_es;
  116. spin_lock(&sbi->s_lock);
  117. es->s_state = cpu_to_le16(sbi->s_mount_state);
  118. spin_unlock(&sbi->s_lock);
  119. ext2_sync_super(sb, es, 1);
  120. }
  121. db_count = sbi->s_gdb_count;
  122. for (i = 0; i < db_count; i++)
  123. if (sbi->s_group_desc[i])
  124. brelse (sbi->s_group_desc[i]);
  125. kfree(sbi->s_group_desc);
  126. kfree(sbi->s_debts);
  127. percpu_counter_destroy(&sbi->s_freeblocks_counter);
  128. percpu_counter_destroy(&sbi->s_freeinodes_counter);
  129. percpu_counter_destroy(&sbi->s_dirs_counter);
  130. brelse (sbi->s_sbh);
  131. sb->s_fs_info = NULL;
  132. kfree(sbi->s_blockgroup_lock);
  133. kfree(sbi);
  134. }
  135. static struct kmem_cache * ext2_inode_cachep;
  136. static struct inode *ext2_alloc_inode(struct super_block *sb)
  137. {
  138. struct ext2_inode_info *ei;
  139. ei = (struct ext2_inode_info *)kmem_cache_alloc(ext2_inode_cachep, GFP_KERNEL);
  140. if (!ei)
  141. return NULL;
  142. ei->i_block_alloc_info = NULL;
  143. ei->vfs_inode.i_version = 1;
  144. return &ei->vfs_inode;
  145. }
/*
 * RCU callback: actually free the inode once all RCU readers that may
 * still hold a reference to it have drained.
 */
static void ext2_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(ext2_inode_cachep, EXT2_I(inode));
}
/* Defer freeing of the inode to an RCU grace period (see ext2_i_callback). */
static void ext2_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, ext2_i_callback);
}
/*
 * Slab constructor: runs once when a cache object is first created
 * (not on every allocation), so only state that survives free/alloc
 * cycles is initialized here.
 */
static void init_once(void *foo)
{
	struct ext2_inode_info *ei = (struct ext2_inode_info *) foo;

	rwlock_init(&ei->i_meta_lock);
#ifdef CONFIG_EXT2_FS_XATTR
	init_rwsem(&ei->xattr_sem);
#endif
	mutex_init(&ei->truncate_mutex);
	inode_init_once(&ei->vfs_inode);
}
  165. static int init_inodecache(void)
  166. {
  167. ext2_inode_cachep = kmem_cache_create("ext2_inode_cache",
  168. sizeof(struct ext2_inode_info),
  169. 0, (SLAB_RECLAIM_ACCOUNT|
  170. SLAB_MEM_SPREAD),
  171. init_once);
  172. if (ext2_inode_cachep == NULL)
  173. return -ENOMEM;
  174. return 0;
  175. }
/* Destroy the ext2 inode slab cache (module unload path). */
static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(ext2_inode_cachep);
}
/*
 * ->show_options: emit the active mount options for /proc/mounts.
 * Only options that differ from the on-disk defaults (or that users
 * care to see) are printed.  The superblock defaults are read under
 * sbi->s_lock so they are consistent with a concurrent remount.
 */
static int ext2_show_options(struct seq_file *seq, struct dentry *root)
{
	struct super_block *sb = root->d_sb;
	struct ext2_sb_info *sbi = EXT2_SB(sb);
	struct ext2_super_block *es = sbi->s_es;
	unsigned long def_mount_opts;

	spin_lock(&sbi->s_lock);
	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);

	if (sbi->s_sb_block != 1)
		seq_printf(seq, ",sb=%lu", sbi->s_sb_block);
	if (test_opt(sb, MINIX_DF))
		seq_puts(seq, ",minixdf");
	if (test_opt(sb, GRPID))
		seq_puts(seq, ",grpid");
	/* nogrpid only shown when it overrides a bsdgroups default */
	if (!test_opt(sb, GRPID) && (def_mount_opts & EXT2_DEFM_BSDGROUPS))
		seq_puts(seq, ",nogrpid");
	if (sbi->s_resuid != EXT2_DEF_RESUID ||
	    le16_to_cpu(es->s_def_resuid) != EXT2_DEF_RESUID) {
		seq_printf(seq, ",resuid=%u", sbi->s_resuid);
	}
	if (sbi->s_resgid != EXT2_DEF_RESGID ||
	    le16_to_cpu(es->s_def_resgid) != EXT2_DEF_RESGID) {
		seq_printf(seq, ",resgid=%u", sbi->s_resgid);
	}
	if (test_opt(sb, ERRORS_RO)) {
		int def_errors = le16_to_cpu(es->s_errors);

		/* only shown when remount-ro overrides a different default */
		if (def_errors == EXT2_ERRORS_PANIC ||
		    def_errors == EXT2_ERRORS_CONTINUE) {
			seq_puts(seq, ",errors=remount-ro");
		}
	}
	if (test_opt(sb, ERRORS_CONT))
		seq_puts(seq, ",errors=continue");
	if (test_opt(sb, ERRORS_PANIC))
		seq_puts(seq, ",errors=panic");
	if (test_opt(sb, NO_UID32))
		seq_puts(seq, ",nouid32");
	if (test_opt(sb, DEBUG))
		seq_puts(seq, ",debug");
	if (test_opt(sb, OLDALLOC))
		seq_puts(seq, ",oldalloc");

#ifdef CONFIG_EXT2_FS_XATTR
	if (test_opt(sb, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	if (!test_opt(sb, XATTR_USER) &&
	    (def_mount_opts & EXT2_DEFM_XATTR_USER)) {
		seq_puts(seq, ",nouser_xattr");
	}
#endif

#ifdef CONFIG_EXT2_FS_POSIX_ACL
	if (test_opt(sb, POSIX_ACL))
		seq_puts(seq, ",acl");
	if (!test_opt(sb, POSIX_ACL) && (def_mount_opts & EXT2_DEFM_ACL))
		seq_puts(seq, ",noacl");
#endif

	if (test_opt(sb, NOBH))
		seq_puts(seq, ",nobh");

#if defined(CONFIG_QUOTA)
	if (sbi->s_mount_opt & EXT2_MOUNT_USRQUOTA)
		seq_puts(seq, ",usrquota");

	if (sbi->s_mount_opt & EXT2_MOUNT_GRPQUOTA)
		seq_puts(seq, ",grpquota");
#endif

#if defined(CONFIG_EXT2_FS_XIP)
	if (sbi->s_mount_opt & EXT2_MOUNT_XIP)
		seq_puts(seq, ",xip");
#endif

	if (!test_opt(sb, RESERVATION))
		seq_puts(seq, ",noreservation");

	spin_unlock(&sbi->s_lock);
	return 0;
}
  257. #ifdef CONFIG_QUOTA
  258. static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data, size_t len, loff_t off);
  259. static ssize_t ext2_quota_write(struct super_block *sb, int type, const char *data, size_t len, loff_t off);
  260. #endif
/* VFS superblock operations for ext2. */
static const struct super_operations ext2_sops = {
	.alloc_inode	= ext2_alloc_inode,
	.destroy_inode	= ext2_destroy_inode,
	.write_inode	= ext2_write_inode,
	.evict_inode	= ext2_evict_inode,
	.put_super	= ext2_put_super,
	.write_super	= ext2_write_super,
	.sync_fs	= ext2_sync_fs,
	.statfs		= ext2_statfs,
	.remount_fs	= ext2_remount,
	.show_options	= ext2_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= ext2_quota_read,
	.quota_write	= ext2_quota_write,
#endif
};
/*
 * Look up an inode for NFS file-handle decoding.  Inode numbers
 * outside the valid range are rejected up front with -ESTALE, and a
 * non-zero @generation must match the inode's generation (otherwise
 * the handle refers to a deleted-and-reused inode).
 */
static struct inode *ext2_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct inode *inode;

	if (ino < EXT2_FIRST_INO(sb) && ino != EXT2_ROOT_INO)
		return ERR_PTR(-ESTALE);
	if (ino > le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count))
		return ERR_PTR(-ESTALE);

	/*
	 * ext2_iget isn't quite right if the inode is currently unallocated!
	 * However ext2_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = ext2_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (generation && inode->i_generation != generation) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}
/* ->fh_to_dentry: decode an NFS file handle into the object's dentry. */
static struct dentry *ext2_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    ext2_nfs_get_inode);
}
/* ->fh_to_parent: decode an NFS file handle into the parent's dentry. */
static struct dentry *ext2_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    ext2_nfs_get_inode);
}
/* Yes, most of these are left as NULL!!
 * A NULL value implies the default, which works with ext2-like file
 * systems, but can be improved upon.
 * Currently only get_parent is required.
 */
static const struct export_operations ext2_export_ops = {
	.fh_to_dentry = ext2_fh_to_dentry,
	.fh_to_parent = ext2_fh_to_parent,
	.get_parent = ext2_get_parent,
};
  322. static unsigned long get_sb_block(void **data)
  323. {
  324. unsigned long sb_block;
  325. char *options = (char *) *data;
  326. if (!options || strncmp(options, "sb=", 3) != 0)
  327. return 1; /* Default location */
  328. options += 3;
  329. sb_block = simple_strtoul(options, &options, 0);
  330. if (*options && *options != ',') {
  331. printk("EXT2-fs: Invalid sb specification: %s\n",
  332. (char *) *data);
  333. return 1;
  334. }
  335. if (*options == ',')
  336. options++;
  337. *data = (void *) options;
  338. return sb_block;
  339. }
/* Mount-option tokens recognised by parse_options(); matched via tokens[]. */
enum {
	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
	Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic,
	Opt_err_ro, Opt_nouid32, Opt_nocheck, Opt_debug,
	Opt_oldalloc, Opt_orlov, Opt_nobh, Opt_user_xattr, Opt_nouser_xattr,
	Opt_acl, Opt_noacl, Opt_xip, Opt_ignore, Opt_err, Opt_quota,
	Opt_usrquota, Opt_grpquota, Opt_reservation, Opt_noreservation
};
/*
 * Option-string patterns for lib/parser match_token(); several spellings
 * may map to the same token (e.g. grpid/bsdgroups).  Opt_err terminates.
 */
static const match_table_t tokens = {
	{Opt_bsd_df, "bsddf"},
	{Opt_minix_df, "minixdf"},
	{Opt_grpid, "grpid"},
	{Opt_grpid, "bsdgroups"},
	{Opt_nogrpid, "nogrpid"},
	{Opt_nogrpid, "sysvgroups"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_sb, "sb=%u"},
	{Opt_err_cont, "errors=continue"},
	{Opt_err_panic, "errors=panic"},
	{Opt_err_ro, "errors=remount-ro"},
	{Opt_nouid32, "nouid32"},
	{Opt_nocheck, "check=none"},
	{Opt_nocheck, "nocheck"},
	{Opt_debug, "debug"},
	{Opt_oldalloc, "oldalloc"},
	{Opt_orlov, "orlov"},
	{Opt_nobh, "nobh"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_xip, "xip"},
	{Opt_grpquota, "grpquota"},
	{Opt_ignore, "noquota"},
	{Opt_quota, "quota"},
	{Opt_usrquota, "usrquota"},
	{Opt_reservation, "reservation"},
	{Opt_noreservation, "noreservation"},
	{Opt_err, NULL}
};
  381. static int parse_options(char *options, struct super_block *sb)
  382. {
  383. char *p;
  384. struct ext2_sb_info *sbi = EXT2_SB(sb);
  385. substring_t args[MAX_OPT_ARGS];
  386. int option;
  387. if (!options)
  388. return 1;
  389. while ((p = strsep (&options, ",")) != NULL) {
  390. int token;
  391. if (!*p)
  392. continue;
  393. token = match_token(p, tokens, args);
  394. switch (token) {
  395. case Opt_bsd_df:
  396. clear_opt (sbi->s_mount_opt, MINIX_DF);
  397. break;
  398. case Opt_minix_df:
  399. set_opt (sbi->s_mount_opt, MINIX_DF);
  400. break;
  401. case Opt_grpid:
  402. set_opt (sbi->s_mount_opt, GRPID);
  403. break;
  404. case Opt_nogrpid:
  405. clear_opt (sbi->s_mount_opt, GRPID);
  406. break;
  407. case Opt_resuid:
  408. if (match_int(&args[0], &option))
  409. return 0;
  410. sbi->s_resuid = option;
  411. break;
  412. case Opt_resgid:
  413. if (match_int(&args[0], &option))
  414. return 0;
  415. sbi->s_resgid = option;
  416. break;
  417. case Opt_sb:
  418. /* handled by get_sb_block() instead of here */
  419. /* *sb_block = match_int(&args[0]); */
  420. break;
  421. case Opt_err_panic:
  422. clear_opt (sbi->s_mount_opt, ERRORS_CONT);
  423. clear_opt (sbi->s_mount_opt, ERRORS_RO);
  424. set_opt (sbi->s_mount_opt, ERRORS_PANIC);
  425. break;
  426. case Opt_err_ro:
  427. clear_opt (sbi->s_mount_opt, ERRORS_CONT);
  428. clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
  429. set_opt (sbi->s_mount_opt, ERRORS_RO);
  430. break;
  431. case Opt_err_cont:
  432. clear_opt (sbi->s_mount_opt, ERRORS_RO);
  433. clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
  434. set_opt (sbi->s_mount_opt, ERRORS_CONT);
  435. break;
  436. case Opt_nouid32:
  437. set_opt (sbi->s_mount_opt, NO_UID32);
  438. break;
  439. case Opt_nocheck:
  440. clear_opt (sbi->s_mount_opt, CHECK);
  441. break;
  442. case Opt_debug:
  443. set_opt (sbi->s_mount_opt, DEBUG);
  444. break;
  445. case Opt_oldalloc:
  446. set_opt (sbi->s_mount_opt, OLDALLOC);
  447. break;
  448. case Opt_orlov:
  449. clear_opt (sbi->s_mount_opt, OLDALLOC);
  450. break;
  451. case Opt_nobh:
  452. set_opt (sbi->s_mount_opt, NOBH);
  453. break;
  454. #ifdef CONFIG_EXT2_FS_XATTR
  455. case Opt_user_xattr:
  456. set_opt (sbi->s_mount_opt, XATTR_USER);
  457. break;
  458. case Opt_nouser_xattr:
  459. clear_opt (sbi->s_mount_opt, XATTR_USER);
  460. break;
  461. #else
  462. case Opt_user_xattr:
  463. case Opt_nouser_xattr:
  464. ext2_msg(sb, KERN_INFO, "(no)user_xattr options"
  465. "not supported");
  466. break;
  467. #endif
  468. #ifdef CONFIG_EXT2_FS_POSIX_ACL
  469. case Opt_acl:
  470. set_opt(sbi->s_mount_opt, POSIX_ACL);
  471. break;
  472. case Opt_noacl:
  473. clear_opt(sbi->s_mount_opt, POSIX_ACL);
  474. break;
  475. #else
  476. case Opt_acl:
  477. case Opt_noacl:
  478. ext2_msg(sb, KERN_INFO,
  479. "(no)acl options not supported");
  480. break;
  481. #endif
  482. case Opt_xip:
  483. #ifdef CONFIG_EXT2_FS_XIP
  484. set_opt (sbi->s_mount_opt, XIP);
  485. #else
  486. ext2_msg(sb, KERN_INFO, "xip option not supported");
  487. #endif
  488. break;
  489. #if defined(CONFIG_QUOTA)
  490. case Opt_quota:
  491. case Opt_usrquota:
  492. set_opt(sbi->s_mount_opt, USRQUOTA);
  493. break;
  494. case Opt_grpquota:
  495. set_opt(sbi->s_mount_opt, GRPQUOTA);
  496. break;
  497. #else
  498. case Opt_quota:
  499. case Opt_usrquota:
  500. case Opt_grpquota:
  501. ext2_msg(sb, KERN_INFO,
  502. "quota operations not supported");
  503. break;
  504. #endif
  505. case Opt_reservation:
  506. set_opt(sbi->s_mount_opt, RESERVATION);
  507. ext2_msg(sb, KERN_INFO, "reservations ON");
  508. break;
  509. case Opt_noreservation:
  510. clear_opt(sbi->s_mount_opt, RESERVATION);
  511. ext2_msg(sb, KERN_INFO, "reservations OFF");
  512. break;
  513. case Opt_ignore:
  514. break;
  515. default:
  516. return 0;
  517. }
  518. }
  519. return 1;
  520. }
  521. static int ext2_setup_super (struct super_block * sb,
  522. struct ext2_super_block * es,
  523. int read_only)
  524. {
  525. int res = 0;
  526. struct ext2_sb_info *sbi = EXT2_SB(sb);
  527. if (le32_to_cpu(es->s_rev_level) > EXT2_MAX_SUPP_REV) {
  528. ext2_msg(sb, KERN_ERR,
  529. "error: revision level too high, "
  530. "forcing read-only mode");
  531. res = MS_RDONLY;
  532. }
  533. if (read_only)
  534. return res;
  535. if (!(sbi->s_mount_state & EXT2_VALID_FS))
  536. ext2_msg(sb, KERN_WARNING,
  537. "warning: mounting unchecked fs, "
  538. "running e2fsck is recommended");
  539. else if ((sbi->s_mount_state & EXT2_ERROR_FS))
  540. ext2_msg(sb, KERN_WARNING,
  541. "warning: mounting fs with errors, "
  542. "running e2fsck is recommended");
  543. else if ((__s16) le16_to_cpu(es->s_max_mnt_count) >= 0 &&
  544. le16_to_cpu(es->s_mnt_count) >=
  545. (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
  546. ext2_msg(sb, KERN_WARNING,
  547. "warning: maximal mount count reached, "
  548. "running e2fsck is recommended");
  549. else if (le32_to_cpu(es->s_checkinterval) &&
  550. (le32_to_cpu(es->s_lastcheck) +
  551. le32_to_cpu(es->s_checkinterval) <= get_seconds()))
  552. ext2_msg(sb, KERN_WARNING,
  553. "warning: checktime reached, "
  554. "running e2fsck is recommended");
  555. if (!le16_to_cpu(es->s_max_mnt_count))
  556. es->s_max_mnt_count = cpu_to_le16(EXT2_DFL_MAX_MNT_COUNT);
  557. le16_add_cpu(&es->s_mnt_count, 1);
  558. if (test_opt (sb, DEBUG))
  559. ext2_msg(sb, KERN_INFO, "%s, %s, bs=%lu, fs=%lu, gc=%lu, "
  560. "bpg=%lu, ipg=%lu, mo=%04lx]",
  561. EXT2FS_VERSION, EXT2FS_DATE, sb->s_blocksize,
  562. sbi->s_frag_size,
  563. sbi->s_groups_count,
  564. EXT2_BLOCKS_PER_GROUP(sb),
  565. EXT2_INODES_PER_GROUP(sb),
  566. sbi->s_mount_opt);
  567. return res;
  568. }
/*
 * Sanity-check every group descriptor: the block bitmap, inode bitmap
 * and inode table of each group must lie inside that group's block
 * range.  Returns 1 if all descriptors are valid, 0 (after logging via
 * ext2_error) on the first inconsistency.
 */
static int ext2_check_descriptors(struct super_block *sb)
{
	int i;
	struct ext2_sb_info *sbi = EXT2_SB(sb);

	ext2_debug ("Checking group descriptors");

	for (i = 0; i < sbi->s_groups_count; i++) {
		struct ext2_group_desc *gdp = ext2_get_group_desc(sb, i, NULL);
		ext2_fsblk_t first_block = ext2_group_first_block_no(sb, i);
		ext2_fsblk_t last_block;

		/* the last group may be shorter than EXT2_BLOCKS_PER_GROUP */
		if (i == sbi->s_groups_count - 1)
			last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
		else
			last_block = first_block +
				(EXT2_BLOCKS_PER_GROUP(sb) - 1);

		if (le32_to_cpu(gdp->bg_block_bitmap) < first_block ||
		    le32_to_cpu(gdp->bg_block_bitmap) > last_block)
		{
			ext2_error (sb, "ext2_check_descriptors",
				    "Block bitmap for group %d"
				    " not in group (block %lu)!",
				    i, (unsigned long) le32_to_cpu(gdp->bg_block_bitmap));
			return 0;
		}
		if (le32_to_cpu(gdp->bg_inode_bitmap) < first_block ||
		    le32_to_cpu(gdp->bg_inode_bitmap) > last_block)
		{
			ext2_error (sb, "ext2_check_descriptors",
				    "Inode bitmap for group %d"
				    " not in group (block %lu)!",
				    i, (unsigned long) le32_to_cpu(gdp->bg_inode_bitmap));
			return 0;
		}
		/* the inode table spans s_itb_per_group consecutive blocks */
		if (le32_to_cpu(gdp->bg_inode_table) < first_block ||
		    le32_to_cpu(gdp->bg_inode_table) + sbi->s_itb_per_group - 1 >
		    last_block)
		{
			ext2_error (sb, "ext2_check_descriptors",
				    "Inode table for group %d"
				    " not in group (block %lu)!",
				    i, (unsigned long) le32_to_cpu(gdp->bg_inode_table));
			return 0;
		}
	}
	return 1;
}
/*
 * Maximal file size.  There is a direct, and {,double-,triple-}indirect
 * block limit, and also a limit of (2^32 - 1) 512-byte sectors in i_blocks.
 * We need to be 1 filesystem block less than the 2^32 sector limit.
 *
 * @bits: log2 of the filesystem block size.
 * Returns the maximum file size in bytes, capped at MAX_LFS_FILESIZE.
 */
static loff_t ext2_max_size(int bits)
{
	loff_t res = EXT2_NDIR_BLOCKS;
	int meta_blocks;
	unsigned int upper_limit;
	unsigned int ppb = 1 << (bits-2);	/* block pointers per block */

	/* This is calculated to be the largest file size for a
	 * dense, file such that the total number of
	 * sectors in the file, including data and all indirect blocks,
	 * does not exceed 2^32 -1
	 * __u32 i_blocks representing the total number of
	 * 512 bytes blocks of the file
	 */
	upper_limit = (1LL << 32) - 1;

	/* total blocks in file system block size */
	upper_limit >>= (bits - 9);

	/* Compute how many blocks we can address by block tree */
	res += 1LL << (bits-2);
	res += 1LL << (2*(bits-2));
	res += 1LL << (3*(bits-2));
	/* Does block tree limit file size? */
	if (res < upper_limit)
		goto check_lfs;

	res = upper_limit;
	/* How many metadata blocks are needed for addressing upper_limit? */
	upper_limit -= EXT2_NDIR_BLOCKS;
	/* indirect blocks */
	meta_blocks = 1;
	upper_limit -= ppb;
	/* double indirect blocks */
	if (upper_limit < ppb * ppb) {
		meta_blocks += 1 + DIV_ROUND_UP(upper_limit, ppb);
		res -= meta_blocks;
		goto check_lfs;
	}
	meta_blocks += 1 + ppb;
	upper_limit -= ppb * ppb;
	/* tripple indirect blocks for the rest */
	meta_blocks += 1 + DIV_ROUND_UP(upper_limit, ppb) +
		DIV_ROUND_UP(upper_limit, ppb*ppb);
	res -= meta_blocks;
check_lfs:
	/* convert from filesystem blocks to bytes and apply the VFS cap */
	res <<= bits;
	if (res > MAX_LFS_FILESIZE)
		res = MAX_LFS_FILESIZE;

	return res;
}
/*
 * Return the block number that holds group-descriptor block @nr.
 * Without META_BG (or for descriptor blocks below s_first_meta_bg) the
 * descriptors sit contiguously right after the superblock.  With
 * META_BG each descriptor block lives in its own block group, placed
 * after that group's backup superblock if it has one.
 */
static unsigned long descriptor_loc(struct super_block *sb,
				    unsigned long logic_sb_block,
				    int nr)
{
	struct ext2_sb_info *sbi = EXT2_SB(sb);
	unsigned long bg, first_meta_bg;
	int has_super = 0;

	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);

	if (!EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_META_BG) ||
	    nr < first_meta_bg)
		return (logic_sb_block + nr + 1);
	bg = sbi->s_desc_per_block * nr;
	if (ext2_bg_has_super(sb, bg))
		has_super = 1;

	return ext2_group_first_block_no(sb, bg) + has_super;
}
  682. static int ext2_fill_super(struct super_block *sb, void *data, int silent)
  683. {
  684. struct buffer_head * bh;
  685. struct ext2_sb_info * sbi;
  686. struct ext2_super_block * es;
  687. struct inode *root;
  688. unsigned long block;
  689. unsigned long sb_block = get_sb_block(&data);
  690. unsigned long logic_sb_block;
  691. unsigned long offset = 0;
  692. unsigned long def_mount_opts;
  693. long ret = -EINVAL;
  694. int blocksize = BLOCK_SIZE;
  695. int db_count;
  696. int i, j;
  697. __le32 features;
  698. int err;
  699. err = -ENOMEM;
  700. sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
  701. if (!sbi)
  702. goto failed_unlock;
  703. sbi->s_blockgroup_lock =
  704. kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
  705. if (!sbi->s_blockgroup_lock) {
  706. kfree(sbi);
  707. goto failed_unlock;
  708. }
  709. sb->s_fs_info = sbi;
  710. sbi->s_sb_block = sb_block;
  711. spin_lock_init(&sbi->s_lock);
  712. /*
  713. * See what the current blocksize for the device is, and
  714. * use that as the blocksize. Otherwise (or if the blocksize
  715. * is smaller than the default) use the default.
  716. * This is important for devices that have a hardware
  717. * sectorsize that is larger than the default.
  718. */
  719. blocksize = sb_min_blocksize(sb, BLOCK_SIZE);
  720. if (!blocksize) {
  721. ext2_msg(sb, KERN_ERR, "error: unable to set blocksize");
  722. goto failed_sbi;
  723. }
  724. /*
  725. * If the superblock doesn't start on a hardware sector boundary,
  726. * calculate the offset.
  727. */
  728. if (blocksize != BLOCK_SIZE) {
  729. logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
  730. offset = (sb_block*BLOCK_SIZE) % blocksize;
  731. } else {
  732. logic_sb_block = sb_block;
  733. }
  734. if (!(bh = sb_bread(sb, logic_sb_block))) {
  735. ext2_msg(sb, KERN_ERR, "error: unable to read superblock");
  736. goto failed_sbi;
  737. }
  738. /*
  739. * Note: s_es must be initialized as soon as possible because
  740. * some ext2 macro-instructions depend on its value
  741. */
  742. es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
  743. sbi->s_es = es;
  744. sb->s_magic = le16_to_cpu(es->s_magic);
  745. if (sb->s_magic != EXT2_SUPER_MAGIC)
  746. goto cantfind_ext2;
  747. /* Set defaults before we parse the mount options */
  748. def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
  749. if (def_mount_opts & EXT2_DEFM_DEBUG)
  750. set_opt(sbi->s_mount_opt, DEBUG);
  751. if (def_mount_opts & EXT2_DEFM_BSDGROUPS)
  752. set_opt(sbi->s_mount_opt, GRPID);
  753. if (def_mount_opts & EXT2_DEFM_UID16)
  754. set_opt(sbi->s_mount_opt, NO_UID32);
  755. #ifdef CONFIG_EXT2_FS_XATTR
  756. if (def_mount_opts & EXT2_DEFM_XATTR_USER)
  757. set_opt(sbi->s_mount_opt, XATTR_USER);
  758. #endif
  759. #ifdef CONFIG_EXT2_FS_POSIX_ACL
  760. if (def_mount_opts & EXT2_DEFM_ACL)
  761. set_opt(sbi->s_mount_opt, POSIX_ACL);
  762. #endif
  763. if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
  764. set_opt(sbi->s_mount_opt, ERRORS_PANIC);
  765. else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_CONTINUE)
  766. set_opt(sbi->s_mount_opt, ERRORS_CONT);
  767. else
  768. set_opt(sbi->s_mount_opt, ERRORS_RO);
  769. sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
  770. sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
  771. set_opt(sbi->s_mount_opt, RESERVATION);
  772. if (!parse_options((char *) data, sb))
  773. goto failed_mount;
  774. sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
  775. ((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
  776. MS_POSIXACL : 0);
  777. ext2_xip_verify_sb(sb); /* see if bdev supports xip, unset
  778. EXT2_MOUNT_XIP if not */
  779. if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
  780. (EXT2_HAS_COMPAT_FEATURE(sb, ~0U) ||
  781. EXT2_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
  782. EXT2_HAS_INCOMPAT_FEATURE(sb, ~0U)))
  783. ext2_msg(sb, KERN_WARNING,
  784. "warning: feature flags set on rev 0 fs, "
  785. "running e2fsck is recommended");
  786. /*
  787. * Check feature flags regardless of the revision level, since we
  788. * previously didn't change the revision level when setting the flags,
  789. * so there is a chance incompat flags are set on a rev 0 filesystem.
  790. */
  791. features = EXT2_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP);
  792. if (features) {
  793. ext2_msg(sb, KERN_ERR, "error: couldn't mount because of "
  794. "unsupported optional features (%x)",
  795. le32_to_cpu(features));
  796. goto failed_mount;
  797. }
  798. if (!(sb->s_flags & MS_RDONLY) &&
  799. (features = EXT2_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))){
  800. ext2_msg(sb, KERN_ERR, "error: couldn't mount RDWR because of "
  801. "unsupported optional features (%x)",
  802. le32_to_cpu(features));
  803. goto failed_mount;
  804. }
  805. blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
  806. if (ext2_use_xip(sb) && blocksize != PAGE_SIZE) {
  807. if (!silent)
  808. ext2_msg(sb, KERN_ERR,
  809. "error: unsupported blocksize for xip");
  810. goto failed_mount;
  811. }
  812. /* If the blocksize doesn't match, re-read the thing.. */
  813. if (sb->s_blocksize != blocksize) {
  814. brelse(bh);
  815. if (!sb_set_blocksize(sb, blocksize)) {
  816. ext2_msg(sb, KERN_ERR,
  817. "error: bad blocksize %d", blocksize);
  818. goto failed_sbi;
  819. }
  820. logic_sb_block = (sb_block*BLOCK_SIZE) / blocksize;
  821. offset = (sb_block*BLOCK_SIZE) % blocksize;
  822. bh = sb_bread(sb, logic_sb_block);
  823. if(!bh) {
  824. ext2_msg(sb, KERN_ERR, "error: couldn't read"
  825. "superblock on 2nd try");
  826. goto failed_sbi;
  827. }
  828. es = (struct ext2_super_block *) (((char *)bh->b_data) + offset);
  829. sbi->s_es = es;
  830. if (es->s_magic != cpu_to_le16(EXT2_SUPER_MAGIC)) {
  831. ext2_msg(sb, KERN_ERR, "error: magic mismatch");
  832. goto failed_mount;
  833. }
  834. }
  835. sb->s_maxbytes = ext2_max_size(sb->s_blocksize_bits);
  836. sb->s_max_links = EXT2_LINK_MAX;
  837. if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV) {
  838. sbi->s_inode_size = EXT2_GOOD_OLD_INODE_SIZE;
  839. sbi->s_first_ino = EXT2_GOOD_OLD_FIRST_INO;
  840. } else {
  841. sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
  842. sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
  843. if ((sbi->s_inode_size < EXT2_GOOD_OLD_INODE_SIZE) ||
  844. !is_power_of_2(sbi->s_inode_size) ||
  845. (sbi->s_inode_size > blocksize)) {
  846. ext2_msg(sb, KERN_ERR,
  847. "error: unsupported inode size: %d",
  848. sbi->s_inode_size);
  849. goto failed_mount;
  850. }
  851. }
  852. sbi->s_frag_size = EXT2_MIN_FRAG_SIZE <<
  853. le32_to_cpu(es->s_log_frag_size);
  854. if (sbi->s_frag_size == 0)
  855. goto cantfind_ext2;
  856. sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size;
  857. sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
  858. sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group);
  859. sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
  860. if (EXT2_INODE_SIZE(sb) == 0)
  861. goto cantfind_ext2;
  862. sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb);
  863. if (sbi->s_inodes_per_block == 0 || sbi->s_inodes_per_group == 0)
  864. goto cantfind_ext2;
  865. sbi->s_itb_per_group = sbi->s_inodes_per_group /
  866. sbi->s_inodes_per_block;
  867. sbi->s_desc_per_block = sb->s_blocksize /
  868. sizeof (struct ext2_group_desc);
  869. sbi->s_sbh = bh;
  870. sbi->s_mount_state = le16_to_cpu(es->s_state);
  871. sbi->s_addr_per_block_bits =
  872. ilog2 (EXT2_ADDR_PER_BLOCK(sb));
  873. sbi->s_desc_per_block_bits =
  874. ilog2 (EXT2_DESC_PER_BLOCK(sb));
  875. if (sb->s_magic != EXT2_SUPER_MAGIC)
  876. goto cantfind_ext2;
  877. if (sb->s_blocksize != bh->b_size) {
  878. if (!silent)
  879. ext2_msg(sb, KERN_ERR, "error: unsupported blocksize");
  880. goto failed_mount;
  881. }
  882. if (sb->s_blocksize != sbi->s_frag_size) {
  883. ext2_msg(sb, KERN_ERR,
  884. "error: fragsize %lu != blocksize %lu"
  885. "(not supported yet)",
  886. sbi->s_frag_size, sb->s_blocksize);
  887. goto failed_mount;
  888. }
  889. if (sbi->s_blocks_per_group > sb->s_blocksize * 8) {
  890. ext2_msg(sb, KERN_ERR,
  891. "error: #blocks per group too big: %lu",
  892. sbi->s_blocks_per_group);
  893. goto failed_mount;
  894. }
  895. if (sbi->s_frags_per_group > sb->s_blocksize * 8) {
  896. ext2_msg(sb, KERN_ERR,
  897. "error: #fragments per group too big: %lu",
  898. sbi->s_frags_per_group);
  899. goto failed_mount;
  900. }
  901. if (sbi->s_inodes_per_group > sb->s_blocksize * 8) {
  902. ext2_msg(sb, KERN_ERR,
  903. "error: #inodes per group too big: %lu",
  904. sbi->s_inodes_per_group);
  905. goto failed_mount;
  906. }
  907. if (EXT2_BLOCKS_PER_GROUP(sb) == 0)
  908. goto cantfind_ext2;
  909. sbi->s_groups_count = ((le32_to_cpu(es->s_blocks_count) -
  910. le32_to_cpu(es->s_first_data_block) - 1)
  911. / EXT2_BLOCKS_PER_GROUP(sb)) + 1;
  912. db_count = (sbi->s_groups_count + EXT2_DESC_PER_BLOCK(sb) - 1) /
  913. EXT2_DESC_PER_BLOCK(sb);
  914. sbi->s_group_desc = kmalloc (db_count * sizeof (struct buffer_head *), GFP_KERNEL);
  915. if (sbi->s_group_desc == NULL) {
  916. ext2_msg(sb, KERN_ERR, "error: not enough memory");
  917. goto failed_mount;
  918. }
  919. bgl_lock_init(sbi->s_blockgroup_lock);
  920. sbi->s_debts = kcalloc(sbi->s_groups_count, sizeof(*sbi->s_debts), GFP_KERNEL);
  921. if (!sbi->s_debts) {
  922. ext2_msg(sb, KERN_ERR, "error: not enough memory");
  923. goto failed_mount_group_desc;
  924. }
  925. for (i = 0; i < db_count; i++) {
  926. block = descriptor_loc(sb, logic_sb_block, i);
  927. sbi->s_group_desc[i] = sb_bread(sb, block);
  928. if (!sbi->s_group_desc[i]) {
  929. for (j = 0; j < i; j++)
  930. brelse (sbi->s_group_desc[j]);
  931. ext2_msg(sb, KERN_ERR,
  932. "error: unable to read group descriptors");
  933. goto failed_mount_group_desc;
  934. }
  935. }
  936. if (!ext2_check_descriptors (sb)) {
  937. ext2_msg(sb, KERN_ERR, "group descriptors corrupted");
  938. goto failed_mount2;
  939. }
  940. sbi->s_gdb_count = db_count;
  941. get_random_bytes(&sbi->s_next_generation, sizeof(u32));
  942. spin_lock_init(&sbi->s_next_gen_lock);
/* per filesystem reservation list head & lock */
  944. spin_lock_init(&sbi->s_rsv_window_lock);
  945. sbi->s_rsv_window_root = RB_ROOT;
  946. /*
  947. * Add a single, static dummy reservation to the start of the
  948. * reservation window list --- it gives us a placeholder for
  949. * append-at-start-of-list which makes the allocation logic
  950. * _much_ simpler.
  951. */
  952. sbi->s_rsv_window_head.rsv_start = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
  953. sbi->s_rsv_window_head.rsv_end = EXT2_RESERVE_WINDOW_NOT_ALLOCATED;
  954. sbi->s_rsv_window_head.rsv_alloc_hit = 0;
  955. sbi->s_rsv_window_head.rsv_goal_size = 0;
  956. ext2_rsv_window_add(sb, &sbi->s_rsv_window_head);
  957. err = percpu_counter_init(&sbi->s_freeblocks_counter,
  958. ext2_count_free_blocks(sb));
  959. if (!err) {
  960. err = percpu_counter_init(&sbi->s_freeinodes_counter,
  961. ext2_count_free_inodes(sb));
  962. }
  963. if (!err) {
  964. err = percpu_counter_init(&sbi->s_dirs_counter,
  965. ext2_count_dirs(sb));
  966. }
  967. if (err) {
  968. ext2_msg(sb, KERN_ERR, "error: insufficient memory");
  969. goto failed_mount3;
  970. }
  971. /*
  972. * set up enough so that it can read an inode
  973. */
  974. sb->s_op = &ext2_sops;
  975. sb->s_export_op = &ext2_export_ops;
  976. sb->s_xattr = ext2_xattr_handlers;
  977. #ifdef CONFIG_QUOTA
  978. sb->dq_op = &dquot_operations;
  979. sb->s_qcop = &dquot_quotactl_ops;
  980. #endif
  981. root = ext2_iget(sb, EXT2_ROOT_INO);
  982. if (IS_ERR(root)) {
  983. ret = PTR_ERR(root);
  984. goto failed_mount3;
  985. }
  986. if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
  987. iput(root);
  988. ext2_msg(sb, KERN_ERR, "error: corrupt root inode, run e2fsck");
  989. goto failed_mount3;
  990. }
  991. sb->s_root = d_make_root(root);
  992. if (!sb->s_root) {
  993. ext2_msg(sb, KERN_ERR, "error: get root inode failed");
  994. ret = -ENOMEM;
  995. goto failed_mount3;
  996. }
  997. if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
  998. ext2_msg(sb, KERN_WARNING,
  999. "warning: mounting ext3 filesystem as ext2");
  1000. if (ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY))
  1001. sb->s_flags |= MS_RDONLY;
  1002. ext2_write_super(sb);
  1003. return 0;
  1004. cantfind_ext2:
  1005. if (!silent)
  1006. ext2_msg(sb, KERN_ERR,
  1007. "error: can't find an ext2 filesystem on dev %s.",
  1008. sb->s_id);
  1009. goto failed_mount;
  1010. failed_mount3:
  1011. percpu_counter_destroy(&sbi->s_freeblocks_counter);
  1012. percpu_counter_destroy(&sbi->s_freeinodes_counter);
  1013. percpu_counter_destroy(&sbi->s_dirs_counter);
  1014. failed_mount2:
  1015. for (i = 0; i < db_count; i++)
  1016. brelse(sbi->s_group_desc[i]);
  1017. failed_mount_group_desc:
  1018. kfree(sbi->s_group_desc);
  1019. kfree(sbi->s_debts);
  1020. failed_mount:
  1021. brelse(bh);
  1022. failed_sbi:
  1023. sb->s_fs_info = NULL;
  1024. kfree(sbi->s_blockgroup_lock);
  1025. kfree(sbi);
  1026. failed_unlock:
  1027. return ret;
  1028. }
  1029. static void ext2_clear_super_error(struct super_block *sb)
  1030. {
  1031. struct buffer_head *sbh = EXT2_SB(sb)->s_sbh;
  1032. if (buffer_write_io_error(sbh)) {
  1033. /*
  1034. * Oh, dear. A previous attempt to write the
  1035. * superblock failed. This could happen because the
  1036. * USB device was yanked out. Or it could happen to
  1037. * be a transient write error and maybe the block will
  1038. * be remapped. Nothing we can do but to retry the
  1039. * write and hope for the best.
  1040. */
  1041. ext2_msg(sb, KERN_ERR,
  1042. "previous I/O error to superblock detected\n");
  1043. clear_buffer_write_io_error(sbh);
  1044. set_buffer_uptodate(sbh);
  1045. }
  1046. }
  1047. static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es,
  1048. int wait)
  1049. {
  1050. ext2_clear_super_error(sb);
  1051. spin_lock(&EXT2_SB(sb)->s_lock);
  1052. es->s_free_blocks_count = cpu_to_le32(ext2_count_free_blocks(sb));
  1053. es->s_free_inodes_count = cpu_to_le32(ext2_count_free_inodes(sb));
  1054. es->s_wtime = cpu_to_le32(get_seconds());
  1055. /* unlock before we do IO */
  1056. spin_unlock(&EXT2_SB(sb)->s_lock);
  1057. mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
  1058. if (wait)
  1059. sync_dirty_buffer(EXT2_SB(sb)->s_sbh);
  1060. sb->s_dirt = 0;
  1061. }
  1062. /*
  1063. * In the second extended file system, it is not necessary to
  1064. * write the super block since we use a mapping of the
  1065. * disk super block in a buffer.
  1066. *
  1067. * However, this function is still used to set the fs valid
  1068. * flags to 0. We need to set this flag to 0 since the fs
  1069. * may have been checked while mounted and e2fsck may have
  1070. * set s_state to EXT2_VALID_FS after some corrections.
  1071. */
static int ext2_sync_fs(struct super_block *sb, int wait)
{
	struct ext2_sb_info *sbi = EXT2_SB(sb);
	struct ext2_super_block *es = EXT2_SB(sb)->s_es;

	/*
	 * Clear EXT2_VALID_FS in the in-memory superblock image: while the
	 * fs is mounted read-write it must look "not cleanly unmounted"
	 * on disk (see the comment above this function).
	 */
	spin_lock(&sbi->s_lock);
	if (es->s_state & cpu_to_le16(EXT2_VALID_FS)) {
		ext2_debug("setting valid to 0\n");
		es->s_state &= cpu_to_le16(~EXT2_VALID_FS);
	}
	spin_unlock(&sbi->s_lock);
	/* ext2_sync_super() refreshes the counters and writes the buffer. */
	ext2_sync_super(sb, es, wait);
	return 0;
}
  1085. void ext2_write_super(struct super_block *sb)
  1086. {
  1087. if (!(sb->s_flags & MS_RDONLY))
  1088. ext2_sync_fs(sb, 1);
  1089. else
  1090. sb->s_dirt = 0;
  1091. }
/*
 * Remount: re-parse the mount options and handle ro<->rw transitions.
 * On any failure the previously active options and sb flags are
 * restored via the restore_opts path.  sbi->s_lock is held while
 * options and the superblock image are manipulated and is dropped
 * before any I/O or quota calls.
 */
static int ext2_remount (struct super_block * sb, int * flags, char * data)
{
	struct ext2_sb_info * sbi = EXT2_SB(sb);
	struct ext2_super_block * es;
	unsigned long old_mount_opt = sbi->s_mount_opt;
	struct ext2_mount_options old_opts;
	unsigned long old_sb_flags;
	int err;

	sync_filesystem(sb);
	spin_lock(&sbi->s_lock);

	/* Store the old options so they can be restored on failure. */
	old_sb_flags = sb->s_flags;
	old_opts.s_mount_opt = sbi->s_mount_opt;
	old_opts.s_resuid = sbi->s_resuid;
	old_opts.s_resgid = sbi->s_resgid;

	/*
	 * Allow the "check" option to be passed as a remount option.
	 */
	if (!parse_options(data, sb)) {
		err = -EINVAL;
		goto restore_opts;
	}

	/* Mirror the (possibly changed) ACL mount option into sb flags. */
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);

	ext2_xip_verify_sb(sb); /* see if bdev supports xip, unset
				   EXT2_MOUNT_XIP if not */

	if ((ext2_use_xip(sb)) && (sb->s_blocksize != PAGE_SIZE)) {
		ext2_msg(sb, KERN_WARNING,
			"warning: unsupported blocksize for xip");
		err = -EINVAL;
		goto restore_opts;
	}

	es = sbi->s_es;
	/* The xip flag cannot be toggled on a live mount; put it back. */
	if ((sbi->s_mount_opt ^ old_mount_opt) & EXT2_MOUNT_XIP) {
		ext2_msg(sb, KERN_WARNING, "warning: refusing change of "
			 "xip flag with busy inodes while remounting");
		sbi->s_mount_opt &= ~EXT2_MOUNT_XIP;
		sbi->s_mount_opt |= old_mount_opt & EXT2_MOUNT_XIP;
	}
	/* No ro/rw change requested: option update is all we had to do. */
	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
		spin_unlock(&sbi->s_lock);
		return 0;
	}
	if (*flags & MS_RDONLY) {
		/* Already valid on disk, or never valid: nothing to write. */
		if (le16_to_cpu(es->s_state) & EXT2_VALID_FS ||
		    !(sbi->s_mount_state & EXT2_VALID_FS)) {
			spin_unlock(&sbi->s_lock);
			return 0;
		}

		/*
		 * OK, we are remounting a valid rw partition rdonly, so set
		 * the rdonly flag and then mark the partition as valid again.
		 */
		es->s_state = cpu_to_le16(sbi->s_mount_state);
		es->s_mtime = cpu_to_le32(get_seconds());
		/* Drop the lock for the quota call and superblock write. */
		spin_unlock(&sbi->s_lock);

		err = dquot_suspend(sb, -1);
		if (err < 0) {
			/* restore_opts expects the lock to be held. */
			spin_lock(&sbi->s_lock);
			goto restore_opts;
		}

		ext2_sync_super(sb, es, 1);
	} else {
		/* rw remount must not violate read-only-compat features. */
		__le32 ret = EXT2_HAS_RO_COMPAT_FEATURE(sb,
					       ~EXT2_FEATURE_RO_COMPAT_SUPP);
		if (ret) {
			ext2_msg(sb, KERN_WARNING,
				"warning: couldn't remount RDWR because of "
				"unsupported optional features (%x).",
				le32_to_cpu(ret));
			err = -EROFS;
			goto restore_opts;
		}
		/*
		 * Mounting a RDONLY partition read-write, so reread and
		 * store the current valid flag.  (It may have been changed
		 * by e2fsck since we originally mounted the partition.)
		 */
		sbi->s_mount_state = le16_to_cpu(es->s_state);
		if (!ext2_setup_super (sb, es, 0))
			sb->s_flags &= ~MS_RDONLY;
		spin_unlock(&sbi->s_lock);

		ext2_write_super(sb);

		dquot_resume(sb, -1);
	}

	return 0;
restore_opts:
	/* Roll back to the options in effect before this remount attempt. */
	sbi->s_mount_opt = old_opts.s_mount_opt;
	sbi->s_resuid = old_opts.s_resuid;
	sbi->s_resgid = old_opts.s_resgid;
	sb->s_flags = old_sb_flags;
	spin_unlock(&sbi->s_lock);
	return err;
}
/*
 * Report filesystem statistics for statfs(2).  The metadata overhead
 * (boot area, superblock copies, group descriptors, bitmaps, inode
 * tables) is cached in sbi->s_overhead_last and only recomputed when
 * es->s_blocks_count changes.  sbi->s_lock is held for the whole call.
 */
static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
{
	struct super_block *sb = dentry->d_sb;
	struct ext2_sb_info *sbi = EXT2_SB(sb);
	struct ext2_super_block *es = sbi->s_es;
	u64 fsid;

	spin_lock(&sbi->s_lock);

	if (test_opt (sb, MINIX_DF))
		/* minixdf: report raw block counts with no overhead. */
		sbi->s_overhead_last = 0;
	else if (sbi->s_blocks_last != le32_to_cpu(es->s_blocks_count)) {
		unsigned long i, overhead = 0;
		/* Pairs with the smp_wmb() below. */
		smp_rmb();
		/*
		 * Compute the overhead (FS structures). This is constant
		 * for a given filesystem unless the number of block groups
		 * changes so we cache the previous value until it does.
		 */

		/*
		 * All of the blocks before first_data_block are
		 * overhead
		 */
		overhead = le32_to_cpu(es->s_first_data_block);

		/*
		 * Add the overhead attributed to the superblock and
		 * block group descriptors.  If the sparse superblocks
		 * feature is turned on, then not all groups have this.
		 */
		for (i = 0; i < sbi->s_groups_count; i++)
			overhead += ext2_bg_has_super(sb, i) +
				ext2_bg_num_gdb(sb, i);

		/*
		 * Every block group has an inode bitmap, a block
		 * bitmap, and an inode table.
		 */
		overhead += (sbi->s_groups_count *
			     (2 + sbi->s_itb_per_group));
		sbi->s_overhead_last = overhead;
		/* Publish the overhead before the block count guard. */
		smp_wmb();
		sbi->s_blocks_last = le32_to_cpu(es->s_blocks_count);
	}

	buf->f_type = EXT2_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = le32_to_cpu(es->s_blocks_count) - sbi->s_overhead_last;
	buf->f_bfree = ext2_count_free_blocks(sb);
	/* Keep the on-disk free counters in step with the live count. */
	es->s_free_blocks_count = cpu_to_le32(buf->f_bfree);
	buf->f_bavail = buf->f_bfree - le32_to_cpu(es->s_r_blocks_count);
	if (buf->f_bfree < le32_to_cpu(es->s_r_blocks_count))
		buf->f_bavail = 0;
	buf->f_files = le32_to_cpu(es->s_inodes_count);
	buf->f_ffree = ext2_count_free_inodes(sb);
	es->s_free_inodes_count = cpu_to_le32(buf->f_ffree);
	buf->f_namelen = EXT2_NAME_LEN;
	/* Fold the 16-byte UUID into a 64-bit fsid. */
	fsid = le64_to_cpup((void *)es->s_uuid) ^
	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
	spin_unlock(&sbi->s_lock);
	return 0;
}
/*
 * Mount entry point: delegate to the generic block-device mount path,
 * with ext2_fill_super() doing the per-filesystem setup.
 */
static struct dentry *ext2_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, ext2_fill_super);
}
  1250. #ifdef CONFIG_QUOTA
  1251. /* Read data from quotafile - avoid pagecache and such because we cannot afford
  1252. * acquiring the locks... As quota files are never truncated and quota code
  1253. * itself serializes the operations (and no one else should touch the files)
  1254. * we don't have to be afraid of races */
  1255. static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data,
  1256. size_t len, loff_t off)
  1257. {
  1258. struct inode *inode = sb_dqopt(sb)->files[type];
  1259. sector_t blk = off >> EXT2_BLOCK_SIZE_BITS(sb);
  1260. int err = 0;
  1261. int offset = off & (sb->s_blocksize - 1);
  1262. int tocopy;
  1263. size_t toread;
  1264. struct buffer_head tmp_bh;
  1265. struct buffer_head *bh;
  1266. loff_t i_size = i_size_read(inode);
  1267. if (off > i_size)
  1268. return 0;
  1269. if (off+len > i_size)
  1270. len = i_size-off;
  1271. toread = len;
  1272. while (toread > 0) {
  1273. tocopy = sb->s_blocksize - offset < toread ?
  1274. sb->s_blocksize - offset : toread;
  1275. tmp_bh.b_state = 0;
  1276. tmp_bh.b_size = sb->s_blocksize;
  1277. err = ext2_get_block(inode, blk, &tmp_bh, 0);
  1278. if (err < 0)
  1279. return err;
  1280. if (!buffer_mapped(&tmp_bh)) /* A hole? */
  1281. memset(data, 0, tocopy);
  1282. else {
  1283. bh = sb_bread(sb, tmp_bh.b_blocknr);
  1284. if (!bh)
  1285. return -EIO;
  1286. memcpy(data, bh->b_data+offset, tocopy);
  1287. brelse(bh);
  1288. }
  1289. offset = 0;
  1290. toread -= tocopy;
  1291. data += tocopy;
  1292. blk++;
  1293. }
  1294. return len;
  1295. }
/*
 * Write to quotafile.  Bypasses the pagecache (see the comment above
 * ext2_quota_read()); works block by block, reading a block first only
 * when it would be partially overwritten.  Returns the number of bytes
 * written, or a negative errno if nothing was written at all.
 */
static ssize_t ext2_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	sector_t blk = off >> EXT2_BLOCK_SIZE_BITS(sb);
	int err = 0;
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t towrite = len;
	struct buffer_head tmp_bh;
	struct buffer_head *bh;

	/* I_MUTEX_QUOTA: separate lockdep class for quota-file i_mutex. */
	mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
	while (towrite > 0) {
		/* Copy at most up to the end of the current fs block. */
		tocopy = sb->s_blocksize - offset < towrite ?
				sb->s_blocksize - offset : towrite;

		tmp_bh.b_state = 0;
		/*
		 * NOTE(review): tmp_bh.b_size is left uninitialized here,
		 * unlike in ext2_quota_read() — confirm ext2_get_block()
		 * does not consult b_size on this (create) path.
		 */
		err = ext2_get_block(inode, blk, &tmp_bh, 1);
		if (err < 0)
			goto out;
		/* Partial-block writes must read the block first;
		 * a full-block overwrite can use a fresh buffer. */
		if (offset || tocopy != EXT2_BLOCK_SIZE(sb))
			bh = sb_bread(sb, tmp_bh.b_blocknr);
		else
			bh = sb_getblk(sb, tmp_bh.b_blocknr);
		if (!bh) {
			err = -EIO;
			goto out;
		}
		lock_buffer(bh);
		memcpy(bh->b_data+offset, data, tocopy);
		flush_dcache_page(bh->b_page);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		brelse(bh);
		offset = 0;
		towrite -= tocopy;
		data += tocopy;
		blk++;
	}
out:
	/* Nothing written at all: report the error (or 0). */
	if (len == towrite) {
		mutex_unlock(&inode->i_mutex);
		return err;
	}
	/* Partial or full success: extend i_size if we wrote past EOF. */
	if (inode->i_size < off+len-towrite)
		i_size_write(inode, off+len-towrite);
	inode->i_version++;
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
	mutex_unlock(&inode->i_mutex);
	return len - towrite;
}
  1349. #endif
/* Filesystem type registration: ext2 requires a backing block device. */
static struct file_system_type ext2_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext2",
	.mount		= ext2_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("ext2");
  1358. static int __init init_ext2_fs(void)
  1359. {
  1360. int err = init_ext2_xattr();
  1361. if (err)
  1362. return err;
  1363. err = init_inodecache();
  1364. if (err)
  1365. goto out1;
  1366. err = register_filesystem(&ext2_fs_type);
  1367. if (err)
  1368. goto out;
  1369. return 0;
  1370. out:
  1371. destroy_inodecache();
  1372. out1:
  1373. exit_ext2_xattr();
  1374. return err;
  1375. }
/* Module unload: tear down in the reverse order of init_ext2_fs(). */
static void __exit exit_ext2_fs(void)
{
	unregister_filesystem(&ext2_fs_type);
	destroy_inodecache();
	exit_ext2_xattr();
}
/* Module metadata and entry/exit points. */
MODULE_AUTHOR("Remy Card and others");
MODULE_DESCRIPTION("Second Extended Filesystem");
MODULE_LICENSE("GPL");
module_init(init_ext2_fs)
module_exit(exit_ext2_fs)