scfs.c 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347
  1. /*
  2. * fs/scfs/scfs.c
  3. *
  4. * Copyright (C) 2014 Samsung Electronics Co., Ltd.
  5. * Authors: Sunghwan Yun <sunghwan.yun@samsung.com>
  6. * Jongmin Kim <jm45.kim@samsung.com>
  7. * Sangwoo Lee <sangwoo2.lee@samsung.com>
  8. * Inbae Lee <inbae.lee@samsung.com>
  9. *
  10. * This program has been developed as a stackable file system based on
  11. * the WrapFS, which was written by:
  12. *
  13. * Copyright (C) 1997-2003 Erez Zadok
  14. * Copyright (C) 2001-2003 Stony Brook University
  15. * Copyright (C) 2004-2006 International Business Machines Corp.
  16. * Author(s): Michael A. Halcrow <mahalcro@us.ibm.com>
  17. * Michael C. Thompson <mcthomps@us.ibm.com>
  18. *
  19. * This program is free software: you can redistribute it and/or modify
  20. * it under the terms of the GNU General Public License as published by
  21. * the Free Software Foundation, either version 2 of the License, or
  22. * (at your option) any later version.
  23. *
  24. * This program is distributed in the hope that it will be useful, but
  25. * WITHOUT ANY WARRANTY; without even the implied warranty of
  26. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  27. * General Public License for more details.
  28. *
  29. * You should have received a copy of the GNU General Public License
  30. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  31. */
  32. #include <linux/crypto.h>
  33. #include <linux/nsproxy.h>
  34. #include <linux/parser.h>
  35. #include <linux/statfs.h>
  36. #include "scfs.h"
  37. #include <linux/lzo.h>
  38. #include <linux/ctype.h>
/* Slab caches for SCFS per-object metadata; created at module init. */
struct kmem_cache *scfs_file_info_cache;
struct kmem_cache *scfs_dentry_info_cache;
struct kmem_cache *scfs_inode_info_cache;
struct kmem_cache *scfs_sb_info_cache;
struct kmem_cache *scfs_info_entry_list;	/* cache for struct cinfo_entry */
#ifdef SCFS_MULTI_THREAD_COMPRESSION
struct kmem_cache *scfs_cbm_cache;
#endif
/* LZO must be enabled: it is the direct (non-crypto-API) compression path. */
#if (!defined(CONFIG_LZO_DECOMPRESS) || !defined(CONFIG_LZO_COMPRESS))
#error "LZO library needs to be enabled!"
#endif
/*
 * Crypto transform names, indexed by enum comp_type
 * (see SCFS_COMP_LZO/SCFS_COMP_ZLIB/SCFS_COMP_FASTLZO used below).
 */
const char *tfm_names[SCFS_COMP_TOTAL_TYPES] =
{
	"none",		/* none */
	"lzo",		/* lzo */
	"zlib",		/* zlib */
	"deflate",
	"fastlzo"	/* lzo */
};

/* Compressor descriptors, defined in the crypto glue code. */
extern struct scfs_compressor *scfs_compressors[SCFS_COMP_TOTAL_TYPES];
  60. void scfs_printk(const char *fmt, ...)
  61. {
  62. va_list args;
  63. va_start(args, fmt);
  64. vprintk(fmt, args);
  65. va_end(args);
  66. }
/*
 * scfs_load_cinfo
 *
 * Read this file's cluster-info array from the tail of the lower file
 * (it sits immediately before the footer) into a fresh buffer and hang
 * it on sii->cinfo_array.
 *
 * If the loaded array fails scfs_check_cinfo(), the file is demoted to
 * a plain non-compressed one: compression state is reset from the mount
 * options, the buffer is freed, and 0 is still returned.
 *
 * Returns 0 on success (including the demotion path), -ENOMEM on
 * allocation failure, or a negative error from the lower read.
 */
int scfs_load_cinfo(struct scfs_inode_info *sii, struct file *lower_file)
{
	struct scfs_sb_info *sbi = SCFS_S(sii->vfs_inode.i_sb);
	void *buf;
	loff_t pos;
	int ret;

	ASSERT(lower_file);
	ASSERT(sii->compressed);
	buf = scfs_cinfo_alloc(sii, sii->cinfo_array_size);
	if (!buf) {
		return -ENOMEM;
	}
	/* cinfo array location: end of lower file, minus footer, minus array */
	pos = i_size_read(sii->lower_inode) - sii->cinfo_array_size - CF_SIZE;
	ASSERT(pos > 0);
	ret = scfs_lower_read(lower_file, buf, sii->cinfo_array_size, &pos);
	if (ret < 0) {
		scfs_cinfo_free(sii, buf);
		return ret;
	}
	ret = 0;
	if (scfs_check_cinfo(sii, buf)) {
		/* bad/missing meta: fall back to treating the file as raw data */
		SCFS_PRINT("treat this file as non-compressed one(missing footer).\n");
		sii->cinfo_array_size = 0;
		sii->flags &= ~SCFS_DATA_COMPRESSABLE;
		sii->compressed = 0;
		sii->cluster_size = sbi->options.cluster_size;
		sii->comp_type = sbi->options.comp_type;
		sii->cinfo_array = NULL;
		scfs_cinfo_free(sii, buf);
	} else
		sii->cinfo_array = buf;
	return ret;
}
/*
 * scfs_reload_meta
 *
 * Re-read the footer and the cluster-info array for a file whose
 * in-memory meta was flagged invalid, then clear the invalid flag.
 * The lower file is opened read-only and fput() on every exit path
 * after a successful open.
 *
 * Returns 0 on success or a negative error from open/footer/cinfo load.
 */
int scfs_reload_meta(struct file *file)
{
	struct dentry *dentry = file->f_dentry;
	struct scfs_inode_info *sii = SCFS_I(dentry->d_inode);
	struct file *lower_file;
	int ret = 0;

	ASSERT(IS_INVALID_META(sii));
	ret = scfs_initialize_lower_file(dentry, &lower_file, O_RDONLY);
	if (ret) {
		SCFS_PRINT_ERROR("err in get_lower_file %s\n",
			dentry->d_name.name);
		return ret;
	}
	ret = scfs_footer_read(dentry->d_inode, lower_file);
	if (ret) {
		SCFS_PRINT_ERROR("f:%s err in reading footer, ret : %d\n",
			dentry->d_name.name, ret);
		goto out;
	}
	SCFS_PRINT("f:%s info size = %d \n",
		dentry->d_name.name, sii->cinfo_array_size);
	/* drop any stale array before loading the fresh one */
	if (sii->cinfo_array)
		scfs_cinfo_free(sii, sii->cinfo_array);
	ret = scfs_load_cinfo(sii, lower_file);
	if (ret) {
		SCFS_PRINT_ERROR("f:%s err in loading cinfo, ret : %d\n",
			dentry->d_name.name, ret);
		goto out;
	}
	CLEAR_META_INVALID(sii);
out:
	SCFS_PRINT("f:%s calling fput\n", dentry->d_name.name);
	fput(lower_file);
	return ret;
}
/*
 * get_cluster_info
 *
 * Parameters:
 * @file: upper file
 * @cluster_idx: index of the cluster whose info is wanted
 * @target: out-param, filled with the cluster's offset and size on success
 *
 * Return:
 * 0 if success, otherwise if error
 *
 * Description:
 * Look up the cluster info for a given cluster. Entries already
 * persisted to the lower file are found by index in sii->cinfo_array;
 * newer, not-yet-written entries are searched on sii->cinfo_list.
 * If the in-memory meta was flagged invalid, it is reloaded first.
 */
int get_cluster_info(struct file *file, int cluster_idx,
		struct scfs_cinfo *target)
{
	struct scfs_inode_info *sii = SCFS_I(file->f_dentry->d_inode);
	struct scfs_cinfo *cinfo;
	struct cinfo_entry *cinfo_entry;
	struct list_head *head, *tmp;
	int ret = 0;

	ASSERT(IS_COMPRESSABLE(sii));
	if (IS_INVALID_META(sii)) {
		SCFS_PRINT("f:%s meta invalid flag is set, "
			"let's reload.\n",
			file->f_path.dentry->d_name.name);
		ret = scfs_reload_meta(file);
		if (ret) {
			SCFS_PRINT_ERROR("f:%s error in re-reading footer, err : %d\n",
				file->f_path.dentry->d_name.name, ret);
			goto out;
		}
	}
	ret = -EINVAL;
	if (cluster_idx + 1 > CLUSTER_COUNT(sii)) {
		SCFS_PRINT_ERROR("f:%s size check err, "
			"cluster_idx %d cluster count of the file %d\n",
			file->f_path.dentry->d_name.name, cluster_idx, CLUSTER_COUNT(sii));
		goto out;
	}
	/* persisted entries: direct index into the on-disk cinfo array */
	if (cluster_idx * sizeof(struct scfs_cinfo) < sii->cinfo_array_size) {
		cinfo = (struct scfs_cinfo *)(sii->cinfo_array) + cluster_idx;
		ret = 0;
	} else {
		/* pending entries: linear search of the in-memory list */
		if (list_empty(&sii->cinfo_list)) {
			SCFS_PRINT_ERROR("cluster idx : %d, and info size : %d, but info list is empty!\n",
				cluster_idx, sii->cinfo_array_size);
			goto out;
		}
		list_for_each_safe(head, tmp, &sii->cinfo_list) {
			cinfo_entry = list_entry(head, struct cinfo_entry, entry);
			/* list entries appear in ascending index order; passing
			 * the wanted index means it is not present */
			if (cinfo_entry->current_cluster_idx < cluster_idx) {
				SCFS_PRINT_ERROR("cluster idx : %d, and current_cluster_idx %d\n",
					cluster_idx, cinfo_entry->current_cluster_idx);
				goto out;
			}
			if (cinfo_entry->current_cluster_idx == cluster_idx) {
				cinfo = &cinfo_entry->cinfo;
				ret = 0;
				goto out;
			}
		}
		SCFS_PRINT_ERROR("f:%s invalid cluster idx : %d or cluster_info(size : %d)\n",
			file->f_path.dentry->d_name.name,
			cluster_idx, sii->cinfo_array_size);
		ret = -EIO;
	}
out:
	/* only on success is 'cinfo' guaranteed to point at a valid entry */
	if (!ret) {
		target->offset = cinfo->offset;
		target->size = cinfo->size;
	}
	return ret;
}
/* Mount-option token ids for match_token(). */
enum {
	scfs_opt_nocompress,
	scfs_opt_cluster_size,
	scfs_opt_comp_threshold,
	scfs_opt_comp_type,
	scfs_opt_err,
};

/* Mount-option patterns; the NULL entry is the catch-all error token. */
static const match_table_t tokens = {
	{scfs_opt_nocompress, "nocomp"},
	{scfs_opt_cluster_size, "cluster_size=%u"},
	{scfs_opt_comp_threshold, "comp_threshold=%u"},
	{scfs_opt_comp_type, "comp_type=%s"},
	{scfs_opt_err, NULL}
};
  225. int scfs_parse_options(struct scfs_sb_info *sbi, char *options)
  226. {
  227. int token;
  228. int option;
  229. char *p;
  230. char *type;
  231. substring_t args[MAX_OPT_ARGS];
  232. if (!options)
  233. return 0;
  234. while ((p = strsep(&options, ",")) != NULL) {
  235. if (!*p)
  236. continue;
  237. token = match_token(p, tokens, args);
  238. switch (token) {
  239. case scfs_opt_nocompress:
  240. sbi->options.flags &= ~SCFS_DATA_COMPRESSABLE;
  241. break;
  242. case scfs_opt_cluster_size:
  243. if (match_int(&args[0], &option))
  244. return 0;
  245. if (option > SCFS_CLUSTER_SIZE_MAX ||
  246. option < SCFS_CLUSTER_SIZE_MIN) {
  247. SCFS_PRINT_ERROR("cluster_size, out of range\n");
  248. return -EINVAL;
  249. }
  250. if (!IS_POW2(option)) {
  251. SCFS_PRINT_ERROR("cluster_size must be a power of 2\n");
  252. return -EINVAL;
  253. }
  254. sbi->options.cluster_size = option;
  255. break;
  256. case scfs_opt_comp_threshold:
  257. if (match_int(&args[0], &option))
  258. return 0;
  259. if (option > 100 || option < 0) {
  260. SCFS_PRINT_ERROR("threshold, out of range, "
  261. "it's a percent\n");
  262. return -EINVAL;
  263. }
  264. sbi->options.comp_threshold = option;
  265. break;
  266. case scfs_opt_comp_type:
  267. type = args[0].from;
  268. if (!strcmp(type, "lzo"))
  269. sbi->options.comp_type = SCFS_COMP_LZO;
  270. /* disable bzip for now, crypto_alloc_comp doesn't work for some reason */
  271. #if 0 //#ifdef CONFIG_CRYPTO_DEFLATE
  272. else if (!strcmp(type, "bzip2"))
  273. sbi->options.comp_type = BZIP2;
  274. #endif
  275. #ifdef CONFIG_CRYPTO_ZLIB
  276. else if (!strcmp(type, "zlib"))
  277. sbi->options.comp_type = SCFS_COMP_ZLIB;
  278. #endif
  279. #ifdef CONFIG_CRYPTO_FASTLZO
  280. else if (!strcmp(type, "fastlzo"))
  281. sbi->options.comp_type = SCFS_COMP_FASTLZO;
  282. #endif
  283. else {
  284. SCFS_PRINT_ERROR("invalid compression type\n");
  285. return -EINVAL;
  286. }
  287. break;
  288. default:
  289. SCFS_PRINT_ERROR("Unrecognized mount option [%s]\n", p);
  290. return -EINVAL;
  291. }
  292. }
  293. return 0;
  294. }
  295. void copy_mount_flags_to_inode_flags(struct inode *inode, struct super_block *sb)
  296. {
  297. struct scfs_sb_info *sbi = SCFS_S(sb);
  298. struct scfs_inode_info *sii = SCFS_I(inode);
  299. sii->cluster_size = sbi->options.cluster_size;
  300. sii->comp_type = sbi->options.comp_type;
  301. if (sbi->options.flags & SCFS_DATA_COMPRESSABLE)
  302. sii->flags |= SCFS_DATA_COMPRESSABLE;
  303. }
/*
 * scfs_initialize_lower_file
 *
 * Open the lower (backing) file for @dentry and return it via @lower_file.
 * EMPTY_FLAG means "choose for me": O_RDONLY if the lower inode is
 * read-only, O_RDWR otherwise. For explicit flags, O_APPEND is stripped
 * (SCFS drives the file position itself).
 *
 * Returns 0 on success; on failure returns the dentry_open() error with
 * *lower_file set to NULL.
 */
int scfs_initialize_lower_file(struct dentry *dentry, struct file **lower_file, int flags)
{
	const struct cred *cred;
	int ret = 0;
	struct dentry *lower_dentry = scfs_lower_dentry(dentry);
	struct vfsmount *lower_mnt = scfs_dentry_to_lower_mnt(dentry);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
	/* >= 3.10: dentry_open() takes a struct path */
	const struct path path = {lower_mnt, lower_dentry};
#else
	/* dput and mntput is done in dentry_open if it returns an error */
	dget(lower_dentry);
	mntget(lower_mnt);
#endif
	cred = current_cred();
	if (flags == EMPTY_FLAG)
		flags = IS_RDONLY(lower_dentry->d_inode) ? O_RDONLY : O_RDWR;
	else
		flags &= ~O_APPEND;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
	(*lower_file) = dentry_open(&path, flags, cred);
#else
	(*lower_file) = dentry_open(lower_dentry, lower_mnt, flags, cred);
#endif
	if (IS_ERR(*lower_file)) {
		ret = PTR_ERR(*lower_file);
		SCFS_PRINT_ERROR("lower dentry_open fail, name : %s, ret : %d\n",
			lower_dentry->d_name.name, ret);
		*lower_file = NULL;
		return ret;
	}
#ifdef CONFIG_SCFS_LOWER_PAGECACHE_INVALIDATION
	/* 16KB fixed-size lower readahead */
	(*lower_file)->f_flags |= O_SCFSLOWER;
	(*lower_file)->f_ra.ra_pages = 4;
#endif
	return ret;
}
/**
 * scfs_read_cluster
 *
 * Parameters:
 * @file: upper file
 * @page: upper page from SCFS inode mapping
 * @buf_c: buffer for compressed cluster
 * @buf_u: buffer for uncompressed cluster
 * @compressed: out-param, whether cluster was stored compressed or not
 *
 * Return:
 * 0 if success, otherwise if error
 * *compressed = 1 if cluster was compressed
 *
 * Description:
 * - Read the cluster containing @page from the lower file, and if it
 *   was compressed, decompress it into *buf_u.
 * - Zero-size files and pages beyond i_size are treated as "nothing to
 *   read": the page is unlocked and 0 is returned.
 */
int scfs_read_cluster(struct file *file, struct page *page,
		char *buf_c, char **buf_u, int *compressed)
{
	struct scfs_inode_info *sii = SCFS_I(page->mapping->host);
	struct scfs_cinfo cinfo;
	struct file *lower_file = NULL;
	int cluster_idx = 0, ret = 0;
	int size = 0, last_cluster_idx = 0;
	loff_t i_size = 0, pos = 0, tmp, left;
	size_t actual = 0;

	/* check upper inode size */
	i_size = i_size_read(&sii->vfs_inode);
	if (!i_size) {
		SCFS_PRINT("file %s: i_size is zero, "
			"flags 0x%x sii->clust_info_size %d\n",
			file->f_path.dentry->d_name.name, sii->flags,
			sii->cinfo_array_size);
		unlock_page(page);
		return ret;
	} else if (page->index * PAGE_SIZE >= i_size) {
		SCFS_PRINT("file %s: page->idx out of bounds, "
			"page->idx %d i_size %lld\n",
			file->f_path.dentry->d_name.name, page->index, i_size);
		unlock_page(page);
		return ret;
	}
	/* last_cluster_idx = ceil(i_size / cluster_size) - 1;
	 * 'left' holds the byte count of a partial final cluster (0 if full) */
	tmp = i_size;
	left = do_div(tmp, sii->cluster_size);
	if (left)
		tmp++;
	last_cluster_idx = tmp - 1;
	cluster_idx = PAGE_TO_CLUSTER_INDEX(page, sii);
	if (cluster_idx > last_cluster_idx) {
		SCFS_PRINT_ERROR("file %s: cluster_idx out of range, "
			"clust %u of %u, i_size %lld, "
			"page->index %d\n",
			file->f_path.dentry->d_name.name,
			cluster_idx, last_cluster_idx, i_size, page->index);
		return -ERANGE;
	}
	if (IS_COMPRESSABLE(sii)) {
		mutex_lock(&sii->cinfo_mutex);
		ret = get_cluster_info(file, cluster_idx, &cinfo);
		mutex_unlock(&sii->cinfo_mutex);
		if (ret) {
			SCFS_PRINT_ERROR("err in get_cluster_info, ret : %d,"
				"i_size %lld\n", ret, i_size);
			return ret;
		}
		if (!cinfo.size || cinfo.size > sii->cluster_size) {
			SCFS_PRINT_ERROR("file %s: cinfo is invalid, "
				"clust %u of %u cinfo.size %u\n",
				file->f_path.dentry->d_name.name,
				cluster_idx, last_cluster_idx, cinfo.size);
			return -EINVAL;
		}
		/* decide if cluster was compressed */
		if (cinfo.size == sii->cluster_size) {
			*compressed = 0;
		} else {
			/* a partial last cluster of exactly 'left' bytes was
			 * stored raw, not compressed */
			if (cluster_idx == last_cluster_idx && left == cinfo.size)
				*compressed = 0;
			else
				*compressed = 1;
		}
		size = cinfo.size;
		pos = (loff_t)cinfo.offset;
	} else {
		/* non-compressable file: raw clusters laid out back-to-back */
		*compressed = 0;
		size = sii->cluster_size;
		if (cluster_idx == last_cluster_idx && left)
			size = left;
		pos = (loff_t)cluster_idx * sii->cluster_size;
	}
	lower_file = scfs_lower_file(file);
	if (!lower_file) {
		SCFS_PRINT_ERROR("file %s: lower file is null!\n",
			file->f_path.dentry->d_name.name);
		return -EINVAL;
	}
	/* vfs read, either cluster or page */
#ifdef SCFS_REMOVE_NO_COMPRESSED_UPPER_MEMCPY
	/* uncompressed cluster: skip buf_c and read just this page's bytes
	 * directly into the (kmapped) upper page */
	if (!*compressed) {
		buf_c = kmap(page);
		size -= (PGOFF_IN_CLUSTER(page, sii) * PAGE_SIZE);
		if (size > PAGE_SIZE) size = PAGE_SIZE;
		pos += (PGOFF_IN_CLUSTER(page, sii) * PAGE_SIZE);
	}
#endif
	ret = scfs_lower_read(lower_file, buf_c, size, &pos);
#ifdef SCFS_REMOVE_NO_COMPRESSED_UPPER_MEMCPY
	if (!*compressed)
		kunmap(page);
#endif
	if (ret < 0) {
		SCFS_PRINT_ERROR("file %s: vfs_read failed, clust %d of %d, "
			"size %u, pos %lld, ret %d(0x%x), "
			"compressed %d, page->index %d,"
			"i_size %lld, sii->flags 0x%x, sii->cis %d\n",
			file->f_path.dentry->d_name.name, cluster_idx,
			last_cluster_idx, size, pos, ret, ret, *compressed,
			page->index, i_size, sii->flags, sii->cinfo_array_size);
		unlock_page(page);
		return ret;
	}
	ret = 0;
	/* decompress cluster if needed */
	if (*compressed) {
		actual = (size_t)sii->cluster_size;
		ret = scfs_decompress(sii->comp_type, buf_c, *buf_u,
			size, &actual);
		if (ret) {
			SCFS_PRINT_ERROR("file %s: decompress failed. "
				"clust %u of %u, offset %u size %u ret 0x%x "
				"buf_c 0x%x buf_u 0x%x\n",
				file->f_path.dentry->d_name.name,
				cluster_idx, last_cluster_idx, cinfo.offset,
				size, ret, buf_c, *buf_u);
			ClearPageUptodate(page);
			unlock_page(page);
			return ret;
		}
	}
	return ret;
}
/*
 * scfs_decompress
 *
 * Parameters:
 * @algo: algorithm type to use
 * @*buf_c: global buffer for compressed cluster data
 * @*buf_u: global buffer for decompressed cluster data
 * @len: compressed size of this cluster
 * @*actual: IN - full cluster size, OUT - decompressed size
 *
 * Return:
 * 0 if success, -EIO on decompression failure
 *
 * Description:
 * Decompress a cluster. *actual needs to be set as size of the original
 * cluster by the caller. With CONFIG_SCFS_USE_CRYPTO every algorithm
 * goes through the crypto API; otherwise LZO uses the kernel library
 * directly and all other algorithms fall back to the crypto path.
 */
int scfs_decompress(enum comp_type algo, char *buf_c, char *buf_u, size_t len,
	size_t *actual)
{
	int ret = 0;

	ASSERT(algo < SCFS_COMP_TOTAL_TYPES);
#ifdef CONFIG_SCFS_USE_CRYPTO
	ret = scfs_decompress_crypto((void *)buf_c, len, (void *)buf_u, actual, (int)algo);
	if (ret) {
		SCFS_PRINT("%s decompress error! "
			"ret %d len %d tmp_len %d\n", scfs_compressors[algo]->name,
			ret, len, *actual);
		ret = -EIO;
	}
#else // Use kernel libraries directly
	switch (algo) {
	case SCFS_COMP_LZO:
		ret = lzo1x_decompress_safe(buf_c, len, buf_u, actual);
		if (ret) {
			SCFS_PRINT_ERROR("lzo decompress error! "
				"ret %d len %d tmp_len %d\n",
				ret, len, *actual);
			ret = -EIO;
		}
		break;
	default:
		/* non-LZO algorithms still route through the crypto API */
		ret = scfs_decompress_crypto((void *)buf_c, len, (void *)buf_u, actual, (int)algo);
		if (ret) {
			SCFS_PRINT("%s decompress error! "
				"ret %d len %d tmp_len %d\n", scfs_compressors[algo]->name,
				ret, len, *actual);
			ret = -EIO;
		}
		break;
	}
#endif
	return ret;
}
/*
 * scfs_compress
 *
 * Parameters:
 * @algo: algorithm type to use
 * @*buf_c: global buffer for compressed cluster data
 * @*buf_u: global buffer for decompressed cluster data
 * @len: uncompressed size of this cluster
 * @*actual: IN - full cluster size, OUT - compressed size
 * @workdata: optional caller-owned scratch area for LZO; if NULL the
 *            shared, spinlock-protected superblock workspace is used
 * @sbi: superblock info (supplies the shared workspace and its lock)
 *
 * Return:
 * 0 if success, otherwise if error
 *
 * Description:
 * Compress a cluster. *actual needs to be set as full cluster size
 * by the caller. On crypto-path failure, *actual is reset to len so
 * the caller writes the raw data instead; the direct-LZO path returns
 * -EIO on failure.
 */
int scfs_compress(enum comp_type algo, char *buf_c, char *buf_u, size_t len,
	size_t *actual, void *workdata, struct scfs_sb_info *sbi)
{
	int ret = 0;

	ASSERT(algo < SCFS_COMP_TOTAL_TYPES);
#ifdef CONFIG_SCFS_USE_CRYPTO
	ret = scfs_compress_crypto((void *)buf_u, len, (void *)buf_c, actual, (int)algo);
	if (ret) {
		SCFS_PRINT("%s compress error! "
			"ret %d len %d tmp_len %d\n", scfs_compressors[algo]->name,
			ret, len, *actual);
		*actual = len; // We use raw data if compression was failed.
	}
#else // Use kernel libraries directly
	switch (algo) {
	case SCFS_COMP_LZO:
		/* no private scratch supplied: serialize on the shared one */
		if (!workdata) {
			spin_lock(&sbi->workdata_lock);
			memset(sbi->scfs_workdata, 0, LZO1X_MEM_COMPRESS);
			ret = lzo1x_1_compress(buf_u, len, buf_c, actual, sbi->scfs_workdata);
			spin_unlock(&sbi->workdata_lock);
		} else {
			memset(workdata, 0, LZO1X_MEM_COMPRESS);
			ret = lzo1x_1_compress(buf_u, len, buf_c, actual, workdata);
		}
		if (ret) {
			SCFS_PRINT("lzo compress error! "
				"ret %d len %d tmp_len %d\n", ret, len, *actual);
			ret = -EIO;
		}
		break;
	default:
		/* non-LZO algorithms still route through the crypto API */
		ret = scfs_compress_crypto((void *)buf_u, len, (void *)buf_c, actual, (int)algo);
		if (ret) {
			SCFS_PRINT("%s compress error! "
				"ret %d len %d tmp_len %d\n", scfs_compressors[algo]->name,
				ret, len, *actual);
			*actual = len; // We use raw data if compression was failed.
		}
		break;
	}
#endif
	return ret;
}
  596. struct page *scfs_alloc_mempool_buffer(struct scfs_sb_info *sbi)
  597. {
  598. struct page *ret = mempool_alloc(sbi->mempool,
  599. __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
  600. if (ret != NULL)
  601. profile_add_mempooled(SCFS_MEMPOOL_SIZE, sbi);
  602. return ret;
  603. }
  604. void scfs_free_mempool_buffer(struct page *p, struct scfs_sb_info *sbi)
  605. {
  606. if (!p)
  607. return;
  608. mempool_free(p, sbi->mempool);
  609. profile_sub_mempooled(SCFS_MEMPOOL_SIZE, sbi);
  610. }
  611. int scfs_check_space(struct scfs_sb_info *sbi, struct dentry *dentry)
  612. {
  613. struct dentry *lower_dentry = scfs_lower_dentry(dentry);
  614. struct kstatfs buf;
  615. int ret = 0;
  616. size_t min_space = (atomic_read(&sbi->total_cluster_count) *
  617. sizeof(struct scfs_cinfo)) + (atomic_read(&sbi->current_file_count) *
  618. CF_SIZE) + atomic64_read(&sbi->current_data_size) + PAGE_SIZE;
  619. ret = lower_dentry->d_sb->s_op->statfs(lower_dentry, &buf);
  620. if (ret)
  621. return ret;
  622. if ((buf.f_bavail * PAGE_SIZE) < min_space) {
  623. SCFS_PRINT_ERROR("bavail = %lld, req_space = %lld\n", buf.f_bavail * PAGE_SIZE
  624. , min_space);
  625. ret = -ENOSPC;
  626. }
  627. return ret;
  628. }
  629. void sync_page_to_buffer(struct page *page, char *buffer)
  630. {
  631. char *source_addr;
  632. source_addr = kmap_atomic(page);
  633. SCFS_PRINT(" buffer = %x , page address = %x\n", buffer,
  634. buffer + (PAGE_SIZE * PGOFF_IN_CLUSTER(page, SCFS_I(page->mapping->host))));
  635. memcpy(buffer + (PAGE_SIZE * PGOFF_IN_CLUSTER(page, SCFS_I(page->mapping->host))),
  636. source_addr, PAGE_SIZE);
  637. kunmap_atomic(source_addr);
  638. }
  639. void sync_page_from_buffer(struct page *page, char *buffer)
  640. {
  641. char *dest_addr;
  642. dest_addr = kmap_atomic(page);
  643. SCFS_PRINT(" buffer = %x , page address = %x\n", buffer,
  644. buffer + (PAGE_SIZE*PGOFF_IN_CLUSTER(page, SCFS_I(page->mapping->host))));
  645. memcpy(dest_addr, buffer + (PAGE_SIZE *
  646. PGOFF_IN_CLUSTER(page, SCFS_I(page->mapping->host))), PAGE_SIZE);
  647. kunmap_atomic(dest_addr);
  648. }
  649. int scfs_write_cinfo(struct scfs_inode_info *sii, struct file *lower_file, loff_t *pos)
  650. {
  651. struct scfs_sb_info *sbi = SCFS_S(sii->vfs_inode.i_sb);
  652. struct list_head *head, *tmp;
  653. struct cinfo_entry *cinfo_entry;
  654. int ret, written = 0, cinfo_size = sizeof(struct scfs_cinfo);
  655. char *buf_pos = sii->cluster_buffer.u_buffer;;
  656. ASSERT(sii->compressed);
  657. if (sii->cinfo_array_size) {
  658. ASSERT(!list_empty(&sii->cinfo_list));
  659. cinfo_entry = list_entry(sii->cinfo_list.next, struct cinfo_entry, entry);
  660. ret = scfs_lower_write(lower_file, sii->cinfo_array,
  661. sizeof(struct scfs_cinfo) * cinfo_entry->current_cluster_idx, pos);
  662. if (ret < 0) {
  663. SCFS_PRINT_ERROR("f:%s write fail in writing" \
  664. "existing meta, ret : %d.\n",
  665. lower_file->f_dentry->d_name.name, ret);
  666. MAKE_META_INVALID(sii);
  667. return ret;
  668. } else {
  669. written += ret;
  670. ret = 0;
  671. }
  672. }
  673. list_for_each_safe(head, tmp, &sii->cinfo_list) {
  674. cinfo_entry = list_entry(head, struct cinfo_entry, entry);
  675. memcpy(buf_pos, &cinfo_entry->cinfo, cinfo_size);
  676. buf_pos += cinfo_size;
  677. list_del(&cinfo_entry->entry);
  678. kmem_cache_free(scfs_info_entry_list, cinfo_entry);
  679. profile_sub_kmcached(sizeof(cinfo_entry), sbi);
  680. if (buf_pos > sii->cluster_buffer.u_buffer +
  681. ((sii->cluster_size * 2) - cinfo_size) ||
  682. list_empty(&sii->cinfo_list)) {
  683. ret = scfs_lower_write(lower_file,
  684. sii->cluster_buffer.u_buffer,
  685. (size_t)(buf_pos - sii->cluster_buffer.u_buffer),
  686. pos);
  687. if (ret < 0) {
  688. SCFS_PRINT_ERROR("f:%s write fail in writing " \
  689. "new metas, ret : %d\n",
  690. lower_file->f_dentry->d_name.name, ret);
  691. MAKE_META_INVALID(sii);
  692. return ret;
  693. }
  694. written += ret;
  695. atomic_sub(ret / sizeof(struct scfs_cinfo),
  696. &sbi->total_cluster_count);
  697. buf_pos = sii->cluster_buffer.u_buffer;
  698. }
  699. }
  700. return written;
  701. }
  702. int scfs_write_meta(struct file *file)
  703. {
  704. struct list_head *head = NULL, *tmp;
  705. struct cinfo_entry *last, *cinfo_entry = NULL;
  706. struct comp_footer cf = {0, };
  707. struct scfs_inode_info *sii = SCFS_I(file->f_dentry->d_inode);
  708. struct file *lower_file;
  709. struct scfs_sb_info *sbi = SCFS_S(sii->vfs_inode.i_sb);
  710. struct inode *lower_inode;
  711. struct iattr ia;
  712. int ret = 0;
  713. char *source = NULL;
  714. loff_t pos;
  715. size_t tmp_len;
  716. #ifdef SCFS_MULTI_THREAD_COMPRESSION
  717. struct cinfo_entry *prev_info_entry = NULL;
  718. #endif
  719. ret = scfs_initialize_lower_file(file->f_dentry, &lower_file, O_WRONLY);
  720. if (ret) {
  721. SCFS_PRINT_ERROR("err in get_lower_file %s\n", file->f_dentry->d_name.name);
  722. return ret;
  723. }
  724. SCFS_PRINT("filename : %s\n", lower_file->f_dentry->d_name.name);
  725. mutex_lock(&sii->cinfo_mutex);
  726. if (list_empty(&sii->cinfo_list)) {
  727. SCFS_PRINT("cinfo_list is empty\n");
  728. mutex_unlock(&sii->cinfo_mutex);
  729. goto out;
  730. }
  731. last = list_entry(sii->cinfo_list.prev, struct cinfo_entry, entry);
  732. #ifdef SCFS_MULTI_THREAD_COMPRESSION
  733. scfs_write_compress_all_cluster(sii, lower_file);
  734. #endif
  735. /* if last cluster exists, we should write it first. */
  736. if (IS_COMPRESSABLE(sii)) {
  737. if (sii->cluster_buffer.original_size > 0) {
  738. #ifdef SCFS_MULTI_THREAD_COMPRESSION
  739. /* update current cluster's offset using previous cluster */
  740. if (sii->is_inserted_to_sii_list) {
  741. prev_info_entry = list_entry(last->entry.prev,
  742. struct cinfo_entry, entry);
  743. last->cinfo.offset = prev_info_entry->cinfo.offset +
  744. prev_info_entry->cinfo.size;
  745. if (prev_info_entry->cinfo.size % SCFS_CLUSTER_ALIGN_BYTE)
  746. last->cinfo.offset += (SCFS_CLUSTER_ALIGN_BYTE -
  747. (prev_info_entry->cinfo.size % SCFS_CLUSTER_ALIGN_BYTE));
  748. }
  749. #endif
  750. /* Set cinfo size as available buffer size because zlib care about
  751. * available buf size. */
  752. last->cinfo.size = PAGE_CACHE_SIZE * 8;
  753. tmp_len = (size_t)last->cinfo.size;
  754. ret = scfs_compress(sii->comp_type, sii->cluster_buffer.c_buffer,
  755. sii->cluster_buffer.u_buffer,
  756. sii->cluster_buffer.original_size,
  757. &tmp_len,
  758. NULL, sbi);
  759. last->cinfo.size = (__u32)(tmp_len & 0xffff);
  760. if (ret) {
  761. SCFS_PRINT_ERROR("f:%s Compression failed." \
  762. "So, write uncompress data.\n",
  763. lower_file->f_dentry->d_name.name);
  764. goto free_out;;
  765. }
  766. last->pad = ALIGN(last->cinfo.size, SCFS_CLUSTER_ALIGN_BYTE) -
  767. last->cinfo.size;
  768. pos = (loff_t)last->cinfo.offset;
  769. if (last->cinfo.size <
  770. sii->cluster_buffer.original_size *
  771. sbi->options.comp_threshold / 100) {
  772. source = sii->cluster_buffer.c_buffer;
  773. sii->compressed = 1;
  774. } else {
  775. last->cinfo.size =
  776. sii->cluster_buffer.original_size;
  777. source = sii->cluster_buffer.u_buffer;
  778. }
  779. ret = scfs_lower_write(lower_file, source,
  780. last->cinfo.size + last->pad, &pos);
  781. if (ret < 0) {
  782. SCFS_PRINT_ERROR("f:%s writing last cluster buffer failed, ret : %d\n",
  783. lower_file->f_dentry->d_name.name, ret);
  784. MAKE_META_INVALID(sii);
  785. goto free_out;
  786. } else
  787. ret = 0;
  788. atomic64_sub(sii->cluster_buffer.original_size ,&sbi->current_data_size);
  789. sii->cluster_buffer.original_size = 0;
  790. }
  791. pos = ALIGN(last->cinfo.offset + last->cinfo.size, SCFS_CLUSTER_ALIGN_BYTE);
  792. #ifdef SCFS_MULTI_THREAD_COMPRESSION
  793. sii->is_inserted_to_sii_list = 0;
  794. #endif
  795. if (sii->compressed) {
  796. #ifdef SCFS_MULTI_THREAD_COMPRESSION
  797. ret = scfs_get_comp_buffer(sii);
  798. if (ret < 0)
  799. goto free_out;
  800. #endif
  801. ret = scfs_write_cinfo(sii, lower_file, &pos);
  802. if (ret < 0)
  803. goto free_out;
  804. cf.footer_size = ret;
  805. }
  806. } else { //file not compressed
  807. pos = i_size_read(&sii->vfs_inode);
  808. /* Remove fake cluster_info */
  809. cinfo_entry = list_entry(sii->cinfo_list.prev, struct cinfo_entry, entry);
  810. list_del(&cinfo_entry->entry);
  811. kmem_cache_free(scfs_info_entry_list, cinfo_entry);
  812. atomic_sub(1, &sbi->total_cluster_count);
  813. }
  814. cf.footer_size += CF_SIZE;
  815. cf.cluster_size = sii->cluster_size;
  816. cf.comp_type = sii->comp_type;
  817. cf.original_file_size = i_size_read(&sii->vfs_inode);
  818. cf.magic = SCFS_MAGIC;
  819. ret = scfs_lower_write(lower_file, (char*)&cf, CF_SIZE , &pos);
  820. if (ret < 0) {
  821. SCFS_PRINT_ERROR("f:%s write fail, comp_footer, ret : %d",
  822. lower_file->f_dentry->d_name.name, ret);
  823. MAKE_META_INVALID(sii);
  824. goto free_out;
  825. } else
  826. ret = 0;
  827. lower_inode = lower_file->f_dentry->d_inode;
  828. /* file may have shrunk after append-write */
  829. if (pos < i_size_read(lower_inode)) {
  830. ia.ia_valid = ATTR_SIZE;
  831. ia.ia_size = pos;
  832. truncate_setsize(lower_inode, pos);
  833. mutex_lock(&lower_inode->i_mutex);
  834. ret = notify_change(lower_file->f_dentry, &ia);
  835. mutex_unlock(&lower_inode->i_mutex);
  836. if (ret) {
  837. SCFS_PRINT_ERROR("f:%s error in lower_truncate, %d",
  838. lower_file->f_dentry->d_name.name,
  839. ret);
  840. MAKE_META_INVALID(sii);
  841. goto free_out;
  842. }
  843. }
  844. if (cf.footer_size > CF_SIZE)
  845. MAKE_META_INVALID(sii);
  846. else
  847. sii->flags &= ~SCFS_DATA_COMPRESSABLE;
  848. free_out:
  849. if (!list_empty(&sii->cinfo_list)) {
  850. list_for_each_safe(head, tmp, &sii->cinfo_list) {
  851. cinfo_entry = list_entry(head, struct cinfo_entry, entry);
  852. list_del(&cinfo_entry->entry);
  853. kmem_cache_free(scfs_info_entry_list, cinfo_entry);
  854. profile_sub_kmcached(sizeof(struct cinfo_entry), sbi);
  855. }
  856. }
  857. mutex_unlock(&sii->cinfo_mutex);
  858. out:
  859. fput(lower_file);
  860. return ret;
  861. }
  862. struct cinfo_entry *scfs_alloc_cinfo_entry(unsigned int cluster_index,
  863. struct scfs_inode_info *sii)
  864. {
  865. struct cinfo_entry *new_entry = NULL;
  866. struct scfs_sb_info *sbi = SCFS_S(sii->vfs_inode.i_sb);
  867. new_entry = kmem_cache_zalloc(scfs_info_entry_list, GFP_KERNEL);
  868. if (!new_entry) {
  869. SCFS_PRINT_ERROR("kmem_cache_zalloc ERROR.\n");
  870. return NULL;
  871. }
  872. profile_add_kmcached(sizeof(struct cinfo_entry), sbi);
  873. new_entry->current_cluster_idx = cluster_index;
  874. list_add_tail(&new_entry->entry, &sii->cinfo_list);
  875. atomic_add(1, &sbi->total_cluster_count);
  876. return new_entry;
  877. }
  878. int scfs_get_cluster_from_lower(struct file *file, struct scfs_cinfo clust_info)
  879. {
  880. struct dentry *dentry = file->f_dentry;
  881. struct scfs_inode_info *sii = SCFS_I(dentry->d_inode);
  882. struct file *lower_file;
  883. loff_t pos = 0;
  884. int ret;
  885. ret = scfs_initialize_lower_file(dentry, &lower_file, O_RDONLY);
  886. if (ret) {
  887. SCFS_PRINT_ERROR("err in get_lower_file %s\n", dentry->d_name.name);
  888. return ret;
  889. }
  890. if (clust_info.size > sii->cluster_size) {
  891. SCFS_PRINT_ERROR("f:%s clust_info.size out of bounds, size %d\n",
  892. lower_file->f_path.dentry->d_name.name, clust_info.size);
  893. return -EINVAL;
  894. }
  895. pos = clust_info.offset;
  896. if (IS_COMPRESSABLE(sii) && clust_info.size < sii->cluster_size) {
  897. //TODO pass appropriate algorithm, retrieved from file meta
  898. loff_t i_size = i_size_read(&sii->vfs_inode);
  899. if(do_div(i_size, sii->cluster_size) == clust_info.size) {
  900. ret = scfs_lower_read(lower_file, sii->cluster_buffer.u_buffer,
  901. clust_info.size, &pos);
  902. if (ret < 0) {
  903. SCFS_PRINT_ERROR("f:%s read failed, size %d pos %d ret = %d\n",
  904. lower_file->f_path.dentry->d_name.name,
  905. clust_info.size, (int)pos, ret);
  906. goto out;
  907. }
  908. ret = 0;
  909. sii->cluster_buffer.original_size = clust_info.size;
  910. } else {
  911. size_t len = (size_t)sii->cluster_size;
  912. ret = scfs_lower_read(lower_file, sii->cluster_buffer.c_buffer,
  913. clust_info.size, &pos);
  914. if (ret < 0) {
  915. SCFS_PRINT_ERROR("f:%s read failed, size %d pos %d ret = %d\n",
  916. lower_file->f_path.dentry->d_name.name,
  917. clust_info.size, (int)pos, ret);
  918. goto out;
  919. }
  920. ret = 0;
  921. ret = scfs_decompress(sii->comp_type,
  922. sii->cluster_buffer.c_buffer,
  923. sii->cluster_buffer.u_buffer,
  924. clust_info.size,
  925. &len);
  926. if (ret) {
  927. SCFS_PRINT_ERROR("f:%s decompress lower cluster failed.\n",
  928. lower_file->f_path.dentry->d_name.name);
  929. goto out;
  930. }
  931. sii->cluster_buffer.original_size = len;
  932. }
  933. } else {
  934. ret = scfs_lower_read(lower_file, sii->cluster_buffer.u_buffer,
  935. clust_info.size, &pos);
  936. if (ret < 0) {
  937. SCFS_PRINT_ERROR("f:%s vfs_read failed, size %d pos %d ret = %d\n",
  938. lower_file->f_path.dentry->d_name.name,
  939. clust_info.size, (int) pos, ret);
  940. goto out;
  941. }
  942. ret = 0;
  943. sii->cluster_buffer.original_size = clust_info.size;
  944. }
  945. out:
  946. fput(lower_file);
  947. return ret;
  948. }
  949. int scfs_get_comp_buffer(struct scfs_inode_info *sii)
  950. {
  951. struct scfs_sb_info *sbi = SCFS_S(sii->vfs_inode.i_sb);
  952. if (!sii->cluster_buffer.u_buffer) {
  953. sii->cluster_buffer.u_page = alloc_pages(GFP_KERNEL, SCFS_MEMPOOL_ORDER + 1);
  954. if (!sii->cluster_buffer.u_page) {
  955. SCFS_PRINT_ERROR("u_page malloc failed\n");
  956. return -ENOMEM;
  957. }
  958. sii->cluster_buffer.u_buffer = page_address(sii->cluster_buffer.u_page);
  959. if (!sii->cluster_buffer.u_buffer)
  960. return -ENOMEM;
  961. atomic_add(1, &sbi->current_file_count);
  962. }
  963. if (!sii->cluster_buffer.c_buffer) {
  964. sii->cluster_buffer.c_page = alloc_pages(GFP_KERNEL, SCFS_MEMPOOL_ORDER + 1);
  965. if (!sii->cluster_buffer.c_page) {
  966. SCFS_PRINT_ERROR("c_page malloc failed\n");
  967. return -ENOMEM;
  968. }
  969. sii->cluster_buffer.c_buffer = page_address(sii->cluster_buffer.c_page);
  970. if (!sii->cluster_buffer.c_buffer)
  971. return -ENOMEM;
  972. }
  973. return 0;
  974. }
/*
 * Truncate an SCFS file. Only truncation to zero length is supported
 * (non-zero @size returns -EINVAL): the upper inode is shrunk, all pending
 * cluster-info entries are discarded, the size change is propagated to the
 * lower inode, and the lower file's initial metadata is rewritten so the
 * file starts over as an empty, freshly-initialized SCFS file.
 * Returns 0 on success or a negative errno.
 */
int scfs_truncate(struct dentry *dentry, loff_t size)
{
	struct iattr ia = { .ia_valid = ATTR_SIZE, .ia_size = size };
	struct inode *inode = dentry->d_inode;
	struct scfs_inode_info *sii = SCFS_I(inode);
	struct dentry *lower_dentry = scfs_lower_dentry(dentry);
	struct list_head *cluster_info, *tmp;
	struct cinfo_entry *info_index;
	int ret = 0;

	/* partial truncate is not implemented for this filesystem */
	if (size) {
		SCFS_PRINT_ERROR("only truncate to zero-size is allowd\n");
		return -EINVAL;
	}
	SCFS_PRINT("Truncate %s size to %lld\n", dentry->d_name.name, size);
	/* shrink upper inode size / page cache first */
	truncate_setsize(inode, ia.ia_size);
	/* drop every not-yet-flushed cluster-info entry for this inode */
	mutex_lock(&sii->cinfo_mutex);
	list_for_each_safe(cluster_info, tmp, &sii->cinfo_list) {
		info_index = list_entry(cluster_info,
			struct cinfo_entry, entry);
		list_del(&info_index->entry);
		kmem_cache_free(scfs_info_entry_list, info_index);
		profile_sub_kmcached(sizeof(struct cinfo_entry), SCFS_S(inode->i_sb));
	}
	mutex_unlock(&sii->cinfo_mutex);
	/* propagate ATTR_SIZE to the lower inode under its i_mutex */
	mutex_lock(&lower_dentry->d_inode->i_mutex);
	ret = notify_change(lower_dentry, &ia);
	mutex_unlock(&lower_dentry->d_inode->i_mutex);
	if (ret)
		return ret;
	/* rewrite initial SCFS metadata into the now-empty lower file */
	ret = scfs_initialize_file(dentry, inode);
	if (ret) {
		SCFS_PRINT_ERROR("f:%s err in initializing file, ret : %d\n",
			dentry->d_name.name, ret);
		MAKE_META_INVALID(sii);
		return ret;
	}
	/* reset cached cluster-info array and in-memory buffer state */
	if (sii->cinfo_array) {
		scfs_cinfo_free(sii, sii->cinfo_array);
		sii->cinfo_array = NULL;
	}
	sii->cinfo_array_size = 0;
	sii->upper_file_size = 0;
	sii->cluster_buffer.original_size = 0;
	sii->compressed = 0;
	/* re-derive per-file compressibility from the mount options */
	if (SCFS_S(inode->i_sb)->options.flags & SCFS_DATA_COMPRESSABLE)
		sii->flags |= SCFS_DATA_COMPRESSABLE;
	else
		sii->flags &= ~SCFS_DATA_COMPRESSABLE;
	CLEAR_META_INVALID(sii);
	return ret;
}
  1026. /* This function returns read count on lower fs, not 0 when succeed */
  1027. ssize_t scfs_lower_read(struct file *file, char *buf, size_t count, loff_t *pos)
  1028. {
  1029. int ret, read = 0, retry = 0;
  1030. mm_segment_t fs_save;
  1031. fs_save = get_fs();
  1032. while (read < count) {
  1033. set_fs(get_ds());
  1034. ret = vfs_read(file, buf + read, count - read, pos);
  1035. set_fs(fs_save);
  1036. if (ret < 0) {
  1037. if (ret == -EINTR || ret == -EAGAIN) {
  1038. SCFS_PRINT("still hungry, ret : %d, %lld/%lld\n",
  1039. ret, read, count - read);
  1040. continue;
  1041. }
  1042. SCFS_PRINT_ERROR("f:%s err in vfs_read, ret : %d\n",
  1043. file->f_path.dentry->d_name.name, ret);
  1044. return ret;
  1045. }
  1046. read += ret;
  1047. if (++retry > SCFS_IO_MAX_RETRY) {
  1048. SCFS_PRINT_ERROR("f:%s too many retries\n",
  1049. file->f_path.dentry->d_name.name);
  1050. return -EIO;
  1051. }
  1052. }
  1053. return read;
  1054. }
  1055. ssize_t scfs_lower_write(struct file *file, char *buf, size_t count, loff_t *pos)
  1056. {
  1057. int ret, written = 0, retry = 0;
  1058. mm_segment_t fs_save;
  1059. fs_save = get_fs();
  1060. while (written < count) {
  1061. set_fs(get_ds());
  1062. ret = vfs_write(file, buf + written, count - written, pos);
  1063. set_fs(fs_save);
  1064. if (ret < 0) {
  1065. if (ret == -EINTR || ret == -EAGAIN) {
  1066. SCFS_PRINT("still hungry, ret : %d, %lld/%lld\n",
  1067. ret, written, count - written);
  1068. continue;
  1069. }
  1070. SCFS_PRINT_ERROR("f:%s err in vfs_write, ret : %d\n",
  1071. file->f_path.dentry->d_name.name, ret);
  1072. return ret;
  1073. }
  1074. written += ret;
  1075. if (++retry > SCFS_IO_MAX_RETRY) {
  1076. SCFS_PRINT_ERROR("f:%s too many retries\n",
  1077. file->f_path.dentry->d_name.name);
  1078. return -EIO;
  1079. }
  1080. }
  1081. return written;
  1082. }
  1083. /**
  1084. * inode_info_init_once
  1085. *
  1086. * Initializes the scfs_inode_info_cache when it is created
  1087. */
  1088. void
  1089. inode_info_init_once(void *vptr)
  1090. {
  1091. struct scfs_inode_info *sii = (struct scfs_inode_info *)vptr;
  1092. inode_init_once(&sii->vfs_inode);
  1093. }
/* Descriptor table for every kmem cache SCFS creates at module init;
 * scfs_init_kmem_caches()/scfs_free_kmem_caches() iterate over it. */
static struct scfs_cache_info {
	struct kmem_cache **cache;	/* where the created cache pointer is stored */
	const char *name;		/* slab cache name shown in /proc/slabinfo */
	size_t size;			/* size of one cached object */
	void (*ctor)(void *obj);	/* optional one-time object constructor */
} scfs_cache_infos[] = {
	{
		.cache = &scfs_file_info_cache,
		.name = "scfs_file_cache",
		.size = sizeof(struct scfs_file_info),
	},
	{
		.cache = &scfs_dentry_info_cache,
		.name = "scfs_dentry_info_cache",
		.size = sizeof(struct scfs_dentry_info),
	},
	{
		.cache = &scfs_inode_info_cache,
		.name = "scfs_inode_cache",
		.size = sizeof(struct scfs_inode_info),
		/* embedded VFS inode must be initialized exactly once */
		.ctor = inode_info_init_once,
	},
	{
		.cache = &scfs_sb_info_cache,
		.name = "scfs_sb_cache",
		.size = sizeof(struct scfs_sb_info),
	},
	{
		.cache = &scfs_info_entry_list,
		.name = "scfs_info_entry_list",
		.size = sizeof(struct cinfo_entry),
	},
#ifdef SCFS_MULTI_THREAD_COMPRESSION
	{
		.cache = &scfs_cbm_cache,
		.name = "scfs_cbm_cache",
		.size = sizeof(struct scfs_cluster_buffer_mtc),
	},
#endif
};
  1134. void scfs_free_kmem_caches(void)
  1135. {
  1136. int i;
  1137. for (i = 0; i < ARRAY_SIZE(scfs_cache_infos); i++) {
  1138. struct scfs_cache_info *info;
  1139. info = &scfs_cache_infos[i];
  1140. if (*(info->cache))
  1141. kmem_cache_destroy(*(info->cache));
  1142. }
  1143. }
  1144. /**
  1145. * scfs_init_kmem_caches
  1146. *
  1147. * Returns zero on success; non-zero otherwise
  1148. */
  1149. int scfs_init_kmem_caches(void)
  1150. {
  1151. int i;
  1152. for (i = 0; i < ARRAY_SIZE(scfs_cache_infos); i++) {
  1153. struct scfs_cache_info *info;
  1154. info = &scfs_cache_infos[i];
  1155. *(info->cache) = kmem_cache_create(info->name, info->size,
  1156. 0, SLAB_HWCACHE_ALIGN, info->ctor);
  1157. if (!*(info->cache)) {
  1158. scfs_free_kmem_caches();
  1159. SCFS_PRINT("kmem_cache_create failed\n",
  1160. info->name);
  1161. return -ENOMEM;
  1162. }
  1163. }
  1164. return 0;
  1165. }
  1166. void *scfs_cinfo_alloc(struct scfs_inode_info *sii, unsigned long size)
  1167. {
  1168. SCFS_PRINT("cinfo_alloc, size : %d\n", size);
  1169. if (size >= PAGE_SIZE) {
  1170. sii->flags |= SCFS_CINFO_OVER_PAGESIZE;
  1171. profile_add_vmalloced(PAGE_ALIGN(size) + PAGE_SIZE,
  1172. SCFS_S(sii->vfs_inode.i_sb));
  1173. return vmalloc(size);
  1174. } else {
  1175. sii->flags &= ~SCFS_CINFO_OVER_PAGESIZE;
  1176. profile_add_kmalloced(size,
  1177. SCFS_S(sii->vfs_inode.i_sb));
  1178. return kmalloc(size, GFP_KERNEL);
  1179. }
  1180. }
  1181. void scfs_cinfo_free(struct scfs_inode_info *sii, const void *addr)
  1182. {
  1183. SCFS_PRINT("cinfo_free, size : %d\n", sii->cinfo_array_size);
  1184. if (sii->flags & SCFS_CINFO_OVER_PAGESIZE) {
  1185. profile_sub_vmalloced(PAGE_ALIGN(sii->cinfo_array_size) + PAGE_SIZE,
  1186. SCFS_S(sii->vfs_inode.i_sb));
  1187. vfree(addr);
  1188. } else {
  1189. profile_sub_kmalloced(sii->cinfo_array_size, SCFS_S(sii->vfs_inode.i_sb));
  1190. kfree(addr);
  1191. }
  1192. }