/*
 * Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/************************************************************************/
/*                                                                      */
/*  @PROJECT : exFAT & FAT12/16/32 File System                          */
/*  @FILE    : dfr.c                                                    */
/*  @PURPOSE : Defragmentation support for SDFAT32                      */
/*                                                                      */
/*----------------------------------------------------------------------*/
/*  NOTES                                                               */
/*                                                                      */
/*                                                                      */
/************************************************************************/

#include <linux/version.h>
#include <linux/list.h>
#include <linux/blkdev.h>

#include "sdfat.h"
#include "core.h"
#include "amap_smart.h"

#ifdef CONFIG_SDFAT_DFR
/**
 * @fn		defrag_get_info
 * @brief	get HW params for defrag daemon
 * @return	0 on success, -errno otherwise
 * @param	sb	super block
 * @param	arg	defrag info arguments
 * @remark	protected by super_block
 */
int
defrag_get_info(
	IN struct super_block *sb,
	OUT struct defrag_info_arg *arg)
{
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;

	if (!arg)
		return -EINVAL;

	arg->sec_sz = sb->s_blocksize;
	arg->clus_sz = fsi->cluster_size;
	arg->total_sec = fsi->num_sectors;
	arg->fat_offset_sec = fsi->FAT1_start_sector;
	arg->fat_sz_sec = fsi->num_FAT_sectors;
	arg->n_fat = (fsi->FAT1_start_sector == fsi->FAT2_start_sector) ? 1 : 2;

	arg->sec_per_au = amap->option.au_size;
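	/*
	 * NOTE: au_align_factor % au_size appears to be the offset (in
	 * sectors) of the partition start from an AU boundary; it is
	 * reported to the daemon as "hidden" sectors for AU-aligned I/O.
	 */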
	arg->hidden_sectors = amap->option.au_align_factor % amap->option.au_size;

	return 0;
}
static int
__defrag_scan_dir(
	IN struct super_block *sb,
	IN DOS_DENTRY_T *dos_ep,
	IN loff_t i_pos,
	OUT struct defrag_trav_arg *arg)
{
	FS_INFO_T *fsi = NULL;
	UNI_NAME_T uniname;
	unsigned int type = 0, start_clus = 0;
	int err = -EPERM;

	/* Check params */
	ERR_HANDLE2((!sb || !dos_ep || !i_pos || !arg), err, -EINVAL);
	fsi = &(SDFAT_SB(sb)->fsi);

	/* Get given entry's type */
	type = fsi->fs_func->get_entry_type((DENTRY_T *) dos_ep);

	/* Check dos_ep */
	if (!strncmp(dos_ep->name, DOS_CUR_DIR_NAME, DOS_NAME_LENGTH)) {
		;
	} else if (!strncmp(dos_ep->name, DOS_PAR_DIR_NAME, DOS_NAME_LENGTH)) {
		;
	} else if ((type == TYPE_DIR) || (type == TYPE_FILE)) {
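		/*
		 * A FAT32 dentry stores the start cluster split across two
		 * little-endian 16-bit fields (start_clu_hi/start_clu_lo);
		 * reassemble them into a 32-bit cluster number.
		 */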
		/* Set start_clus */
		SET32_HI(start_clus, le16_to_cpu(dos_ep->start_clu_hi));
		SET32_LO(start_clus, le16_to_cpu(dos_ep->start_clu_lo));
		arg->start_clus = start_clus;

		/* Set type & i_pos */
		if (type == TYPE_DIR)
			arg->type = DFR_TRAV_TYPE_DIR;
		else
			arg->type = DFR_TRAV_TYPE_FILE;

		arg->i_pos = i_pos;

		/* Set name */
		memset(&uniname, 0, sizeof(UNI_NAME_T));
		get_uniname_from_dos_entry(sb, dos_ep, &uniname, 0x1);
		/* FIXME: check whether the size of arg->name is sufficient */
		nls_uni16s_to_vfsname(sb, &uniname,
			arg->name, sizeof(arg->name));

		err = 0;
	/* End case */
	} else if (type == TYPE_UNUSED) {
		err = -ENOENT;
	} else {
		;
	}

error:
	return err;
}
/**
 * @fn		defrag_scan_dir
 * @brief	scan given directory
 * @return	0 on success, -errno otherwise
 * @param	sb	super block
 * @param	args	traverse args
 * @remark	protected by inode_lock, super_block and volume lock
 */
int
defrag_scan_dir(
	IN struct super_block *sb,
	INOUT struct defrag_trav_arg *args)
{
	struct sdfat_sb_info *sbi = NULL;
	FS_INFO_T *fsi = NULL;
	struct defrag_trav_header *header = NULL;
	DOS_DENTRY_T *dos_ep;
	CHAIN_T chain;
	int dot_found = 0, args_idx = DFR_TRAV_HEADER_IDX + 1, clus = 0, index = 0;
	int err = 0, j = 0;

	/* Check params */
	ERR_HANDLE2((!sb || !args), err, -EINVAL);
	sbi = SDFAT_SB(sb);
	fsi = &(sbi->fsi);
	header = (struct defrag_trav_header *) args;

	/* Exceptional case for ROOT */
	if (header->i_pos == DFR_TRAV_ROOT_IPOS) {
		header->start_clus = fsi->root_dir;
		dfr_debug("IOC_DFR_TRAV for ROOT: start_clus %08x", header->start_clus);
		dot_found = 1;
	}

	chain.dir = header->start_clus;
	chain.size = 0;
	chain.flags = 0;

	/* Check if this is directory */
	if (!dot_found) {
		FAT32_CHECK_CLUSTER(fsi, chain.dir, err);
		ERR_HANDLE(err);
		dos_ep = (DOS_DENTRY_T *) get_dentry_in_dir(sb, &chain, 0, NULL);
		ERR_HANDLE2(!dos_ep, err, -EIO);

		if (strncmp(dos_ep->name, DOS_CUR_DIR_NAME, DOS_NAME_LENGTH)) {
			err = -EINVAL;
			dfr_err("Scan: Not a directory, err %d", err);
			goto error;
		}
	}
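	/*
	 * A single IOC_DFR_TRAV reply is limited to one page of
	 * defrag_trav_arg entries. If the previous scan stopped early, it
	 * saved a resume hint (dfr_hint_clus/dfr_hint_idx); fast-forward
	 * along the directory's FAT chain to the saved dentry index.
	 */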
	/* For more-scan case */
	if ((header->stat == DFR_TRAV_STAT_MORE) &&
		(header->start_clus == sbi->dfr_hint_clus) &&
		(sbi->dfr_hint_idx > 0)) {

		index = sbi->dfr_hint_idx;
		for (j = 0; j < (sbi->dfr_hint_idx / fsi->dentries_per_clu); j++) {
			/* Follow FAT-chain */
			FAT32_CHECK_CLUSTER(fsi, chain.dir, err);
			ERR_HANDLE(err);
			err = fat_ent_get(sb, chain.dir, &(chain.dir));
			ERR_HANDLE(err);

			if (!IS_CLUS_EOF(chain.dir)) {
				clus++;
				index -= fsi->dentries_per_clu;
			} else {
				/* This directory was modified. Stop scanning. */
				err = -EINVAL;
				dfr_err("Scan: SCAN_MORE failed, err %d", err);
				goto error;
			}
		}

	/* For first-scan case */
	} else {
		clus = 0;
		index = 0;
	}
scan_fat_chain:
	/* Scan given directory and get info of children */
	for ( ; index < fsi->dentries_per_clu; index++) {
		DOS_DENTRY_T *dos_ep = NULL;
		loff_t i_pos = 0;

		/* Get dos_ep */
		FAT32_CHECK_CLUSTER(fsi, chain.dir, err);
		ERR_HANDLE(err);
		dos_ep = (DOS_DENTRY_T *) get_dentry_in_dir(sb, &chain, index, NULL);
		ERR_HANDLE2(!dos_ep, err, -EIO);

		/* Make i_pos for this entry */
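		/*
		 * i_pos packs the parent directory's start cluster into the
		 * upper 32 bits and the entry's linear index within the
		 * directory into the lower 32 bits; the validate/update
		 * paths unpack it again with GET64_HI()/GET64_LO().
		 */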
		SET64_HI(i_pos, header->start_clus);
		SET64_LO(i_pos, clus * fsi->dentries_per_clu + index);

		err = __defrag_scan_dir(sb, dos_ep, i_pos, &args[args_idx]);
		if (!err) {
			/* More-scan case */
			if (++args_idx >= (PAGE_SIZE / sizeof(struct defrag_trav_arg))) {
				sbi->dfr_hint_clus = header->start_clus;
				sbi->dfr_hint_idx = clus * fsi->dentries_per_clu + index + 1;

				header->stat = DFR_TRAV_STAT_MORE;
				header->nr_entries = args_idx;
				goto error;
			}
		/* Error case */
		} else if (err == -EINVAL) {
			sbi->dfr_hint_clus = sbi->dfr_hint_idx = 0;
			dfr_err("Scan: err %d", err);
			goto error;
		/* End case */
		} else if (err == -ENOENT) {
			sbi->dfr_hint_clus = sbi->dfr_hint_idx = 0;
			err = 0;
			goto done;
		} else {
			/* DO NOTHING */
		}
		err = 0;
	}

	/* Follow FAT-chain */
	FAT32_CHECK_CLUSTER(fsi, chain.dir, err);
	ERR_HANDLE(err);
	err = fat_ent_get(sb, chain.dir, &(chain.dir));
	ERR_HANDLE(err);

	if (!IS_CLUS_EOF(chain.dir)) {
		index = 0;
		clus++;
		goto scan_fat_chain;
	}

done:
	/* Update header */
	header->stat = DFR_TRAV_STAT_DONE;
	header->nr_entries = args_idx;

error:
	return err;
}
static int
__defrag_validate_cluster_prev(
	IN struct super_block *sb,
	IN struct defrag_chunk_info *chunk)
{
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	CHAIN_T dir;
	DENTRY_T *ep = NULL;
	unsigned int entry = 0, clus = 0;
	int err = 0;

	if (chunk->prev_clus == 0) {
		/* For the first cluster of a file */
		dir.dir = GET64_HI(chunk->i_pos);
		dir.flags = 0x1;	/* Assume non-continuous */
		entry = GET64_LO(chunk->i_pos);

		FAT32_CHECK_CLUSTER(fsi, dir.dir, err);
		ERR_HANDLE(err);
		ep = get_dentry_in_dir(sb, &dir, entry, NULL);
		if (!ep) {
			err = -EPERM;
			goto error;
		}

		/* should call fat_get_entry_clu0(ep) */
		clus = fsi->fs_func->get_entry_clu0(ep);
		if (clus != chunk->d_clus) {
			err = -ENXIO;
			goto error;
		}
	} else {
		/* Normal case */
		FAT32_CHECK_CLUSTER(fsi, chunk->prev_clus, err);
		ERR_HANDLE(err);
		err = fat_ent_get(sb, chunk->prev_clus, &clus);
		if (err)
			goto error;
		if (chunk->d_clus != clus)
			err = -ENXIO;
	}

error:
	return err;
}
static int
__defrag_validate_cluster_next(
	IN struct super_block *sb,
	IN struct defrag_chunk_info *chunk)
{
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	unsigned int clus = 0;
	int err = 0;

	/* Check next_clus */
	FAT32_CHECK_CLUSTER(fsi, (chunk->d_clus + chunk->nr_clus - 1), err);
	ERR_HANDLE(err);
	err = fat_ent_get(sb, (chunk->d_clus + chunk->nr_clus - 1), &clus);
	if (err)
		goto error;
	if (chunk->next_clus != (clus & FAT32_EOF))
		err = -ENXIO;

error:
	return err;
}
/**
 * @fn		__defrag_check_au
 * @brief	check if this AU is in use
 * @return	0 if idle, 1 if busy
 * @param	sb	super block
 * @param	clus	physical cluster num
 * @param	limit	# of used clusters from daemon
 */
static int
__defrag_check_au(
	struct super_block *sb,
	u32 clus,
	u32 limit)
{
	unsigned int nr_free = amap_get_freeclus(sb, clus);

#if defined(CONFIG_SDFAT_DFR_DEBUG) && defined(CONFIG_SDFAT_DBG_MSG)
	if (nr_free < limit) {
		AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
		AU_INFO_T *au = GET_AU(amap, i_AU_of_CLU(amap, clus));

		dfr_debug("AU[%d] nr_free %d, limit %d", au->idx, nr_free, limit);
	}
#endif
	return ((nr_free < limit) ? 1 : 0);
}
/**
 * @fn		defrag_validate_cluster
 * @brief	validate cluster info of given chunk
 * @return	0 on success, -errno otherwise
 * @param	inode	inode of given chunk
 * @param	chunk	given chunk
 * @param	skip_prev	flag to skip checking previous cluster info
 * @remark	protected by super_block and volume lock
 */
int
defrag_validate_cluster(
	IN struct inode *inode,
	IN struct defrag_chunk_info *chunk,
	IN int skip_prev)
{
	struct super_block *sb = inode->i_sb;
	FILE_ID_T *fid = &(SDFAT_I(inode)->fid);
	unsigned int clus = 0;
	int err = 0, i = 0;

	/* If this inode is unlink-ed, skip it */
	if (fid->dir.dir == DIR_DELETED)
		return -ENOENT;

	/* Skip working-AU */
	err = amap_check_working(sb, chunk->d_clus);
	if (err)
		return -EBUSY;

	/* Check # of free_clus of belonged AU */
	err = __defrag_check_au(inode->i_sb, chunk->d_clus, CLUS_PER_AU(sb) - chunk->au_clus);
	if (err)
		return -EINVAL;

	/* Check chunk's clusters */
	for (i = 0; i < chunk->nr_clus; i++) {
		err = fsapi_map_clus(inode, chunk->f_clus + i, &clus, ALLOC_NOWHERE);
		if (err || (chunk->d_clus + i != clus)) {
			if (!err)
				err = -ENXIO;
			goto error;
		}
	}

	/* Check next_clus */
	err = __defrag_validate_cluster_next(sb, chunk);
	ERR_HANDLE(err);

	if (!skip_prev) {
		/* Check prev_clus */
		err = __defrag_validate_cluster_prev(sb, chunk);
		ERR_HANDLE(err);
	}

error:
	return err;
}
/**
 * @fn		defrag_reserve_clusters
 * @brief	reserve clusters for defrag
 * @return	0 on success, -errno otherwise
 * @param	sb	super block
 * @param	nr_clus	# of clusters to reserve
 * @remark	protected by super_block and volume lock
 */
int
defrag_reserve_clusters(
	INOUT struct super_block *sb,
	IN int nr_clus)
{
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
	FS_INFO_T *fsi = &(sbi->fsi);

	if (!(sbi->options.improved_allocation & SDFAT_ALLOC_DELAY))
		/* Nothing to do */
		return 0;
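	/*
	 * NOTE: nr_clus may be negative (releasing a prior reservation).
	 * The "- 2" accounts for FAT entries 0 and 1, which do not map to
	 * data clusters.
	 */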
	/* Check error case */
	if (fsi->used_clusters + fsi->reserved_clusters + nr_clus >= fsi->num_clusters - 2) {
		return -ENOSPC;
	} else if (fsi->reserved_clusters + nr_clus < 0) {
		dfr_err("Reserve count: reserved_clusters %d, nr_clus %d",
			fsi->reserved_clusters, nr_clus);
		BUG_ON(fsi->reserved_clusters + nr_clus < 0);
	}

	sbi->dfr_reserved_clus += nr_clus;
	fsi->reserved_clusters += nr_clus;

	return 0;
}
/**
 * @fn		defrag_mark_ignore
 * @brief	mark corresponding AU to be ignored
 * @return	0 on success, -errno otherwise
 * @param	sb	super block
 * @param	clus	given cluster num
 * @remark	protected by super_block
 */
int
defrag_mark_ignore(
	INOUT struct super_block *sb,
	IN unsigned int clus)
{
	int err = 0;

	if (SDFAT_SB(sb)->options.improved_allocation & SDFAT_ALLOC_SMART)
		err = amap_mark_ignore(sb, clus);

	if (err)
		dfr_debug("err %d", err);
	return err;
}

/**
 * @fn		defrag_unmark_ignore_all
 * @brief	unmark all ignored AUs
 * @return	void
 * @param	sb	super block
 * @remark	protected by super_block
 */
void
defrag_unmark_ignore_all(struct super_block *sb)
{
	if (SDFAT_SB(sb)->options.improved_allocation & SDFAT_ALLOC_SMART)
		amap_unmark_ignore_all(sb);
}
/**
 * @fn		defrag_map_cluster
 * @brief	get_block function for defrag dests
 * @return	0 on success, -errno otherwise
 * @param	inode	inode
 * @param	clu_offset	logical cluster offset
 * @param	clu	mapped cluster (physical)
 * @remark	protected by super_block and volume lock
 */
int
defrag_map_cluster(
	struct inode *inode,
	unsigned int clu_offset,
	unsigned int *clu)
{
	struct super_block *sb = inode->i_sb;
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
#ifdef CONFIG_SDFAT_DFR_PACKING
	AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
#endif
	FILE_ID_T *fid = &(SDFAT_I(inode)->fid);
	struct defrag_info *ino_dfr = &(SDFAT_I(inode)->dfr_info);
	struct defrag_chunk_info *chunk = NULL;
	CHAIN_T new_clu;
	int i = 0, nr_new = 0, err = 0;

	/* Get corresponding chunk */
	for (i = 0; i < ino_dfr->nr_chunks; i++) {
		chunk = &(ino_dfr->chunks[i]);

		if ((chunk->f_clus <= clu_offset) && (clu_offset < chunk->f_clus + chunk->nr_clus)) {
			/* For already allocated new_clus */
			if (sbi->dfr_new_clus[chunk->new_idx + clu_offset - chunk->f_clus]) {
				*clu = sbi->dfr_new_clus[chunk->new_idx + clu_offset - chunk->f_clus];
				return 0;
			}
			break;
		}
	}
	BUG_ON(!chunk);

	fscore_set_vol_flags(sb, VOL_DIRTY, 0);

	new_clu.dir = CLUS_EOF;
	new_clu.size = 0;
	new_clu.flags = fid->flags;

	/* Allocate new cluster */
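	/*
	 * Packing heuristic: when clean AUs are scarce (clean-AU ratio at or
	 * below DFR_DEFAULT_PACKING_RATIO / DFR_FULL_RATIO), pack cold data
	 * into partially used AUs instead of consuming a clean AU.
	 */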
#ifdef CONFIG_SDFAT_DFR_PACKING
	if (amap->n_clean_au * DFR_FULL_RATIO <= amap->n_au * DFR_DEFAULT_PACKING_RATIO)
		err = fsi->fs_func->alloc_cluster(sb, 1, &new_clu, ALLOC_COLD_PACKING);
	else
		err = fsi->fs_func->alloc_cluster(sb, 1, &new_clu, ALLOC_COLD_ALIGNED);
#else
	err = fsi->fs_func->alloc_cluster(sb, 1, &new_clu, ALLOC_COLD_ALIGNED);
#endif
	if (err) {
		dfr_err("Map: alloc_cluster failed, err %d", err);
		return err;
	}
	/* Decrease reserved cluster count */
	defrag_reserve_clusters(sb, -1);

	/* Add new_clus info in ino_dfr */
	sbi->dfr_new_clus[chunk->new_idx + clu_offset - chunk->f_clus] = new_clu.dir;

	/* Make FAT-chain for new_clus */
	for (i = 0; i < chunk->nr_clus; i++) {
		if (!sbi->dfr_new_clus[chunk->new_idx + i])
			break;
		nr_new++;
	}
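	/*
	 * Link the new clusters into a chain only once every destination
	 * cluster of this chunk has been allocated; the tail is connected
	 * to chunk->next_clus later, in defrag_update_fat_next().
	 */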
	if (nr_new == chunk->nr_clus) {
		for (i = 0; i < chunk->nr_clus - 1; i++) {
			FAT32_CHECK_CLUSTER(fsi, sbi->dfr_new_clus[chunk->new_idx + i], err);
			BUG_ON(err);
			if (fat_ent_set(sb,
				sbi->dfr_new_clus[chunk->new_idx + i],
				sbi->dfr_new_clus[chunk->new_idx + i + 1]))
				return -EIO;
		}
	}

	*clu = new_clu.dir;
	return 0;
}
/**
 * @fn		defrag_writepage_end_io
 * @brief	check WB status of requested page
 * @return	void
 * @param	page	page
 */
void
defrag_writepage_end_io(
	INOUT struct page *page)
{
	struct super_block *sb = page->mapping->host->i_sb;
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
	struct defrag_info *ino_dfr = &(SDFAT_I(page->mapping->host)->dfr_info);
	unsigned int clus_start = 0, clus_end = 0;
	int i = 0;

	/* Check if this inode is on defrag */
	if (atomic_read(&ino_dfr->stat) != DFR_INO_STAT_REQ)
		return;
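	/*
	 * Map this page to its logical cluster. Each chunk tracks per-page
	 * writeback in the dfr_page_wb bitmap (one bit per page of a
	 * cluster); clear this page's bit as its writeback completes.
	 */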
	clus_start = page->index / PAGES_PER_CLUS(sb);
	clus_end = clus_start + 1;

	/* Check each chunk in given inode */
	for (i = 0; i < ino_dfr->nr_chunks; i++) {
		struct defrag_chunk_info *chunk = &(ino_dfr->chunks[i]);
		unsigned int chunk_start = 0, chunk_end = 0;

		chunk_start = chunk->f_clus;
		chunk_end = chunk->f_clus + chunk->nr_clus;

		if ((clus_start >= chunk_start) && (clus_end <= chunk_end)) {
			int off = clus_start - chunk_start;

			clear_bit((page->index & (PAGES_PER_CLUS(sb) - 1)),
				(volatile unsigned long *)&(sbi->dfr_page_wb[chunk->new_idx + off]));
		}
	}
}
/**
 * @fn		__defrag_check_wb
 * @brief	check if WB for given chunk completed
 * @return	0 on success, -errno otherwise
 * @param	sbi	super block info
 * @param	chunk	given chunk
 */
static int
__defrag_check_wb(
	IN struct sdfat_sb_info *sbi,
	IN struct defrag_chunk_info *chunk)
{
	int err = 0, wb_i = 0, i = 0, nr_new = 0;

	if (!sbi || !chunk)
		return -EINVAL;

	/* Check WB complete status first */
	for (wb_i = 0; wb_i < chunk->nr_clus; wb_i++) {
		if (atomic_read((atomic_t *)&(sbi->dfr_page_wb[chunk->new_idx + wb_i]))) {
			err = -EBUSY;
			break;
		}
	}

	/*
	 * Check NEW_CLUS status.
	 * writepage_end_io cannot check whole WB complete status,
	 * so we need to check NEW_CLUS status.
	 */
	for (i = 0; i < chunk->nr_clus; i++)
		if (sbi->dfr_new_clus[chunk->new_idx + i])
			nr_new++;

	if (nr_new == chunk->nr_clus) {
		err = 0;
		if ((wb_i != chunk->nr_clus) && (wb_i != chunk->nr_clus - 1))
			dfr_debug("submit_fullpage_bio() called on a page (nr_clus %d, wb_i %d)",
				chunk->nr_clus, wb_i);

		BUG_ON(nr_new > chunk->nr_clus);
	} else {
		dfr_debug("nr_new %d, nr_clus %d", nr_new, chunk->nr_clus);
		err = -EBUSY;
	}

	/* Update chunk's state */
	if (!err)
		chunk->stat |= DFR_CHUNK_STAT_WB;

	return err;
}
static void
__defrag_check_fat_old(
	IN struct super_block *sb,
	IN struct inode *inode,
	IN struct defrag_chunk_info *chunk)
{
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	unsigned int clus = 0;
	int err = 0, idx = 0, max_idx = 0;

	/* Get start_clus */
	clus = SDFAT_I(inode)->fid.start_clu;

	/* Follow FAT-chain */
	/* num_clusters() = ceil(byte size / cluster size) */
#define num_clusters(val) ((val) ? (s32)((val - 1) >> fsi->cluster_size_bits) + 1 : 0)

	max_idx = num_clusters(SDFAT_I(inode)->i_size_ondisk);
	for (idx = 0; idx < max_idx; idx++) {
		FAT32_CHECK_CLUSTER(fsi, clus, err);
		ERR_HANDLE(err);
		err = fat_ent_get(sb, clus, &clus);
		ERR_HANDLE(err);

		if ((idx < max_idx - 1) && (IS_CLUS_EOF(clus) || IS_CLUS_FREE(clus))) {
			dfr_err("FAT: inode %p, max_idx %d, idx %d, clus %08x, "
				"f_clus %d, nr_clus %d", inode, max_idx,
				idx, clus, chunk->f_clus, chunk->nr_clus);
			BUG_ON(idx < max_idx - 1);
			goto error;
		}
	}

error:
	return;
}
static void
__defrag_check_fat_new(
	IN struct super_block *sb,
	IN struct inode *inode,
	IN struct defrag_chunk_info *chunk)
{
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	unsigned int clus = 0;
	int i = 0, err = 0;

	/* Check start of FAT-chain */
	if (chunk->prev_clus) {
		FAT32_CHECK_CLUSTER(fsi, chunk->prev_clus, err);
		BUG_ON(err);
		err = fat_ent_get(sb, chunk->prev_clus, &clus);
		BUG_ON(err);
	} else {
		clus = SDFAT_I(inode)->fid.start_clu;
	}
	if (sbi->dfr_new_clus[chunk->new_idx] != clus) {
		dfr_err("FAT: inode %p, start_clus %08x, read_clus %08x",
			inode, sbi->dfr_new_clus[chunk->new_idx], clus);
		err = -EIO;
		goto error;
	}

	/* Check inside of FAT-chain */
	if (chunk->nr_clus > 1) {
		for (i = 0; i < chunk->nr_clus - 1; i++) {
			FAT32_CHECK_CLUSTER(fsi, sbi->dfr_new_clus[chunk->new_idx + i], err);
			BUG_ON(err);
			err = fat_ent_get(sb, sbi->dfr_new_clus[chunk->new_idx + i], &clus);
			BUG_ON(err);
			if (sbi->dfr_new_clus[chunk->new_idx + i + 1] != clus) {
				dfr_err("FAT: inode %p, new_clus %08x, read_clus %08x",
					inode, sbi->dfr_new_clus[chunk->new_idx + i + 1], clus);
				err = -EIO;
				goto error;
			}
		}
		clus = 0;
	}

	/* Check end of FAT-chain */
	FAT32_CHECK_CLUSTER(fsi, sbi->dfr_new_clus[chunk->new_idx + chunk->nr_clus - 1], err);
	BUG_ON(err);
	err = fat_ent_get(sb, sbi->dfr_new_clus[chunk->new_idx + chunk->nr_clus - 1], &clus);
	BUG_ON(err);
	if ((chunk->next_clus & 0x0FFFFFFF) != (clus & 0x0FFFFFFF)) {
		dfr_err("FAT: inode %p, next_clus %08x, read_clus %08x", inode, chunk->next_clus, clus);
		err = -EIO;
	}

error:
	BUG_ON(err);
}
/**
 * @fn		__defrag_update_dirent
 * @brief	update DIR entry for defrag req
 * @return	void
 * @param	sb	super block
 * @param	chunk	given chunk
 */
static void
__defrag_update_dirent(
	struct super_block *sb,
	struct defrag_chunk_info *chunk)
{
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
	FS_INFO_T *fsi = &SDFAT_SB(sb)->fsi;
	CHAIN_T dir;
	DOS_DENTRY_T *dos_ep;
	unsigned int entry = 0;
	unsigned long long sector = 0;
	unsigned short hi = 0, lo = 0;
	int err = 0;

	dir.dir = GET64_HI(chunk->i_pos);
	dir.flags = 0x1;	/* Assume non-continuous */
	entry = GET64_LO(chunk->i_pos);

	FAT32_CHECK_CLUSTER(fsi, dir.dir, err);
	BUG_ON(err);
	dos_ep = (DOS_DENTRY_T *) get_dentry_in_dir(sb, &dir, entry, &sector);

	hi = GET32_HI(sbi->dfr_new_clus[chunk->new_idx]);
	lo = GET32_LO(sbi->dfr_new_clus[chunk->new_idx]);
	dos_ep->start_clu_hi = cpu_to_le16(hi);
	dos_ep->start_clu_lo = cpu_to_le16(lo);

	dcache_modify(sb, sector);
}
/**
 * @fn		defrag_update_fat_prev
 * @brief	update FAT chain for defrag requests
 * @return	void
 * @param	sb	super block
 * @param	force	flag to force FAT update
 * @remark	protected by super_block and volume lock
 */
void
defrag_update_fat_prev(
	struct super_block *sb,
	int force)
{
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
	FS_INFO_T *fsi = &(sbi->fsi);
	struct defrag_info *sb_dfr = &sbi->dfr_info, *ino_dfr = NULL;
	int skip = 0, done = 0;

	/* Check if FS_ERROR occurred */
	if (sb->s_flags & MS_RDONLY) {
		dfr_err("RDONLY partition (err %d)", -EROFS);
		goto out;
	}
	list_for_each_entry(ino_dfr, &sb_dfr->entry, entry) {
		struct inode *inode = &(container_of(ino_dfr, struct sdfat_inode_info, dfr_info)->vfs_inode);
		struct sdfat_inode_info *ino_info = SDFAT_I(inode);
		struct defrag_chunk_info *chunk_prev = NULL;
		int i = 0, j = 0;

		mutex_lock(&ino_dfr->lock);
		BUG_ON(atomic_read(&ino_dfr->stat) != DFR_INO_STAT_REQ);
		for (i = 0; i < ino_dfr->nr_chunks; i++) {
			struct defrag_chunk_info *chunk = NULL;
			int err = 0;

			chunk = &(ino_dfr->chunks[i]);
			BUG_ON(!chunk);

			/* Do nothing for already passed chunk */
			if (chunk->stat == DFR_CHUNK_STAT_PASS) {
				done++;
				continue;
			}

			/* Handle error case */
			if (chunk->stat == DFR_CHUNK_STAT_ERR) {
				err = -EINVAL;
				goto error;
			}

			/* Double-check clusters */
			if (chunk_prev &&
				(chunk->f_clus == chunk_prev->f_clus + chunk_prev->nr_clus) &&
				(chunk_prev->stat == DFR_CHUNK_STAT_PASS)) {

				err = defrag_validate_cluster(inode, chunk, 1);

				/* Handle continuous chunks in a file */
				if (!err) {
					chunk->prev_clus =
						sbi->dfr_new_clus[chunk_prev->new_idx + chunk_prev->nr_clus - 1];
					dfr_debug("prev->f_clus %d, prev->nr_clus %d, chunk->f_clus %d",
						chunk_prev->f_clus, chunk_prev->nr_clus, chunk->f_clus);
				}
			} else {
				err = defrag_validate_cluster(inode, chunk, 0);
			}

			if (err) {
				dfr_err("Cluster validation: inode %p, chunk->f_clus %d, err %d",
					inode, chunk->f_clus, err);
				goto error;
			}

			/*
			 * Skip update_fat_prev if WB or update_fat_next not completed.
			 * Go to error case if FORCE set.
			 */
			if (__defrag_check_wb(sbi, chunk) || (chunk->stat != DFR_CHUNK_STAT_PREP)) {
				if (force) {
					err = -EPERM;
					dfr_err("Skip case: inode %p, stat %x, f_clus %d, err %d",
						inode, chunk->stat, chunk->f_clus, err);
					goto error;
				}
				skip++;
				continue;
			}

#ifdef CONFIG_SDFAT_DFR_DEBUG
			/* SPO test */
			defrag_spo_test(sb, DFR_SPO_RANDOM, __func__);
#endif
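			/*
			 * Commit point: once the dentry (or the previous FAT
			 * entry) is redirected to the head of the new chain,
			 * the relocated data becomes live; the old chain is
			 * then freed below.
			 */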
			/* Update chunk's previous cluster */
			if (chunk->prev_clus == 0) {
				/* For the first cluster of a file */

				/* Update ino_info->fid.start_clu */
				ino_info->fid.start_clu = sbi->dfr_new_clus[chunk->new_idx];

				__defrag_update_dirent(sb, chunk);
			} else {
				FAT32_CHECK_CLUSTER(fsi, chunk->prev_clus, err);
				BUG_ON(err);
				if (fat_ent_set(sb,
					chunk->prev_clus,
					sbi->dfr_new_clus[chunk->new_idx])) {
					err = -EIO;
					goto error;
				}
			}

			/* Clear extent cache */
			extent_cache_inval_inode(inode);

			/* Update FID info */
			ino_info->fid.hint_bmap.off = CLUS_EOF;
			ino_info->fid.hint_bmap.clu = 0;

			/* Clear old FAT-chain */
			for (j = 0; j < chunk->nr_clus; j++)
				defrag_free_cluster(sb, chunk->d_clus + j);

			/* Mark this chunk PASS */
			chunk->stat = DFR_CHUNK_STAT_PASS;
			__defrag_check_fat_new(sb, inode, chunk);

			done++;

error:
			if (err) {
				/*
				 * chunk->new_idx != 0 means this chunk needs to be cleaned up
				 */
				if (chunk->new_idx) {
					/* Free already allocated clusters */
					for (j = 0; j < chunk->nr_clus; j++) {
						if (sbi->dfr_new_clus[chunk->new_idx + j]) {
							defrag_free_cluster(sb, sbi->dfr_new_clus[chunk->new_idx + j]);
							sbi->dfr_new_clus[chunk->new_idx + j] = 0;
						}
					}

					__defrag_check_fat_old(sb, inode, chunk);
				}

				/*
				 * chunk->new_idx == 0 means this chunk already cleaned up
				 */
				chunk->new_idx = 0;
				chunk->stat = DFR_CHUNK_STAT_ERR;
			}

			chunk_prev = chunk;
		}
		BUG_ON(!mutex_is_locked(&ino_dfr->lock));
		mutex_unlock(&ino_dfr->lock);
	}
out:
	if (skip) {
		dfr_debug("%s skipped (nr_reqs %d, done %d, skip %d)",
			__func__, sb_dfr->nr_chunks - 1, done, skip);
	} else {
		/* Make dfr_reserved_clus zero */
		if (sbi->dfr_reserved_clus > 0) {
			if (fsi->reserved_clusters < sbi->dfr_reserved_clus) {
				dfr_err("Reserved count: reserved_clus %d, dfr_reserved_clus %d",
					fsi->reserved_clusters, sbi->dfr_reserved_clus);
				BUG_ON(fsi->reserved_clusters < sbi->dfr_reserved_clus);
			}

			defrag_reserve_clusters(sb, 0 - sbi->dfr_reserved_clus);
		}

		dfr_debug("%s done (nr_reqs %d, done %d)", __func__, sb_dfr->nr_chunks - 1, done);
	}
}
/**
 * @fn		defrag_update_fat_next
 * @brief	update FAT chain for defrag requests
 * @return	void
 * @param	sb	super block
 * @remark	protected by super_block and volume lock
 */
void
defrag_update_fat_next(
	struct super_block *sb)
{
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	struct defrag_info *sb_dfr = &sbi->dfr_info, *ino_dfr = NULL;
	struct defrag_chunk_info *chunk = NULL;
	int done = 0, i = 0, j = 0, err = 0;

	/* Check if FS_ERROR occurred */
	if (sb->s_flags & MS_RDONLY) {
		dfr_err("RDONLY partition (err %d)", -EROFS);
		goto out;
	}

	list_for_each_entry(ino_dfr, &sb_dfr->entry, entry) {
		for (i = 0; i < ino_dfr->nr_chunks; i++) {
			int skip = 0;

			chunk = &(ino_dfr->chunks[i]);

			/* Do nothing if error occurred or update_fat_next already passed */
			if (chunk->stat == DFR_CHUNK_STAT_ERR)
				continue;
			if (chunk->stat & DFR_CHUNK_STAT_FAT) {
				done++;
				continue;
			}

			/* Skip this chunk if get_block has not run for all of its clusters */
			for (j = 0; j < chunk->nr_clus; j++) {
				if (sbi->dfr_new_clus[chunk->new_idx + j] == 0) {
					skip = 1;
					break;
				}
			}
			if (skip)
				continue;
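			/*
			 * Point the tail of the new chain at chunk->next_clus
			 * now, before defrag_update_fat_prev() makes the chain
			 * reachable; a sudden power-off in between leaves the
			 * original chain intact.
			 */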
			/* Update chunk's next cluster */
			FAT32_CHECK_CLUSTER(fsi,
				sbi->dfr_new_clus[chunk->new_idx + chunk->nr_clus - 1], err);
			BUG_ON(err);
			if (fat_ent_set(sb,
				sbi->dfr_new_clus[chunk->new_idx + chunk->nr_clus - 1],
				chunk->next_clus))
				goto out;

#ifdef CONFIG_SDFAT_DFR_DEBUG
			/* SPO test */
			defrag_spo_test(sb, DFR_SPO_RANDOM, __func__);
#endif

			/* Update chunk's state */
			chunk->stat |= DFR_CHUNK_STAT_FAT;
			done++;
		}
	}

out:
	dfr_debug("%s done (nr_reqs %d, done %d)", __func__, sb_dfr->nr_chunks - 1, done);
}
/**
 * @fn		defrag_check_discard
 * @brief	check if we can send discard for this AU, if so, send discard
 * @return	void
 * @param	sb	super block
 * @remark	protected by super_block and volume lock
 */
void
defrag_check_discard(
	IN struct super_block *sb)
{
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	AMAP_T *amap = SDFAT_SB(sb)->fsi.amap;
	AU_INFO_T *au = NULL;
	struct defrag_info *sb_dfr = &(SDFAT_SB(sb)->dfr_info);
	unsigned int tmp[DFR_MAX_AU_MOVED];
	int i = 0, j = 0;

	BUG_ON(!amap);

	if (!(SDFAT_SB(sb)->options.discard) ||
		!(SDFAT_SB(sb)->options.improved_allocation & SDFAT_ALLOC_SMART))
		return;

	memset(tmp, 0, sizeof(tmp));

	for (i = REQ_HEADER_IDX + 1; i < sb_dfr->nr_chunks; i++) {
		struct defrag_chunk_info *chunk = &(sb_dfr->chunks[i]);
		int skip = 0;

		au = GET_AU(amap, i_AU_of_CLU(amap, chunk->d_clus));

		/* Send DISCARD for free AU */
		if ((IS_AU_IGNORED(au, amap)) &&
			(amap_get_freeclus(sb, chunk->d_clus) == CLUS_PER_AU(sb))) {
			sector_t blk = 0, nr_blks = 0;
			unsigned int au_align_factor = amap->option.au_align_factor % amap->option.au_size;

			BUG_ON(au->idx == 0);

			/* Avoid multiple DISCARD */
			for (j = 0; j < DFR_MAX_AU_MOVED; j++) {
				if (tmp[j] == au->idx) {
					skip = 1;
					break;
				}
			}
			if (skip == 1)
				continue;

			/* Send DISCARD cmd */
			blk = (sector_t) (((au->idx * CLUS_PER_AU(sb)) << fsi->sect_per_clus_bits)
				- au_align_factor);
			nr_blks = ((sector_t)CLUS_PER_AU(sb)) << fsi->sect_per_clus_bits;

			dfr_debug("Send DISCARD for AU[%d] (blk %08zx)", au->idx, blk);
			sb_issue_discard(sb, blk, nr_blks, GFP_NOFS, 0);

			/* Save previous AU's index */
			for (j = 0; j < DFR_MAX_AU_MOVED; j++) {
				if (!tmp[j]) {
					tmp[j] = au->idx;
					break;
				}
			}
		}
	}
}
/**
 * @fn		defrag_free_cluster
 * @brief	free unnecessary cluster
 * @return	0 on success, -errno otherwise
 * @param	sb	super block
 * @param	clus	physical cluster num
 * @remark	protected by super_block and volume lock
 */
int
defrag_free_cluster(
	struct super_block *sb,
	unsigned int clus)
{
	FS_INFO_T *fsi = &SDFAT_SB(sb)->fsi;
	unsigned int val = 0;
	s32 err = 0;

	FAT32_CHECK_CLUSTER(fsi, clus, err);
	BUG_ON(err);
	if (fat_ent_get(sb, clus, &val))
		return -EIO;
	if (val) {
		if (fat_ent_set(sb, clus, 0))
			return -EIO;
	} else {
		dfr_err("Free: Already freed, clus %08x, val %08x", clus, val);
		BUG_ON(!val);
	}

	set_sb_dirty(sb);
	fsi->used_clusters--;

	if (fsi->amap)
		amap_release_cluster(sb, clus);

	return 0;
}
/**
 * @fn		defrag_check_defrag_required
 * @brief	check if defrag required
 * @return	1 if required, 0 if not required, -errno on error
 * @param	sb	super block
 * @param	totalau	# of total AUs
 * @param	cleanau	# of clean AUs
 * @param	fullau	# of full AUs
 * @remark	protected by super_block
 */
int
defrag_check_defrag_required(
	IN struct super_block *sb,
	OUT int *totalau,
	OUT int *cleanau,
	OUT int *fullau)
{
	FS_INFO_T *fsi = NULL;
	AMAP_T *amap = NULL;
	int clean_ratio = 0, frag_ratio = 0;
	int ret = 0;

	if (!sb || !(SDFAT_SB(sb)->options.defrag))
		return 0;

	/* Check DFR_DEFAULT_STOP_RATIO first */
	fsi = &(SDFAT_SB(sb)->fsi);
	if (fsi->used_clusters == (unsigned int)(~0)) {
		if (fsi->fs_func->count_used_clusters(sb, &fsi->used_clusters))
			return -EIO;
	}
	if (fsi->used_clusters * DFR_FULL_RATIO >= fsi->num_clusters * DFR_DEFAULT_STOP_RATIO) {
		dfr_debug("used_clusters %d, num_clusters %d", fsi->used_clusters, fsi->num_clusters);
		return 0;
	}

	/* Check clean/frag ratio */
	amap = SDFAT_SB(sb)->fsi.amap;
	BUG_ON(!amap);

	clean_ratio = (amap->n_clean_au * 100) / amap->n_au;
	if (amap->n_full_au)
		frag_ratio = ((amap->n_au - amap->n_clean_au) * 100) / amap->n_full_au;
	else
		frag_ratio = ((amap->n_au - amap->n_clean_au) * 100) /
			(fsi->used_clusters * CLUS_PER_AU(sb));

	/*
	 * Wake up defrag_daemon when the number of clean AUs is too small,
	 * or when frag_ratio exceeds the limit.
	 */
	if ((clean_ratio < DFR_DEFAULT_WAKEUP_RATIO) ||
		((clean_ratio < DFR_DEFAULT_CLEAN_RATIO) && (frag_ratio >= DFR_DEFAULT_FRAG_RATIO))) {
		if (totalau)
			*totalau = amap->n_au;
		if (cleanau)
			*cleanau = amap->n_clean_au;
		if (fullau)
			*fullau = amap->n_full_au;
		ret = 1;
	}

	return ret;
}
/**
 * @fn		defrag_check_defrag_on
 * @brief	check defrag status on inode
 * @return	1 if defrag is on, 0 otherwise
 * @param	inode	inode
 * @param	start	logical start addr
 * @param	end	logical end addr
 * @param	cancel	flag to cancel defrag
 * @param	caller	caller info
 */
int
defrag_check_defrag_on(
	INOUT struct inode *inode,
	IN loff_t start,
	IN loff_t end,
	IN int cancel,
	IN const char *caller)
{
	struct super_block *sb;
	struct sdfat_sb_info *sbi;
	FS_INFO_T *fsi;
	struct defrag_info *ino_dfr;
	unsigned int clus_start = 0, clus_end = 0;
	int ret = 0, i = 0;

	/* Check params before dereferencing the inode */
	if (!inode || (start == end))
		return 0;

	sb = inode->i_sb;
	sbi = SDFAT_SB(sb);
	fsi = &(sbi->fsi);
	ino_dfr = &(SDFAT_I(inode)->dfr_info);

	mutex_lock(&ino_dfr->lock);
	/* Check if this inode is on defrag */
	if (atomic_read(&ino_dfr->stat) == DFR_INO_STAT_REQ) {

		clus_start = start >> (fsi->cluster_size_bits);
		clus_end = (end >> (fsi->cluster_size_bits)) +
			((end & (fsi->cluster_size - 1)) ? 1 : 0);

		if (!ino_dfr->chunks)
			goto error;

		/* Check each chunk in given inode */
		for (i = 0; i < ino_dfr->nr_chunks; i++) {
			struct defrag_chunk_info *chunk = &(ino_dfr->chunks[i]);
			unsigned int chunk_start = 0, chunk_end = 0;

			/* Skip this chunk when error occurred or it already passed defrag process */
			if ((chunk->stat == DFR_CHUNK_STAT_ERR) || (chunk->stat == DFR_CHUNK_STAT_PASS))
				continue;

			chunk_start = chunk->f_clus;
			chunk_end = chunk->f_clus + chunk->nr_clus;
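			/*
			 * Overlap test between [clus_start, clus_end) and
			 * [chunk_start, chunk_end): the request may start
			 * inside the chunk, end inside it, or span it
			 * entirely.
			 */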
			if (((clus_start >= chunk_start) && (clus_start < chunk_end)) ||
				((clus_end > chunk_start) && (clus_end <= chunk_end)) ||
				((clus_start < chunk_start) && (clus_end > chunk_end))) {
				ret = 1;
				if (cancel) {
					chunk->stat = DFR_CHUNK_STAT_ERR;
					dfr_debug("Defrag canceled: inode %p, start %08x, end %08x, caller %s",
						inode, clus_start, clus_end, caller);
				}
			}
		}
	}

error:
	BUG_ON(!mutex_is_locked(&ino_dfr->lock));
	mutex_unlock(&ino_dfr->lock);
	return ret;
}
#ifdef CONFIG_SDFAT_DFR_DEBUG
/**
 * @fn		defrag_spo_test
 * @brief	test SPO while defrag running
 * @return	void
 * @param	sb	super block
 * @param	flag	SPO debug flag
 * @param	caller	caller info
 */
void
defrag_spo_test(
	struct super_block *sb,
	int flag,
	const char *caller)
{
	struct sdfat_sb_info *sbi = SDFAT_SB(sb);

	if (!sb || !(SDFAT_SB(sb)->options.defrag))
		return;

	if (flag == sbi->dfr_spo_flag) {
		dfr_err("Defrag SPO test (flag %d, caller %s)", flag, caller);
		panic("Defrag SPO test");
	}
}
#endif	/* CONFIG_SDFAT_DFR_DEBUG */

#endif	/* CONFIG_SDFAT_DFR */