/*
 *  Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version 2
 *  of the License, or (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 *  linux/fs/fat/cache.c
 *
 *  Written 1992,1993 by Werner Almesberger
 *
 *  Mar 1999. AV. Changed cache, so that it uses the starting cluster instead
 *	of inode number.
 *  May 1999. AV. Fixed the bogosity with FAT32 (read "FAT28"). Fscking lusers.
 */

/************************************************************************/
/*                                                                      */
/*  PROJECT : exFAT & FAT12/16/32 File System                          */
/*  FILE    : extent.c                                                 */
/*  PURPOSE : Improve the performance of traversing fat chain.         */
/*                                                                      */
/*----------------------------------------------------------------------*/
/*  NOTES                                                               */
/*                                                                      */
/*                                                                      */
/************************************************************************/

#include <linux/slab.h>
#include "sdfat.h"
#include "core.h"

#define EXTENT_CACHE_VALID	0
/* this must be > 0. */
#define EXTENT_MAX_CACHE	16

struct extent_cache {
	struct list_head cache_list;
	u32 nr_contig;	/* number of contiguous clusters */
	u32 fcluster;	/* cluster number in the file. */
	u32 dcluster;	/* cluster number on disk. */
};

struct extent_cache_id {
	u32 id;
	u32 nr_contig;
	u32 fcluster;
	u32 dcluster;
};

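/*
 * Each extent_cache entry records a run of clusters that are contiguous
 * on disk: file-relative cluster 'fcluster' lives at disk cluster
 * 'dcluster', and the next 'nr_contig' clusters follow it back to back.
 * Entries sit on a per-inode LRU list of at most EXTENT_MAX_CACHE items.
 *
 * extent_cache_id is a private snapshot of such a run.  Its 'id' is
 * checked against the inode's cache_valid_id before the snapshot is
 * inserted, so a result that straddled a cache invalidation is dropped
 * rather than re-added (see extent_cache_add()).
 */
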
static struct kmem_cache *extent_cache_cachep;

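/*
 * Slab constructor: runs once when an object is first created, not on
 * every allocation.  Freed entries must therefore return to the slab
 * with an empty cache_list (extent_cache_free() asserts this), so the
 * list head stays valid for the next user without reinitialization.
 */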
static void init_once(void *c)
{
	struct extent_cache *cache = (struct extent_cache *)c;

	INIT_LIST_HEAD(&cache->cache_list);
}

s32 extent_cache_init(void)
{
	extent_cache_cachep = kmem_cache_create("sdfat_extent_cache",
				sizeof(struct extent_cache),
				0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
				init_once);
	if (!extent_cache_cachep)
		return -ENOMEM;

	return 0;
}

void extent_cache_shutdown(void)
{
	if (!extent_cache_cachep)
		return;
	kmem_cache_destroy(extent_cache_cachep);
}

void extent_cache_init_inode(struct inode *inode)
{
	EXTENT_T *extent = &(SDFAT_I(inode)->fid.extent);

	spin_lock_init(&extent->cache_lru_lock);
	extent->nr_caches = 0;
	extent->cache_valid_id = EXTENT_CACHE_VALID + 1;
	INIT_LIST_HEAD(&extent->cache_lru);
}

static inline struct extent_cache *extent_cache_alloc(void)
{
	return kmem_cache_alloc(extent_cache_cachep, GFP_NOFS);
}

static inline void extent_cache_free(struct extent_cache *cache)
{
	BUG_ON(!list_empty(&cache->cache_list));
	kmem_cache_free(extent_cache_cachep, cache);
}

static inline void extent_cache_update_lru(struct inode *inode,
					struct extent_cache *cache)
{
	EXTENT_T *extent = &(SDFAT_I(inode)->fid.extent);

	if (extent->cache_lru.next != &cache->cache_list)
		list_move(&cache->cache_list, &extent->cache_lru);
}

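/*
 * Scan the per-inode LRU for the entry that starts closest at or below
 * 'fclus'.  On a hit, the entry is promoted to the LRU head, a snapshot
 * of it is stored in 'cid', and *cached_fclus/*cached_dclus are set to
 * the nearest cached position not beyond 'fclus'.  Returns the offset of
 * that position within the hit entry, or CLUS_EOF on a miss.
 */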
static u32 extent_cache_lookup(struct inode *inode, u32 fclus,
			struct extent_cache_id *cid,
			u32 *cached_fclus, u32 *cached_dclus)
{
	EXTENT_T *extent = &(SDFAT_I(inode)->fid.extent);

	static struct extent_cache nohit = { .fcluster = 0, };

	struct extent_cache *hit = &nohit, *p;
	u32 offset = CLUS_EOF;

	spin_lock(&extent->cache_lru_lock);
	list_for_each_entry(p, &extent->cache_lru, cache_list) {
		/* Find the cache of "fclus" or nearest cache. */
		if (p->fcluster <= fclus && hit->fcluster < p->fcluster) {
			hit = p;
			if ((hit->fcluster + hit->nr_contig) < fclus) {
				offset = hit->nr_contig;
			} else {
				offset = fclus - hit->fcluster;
				break;
			}
		}
	}

	if (hit != &nohit) {
		extent_cache_update_lru(inode, hit);

		cid->id = extent->cache_valid_id;
		cid->nr_contig = hit->nr_contig;
		cid->fcluster = hit->fcluster;
		cid->dcluster = hit->dcluster;
		*cached_fclus = cid->fcluster + offset;
		*cached_dclus = cid->dcluster + offset;
	}
	spin_unlock(&extent->cache_lru_lock);

	return offset;
}

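/*
 * Look for an existing entry that starts at the same file cluster as
 * 'new' and, if found, widen it to the longer of the two runs.  Returns
 * the merged entry, or NULL if no entry with that starting cluster
 * exists.  The caller must hold cache_lru_lock.
 */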
static struct extent_cache *extent_cache_merge(struct inode *inode,
					struct extent_cache_id *new)
{
	EXTENT_T *extent = &(SDFAT_I(inode)->fid.extent);
	struct extent_cache *p;

	list_for_each_entry(p, &extent->cache_lru, cache_list) {
		/* Find the same part as "new" in cluster-chain. */
		if (p->fcluster == new->fcluster) {
			ASSERT(p->dcluster == new->dcluster);
			if (new->nr_contig > p->nr_contig)
				p->nr_contig = new->nr_contig;
			return p;
		}
	}
	return NULL;
}

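/*
 * Insert the snapshot in 'new' into the per-inode cache.  The snapshot
 * is dropped if the cache was invalidated after it was taken (new->id no
 * longer matches cache_valid_id).  Because the slab allocation may
 * sleep, the lock is released around extent_cache_alloc() and the merge
 * is retried afterwards in case a racing task inserted the same run in
 * the meantime.  Once the cache is full, the coldest LRU entry is
 * recycled instead of allocating a new one.
 */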
static void extent_cache_add(struct inode *inode, struct extent_cache_id *new)
{
	EXTENT_T *extent = &(SDFAT_I(inode)->fid.extent);

	struct extent_cache *cache, *tmp;

	if (new->fcluster == -1) /* dummy cache */
		return;

	spin_lock(&extent->cache_lru_lock);
	if (new->id != EXTENT_CACHE_VALID &&
	    new->id != extent->cache_valid_id)
		goto out;	/* this cache was invalidated */

	cache = extent_cache_merge(inode, new);
	if (cache == NULL) {
		if (extent->nr_caches < EXTENT_MAX_CACHE) {
			extent->nr_caches++;
			spin_unlock(&extent->cache_lru_lock);

			tmp = extent_cache_alloc();
			if (!tmp) {
				spin_lock(&extent->cache_lru_lock);
				extent->nr_caches--;
				spin_unlock(&extent->cache_lru_lock);
				return;
			}

			spin_lock(&extent->cache_lru_lock);
			cache = extent_cache_merge(inode, new);
			if (cache != NULL) {
				extent->nr_caches--;
				extent_cache_free(tmp);
				goto out_update_lru;
			}
			cache = tmp;
		} else {
			struct list_head *p = extent->cache_lru.prev;

			cache = list_entry(p, struct extent_cache, cache_list);
		}
		cache->fcluster = new->fcluster;
		cache->dcluster = new->dcluster;
		cache->nr_contig = new->nr_contig;
	}
out_update_lru:
	extent_cache_update_lru(inode, cache);
out:
	spin_unlock(&extent->cache_lru_lock);
}

/*
 * Cache invalidation occurs rarely, thus the LRU chain is not updated. It
 * fixes itself after a while.
 */
static void __extent_cache_inval_inode(struct inode *inode)
{
	EXTENT_T *extent = &(SDFAT_I(inode)->fid.extent);
	struct extent_cache *cache;

	while (!list_empty(&extent->cache_lru)) {
		cache = list_entry(extent->cache_lru.next,
				struct extent_cache, cache_list);
		list_del_init(&cache->cache_list);
		extent->nr_caches--;
		extent_cache_free(cache);
	}
	/* Update. The copy of caches before this id is discarded. */
	extent->cache_valid_id++;
	if (extent->cache_valid_id == EXTENT_CACHE_VALID)
		extent->cache_valid_id++;
}

void extent_cache_inval_inode(struct inode *inode)
{
	EXTENT_T *extent = &(SDFAT_I(inode)->fid.extent);

	spin_lock(&extent->cache_lru_lock);
	__extent_cache_inval_inode(inode);
	spin_unlock(&extent->cache_lru_lock);
}

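/*
 * cache_contiguous() extends the run in 'cid' by one cluster and reports
 * whether 'dclus' really is the next contiguous disk cluster.  Note that
 * nr_contig is incremented before the comparison, so when the check
 * fails the caller must restart the run with cache_init().
 */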
static inline s32 cache_contiguous(struct extent_cache_id *cid, u32 dclus)
{
	cid->nr_contig++;
	return ((cid->dcluster + cid->nr_contig) == dclus);
}

static inline void cache_init(struct extent_cache_id *cid, u32 fclus, u32 dclus)
{
	cid->id = EXTENT_CACHE_VALID;
	cid->fcluster = fclus;
	cid->dcluster = dclus;
	cid->nr_contig = 0;
}

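/*
 * Translate file-relative cluster 'cluster' of 'inode' into its on-disk
 * cluster number.  The walk starts from the nearest cached extent (or
 * from fid->start_clu on a cache miss) and follows the FAT chain,
 * recording any newly discovered contiguous run back into the cache on
 * the way out.  *fclus/*dclus return the reached position and
 * *last_dclus the previous disk cluster; with 'allow_eof' set, hitting
 * EOF stops the walk instead of being treated as a corrupt chain.
 * Returns 0 on success or -EIO on error.
 */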
s32 extent_get_clus(struct inode *inode, u32 cluster, u32 *fclus,
		u32 *dclus, u32 *last_dclus, s32 allow_eof)
{
	struct super_block *sb = inode->i_sb;
	FS_INFO_T *fsi = &(SDFAT_SB(sb)->fsi);
	u32 limit = fsi->num_clusters;
	FILE_ID_T *fid = &(SDFAT_I(inode)->fid);
	struct extent_cache_id cid;
	u32 content;

	/* FOR GRACEFUL ERROR HANDLING */
	if (IS_CLUS_FREE(fid->start_clu)) {
		sdfat_fs_error(sb, "invalid access to "
			"extent cache (entry 0x%08x)", fid->start_clu);
		ASSERT(0);
		return -EIO;
	}

	*fclus = 0;
	*dclus = fid->start_clu;
	*last_dclus = *dclus;

	/*
	 * Don't use extent_cache if zero offset or non-cluster allocation
	 */
	if ((cluster == 0) || IS_CLUS_EOF(*dclus))
		return 0;

	cache_init(&cid, CLUS_EOF, CLUS_EOF);

	if (extent_cache_lookup(inode, cluster, &cid, fclus, dclus) == CLUS_EOF) {
		/*
		 * dummy, always not contiguous
		 * This is reinitialized by cache_init(), later.
		 */
		ASSERT((cid.id == EXTENT_CACHE_VALID)
			&& (cid.fcluster == CLUS_EOF)
			&& (cid.dcluster == CLUS_EOF)
			&& (cid.nr_contig == 0));
	}

	if (*fclus == cluster)
		return 0;

	while (*fclus < cluster) {
		/* prevent the infinite loop of cluster chain */
		if (*fclus > limit) {
			sdfat_fs_error(sb,
				"%s: detected the cluster chain loop"
				" (i_pos %u)", __func__,
				(*fclus));
			return -EIO;
		}

		if (fat_ent_get_safe(sb, *dclus, &content))
			return -EIO;

		*last_dclus = *dclus;
		*dclus = content;
		(*fclus)++;

		if (IS_CLUS_EOF(content)) {
			if (!allow_eof) {
				sdfat_fs_error(sb,
					"%s: invalid cluster chain (i_pos %u, "
					"last_clus 0x%08x is EOF)",
					__func__, *fclus, (*last_dclus));
				return -EIO;
			}
			break;
		}

		if (!cache_contiguous(&cid, *dclus))
			cache_init(&cid, *fclus, *dclus);
	}

	extent_cache_add(inode, &cid);
	return 0;
}
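
/*
 * Illustrative only: a minimal sketch (kept out of the build with #if 0)
 * of how a caller might use extent_get_clus() to map a file-relative
 * cluster to its on-disk cluster.  The helper name and the choice of
 * allow_eof = 0 are assumptions for illustration, not part of this file.
 */
#if 0
static s32 example_map_cluster(struct inode *inode, u32 file_cluster,
			u32 *disk_cluster)
{
	u32 fclus, dclus, last_dclus;
	s32 err;

	/* Walk the FAT chain (shortcut via the extent cache when possible). */
	err = extent_get_clus(inode, file_cluster, &fclus, &dclus,
			&last_dclus, 0 /* EOF mid-walk is an error */);
	if (err)
		return err;

	*disk_cluster = dclus;
	return 0;
}
#endif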