/*
 *  linux/fs/fat/cache.c
 *
 *  Written 1992,1993 by Werner Almesberger
 *
 *  Mar 1999. AV. Changed cache, so that it uses the starting cluster instead
 *  of inode number.
 *  May 1999. AV. Fixed the bogosity with FAT32 (read "FAT28"). Fscking lusers.
 */

#include <linux/fs.h>
#include <linux/msdos_fs.h>
#include <linux/buffer_head.h>

/* this must be > 0. */
#define FAT_MAX_CACHE	8
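
/*
 * A cache entry describes one contiguous run in an inode's cluster
 * chain: file cluster "fcluster" lives at disk cluster "dcluster", and
 * the next "nr_contig" clusters are contiguous both in the file and on
 * disk.  Entries hang off a per-inode LRU list.
 */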
struct fat_cache {
	struct list_head cache_list;
	int nr_contig;	/* number of contiguous clusters */
	int fcluster;	/* cluster number in the file. */
	int dcluster;	/* cluster number on disk. */
};
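
/*
 * A fat_cache_id is a private, unlocked copy of a cache entry that the
 * chain walker carries around.  Its "id" is compared against the
 * inode's cache_valid_id when the copy is written back, so an entry
 * collected before an invalidation is quietly dropped.
 */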
struct fat_cache_id {
	unsigned int id;
	int nr_contig;
	int fcluster;
	int dcluster;
};

static inline int fat_max_cache(struct inode *inode)
{
	return FAT_MAX_CACHE;
}
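
/*
 * All fat_cache objects come from a dedicated slab.  The constructor
 * only initializes the list head; fat_cache_free() checks that an
 * entry was unlinked with list_del_init() before it is freed.
 */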
static kmem_cache_t *fat_cache_cachep;

static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
{
	struct fat_cache *cache = (struct fat_cache *)foo;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR)
		INIT_LIST_HEAD(&cache->cache_list);
}

int __init fat_cache_init(void)
{
	fat_cache_cachep = kmem_cache_create("fat_cache",
					     sizeof(struct fat_cache),
					     0, SLAB_RECLAIM_ACCOUNT,
					     init_once, NULL);
	if (fat_cache_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void fat_cache_destroy(void)
{
	if (kmem_cache_destroy(fat_cache_cachep))
		printk(KERN_INFO "fat_cache: not all structures were freed\n");
}

static inline struct fat_cache *fat_cache_alloc(struct inode *inode)
{
	return kmem_cache_alloc(fat_cache_cachep, SLAB_KERNEL);
}

static inline void fat_cache_free(struct fat_cache *cache)
{
	BUG_ON(!list_empty(&cache->cache_list));
	kmem_cache_free(fat_cache_cachep, cache);
}
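
/*
 * Move an entry to the front of the inode's LRU list.  Caller must
 * hold cache_lru_lock.
 */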
static inline void fat_cache_update_lru(struct inode *inode,
					struct fat_cache *cache)
{
	if (MSDOS_I(inode)->cache_lru.next != &cache->cache_list)
		list_move(&cache->cache_list, &MSDOS_I(inode)->cache_lru);
}
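
/*
 * Find the cache entry nearest to (but not past) file cluster "fclus".
 * On a hit, *cached_fclus/*cached_dclus are set to the closest mapped
 * position and the offset into the cached run is returned; "cid" gets
 * a copy of the entry for later re-insertion.  Returns -1 on a miss.
 */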
static int fat_cache_lookup(struct inode *inode, int fclus,
			    struct fat_cache_id *cid,
			    int *cached_fclus, int *cached_dclus)
{
	static struct fat_cache nohit = { .fcluster = 0, };

	struct fat_cache *hit = &nohit, *p;
	int offset = -1;

	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
		/* Find the cache of "fclus" or nearest cache. */
		if (p->fcluster <= fclus && hit->fcluster < p->fcluster) {
			hit = p;
			if ((hit->fcluster + hit->nr_contig) < fclus) {
				offset = hit->nr_contig;
			} else {
				offset = fclus - hit->fcluster;
				break;
			}
		}
	}
	if (hit != &nohit) {
		fat_cache_update_lru(inode, hit);

		cid->id = MSDOS_I(inode)->cache_valid_id;
		cid->nr_contig = hit->nr_contig;
		cid->fcluster = hit->fcluster;
		cid->dcluster = hit->dcluster;
		*cached_fclus = cid->fcluster + offset;
		*cached_dclus = cid->dcluster + offset;
	}
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);

	return offset;
}
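
/*
 * If an entry starting at the same file cluster already exists, widen
 * it to the larger run and return it; otherwise return NULL.  Caller
 * must hold cache_lru_lock.
 */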
static struct fat_cache *fat_cache_merge(struct inode *inode,
					 struct fat_cache_id *new)
{
	struct fat_cache *p;

	list_for_each_entry(p, &MSDOS_I(inode)->cache_lru, cache_list) {
		/* Find the same part as "new" in cluster-chain. */
		if (p->fcluster == new->fcluster) {
			BUG_ON(p->dcluster != new->dcluster);
			if (new->nr_contig > p->nr_contig)
				p->nr_contig = new->nr_contig;
			return p;
		}
	}
	return NULL;
}
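
/*
 * Insert "new" into the inode's cache: merge with an existing entry if
 * possible, otherwise allocate a fresh one (dropping the lock around
 * the allocation, so the merge is retried afterwards) or, once the
 * per-inode limit is reached, recycle the entry at the LRU tail.
 */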
static void fat_cache_add(struct inode *inode, struct fat_cache_id *new)
{
	struct fat_cache *cache, *tmp;

	if (new->fcluster == -1) /* dummy cache */
		return;

	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	if (new->id != FAT_CACHE_VALID &&
	    new->id != MSDOS_I(inode)->cache_valid_id)
		goto out;	/* this cache was invalidated */

	cache = fat_cache_merge(inode, new);
	if (cache == NULL) {
		if (MSDOS_I(inode)->nr_caches < fat_max_cache(inode)) {
			MSDOS_I(inode)->nr_caches++;
			spin_unlock(&MSDOS_I(inode)->cache_lru_lock);

			tmp = fat_cache_alloc(inode);
			spin_lock(&MSDOS_I(inode)->cache_lru_lock);
			if (tmp == NULL) {
				/* allocation failed, give the slot back */
				MSDOS_I(inode)->nr_caches--;
				goto out;
			}
			cache = fat_cache_merge(inode, new);
			if (cache != NULL) {
				MSDOS_I(inode)->nr_caches--;
				fat_cache_free(tmp);
				goto out_update_lru;
			}
			cache = tmp;
		} else {
			struct list_head *p = MSDOS_I(inode)->cache_lru.prev;
			cache = list_entry(p, struct fat_cache, cache_list);
		}
		cache->fcluster = new->fcluster;
		cache->dcluster = new->dcluster;
		cache->nr_contig = new->nr_contig;
	}
out_update_lru:
	fat_cache_update_lru(inode, cache);
out:
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
}

/*
 * Cache invalidation occurs rarely, thus the LRU chain is not updated. It
 * fixes itself after a while.
 */
static void __fat_cache_inval_inode(struct inode *inode)
{
	struct msdos_inode_info *i = MSDOS_I(inode);
	struct fat_cache *cache;

	while (!list_empty(&i->cache_lru)) {
		cache = list_entry(i->cache_lru.next,
				   struct fat_cache, cache_list);
		list_del_init(&cache->cache_list);
		i->nr_caches--;
		fat_cache_free(cache);
	}
	/* Update. The copy of caches before this id is discarded. */
	i->cache_valid_id++;
	if (i->cache_valid_id == FAT_CACHE_VALID)
		i->cache_valid_id++;
}

void fat_cache_inval_inode(struct inode *inode)
{
	spin_lock(&MSDOS_I(inode)->cache_lru_lock);
	__fat_cache_inval_inode(inode);
	spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
}
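
/*
 * cache_contiguous() extends the run by one cluster and reports
 * whether "dclus" is the next contiguous disk cluster; cache_init()
 * restarts the run at (fclus, dclus).
 */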
static inline int cache_contiguous(struct fat_cache_id *cid, int dclus)
{
	cid->nr_contig++;
	return ((cid->dcluster + cid->nr_contig) == dclus);
}

static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus)
{
	cid->id = FAT_CACHE_VALID;
	cid->fcluster = fclus;
	cid->dcluster = dclus;
	cid->nr_contig = 0;
}
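
/*
 * Walk the chain to file cluster "cluster": *fclus is set to the file
 * cluster actually reached and *dclus to the matching disk cluster.
 * The walk starts from the nearest cached entry (or from i_start on a
 * miss) and follows the FAT one entry at a time, recording the longest
 * contiguous run seen in "cid" for the cache.  Returns 0, FAT_ENT_EOF
 * if the chain ends early, or a -errno.
 */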
int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
{
	struct super_block *sb = inode->i_sb;
	const int limit = sb->s_maxbytes >> MSDOS_SB(sb)->cluster_bits;
	struct fat_entry fatent;
	struct fat_cache_id cid;
	int nr;

	BUG_ON(MSDOS_I(inode)->i_start == 0);

	*fclus = 0;
	*dclus = MSDOS_I(inode)->i_start;
	if (cluster == 0)
		return 0;

	if (fat_cache_lookup(inode, cluster, &cid, fclus, dclus) < 0) {
		/*
		 * dummy, always not contiguous
		 * This is reinitialized by cache_init(), later.
		 */
		cache_init(&cid, -1, -1);
	}

	fatent_init(&fatent);
	while (*fclus < cluster) {
		/* prevent the infinite loop of cluster chain */
		if (*fclus > limit) {
			fat_fs_panic(sb, "%s: detected the cluster chain loop"
				     " (i_pos %lld)", __FUNCTION__,
				     MSDOS_I(inode)->i_pos);
			nr = -EIO;
			goto out;
		}

		nr = fat_ent_read(inode, &fatent, *dclus);
		if (nr < 0)
			goto out;
		else if (nr == FAT_ENT_FREE) {
			fat_fs_panic(sb, "%s: invalid cluster chain"
				     " (i_pos %lld)", __FUNCTION__,
				     MSDOS_I(inode)->i_pos);
			nr = -EIO;
			goto out;
		} else if (nr == FAT_ENT_EOF) {
			fat_cache_add(inode, &cid);
			goto out;
		}
		(*fclus)++;
		*dclus = nr;
		if (!cache_contiguous(&cid, *dclus))
			cache_init(&cid, *fclus, *dclus);
	}
	nr = 0;
	fat_cache_add(inode, &cid);
out:
	fatent_brelse(&fatent);
	return nr;
}
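
/*
 * Translate a file cluster number to a disk cluster number.  Returns
 * the disk cluster, 0 for an empty file, or a -errno (a request beyond
 * the end of the chain is reported as -EIO).
 */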
static int fat_bmap_cluster(struct inode *inode, int cluster)
{
	struct super_block *sb = inode->i_sb;
	int ret, fclus, dclus;

	if (MSDOS_I(inode)->i_start == 0)
		return 0;

	ret = fat_get_cluster(inode, cluster, &fclus, &dclus);
	if (ret < 0)
		return ret;
	else if (ret == FAT_ENT_EOF) {
		fat_fs_panic(sb, "%s: request beyond EOF (i_pos %lld)",
			     __FUNCTION__, MSDOS_I(inode)->i_pos);
		return -EIO;
	}
	return dclus;
}
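
/*
 * Map logical "sector" to a physical block number in *phys, reporting
 * in *mapped_blocks how many following sectors are contiguous.  The
 * FAT12/16 root directory is a fixed area in front of the data region,
 * so it is handled without consulting the cluster chain.  *phys == 0
 * means the sector is not mapped (a hole or beyond the last block).
 */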
int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
	     unsigned long *mapped_blocks)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	sector_t last_block;
	int cluster, offset;

	*phys = 0;
	*mapped_blocks = 0;
	if ((sbi->fat_bits != 32) && (inode->i_ino == MSDOS_ROOT_INO)) {
		if (sector < (sbi->dir_entries >> sbi->dir_per_block_bits)) {
			*phys = sector + sbi->dir_start;
			*mapped_blocks = 1;
		}
		return 0;
	}
	last_block = (MSDOS_I(inode)->mmu_private + (sb->s_blocksize - 1))
		>> sb->s_blocksize_bits;
	if (sector >= last_block)
		return 0;

	cluster = sector >> (sbi->cluster_bits - sb->s_blocksize_bits);
	offset  = sector & (sbi->sec_per_clus - 1);
	cluster = fat_bmap_cluster(inode, cluster);
	if (cluster < 0)
		return cluster;
	else if (cluster) {
		*phys = fat_clus_to_blknr(sbi, cluster) + offset;
		*mapped_blocks = sbi->sec_per_clus - offset;
		if (*mapped_blocks > last_block - sector)
			*mapped_blocks = last_block - sector;
	}
	return 0;
}