/*
 * linux/fs/ufs/cylinder.c
 *
 * Copyright (C) 1998
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 *
 * ext2 - inode (block) bitmap caching inspired
 */

#include <linux/fs.h>
#include <linux/ufs_fs.h>
#include <linux/time.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>

#include "swab.h"
#include "util.h"

#undef UFS_CYLINDER_DEBUG

#ifdef UFS_CYLINDER_DEBUG
#define UFSD(x) printk("(%s, %d), %s:", __FILE__, __LINE__, __FUNCTION__); printk x;
#else
#define UFSD(x)
#endif

/*
 * Read cylinder group into cache. The memory space for ufs_cg_private_info
 * structure is already allocated during ufs_read_super.
 */
static void ufs_read_cylinder (struct super_block * sb,
	unsigned cgno, unsigned bitmap_nr)
{
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned i, j;

	UFSD(("ENTER, cgno %u, bitmap_nr %u\n", cgno, bitmap_nr))
	uspi = sbi->s_uspi;
	ucpi = sbi->s_ucpi[bitmap_nr];
	ucg = (struct ufs_cylinder_group *)sbi->s_ucg[cgno]->b_data;
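	/*
	 * UCPI_UBH (from util.h) is the ufs_buffer_head describing this
	 * cylinder group block: it starts at fragment ufs_cgcmin(cgno) and
	 * spans s_cgsize >> s_blocksize_bits buffers.
	 */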
	UCPI_UBH->fragment = ufs_cgcmin(cgno);
	UCPI_UBH->count = uspi->s_cgsize >> sb->s_blocksize_bits;
	/*
	 * We already have the first fragment of the cylinder group block in a buffer
	 */
	UCPI_UBH->bh[0] = sbi->s_ucg[cgno];
	for (i = 1; i < UCPI_UBH->count; i++)
		if (!(UCPI_UBH->bh[i] = sb_bread(sb, UCPI_UBH->fragment + i)))
			goto failed;
	sbi->s_cgno[bitmap_nr] = cgno;
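	/*
	 * Cache the cylinder group summary fields in ucpi, converting them
	 * from on-disk to CPU byte order.
	 */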
	ucpi->c_cgx = fs32_to_cpu(sb, ucg->cg_cgx);
	ucpi->c_ncyl = fs16_to_cpu(sb, ucg->cg_ncyl);
	ucpi->c_niblk = fs16_to_cpu(sb, ucg->cg_niblk);
	ucpi->c_ndblk = fs32_to_cpu(sb, ucg->cg_ndblk);
	ucpi->c_rotor = fs32_to_cpu(sb, ucg->cg_rotor);
	ucpi->c_frotor = fs32_to_cpu(sb, ucg->cg_frotor);
	ucpi->c_irotor = fs32_to_cpu(sb, ucg->cg_irotor);
	ucpi->c_btotoff = fs32_to_cpu(sb, ucg->cg_btotoff);
	ucpi->c_boff = fs32_to_cpu(sb, ucg->cg_boff);
	ucpi->c_iusedoff = fs32_to_cpu(sb, ucg->cg_iusedoff);
	ucpi->c_freeoff = fs32_to_cpu(sb, ucg->cg_freeoff);
	ucpi->c_nextfreeoff = fs32_to_cpu(sb, ucg->cg_nextfreeoff);
	ucpi->c_clustersumoff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clustersumoff);
	ucpi->c_clusteroff = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_clusteroff);
	ucpi->c_nclusterblks = fs32_to_cpu(sb, ucg->cg_u.cg_44.cg_nclusterblks);
	UFSD(("EXIT\n"))
	return;
failed:
	/* Release the buffers read so far; bh[0] belongs to sbi->s_ucg and is not ours to release. */
	for (j = 1; j < i; j++)
		brelse (UCPI_UBH->bh[j]);
	sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
	ufs_error (sb, "ufs_read_cylinder", "can't read cylinder group block %u", cgno);
}

/*
 * Remove cylinder group from cache; this does not release the memory
 * allocated for the cylinder group (that is done only in ufs_put_super).
 */
void ufs_put_cylinder (struct super_block * sb, unsigned bitmap_nr)
{
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;
	unsigned i;

	UFSD(("ENTER, bitmap_nr %u\n", bitmap_nr))

	uspi = sbi->s_uspi;
	if (sbi->s_cgno[bitmap_nr] == UFS_CGNO_EMPTY) {
		UFSD(("EXIT\n"))
		return;
	}
	ucpi = sbi->s_ucpi[bitmap_nr];
	ucg = ubh_get_ucg(UCPI_UBH);

	if (uspi->s_ncg > UFS_MAX_GROUP_LOADED && bitmap_nr >= sbi->s_cg_loaded) {
		ufs_panic (sb, "ufs_put_cylinder", "internal error");
		return;
	}

	/*
	 * The rotor fields are not critical data, so we write them back
	 * to disk only when we are done working with the cylinder group.
	 */
	ucg->cg_rotor = cpu_to_fs32(sb, ucpi->c_rotor);
	ucg->cg_frotor = cpu_to_fs32(sb, ucpi->c_frotor);
	ucg->cg_irotor = cpu_to_fs32(sb, ucpi->c_irotor);
	ubh_mark_buffer_dirty (UCPI_UBH);
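	/*
	 * bh[0] is sbi->s_ucg[cgno], which stays cached until ufs_put_super,
	 * so only the extra buffers read by ufs_read_cylinder are released here.
	 */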
	for (i = 1; i < UCPI_UBH->count; i++) {
		brelse (UCPI_UBH->bh[i]);
	}

	sbi->s_cgno[bitmap_nr] = UFS_CGNO_EMPTY;
	UFSD(("EXIT\n"))
}

/*
 * Find cylinder group in cache and return a pointer to it.
 * If the cylinder group is not in the cache, it is loaded from disk.
 *
 * The cache is managed with an LRU algorithm.
 */
struct ufs_cg_private_info * ufs_load_cylinder (
	struct super_block * sb, unsigned cgno)
{
	struct ufs_sb_info * sbi = UFS_SB(sb);
	struct ufs_sb_private_info * uspi;
	struct ufs_cg_private_info * ucpi;
	unsigned cg, i, j;

	UFSD(("ENTER, cgno %u\n", cgno))

	uspi = sbi->s_uspi;
	if (cgno >= uspi->s_ncg) {
		ufs_panic (sb, "ufs_load_cylinder", "internal error, high number of cg");
		return NULL;
	}
	/*
	 * Cylinder group number cg is in cache and it was last used
	 */
	if (sbi->s_cgno[0] == cgno) {
		UFSD(("EXIT\n"))
		return sbi->s_ucpi[0];
	}
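	/*
	 * With at most UFS_MAX_GROUP_LOADED cylinder groups the cache below is
	 * effectively direct-mapped: slot cgno always holds group cgno.
	 */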
	/*
	 * Number of cylinder groups is not higher than UFS_MAX_GROUP_LOADED
	 */
	if (uspi->s_ncg <= UFS_MAX_GROUP_LOADED) {
		if (sbi->s_cgno[cgno] != UFS_CGNO_EMPTY) {
			if (sbi->s_cgno[cgno] != cgno) {
				ufs_panic (sb, "ufs_load_cylinder", "internal error, wrong number of cg in cache");
				UFSD(("EXIT (FAILED)\n"))
				return NULL;
			}
			else {
				UFSD(("EXIT\n"))
				return sbi->s_ucpi[cgno];
			}
		} else {
			ufs_read_cylinder (sb, cgno, cgno);
			UFSD(("EXIT\n"))
			return sbi->s_ucpi[cgno];
		}
	}
	/*
	 * Cylinder group number cg is in cache but it was not last used,
	 * we will move it to the first position
	 */
	for (i = 0; i < sbi->s_cg_loaded && sbi->s_cgno[i] != cgno; i++);
	if (i < sbi->s_cg_loaded && sbi->s_cgno[i] == cgno) {
		cg = sbi->s_cgno[i];
		ucpi = sbi->s_ucpi[i];
		for (j = i; j > 0; j--) {
			sbi->s_cgno[j] = sbi->s_cgno[j-1];
			sbi->s_ucpi[j] = sbi->s_ucpi[j-1];
		}
		sbi->s_cgno[0] = cg;
		sbi->s_ucpi[0] = ucpi;
	/*
	 * Cylinder group number cg is not in cache, we will read it from disk
	 * and put it in the first position
	 */
	} else {
		if (sbi->s_cg_loaded < UFS_MAX_GROUP_LOADED)
			sbi->s_cg_loaded++;
		else
			ufs_put_cylinder (sb, UFS_MAX_GROUP_LOADED-1);
		ucpi = sbi->s_ucpi[sbi->s_cg_loaded - 1];
		for (j = sbi->s_cg_loaded - 1; j > 0; j--) {
			sbi->s_cgno[j] = sbi->s_cgno[j-1];
			sbi->s_ucpi[j] = sbi->s_ucpi[j-1];
		}
		sbi->s_ucpi[0] = ucpi;
		ufs_read_cylinder (sb, cgno, 0);
	}
	UFSD(("EXIT\n"))
	return sbi->s_ucpi[0];
}
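
/*
 * Usage sketch (illustrative only, not part of the original file): roughly
 * how the UFS block/inode allocators are expected to use the cache above.
 * ufs_example_cg_has_free_frag() is a hypothetical helper; ufs_load_cylinder(),
 * ubh_get_ucg(), UCPI_UBH and fs32_to_cpu() are the real interfaces used in
 * this file and util.h.
 */
#if 0
static int ufs_example_cg_has_free_frag (struct super_block * sb, unsigned cgno)
{
	struct ufs_cg_private_info * ucpi;
	struct ufs_cylinder_group * ucg;

	/* Look the group up through the LRU cache; a miss reads it from disk. */
	ucpi = ufs_load_cylinder (sb, cgno);
	if (!ucpi)
		return 0;
	ucg = ubh_get_ucg (UCPI_UBH);

	/* cg_cs holds the per-group free-space summary; cs_nffree counts
	 * free fragments. */
	return fs32_to_cpu(sb, ucg->cg_cs.cs_nffree) > 0;
}
#endif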