partition.c

/*
 * partition.c
 *
 * PURPOSE
 *      Partition handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *      This file is distributed under the terms of the GNU General Public
 *      License (GPL). Copies of the GPL can be obtained from:
 *              ftp://prep.ai.mit.edu/pub/gnu/GPL
 *      Each contributing author retains all rights to their own work.
 *
 * (C) 1998-2001 Ben Fennema
 *
 * HISTORY
 *
 * 12/06/98 blf  Created file.
 *
 */
#include "udfdecl.h"
#include "udf_sb.h"
#include "udf_i.h"

#include <linux/fs.h>
#include <linux/string.h>
#include <linux/udf_fs.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
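
/*
 * Translate a (block, partition, offset) triple into a physical block
 * number. If a partition-type-specific mapping function is registered
 * (virtual or sparable partitions), dispatch to it; otherwise the
 * partition is plain physical space and the result is a simple offset
 * from the partition root. Returns 0xFFFFFFFF for an invalid partition
 * reference.
 */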
inline uint32_t udf_get_pblock(struct super_block *sb, uint32_t block,
                               uint16_t partition, uint32_t offset)
{
        if (partition >= UDF_SB_NUMPARTS(sb)) {
                udf_debug("block=%d, partition=%d, offset=%d: invalid partition\n",
                          block, partition, offset);
                return 0xFFFFFFFF;
        }
        if (UDF_SB_PARTFUNC(sb, partition))
                return UDF_SB_PARTFUNC(sb, partition)(sb, block, partition, offset);
        else
                return UDF_SB_PARTROOT(sb, partition) + block + offset;
}
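
/*
 * Map a virtual block through the Virtual Allocation Table (VAT) of a
 * UDF 1.50 virtual partition. Entries that fit in the first VAT block
 * start at s_start_offset; later entries continue in the following
 * blocks of the VAT file. The 32-bit little-endian entry read from the
 * VAT is then translated through the underlying physical partition via
 * udf_get_pblock().
 */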
uint32_t udf_get_pblock_virt15(struct super_block *sb, uint32_t block,
                               uint16_t partition, uint32_t offset)
{
        struct buffer_head *bh = NULL;
        uint32_t newblock;
        uint32_t index;
        uint32_t loc;

        index = (sb->s_blocksize - UDF_SB_TYPEVIRT(sb, partition).s_start_offset) /
                sizeof(uint32_t);

        if (block > UDF_SB_TYPEVIRT(sb, partition).s_num_entries) {
                udf_debug("Trying to access block beyond end of VAT (%d max %d)\n",
                          block, UDF_SB_TYPEVIRT(sb, partition).s_num_entries);
                return 0xFFFFFFFF;
        }

        if (block >= index) {
                block -= index;
                newblock = 1 + (block / (sb->s_blocksize / sizeof(uint32_t)));
                index = block % (sb->s_blocksize / sizeof(uint32_t));
        } else {
                newblock = 0;
                index = UDF_SB_TYPEVIRT(sb, partition).s_start_offset /
                        sizeof(uint32_t) + block;
        }

        loc = udf_block_map(UDF_SB_VAT(sb), newblock);

        if (!(bh = sb_bread(sb, loc))) {
                udf_debug("get_pblock(UDF_VIRTUAL_MAP:%p,%d,%d) VAT: %d[%d]\n",
                          sb, block, partition, loc, index);
                return 0xFFFFFFFF;
        }

        loc = le32_to_cpu(((__le32 *)bh->b_data)[index]);

        brelse(bh);

        if (UDF_I_LOCATION(UDF_SB_VAT(sb)).partitionReferenceNum == partition) {
                udf_debug("recursive call to udf_get_pblock!\n");
                return 0xFFFFFFFF;
        }

        return udf_get_pblock(sb, loc,
                              UDF_I_LOCATION(UDF_SB_VAT(sb)).partitionReferenceNum,
                              offset);
}
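
/*
 * UDF 2.00 virtual partitions are looked up through the same VAT
 * translation as UDF 1.50; the differing on-disc VAT layout is absorbed
 * by s_start_offset, which is established when the VAT is located at
 * mount time.
 */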
inline uint32_t udf_get_pblock_virt20(struct super_block *sb, uint32_t block,
                                      uint16_t partition, uint32_t offset)
{
        return udf_get_pblock_virt15(sb, block, partition, offset);
}
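
/*
 * Map a block in a UDF 1.50 sparable partition. The sparing table
 * redirects defective packets to spare areas: find the packet
 * containing the requested block and, if a table entry matches,
 * return the corresponding location in the spare area; otherwise
 * fall back to the plain physical mapping.
 */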
uint32_t udf_get_pblock_spar15(struct super_block *sb, uint32_t block,
                               uint16_t partition, uint32_t offset)
{
        int i;
        struct sparingTable *st = NULL;
        uint32_t packet = (block + offset) &
                          ~(UDF_SB_TYPESPAR(sb, partition).s_packet_len - 1);

        for (i = 0; i < 4; i++) {
                if (UDF_SB_TYPESPAR(sb, partition).s_spar_map[i] != NULL) {
                        st = (struct sparingTable *)
                                UDF_SB_TYPESPAR(sb, partition).s_spar_map[i]->b_data;
                        break;
                }
        }

        if (st) {
                for (i = 0; i < le16_to_cpu(st->reallocationTableLen); i++) {
                        if (le32_to_cpu(st->mapEntry[i].origLocation) >= 0xFFFFFFF0) {
                                break;
                        } else if (le32_to_cpu(st->mapEntry[i].origLocation) == packet) {
                                return le32_to_cpu(st->mapEntry[i].mappedLocation) +
                                        ((block + offset) &
                                         (UDF_SB_TYPESPAR(sb, partition).s_packet_len - 1));
                        } else if (le32_to_cpu(st->mapEntry[i].origLocation) > packet) {
                                break;
                        }
                }
        }

        return UDF_SB_PARTROOT(sb, partition) + block + offset;
}
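
/*
 * Relocate the packet containing old_block into the spare area of its
 * sparable partition. If the packet is already relocated, return the
 * existing mapping; otherwise claim a free sparing entry (origLocation
 * of 0xFFFFFFFF), keeping the table sorted by original location, and
 * update each on-disc copy of the sparing table. Returns 0 with
 * *new_block set on success, 1 if no sparing entry is available or
 * old_block lies outside any sparable partition.
 */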
int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
{
        struct udf_sparing_data *sdata;
        struct sparingTable *st = NULL;
        struct sparingEntry mapEntry;
        uint32_t packet;
        int i, j, k, l;

        for (i = 0; i < UDF_SB_NUMPARTS(sb); i++) {
                if (old_block > UDF_SB_PARTROOT(sb, i) &&
                    old_block < UDF_SB_PARTROOT(sb, i) + UDF_SB_PARTLEN(sb, i)) {
                        sdata = &UDF_SB_TYPESPAR(sb, i);
                        packet = (old_block - UDF_SB_PARTROOT(sb, i)) &
                                 ~(sdata->s_packet_len - 1);

                        for (j = 0; j < 4; j++) {
                                if (UDF_SB_TYPESPAR(sb, i).s_spar_map[j] != NULL) {
                                        st = (struct sparingTable *)
                                                sdata->s_spar_map[j]->b_data;
                                        break;
                                }
                        }

                        if (!st)
                                return 1;

                        for (k = 0; k < le16_to_cpu(st->reallocationTableLen); k++) {
                                if (le32_to_cpu(st->mapEntry[k].origLocation) == 0xFFFFFFFF) {
                                        for (; j < 4; j++) {
                                                if (sdata->s_spar_map[j]) {
                                                        st = (struct sparingTable *)
                                                                sdata->s_spar_map[j]->b_data;
                                                        st->mapEntry[k].origLocation =
                                                                cpu_to_le32(packet);
                                                        udf_update_tag((char *)st,
                                                                sizeof(struct sparingTable) +
                                                                le16_to_cpu(st->reallocationTableLen) *
                                                                sizeof(struct sparingEntry));
                                                        mark_buffer_dirty(sdata->s_spar_map[j]);
                                                }
                                        }
                                        *new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) +
                                                ((old_block - UDF_SB_PARTROOT(sb, i)) &
                                                 (sdata->s_packet_len - 1));
                                        return 0;
                                } else if (le32_to_cpu(st->mapEntry[k].origLocation) == packet) {
                                        *new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) +
                                                ((old_block - UDF_SB_PARTROOT(sb, i)) &
                                                 (sdata->s_packet_len - 1));
                                        return 0;
                                } else if (le32_to_cpu(st->mapEntry[k].origLocation) > packet) {
                                        break;
                                }
                        }

                        for (l = k; l < le16_to_cpu(st->reallocationTableLen); l++) {
                                if (le32_to_cpu(st->mapEntry[l].origLocation) == 0xFFFFFFFF) {
                                        for (; j < 4; j++) {
                                                if (sdata->s_spar_map[j]) {
                                                        st = (struct sparingTable *)
                                                                sdata->s_spar_map[j]->b_data;
                                                        mapEntry = st->mapEntry[l];
                                                        mapEntry.origLocation =
                                                                cpu_to_le32(packet);
                                                        memmove(&st->mapEntry[k + 1],
                                                                &st->mapEntry[k],
                                                                (l - k) * sizeof(struct sparingEntry));
                                                        st->mapEntry[k] = mapEntry;
                                                        udf_update_tag((char *)st,
                                                                sizeof(struct sparingTable) +
                                                                le16_to_cpu(st->reallocationTableLen) *
                                                                sizeof(struct sparingEntry));
                                                        mark_buffer_dirty(sdata->s_spar_map[j]);
                                                }
                                        }
                                        *new_block = le32_to_cpu(st->mapEntry[k].mappedLocation) +
                                                ((old_block - UDF_SB_PARTROOT(sb, i)) &
                                                 (sdata->s_packet_len - 1));
                                        return 0;
                                }
                        }

                        return 1;
                } /* if old_block */
        }

        if (i == UDF_SB_NUMPARTS(sb)) {
                /* outside of partitions */
                /* for now, fail =) */
                return 1;
        }

        return 0;
}