ialloc.c 4.4 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176
  1. /*
  2. * ialloc.c
  3. *
  4. * PURPOSE
  5. * Inode allocation handling routines for the OSTA-UDF(tm) filesystem.
  6. *
  7. * COPYRIGHT
  8. * This file is distributed under the terms of the GNU General Public
  9. * License (GPL). Copies of the GPL can be obtained from:
  10. * ftp://prep.ai.mit.edu/pub/gnu/GPL
  11. * Each contributing author retains all rights to their own work.
  12. *
  13. * (C) 1998-2001 Ben Fennema
  14. *
  15. * HISTORY
  16. *
  17. * 02/24/99 blf Created.
  18. *
  19. */
  20. #include "udfdecl.h"
  21. #include <linux/fs.h>
  22. #include <linux/quotaops.h>
  23. #include <linux/udf_fs.h>
  24. #include <linux/sched.h>
  25. #include <linux/slab.h>
  26. #include "udf_i.h"
  27. #include "udf_sb.h"
/*
 * udf_free_inode - release an inode's on-disk resources at final deletion.
 *
 * Drops quota, clears the VFS inode, decrements the file/directory count
 * in the Logical Volume Integrity Descriptor (if present), and finally
 * frees the inode's ICB block back to the partition's free-space map.
 */
void udf_free_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct udf_sb_info *sbi = UDF_SB(sb);

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	DQUOT_FREE_INODE(inode);
	DQUOT_DROP(inode);

	clear_inode(inode);

	/* s_alloc_mutex serializes updates to the shared LVID buffer. */
	mutex_lock(&sbi->s_alloc_mutex);
	if (sbi->s_lvid_bh) {
		struct logicalVolIntegrityDescImpUse *lvidiu =
			udf_sb_lvidiu(sbi);
		/* LVID counters are little-endian on disk; decrement in CPU order. */
		if (S_ISDIR(inode->i_mode))
			lvidiu->numDirs =
				cpu_to_le32(le32_to_cpu(lvidiu->numDirs) - 1);
		else
			lvidiu->numFiles =
				cpu_to_le32(le32_to_cpu(lvidiu->numFiles) - 1);
		mark_buffer_dirty(sbi->s_lvid_bh);
	}
	mutex_unlock(&sbi->s_alloc_mutex);

	/* Return the single block holding this inode's ICB to the free map. */
	udf_free_blocks(sb, NULL, UDF_I_LOCATION(inode), 0, 1);
}
  54. struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
  55. {
  56. struct super_block *sb = dir->i_sb;
  57. struct udf_sb_info *sbi = UDF_SB(sb);
  58. struct inode *inode;
  59. int block;
  60. uint32_t start = UDF_I_LOCATION(dir).logicalBlockNum;
  61. inode = new_inode(sb);
  62. if (!inode) {
  63. *err = -ENOMEM;
  64. return NULL;
  65. }
  66. *err = -ENOSPC;
  67. UDF_I_UNIQUE(inode) = 0;
  68. UDF_I_LENEXTENTS(inode) = 0;
  69. UDF_I_NEXT_ALLOC_BLOCK(inode) = 0;
  70. UDF_I_NEXT_ALLOC_GOAL(inode) = 0;
  71. UDF_I_STRAT4096(inode) = 0;
  72. block = udf_new_block(dir->i_sb, NULL,
  73. UDF_I_LOCATION(dir).partitionReferenceNum,
  74. start, err);
  75. if (*err) {
  76. iput(inode);
  77. return NULL;
  78. }
  79. mutex_lock(&sbi->s_alloc_mutex);
  80. if (sbi->s_lvid_bh) {
  81. struct logicalVolIntegrityDesc *lvid =
  82. (struct logicalVolIntegrityDesc *)
  83. sbi->s_lvid_bh->b_data;
  84. struct logicalVolIntegrityDescImpUse *lvidiu =
  85. udf_sb_lvidiu(sbi);
  86. struct logicalVolHeaderDesc *lvhd;
  87. uint64_t uniqueID;
  88. lvhd = (struct logicalVolHeaderDesc *)
  89. (lvid->logicalVolContentsUse);
  90. if (S_ISDIR(mode))
  91. lvidiu->numDirs =
  92. cpu_to_le32(le32_to_cpu(lvidiu->numDirs) + 1);
  93. else
  94. lvidiu->numFiles =
  95. cpu_to_le32(le32_to_cpu(lvidiu->numFiles) + 1);
  96. UDF_I_UNIQUE(inode) = uniqueID = le64_to_cpu(lvhd->uniqueID);
  97. if (!(++uniqueID & 0x00000000FFFFFFFFUL))
  98. uniqueID += 16;
  99. lvhd->uniqueID = cpu_to_le64(uniqueID);
  100. mark_buffer_dirty(sbi->s_lvid_bh);
  101. }
  102. inode->i_mode = mode;
  103. inode->i_uid = current->fsuid;
  104. if (dir->i_mode & S_ISGID) {
  105. inode->i_gid = dir->i_gid;
  106. if (S_ISDIR(mode))
  107. mode |= S_ISGID;
  108. } else {
  109. inode->i_gid = current->fsgid;
  110. }
  111. UDF_I_LOCATION(inode).logicalBlockNum = block;
  112. UDF_I_LOCATION(inode).partitionReferenceNum =
  113. UDF_I_LOCATION(dir).partitionReferenceNum;
  114. inode->i_ino = udf_get_lb_pblock(sb, UDF_I_LOCATION(inode), 0);
  115. inode->i_blocks = 0;
  116. UDF_I_LENEATTR(inode) = 0;
  117. UDF_I_LENALLOC(inode) = 0;
  118. UDF_I_USE(inode) = 0;
  119. if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE)) {
  120. UDF_I_EFE(inode) = 1;
  121. if (UDF_VERS_USE_EXTENDED_FE > sbi->s_udfrev)
  122. sbi->s_udfrev = UDF_VERS_USE_EXTENDED_FE;
  123. UDF_I_DATA(inode) = kzalloc(inode->i_sb->s_blocksize -
  124. sizeof(struct extendedFileEntry),
  125. GFP_KERNEL);
  126. } else {
  127. UDF_I_EFE(inode) = 0;
  128. UDF_I_DATA(inode) = kzalloc(inode->i_sb->s_blocksize -
  129. sizeof(struct fileEntry),
  130. GFP_KERNEL);
  131. }
  132. if (!UDF_I_DATA(inode)) {
  133. iput(inode);
  134. *err = -ENOMEM;
  135. mutex_unlock(&sbi->s_alloc_mutex);
  136. return NULL;
  137. }
  138. if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_AD_IN_ICB))
  139. UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_IN_ICB;
  140. else if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
  141. UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_SHORT;
  142. else
  143. UDF_I_ALLOCTYPE(inode) = ICBTAG_FLAG_AD_LONG;
  144. inode->i_mtime = inode->i_atime = inode->i_ctime =
  145. UDF_I_CRTIME(inode) = current_fs_time(inode->i_sb);
  146. insert_inode_hash(inode);
  147. mark_inode_dirty(inode);
  148. mutex_unlock(&sbi->s_alloc_mutex);
  149. if (DQUOT_ALLOC_INODE(inode)) {
  150. DQUOT_DROP(inode);
  151. inode->i_flags |= S_NOQUOTA;
  152. inode->i_nlink = 0;
  153. iput(inode);
  154. *err = -EDQUOT;
  155. return NULL;
  156. }
  157. *err = 0;
  158. return inode;
  159. }