/*
 * ialloc.c
 *
 * PURPOSE
 *	Inode allocation handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *	This file is distributed under the terms of the GNU General Public
 *	License (GPL). Copies of the GPL can be obtained from:
 *		ftp://prep.ai.mit.edu/pub/gnu/GPL
 *	Each contributing author retains all rights to their own work.
 *
 *  (C) 1998-2001 Ben Fennema
 *
 * HISTORY
 *
 *  02/24/99 blf  Created.
 *
 */

#include "udfdecl.h"
#include <linux/fs.h>
#include <linux/quotaops.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "udf_i.h"
#include "udf_sb.h"
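
/*
 * Free an in-core UDF inode: return its quota, clear the VFS inode, drop
 * the directory or file count in the LVID implementation-use area when a
 * Logical Volume Integrity Descriptor is present, and give the block that
 * held the (extended) file entry back to the partition.
 */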
void udf_free_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct udf_sb_info *sbi = UDF_SB(sb);

	/*
	 * Note: we must free any quota before locking the superblock,
	 * as writing the quota to disk may need the lock as well.
	 */
	vfs_dq_free_inode(inode);
	vfs_dq_drop(inode);

	clear_inode(inode);

	mutex_lock(&sbi->s_alloc_mutex);
	if (sbi->s_lvid_bh) {
		struct logicalVolIntegrityDescImpUse *lvidiu =
							udf_sb_lvidiu(sbi);
		if (S_ISDIR(inode->i_mode))
			le32_add_cpu(&lvidiu->numDirs, -1);
		else
			le32_add_cpu(&lvidiu->numFiles, -1);
		udf_updated_lvid(sb);
	}
	mutex_unlock(&sbi->s_alloc_mutex);

	udf_free_blocks(sb, NULL, &UDF_I(inode)->i_location, 0, 1);
}
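
/*
 * Allocate a new inode under the directory 'dir': grab a free block near
 * the parent in the same partition for the (extended) file entry, update
 * the LVID object counts and take the next uniqueID, set up ownership,
 * allocation type and timestamps, and charge the inode to quota.
 * Returns the new inode, or NULL with *err set on failure.
 */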
struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
{
	struct super_block *sb = dir->i_sb;
	struct udf_sb_info *sbi = UDF_SB(sb);
	struct inode *inode;
	int block;
	uint32_t start = UDF_I(dir)->i_location.logicalBlockNum;
	struct udf_inode_info *iinfo;
	struct udf_inode_info *dinfo = UDF_I(dir);

	inode = new_inode(sb);

	if (!inode) {
		*err = -ENOMEM;
		return NULL;
	}
	*err = -ENOSPC;

	iinfo = UDF_I(inode);
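	/*
	 * Choose the on-disk file entry format: extended file entries
	 * (UDF 2.00+) when the mount asked for them, plain file entries
	 * otherwise.  i_ext.i_data buffers the part of the entry that
	 * follows the fixed header within the file entry block.
	 */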
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE)) {
		iinfo->i_efe = 1;
		if (UDF_VERS_USE_EXTENDED_FE > sbi->s_udfrev)
			sbi->s_udfrev = UDF_VERS_USE_EXTENDED_FE;
		iinfo->i_ext.i_data = kzalloc(inode->i_sb->s_blocksize -
					      sizeof(struct extendedFileEntry),
					      GFP_KERNEL);
	} else {
		iinfo->i_efe = 0;
		iinfo->i_ext.i_data = kzalloc(inode->i_sb->s_blocksize -
					      sizeof(struct fileEntry),
					      GFP_KERNEL);
	}
	if (!iinfo->i_ext.i_data) {
		iput(inode);
		*err = -ENOMEM;
		return NULL;
	}

	block = udf_new_block(dir->i_sb, NULL,
			      dinfo->i_location.partitionReferenceNum,
			      start, err);
	if (*err) {
		iput(inode);
		return NULL;
	}
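
	/*
	 * Account the new object in the Logical Volume Integrity Descriptor
	 * and hand out the next uniqueID; when its low 32 bits wrap to zero
	 * they skip ahead to 16, keeping clear of the low values UDF
	 * reserves.
	 */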
	mutex_lock(&sbi->s_alloc_mutex);
	if (sbi->s_lvid_bh) {
		struct logicalVolIntegrityDesc *lvid =
			(struct logicalVolIntegrityDesc *)
			sbi->s_lvid_bh->b_data;
		struct logicalVolIntegrityDescImpUse *lvidiu =
							udf_sb_lvidiu(sbi);
		struct logicalVolHeaderDesc *lvhd;
		uint64_t uniqueID;

		lvhd = (struct logicalVolHeaderDesc *)
				(lvid->logicalVolContentsUse);
		if (S_ISDIR(mode))
			le32_add_cpu(&lvidiu->numDirs, 1);
		else
			le32_add_cpu(&lvidiu->numFiles, 1);
		iinfo->i_unique = uniqueID = le64_to_cpu(lvhd->uniqueID);
		if (!(++uniqueID & 0x00000000FFFFFFFFUL))
			uniqueID += 16;
		lvhd->uniqueID = cpu_to_le64(uniqueID);
		udf_updated_lvid(sb);
	}
	mutex_unlock(&sbi->s_alloc_mutex);
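
	/*
	 * Ownership: uid comes from the creating process; gid (and, for new
	 * directories, the setgid bit) is inherited from a setgid parent,
	 * otherwise the process fsgid is used.
	 */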
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	if (dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			inode->i_mode |= S_ISGID;
	} else {
		inode->i_gid = current_fsgid();
	}

	iinfo->i_location.logicalBlockNum = block;
	iinfo->i_location.partitionReferenceNum =
				dinfo->i_location.partitionReferenceNum;
	inode->i_ino = udf_get_lb_pblock(sb, &iinfo->i_location, 0);
	inode->i_blocks = 0;
	iinfo->i_lenEAttr = 0;
	iinfo->i_lenAlloc = 0;
	iinfo->i_use = 0;
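	/*
	 * Allocation descriptors for file data: kept inline in the ICB, or
	 * stored as short/long allocation descriptors, as selected by mount
	 * options.
	 */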
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_AD_IN_ICB))
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
	else if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
	else
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
	inode->i_mtime = inode->i_atime = inode->i_ctime =
		iinfo->i_crtime = current_fs_time(inode->i_sb);
	insert_inode_hash(inode);
	mark_inode_dirty(inode);
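
	/*
	 * Charge the inode to quota last; on failure, drop the quota
	 * reference and zero i_nlink so that iput() simply discards the
	 * never-linked inode.
	 */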
	if (vfs_dq_alloc_inode(inode)) {
		vfs_dq_drop(inode);
		inode->i_flags |= S_NOQUOTA;
		inode->i_nlink = 0;
		iput(inode);
		*err = -EDQUOT;
		return NULL;
	}

	*err = 0;
	return inode;
}
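
/*
 * A minimal sketch of how a caller such as udf_create() in namei.c is
 * expected to use udf_new_inode() (illustrative only; a real caller also
 * wires up the inode/file operations and builds the directory entry):
 *
 *	int err;
 *	struct inode *inode = udf_new_inode(dir, mode, &err);
 *
 *	if (!inode)
 *		return err;
 *	inode->i_data.a_ops = &udf_aops;
 *	inode->i_op = &udf_file_inode_operations;
 *	inode->i_fop = &udf_file_operations;
 *	mark_inode_dirty(inode);
 *	...
 */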