/*
 * linux/fs/hfs/inode.c
 *
 * Copyright (C) 1995-1997 Paul H. Hargrove
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 * This file may be distributed under the terms of the GNU General Public License.
 *
 * This file contains inode-related functions which do not depend on
 * which scheme is being used to represent forks.
 *
 * Based on the minix file system code, (C) 1991, 1992 by Linus Torvalds
 */

#include <linux/pagemap.h>
#include <linux/mpage.h>
#include <linux/sched.h>

#include "hfs_fs.h"
#include "btree.h"

static const struct file_operations hfs_file_operations;
static const struct inode_operations hfs_file_inode_operations;

/*================ Variable-like macros ================*/

#define HFS_VALID_MODE_BITS  (S_IFREG | S_IFDIR | S_IRWXUGO)
static int hfs_writepage(struct page *page, struct writeback_control *wbc)
{
	return block_write_full_page(page, hfs_get_block, wbc);
}

static int hfs_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, hfs_get_block);
}

static void hfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, to, inode->i_size);
		hfs_file_truncate(inode);
	}
}

static int hfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	int ret;

	*pagep = NULL;
	ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
				hfs_get_block,
				&HFS_I(mapping->host)->phys_size);
	if (unlikely(ret))
		hfs_write_failed(mapping, pos + len);

	return ret;
}
static sector_t hfs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, hfs_get_block);
}

/*
 * Drop any unreferenced B-tree nodes cached for this page before letting
 * the VM free the page's buffers; refuse if a node is still in use.
 */
static int hfs_releasepage(struct page *page, gfp_t mask)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct hfs_btree *tree;
	struct hfs_bnode *node;
	u32 nidx;
	int i, res = 1;

	switch (inode->i_ino) {
	case HFS_EXT_CNID:
		tree = HFS_SB(sb)->ext_tree;
		break;
	case HFS_CAT_CNID:
		tree = HFS_SB(sb)->cat_tree;
		break;
	default:
		BUG();
		return 0;
	}

	if (!tree)
		return 0;

	if (tree->node_size >= PAGE_CACHE_SIZE) {
		nidx = page->index >> (tree->node_size_shift - PAGE_CACHE_SHIFT);
		spin_lock(&tree->hash_lock);
		node = hfs_bnode_findhash(tree, nidx);
		if (!node)
			;
		else if (atomic_read(&node->refcnt))
			res = 0;
		if (res && node) {
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		}
		spin_unlock(&tree->hash_lock);
	} else {
		nidx = page->index << (PAGE_CACHE_SHIFT - tree->node_size_shift);
		i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift);
		spin_lock(&tree->hash_lock);
		do {
			node = hfs_bnode_findhash(tree, nidx++);
			if (!node)
				continue;
			if (atomic_read(&node->refcnt)) {
				res = 0;
				break;
			}
			hfs_bnode_unhash(node);
			hfs_bnode_free(node);
		} while (--i && nidx < tree->node_count);
		spin_unlock(&tree->hash_lock);
	}
	return res ? try_to_free_buffers(page) : 0;
}
static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = file_inode(file)->i_mapping->host;
	ssize_t ret;

	ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
				 hfs_get_block);

	/*
	 * In case of error extending write may have instantiated a few
	 * blocks outside i_size. Trim these off again.
	 */
	if (unlikely((rw & WRITE) && ret < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = offset + iov_length(iov, nr_segs);

		if (end > isize)
			hfs_write_failed(mapping, end);
	}

	return ret;
}

static int hfs_writepages(struct address_space *mapping,
			  struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, hfs_get_block);
}

const struct address_space_operations hfs_btree_aops = {
	.readpage	= hfs_readpage,
	.writepage	= hfs_writepage,
	.write_begin	= hfs_write_begin,
	.write_end	= generic_write_end,
	.bmap		= hfs_bmap,
	.releasepage	= hfs_releasepage,
};

const struct address_space_operations hfs_aops = {
	.readpage	= hfs_readpage,
	.writepage	= hfs_writepage,
	.write_begin	= hfs_write_begin,
	.write_end	= generic_write_end,
	.bmap		= hfs_bmap,
	.direct_IO	= hfs_direct_IO,
	.writepages	= hfs_writepages,
};
/*
 * hfs_new_inode
 */
struct inode *hfs_new_inode(struct inode *dir, struct qstr *name, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct inode *inode = new_inode(sb);

	if (!inode)
		return NULL;

	mutex_init(&HFS_I(inode)->extents_lock);
	INIT_LIST_HEAD(&HFS_I(inode)->open_dir_list);
	hfs_cat_build_key(sb, (btree_key *)&HFS_I(inode)->cat_key, dir->i_ino, name);
	inode->i_ino = HFS_SB(sb)->next_id++;
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	set_nlink(inode, 1);
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
	HFS_I(inode)->flags = 0;
	HFS_I(inode)->rsrc_inode = NULL;
	HFS_I(inode)->fs_blocks = 0;
	if (S_ISDIR(mode)) {
		inode->i_size = 2;
		HFS_SB(sb)->folder_count++;
		if (dir->i_ino == HFS_ROOT_CNID)
			HFS_SB(sb)->root_dirs++;
		inode->i_op = &hfs_dir_inode_operations;
		inode->i_fop = &hfs_dir_operations;
		inode->i_mode |= S_IRWXUGO;
		inode->i_mode &= ~HFS_SB(inode->i_sb)->s_dir_umask;
	} else if (S_ISREG(mode)) {
		HFS_I(inode)->clump_blocks = HFS_SB(sb)->clumpablks;
		HFS_SB(sb)->file_count++;
		if (dir->i_ino == HFS_ROOT_CNID)
			HFS_SB(sb)->root_files++;
		inode->i_op = &hfs_file_inode_operations;
		inode->i_fop = &hfs_file_operations;
		inode->i_mapping->a_ops = &hfs_aops;
		inode->i_mode |= S_IRUGO|S_IXUGO;
		if (mode & S_IWUSR)
			inode->i_mode |= S_IWUGO;
		inode->i_mode &= ~HFS_SB(inode->i_sb)->s_file_umask;
		HFS_I(inode)->phys_size = 0;
		HFS_I(inode)->alloc_blocks = 0;
		HFS_I(inode)->first_blocks = 0;
		HFS_I(inode)->cached_start = 0;
		HFS_I(inode)->cached_blocks = 0;
		memset(HFS_I(inode)->first_extents, 0, sizeof(hfs_extent_rec));
		memset(HFS_I(inode)->cached_extents, 0, sizeof(hfs_extent_rec));
	}
	insert_inode_hash(inode);
	mark_inode_dirty(inode);
	set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
	hfs_mark_mdb_dirty(sb);

	return inode;
}
void hfs_delete_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	dprint(DBG_INODE, "delete_inode: %lu\n", inode->i_ino);
	if (S_ISDIR(inode->i_mode)) {
		HFS_SB(sb)->folder_count--;
		if (HFS_I(inode)->cat_key.ParID == cpu_to_be32(HFS_ROOT_CNID))
			HFS_SB(sb)->root_dirs--;
		set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
		hfs_mark_mdb_dirty(sb);
		return;
	}
	HFS_SB(sb)->file_count--;
	if (HFS_I(inode)->cat_key.ParID == cpu_to_be32(HFS_ROOT_CNID))
		HFS_SB(sb)->root_files--;
	if (S_ISREG(inode->i_mode)) {
		if (!inode->i_nlink) {
			inode->i_size = 0;
			hfs_file_truncate(inode);
		}
	}
	set_bit(HFS_FLG_MDB_DIRTY, &HFS_SB(sb)->flags);
	hfs_mark_mdb_dirty(sb);
}

void hfs_inode_read_fork(struct inode *inode, struct hfs_extent *ext,
			 __be32 __log_size, __be32 phys_size, u32 clump_size)
{
	struct super_block *sb = inode->i_sb;
	u32 log_size = be32_to_cpu(__log_size);
	u16 count;
	int i;

	memcpy(HFS_I(inode)->first_extents, ext, sizeof(hfs_extent_rec));
	for (count = 0, i = 0; i < 3; i++)
		count += be16_to_cpu(ext[i].count);
	HFS_I(inode)->first_blocks = count;

	inode->i_size = HFS_I(inode)->phys_size = log_size;
	HFS_I(inode)->fs_blocks = (log_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
	inode_set_bytes(inode, HFS_I(inode)->fs_blocks << sb->s_blocksize_bits);
	HFS_I(inode)->alloc_blocks = be32_to_cpu(phys_size) /
			HFS_SB(sb)->alloc_blksz;
	HFS_I(inode)->clump_blocks = clump_size / HFS_SB(sb)->alloc_blksz;
	if (!HFS_I(inode)->clump_blocks)
		HFS_I(inode)->clump_blocks = HFS_SB(sb)->clumpablks;
}
struct hfs_iget_data {
	struct hfs_cat_key *key;
	hfs_cat_rec *rec;
};

static int hfs_test_inode(struct inode *inode, void *data)
{
	struct hfs_iget_data *idata = data;
	hfs_cat_rec *rec;

	rec = idata->rec;
	switch (rec->type) {
	case HFS_CDR_DIR:
		return inode->i_ino == be32_to_cpu(rec->dir.DirID);
	case HFS_CDR_FIL:
		return inode->i_ino == be32_to_cpu(rec->file.FlNum);
	default:
		BUG();
		return 1;
	}
}

/*
 * hfs_read_inode
 */
static int hfs_read_inode(struct inode *inode, void *data)
{
	struct hfs_iget_data *idata = data;
	struct hfs_sb_info *hsb = HFS_SB(inode->i_sb);
	hfs_cat_rec *rec;

	HFS_I(inode)->flags = 0;
	HFS_I(inode)->rsrc_inode = NULL;
	mutex_init(&HFS_I(inode)->extents_lock);
	INIT_LIST_HEAD(&HFS_I(inode)->open_dir_list);

	/* Initialize the inode */
	inode->i_uid = hsb->s_uid;
	inode->i_gid = hsb->s_gid;
	set_nlink(inode, 1);

	if (idata->key)
		HFS_I(inode)->cat_key = *idata->key;
	else
		HFS_I(inode)->flags |= HFS_FLG_RSRC;
	HFS_I(inode)->tz_secondswest = sys_tz.tz_minuteswest * 60;

	rec = idata->rec;
	switch (rec->type) {
	case HFS_CDR_FIL:
		if (!HFS_IS_RSRC(inode)) {
			hfs_inode_read_fork(inode, rec->file.ExtRec, rec->file.LgLen,
					    rec->file.PyLen, be16_to_cpu(rec->file.ClpSize));
		} else {
			hfs_inode_read_fork(inode, rec->file.RExtRec, rec->file.RLgLen,
					    rec->file.RPyLen, be16_to_cpu(rec->file.ClpSize));
		}

		inode->i_ino = be32_to_cpu(rec->file.FlNum);
		inode->i_mode = S_IRUGO | S_IXUGO;
		if (!(rec->file.Flags & HFS_FIL_LOCK))
			inode->i_mode |= S_IWUGO;
		inode->i_mode &= ~hsb->s_file_umask;
		inode->i_mode |= S_IFREG;
		inode->i_ctime = inode->i_atime = inode->i_mtime =
				hfs_m_to_utime(rec->file.MdDat);
		inode->i_op = &hfs_file_inode_operations;
		inode->i_fop = &hfs_file_operations;
		inode->i_mapping->a_ops = &hfs_aops;
		break;
	case HFS_CDR_DIR:
		inode->i_ino = be32_to_cpu(rec->dir.DirID);
		inode->i_size = be16_to_cpu(rec->dir.Val) + 2;
		HFS_I(inode)->fs_blocks = 0;
		inode->i_mode = S_IFDIR | (S_IRWXUGO & ~hsb->s_dir_umask);
		inode->i_ctime = inode->i_atime = inode->i_mtime =
				hfs_m_to_utime(rec->dir.MdDat);
		inode->i_op = &hfs_dir_inode_operations;
		inode->i_fop = &hfs_dir_operations;
		break;
	default:
		make_bad_inode(inode);
	}
	return 0;
}
/*
 * hfs_iget()
 *
 * Given the superblock of an HFS filesystem and a 'key' and catalog
 * record ('rec') from the catalog B-tree, look up (or read in) the
 * inode for that file or directory and return it, or NULL on failure.
 */
struct inode *hfs_iget(struct super_block *sb, struct hfs_cat_key *key, hfs_cat_rec *rec)
{
	struct hfs_iget_data data = { key, rec };
	struct inode *inode;
	u32 cnid;

	switch (rec->type) {
	case HFS_CDR_DIR:
		cnid = be32_to_cpu(rec->dir.DirID);
		break;
	case HFS_CDR_FIL:
		cnid = be32_to_cpu(rec->file.FlNum);
		break;
	default:
		return NULL;
	}
	inode = iget5_locked(sb, cnid, hfs_test_inode, hfs_read_inode, &data);
	if (inode && (inode->i_state & I_NEW))
		unlock_new_inode(inode);
	return inode;
}
void hfs_inode_write_fork(struct inode *inode, struct hfs_extent *ext,
			  __be32 *log_size, __be32 *phys_size)
{
	memcpy(ext, HFS_I(inode)->first_extents, sizeof(hfs_extent_rec));

	if (log_size)
		*log_size = cpu_to_be32(inode->i_size);
	if (phys_size)
		*phys_size = cpu_to_be32(HFS_I(inode)->alloc_blocks *
					 HFS_SB(inode->i_sb)->alloc_blksz);
}

int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct inode *main_inode = inode;
	struct hfs_find_data fd;
	hfs_cat_rec rec;

	dprint(DBG_INODE, "hfs_write_inode: %lu\n", inode->i_ino);
	hfs_ext_write_extent(inode);

	if (inode->i_ino < HFS_FIRSTUSER_CNID) {
		switch (inode->i_ino) {
		case HFS_ROOT_CNID:
			break;
		case HFS_EXT_CNID:
			hfs_btree_write(HFS_SB(inode->i_sb)->ext_tree);
			return 0;
		case HFS_CAT_CNID:
			hfs_btree_write(HFS_SB(inode->i_sb)->cat_tree);
			return 0;
		default:
			BUG();
			return -EIO;
		}
	}

	if (HFS_IS_RSRC(inode))
		main_inode = HFS_I(inode)->rsrc_inode;

	if (!main_inode->i_nlink)
		return 0;

	if (hfs_find_init(HFS_SB(main_inode->i_sb)->cat_tree, &fd))
		/* panic? */
		return -EIO;

	fd.search_key->cat = HFS_I(main_inode)->cat_key;
	if (hfs_brec_find(&fd))
		/* panic? */
		goto out;

	if (S_ISDIR(main_inode->i_mode)) {
		if (fd.entrylength < sizeof(struct hfs_cat_dir))
			/* panic? */;
		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
			       sizeof(struct hfs_cat_dir));
		if (rec.type != HFS_CDR_DIR ||
		    be32_to_cpu(rec.dir.DirID) != inode->i_ino) {
		}

		rec.dir.MdDat = hfs_u_to_mtime(inode->i_mtime);
		rec.dir.Val = cpu_to_be16(inode->i_size - 2);

		hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
				sizeof(struct hfs_cat_dir));
	} else if (HFS_IS_RSRC(inode)) {
		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
			       sizeof(struct hfs_cat_file));
		hfs_inode_write_fork(inode, rec.file.RExtRec,
				     &rec.file.RLgLen, &rec.file.RPyLen);
		hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
				sizeof(struct hfs_cat_file));
	} else {
		if (fd.entrylength < sizeof(struct hfs_cat_file))
			/* panic? */;
		hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
			       sizeof(struct hfs_cat_file));
		if (rec.type != HFS_CDR_FIL ||
		    be32_to_cpu(rec.file.FlNum) != inode->i_ino) {
		}

		if (inode->i_mode & S_IWUSR)
			rec.file.Flags &= ~HFS_FIL_LOCK;
		else
			rec.file.Flags |= HFS_FIL_LOCK;
		hfs_inode_write_fork(inode, rec.file.ExtRec, &rec.file.LgLen, &rec.file.PyLen);
		rec.file.MdDat = hfs_u_to_mtime(inode->i_mtime);

		hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
				sizeof(struct hfs_cat_file));
	}
out:
	hfs_find_exit(&fd);
	return 0;
}
static struct dentry *hfs_file_lookup(struct inode *dir, struct dentry *dentry,
				      unsigned int flags)
{
	struct inode *inode = NULL;
	hfs_cat_rec rec;
	struct hfs_find_data fd;
	int res;

	if (HFS_IS_RSRC(dir) || strcmp(dentry->d_name.name, "rsrc"))
		goto out;

	inode = HFS_I(dir)->rsrc_inode;
	if (inode)
		goto out;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	hfs_find_init(HFS_SB(dir->i_sb)->cat_tree, &fd);
	fd.search_key->cat = HFS_I(dir)->cat_key;
	res = hfs_brec_read(&fd, &rec, sizeof(rec));
	if (!res) {
		struct hfs_iget_data idata = { NULL, &rec };
		hfs_read_inode(inode, &idata);
	}
	hfs_find_exit(&fd);
	if (res) {
		iput(inode);
		return ERR_PTR(res);
	}
	HFS_I(inode)->rsrc_inode = dir;
	HFS_I(dir)->rsrc_inode = inode;
	igrab(dir);
	hlist_add_fake(&inode->i_hash);
	mark_inode_dirty(inode);
out:
	d_add(dentry, inode);
	return NULL;
}

void hfs_evict_inode(struct inode *inode)
{
	truncate_inode_pages(&inode->i_data, 0);
	clear_inode(inode);
	if (HFS_IS_RSRC(inode) && HFS_I(inode)->rsrc_inode) {
		HFS_I(HFS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
		iput(HFS_I(inode)->rsrc_inode);
	}
}
static int hfs_file_open(struct inode *inode, struct file *file)
{
	if (HFS_IS_RSRC(inode))
		inode = HFS_I(inode)->rsrc_inode;
	atomic_inc(&HFS_I(inode)->opencnt);
	return 0;
}

static int hfs_file_release(struct inode *inode, struct file *file)
{
	//struct super_block *sb = inode->i_sb;

	if (HFS_IS_RSRC(inode))
		inode = HFS_I(inode)->rsrc_inode;
	if (atomic_dec_and_test(&HFS_I(inode)->opencnt)) {
		mutex_lock(&inode->i_mutex);
		hfs_file_truncate(inode);
		//if (inode->i_flags & S_DEAD) {
		//	hfs_delete_cat(inode->i_ino, HFSPLUS_SB(sb).hidden_dir, NULL);
		//	hfs_delete_inode(inode);
		//}
		mutex_unlock(&inode->i_mutex);
	}
	return 0;
}
/*
 * hfs_inode_setattr()
 *
 * Based very closely on fs/msdos/inode.c by Werner Almesberger
 *
 * This is the ->setattr() method in the inode_operations structure
 * for HFS file systems. The purpose is to take the changes made to
 * an inode and apply them in a filesystem-dependent manner. In this
 * case the process has a few tasks to do:
 *  1) prevent changes to the i_uid and i_gid fields.
 *  2) map file permissions to the closest allowable permissions.
 *  3) Since multiple Linux files can share the same on-disk inode under
 *     HFS (for instance the data and resource forks of a file) a change
 *     to permissions must be applied to all other in-core inodes which
 *     correspond to the same HFS file.
 */
int hfs_inode_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct hfs_sb_info *hsb = HFS_SB(inode->i_sb);
	int error;

	error = inode_change_ok(inode, attr); /* basic permission checks */
	if (error)
		return error;

	/* no uid/gid changes and limit which mode bits can be set */
	if (((attr->ia_valid & ATTR_UID) &&
	     (!uid_eq(attr->ia_uid, hsb->s_uid))) ||
	    ((attr->ia_valid & ATTR_GID) &&
	     (!gid_eq(attr->ia_gid, hsb->s_gid))) ||
	    ((attr->ia_valid & ATTR_MODE) &&
	     ((S_ISDIR(inode->i_mode) &&
	       (attr->ia_mode != inode->i_mode)) ||
	      (attr->ia_mode & ~HFS_VALID_MODE_BITS)))) {
		return hsb->s_quiet ? 0 : error;
	}

	if (attr->ia_valid & ATTR_MODE) {
		/* Only the 'w' bits can ever change and only all together. */
		if (attr->ia_mode & S_IWUSR)
			attr->ia_mode = inode->i_mode | S_IWUGO;
		else
			attr->ia_mode = inode->i_mode & ~S_IWUGO;
		attr->ia_mode &= S_ISDIR(inode->i_mode) ?
					~hsb->s_dir_umask : ~hsb->s_file_umask;
	}

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);

		error = inode_newsize_ok(inode, attr->ia_size);
		if (error)
			return error;

		truncate_setsize(inode, attr->ia_size);
		hfs_file_truncate(inode);
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}
static int hfs_file_fsync(struct file *filp, loff_t start, loff_t end,
			  int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	struct super_block *sb;
	int ret, err;

	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	mutex_lock(&inode->i_mutex);

	/* sync the inode to buffers */
	ret = write_inode_now(inode, 0);

	/* sync the superblock to buffers */
	sb = inode->i_sb;
	flush_delayed_work(&HFS_SB(sb)->mdb_work);

	/* .. finally sync the buffers to disk */
	err = sync_blockdev(sb->s_bdev);
	if (!ret)
		ret = err;
	mutex_unlock(&inode->i_mutex);
	return ret;
}

static const struct file_operations hfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.splice_read	= generic_file_splice_read,
	.fsync		= hfs_file_fsync,
	.open		= hfs_file_open,
	.release	= hfs_file_release,
};

static const struct inode_operations hfs_file_inode_operations = {
	.lookup		= hfs_file_lookup,
	.setattr	= hfs_inode_setattr,
	.setxattr	= hfs_setxattr,
	.getxattr	= hfs_getxattr,
	.listxattr	= hfs_listxattr,
};