dir.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710
  1. /*
  2. * dir.c - NILFS directory entry operations
  3. *
  4. * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  19. *
  20. * Modified for NILFS by Amagai Yoshiji <amagai@osrg.net>
  21. */
  22. /*
  23. * linux/fs/ext2/dir.c
  24. *
  25. * Copyright (C) 1992, 1993, 1994, 1995
  26. * Remy Card (card@masi.ibp.fr)
  27. * Laboratoire MASI - Institut Blaise Pascal
  28. * Universite Pierre et Marie Curie (Paris VI)
  29. *
  30. * from
  31. *
  32. * linux/fs/minix/dir.c
  33. *
  34. * Copyright (C) 1991, 1992 Linus Torvalds
  35. *
  36. * ext2 directory handling functions
  37. *
  38. * Big-endian to little-endian byte-swapping/bitmaps by
  39. * David S. Miller (davem@caip.rutgers.edu), 1995
  40. *
  41. * All code that works with directory layout had been switched to pagecache
  42. * and moved here. AV
  43. */
  44. #include <linux/pagemap.h>
  45. #include "nilfs.h"
  46. #include "page.h"
  47. /*
  48. * nilfs uses block-sized chunks. Arguably, sector-sized ones would be
  49. * more robust, but we have what we have
  50. */
  51. static inline unsigned nilfs_chunk_size(struct inode *inode)
  52. {
  53. return inode->i_sb->s_blocksize;
  54. }
/*
 * Drop the kmap() reference taken by nilfs_get_page() and release the
 * page cache reference.  Must pair with every successful nilfs_get_page().
 */
static inline void nilfs_put_page(struct page *page)
{
	kunmap(page);
	page_cache_release(page);
}
  60. static inline unsigned long dir_pages(struct inode *inode)
  61. {
  62. return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
  63. }
  64. /*
  65. * Return the offset into page `page_nr' of the last valid
  66. * byte in that page, plus one.
  67. */
  68. static unsigned nilfs_last_byte(struct inode *inode, unsigned long page_nr)
  69. {
  70. unsigned last_byte = inode->i_size;
  71. last_byte -= page_nr << PAGE_CACHE_SHIFT;
  72. if (last_byte > PAGE_CACHE_SIZE)
  73. last_byte = PAGE_CACHE_SIZE;
  74. return last_byte;
  75. }
/*
 * Begin a write to the directory chunk [from, to) of @page.
 * AOP_FLAG_UNINTERRUPTIBLE keeps the operation from being aborted by
 * signals; used where the caller cannot back out (see nilfs_set_link()).
 */
static int nilfs_prepare_chunk_uninterruptible(struct page *page,
					       struct address_space *mapping,
					       unsigned from, unsigned to)
{
	loff_t pos = page_offset(page) + from;

	return block_write_begin(NULL, mapping, pos, to - from,
				 AOP_FLAG_UNINTERRUPTIBLE, &page,
				 NULL, nilfs_get_block);
}
/*
 * Begin a write to the directory chunk [from, to) of @page (interruptible
 * variant; no address_space operation flags).
 */
static int nilfs_prepare_chunk(struct page *page,
			       struct address_space *mapping,
			       unsigned from, unsigned to)
{
	loff_t pos = page_offset(page) + from;

	return block_write_begin(NULL, mapping, pos, to - from, 0, &page,
				 NULL, nilfs_get_block);
}
/*
 * Finish a write started by nilfs_prepare_chunk*() on the chunk
 * [from, to) of @page: commit the data, extend i_size if the directory
 * grew, flag the transaction for sync on DIRSYNC directories, and
 * register the newly dirtied buffers with the log writer.  Unlocks the
 * page.  Returns 0 or a negative error from nilfs_set_file_dirty().
 */
static int nilfs_commit_chunk(struct page *page,
			      struct address_space *mapping,
			      unsigned from, unsigned to)
{
	struct inode *dir = mapping->host;
	struct nilfs_sb_info *sbi = NILFS_SB(dir->i_sb);
	loff_t pos = page_offset(page) + from;
	unsigned len = to - from;
	unsigned nr_dirty, copied;
	int err;

	/* Count clean buffers before block_write_end() dirties them, so
	   the dirty-file accounting sees only the new ones. */
	nr_dirty = nilfs_page_count_clean_buffers(page, from, to);
	copied = block_write_end(NULL, mapping, pos, len, len, page, NULL);
	if (pos + copied > dir->i_size) {
		i_size_write(dir, pos + copied);
		mark_inode_dirty(dir);
	}
	if (IS_DIRSYNC(dir))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	err = nilfs_set_file_dirty(sbi, dir, nr_dirty);
	unlock_page(page);
	return err;
}
/*
 * Validate every directory entry on @page: each record length must be at
 * least the minimal entry size, 4-byte aligned, large enough for its
 * name, must not cross a chunk (block) boundary, and the records must
 * tile the valid part of the page exactly.  Sets PG_checked when done,
 * and additionally PG_error if the page is corrupt.
 */
static void nilfs_check_page(struct page *page)
{
	struct inode *dir = page->mapping->host;
	struct super_block *sb = dir->i_sb;
	unsigned chunk_size = nilfs_chunk_size(dir);
	char *kaddr = page_address(page);
	unsigned offs, rec_len;
	unsigned limit = PAGE_CACHE_SIZE;
	struct nilfs_dir_entry *p;
	char *error;

	/* The last page may be partially used; the used part must be a
	   whole number of chunks. */
	if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
		limit = dir->i_size & ~PAGE_CACHE_MASK;
		if (limit & (chunk_size - 1))
			goto Ebadsize;
		if (!limit)
			goto out;
	}
	for (offs = 0; offs <= limit - NILFS_DIR_REC_LEN(1); offs += rec_len) {
		p = (struct nilfs_dir_entry *)(kaddr + offs);
		rec_len = le16_to_cpu(p->rec_len);
		if (rec_len < NILFS_DIR_REC_LEN(1))
			goto Eshort;
		if (rec_len & 3)
			goto Ealign;
		if (rec_len < NILFS_DIR_REC_LEN(p->name_len))
			goto Enamelen;
		/* first and last byte of the record must lie in the
		   same chunk */
		if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))
			goto Espan;
	}
	if (offs != limit)
		goto Eend;
out:
	SetPageChecked(page);
	return;

	/* Too bad, we had an error */
Ebadsize:
	nilfs_error(sb, "nilfs_check_page",
		    "size of directory #%lu is not a multiple of chunk size",
		    dir->i_ino
	);
	goto fail;
Eshort:
	error = "rec_len is smaller than minimal";
	goto bad_entry;
Ealign:
	error = "unaligned directory entry";
	goto bad_entry;
Enamelen:
	error = "rec_len is too small for name_len";
	goto bad_entry;
Espan:
	error = "directory entry across blocks";
bad_entry:
	nilfs_error(sb, "nilfs_check_page", "bad entry in directory #%lu: %s - "
		    "offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
		    dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
		    (unsigned long) le64_to_cpu(p->inode),
		    rec_len, p->name_len);
	goto fail;
Eend:
	p = (struct nilfs_dir_entry *)(kaddr + offs);
	nilfs_error(sb, "nilfs_check_page",
		    "entry in directory #%lu spans the page boundary"
		    "offset=%lu, inode=%lu",
		    dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
		    (unsigned long) le64_to_cpu(p->inode));
fail:
	SetPageChecked(page);
	SetPageError(page);
}
/*
 * Read directory page @n of @dir into the page cache, kmap() it and
 * validate its entries.  Returns the mapped page, or ERR_PTR(-EIO) on a
 * read or consistency failure.  Release with nilfs_put_page().
 */
static struct page *nilfs_get_page(struct inode *dir, unsigned long n)
{
	struct address_space *mapping = dir->i_mapping;
	struct page *page = read_cache_page(mapping, n,
				(filler_t *)mapping->a_ops->readpage, NULL);

	if (!IS_ERR(page)) {
		wait_on_page_locked(page);
		kmap(page);
		if (!PageUptodate(page))
			goto fail;
		/* Validate entries only once per page; PG_checked caches
		   the verdict. */
		if (!PageChecked(page))
			nilfs_check_page(page);
		if (PageError(page))
			goto fail;
	}
	return page;

fail:
	nilfs_put_page(page);
	return ERR_PTR(-EIO);
}
  205. /*
  206. * NOTE! unlike strncmp, nilfs_match returns 1 for success, 0 for failure.
  207. *
  208. * len <= NILFS_NAME_LEN and de != NULL are guaranteed by caller.
  209. */
  210. static int
  211. nilfs_match(int len, const char * const name, struct nilfs_dir_entry *de)
  212. {
  213. if (len != de->name_len)
  214. return 0;
  215. if (!de->inode)
  216. return 0;
  217. return !memcmp(name, de->name, len);
  218. }
  219. /*
  220. * p is at least 6 bytes before the end of page
  221. */
  222. static struct nilfs_dir_entry *nilfs_next_entry(struct nilfs_dir_entry *p)
  223. {
  224. return (struct nilfs_dir_entry *)((char *)p + le16_to_cpu(p->rec_len));
  225. }
/*
 * Map on-disk NILFS_FT_* file type codes to the DT_* values handed to
 * user space by readdir().
 */
static unsigned char
nilfs_filetype_table[NILFS_FT_MAX] = {
	[NILFS_FT_UNKNOWN]	= DT_UNKNOWN,
	[NILFS_FT_REG_FILE]	= DT_REG,
	[NILFS_FT_DIR]		= DT_DIR,
	[NILFS_FT_CHRDEV]	= DT_CHR,
	[NILFS_FT_BLKDEV]	= DT_BLK,
	[NILFS_FT_FIFO]		= DT_FIFO,
	[NILFS_FT_SOCK]		= DT_SOCK,
	[NILFS_FT_SYMLINK]	= DT_LNK,
};
/* Shift that extracts the S_IFMT file-type bits from i_mode */
#define S_SHIFT 12
/*
 * Map (i_mode & S_IFMT) >> S_SHIFT to the on-disk NILFS_FT_* code.
 */
static unsigned char
nilfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= NILFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= NILFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= NILFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= NILFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= NILFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= NILFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= NILFS_FT_SYMLINK,
};
/*
 * Set the on-disk file type of directory entry @de from @inode's mode.
 */
static void nilfs_set_de_type(struct nilfs_dir_entry *de, struct inode *inode)
{
	mode_t mode = inode->i_mode;

	de->file_type = nilfs_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
}
/*
 * nilfs_readdir - emit directory entries through @filldir
 *
 * Walks the directory pages starting at the page/offset encoded in
 * f_pos, calling @filldir for every live (non-zero inode) entry.  f_pos
 * is advanced by each entry's rec_len so a later call resumes where
 * this one stopped.  Returns 0 on success or -EIO on a corrupt page.
 */
static int nilfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	loff_t pos = filp->f_pos;
	struct inode *inode = filp->f_dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	unsigned int offset = pos & ~PAGE_CACHE_MASK;
	unsigned long n = pos >> PAGE_CACHE_SHIFT;
	unsigned long npages = dir_pages(inode);
	/* unsigned chunk_mask = ~(nilfs_chunk_size(inode)-1); */
	unsigned char *types = NULL;
	int ret;

	/* Nothing left once pos is past the last possible entry */
	if (pos > inode->i_size - NILFS_DIR_REC_LEN(1))
		goto success;

	types = nilfs_filetype_table;

	for ( ; n < npages; n++, offset = 0) {
		char *kaddr, *limit;
		struct nilfs_dir_entry *de;
		struct page *page = nilfs_get_page(inode, n);

		if (IS_ERR(page)) {
			nilfs_error(sb, __func__, "bad page in #%lu",
				    inode->i_ino);
			/* skip past the unreadable page so a retry does
			   not spin on it */
			filp->f_pos += PAGE_CACHE_SIZE - offset;
			ret = -EIO;
			goto done;
		}
		kaddr = page_address(page);
		de = (struct nilfs_dir_entry *)(kaddr + offset);
		limit = kaddr + nilfs_last_byte(inode, n) -
			NILFS_DIR_REC_LEN(1);
		for ( ; (char *)de <= limit; de = nilfs_next_entry(de)) {
			if (de->rec_len == 0) {
				nilfs_error(sb, __func__,
					    "zero-length directory entry");
				ret = -EIO;
				nilfs_put_page(page);
				goto done;
			}
			if (de->inode) {
				int over;
				unsigned char d_type = DT_UNKNOWN;

				if (types && de->file_type < NILFS_FT_MAX)
					d_type = types[de->file_type];

				offset = (char *)de - kaddr;
				over = filldir(dirent, de->name, de->name_len,
					       (n<<PAGE_CACHE_SHIFT) | offset,
					       le64_to_cpu(de->inode), d_type);
				if (over) {
					/* user buffer full; resume later
					   from the current f_pos */
					nilfs_put_page(page);
					goto success;
				}
			}
			filp->f_pos += le16_to_cpu(de->rec_len);
		}
		nilfs_put_page(page);
	}
success:
	ret = 0;
done:
	return ret;
}
  313. /*
  314. * nilfs_find_entry()
  315. *
  316. * finds an entry in the specified directory with the wanted name. It
  317. * returns the page in which the entry was found, and the entry itself
  318. * (as a parameter - res_dir). Page is returned mapped and unlocked.
  319. * Entry is guaranteed to be valid.
  320. */
  321. struct nilfs_dir_entry *
  322. nilfs_find_entry(struct inode *dir, struct dentry *dentry,
  323. struct page **res_page)
  324. {
  325. const char *name = dentry->d_name.name;
  326. int namelen = dentry->d_name.len;
  327. unsigned reclen = NILFS_DIR_REC_LEN(namelen);
  328. unsigned long start, n;
  329. unsigned long npages = dir_pages(dir);
  330. struct page *page = NULL;
  331. struct nilfs_inode_info *ei = NILFS_I(dir);
  332. struct nilfs_dir_entry *de;
  333. if (npages == 0)
  334. goto out;
  335. /* OFFSET_CACHE */
  336. *res_page = NULL;
  337. start = ei->i_dir_start_lookup;
  338. if (start >= npages)
  339. start = 0;
  340. n = start;
  341. do {
  342. char *kaddr;
  343. page = nilfs_get_page(dir, n);
  344. if (!IS_ERR(page)) {
  345. kaddr = page_address(page);
  346. de = (struct nilfs_dir_entry *)kaddr;
  347. kaddr += nilfs_last_byte(dir, n) - reclen;
  348. while ((char *) de <= kaddr) {
  349. if (de->rec_len == 0) {
  350. nilfs_error(dir->i_sb, __func__,
  351. "zero-length directory entry");
  352. nilfs_put_page(page);
  353. goto out;
  354. }
  355. if (nilfs_match(namelen, name, de))
  356. goto found;
  357. de = nilfs_next_entry(de);
  358. }
  359. nilfs_put_page(page);
  360. }
  361. if (++n >= npages)
  362. n = 0;
  363. /* next page is past the blocks we've got */
  364. if (unlikely(n > (dir->i_blocks >> (PAGE_CACHE_SHIFT - 9)))) {
  365. nilfs_error(dir->i_sb, __func__,
  366. "dir %lu size %lld exceeds block cout %llu",
  367. dir->i_ino, dir->i_size,
  368. (unsigned long long)dir->i_blocks);
  369. goto out;
  370. }
  371. } while (n != start);
  372. out:
  373. return NULL;
  374. found:
  375. *res_page = page;
  376. ei->i_dir_start_lookup = n;
  377. return de;
  378. }
  379. struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct page **p)
  380. {
  381. struct page *page = nilfs_get_page(dir, 0);
  382. struct nilfs_dir_entry *de = NULL;
  383. if (!IS_ERR(page)) {
  384. de = nilfs_next_entry(
  385. (struct nilfs_dir_entry *)page_address(page));
  386. *p = page;
  387. }
  388. return de;
  389. }
  390. ino_t nilfs_inode_by_name(struct inode *dir, struct dentry *dentry)
  391. {
  392. ino_t res = 0;
  393. struct nilfs_dir_entry *de;
  394. struct page *page;
  395. de = nilfs_find_entry(dir, dentry, &page);
  396. if (de) {
  397. res = le64_to_cpu(de->inode);
  398. kunmap(page);
  399. page_cache_release(page);
  400. }
  401. return res;
  402. }
/*
 * Repoint directory entry @de (found on @page by nilfs_find_entry())
 * at @inode and refresh its file type; used by rename.
 * Releases the page.
 */
void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
		    struct page *page, struct inode *inode)
{
	unsigned from = (char *) de - (char *) page_address(page);
	unsigned to = from + le16_to_cpu(de->rec_len);
	struct address_space *mapping = page->mapping;
	int err;

	lock_page(page);
	/* Rename cannot back out half-way, hence the uninterruptible
	   variant and the BUG_ON on failure. */
	err = nilfs_prepare_chunk_uninterruptible(page, mapping, from, to);
	BUG_ON(err);
	de->inode = cpu_to_le64(inode->i_ino);
	nilfs_set_de_type(de, inode);
	err = nilfs_commit_chunk(page, mapping, from, to);
	nilfs_put_page(page);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	/* NILFS_I(dir)->i_flags &= ~NILFS_BTREE_FL; */
	mark_inode_dirty(dir);
}
/*
 * Add @dentry's name, pointing at @inode, to its parent directory.
 * Scans every page — including one page past i_size, so the directory
 * can grow by one chunk — looking for a matching name (-EEXIST), an
 * unused entry large enough, or a live entry with enough slack to be
 * split.  Returns 0 or a negative errno.
 *
 * Parent is locked.
 */
int nilfs_add_link(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = dentry->d_parent->d_inode;
	const char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	unsigned chunk_size = nilfs_chunk_size(dir);
	unsigned reclen = NILFS_DIR_REC_LEN(namelen);
	unsigned short rec_len, name_len;
	struct page *page = NULL;
	struct nilfs_dir_entry *de;
	unsigned long npages = dir_pages(dir);
	unsigned long n;
	char *kaddr;
	unsigned from, to;
	int err;

	/*
	 * We take care of directory expansion in the same loop.
	 * This code plays outside i_size, so it locks the page
	 * to protect that region.
	 */
	for (n = 0; n <= npages; n++) {
		char *dir_end;

		page = nilfs_get_page(dir, n);
		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto out;
		lock_page(page);
		kaddr = page_address(page);
		dir_end = kaddr + nilfs_last_byte(dir, n);
		de = (struct nilfs_dir_entry *)kaddr;
		kaddr += PAGE_CACHE_SIZE - reclen;
		while ((char *)de <= kaddr) {
			if ((char *)de == dir_end) {
				/* We hit i_size: append a fresh chunk */
				name_len = 0;
				rec_len = chunk_size;
				de->rec_len = cpu_to_le16(chunk_size);
				de->inode = 0;
				goto got_it;
			}
			if (de->rec_len == 0) {
				nilfs_error(dir->i_sb, __func__,
					    "zero-length directory entry");
				err = -EIO;
				goto out_unlock;
			}
			err = -EEXIST;
			if (nilfs_match(namelen, name, de))
				goto out_unlock;
			name_len = NILFS_DIR_REC_LEN(de->name_len);
			rec_len = le16_to_cpu(de->rec_len);
			/* reuse a whole unused record ... */
			if (!de->inode && rec_len >= reclen)
				goto got_it;
			/* ... or split a live record with enough slack */
			if (rec_len >= name_len + reclen)
				goto got_it;
			de = (struct nilfs_dir_entry *)((char *)de + rec_len);
		}
		unlock_page(page);
		nilfs_put_page(page);
	}
	/* the iteration past i_size above must always find room */
	BUG();
	return -EINVAL;

got_it:
	from = (char *)de - (char *)page_address(page);
	to = from + rec_len;
	err = nilfs_prepare_chunk(page, page->mapping, from, to);
	if (err)
		goto out_unlock;
	if (de->inode) {
		/* split: shrink @de to its used size; the new entry
		   occupies the tail */
		struct nilfs_dir_entry *de1;

		de1 = (struct nilfs_dir_entry *)((char *)de + name_len);
		de1->rec_len = cpu_to_le16(rec_len - name_len);
		de->rec_len = cpu_to_le16(name_len);
		de = de1;
	}
	de->name_len = namelen;
	memcpy(de->name, name, namelen);
	de->inode = cpu_to_le64(inode->i_ino);
	nilfs_set_de_type(de, inode);
	err = nilfs_commit_chunk(page, page->mapping, from, to);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
	/* NILFS_I(dir)->i_flags &= ~NILFS_BTREE_FL; */
	mark_inode_dirty(dir);
	/* OFFSET_CACHE */
out_put:
	nilfs_put_page(page);
out:
	return err;
out_unlock:
	unlock_page(page);
	goto out_put;
}
/*
 * nilfs_delete_entry deletes a directory entry by merging it with the
 * previous entry. Page is up-to-date. Releases the page.
 */
int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	char *kaddr = page_address(page);
	/* start of the chunk containing the doomed entry */
	unsigned from = ((char *)dir - kaddr) & ~(nilfs_chunk_size(inode) - 1);
	unsigned to = ((char *)dir - kaddr) + le16_to_cpu(dir->rec_len);
	struct nilfs_dir_entry *pde = NULL;
	struct nilfs_dir_entry *de = (struct nilfs_dir_entry *)(kaddr + from);
	int err;

	/* walk the chunk to find the entry immediately preceding @dir */
	while ((char *)de < (char *)dir) {
		if (de->rec_len == 0) {
			nilfs_error(inode->i_sb, __func__,
				    "zero-length directory entry");
			err = -EIO;
			goto out;
		}
		pde = de;
		de = nilfs_next_entry(de);
	}
	if (pde)
		from = (char *)pde - (char *)page_address(page);
	lock_page(page);
	err = nilfs_prepare_chunk(page, mapping, from, to);
	BUG_ON(err);
	if (pde)
		/* absorb the deleted record into its predecessor */
		pde->rec_len = cpu_to_le16(to - from);
	dir->inode = 0;
	err = nilfs_commit_chunk(page, mapping, from, to);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	/* NILFS_I(inode)->i_flags &= ~NILFS_BTREE_FL; */
	mark_inode_dirty(inode);
out:
	nilfs_put_page(page);
	return err;
}
/*
 * Set the first fragment of directory: write the "." and ".." entries
 * into chunk 0 of the freshly created directory @inode whose parent is
 * @parent.  Returns 0 or a negative errno.
 */
int nilfs_make_empty(struct inode *inode, struct inode *parent)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page = grab_cache_page(mapping, 0);
	unsigned chunk_size = nilfs_chunk_size(inode);
	struct nilfs_dir_entry *de;
	int err;
	void *kaddr;

	if (!page)
		return -ENOMEM;

	err = nilfs_prepare_chunk(page, mapping, 0, chunk_size);
	if (unlikely(err)) {
		unlock_page(page);
		goto fail;
	}
	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr, 0, chunk_size);
	/* "." occupies the minimal record ... */
	de = (struct nilfs_dir_entry *)kaddr;
	de->name_len = 1;
	de->rec_len = cpu_to_le16(NILFS_DIR_REC_LEN(1));
	memcpy(de->name, ".\0\0", 4);
	de->inode = cpu_to_le64(inode->i_ino);
	nilfs_set_de_type(de, inode);
	/* ... and ".." covers the rest of the chunk */
	de = (struct nilfs_dir_entry *)(kaddr + NILFS_DIR_REC_LEN(1));
	de->name_len = 2;
	de->rec_len = cpu_to_le16(chunk_size - NILFS_DIR_REC_LEN(1));
	de->inode = cpu_to_le64(parent->i_ino);
	memcpy(de->name, "..\0", 4);
	nilfs_set_de_type(de, inode);
	kunmap_atomic(kaddr, KM_USER0);
	err = nilfs_commit_chunk(page, mapping, 0, chunk_size);
fail:
	page_cache_release(page);
	return err;
}
/*
 * routine to check that the specified directory is empty (for rmdir)
 * Returns 1 when only "." and ".." are live, 0 otherwise (including
 * on a corrupt page).
 */
int nilfs_empty_dir(struct inode *inode)
{
	struct page *page = NULL;
	unsigned long i, npages = dir_pages(inode);

	for (i = 0; i < npages; i++) {
		char *kaddr;
		struct nilfs_dir_entry *de;

		page = nilfs_get_page(inode, i);
		if (IS_ERR(page))
			continue;

		kaddr = page_address(page);
		de = (struct nilfs_dir_entry *)kaddr;
		kaddr += nilfs_last_byte(inode, i) - NILFS_DIR_REC_LEN(1);

		while ((char *)de <= kaddr) {
			if (de->rec_len == 0) {
				nilfs_error(inode->i_sb, __func__,
					    "zero-length directory entry "
					    "(kaddr=%p, de=%p)\n", kaddr, de);
				goto not_empty;
			}
			if (de->inode != 0) {
				/* check for . and .. */
				if (de->name[0] != '.')
					goto not_empty;
				if (de->name_len > 2)
					goto not_empty;
				if (de->name_len < 2) {
					/* "." must point at this inode */
					if (de->inode !=
					    cpu_to_le64(inode->i_ino))
						goto not_empty;
				} else if (de->name[1] != '.')
					goto not_empty;
			}
			de = nilfs_next_entry(de);
		}
		nilfs_put_page(page);
	}
	return 1;

not_empty:
	nilfs_put_page(page);
	return 0;
}
/*
 * File operations for NILFS directories: generic llseek/read stubs plus
 * the readdir, ioctl and fsync handlers implemented by nilfs.
 */
struct file_operations nilfs_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= nilfs_readdir,
	.unlocked_ioctl	= nilfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= nilfs_ioctl,
#endif	/* CONFIG_COMPAT */
	.fsync		= nilfs_sync_file,
};