dir.c

/*
 * linux/fs/ext2/dir.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/dir.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * ext2 directory handling functions
 *
 * Big-endian to little-endian byte-swapping/bitmaps by
 * David S. Miller (davem@caip.rutgers.edu), 1995
 *
 * All code that works with directory layout had been switched to pagecache
 * and moved here. AV
 */

#include "ext2.h"
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

typedef struct ext2_dir_entry_2 ext2_dirent;
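
/*
 * Note on the rec_len encoding (as implied by the helpers below): the
 * on-disk rec_len is only 16 bits, so a record covering a whole 65536-byte
 * chunk (possible e.g. with a 64KiB block size) cannot be stored directly.
 * It is written as EXT2_MAX_REC_LEN on disk and translated back to 1 << 16
 * in memory.
 */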
static inline unsigned ext2_rec_len_from_disk(__le16 dlen)
{
	unsigned len = le16_to_cpu(dlen);

	if (len == EXT2_MAX_REC_LEN)
		return 1 << 16;
	return len;
}

static inline __le16 ext2_rec_len_to_disk(unsigned len)
{
	if (len == (1 << 16))
		return cpu_to_le16(EXT2_MAX_REC_LEN);
	else
		BUG_ON(len > (1 << 16));
	return cpu_to_le16(len);
}

/*
 * ext2 uses block-sized chunks. Arguably, sector-sized ones would be
 * more robust, but we have what we have
 */
static inline unsigned ext2_chunk_size(struct inode *inode)
{
	return inode->i_sb->s_blocksize;
}

static inline void ext2_put_page(struct page *page)
{
	kunmap(page);
	page_cache_release(page);
}

static inline unsigned long dir_pages(struct inode *inode)
{
	return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
}

/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
 */
static unsigned
ext2_last_byte(struct inode *inode, unsigned long page_nr)
{
	unsigned last_byte = inode->i_size;

	last_byte -= page_nr << PAGE_CACHE_SHIFT;
	if (last_byte > PAGE_CACHE_SIZE)
		last_byte = PAGE_CACHE_SIZE;
	return last_byte;
}
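
/*
 * Finish a modification to a range of a directory page: bump the
 * directory's i_version, let block_write_end() update buffer state,
 * grow i_size if the chunk extends the directory, and either write the
 * page out synchronously (for DIRSYNC directories) or simply unlock it.
 */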
static int ext2_commit_chunk(struct page *page, loff_t pos, unsigned len)
{
	struct address_space *mapping = page->mapping;
	struct inode *dir = mapping->host;
	int err = 0;

	dir->i_version++;
	block_write_end(NULL, mapping, pos, len, len, page, NULL);

	if (pos+len > dir->i_size) {
		i_size_write(dir, pos+len);
		mark_inode_dirty(dir);
	}

	if (IS_DIRSYNC(dir))
		err = write_one_page(page, 1);
	else
		unlock_page(page);

	return err;
}
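
/*
 * Sanity-check every directory entry in a freshly read page: record
 * lengths must be at least the minimal size, 4-byte aligned, large
 * enough for the stored name, confined to a single chunk, and must
 * carry an inode number within the filesystem's range. On success the
 * page is marked Checked; on failure it is also marked Error so that
 * callers refuse to use it.
 */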
static void ext2_check_page(struct page *page, int quiet)
{
	struct inode *dir = page->mapping->host;
	struct super_block *sb = dir->i_sb;
	unsigned chunk_size = ext2_chunk_size(dir);
	char *kaddr = page_address(page);
	u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count);
	unsigned offs, rec_len;
	unsigned limit = PAGE_CACHE_SIZE;
	ext2_dirent *p;
	char *error;

	if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
		limit = dir->i_size & ~PAGE_CACHE_MASK;
		if (limit & (chunk_size - 1))
			goto Ebadsize;
		if (!limit)
			goto out;
	}
	for (offs = 0; offs <= limit - EXT2_DIR_REC_LEN(1); offs += rec_len) {
		p = (ext2_dirent *)(kaddr + offs);
		rec_len = ext2_rec_len_from_disk(p->rec_len);

		if (rec_len < EXT2_DIR_REC_LEN(1))
			goto Eshort;
		if (rec_len & 3)
			goto Ealign;
		if (rec_len < EXT2_DIR_REC_LEN(p->name_len))
			goto Enamelen;
		if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))
			goto Espan;
		if (le32_to_cpu(p->inode) > max_inumber)
			goto Einumber;
	}
	if (offs != limit)
		goto Eend;
out:
	SetPageChecked(page);
	return;

	/* Too bad, we had an error */

Ebadsize:
	if (!quiet)
		ext2_error(sb, __func__,
			"size of directory #%lu is not a multiple "
			"of chunk size", dir->i_ino);
	goto fail;
Eshort:
	error = "rec_len is smaller than minimal";
	goto bad_entry;
Ealign:
	error = "unaligned directory entry";
	goto bad_entry;
Enamelen:
	error = "rec_len is too small for name_len";
	goto bad_entry;
Espan:
	error = "directory entry across blocks";
	goto bad_entry;
Einumber:
	error = "inode out of bounds";
bad_entry:
	if (!quiet)
		ext2_error(sb, __func__, "bad entry in directory #%lu: %s - "
			"offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
			dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
			(unsigned long) le32_to_cpu(p->inode),
			rec_len, p->name_len);
	goto fail;
Eend:
	if (!quiet) {
		p = (ext2_dirent *)(kaddr + offs);
		ext2_error(sb, "ext2_check_page",
			"entry in directory #%lu spans the page boundary, "
			"offset=%lu, inode=%lu",
			dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
			(unsigned long) le32_to_cpu(p->inode));
	}
fail:
	SetPageChecked(page);
	SetPageError(page);
}
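
/*
 * Read directory page 'n' through the page cache, kmap() it, and run
 * ext2_check_page() on it the first time it is seen. Returns the mapped
 * page, or ERR_PTR(-EIO) if the page is marked bad. The caller must
 * release the page with ext2_put_page() when done with it.
 */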
static struct page * ext2_get_page(struct inode *dir, unsigned long n,
				   int quiet)
{
	struct address_space *mapping = dir->i_mapping;
	struct page *page = read_mapping_page(mapping, n, NULL);
	if (!IS_ERR(page)) {
		kmap(page);
		if (!PageChecked(page))
			ext2_check_page(page, quiet);
		if (PageError(page))
			goto fail;
	}
	return page;

fail:
	ext2_put_page(page);
	return ERR_PTR(-EIO);
}

/*
 * NOTE! unlike strncmp, ext2_match returns 1 for success, 0 for failure.
 *
 * len <= EXT2_NAME_LEN and de != NULL are guaranteed by caller.
 */
static inline int ext2_match (int len, const char * const name,
					struct ext2_dir_entry_2 * de)
{
	if (len != de->name_len)
		return 0;
	if (!de->inode)
		return 0;
	return !memcmp(name, de->name, len);
}

/*
 * p is at least 6 bytes before the end of page
 */
static inline ext2_dirent *ext2_next_entry(ext2_dirent *p)
{
	return (ext2_dirent *)((char *)p +
			ext2_rec_len_from_disk(p->rec_len));
}
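
/*
 * Re-derive a safe position after the directory may have changed under
 * a readdir in progress: start at the beginning of the chunk containing
 * 'offset', walk forward entry by entry, and return the offset of the
 * first entry boundary at or beyond the original position.
 */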
static inline unsigned
ext2_validate_entry(char *base, unsigned offset, unsigned mask)
{
	ext2_dirent *de = (ext2_dirent*)(base + offset);
	ext2_dirent *p = (ext2_dirent*)(base + (offset&mask));
	while ((char*)p < (char*)de) {
		if (p->rec_len == 0)
			break;
		p = ext2_next_entry(p);
	}
	return (char *)p - base;
}

static unsigned char ext2_filetype_table[EXT2_FT_MAX] = {
	[EXT2_FT_UNKNOWN]	= DT_UNKNOWN,
	[EXT2_FT_REG_FILE]	= DT_REG,
	[EXT2_FT_DIR]		= DT_DIR,
	[EXT2_FT_CHRDEV]	= DT_CHR,
	[EXT2_FT_BLKDEV]	= DT_BLK,
	[EXT2_FT_FIFO]		= DT_FIFO,
	[EXT2_FT_SOCK]		= DT_SOCK,
	[EXT2_FT_SYMLINK]	= DT_LNK,
};

#define S_SHIFT 12
static unsigned char ext2_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= EXT2_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= EXT2_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= EXT2_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= EXT2_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= EXT2_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= EXT2_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= EXT2_FT_SYMLINK,
};
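
/*
 * Fill in the on-disk file_type byte from the inode's mode, but only
 * when the filesystem advertises the "filetype" incompatible feature;
 * older filesystems keep the byte zero (historically it was the high
 * byte of a 16-bit name_len field).
 */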
static inline void ext2_set_de_type(ext2_dirent *de, struct inode *inode)
{
	mode_t mode = inode->i_mode;
	if (EXT2_HAS_INCOMPAT_FEATURE(inode->i_sb, EXT2_FEATURE_INCOMPAT_FILETYPE))
		de->file_type = ext2_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
	else
		de->file_type = 0;
}
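
/*
 * Walk the directory one page-cache page at a time and feed each live
 * entry to the VFS via filldir(). f_pos encodes a byte offset into the
 * directory; when the cached f_version no longer matches the inode's
 * i_version, the offset is re-validated with ext2_validate_entry() so a
 * concurrent modification cannot leave us in the middle of an entry.
 */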
static int
ext2_readdir (struct file * filp, void * dirent, filldir_t filldir)
{
	loff_t pos = filp->f_pos;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	unsigned int offset = pos & ~PAGE_CACHE_MASK;
	unsigned long n = pos >> PAGE_CACHE_SHIFT;
	unsigned long npages = dir_pages(inode);
	unsigned chunk_mask = ~(ext2_chunk_size(inode)-1);
	unsigned char *types = NULL;
	int need_revalidate = filp->f_version != inode->i_version;

	if (pos > inode->i_size - EXT2_DIR_REC_LEN(1))
		return 0;

	if (EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_FILETYPE))
		types = ext2_filetype_table;

	for ( ; n < npages; n++, offset = 0) {
		char *kaddr, *limit;
		ext2_dirent *de;
		struct page *page = ext2_get_page(inode, n, 0);

		if (IS_ERR(page)) {
			ext2_error(sb, __func__,
				   "bad page in #%lu",
				   inode->i_ino);
			filp->f_pos += PAGE_CACHE_SIZE - offset;
			return PTR_ERR(page);
		}
		kaddr = page_address(page);
		if (unlikely(need_revalidate)) {
			if (offset) {
				offset = ext2_validate_entry(kaddr, offset, chunk_mask);
				filp->f_pos = (n<<PAGE_CACHE_SHIFT) + offset;
			}
			filp->f_version = inode->i_version;
			need_revalidate = 0;
		}
		de = (ext2_dirent *)(kaddr+offset);
		limit = kaddr + ext2_last_byte(inode, n) - EXT2_DIR_REC_LEN(1);
		for ( ;(char*)de <= limit; de = ext2_next_entry(de)) {
			if (de->rec_len == 0) {
				ext2_error(sb, __func__,
					"zero-length directory entry");
				ext2_put_page(page);
				return -EIO;
			}
			if (de->inode) {
				int over;
				unsigned char d_type = DT_UNKNOWN;

				if (types && de->file_type < EXT2_FT_MAX)
					d_type = types[de->file_type];

				offset = (char *)de - kaddr;
				over = filldir(dirent, de->name, de->name_len,
						(n<<PAGE_CACHE_SHIFT) | offset,
						le32_to_cpu(de->inode), d_type);
				if (over) {
					ext2_put_page(page);
					return 0;
				}
			}
			filp->f_pos += ext2_rec_len_from_disk(de->rec_len);
		}
		ext2_put_page(page);
	}
	return 0;
}

/*
 * ext2_find_entry()
 *
 * Finds an entry in the specified directory with the given name. It
 * returns the page in which the entry was found, and the entry itself
 * (as a parameter - res_page). The page is returned mapped and unlocked.
 * The entry is guaranteed to be valid.
 */
struct ext2_dir_entry_2 *ext2_find_entry (struct inode * dir,
			struct qstr *child, struct page ** res_page)
{
	const char *name = child->name;
	int namelen = child->len;
	unsigned reclen = EXT2_DIR_REC_LEN(namelen);
	unsigned long start, n;
	unsigned long npages = dir_pages(dir);
	struct page *page = NULL;
	struct ext2_inode_info *ei = EXT2_I(dir);
	ext2_dirent * de;
	int dir_has_error = 0;

	if (npages == 0)
		goto out;

	/* OFFSET_CACHE */
	*res_page = NULL;

	start = ei->i_dir_start_lookup;
	if (start >= npages)
		start = 0;
	n = start;
	do {
		char *kaddr;
		page = ext2_get_page(dir, n, dir_has_error);
		if (!IS_ERR(page)) {
			kaddr = page_address(page);
			de = (ext2_dirent *) kaddr;
			kaddr += ext2_last_byte(dir, n) - reclen;
			while ((char *) de <= kaddr) {
				if (de->rec_len == 0) {
					ext2_error(dir->i_sb, __func__,
						"zero-length directory entry");
					ext2_put_page(page);
					goto out;
				}
				if (ext2_match (namelen, name, de))
					goto found;
				de = ext2_next_entry(de);
			}
			ext2_put_page(page);
		} else
			dir_has_error = 1;

		if (++n >= npages)
			n = 0;
		/* next page is past the blocks we've got */
		if (unlikely(n > (dir->i_blocks >> (PAGE_CACHE_SHIFT - 9)))) {
			ext2_error(dir->i_sb, __func__,
				"dir %lu size %lld exceeds block count %llu",
				dir->i_ino, dir->i_size,
				(unsigned long long)dir->i_blocks);
			goto out;
		}
	} while (n != start);
out:
	return NULL;

found:
	*res_page = page;
	ei->i_dir_start_lookup = n;
	return de;
}
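
/*
 * Return the '..' entry of a directory. The first entry of a valid
 * directory is always '.', so '..' is simply the entry that follows it
 * on page 0. On success *p is set to the mapped page, which the caller
 * must release with ext2_put_page().
 */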
struct ext2_dir_entry_2 * ext2_dotdot (struct inode *dir, struct page **p)
{
	struct page *page = ext2_get_page(dir, 0, 0);
	ext2_dirent *de = NULL;

	if (!IS_ERR(page)) {
		de = ext2_next_entry((ext2_dirent *) page_address(page));
		*p = page;
	}
	return de;
}
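
/*
 * Resolve a name within 'dir' to an inode number, or 0 if the name is
 * not present. The page returned by ext2_find_entry() is released
 * before returning.
 */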
ino_t ext2_inode_by_name(struct inode *dir, struct qstr *child)
{
	ino_t res = 0;
	struct ext2_dir_entry_2 *de;
	struct page *page;

	de = ext2_find_entry (dir, child, &page);
	if (de) {
		res = le32_to_cpu(de->inode);
		ext2_put_page(page);
	}
	return res;
}

/* Releases the page */
void ext2_set_link(struct inode *dir, struct ext2_dir_entry_2 *de,
		   struct page *page, struct inode *inode)
{
	loff_t pos = page_offset(page) +
			(char *) de - (char *) page_address(page);
	unsigned len = ext2_rec_len_from_disk(de->rec_len);
	int err;

	lock_page(page);
	err = __ext2_write_begin(NULL, page->mapping, pos, len,
				AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
	BUG_ON(err);
	de->inode = cpu_to_le32(inode->i_ino);
	ext2_set_de_type(de, inode);
	err = ext2_commit_chunk(page, pos, len);
	ext2_put_page(page);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
	EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
	mark_inode_dirty(dir);
}
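
/*
 * Roughly, ext2_add_link() scans the directory for the first slot that
 * can hold the new name: an unused entry whose record is large enough,
 * a used entry with enough slack after its own name (which is then split
 * in two), or - if no page has room - a fresh chunk appended past i_size.
 * See also the locking note below.
 */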
/*
 * Parent is locked.
 */
int ext2_add_link (struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = dentry->d_parent->d_inode;
	const char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	unsigned chunk_size = ext2_chunk_size(dir);
	unsigned reclen = EXT2_DIR_REC_LEN(namelen);
	unsigned short rec_len, name_len;
	struct page *page = NULL;
	ext2_dirent * de;
	unsigned long npages = dir_pages(dir);
	unsigned long n;
	char *kaddr;
	loff_t pos;
	int err;

	/*
	 * We take care of directory expansion in the same loop.
	 * This code plays outside i_size, so it locks the page
	 * to protect that region.
	 */
	for (n = 0; n <= npages; n++) {
		char *dir_end;

		page = ext2_get_page(dir, n, 0);
		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto out;
		lock_page(page);
		kaddr = page_address(page);
		dir_end = kaddr + ext2_last_byte(dir, n);
		de = (ext2_dirent *)kaddr;
		kaddr += PAGE_CACHE_SIZE - reclen;
		while ((char *)de <= kaddr) {
			if ((char *)de == dir_end) {
				/* We hit i_size */
				name_len = 0;
				rec_len = chunk_size;
				de->rec_len = ext2_rec_len_to_disk(chunk_size);
				de->inode = 0;
				goto got_it;
			}
			if (de->rec_len == 0) {
				ext2_error(dir->i_sb, __func__,
					"zero-length directory entry");
				err = -EIO;
				goto out_unlock;
			}
			err = -EEXIST;
			if (ext2_match (namelen, name, de))
				goto out_unlock;
			name_len = EXT2_DIR_REC_LEN(de->name_len);
			rec_len = ext2_rec_len_from_disk(de->rec_len);
			if (!de->inode && rec_len >= reclen)
				goto got_it;
			if (rec_len >= name_len + reclen)
				goto got_it;
			de = (ext2_dirent *) ((char *) de + rec_len);
		}
		unlock_page(page);
		ext2_put_page(page);
	}
	BUG();
	return -EINVAL;

got_it:
	pos = page_offset(page) +
		(char*)de - (char*)page_address(page);
	err = __ext2_write_begin(NULL, page->mapping, pos, rec_len, 0,
							&page, NULL);
	if (err)
		goto out_unlock;
	if (de->inode) {
		ext2_dirent *de1 = (ext2_dirent *) ((char *) de + name_len);
		de1->rec_len = ext2_rec_len_to_disk(rec_len - name_len);
		de->rec_len = ext2_rec_len_to_disk(name_len);
		de = de1;
	}
	de->name_len = namelen;
	memcpy(de->name, name, namelen);
	de->inode = cpu_to_le32(inode->i_ino);
	ext2_set_de_type (de, inode);
	err = ext2_commit_chunk(page, pos, rec_len);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
	EXT2_I(dir)->i_flags &= ~EXT2_BTREE_FL;
	mark_inode_dirty(dir);
	/* OFFSET_CACHE */
out_put:
	ext2_put_page(page);
out:
	return err;
out_unlock:
	unlock_page(page);
	goto out_put;
}

/*
 * ext2_delete_entry deletes a directory entry by merging it with the
 * previous entry. Page is up-to-date. Releases the page.
 */
int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;
	char *kaddr = page_address(page);
	unsigned from = ((char*)dir - kaddr) & ~(ext2_chunk_size(inode)-1);
	unsigned to = ((char *)dir - kaddr) +
				ext2_rec_len_from_disk(dir->rec_len);
	loff_t pos;
	ext2_dirent * pde = NULL;
	ext2_dirent * de = (ext2_dirent *) (kaddr + from);
	int err;

	while ((char*)de < (char*)dir) {
		if (de->rec_len == 0) {
			ext2_error(inode->i_sb, __func__,
				"zero-length directory entry");
			err = -EIO;
			goto out;
		}
		pde = de;
		de = ext2_next_entry(de);
	}
	if (pde)
		from = (char*)pde - (char*)page_address(page);
	pos = page_offset(page) + from;
	lock_page(page);
	err = __ext2_write_begin(NULL, page->mapping, pos, to - from, 0,
							&page, NULL);
	BUG_ON(err);
	if (pde)
		pde->rec_len = ext2_rec_len_to_disk(to - from);
	dir->inode = 0;
	err = ext2_commit_chunk(page, pos, to - from);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
	EXT2_I(inode)->i_flags &= ~EXT2_BTREE_FL;
	mark_inode_dirty(inode);
out:
	ext2_put_page(page);
	return err;
}

/*
 * Create the '.' and '..' entries in the first chunk of a new directory.
 */
int ext2_make_empty(struct inode *inode, struct inode *parent)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page = grab_cache_page(mapping, 0);
	unsigned chunk_size = ext2_chunk_size(inode);
	struct ext2_dir_entry_2 * de;
	int err;
	void *kaddr;

	if (!page)
		return -ENOMEM;

	err = __ext2_write_begin(NULL, page->mapping, 0, chunk_size, 0,
							&page, NULL);
	if (err) {
		unlock_page(page);
		goto fail;
	}
	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr, 0, chunk_size);
	de = (struct ext2_dir_entry_2 *)kaddr;
	de->name_len = 1;
	de->rec_len = ext2_rec_len_to_disk(EXT2_DIR_REC_LEN(1));
	memcpy (de->name, ".\0\0", 4);
	de->inode = cpu_to_le32(inode->i_ino);
	ext2_set_de_type (de, inode);

	de = (struct ext2_dir_entry_2 *)(kaddr + EXT2_DIR_REC_LEN(1));
	de->name_len = 2;
	de->rec_len = ext2_rec_len_to_disk(chunk_size - EXT2_DIR_REC_LEN(1));
	de->inode = cpu_to_le32(parent->i_ino);
	memcpy (de->name, "..\0", 4);
	ext2_set_de_type (de, inode);
	kunmap_atomic(kaddr, KM_USER0);
	err = ext2_commit_chunk(page, 0, chunk_size);
fail:
	page_cache_release(page);
	return err;
}

/*
 * routine to check that the specified directory is empty (for rmdir)
 */
int ext2_empty_dir (struct inode * inode)
{
	struct page *page = NULL;
	unsigned long i, npages = dir_pages(inode);
	int dir_has_error = 0;

	for (i = 0; i < npages; i++) {
		char *kaddr;
		ext2_dirent * de;
		page = ext2_get_page(inode, i, dir_has_error);

		if (IS_ERR(page)) {
			dir_has_error = 1;
			continue;
		}

		kaddr = page_address(page);
		de = (ext2_dirent *)kaddr;
		kaddr += ext2_last_byte(inode, i) - EXT2_DIR_REC_LEN(1);

		while ((char *)de <= kaddr) {
			if (de->rec_len == 0) {
				ext2_error(inode->i_sb, __func__,
					"zero-length directory entry");
				printk("kaddr=%p, de=%p\n", kaddr, de);
				goto not_empty;
			}
			if (de->inode != 0) {
				/* check for . and .. */
				if (de->name[0] != '.')
					goto not_empty;
				if (de->name_len > 2)
					goto not_empty;
				if (de->name_len < 2) {
					if (de->inode !=
					    cpu_to_le32(inode->i_ino))
						goto not_empty;
				} else if (de->name[1] != '.')
					goto not_empty;
			}
			de = ext2_next_entry(de);
		}
		ext2_put_page(page);
	}
	return 1;

not_empty:
	ext2_put_page(page);
	return 0;
}

const struct file_operations ext2_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= ext2_readdir,
	.unlocked_ioctl = ext2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ext2_compat_ioctl,
#endif
	.fsync		= ext2_sync_file,
};