dir.c

/*
 * Copyright (C) 2005, 2006
 * Avishay Traeger (avishay@gmail.com) (avishay@il.ibm.com)
 * Copyright (C) 2005, 2006
 * International Business Machines
 * Copyright (C) 2008, 2009
 * Boaz Harrosh <bharrosh@panasas.com>
 *
 * Copyrights for code taken from ext2:
 *     Copyright (C) 1992, 1993, 1994, 1995
 *     Remy Card (card@masi.ibp.fr)
 *     Laboratoire MASI - Institut Blaise Pascal
 *     Universite Pierre et Marie Curie (Paris VI)
 *     from
 *     linux/fs/minix/inode.c
 *     Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is part of exofs.
 *
 * exofs is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.  Since it is based on ext2, and the only
 * valid version of GPL for the Linux kernel is version 2, the only valid
 * version of GPL for exofs is version 2.
 *
 * exofs is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with exofs; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "exofs.h"

static inline unsigned exofs_chunk_size(struct inode *inode)
{
        return inode->i_sb->s_blocksize;
}

static inline void exofs_put_page(struct page *page)
{
        kunmap(page);
        page_cache_release(page);
}

/* Accesses to dir's inode->i_size must be made under the inode lock */
static inline unsigned long dir_pages(struct inode *inode)
{
        return (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
}
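
/*
 * Return the number of bytes of page @page_nr that lie within i_size:
 * PAGE_CACHE_SIZE for every page except (possibly) the last page of the
 * directory.
 */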
static unsigned exofs_last_byte(struct inode *inode, unsigned long page_nr)
{
        loff_t last_byte = inode->i_size;

        last_byte -= page_nr << PAGE_CACHE_SHIFT;
        if (last_byte > PAGE_CACHE_SIZE)
                last_byte = PAGE_CACHE_SIZE;
        return last_byte;
}
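
/*
 * Finish a modification of a directory chunk: bump i_version, mark the
 * page up to date and dirty, and grow i_size if the write extended the
 * directory.  The page lock is released here, either by write_one_page()
 * for DIRSYNC directories or by unlock_page().
 */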
static int exofs_commit_chunk(struct page *page, loff_t pos, unsigned len)
{
        struct address_space *mapping = page->mapping;
        struct inode *dir = mapping->host;
        int err = 0;

        dir->i_version++;

        if (!PageUptodate(page))
                SetPageUptodate(page);

        if (pos+len > dir->i_size) {
                i_size_write(dir, pos+len);
                mark_inode_dirty(dir);
        }
        set_page_dirty(page);

        if (IS_DIRSYNC(dir))
                err = write_one_page(page, 1);
        else
                unlock_page(page);

        return err;
}
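
/*
 * Sanity-check every directory entry in a freshly read page: record
 * lengths must be 4-byte aligned, no smaller than the minimal entry,
 * large enough for the name, and must not cross a chunk boundary.
 * On success the page is marked Checked; on failure it is additionally
 * marked Error so that exofs_get_page() rejects it.
 */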
static void exofs_check_page(struct page *page)
{
        struct inode *dir = page->mapping->host;
        unsigned chunk_size = exofs_chunk_size(dir);
        char *kaddr = page_address(page);
        unsigned offs, rec_len;
        unsigned limit = PAGE_CACHE_SIZE;
        struct exofs_dir_entry *p;
        char *error;

        /* if the page is the last one in the directory */
        if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) {
                limit = dir->i_size & ~PAGE_CACHE_MASK;
                if (limit & (chunk_size - 1))
                        goto Ebadsize;
                if (!limit)
                        goto out;
        }
        for (offs = 0; offs <= limit - EXOFS_DIR_REC_LEN(1); offs += rec_len) {
                p = (struct exofs_dir_entry *)(kaddr + offs);
                rec_len = le16_to_cpu(p->rec_len);

                if (rec_len < EXOFS_DIR_REC_LEN(1))
                        goto Eshort;
                if (rec_len & 3)
                        goto Ealign;
                if (rec_len < EXOFS_DIR_REC_LEN(p->name_len))
                        goto Enamelen;
                if (((offs + rec_len - 1) ^ offs) & ~(chunk_size-1))
                        goto Espan;
        }
        if (offs != limit)
                goto Eend;
out:
        SetPageChecked(page);
        return;

Ebadsize:
        EXOFS_ERR("ERROR [exofs_check_page]: "
                "size of directory #%lu is not a multiple of chunk size",
                dir->i_ino);
        goto fail;
Eshort:
        error = "rec_len is smaller than minimal";
        goto bad_entry;
Ealign:
        error = "unaligned directory entry";
        goto bad_entry;
Enamelen:
        error = "rec_len is too small for name_len";
        goto bad_entry;
Espan:
        error = "directory entry across blocks";
        goto bad_entry;
bad_entry:
        EXOFS_ERR(
                "ERROR [exofs_check_page]: bad entry in directory #%lu: %s - "
                "offset=%lu, inode=%llu, rec_len=%d, name_len=%d",
                dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs,
                _LLU(le64_to_cpu(p->inode_no)),
                rec_len, p->name_len);
        goto fail;
Eend:
        p = (struct exofs_dir_entry *)(kaddr + offs);
        EXOFS_ERR("ERROR [exofs_check_page]: "
                "entry in directory #%lu spans the page boundary, "
                "offset=%lu, inode=%llu",
                dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs,
                _LLU(le64_to_cpu(p->inode_no)));
fail:
        SetPageChecked(page);
        SetPageError(page);
}
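
/*
 * Read directory page @n through the page cache, kmap it, and validate it
 * with exofs_check_page() the first time it is seen.  Returns the mapped
 * page on success or an ERR_PTR on failure; release with exofs_put_page().
 */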
static struct page *exofs_get_page(struct inode *dir, unsigned long n)
{
        struct address_space *mapping = dir->i_mapping;
        struct page *page = read_mapping_page(mapping, n, NULL);

        if (!IS_ERR(page)) {
                kmap(page);
                if (!PageChecked(page))
                        exofs_check_page(page);
                if (PageError(page))
                        goto fail;
        }
        return page;

fail:
        exofs_put_page(page);
        return ERR_PTR(-EIO);
}
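
/*
 * Compare a name of length @len against a directory entry.  An entry with
 * inode_no == 0 is unused and never matches.
 */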
static inline int exofs_match(int len, const unsigned char *name,
                              struct exofs_dir_entry *de)
{
        if (len != de->name_len)
                return 0;
        if (!de->inode_no)
                return 0;
        return !memcmp(name, de->name, len);
}

static inline
struct exofs_dir_entry *exofs_next_entry(struct exofs_dir_entry *p)
{
        return (struct exofs_dir_entry *)((char *)p + le16_to_cpu(p->rec_len));
}
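
/*
 * Re-derive a usable offset after the directory may have changed: walk
 * forward from the start of the chunk containing @offset and return the
 * offset of the first entry boundary at or beyond it.
 */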
static inline unsigned
exofs_validate_entry(char *base, unsigned offset, unsigned mask)
{
        struct exofs_dir_entry *de = (struct exofs_dir_entry *)(base + offset);
        struct exofs_dir_entry *p =
                        (struct exofs_dir_entry *)(base + (offset&mask));

        while ((char *)p < (char *)de) {
                if (p->rec_len == 0)
                        break;
                p = exofs_next_entry(p);
        }
        return (char *)p - base;
}

static unsigned char exofs_filetype_table[EXOFS_FT_MAX] = {
        [EXOFS_FT_UNKNOWN]      = DT_UNKNOWN,
        [EXOFS_FT_REG_FILE]     = DT_REG,
        [EXOFS_FT_DIR]          = DT_DIR,
        [EXOFS_FT_CHRDEV]       = DT_CHR,
        [EXOFS_FT_BLKDEV]       = DT_BLK,
        [EXOFS_FT_FIFO]         = DT_FIFO,
        [EXOFS_FT_SOCK]         = DT_SOCK,
        [EXOFS_FT_SYMLINK]      = DT_LNK,
};

#define S_SHIFT 12
static unsigned char exofs_type_by_mode[S_IFMT >> S_SHIFT] = {
        [S_IFREG >> S_SHIFT]    = EXOFS_FT_REG_FILE,
        [S_IFDIR >> S_SHIFT]    = EXOFS_FT_DIR,
        [S_IFCHR >> S_SHIFT]    = EXOFS_FT_CHRDEV,
        [S_IFBLK >> S_SHIFT]    = EXOFS_FT_BLKDEV,
        [S_IFIFO >> S_SHIFT]    = EXOFS_FT_FIFO,
        [S_IFSOCK >> S_SHIFT]   = EXOFS_FT_SOCK,
        [S_IFLNK >> S_SHIFT]    = EXOFS_FT_SYMLINK,
};

static inline
void exofs_set_de_type(struct exofs_dir_entry *de, struct inode *inode)
{
        mode_t mode = inode->i_mode;
        de->file_type = exofs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
}
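
/*
 * readdir: walk the directory page by page and feed every live entry
 * (inode_no != 0) to the filldir callback, translating the on-disk file
 * type to a DT_* value.  The current offset is re-validated against
 * i_version in case the directory changed between calls.
 */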
static int
exofs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
        loff_t pos = filp->f_pos;
        struct inode *inode = filp->f_path.dentry->d_inode;
        unsigned int offset = pos & ~PAGE_CACHE_MASK;
        unsigned long n = pos >> PAGE_CACHE_SHIFT;
        unsigned long npages = dir_pages(inode);
        unsigned chunk_mask = ~(exofs_chunk_size(inode)-1);
        unsigned char *types = NULL;
        int need_revalidate = (filp->f_version != inode->i_version);

        if (pos > inode->i_size - EXOFS_DIR_REC_LEN(1))
                return 0;

        types = exofs_filetype_table;

        for ( ; n < npages; n++, offset = 0) {
                char *kaddr, *limit;
                struct exofs_dir_entry *de;
                struct page *page = exofs_get_page(inode, n);

                if (IS_ERR(page)) {
                        EXOFS_ERR("ERROR: bad page in #%lu",
                                  inode->i_ino);
                        filp->f_pos += PAGE_CACHE_SIZE - offset;
                        return PTR_ERR(page);
                }
                kaddr = page_address(page);
                if (unlikely(need_revalidate)) {
                        if (offset) {
                                offset = exofs_validate_entry(kaddr, offset,
                                                              chunk_mask);
                                filp->f_pos = (n<<PAGE_CACHE_SHIFT) + offset;
                        }
                        filp->f_version = inode->i_version;
                        need_revalidate = 0;
                }
                de = (struct exofs_dir_entry *)(kaddr + offset);
                limit = kaddr + exofs_last_byte(inode, n) -
                                                EXOFS_DIR_REC_LEN(1);
                for (; (char *)de <= limit; de = exofs_next_entry(de)) {
                        if (de->rec_len == 0) {
                                EXOFS_ERR("ERROR: "
                                        "zero-length directory entry");
                                exofs_put_page(page);
                                return -EIO;
                        }
                        if (de->inode_no) {
                                int over;
                                unsigned char d_type = DT_UNKNOWN;

                                if (types && de->file_type < EXOFS_FT_MAX)
                                        d_type = types[de->file_type];

                                offset = (char *)de - kaddr;
                                over = filldir(dirent, de->name, de->name_len,
                                               (n<<PAGE_CACHE_SHIFT) | offset,
                                               le64_to_cpu(de->inode_no),
                                               d_type);
                                if (over) {
                                        exofs_put_page(page);
                                        return 0;
                                }
                        }
                        filp->f_pos += le16_to_cpu(de->rec_len);
                }
                exofs_put_page(page);
        }
        return 0;
}
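
/*
 * Find a directory entry by name.  On success the entry pointer is
 * returned and *res_page points to the kmapped page containing it, which
 * the caller must release with exofs_put_page(); otherwise NULL is
 * returned.  The search starts at the cached i_dir_start_lookup page.
 */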
struct exofs_dir_entry *exofs_find_entry(struct inode *dir,
                        struct dentry *dentry, struct page **res_page)
{
        const unsigned char *name = dentry->d_name.name;
        int namelen = dentry->d_name.len;
        unsigned reclen = EXOFS_DIR_REC_LEN(namelen);
        unsigned long start, n;
        unsigned long npages = dir_pages(dir);
        struct page *page = NULL;
        struct exofs_i_info *oi = exofs_i(dir);
        struct exofs_dir_entry *de;

        if (npages == 0)
                goto out;

        *res_page = NULL;

        start = oi->i_dir_start_lookup;
        if (start >= npages)
                start = 0;
        n = start;
        do {
                char *kaddr;
                page = exofs_get_page(dir, n);
                if (!IS_ERR(page)) {
                        kaddr = page_address(page);
                        de = (struct exofs_dir_entry *) kaddr;
                        kaddr += exofs_last_byte(dir, n) - reclen;
                        while ((char *) de <= kaddr) {
                                if (de->rec_len == 0) {
                                        EXOFS_ERR("ERROR: exofs_find_entry: "
                                                "zero-length directory entry");
                                        exofs_put_page(page);
                                        goto out;
                                }
                                if (exofs_match(namelen, name, de))
                                        goto found;
                                de = exofs_next_entry(de);
                        }
                        exofs_put_page(page);
                }
                if (++n >= npages)
                        n = 0;
        } while (n != start);
out:
        return NULL;

found:
        *res_page = page;
        oi->i_dir_start_lookup = n;
        return de;
}
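
/*
 * Return the ".." entry of a directory (the second entry of page 0),
 * along with the page, which the caller releases with exofs_put_page().
 */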
struct exofs_dir_entry *exofs_dotdot(struct inode *dir, struct page **p)
{
        struct page *page = exofs_get_page(dir, 0);
        struct exofs_dir_entry *de = NULL;

        if (!IS_ERR(page)) {
                de = exofs_next_entry(
                                (struct exofs_dir_entry *)page_address(page));
                *p = page;
        }
        return de;
}
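
/* Return the inode number of @child's parent directory, or 0 on failure. */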
ino_t exofs_parent_ino(struct dentry *child)
{
        struct page *page;
        struct exofs_dir_entry *de;
        ino_t ino;

        de = exofs_dotdot(child->d_inode, &page);
        if (!de)
                return 0;

        ino = le64_to_cpu(de->inode_no);
        exofs_put_page(page);
        return ino;
}
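
/* Look up @dentry's name in @dir and return its inode number, or 0. */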
ino_t exofs_inode_by_name(struct inode *dir, struct dentry *dentry)
{
        ino_t res = 0;
        struct exofs_dir_entry *de;
        struct page *page;

        de = exofs_find_entry(dir, dentry, &page);
        if (de) {
                res = le64_to_cpu(de->inode_no);
                exofs_put_page(page);
        }
        return res;
}
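
/*
 * Repoint an existing directory entry @de at @inode: rewrite its inode
 * number and file type in place, commit the chunk and update the
 * directory's timestamps.
 */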
int exofs_set_link(struct inode *dir, struct exofs_dir_entry *de,
                   struct page *page, struct inode *inode)
{
        loff_t pos = page_offset(page) +
                        (char *) de - (char *) page_address(page);
        unsigned len = le16_to_cpu(de->rec_len);
        int err;

        lock_page(page);
        err = exofs_write_begin(NULL, page->mapping, pos, len,
                                AOP_FLAG_UNINTERRUPTIBLE, &page, NULL);
        if (err)
                EXOFS_ERR("exofs_set_link: exofs_write_begin FAILED => %d\n",
                          err);

        de->inode_no = cpu_to_le64(inode->i_ino);
        exofs_set_de_type(de, inode);
        if (likely(!err))
                err = exofs_commit_chunk(page, pos, len);
        exofs_put_page(page);
        dir->i_mtime = dir->i_ctime = CURRENT_TIME;
        mark_inode_dirty(dir);
        return err;
}
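
/*
 * Add @dentry's name, pointing at @inode, to the parent directory.
 * Scans for an unused or splittable entry with enough room, formatting a
 * fresh chunk at the end of the directory if none is found.  Returns 0 on
 * success, -EEXIST if the name is already present, or another negative
 * errno on failure.
 */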
int exofs_add_link(struct dentry *dentry, struct inode *inode)
{
        struct inode *dir = dentry->d_parent->d_inode;
        const unsigned char *name = dentry->d_name.name;
        int namelen = dentry->d_name.len;
        unsigned chunk_size = exofs_chunk_size(dir);
        unsigned reclen = EXOFS_DIR_REC_LEN(namelen);
        unsigned short rec_len, name_len;
        struct page *page = NULL;
        struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
        struct exofs_dir_entry *de;
        unsigned long npages = dir_pages(dir);
        unsigned long n;
        char *kaddr;
        loff_t pos;
        int err;

        for (n = 0; n <= npages; n++) {
                char *dir_end;

                page = exofs_get_page(dir, n);
                err = PTR_ERR(page);
                if (IS_ERR(page))
                        goto out;
                lock_page(page);
                kaddr = page_address(page);
                dir_end = kaddr + exofs_last_byte(dir, n);
                de = (struct exofs_dir_entry *)kaddr;
                kaddr += PAGE_CACHE_SIZE - reclen;
                while ((char *)de <= kaddr) {
                        if ((char *)de == dir_end) {
                                name_len = 0;
                                rec_len = chunk_size;
                                de->rec_len = cpu_to_le16(chunk_size);
                                de->inode_no = 0;
                                goto got_it;
                        }
                        if (de->rec_len == 0) {
                                EXOFS_ERR("ERROR: exofs_add_link: "
                                        "zero-length directory entry");
                                err = -EIO;
                                goto out_unlock;
                        }
                        err = -EEXIST;
                        if (exofs_match(namelen, name, de))
                                goto out_unlock;
                        name_len = EXOFS_DIR_REC_LEN(de->name_len);
                        rec_len = le16_to_cpu(de->rec_len);
                        if (!de->inode_no && rec_len >= reclen)
                                goto got_it;
                        if (rec_len >= name_len + reclen)
                                goto got_it;
                        de = (struct exofs_dir_entry *) ((char *) de + rec_len);
                }
                unlock_page(page);
                exofs_put_page(page);
        }

        EXOFS_ERR("exofs_add_link: BAD dentry=%p or inode=%p", dentry, inode);
        return -EINVAL;

got_it:
        pos = page_offset(page) +
                (char *)de - (char *)page_address(page);
        err = exofs_write_begin(NULL, page->mapping, pos, rec_len, 0,
                                &page, NULL);
        if (err)
                goto out_unlock;
        if (de->inode_no) {
                struct exofs_dir_entry *de1 =
                        (struct exofs_dir_entry *)((char *)de + name_len);
                de1->rec_len = cpu_to_le16(rec_len - name_len);
                de->rec_len = cpu_to_le16(name_len);
                de = de1;
        }
        de->name_len = namelen;
        memcpy(de->name, name, namelen);
        de->inode_no = cpu_to_le64(inode->i_ino);
        exofs_set_de_type(de, inode);
        err = exofs_commit_chunk(page, pos, rec_len);
        dir->i_mtime = dir->i_ctime = CURRENT_TIME;
        mark_inode_dirty(dir);
        sbi->s_numfiles++;

out_put:
        exofs_put_page(page);
out:
        return err;
out_unlock:
        unlock_page(page);
        goto out_put;
}
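
/*
 * Delete a directory entry (@dir is the entry to remove, despite the
 * name) by merging it into the preceding entry of the same chunk, or by
 * clearing its inode_no if it is the first entry, then commit the
 * modified range.
 */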
int exofs_delete_entry(struct exofs_dir_entry *dir, struct page *page)
{
        struct address_space *mapping = page->mapping;
        struct inode *inode = mapping->host;
        struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
        char *kaddr = page_address(page);
        unsigned from = ((char *)dir - kaddr) & ~(exofs_chunk_size(inode)-1);
        unsigned to = ((char *)dir - kaddr) + le16_to_cpu(dir->rec_len);
        loff_t pos;
        struct exofs_dir_entry *pde = NULL;
        struct exofs_dir_entry *de = (struct exofs_dir_entry *) (kaddr + from);
        int err;

        while (de < dir) {
                if (de->rec_len == 0) {
                        EXOFS_ERR("ERROR: exofs_delete_entry: "
                                "zero-length directory entry");
                        err = -EIO;
                        goto out;
                }
                pde = de;
                de = exofs_next_entry(de);
        }
        if (pde)
                from = (char *)pde - (char *)page_address(page);
        pos = page_offset(page) + from;
        lock_page(page);
        err = exofs_write_begin(NULL, page->mapping, pos, to - from, 0,
                                &page, NULL);
        if (err)
                EXOFS_ERR("exofs_delete_entry: exofs_write_begin FAILED => %d\n",
                          err);
        if (pde)
                pde->rec_len = cpu_to_le16(to - from);
        dir->inode_no = 0;
        if (likely(!err))
                err = exofs_commit_chunk(page, pos, to - from);
        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
        mark_inode_dirty(inode);
        sbi->s_numfiles--;
out:
        exofs_put_page(page);
        return err;
}

/* kept aligned on 4 bytes */
#define THIS_DIR ".\0\0"
#define PARENT_DIR "..\0"
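
/*
 * Create the "." and ".." entries of a freshly made directory, filling
 * exactly one chunk, and commit it.
 */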
int exofs_make_empty(struct inode *inode, struct inode *parent)
{
        struct address_space *mapping = inode->i_mapping;
        struct page *page = grab_cache_page(mapping, 0);
        unsigned chunk_size = exofs_chunk_size(inode);
        struct exofs_dir_entry *de;
        int err;
        void *kaddr;

        if (!page)
                return -ENOMEM;

        err = exofs_write_begin(NULL, page->mapping, 0, chunk_size, 0,
                                &page, NULL);
        if (err) {
                unlock_page(page);
                goto fail;
        }

        kaddr = kmap_atomic(page, KM_USER0);
        de = (struct exofs_dir_entry *)kaddr;
        de->name_len = 1;
        de->rec_len = cpu_to_le16(EXOFS_DIR_REC_LEN(1));
        memcpy(de->name, THIS_DIR, sizeof(THIS_DIR));
        de->inode_no = cpu_to_le64(inode->i_ino);
        exofs_set_de_type(de, inode);

        de = (struct exofs_dir_entry *)(kaddr + EXOFS_DIR_REC_LEN(1));
        de->name_len = 2;
        de->rec_len = cpu_to_le16(chunk_size - EXOFS_DIR_REC_LEN(1));
        de->inode_no = cpu_to_le64(parent->i_ino);
        memcpy(de->name, PARENT_DIR, sizeof(PARENT_DIR));
        exofs_set_de_type(de, inode);
        kunmap_atomic(kaddr, KM_USER0);
        err = exofs_commit_chunk(page, 0, chunk_size);
fail:
        page_cache_release(page);
        return err;
}
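
/*
 * Return 1 if the directory contains only "." and ".." entries,
 * 0 otherwise (or if a corrupt entry is found).
 */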
int exofs_empty_dir(struct inode *inode)
{
        struct page *page = NULL;
        unsigned long i, npages = dir_pages(inode);

        for (i = 0; i < npages; i++) {
                char *kaddr;
                struct exofs_dir_entry *de;
                page = exofs_get_page(inode, i);

                if (IS_ERR(page))
                        continue;

                kaddr = page_address(page);
                de = (struct exofs_dir_entry *)kaddr;
                kaddr += exofs_last_byte(inode, i) - EXOFS_DIR_REC_LEN(1);

                while ((char *)de <= kaddr) {
                        if (de->rec_len == 0) {
                                EXOFS_ERR("ERROR: exofs_empty_dir: "
                                        "zero-length directory entry, "
                                        "kaddr=%p, de=%p\n", kaddr, de);
                                goto not_empty;
                        }
                        if (de->inode_no != 0) {
                                /* check for . and .. */
                                if (de->name[0] != '.')
                                        goto not_empty;
                                if (de->name_len > 2)
                                        goto not_empty;
                                if (de->name_len < 2) {
                                        if (le64_to_cpu(de->inode_no) !=
                                            inode->i_ino)
                                                goto not_empty;
                                } else if (de->name[1] != '.')
                                        goto not_empty;
                        }
                        de = exofs_next_entry(de);
                }
                exofs_put_page(page);
        }
        return 1;

not_empty:
        exofs_put_page(page);
        return 0;
}

const struct file_operations exofs_dir_operations = {
        .llseek  = generic_file_llseek,
        .read    = generic_read_dir,
        .readdir = exofs_readdir,
};