
/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * William Irwin, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 */

#include <linux/module.h>
#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched.h>		/* remove ASAP */
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>

#include <asm/uaccess.h>
static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

struct hugetlbfs_config {
        kuid_t   uid;
        kgid_t   gid;
        umode_t  mode;
        long     nr_blocks;
        long     nr_inodes;
        struct hstate *hstate;
};

struct hugetlbfs_inode_info {
        struct shared_policy policy;
        struct inode vfs_inode;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
        return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

static struct backing_dev_info hugetlbfs_backing_dev_info = {
        .name           = "hugetlbfs",
        .ra_pages       = 0,    /* No readahead */
        .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

int sysctl_hugetlb_shm_group;
enum {
        Opt_size, Opt_nr_inodes,
        Opt_mode, Opt_uid, Opt_gid,
        Opt_pagesize,
        Opt_err,
};

static const match_table_t tokens = {
        {Opt_size,      "size=%s"},
        {Opt_nr_inodes, "nr_inodes=%s"},
        {Opt_mode,      "mode=%o"},
        {Opt_uid,       "uid=%u"},
        {Opt_gid,       "gid=%u"},
        {Opt_pagesize,  "pagesize=%s"},
        {Opt_err,       NULL},
};
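
/*
 * An illustrative mount invocation exercising the options above
 * (all values are examples only):
 *
 *   mount -t hugetlbfs \
 *         -o size=512M,nr_inodes=64,mode=0700,uid=1000,gid=1000,pagesize=2M \
 *         none /mnt/huge
 */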

static void huge_pagevec_release(struct pagevec *pvec)
{
        int i;

        for (i = 0; i < pagevec_count(pvec); ++i)
                put_page(pvec->pages[i]);

        pagevec_reinit(pvec);
}
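
/*
 * mmap() a hugetlbfs file: check that the requested offset is aligned to
 * the huge page size, reserve huge pages for the range up front, and grow
 * i_size if a writable mapping extends past the current end of file.
 */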
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file->f_path.dentry->d_inode;
        loff_t len, vma_len;
        int ret;
        struct hstate *h = hstate_file(file);

        /*
         * vma address alignment (but not the pgoff alignment) has
         * already been checked by prepare_hugepage_range.  If you add
         * any error returns here, do so after setting VM_HUGETLB, so
         * is_vm_hugetlb_page tests below unmap_region go the right
         * way when do_mmap_pgoff unwinds (may be important on powerpc
         * and ia64).
         */
        vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = &hugetlb_vm_ops;

        if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
                return -EINVAL;

        vma_len = (loff_t)(vma->vm_end - vma->vm_start);

        mutex_lock(&inode->i_mutex);
        file_accessed(file);

        ret = -ENOMEM;
        len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

        if (hugetlb_reserve_pages(inode,
                                vma->vm_pgoff >> huge_page_order(h),
                                len >> huge_page_shift(h), vma,
                                vma->vm_flags))
                goto out;

        ret = 0;
        hugetlb_prefault_arch_hook(vma->vm_mm);

        if (vma->vm_flags & VM_WRITE && inode->i_size < len)
                inode->i_size = len;
out:
        mutex_unlock(&inode->i_mutex);

        return ret;
}

/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;
        struct hstate *h = hstate_file(file);

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        if (len > mm->cached_hole_size)
                start_addr = mm->free_area_cache;
        else {
                start_addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

full_search:
        addr = ALIGN(start_addr, huge_page_size(h));

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point:  (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }

                if (!vma || addr + len <= vma->vm_start) {
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;
                addr = ALIGN(vma->vm_end, huge_page_size(h));
        }
}
#endif
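
/*
 * Copy up to 'size' bytes out of a (compound) huge page, walking it in
 * PAGE_CACHE_SIZE chunks so that each subpage can be kmap()ed on its own.
 * Returns the number of bytes copied, or -EFAULT if nothing could be
 * copied at all.
 */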
static int
hugetlbfs_read_actor(struct page *page, unsigned long offset,
                        char __user *buf, unsigned long count,
                        unsigned long size)
{
        char *kaddr;
        unsigned long left, copied = 0;
        int i, chunksize;

        if (size > count)
                size = count;

        /* Find which 4k chunk and offset within that chunk */
        i = offset >> PAGE_CACHE_SHIFT;
        offset = offset & ~PAGE_CACHE_MASK;

        while (size) {
                chunksize = PAGE_CACHE_SIZE;
                if (offset)
                        chunksize -= offset;
                if (chunksize > size)
                        chunksize = size;
                kaddr = kmap(&page[i]);
                left = __copy_to_user(buf, kaddr + offset, chunksize);
                kunmap(&page[i]);
                if (left) {
                        copied += (chunksize - left);
                        break;
                }
                offset = 0;
                size -= chunksize;
                buf += chunksize;
                copied += chunksize;
                i++;
        }
        return copied ? copied : -EFAULT;
}

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's very similar to do_generic_mapping_read(), but we can't use
 * that since it has PAGE_CACHE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
                        size_t len, loff_t *ppos)
{
        struct hstate *h = hstate_file(filp);
        struct address_space *mapping = filp->f_mapping;
        struct inode *inode = mapping->host;
        unsigned long index = *ppos >> huge_page_shift(h);
        unsigned long offset = *ppos & ~huge_page_mask(h);
        unsigned long end_index;
        loff_t isize;
        ssize_t retval = 0;

        /* validate length */
        if (len == 0)
                goto out;

        for (;;) {
                struct page *page;
                unsigned long nr, ret;
                int ra;

                /* nr is the maximum number of bytes to copy from this page */
                nr = huge_page_size(h);
                isize = i_size_read(inode);
                if (!isize)
                        goto out;
                end_index = (isize - 1) >> huge_page_shift(h);
                if (index >= end_index) {
                        if (index > end_index)
                                goto out;
                        nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
                        if (nr <= offset)
                                goto out;
                }
                nr = nr - offset;

                /* Find the page */
                page = find_lock_page(mapping, index);
                if (unlikely(page == NULL)) {
                        /*
                         * We have a HOLE, zero out the user-buffer for the
                         * length of the hole or request.
                         */
                        ret = len < nr ? len : nr;
                        if (clear_user(buf, ret))
                                ra = -EFAULT;
                        else
                                ra = 0;
                } else {
                        unlock_page(page);

                        /*
                         * We have the page, copy it to user space buffer.
                         */
                        ra = hugetlbfs_read_actor(page, offset, buf, len, nr);
                        ret = ra;
                        page_cache_release(page);
                }
                if (ra < 0) {
                        if (retval == 0)
                                retval = ra;
                        goto out;
                }

                offset += ret;
                retval += ret;
                len -= ret;
                index += offset >> huge_page_shift(h);
                offset &= ~huge_page_mask(h);

                /* short read or no more work */
                if ((ret != nr) || (len == 0))
                        break;
        }
out:
        *ppos = ((loff_t)index << huge_page_shift(h)) + offset;
        return retval;
}
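
/*
 * hugetlbfs does not support write(2): pages are only populated by
 * faulting in an mmap()ed file.  write_begin therefore always fails,
 * and write_end must never be reached.
 */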
static int hugetlbfs_write_begin(struct file *file,
                        struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        BUG();
        return -EINVAL;
}

static void truncate_huge_page(struct page *page)
{
        cancel_dirty_page(page, /* No IO accounting for huge pages? */0);
        ClearPageUptodate(page);
        delete_from_page_cache(page);
}
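
/*
 * Remove every page cache page from 'lstart' to the end of the file and
 * give the corresponding reservations back to the pool.  The lookup loop
 * restarts from 'start' whenever a batch comes back empty, so pages that
 * were missed on one pass are retried until the range is really clean.
 */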
static void truncate_hugepages(struct inode *inode, loff_t lstart)
{
        struct hstate *h = hstate_inode(inode);
        struct address_space *mapping = &inode->i_data;
        const pgoff_t start = lstart >> huge_page_shift(h);
        struct pagevec pvec;
        pgoff_t next;
        int i, freed = 0;

        pagevec_init(&pvec, 0);
        next = start;
        while (1) {
                if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                        if (next == start)
                                break;
                        next = start;
                        continue;
                }

                for (i = 0; i < pagevec_count(&pvec); ++i) {
                        struct page *page = pvec.pages[i];

                        lock_page(page);
                        if (page->index > next)
                                next = page->index;
                        ++next;
                        truncate_huge_page(page);
                        unlock_page(page);
                        freed++;
                }
                huge_pagevec_release(&pvec);
        }
        BUG_ON(!lstart && mapping->nrpages);
        hugetlb_unreserve_pages(inode, start, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
        truncate_hugepages(inode, 0);
        clear_inode(inode);
}

static inline void
hugetlb_vmtruncate_list(struct rb_root *root, pgoff_t pgoff)
{
        struct vm_area_struct *vma;

        vma_interval_tree_foreach(vma, root, pgoff, ULONG_MAX) {
                unsigned long v_offset;

                /*
                 * Can the expression below overflow on 32-bit arches?
                 * No, because the interval tree returns us only those vmas
                 * which overlap the truncated area starting at pgoff,
                 * and no vma on a 32-bit arch can span beyond the 4GB.
                 */
                if (vma->vm_pgoff < pgoff)
                        v_offset = (pgoff - vma->vm_pgoff) << PAGE_SHIFT;
                else
                        v_offset = 0;

                unmap_hugepage_range(vma, vma->vm_start + v_offset,
                                     vma->vm_end, NULL);
        }
}
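
/*
 * Truncate the file to 'offset' (which must be huge page aligned): shrink
 * i_size first, unmap the now out-of-range part of every mapping of the
 * file, then drop the page cache pages beyond the new end.
 */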
static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
        pgoff_t pgoff;
        struct address_space *mapping = inode->i_mapping;
        struct hstate *h = hstate_inode(inode);

        BUG_ON(offset & ~huge_page_mask(h));
        pgoff = offset >> PAGE_SHIFT;

        i_size_write(inode, offset);
        mutex_lock(&mapping->i_mmap_mutex);
        if (!RB_EMPTY_ROOT(&mapping->i_mmap))
                hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
        mutex_unlock(&mapping->i_mmap_mutex);
        truncate_hugepages(inode, offset);
        return 0;
}
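
/*
 * setattr: size changes via ftruncate() must be a multiple of the huge
 * page size; anything else is rejected with -EINVAL.
 */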
static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = dentry->d_inode;
        struct hstate *h = hstate_inode(inode);
        int error;
        unsigned int ia_valid = attr->ia_valid;

        BUG_ON(!inode);

        error = inode_change_ok(inode, attr);
        if (error)
                return error;

        if (ia_valid & ATTR_SIZE) {
                error = -EINVAL;
                if (attr->ia_size & ~huge_page_mask(h))
                        return -EINVAL;
                error = hugetlb_vmtruncate(inode, attr->ia_size);
                if (error)
                        return error;
        }

        setattr_copy(inode, attr);
        mark_inode_dirty(inode);
        return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
                                        struct hugetlbfs_config *config)
{
        struct inode *inode;

        inode = new_inode(sb);
        if (inode) {
                struct hugetlbfs_inode_info *info;
                inode->i_ino = get_next_ino();
                inode->i_mode = S_IFDIR | config->mode;
                inode->i_uid = config->uid;
                inode->i_gid = config->gid;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                info = HUGETLBFS_I(inode);
                mpol_shared_policy_init(&info->policy, NULL);
                inode->i_op = &hugetlbfs_dir_inode_operations;
                inode->i_fop = &simple_dir_operations;
                /* directory inodes start off with i_nlink == 2 (for "." entry) */
                inc_nlink(inode);
                lockdep_annotate_inode_mutex_key(inode);
        }
        return inode;
}

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
                                        struct inode *dir,
                                        umode_t mode, dev_t dev)
{
        struct inode *inode;

        inode = new_inode(sb);
        if (inode) {
                struct hugetlbfs_inode_info *info;
                inode->i_ino = get_next_ino();
                inode_init_owner(inode, dir, mode);
                inode->i_mapping->a_ops = &hugetlbfs_aops;
                inode->i_mapping->backing_dev_info = &hugetlbfs_backing_dev_info;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                INIT_LIST_HEAD(&inode->i_mapping->private_list);
                info = HUGETLBFS_I(inode);
                /*
                 * The policy is initialized here even if we are creating a
                 * private inode because initialization simply creates an
                 * empty rb tree and calls spin_lock_init(); later, when we
                 * call mpol_free_shared_policy() it will just return because
                 * the rb tree will still be empty.
                 */
                mpol_shared_policy_init(&info->policy, NULL);
                switch (mode & S_IFMT) {
                default:
                        init_special_inode(inode, mode, dev);
                        break;
                case S_IFREG:
                        inode->i_op = &hugetlbfs_inode_operations;
                        inode->i_fop = &hugetlbfs_file_operations;
                        break;
                case S_IFDIR:
                        inode->i_op = &hugetlbfs_dir_inode_operations;
                        inode->i_fop = &simple_dir_operations;

                        /* directory inodes start off with i_nlink == 2 (for "." entry) */
                        inc_nlink(inode);
                        break;
                case S_IFLNK:
                        inode->i_op = &page_symlink_inode_operations;
                        break;
                }
                lockdep_annotate_inode_mutex_key(inode);
        }
        return inode;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int hugetlbfs_mknod(struct inode *dir,
                        struct dentry *dentry, umode_t mode, dev_t dev)
{
        struct inode *inode;
        int error = -ENOSPC;

        inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
        if (inode) {
                dir->i_ctime = dir->i_mtime = CURRENT_TIME;
                d_instantiate(dentry, inode);
                dget(dentry);   /* Extra count - pin the dentry in core */
                error = 0;
        }
        return error;
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
        int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
        if (!retval)
                inc_nlink(dir);
        return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
        return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_symlink(struct inode *dir,
                        struct dentry *dentry, const char *symname)
{
        struct inode *inode;
        int error = -ENOSPC;

        inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
        if (inode) {
                int l = strlen(symname)+1;
                error = page_symlink(inode, symname, l);
                if (!error) {
                        d_instantiate(dentry, inode);
                        dget(dentry);
                } else
                        iput(inode);
        }
        dir->i_ctime = dir->i_mtime = CURRENT_TIME;

        return error;
}

/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
        struct page *head = compound_head(page);

        SetPageDirty(head);
        return 0;
}
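
/*
 * Migrate a huge page to a freshly allocated destination page: move the
 * page cache entry over to the new page, then copy the contents and page
 * state across.
 */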
static int hugetlbfs_migrate_page(struct address_space *mapping,
                                struct page *newpage, struct page *page,
                                enum migrate_mode mode)
{
        int rc;

        rc = migrate_huge_page_move_mapping(mapping, newpage, page);
        if (rc)
                return rc;
        migrate_page_copy(newpage, page);

        return 0;
}

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
        struct hstate *h = hstate_inode(dentry->d_inode);

        buf->f_type = HUGETLBFS_MAGIC;
        buf->f_bsize = huge_page_size(h);
        if (sbinfo) {
                spin_lock(&sbinfo->stat_lock);
                /* If no limits set, just report 0 for max/free/used
                 * blocks, like simple_statfs() */
                if (sbinfo->spool) {
                        long free_pages;

                        spin_lock(&sbinfo->spool->lock);
                        buf->f_blocks = sbinfo->spool->max_hpages;
                        free_pages = sbinfo->spool->max_hpages
                                - sbinfo->spool->used_hpages;
                        buf->f_bavail = buf->f_bfree = free_pages;
                        spin_unlock(&sbinfo->spool->lock);
                        buf->f_files = sbinfo->max_inodes;
                        buf->f_ffree = sbinfo->free_inodes;
                }
                spin_unlock(&sbinfo->stat_lock);
        }
        buf->f_namelen = NAME_MAX;
        return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
        struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

        if (sbi) {
                sb->s_fs_info = NULL;

                if (sbi->spool)
                        hugepage_put_subpool(sbi->spool);

                kfree(sbi);
        }
}
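
/*
 * Per-superblock inode accounting for the nr_inodes= mount option.
 * free_inodes < 0 means "no limit", so the counter is only touched
 * when a limit was actually configured.
 */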
static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
        if (sbinfo->free_inodes >= 0) {
                spin_lock(&sbinfo->stat_lock);
                if (unlikely(!sbinfo->free_inodes)) {
                        spin_unlock(&sbinfo->stat_lock);
                        return 0;
                }
                sbinfo->free_inodes--;
                spin_unlock(&sbinfo->stat_lock);
        }

        return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
        if (sbinfo->free_inodes >= 0) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_inodes++;
                spin_unlock(&sbinfo->stat_lock);
        }
}

static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
        struct hugetlbfs_inode_info *p;

        if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
                return NULL;
        p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
        if (unlikely(!p)) {
                hugetlbfs_inc_free_inodes(sbinfo);
                return NULL;
        }
        return &p->vfs_inode;
}

static void hugetlbfs_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
        hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
        mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
        call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
}

static const struct address_space_operations hugetlbfs_aops = {
        .write_begin    = hugetlbfs_write_begin,
        .write_end      = hugetlbfs_write_end,
        .set_page_dirty = hugetlbfs_set_page_dirty,
        .migratepage    = hugetlbfs_migrate_page,
};

static void init_once(void *foo)
{
        struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

        inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
        .read                   = hugetlbfs_read,
        .mmap                   = hugetlbfs_file_mmap,
        .fsync                  = noop_fsync,
        .get_unmapped_area      = hugetlb_get_unmapped_area,
        .llseek                 = default_llseek,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
        .create         = hugetlbfs_create,
        .lookup         = simple_lookup,
        .link           = simple_link,
        .unlink         = simple_unlink,
        .symlink        = hugetlbfs_symlink,
        .mkdir          = hugetlbfs_mkdir,
        .rmdir          = simple_rmdir,
        .mknod          = hugetlbfs_mknod,
        .rename         = simple_rename,
        .setattr        = hugetlbfs_setattr,
};

static const struct inode_operations hugetlbfs_inode_operations = {
        .setattr        = hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
        .alloc_inode    = hugetlbfs_alloc_inode,
        .destroy_inode  = hugetlbfs_destroy_inode,
        .evict_inode    = hugetlbfs_evict_inode,
        .statfs         = hugetlbfs_statfs,
        .put_super      = hugetlbfs_put_super,
        .show_options   = generic_show_options,
};
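
/*
 * Parse the mount options into *pconfig.  size= accepts either a byte
 * count (memparse() handles K/M/G suffixes) or a percentage of the huge
 * page pool, e.g. size=50%; the block count is computed only after a
 * pagesize= option has had a chance to select the hstate.
 */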
static int
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
{
        char *p, *rest;
        substring_t args[MAX_OPT_ARGS];
        int option;
        unsigned long long size = 0;
        enum { NO_SIZE, SIZE_STD, SIZE_PERCENT } setsize = NO_SIZE;

        if (!options)
                return 0;

        while ((p = strsep(&options, ",")) != NULL) {
                int token;
                if (!*p)
                        continue;

                token = match_token(p, tokens, args);
                switch (token) {
                case Opt_uid:
                        if (match_int(&args[0], &option))
                                goto bad_val;
                        pconfig->uid = make_kuid(current_user_ns(), option);
                        if (!uid_valid(pconfig->uid))
                                goto bad_val;
                        break;

                case Opt_gid:
                        if (match_int(&args[0], &option))
                                goto bad_val;
                        pconfig->gid = make_kgid(current_user_ns(), option);
                        if (!gid_valid(pconfig->gid))
                                goto bad_val;
                        break;

                case Opt_mode:
                        if (match_octal(&args[0], &option))
                                goto bad_val;
                        pconfig->mode = option & 01777U;
                        break;

                case Opt_size: {
                        /* memparse() will accept a K/M/G without a digit */
                        if (!isdigit(*args[0].from))
                                goto bad_val;
                        size = memparse(args[0].from, &rest);
                        setsize = SIZE_STD;
                        if (*rest == '%')
                                setsize = SIZE_PERCENT;
                        break;
                }

                case Opt_nr_inodes:
                        /* memparse() will accept a K/M/G without a digit */
                        if (!isdigit(*args[0].from))
                                goto bad_val;
                        pconfig->nr_inodes = memparse(args[0].from, &rest);
                        break;

                case Opt_pagesize: {
                        unsigned long ps;
                        ps = memparse(args[0].from, &rest);
                        pconfig->hstate = size_to_hstate(ps);
                        if (!pconfig->hstate) {
                                printk(KERN_ERR
                                        "hugetlbfs: Unsupported page size %lu MB\n",
                                        ps >> 20);
                                return -EINVAL;
                        }
                        break;
                }

                default:
                        printk(KERN_ERR "hugetlbfs: Bad mount option: \"%s\"\n",
                                 p);
                        return -EINVAL;
                        break;
                }
        }

        /* Do size after hstate is set up */
        if (setsize > NO_SIZE) {
                struct hstate *h = pconfig->hstate;
                if (setsize == SIZE_PERCENT) {
                        size <<= huge_page_shift(h);
                        size *= h->max_huge_pages;
                        do_div(size, 100);
                }
                pconfig->nr_blocks = (size >> huge_page_shift(h));
        }

        return 0;

bad_val:
        printk(KERN_ERR "hugetlbfs: Bad value '%s' for mount option '%s'\n",
               args[0].from, p);
        return -EINVAL;
}
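
/*
 * Fill in a new superblock: apply the mount options (defaulting to
 * unlimited size and inodes, mode 0755 and the default hstate), create
 * the subpool that enforces any size= limit, and allocate the root
 * directory inode.
 */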
static int
hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
{
        int ret;
        struct hugetlbfs_config config;
        struct hugetlbfs_sb_info *sbinfo;

        save_mount_options(sb, data);

        config.nr_blocks = -1;          /* No limit on size by default */
        config.nr_inodes = -1;          /* No limit on number of inodes by default */
        config.uid = current_fsuid();
        config.gid = current_fsgid();
        config.mode = 0755;
        config.hstate = &default_hstate;
        ret = hugetlbfs_parse_options(data, &config);
        if (ret)
                return ret;

        sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
        if (!sbinfo)
                return -ENOMEM;
        sb->s_fs_info = sbinfo;
        sbinfo->hstate = config.hstate;
        spin_lock_init(&sbinfo->stat_lock);
        sbinfo->max_inodes = config.nr_inodes;
        sbinfo->free_inodes = config.nr_inodes;
        sbinfo->spool = NULL;
        if (config.nr_blocks != -1) {
                sbinfo->spool = hugepage_new_subpool(config.nr_blocks);
                if (!sbinfo->spool)
                        goto out_free;
        }
        sb->s_maxbytes = MAX_LFS_FILESIZE;
        sb->s_blocksize = huge_page_size(config.hstate);
        sb->s_blocksize_bits = huge_page_shift(config.hstate);
        sb->s_magic = HUGETLBFS_MAGIC;
        sb->s_op = &hugetlbfs_ops;
        sb->s_time_gran = 1;
        sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config));
        if (!sb->s_root)
                goto out_free;
        return 0;
out_free:
        if (sbinfo->spool)
                kfree(sbinfo->spool);
        kfree(sbinfo);
        return -ENOMEM;
}

static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
{
        return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
}

static struct file_system_type hugetlbfs_fs_type = {
        .name           = "hugetlbfs",
        .mount          = hugetlbfs_mount,
        .kill_sb        = kill_litter_super,
};

static struct vfsmount *hugetlbfs_vfsmount;

static int can_do_hugetlb_shm(void)
{
        kgid_t shm_group;
        shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
        return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}
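
/*
 * Create an unlinked file on the internal hugetlbfs mount and reserve
 * huge pages for it up front.  This is the backing store used for
 * shmget(..., SHM_HUGETLB) segments; SHM callers without CAP_IPC_LOCK
 * or membership in sysctl_hugetlb_shm_group fall back to the
 * (deprecated) mlock ulimit check.
 */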
struct file *hugetlb_file_setup(const char *name, unsigned long addr,
                                size_t size, vm_flags_t acctflag,
                                struct user_struct **user, int creat_flags)
{
        int error = -ENOMEM;
        struct file *file;
        struct inode *inode;
        struct path path;
        struct dentry *root;
        struct qstr quick_string;
        struct hstate *hstate;
        unsigned long num_pages;

        *user = NULL;
        if (!hugetlbfs_vfsmount)
                return ERR_PTR(-ENOENT);

        if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
                *user = current_user();
                if (user_shm_lock(size, *user)) {
                        task_lock(current);
                        printk_once(KERN_WARNING
                                "%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
                                current->comm, current->pid);
                        task_unlock(current);
                } else {
                        *user = NULL;
                        return ERR_PTR(-EPERM);
                }
        }

        root = hugetlbfs_vfsmount->mnt_root;
        quick_string.name = name;
        quick_string.len = strlen(quick_string.name);
        quick_string.hash = 0;
        path.dentry = d_alloc(root, &quick_string);
        if (!path.dentry)
                goto out_shm_unlock;

        path.mnt = mntget(hugetlbfs_vfsmount);
        error = -ENOSPC;
        inode = hugetlbfs_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0);
        if (!inode)
                goto out_dentry;

        hstate = hstate_inode(inode);
        size += addr & ~huge_page_mask(hstate);
        num_pages = ALIGN(size, huge_page_size(hstate)) >>
                        huge_page_shift(hstate);
        error = -ENOMEM;
        if (hugetlb_reserve_pages(inode, 0, num_pages, NULL, acctflag))
                goto out_inode;

        d_instantiate(path.dentry, inode);
        inode->i_size = size;
        clear_nlink(inode);

        error = -ENFILE;
        file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
                        &hugetlbfs_file_operations);
        if (!file)
                goto out_dentry; /* inode is already attached */

        return file;

out_inode:
        iput(inode);
out_dentry:
        path_put(&path);
out_shm_unlock:
        if (*user) {
                user_shm_unlock(size, *user);
                *user = NULL;
        }
        return ERR_PTR(error);
}

static int __init init_hugetlbfs_fs(void)
{
        int error;
        struct vfsmount *vfsmount;

        error = bdi_init(&hugetlbfs_backing_dev_info);
        if (error)
                return error;

        error = -ENOMEM;
        hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
                                        sizeof(struct hugetlbfs_inode_info),
                                        0, 0, init_once);
        if (hugetlbfs_inode_cachep == NULL)
                goto out2;

        error = register_filesystem(&hugetlbfs_fs_type);
        if (error)
                goto out;

        vfsmount = kern_mount(&hugetlbfs_fs_type);

        if (!IS_ERR(vfsmount)) {
                hugetlbfs_vfsmount = vfsmount;
                return 0;
        }

        error = PTR_ERR(vfsmount);

 out:
        kmem_cache_destroy(hugetlbfs_inode_cachep);
 out2:
        bdi_destroy(&hugetlbfs_backing_dev_info);
        return error;
}

static void __exit exit_hugetlbfs_fs(void)
{
        /*
         * Make sure all delayed rcu free inodes are flushed before we
         * destroy cache.
         */
        rcu_barrier();
        kmem_cache_destroy(hugetlbfs_inode_cachep);
        kern_unmount(hugetlbfs_vfsmount);
        unregister_filesystem(&hugetlbfs_fs_type);
        bdi_destroy(&hugetlbfs_backing_dev_info);
}

module_init(init_hugetlbfs_fs)
module_exit(exit_hugetlbfs_fs)

MODULE_LICENSE("GPL");