- /*
- * Resizable virtual memory filesystem for Linux.
- *
- * Copyright (C) 2000 Linus Torvalds.
- * 2000 Transmeta Corp.
- * 2000-2001 Christoph Rohland
- * 2000-2001 SAP AG
- * 2002 Red Hat Inc.
- * Copyright (C) 2002-2005 Hugh Dickins.
- * Copyright (C) 2002-2005 VERITAS Software Corporation.
- * Copyright (C) 2004 Andi Kleen, SuSE Labs
- *
- * Extended attribute support for tmpfs:
- * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
- * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
- *
- * This file is released under the GPL.
- */
- /*
- * This virtual memory filesystem is heavily based on the ramfs. It
- * extends ramfs by the ability to use swap and honor resource limits
- * which makes it a completely usable filesystem.
- */
- #include <linux/config.h>
- #include <linux/module.h>
- #include <linux/init.h>
- #include <linux/devfs_fs_kernel.h>
- #include <linux/fs.h>
- #include <linux/mm.h>
- #include <linux/mman.h>
- #include <linux/file.h>
- #include <linux/swap.h>
- #include <linux/pagemap.h>
- #include <linux/string.h>
- #include <linux/slab.h>
- #include <linux/backing-dev.h>
- #include <linux/shmem_fs.h>
- #include <linux/mount.h>
- #include <linux/writeback.h>
- #include <linux/vfs.h>
- #include <linux/blkdev.h>
- #include <linux/security.h>
- #include <linux/swapops.h>
- #include <linux/mempolicy.h>
- #include <linux/namei.h>
- #include <linux/xattr.h>
- #include <asm/uaccess.h>
- #include <asm/div64.h>
- #include <asm/pgtable.h>
- /* This magic number is used in glibc for posix shared memory */
- #define TMPFS_MAGIC 0x01021994
- #define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
- #define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
- #define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512)
- #define SHMEM_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
- #define SHMEM_MAX_BYTES ((unsigned long long)SHMEM_MAX_INDEX << PAGE_CACHE_SHIFT)
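- /*
- * Worked example (assuming 4K PAGE_CACHE_SIZE, 4-byte unsigned long,
- * and SHMEM_NR_DIRECT == 16, i.e. a typical 32-bit build):
- * ENTRIES_PER_PAGE = 1024, ENTRIES_PER_PAGEPAGE = 1024*1024, so
- * SHMEM_MAX_INDEX = 16 + (512*1024 * 1025) ~ 537 million pages,
- * putting SHMEM_MAX_BYTES at roughly 2TB per file.
- */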
- #define VM_ACCT(size) (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
- /* info->flags needs VM_flags to handle pagein/truncate races efficiently */
- #define SHMEM_PAGEIN VM_READ
- #define SHMEM_TRUNCATE VM_WRITE
- /* Definition to limit shmem_truncate's steps between cond_rescheds */
- #define LATENCY_LIMIT 64
- /* Pretend that each entry is of this size in directory's i_size */
- #define BOGO_DIRENT_SIZE 20
- /* Keep swapped page count in private field of indirect struct page */
- #define nr_swapped private
- /* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
- enum sgp_type {
- SGP_QUICK, /* don't try more than file page cache lookup */
- SGP_READ, /* don't exceed i_size, don't allocate page */
- SGP_CACHE, /* don't exceed i_size, may allocate page */
- SGP_WRITE, /* may exceed i_size, may allocate page */
- };
- static int shmem_getpage(struct inode *inode, unsigned long idx,
- struct page **pagep, enum sgp_type sgp, int *type);
- static inline struct page *shmem_dir_alloc(unsigned int gfp_mask)
- {
- /*
- * The above definition of ENTRIES_PER_PAGE, and the use of
- * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
- * might be reconsidered if it ever diverges from PAGE_SIZE.
- */
- return alloc_pages(gfp_mask, PAGE_CACHE_SHIFT-PAGE_SHIFT);
- }
- static inline void shmem_dir_free(struct page *page)
- {
- __free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
- }
- static struct page **shmem_dir_map(struct page *page)
- {
- return (struct page **)kmap_atomic(page, KM_USER0);
- }
- static inline void shmem_dir_unmap(struct page **dir)
- {
- kunmap_atomic(dir, KM_USER0);
- }
- static swp_entry_t *shmem_swp_map(struct page *page)
- {
- return (swp_entry_t *)kmap_atomic(page, KM_USER1);
- }
- static inline void shmem_swp_balance_unmap(void)
- {
- /*
- * When passing a pointer to an i_direct entry, to code which
- * also handles indirect entries and so will shmem_swp_unmap,
- * we must arrange for the preempt count to remain in balance.
- * What kmap_atomic of a lowmem page does depends on config
- * and architecture, so pretend to kmap_atomic some lowmem page.
- */
- (void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
- }
- static inline void shmem_swp_unmap(swp_entry_t *entry)
- {
- kunmap_atomic(entry, KM_USER1);
- }
- static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
- {
- return sb->s_fs_info;
- }
- /*
- * shmem_file_setup pre-accounts the whole fixed size of a VM object,
- * for shared memory and for shared anonymous (/dev/zero) mappings
- * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
- * consistent with the pre-accounting of private mappings ...
- */
- static inline int shmem_acct_size(unsigned long flags, loff_t size)
- {
- return (flags & VM_ACCOUNT)?
- security_vm_enough_memory(VM_ACCT(size)): 0;
- }
- static inline void shmem_unacct_size(unsigned long flags, loff_t size)
- {
- if (flags & VM_ACCOUNT)
- vm_unacct_memory(VM_ACCT(size));
- }
- /*
- * ... whereas tmpfs objects are accounted incrementally as
- * pages are allocated, in order to allow huge sparse files.
- * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
- * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
- */
- static inline int shmem_acct_block(unsigned long flags)
- {
- return (flags & VM_ACCOUNT)?
- 0: security_vm_enough_memory(VM_ACCT(PAGE_CACHE_SIZE));
- }
- static inline void shmem_unacct_blocks(unsigned long flags, long pages)
- {
- if (!(flags & VM_ACCOUNT))
- vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
- }
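- /*
- * Concretely: a 1GB shared memory segment is charged for the full
- * 1GB up front at setup, whereas a sparse 1GB tmpfs file is charged
- * one page at a time, only as blocks are actually allocated.
- */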
- static struct super_operations shmem_ops;
- static struct address_space_operations shmem_aops;
- static struct file_operations shmem_file_operations;
- static struct inode_operations shmem_inode_operations;
- static struct inode_operations shmem_dir_inode_operations;
- static struct inode_operations shmem_special_inode_operations;
- static struct vm_operations_struct shmem_vm_ops;
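- /*
- * tmpfs pages are always either resident or in the swap cache, so
- * the backing device advertises no readahead and opts out of dirty
- * accounting and writeback: pages go to swap via shmem_writepage.
- */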
- static struct backing_dev_info shmem_backing_dev_info = {
- .ra_pages = 0, /* No readahead */
- .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
- .unplug_io_fn = default_unplug_io_fn,
- };
- static LIST_HEAD(shmem_swaplist);
- static DEFINE_SPINLOCK(shmem_swaplist_lock);
- static void shmem_free_blocks(struct inode *inode, long pages)
- {
- struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
- if (sbinfo->max_blocks) {
- spin_lock(&sbinfo->stat_lock);
- sbinfo->free_blocks += pages;
- inode->i_blocks -= pages*BLOCKS_PER_PAGE;
- spin_unlock(&sbinfo->stat_lock);
- }
- }
- /*
- * shmem_recalc_inode - recalculate the size of an inode
- *
- * @inode: inode to recalc
- *
- * We have to calculate the free blocks since the mm can drop
- * undirtied hole pages behind our back.
- *
- * But normally info->alloced == inode->i_mapping->nrpages + info->swapped
- * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
- *
- * It has to be called with the spinlock held.
- */
- static void shmem_recalc_inode(struct inode *inode)
- {
- struct shmem_inode_info *info = SHMEM_I(inode);
- long freed;
- freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
- if (freed > 0) {
- info->alloced -= freed;
- shmem_unacct_blocks(info->flags, freed);
- shmem_free_blocks(inode, freed);
- }
- }
- /*
- * shmem_swp_entry - find the swap vector position in the info structure
- *
- * @info: info structure for the inode
- * @index: index of the page to find
- * @page: optional page to add to the structure. Has to be preset to
- * all zeros
- *
- * If there is no space allocated yet it will return NULL when
- * page is NULL, else it will use the page for the needed block,
- * setting it to NULL on return to indicate that it has been used.
- *
- * The swap vector is organized the following way:
- *
- * There are SHMEM_NR_DIRECT entries directly stored in the
- * shmem_inode_info structure. So small files do not need an additional
- * allocation.
- *
- * For pages with index > SHMEM_NR_DIRECT there is the pointer
- * i_indirect which points to a page which holds in the first half
- * doubly indirect blocks, in the second half triple indirect blocks:
- *
- * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
- * following layout (for SHMEM_NR_DIRECT == 16):
- *
- * i_indirect -> dir --> 16-19
- *            |     +-> 20-23
- *            |
- *            +-->dir2 --> 24-27
- *            |        +-> 28-31
- *            |        +-> 32-35
- *            |        +-> 36-39
- *            |
- *            +-->dir3 --> 40-43
- *                     +-> 44-47
- *                     +-> 48-51
- *                     +-> 52-55
- */
- static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
- {
- unsigned long offset;
- struct page **dir;
- struct page *subdir;
- if (index < SHMEM_NR_DIRECT) {
- shmem_swp_balance_unmap();
- return info->i_direct+index;
- }
- if (!info->i_indirect) {
- if (page) {
- info->i_indirect = *page;
- *page = NULL;
- }
- return NULL; /* need another page */
- }
- index -= SHMEM_NR_DIRECT;
- offset = index % ENTRIES_PER_PAGE;
- index /= ENTRIES_PER_PAGE;
- dir = shmem_dir_map(info->i_indirect);
- if (index >= ENTRIES_PER_PAGE/2) {
- index -= ENTRIES_PER_PAGE/2;
- dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
- index %= ENTRIES_PER_PAGE;
- subdir = *dir;
- if (!subdir) {
- if (page) {
- *dir = *page;
- *page = NULL;
- }
- shmem_dir_unmap(dir);
- return NULL; /* need another page */
- }
- shmem_dir_unmap(dir);
- dir = shmem_dir_map(subdir);
- }
- dir += index;
- subdir = *dir;
- if (!subdir) {
- if (!page || !(subdir = *page)) {
- shmem_dir_unmap(dir);
- return NULL; /* need a page */
- }
- *dir = subdir;
- *page = NULL;
- }
- shmem_dir_unmap(dir);
- return shmem_swp_map(subdir) + offset;
- }
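- /*
- * Example walk (again with ENTRIES_PER_PAGE == 1024 and
- * SHMEM_NR_DIRECT == 16): for index 2000, index - 16 = 1984 gives
- * offset 960 and dir slot 1 in the doubly indirect half, so the
- * entry returned is slot 960 of the subdir page at i_indirect[1].
- */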
- static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
- {
- long incdec = value? 1: -1;
- entry->val = value;
- info->swapped += incdec;
- if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT)
- kmap_atomic_to_page(entry)->nr_swapped += incdec;
- }
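- /*
- * shmem_swp_set thus keeps two counts in step: info->swapped for
- * the inode as a whole, and nr_swapped in each indirect page, which
- * lets truncate and unuse skip subdirs holding no swap entries.
- */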
- /*
- * shmem_swp_alloc - get the position of the swap entry for the page.
- * If it does not exist allocate the entry.
- *
- * @info: info structure for the inode
- * @index: index of the page to find
- * @sgp: check and recheck i_size? skip allocation?
- */
- static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
- {
- struct inode *inode = &info->vfs_inode;
- struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
- struct page *page = NULL;
- swp_entry_t *entry;
- if (sgp != SGP_WRITE &&
- ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
- return ERR_PTR(-EINVAL);
- while (!(entry = shmem_swp_entry(info, index, &page))) {
- if (sgp == SGP_READ)
- return shmem_swp_map(ZERO_PAGE(0));
- /*
- * Test free_blocks against 1 not 0, since we have 1 data
- * page (and perhaps indirect index pages) yet to allocate:
- * a waste to allocate index if we cannot allocate data.
- */
- if (sbinfo->max_blocks) {
- spin_lock(&sbinfo->stat_lock);
- if (sbinfo->free_blocks <= 1) {
- spin_unlock(&sbinfo->stat_lock);
- return ERR_PTR(-ENOSPC);
- }
- sbinfo->free_blocks--;
- inode->i_blocks += BLOCKS_PER_PAGE;
- spin_unlock(&sbinfo->stat_lock);
- }
- spin_unlock(&info->lock);
- page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping) | __GFP_ZERO);
- if (page) {
- page->nr_swapped = 0;
- }
- spin_lock(&info->lock);
- if (!page) {
- shmem_free_blocks(inode, 1);
- return ERR_PTR(-ENOMEM);
- }
- if (sgp != SGP_WRITE &&
- ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
- entry = ERR_PTR(-EINVAL);
- break;
- }
- if (info->next_index <= index)
- info->next_index = index + 1;
- }
- if (page) {
- /* another task gave its page, or truncated the file */
- shmem_free_blocks(inode, 1);
- shmem_dir_free(page);
- }
- if (info->next_index <= index && !IS_ERR(entry))
- info->next_index = index + 1;
- return entry;
- }
- /*
- * shmem_free_swp - free some swap entries in a directory
- *
- * @dir: pointer to the directory
- * @edir: pointer after last entry of the directory
- */
- static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir)
- {
- swp_entry_t *ptr;
- int freed = 0;
- for (ptr = dir; ptr < edir; ptr++) {
- if (ptr->val) {
- free_swap_and_cache(*ptr);
- *ptr = (swp_entry_t){0};
- freed++;
- }
- }
- return freed;
- }
- static int shmem_map_and_free_swp(struct page *subdir,
- int offset, int limit, struct page ***dir)
- {
- swp_entry_t *ptr;
- int freed = 0;
- ptr = shmem_swp_map(subdir);
- for (; offset < limit; offset += LATENCY_LIMIT) {
- int size = limit - offset;
- if (size > LATENCY_LIMIT)
- size = LATENCY_LIMIT;
- freed += shmem_free_swp(ptr+offset, ptr+offset+size);
- if (need_resched()) {
- shmem_swp_unmap(ptr);
- if (*dir) {
- shmem_dir_unmap(*dir);
- *dir = NULL;
- }
- cond_resched();
- ptr = shmem_swp_map(subdir);
- }
- }
- shmem_swp_unmap(ptr);
- return freed;
- }
- static void shmem_free_pages(struct list_head *next)
- {
- struct page *page;
- int freed = 0;
- do {
- page = container_of(next, struct page, lru);
- next = next->next;
- shmem_dir_free(page);
- freed++;
- if (freed >= LATENCY_LIMIT) {
- cond_resched();
- freed = 0;
- }
- } while (next);
- }
- static void shmem_truncate(struct inode *inode)
- {
- struct shmem_inode_info *info = SHMEM_I(inode);
- unsigned long idx;
- unsigned long size;
- unsigned long limit;
- unsigned long stage;
- unsigned long diroff;
- struct page **dir;
- struct page *topdir;
- struct page *middir;
- struct page *subdir;
- swp_entry_t *ptr;
- LIST_HEAD(pages_to_free);
- long nr_pages_to_free = 0;
- long nr_swaps_freed = 0;
- int offset;
- int freed;
- inode->i_ctime = inode->i_mtime = CURRENT_TIME;
- idx = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- if (idx >= info->next_index)
- return;
- spin_lock(&info->lock);
- info->flags |= SHMEM_TRUNCATE;
- limit = info->next_index;
- info->next_index = idx;
- topdir = info->i_indirect;
- if (topdir && idx <= SHMEM_NR_DIRECT) {
- info->i_indirect = NULL;
- nr_pages_to_free++;
- list_add(&topdir->lru, &pages_to_free);
- }
- spin_unlock(&info->lock);
- if (info->swapped && idx < SHMEM_NR_DIRECT) {
- ptr = info->i_direct;
- size = limit;
- if (size > SHMEM_NR_DIRECT)
- size = SHMEM_NR_DIRECT;
- nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size);
- }
- if (!topdir)
- goto done2;
- BUG_ON(limit <= SHMEM_NR_DIRECT);
- limit -= SHMEM_NR_DIRECT;
- idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
- offset = idx % ENTRIES_PER_PAGE;
- idx -= offset;
- dir = shmem_dir_map(topdir);
- stage = ENTRIES_PER_PAGEPAGE/2;
- if (idx < ENTRIES_PER_PAGEPAGE/2) {
- middir = topdir;
- diroff = idx/ENTRIES_PER_PAGE;
- } else {
- dir += ENTRIES_PER_PAGE/2;
- dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
- while (stage <= idx)
- stage += ENTRIES_PER_PAGEPAGE;
- middir = *dir;
- if (*dir) {
- diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
- ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
- if (!diroff && !offset) {
- *dir = NULL;
- nr_pages_to_free++;
- list_add(&middir->lru, &pages_to_free);
- }
- shmem_dir_unmap(dir);
- dir = shmem_dir_map(middir);
- } else {
- diroff = 0;
- offset = 0;
- idx = stage;
- }
- }
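- /*
- * Each pass of the loop below handles one subdir page of entries.
- * "stage" is the index at which the walk crosses into the next
- * triple-indirect middir: that middir is detached and queued for
- * freeing, then its subdirs are scanned for swap entries to free.
- */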
- for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
- if (unlikely(idx == stage)) {
- shmem_dir_unmap(dir);
- dir = shmem_dir_map(topdir) +
- ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
- while (!*dir) {
- dir++;
- idx += ENTRIES_PER_PAGEPAGE;
- if (idx >= limit)
- goto done1;
- }
- stage = idx + ENTRIES_PER_PAGEPAGE;
- middir = *dir;
- *dir = NULL;
- nr_pages_to_free++;
- list_add(&middir->lru, &pages_to_free);
- shmem_dir_unmap(dir);
- cond_resched();
- dir = shmem_dir_map(middir);
- diroff = 0;
- }
- subdir = dir[diroff];
- if (subdir && subdir->nr_swapped) {
- size = limit - idx;
- if (size > ENTRIES_PER_PAGE)
- size = ENTRIES_PER_PAGE;
- freed = shmem_map_and_free_swp(subdir,
- offset, size, &dir);
- if (!dir)
- dir = shmem_dir_map(middir);
- nr_swaps_freed += freed;
- if (offset)
- spin_lock(&info->lock);
- subdir->nr_swapped -= freed;
- if (offset)
- spin_unlock(&info->lock);
- BUG_ON(subdir->nr_swapped > offset);
- }
- if (offset)
- offset = 0;
- else if (subdir) {
- dir[diroff] = NULL;
- nr_pages_to_free++;
- list_add(&subdir->lru, &pages_to_free);
- }
- }
- done1:
- shmem_dir_unmap(dir);
- done2:
- if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
- /*
- * Call truncate_inode_pages again: racing shmem_unuse_inode
- * may have swizzled a page in from swap since vmtruncate or
- * generic_delete_inode did it, before we lowered next_index.
- * Also, though shmem_getpage checks i_size before adding to
- * cache, no recheck after: so fix the narrow window there too.
- */
- truncate_inode_pages(inode->i_mapping, inode->i_size);
- }
- spin_lock(&info->lock);
- info->flags &= ~SHMEM_TRUNCATE;
- info->swapped -= nr_swaps_freed;
- if (nr_pages_to_free)
- shmem_free_blocks(inode, nr_pages_to_free);
- shmem_recalc_inode(inode);
- spin_unlock(&info->lock);
- /*
- * Empty swap vector directory pages to be freed?
- */
- if (!list_empty(&pages_to_free)) {
- pages_to_free.prev->next = NULL;
- shmem_free_pages(pages_to_free.next);
- }
- }
- static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
- {
- struct inode *inode = dentry->d_inode;
- struct page *page = NULL;
- int error;
- if (attr->ia_valid & ATTR_SIZE) {
- if (attr->ia_size < inode->i_size) {
- /*
- * If truncating down to a partial page, then
- * if that page is already allocated, hold it
- * in memory until the truncation is over, so
- * truncate_partial_page cannot miss it were
- * it assigned to swap.
- */
- if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
- (void) shmem_getpage(inode,
- attr->ia_size>>PAGE_CACHE_SHIFT,
- &page, SGP_READ, NULL);
- }
- /*
- * Reset SHMEM_PAGEIN flag so that shmem_truncate can
- * detect if any pages might have been added to cache
- * after truncate_inode_pages. But we needn't bother
- * if it's being fully truncated to zero-length: the
- * nrpages check is efficient enough in that case.
- */
- if (attr->ia_size) {
- struct shmem_inode_info *info = SHMEM_I(inode);
- spin_lock(&info->lock);
- info->flags &= ~SHMEM_PAGEIN;
- spin_unlock(&info->lock);
- }
- }
- }
- error = inode_change_ok(inode, attr);
- if (!error)
- error = inode_setattr(inode, attr);
- if (page)
- page_cache_release(page);
- return error;
- }
- static void shmem_delete_inode(struct inode *inode)
- {
- struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
- struct shmem_inode_info *info = SHMEM_I(inode);
- if (inode->i_op->truncate == shmem_truncate) {
- shmem_unacct_size(info->flags, inode->i_size);
- inode->i_size = 0;
- shmem_truncate(inode);
- if (!list_empty(&info->swaplist)) {
- spin_lock(&shmem_swaplist_lock);
- list_del_init(&info->swaplist);
- spin_unlock(&shmem_swaplist_lock);
- }
- }
- BUG_ON(inode->i_blocks);
- if (sbinfo->max_inodes) {
- spin_lock(&sbinfo->stat_lock);
- sbinfo->free_inodes++;
- spin_unlock(&sbinfo->stat_lock);
- }
- clear_inode(inode);
- }
- static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
- {
- swp_entry_t *ptr;
- for (ptr = dir; ptr < edir; ptr++) {
- if (ptr->val == entry.val)
- return ptr - dir;
- }
- return -1;
- }
- static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
- {
- struct inode *inode;
- unsigned long idx;
- unsigned long size;
- unsigned long limit;
- unsigned long stage;
- struct page **dir;
- struct page *subdir;
- swp_entry_t *ptr;
- int offset;
- idx = 0;
- ptr = info->i_direct;
- spin_lock(&info->lock);
- limit = info->next_index;
- size = limit;
- if (size > SHMEM_NR_DIRECT)
- size = SHMEM_NR_DIRECT;
- offset = shmem_find_swp(entry, ptr, ptr+size);
- if (offset >= 0) {
- shmem_swp_balance_unmap();
- goto found;
- }
- if (!info->i_indirect)
- goto lost2;
- dir = shmem_dir_map(info->i_indirect);
- stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;
- for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
- if (unlikely(idx == stage)) {
- shmem_dir_unmap(dir-1);
- dir = shmem_dir_map(info->i_indirect) +
- ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
- while (!*dir) {
- dir++;
- idx += ENTRIES_PER_PAGEPAGE;
- if (idx >= limit)
- goto lost1;
- }
- stage = idx + ENTRIES_PER_PAGEPAGE;
- subdir = *dir;
- shmem_dir_unmap(dir);
- dir = shmem_dir_map(subdir);
- }
- subdir = *dir;
- if (subdir && subdir->nr_swapped) {
- ptr = shmem_swp_map(subdir);
- size = limit - idx;
- if (size > ENTRIES_PER_PAGE)
- size = ENTRIES_PER_PAGE;
- offset = shmem_find_swp(entry, ptr, ptr+size);
- if (offset >= 0) {
- shmem_dir_unmap(dir);
- goto found;
- }
- shmem_swp_unmap(ptr);
- }
- }
- lost1:
- shmem_dir_unmap(dir-1);
- lost2:
- spin_unlock(&info->lock);
- return 0;
- found:
- idx += offset;
- inode = &info->vfs_inode;
- if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
- info->flags |= SHMEM_PAGEIN;
- shmem_swp_set(info, ptr + offset, 0);
- }
- shmem_swp_unmap(ptr);
- spin_unlock(&info->lock);
- /*
- * Decrement swap count even when the entry is left behind:
- * try_to_unuse will skip over mms, then reincrement count.
- */
- swap_free(entry);
- return 1;
- }
- /*
- * shmem_unuse() searches for a possibly swapped-out shmem page.
- */
- int shmem_unuse(swp_entry_t entry, struct page *page)
- {
- struct list_head *p, *next;
- struct shmem_inode_info *info;
- int found = 0;
- spin_lock(&shmem_swaplist_lock);
- list_for_each_safe(p, next, &shmem_swaplist) {
- info = list_entry(p, struct shmem_inode_info, swaplist);
- if (!info->swapped)
- list_del_init(&info->swaplist);
- else if (shmem_unuse_inode(info, entry, page)) {
- /* move head to start search for next from here */
- list_move_tail(&shmem_swaplist, &info->swaplist);
- found = 1;
- break;
- }
- }
- spin_unlock(&shmem_swaplist_lock);
- return found;
- }
- /*
- * Move the page from the page cache to the swap cache. If no swap
- * entry can be allocated (or the file is mlocked), the page is
- * redirtied and reactivated: there is nowhere to write it out to.
- */
- static int shmem_writepage(struct page *page, struct writeback_control *wbc)
- {
- struct shmem_inode_info *info;
- swp_entry_t *entry, swap;
- struct address_space *mapping;
- unsigned long index;
- struct inode *inode;
- BUG_ON(!PageLocked(page));
- BUG_ON(page_mapped(page));
- mapping = page->mapping;
- index = page->index;
- inode = mapping->host;
- info = SHMEM_I(inode);
- if (info->flags & VM_LOCKED)
- goto redirty;
- swap = get_swap_page();
- if (!swap.val)
- goto redirty;
- spin_lock(&info->lock);
- shmem_recalc_inode(inode);
- if (index >= info->next_index) {
- BUG_ON(!(info->flags & SHMEM_TRUNCATE));
- goto unlock;
- }
- entry = shmem_swp_entry(info, index, NULL);
- BUG_ON(!entry);
- BUG_ON(entry->val);
- if (move_to_swap_cache(page, swap) == 0) {
- shmem_swp_set(info, entry, swap.val);
- shmem_swp_unmap(entry);
- spin_unlock(&info->lock);
- if (list_empty(&info->swaplist)) {
- spin_lock(&shmem_swaplist_lock);
- /* move instead of add in case we're racing */
- list_move_tail(&info->swaplist, &shmem_swaplist);
- spin_unlock(&shmem_swaplist_lock);
- }
- unlock_page(page);
- return 0;
- }
- shmem_swp_unmap(entry);
- unlock:
- spin_unlock(&info->lock);
- swap_free(swap);
- redirty:
- set_page_dirty(page);
- return WRITEPAGE_ACTIVATE; /* Return with the page locked */
- }
- #ifdef CONFIG_NUMA
- static struct page *shmem_swapin_async(struct shared_policy *p,
- swp_entry_t entry, unsigned long idx)
- {
- struct page *page;
- struct vm_area_struct pvma;
- /* Create a pseudo vma that just contains the policy */
- memset(&pvma, 0, sizeof(struct vm_area_struct));
- pvma.vm_end = PAGE_SIZE;
- pvma.vm_pgoff = idx;
- pvma.vm_policy = mpol_shared_policy_lookup(p, idx);
- page = read_swap_cache_async(entry, &pvma, 0);
- mpol_free(pvma.vm_policy);
- return page;
- }
- struct page *shmem_swapin(struct shmem_inode_info *info, swp_entry_t entry,
- unsigned long idx)
- {
- struct shared_policy *p = &info->policy;
- int i, num;
- struct page *page;
- unsigned long offset;
- num = valid_swaphandles(entry, &offset);
- for (i = 0; i < num; offset++, i++) {
- page = shmem_swapin_async(p,
- swp_entry(swp_type(entry), offset), idx);
- if (!page)
- break;
- page_cache_release(page);
- }
- lru_add_drain(); /* Push any new pages onto the LRU now */
- return shmem_swapin_async(p, entry, idx);
- }
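- /*
- * The loop above only primes the swap cache with readahead pages,
- * dropping each reference at once; the final call brings in the
- * page actually requested, with its shared policy applied.
- */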
- static struct page *
- shmem_alloc_page(unsigned long gfp, struct shmem_inode_info *info,
- unsigned long idx)
- {
- struct vm_area_struct pvma;
- struct page *page;
- memset(&pvma, 0, sizeof(struct vm_area_struct));
- pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
- pvma.vm_pgoff = idx;
- pvma.vm_end = PAGE_SIZE;
- page = alloc_page_vma(gfp | __GFP_ZERO, &pvma, 0);
- mpol_free(pvma.vm_policy);
- return page;
- }
- #else
- static inline struct page *
- shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
- {
- swapin_readahead(entry, 0, NULL);
- return read_swap_cache_async(entry, NULL, 0);
- }
- static inline struct page *
- shmem_alloc_page(unsigned int __nocast gfp,struct shmem_inode_info *info,
- unsigned long idx)
- {
- return alloc_page(gfp | __GFP_ZERO);
- }
- #endif
- /*
- * shmem_getpage - either get the page from swap or allocate a new one
- *
- * If we allocate a new one we do not mark it dirty. That's up to the
- * vm. If we swap it in we mark it dirty, since we also free the swap
- * entry: a page cannot live in both the swap and page cache.
- */
- static int shmem_getpage(struct inode *inode, unsigned long idx,
- struct page **pagep, enum sgp_type sgp, int *type)
- {
- struct address_space *mapping = inode->i_mapping;
- struct shmem_inode_info *info = SHMEM_I(inode);
- struct shmem_sb_info *sbinfo;
- struct page *filepage = *pagep;
- struct page *swappage;
- swp_entry_t *entry;
- swp_entry_t swap;
- int error;
- if (idx >= SHMEM_MAX_INDEX)
- return -EFBIG;
- /*
- * Normally, filepage is NULL on entry, and either found
- * uptodate immediately, or allocated and zeroed, or read
- * in under swappage, which is then assigned to filepage.
- * But shmem_prepare_write passes in a locked filepage,
- * which may be found not uptodate by other callers too,
- * and may need to be copied from the swappage read in.
- */
- repeat:
- if (!filepage)
- filepage = find_lock_page(mapping, idx);
- if (filepage && PageUptodate(filepage))
- goto done;
- error = 0;
- if (sgp == SGP_QUICK)
- goto failed;
- spin_lock(&info->lock);
- shmem_recalc_inode(inode);
- entry = shmem_swp_alloc(info, idx, sgp);
- if (IS_ERR(entry)) {
- spin_unlock(&info->lock);
- error = PTR_ERR(entry);
- goto failed;
- }
- swap = *entry;
- if (swap.val) {
- /* Look it up and read it in.. */
- swappage = lookup_swap_cache(swap);
- if (!swappage) {
- shmem_swp_unmap(entry);
- spin_unlock(&info->lock);
- /* here we actually do the io */
- if (type && *type == VM_FAULT_MINOR) {
- inc_page_state(pgmajfault);
- *type = VM_FAULT_MAJOR;
- }
- swappage = shmem_swapin(info, swap, idx);
- if (!swappage) {
- spin_lock(&info->lock);
- entry = shmem_swp_alloc(info, idx, sgp);
- if (IS_ERR(entry))
- error = PTR_ERR(entry);
- else {
- if (entry->val == swap.val)
- error = -ENOMEM;
- shmem_swp_unmap(entry);
- }
- spin_unlock(&info->lock);
- if (error)
- goto failed;
- goto repeat;
- }
- wait_on_page_locked(swappage);
- page_cache_release(swappage);
- goto repeat;
- }
- /* We have to do this with page locked to prevent races */
- if (TestSetPageLocked(swappage)) {
- shmem_swp_unmap(entry);
- spin_unlock(&info->lock);
- wait_on_page_locked(swappage);
- page_cache_release(swappage);
- goto repeat;
- }
- if (PageWriteback(swappage)) {
- shmem_swp_unmap(entry);
- spin_unlock(&info->lock);
- wait_on_page_writeback(swappage);
- unlock_page(swappage);
- page_cache_release(swappage);
- goto repeat;
- }
- if (!PageUptodate(swappage)) {
- shmem_swp_unmap(entry);
- spin_unlock(&info->lock);
- unlock_page(swappage);
- page_cache_release(swappage);
- error = -EIO;
- goto failed;
- }
- if (filepage) {
- shmem_swp_set(info, entry, 0);
- shmem_swp_unmap(entry);
- delete_from_swap_cache(swappage);
- spin_unlock(&info->lock);
- copy_highpage(filepage, swappage);
- unlock_page(swappage);
- page_cache_release(swappage);
- flush_dcache_page(filepage);
- SetPageUptodate(filepage);
- set_page_dirty(filepage);
- swap_free(swap);
- } else if (!(error = move_from_swap_cache(
- swappage, idx, mapping))) {
- info->flags |= SHMEM_PAGEIN;
- shmem_swp_set(info, entry, 0);
- shmem_swp_unmap(entry);
- spin_unlock(&info->lock);
- filepage = swappage;
- swap_free(swap);
- } else {
- shmem_swp_unmap(entry);
- spin_unlock(&info->lock);
- unlock_page(swappage);
- page_cache_release(swappage);
- if (error == -ENOMEM) {
- /* let kswapd refresh zone for GFP_ATOMICs */
- blk_congestion_wait(WRITE, HZ/50);
- }
- goto repeat;
- }
- } else if (sgp == SGP_READ && !filepage) {
- shmem_swp_unmap(entry);
- filepage = find_get_page(mapping, idx);
- if (filepage &&
- (!PageUptodate(filepage) || TestSetPageLocked(filepage))) {
- spin_unlock(&info->lock);
- wait_on_page_locked(filepage);
- page_cache_release(filepage);
- filepage = NULL;
- goto repeat;
- }
- spin_unlock(&info->lock);
- } else {
- shmem_swp_unmap(entry);
- sbinfo = SHMEM_SB(inode->i_sb);
- if (sbinfo->max_blocks) {
- spin_lock(&sbinfo->stat_lock);
- if (sbinfo->free_blocks == 0 ||
- shmem_acct_block(info->flags)) {
- spin_unlock(&sbinfo->stat_lock);
- spin_unlock(&info->lock);
- error = -ENOSPC;
- goto failed;
- }
- sbinfo->free_blocks--;
- inode->i_blocks += BLOCKS_PER_PAGE;
- spin_unlock(&sbinfo->stat_lock);
- } else if (shmem_acct_block(info->flags)) {
- spin_unlock(&info->lock);
- error = -ENOSPC;
- goto failed;
- }
- if (!filepage) {
- spin_unlock(&info->lock);
- filepage = shmem_alloc_page(mapping_gfp_mask(mapping),
- info,
- idx);
- if (!filepage) {
- shmem_unacct_blocks(info->flags, 1);
- shmem_free_blocks(inode, 1);
- error = -ENOMEM;
- goto failed;
- }
- spin_lock(&info->lock);
- entry = shmem_swp_alloc(info, idx, sgp);
- if (IS_ERR(entry))
- error = PTR_ERR(entry);
- else {
- swap = *entry;
- shmem_swp_unmap(entry);
- }
- if (error || swap.val || 0 != add_to_page_cache_lru(
- filepage, mapping, idx, GFP_ATOMIC)) {
- spin_unlock(&info->lock);
- page_cache_release(filepage);
- shmem_unacct_blocks(info->flags, 1);
- shmem_free_blocks(inode, 1);
- filepage = NULL;
- if (error)
- goto failed;
- goto repeat;
- }
- info->flags |= SHMEM_PAGEIN;
- }
- info->alloced++;
- spin_unlock(&info->lock);
- flush_dcache_page(filepage);
- SetPageUptodate(filepage);
- }
- done:
- if (*pagep != filepage) {
- unlock_page(filepage);
- *pagep = filepage;
- }
- return 0;
- failed:
- if (*pagep != filepage) {
- unlock_page(filepage);
- page_cache_release(filepage);
- }
- return error;
- }
- struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
- {
- struct inode *inode = vma->vm_file->f_dentry->d_inode;
- struct page *page = NULL;
- unsigned long idx;
- int error;
- idx = (address - vma->vm_start) >> PAGE_SHIFT;
- idx += vma->vm_pgoff;
- idx >>= PAGE_CACHE_SHIFT - PAGE_SHIFT;
- if (((loff_t) idx << PAGE_CACHE_SHIFT) >= i_size_read(inode))
- return NOPAGE_SIGBUS;
- error = shmem_getpage(inode, idx, &page, SGP_CACHE, type);
- if (error)
- return (error == -ENOMEM)? NOPAGE_OOM: NOPAGE_SIGBUS;
- mark_page_accessed(page);
- return page;
- }
- static int shmem_populate(struct vm_area_struct *vma,
- unsigned long addr, unsigned long len,
- pgprot_t prot, unsigned long pgoff, int nonblock)
- {
- struct inode *inode = vma->vm_file->f_dentry->d_inode;
- struct mm_struct *mm = vma->vm_mm;
- enum sgp_type sgp = nonblock? SGP_QUICK: SGP_CACHE;
- unsigned long size;
- size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (pgoff >= size || pgoff + (len >> PAGE_SHIFT) > size)
- return -EINVAL;
- while ((long) len > 0) {
- struct page *page = NULL;
- int err;
- /*
- * Will need changing if PAGE_CACHE_SIZE != PAGE_SIZE
- */
- err = shmem_getpage(inode, pgoff, &page, sgp, NULL);
- if (err)
- return err;
- if (page) {
- mark_page_accessed(page);
- err = install_page(mm, vma, addr, page, prot);
- if (err) {
- page_cache_release(page);
- return err;
- }
- } else if (nonblock) {
- err = install_file_pte(mm, vma, addr, pgoff, prot);
- if (err)
- return err;
- }
- len -= PAGE_SIZE;
- addr += PAGE_SIZE;
- pgoff++;
- }
- return 0;
- }
- #ifdef CONFIG_NUMA
- int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
- {
- struct inode *i = vma->vm_file->f_dentry->d_inode;
- return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
- }
- struct mempolicy *
- shmem_get_policy(struct vm_area_struct *vma, unsigned long addr)
- {
- struct inode *i = vma->vm_file->f_dentry->d_inode;
- unsigned long idx;
- idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
- return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
- }
- #endif
- int shmem_lock(struct file *file, int lock, struct user_struct *user)
- {
- struct inode *inode = file->f_dentry->d_inode;
- struct shmem_inode_info *info = SHMEM_I(inode);
- int retval = -ENOMEM;
- spin_lock(&info->lock);
- if (lock && !(info->flags & VM_LOCKED)) {
- if (!user_shm_lock(inode->i_size, user))
- goto out_nomem;
- info->flags |= VM_LOCKED;
- }
- if (!lock && (info->flags & VM_LOCKED) && user) {
- user_shm_unlock(inode->i_size, user);
- info->flags &= ~VM_LOCKED;
- }
- retval = 0;
- out_nomem:
- spin_unlock(&info->lock);
- return retval;
- }
- static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
- {
- file_accessed(file);
- vma->vm_ops = &shmem_vm_ops;
- return 0;
- }
- static struct inode *
- shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
- {
- struct inode *inode;
- struct shmem_inode_info *info;
- struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
- if (sbinfo->max_inodes) {
- spin_lock(&sbinfo->stat_lock);
- if (!sbinfo->free_inodes) {
- spin_unlock(&sbinfo->stat_lock);
- return NULL;
- }
- sbinfo->free_inodes--;
- spin_unlock(&sbinfo->stat_lock);
- }
- inode = new_inode(sb);
- if (inode) {
- inode->i_mode = mode;
- inode->i_uid = current->fsuid;
- inode->i_gid = current->fsgid;
- inode->i_blksize = PAGE_CACHE_SIZE;
- inode->i_blocks = 0;
- inode->i_mapping->a_ops = &shmem_aops;
- inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
- inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- info = SHMEM_I(inode);
- memset(info, 0, (char *)inode - (char *)info);
- spin_lock_init(&info->lock);
- INIT_LIST_HEAD(&info->swaplist);
- switch (mode & S_IFMT) {
- default:
- inode->i_op = &shmem_special_inode_operations;
- init_special_inode(inode, mode, dev);
- break;
- case S_IFREG:
- inode->i_op = &shmem_inode_operations;
- inode->i_fop = &shmem_file_operations;
- mpol_shared_policy_init(&info->policy);
- break;
- case S_IFDIR:
- inode->i_nlink++;
- /* Some things misbehave if size == 0 on a directory */
- inode->i_size = 2 * BOGO_DIRENT_SIZE;
- inode->i_op = &shmem_dir_inode_operations;
- inode->i_fop = &simple_dir_operations;
- break;
- case S_IFLNK:
- /*
- * Must not load anything in the rbtree,
- * mpol_free_shared_policy will not be called.
- */
- mpol_shared_policy_init(&info->policy);
- break;
- }
- } else if (sbinfo->max_inodes) {
- spin_lock(&sbinfo->stat_lock);
- sbinfo->free_inodes++;
- spin_unlock(&sbinfo->stat_lock);
- }
- return inode;
- }
- #ifdef CONFIG_TMPFS
- static struct inode_operations shmem_symlink_inode_operations;
- static struct inode_operations shmem_symlink_inline_operations;
- /*
- * Normally tmpfs makes no use of shmem_prepare_write, but it
- * lets a tmpfs file be used read-write below the loop driver.
- */
- static int
- shmem_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
- {
- struct inode *inode = page->mapping->host;
- return shmem_getpage(inode, page->index, &page, SGP_WRITE, NULL);
- }
- static ssize_t
- shmem_file_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
- {
- struct inode *inode = file->f_dentry->d_inode;
- loff_t pos;
- unsigned long written;
- ssize_t err;
- if ((ssize_t) count < 0)
- return -EINVAL;
- if (!access_ok(VERIFY_READ, buf, count))
- return -EFAULT;
- down(&inode->i_sem);
- pos = *ppos;
- written = 0;
- err = generic_write_checks(file, &pos, &count, 0);
- if (err || !count)
- goto out;
- err = remove_suid(file->f_dentry);
- if (err)
- goto out;
- inode->i_ctime = inode->i_mtime = CURRENT_TIME;
- do {
- struct page *page = NULL;
- unsigned long bytes, index, offset;
- char *kaddr;
- int left;
- offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
- index = pos >> PAGE_CACHE_SHIFT;
- bytes = PAGE_CACHE_SIZE - offset;
- if (bytes > count)
- bytes = count;
- /*
- * We don't hold page lock across copy from user -
- * what would it guard against? - so no deadlock here.
- * But it still may be a good idea to prefault below.
- */
- err = shmem_getpage(inode, index, &page, SGP_WRITE, NULL);
- if (err)
- break;
- left = bytes;
- if (PageHighMem(page)) {
- volatile unsigned char dummy;
- __get_user(dummy, buf);
- __get_user(dummy, buf + bytes - 1);
- kaddr = kmap_atomic(page, KM_USER0);
- left = __copy_from_user_inatomic(kaddr + offset,
- buf, bytes);
- kunmap_atomic(kaddr, KM_USER0);
- }
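- /*
- * If the atomic copy above came up short (despite the __get_user
- * prefaults), or the page was not highmem at all, "left" is still
- * nonzero and the copy is done under a sleeping kmap instead,
- * where page faults can safely be taken.
- */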
- if (left) {
- kaddr = kmap(page);
- left = __copy_from_user(kaddr + offset, buf, bytes);
- kunmap(page);
- }
- written += bytes;
- count -= bytes;
- pos += bytes;
- buf += bytes;
- if (pos > inode->i_size)
- i_size_write(inode, pos);
- flush_dcache_page(page);
- set_page_dirty(page);
- mark_page_accessed(page);
- page_cache_release(page);
- if (left) {
- pos -= left;
- written -= left;
- err = -EFAULT;
- break;
- }
- /*
- * Our dirty pages are not counted in nr_dirty,
- * and we do not attempt to balance dirty pages.
- */
- cond_resched();
- } while (count);
- *ppos = pos;
- if (written)
- err = written;
- out:
- up(&inode->i_sem);
- return err;
- }
- static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
- {
- struct inode *inode = filp->f_dentry->d_inode;
- struct address_space *mapping = inode->i_mapping;
- unsigned long index, offset;
- index = *ppos >> PAGE_CACHE_SHIFT;
- offset = *ppos & ~PAGE_CACHE_MASK;
- for (;;) {
- struct page *page = NULL;
- unsigned long end_index, nr, ret;
- loff_t i_size = i_size_read(inode);
- end_index = i_size >> PAGE_CACHE_SHIFT;
- if (index > end_index)
- break;
- if (index == end_index) {
- nr = i_size & ~PAGE_CACHE_MASK;
- if (nr <= offset)
- break;
- }
- desc->error = shmem_getpage(inode, index, &page, SGP_READ, NULL);
- if (desc->error) {
- if (desc->error == -EINVAL)
- desc->error = 0;
- break;
- }
- /*
- * We must evaluate after, since reads (unlike writes)
- * are called without i_sem protection against truncate
- */
- nr = PAGE_CACHE_SIZE;
- i_size = i_size_read(inode);
- end_index = i_size >> PAGE_CACHE_SHIFT;
- if (index == end_index) {
- nr = i_size & ~PAGE_CACHE_MASK;
- if (nr <= offset) {
- if (page)
- page_cache_release(page);
- break;
- }
- }
- nr -= offset;
- if (page) {
- /*
- * If users can be writing to this page using arbitrary
- * virtual addresses, take care about potential aliasing
- * before reading the page on the kernel side.
- */
- if (mapping_writably_mapped(mapping))
- flush_dcache_page(page);
- /*
- * Mark the page accessed if we read the beginning.
- */
- if (!offset)
- mark_page_accessed(page);
- } else
- page = ZERO_PAGE(0);
- /*
- * Ok, we have the page, and it's up-to-date, so
- * now we can copy it to user space...
- *
- * The actor routine returns how many bytes were actually used..
- * NOTE! This may not be the same as how much of a user buffer
- * we filled up (we may be padding etc), so we can only update
- * "pos" here (the actor routine has to update the user buffer
- * pointers and the remaining count).
- */
- ret = actor(desc, page, offset, nr);
- offset += ret;
- index += offset >> PAGE_CACHE_SHIFT;
- offset &= ~PAGE_CACHE_MASK;
- page_cache_release(page);
- if (ret != nr || !desc->count)
- break;
- cond_resched();
- }
- *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
- file_accessed(filp);
- }
- static ssize_t shmem_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
- {
- read_descriptor_t desc;
- if ((ssize_t) count < 0)
- return -EINVAL;
- if (!access_ok(VERIFY_WRITE, buf, count))
- return -EFAULT;
- if (!count)
- return 0;
- desc.written = 0;
- desc.count = count;
- desc.arg.buf = buf;
- desc.error = 0;
- do_shmem_file_read(filp, ppos, &desc, file_read_actor);
- if (desc.written)
- return desc.written;
- return desc.error;
- }
- static ssize_t shmem_file_sendfile(struct file *in_file, loff_t *ppos,
- size_t count, read_actor_t actor, void *target)
- {
- read_descriptor_t desc;
- if (!count)
- return 0;
- desc.written = 0;
- desc.count = count;
- desc.arg.data = target;
- desc.error = 0;
- do_shmem_file_read(in_file, ppos, &desc, actor);
- if (desc.written)
- return desc.written;
- return desc.error;
- }
- static int shmem_statfs(struct super_block *sb, struct kstatfs *buf)
- {
- struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
- buf->f_type = TMPFS_MAGIC;
- buf->f_bsize = PAGE_CACHE_SIZE;
- buf->f_namelen = NAME_MAX;
- spin_lock(&sbinfo->stat_lock);
- if (sbinfo->max_blocks) {
- buf->f_blocks = sbinfo->max_blocks;
- buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
- }
- if (sbinfo->max_inodes) {
- buf->f_files = sbinfo->max_inodes;
- buf->f_ffree = sbinfo->free_inodes;
- }
- /* else leave those fields 0 like simple_statfs */
- spin_unlock(&sbinfo->stat_lock);
- return 0;
- }
- /*
- * File creation. Allocate an inode, and we're done..
- */
- static int
- shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
- {
- struct inode *inode = shmem_get_inode(dir->i_sb, mode, dev);
- int error = -ENOSPC;
- if (inode) {
- if (dir->i_mode & S_ISGID) {
- inode->i_gid = dir->i_gid;
- if (S_ISDIR(mode))
- inode->i_mode |= S_ISGID;
- }
- dir->i_size += BOGO_DIRENT_SIZE;
- dir->i_ctime = dir->i_mtime = CURRENT_TIME;
- d_instantiate(dentry, inode);
- dget(dentry); /* Extra count - pin the dentry in core */
- error = 0;
- }
- return error;
- }
- static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
- {
- int error;
- if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
- return error;
- dir->i_nlink++;
- return 0;
- }
- static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
- struct nameidata *nd)
- {
- return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
- }
- /*
- * Link a file..
- */
- static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
- {
- struct inode *inode = old_dentry->d_inode;
- struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
- /*
- * No ordinary (disk based) filesystem counts links as inodes;
- * but each new link needs a new dentry, pinning lowmem, and
- * tmpfs dentries cannot be pruned until they are unlinked.
- */
- if (sbinfo->max_inodes) {
- spin_lock(&sbinfo->stat_lock);
- if (!sbinfo->free_inodes) {
- spin_unlock(&sbinfo->stat_lock);
- return -ENOSPC;
- }
- sbinfo->free_inodes--;
- spin_unlock(&sbinfo->stat_lock);
- }
- dir->i_size += BOGO_DIRENT_SIZE;
- inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
- inode->i_nlink++;
- atomic_inc(&inode->i_count); /* New dentry reference */
- dget(dentry); /* Extra pinning count for the created dentry */
- d_instantiate(dentry, inode);
- return 0;
- }
- static int shmem_unlink(struct inode *dir, struct dentry *dentry)
- {
- struct inode *inode = dentry->d_inode;
- if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode)) {
- struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
- if (sbinfo->max_inodes) {
- spin_lock(&sbinfo->stat_lock);
- sbinfo->free_inodes++;
- spin_unlock(&sbinfo->stat_lock);
- }
- }
- dir->i_size -= BOGO_DIRENT_SIZE;
- inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
- inode->i_nlink--;
- dput(dentry); /* Undo the count from "create" - this does all the work */
- return 0;
- }
- static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
- {
- if (!simple_empty(dentry))
- return -ENOTEMPTY;
- dir->i_nlink--;
- return shmem_unlink(dir, dentry);
- }
- /*
- * The VFS layer already does all the dentry stuff for rename,
- * we just have to decrement the usage count for the target if
- * it exists so that the VFS layer correctly frees it when it
- * gets overwritten.
- */
- static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
- {
- struct inode *inode = old_dentry->d_inode;
- int they_are_dirs = S_ISDIR(inode->i_mode);
- if (!simple_empty(new_dentry))
- return -ENOTEMPTY;
- if (new_dentry->d_inode) {
- (void) shmem_unlink(new_dir, new_dentry);
- if (they_are_dirs)
- old_dir->i_nlink--;
- } else if (they_are_dirs) {
- old_dir->i_nlink--;
- new_dir->i_nlink++;
- }
- old_dir->i_size -= BOGO_DIRENT_SIZE;
- new_dir->i_size += BOGO_DIRENT_SIZE;
- old_dir->i_ctime = old_dir->i_mtime =
- new_dir->i_ctime = new_dir->i_mtime =
- inode->i_ctime = CURRENT_TIME;
- return 0;
- }
- static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
- {
- int error;
- int len;
- struct inode *inode;
- struct page *page = NULL;
- char *kaddr;
- struct shmem_inode_info *info;
- len = strlen(symname) + 1;
- if (len > PAGE_CACHE_SIZE)
- return -ENAMETOOLONG;
- inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0);
- if (!inode)
- return -ENOSPC;
- info = SHMEM_I(inode);
- inode->i_size = len-1;
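- /*
- * A short enough target is stored inline, overlaying the
- * shmem_inode_info itself (whose fields are not otherwise used for
- * an inline symlink); longer targets go into page 0 of the mapping.
- */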
- if (len <= (char *)inode - (char *)info) {
- /* do it inline */
- memcpy(info, symname, len);
- inode->i_op = &shmem_symlink_inline_operations;
- } else {
- error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
- if (error) {
- iput(inode);
- return error;
- }
- inode->i_op = &shmem_symlink_inode_operations;
- kaddr = kmap_atomic(page, KM_USER0);
- memcpy(kaddr, symname, len);
- kunmap_atomic(kaddr, KM_USER0);
- set_page_dirty(page);
- page_cache_release(page);
- }
- if (dir->i_mode & S_ISGID)
- inode->i_gid = dir->i_gid;
- dir->i_size += BOGO_DIRENT_SIZE;
- dir->i_ctime = dir->i_mtime = CURRENT_TIME;
- d_instantiate(dentry, inode);
- dget(dentry);
- return 0;
- }
- static int shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
- {
- nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
- return 0;
- }
- static int shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
- {
- struct page *page = NULL;
- int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
- nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
- return 0;
- }
- static void shmem_put_link(struct dentry *dentry, struct nameidata *nd)
- {
- if (!IS_ERR(nd_get_link(nd))) {
- struct page *page;
- page = find_get_page(dentry->d_inode->i_mapping, 0);
- if (!page)
- BUG();
- kunmap(page);
- mark_page_accessed(page);
- page_cache_release(page);
- page_cache_release(page);
- }
- }
- static struct inode_operations shmem_symlink_inline_operations = {
- .readlink = generic_readlink,
- .follow_link = shmem_follow_link_inline,
- #ifdef CONFIG_TMPFS_XATTR
- .setxattr = generic_setxattr,
- .getxattr = generic_getxattr,
- .listxattr = generic_listxattr,
- .removexattr = generic_removexattr,
- #endif
- };
- static struct inode_operations shmem_symlink_inode_operations = {
- .truncate = shmem_truncate,
- .readlink = generic_readlink,
- .follow_link = shmem_follow_link,
- .put_link = shmem_put_link,
- #ifdef CONFIG_TMPFS_XATTR
- .setxattr = generic_setxattr,
- .getxattr = generic_getxattr,
- .listxattr = generic_listxattr,
- .removexattr = generic_removexattr,
- #endif
- };
- static int shmem_parse_options(char *options, int *mode, uid_t *uid, gid_t *gid, unsigned long *blocks, unsigned long *inodes)
- {
- char *this_char, *value, *rest;
- while ((this_char = strsep(&options, ",")) != NULL) {
- if (!*this_char)
- continue;
- if ((value = strchr(this_char,'=')) != NULL) {
- *value++ = 0;
- } else {
- printk(KERN_ERR
- "tmpfs: No value for mount option '%s'\n",
- this_char);
- return 1;
- }
- if (!strcmp(this_char,"size")) {
- unsigned long long size;
- size = memparse(value,&rest);
- if (*rest == '%') {
- size <<= PAGE_SHIFT;
- size *= totalram_pages;
- do_div(size, 100);
- rest++;
- }
- if (*rest)
- goto bad_val;
- *blocks = size >> PAGE_CACHE_SHIFT;
- } else if (!strcmp(this_char,"nr_blocks")) {
- *blocks = memparse(value,&rest);
- if (*rest)
- goto bad_val;
- } else if (!strcmp(this_char,"nr_inodes")) {
- *inodes = memparse(value,&rest);
- if (*rest)
- goto bad_val;
- } else if (!strcmp(this_char,"mode")) {
- if (!mode)
- continue;
- *mode = simple_strtoul(value,&rest,8);
- if (*rest)
- goto bad_val;
- } else if (!strcmp(this_char,"uid")) {
- if (!uid)
- continue;
- *uid = simple_strtoul(value,&rest,0);
- if (*rest)
- goto bad_val;
- } else if (!strcmp(this_char,"gid")) {
- if (!gid)
- continue;
- *gid = simple_strtoul(value,&rest,0);
- if (*rest)
- goto bad_val;
- } else {
- printk(KERN_ERR "tmpfs: Bad mount option %s\n",
- this_char);
- return 1;
- }
- }
- return 0;
- bad_val:
- printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
- value, this_char);
- return 1;
- }
- static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
- {
- struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
- unsigned long max_blocks = sbinfo->max_blocks;
- unsigned long max_inodes = sbinfo->max_inodes;
- unsigned long blocks;
- unsigned long inodes;
- int error = -EINVAL;
- if (shmem_parse_options(data, NULL, NULL, NULL,
- &max_blocks, &max_inodes))
- return error;
- spin_lock(&sbinfo->stat_lock);
- blocks = sbinfo->max_blocks - sbinfo->free_blocks;
- inodes = sbinfo->max_inodes - sbinfo->free_inodes;
- if (max_blocks < blocks)
- goto out;
- if (max_inodes < inodes)
- goto out;
- /*
- * Those tests also disallow limited->unlimited while any are in
- * use, so i_blocks will always be zero when max_blocks is zero;
- * but we must separately disallow unlimited->limited, because
- * in that case we have no record of how much is already in use.
- */
- if (max_blocks && !sbinfo->max_blocks)
- goto out;
- if (max_inodes && !sbinfo->max_inodes)
- goto out;
- error = 0;
- sbinfo->max_blocks = max_blocks;
- sbinfo->free_blocks = max_blocks - blocks;
- sbinfo->max_inodes = max_inodes;
- sbinfo->free_inodes = max_inodes - inodes;
- out:
- spin_unlock(&sbinfo->stat_lock);
- return error;
- }
- #endif
- static void shmem_put_super(struct super_block *sb)
- {
- kfree(sb->s_fs_info);
- sb->s_fs_info = NULL;
- }
- #ifdef CONFIG_TMPFS_XATTR
- static struct xattr_handler *shmem_xattr_handlers[];
- #else
- #define shmem_xattr_handlers NULL
- #endif
- static int shmem_fill_super(struct super_block *sb,
- void *data, int silent)
- {
- struct inode *inode;
- struct dentry *root;
- int mode = S_IRWXUGO | S_ISVTX;
- uid_t uid = current->fsuid;
- gid_t gid = current->fsgid;
- int err = -ENOMEM;
- struct shmem_sb_info *sbinfo;
- unsigned long blocks = 0;
- unsigned long inodes = 0;
- #ifdef CONFIG_TMPFS
- /*
- * By default each tmpfs instance is allowed only half of physical
- * RAM, and inodes are limited to one per page of lowmem; the
- * internal instance is left unlimited.
- */
- if (!(sb->s_flags & MS_NOUSER)) {
- blocks = totalram_pages / 2;
- inodes = totalram_pages - totalhigh_pages;
- if (inodes > blocks)
- inodes = blocks;
- if (shmem_parse_options(data, &mode, &uid, &gid,
- &blocks, &inodes))
- return -EINVAL;
- }
- #else
- sb->s_flags |= MS_NOUSER;
- #endif
- /* Round up to L1_CACHE_BYTES to resist false sharing */
- sbinfo = kmalloc(max((int)sizeof(struct shmem_sb_info),
- L1_CACHE_BYTES), GFP_KERNEL);
- if (!sbinfo)
- return -ENOMEM;
- spin_lock_init(&sbinfo->stat_lock);
- sbinfo->max_blocks = blocks;
- sbinfo->free_blocks = blocks;
- sbinfo->max_inodes = inodes;
- sbinfo->free_inodes = inodes;
- sb->s_fs_info = sbinfo;
- sb->s_maxbytes = SHMEM_MAX_BYTES;
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
- sb->s_magic = TMPFS_MAGIC;
- sb->s_op = &shmem_ops;
- sb->s_xattr = shmem_xattr_handlers;
- inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
- if (!inode)
- goto failed;
- inode->i_uid = uid;
- inode->i_gid = gid;
- root = d_alloc_root(inode);
- if (!root)
- goto failed_iput;
- sb->s_root = root;
- return 0;
- failed_iput:
- iput(inode);
- failed:
- shmem_put_super(sb);
- return err;
- }
- static kmem_cache_t *shmem_inode_cachep;
- static struct inode *shmem_alloc_inode(struct super_block *sb)
- {
- struct shmem_inode_info *p;
- p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, SLAB_KERNEL);
- if (!p)
- return NULL;
- return &p->vfs_inode;
- }
- static void shmem_destroy_inode(struct inode *inode)
- {
- if ((inode->i_mode & S_IFMT) == S_IFREG) {
- /* an inline symlink overwrites shmem_inode_info with its text, so only regular files are known to have a valid policy to free */
- mpol_free_shared_policy(&SHMEM_I(inode)->policy);
- }
- kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
- }
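- /*
- * Slab constructor: runs once when an object is first added to the
- * cache, so only the embedded vfs_inode is initialized here; the
- * shmem-specific fields are set up per-use in shmem_get_inode().
- */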
- static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
- {
- struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
- if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
- SLAB_CTOR_CONSTRUCTOR) {
- inode_init_once(&p->vfs_inode);
- }
- }
- static int init_inodecache(void)
- {
- shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
- sizeof(struct shmem_inode_info),
- 0, 0, init_once, NULL);
- if (shmem_inode_cachep == NULL)
- return -ENOMEM;
- return 0;
- }
- static void destroy_inodecache(void)
- {
- if (kmem_cache_destroy(shmem_inode_cachep))
- printk(KERN_INFO "shmem_inode_cache: not all structures were freed\n");
- }
- static struct address_space_operations shmem_aops = {
- .writepage = shmem_writepage,
- .set_page_dirty = __set_page_dirty_nobuffers,
- #ifdef CONFIG_TMPFS
- .prepare_write = shmem_prepare_write,
- .commit_write = simple_commit_write,
- #endif
- };
- static struct file_operations shmem_file_operations = {
- .mmap = shmem_mmap,
- #ifdef CONFIG_TMPFS
- .llseek = generic_file_llseek,
- .read = shmem_file_read,
- .write = shmem_file_write,
- .fsync = simple_sync_file,
- .sendfile = shmem_file_sendfile,
- #endif
- };
- static struct inode_operations shmem_inode_operations = {
- .truncate = shmem_truncate,
- .setattr = shmem_notify_change,
- #ifdef CONFIG_TMPFS_XATTR
- .setxattr = generic_setxattr,
- .getxattr = generic_getxattr,
- .listxattr = generic_listxattr,
- .removexattr = generic_removexattr,
- #endif
- };
- static struct inode_operations shmem_dir_inode_operations = {
- #ifdef CONFIG_TMPFS
- .create = shmem_create,
- .lookup = simple_lookup,
- .link = shmem_link,
- .unlink = shmem_unlink,
- .symlink = shmem_symlink,
- .mkdir = shmem_mkdir,
- .rmdir = shmem_rmdir,
- .mknod = shmem_mknod,
- .rename = shmem_rename,
- #ifdef CONFIG_TMPFS_XATTR
- .setxattr = generic_setxattr,
- .getxattr = generic_getxattr,
- .listxattr = generic_listxattr,
- .removexattr = generic_removexattr,
- #endif
- #endif
- };
- static struct inode_operations shmem_special_inode_operations = {
- #ifdef CONFIG_TMPFS_XATTR
- .setxattr = generic_setxattr,
- .getxattr = generic_getxattr,
- .listxattr = generic_listxattr,
- .removexattr = generic_removexattr,
- #endif
- };
- static struct super_operations shmem_ops = {
- .alloc_inode = shmem_alloc_inode,
- .destroy_inode = shmem_destroy_inode,
- #ifdef CONFIG_TMPFS
- .statfs = shmem_statfs,
- .remount_fs = shmem_remount_fs,
- #endif
- .delete_inode = shmem_delete_inode,
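- /* tmpfs has no backing store, so drop unreferenced inodes at once */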
- .drop_inode = generic_delete_inode,
- .put_super = shmem_put_super,
- };
- static struct vm_operations_struct shmem_vm_ops = {
- .nopage = shmem_nopage,
- .populate = shmem_populate,
- #ifdef CONFIG_NUMA
- .set_policy = shmem_set_policy,
- .get_policy = shmem_get_policy,
- #endif
- };
- #ifdef CONFIG_TMPFS_SECURITY
- static size_t shmem_xattr_security_list(struct inode *inode, char *list, size_t list_len,
- const char *name, size_t name_len)
- {
- return security_inode_listsecurity(inode, list, list_len);
- }
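- /*
- * In the get/set handlers below, "name" arrives with the "security."
- * prefix already stripped; an empty remainder is invalid.
- */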
- static int shmem_xattr_security_get(struct inode *inode, const char *name, void *buffer, size_t size)
- {
- if (strcmp(name, "") == 0)
- return -EINVAL;
- return security_inode_getsecurity(inode, name, buffer, size);
- }
- static int shmem_xattr_security_set(struct inode *inode, const char *name, const void *value, size_t size, int flags)
- {
- if (strcmp(name, "") == 0)
- return -EINVAL;
- return security_inode_setsecurity(inode, name, value, size, flags);
- }
- static struct xattr_handler shmem_xattr_security_handler = {
- .prefix = XATTR_SECURITY_PREFIX,
- .list = shmem_xattr_security_list,
- .get = shmem_xattr_security_get,
- .set = shmem_xattr_security_set,
- };
- #endif /* CONFIG_TMPFS_SECURITY */
- #ifdef CONFIG_TMPFS_XATTR
- static struct xattr_handler *shmem_xattr_handlers[] = {
- #ifdef CONFIG_TMPFS_SECURITY
- &shmem_xattr_security_handler,
- #endif
- NULL
- };
- #endif /* CONFIG_TMPFS_XATTR */
- static struct super_block *shmem_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
- {
- return get_sb_nodev(fs_type, flags, data, shmem_fill_super);
- }
- static struct file_system_type tmpfs_fs_type = {
- .owner = THIS_MODULE,
- .name = "tmpfs",
- .get_sb = shmem_get_sb,
- .kill_sb = kill_litter_super,
- };
- static struct vfsmount *shm_mnt;
- static int __init init_tmpfs(void)
- {
- int error;
- error = init_inodecache();
- if (error)
- goto out3;
- error = register_filesystem(&tmpfs_fs_type);
- if (error) {
- printk(KERN_ERR "Could not register tmpfs\n");
- goto out2;
- }
- #ifdef CONFIG_TMPFS
- devfs_mk_dir("shm");
- #endif
- shm_mnt = do_kern_mount(tmpfs_fs_type.name, MS_NOUSER,
- tmpfs_fs_type.name, NULL);
- if (IS_ERR(shm_mnt)) {
- error = PTR_ERR(shm_mnt);
- printk(KERN_ERR "Could not kern_mount tmpfs\n");
- goto out1;
- }
- return 0;
- out1:
- unregister_filesystem(&tmpfs_fs_type);
- out2:
- destroy_inodecache();
- out3:
- shm_mnt = ERR_PTR(error);
- return error;
- }
- module_init(init_tmpfs)
- /*
- * shmem_file_setup - get an unlinked file living in tmpfs
- *
- * @name: name for dentry (to be seen in /proc/<pid>/maps)
- * @size: size to be set for the file
- *
- */
- struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
- {
- int error;
- struct file *file;
- struct inode *inode;
- struct dentry *dentry, *root;
- struct qstr this;
- if (IS_ERR(shm_mnt))
- return (void *)shm_mnt;
- if (size < 0 || size > SHMEM_MAX_BYTES)
- return ERR_PTR(-EINVAL);
- if (shmem_acct_size(flags, size))
- return ERR_PTR(-ENOMEM);
- error = -ENOMEM;
- this.name = name;
- this.len = strlen(name);
- this.hash = 0; /* never hashed: this dentry is not looked up */
- root = shm_mnt->mnt_root;
- dentry = d_alloc(root, &this);
- if (!dentry)
- goto put_memory;
- error = -ENFILE;
- file = get_empty_filp();
- if (!file)
- goto put_dentry;
- error = -ENOSPC;
- inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);
- if (!inode)
- goto close_file;
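- /* remember whether the size was charged, so it can be uncharged on teardown */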
- SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
- d_instantiate(dentry, inode);
- inode->i_size = size;
- inode->i_nlink = 0; /* It is unlinked */
- file->f_vfsmnt = mntget(shm_mnt);
- file->f_dentry = dentry;
- file->f_mapping = inode->i_mapping;
- file->f_op = &shmem_file_operations;
- file->f_mode = FMODE_WRITE | FMODE_READ;
- return file;
- close_file:
- put_filp(file);
- put_dentry:
- dput(dentry);
- put_memory:
- shmem_unacct_size(flags, size);
- return ERR_PTR(error);
- }
- /*
- * shmem_zero_setup - setup a shared anonymous mapping
- *
- * @vma: the vma to be mapped, prepared by do_mmap_pgoff
- */
- int shmem_zero_setup(struct vm_area_struct *vma)
- {
- struct file *file;
- loff_t size = vma->vm_end - vma->vm_start;
- file = shmem_file_setup("dev/zero", size, vma->vm_flags);
- if (IS_ERR(file))
- return PTR_ERR(file);
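- /* drop any file previously attached to this vma before installing ours */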
- if (vma->vm_file)
- fput(vma->vm_file);
- vma->vm_file = file;
- vma->vm_ops = &shmem_vm_ops;
- return 0;
- }