slab.c

  1. /*
  2. * linux/mm/slab.c
  3. * Written by Mark Hemment, 1996/97.
  4. * (markhe@nextd.demon.co.uk)
  5. *
  6. * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
  7. *
  8. * Major cleanup, different bufctl logic, per-cpu arrays
  9. * (c) 2000 Manfred Spraul
  10. *
  11. * Cleanup, make the head arrays unconditional, preparation for NUMA
  12. * (c) 2002 Manfred Spraul
  13. *
  14. * An implementation of the Slab Allocator as described in outline in;
  15. * UNIX Internals: The New Frontiers by Uresh Vahalia
  16. * Pub: Prentice Hall ISBN 0-13-101908-2
  17. * or with a little more detail in;
  18. * The Slab Allocator: An Object-Caching Kernel Memory Allocator
  19. * Jeff Bonwick (Sun Microsystems).
  20. * Presented at: USENIX Summer 1994 Technical Conference
  21. *
  22. * The memory is organized in caches, one cache for each object type.
  23. * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
  24. * Each cache consists of many slabs (they are small (usually one
  25. * page long) and always contiguous), and each slab contains multiple
  26. * initialized objects.
  27. *
  28. * This means that your constructor is used only for newly allocated
  29. * slabs and you must pass objects with the same initializations to
  30. * kmem_cache_free.
  31. *
  32. * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
  33. * normal). If you need a special memory type, then you must create a new
  34. * cache for that memory type.
  35. *
  36. * In order to reduce fragmentation, the slabs are sorted in 3 groups:
  37. * full slabs with 0 free objects
  38. * partial slabs
  39. * empty slabs with no allocated objects
  40. *
  41. * If partial slabs exist, then new allocations come from these slabs,
  42. * otherwise from empty slabs or new slabs are allocated.
  43. *
  44. * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
  45. * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
  46. *
  47. * Each cache has a short per-cpu head array, most allocs
  48. * and frees go into that array, and if that array overflows, then 1/2
  49. * of the entries in the array are given back into the global cache.
  50. * The head array is strictly LIFO and should improve the cache hit rates.
  51. * On SMP, it additionally reduces the spinlock operations.
  52. *
  53. * The c_cpuarray may not be read with local interrupts enabled -
  54. * it's changed with a smp_call_function().
  55. *
  56. * SMP synchronization:
  57. * constructors and destructors are called without any locking.
  58. * Several members in struct kmem_cache and struct slab never change, they
  59. * are accessed without any locking.
  60. * The per-cpu arrays are never accessed from the wrong cpu, no locking,
  61. * and local interrupts are disabled so slab code is preempt-safe.
  62. * The non-constant members are protected with a per-cache irq spinlock.
  63. *
  64. * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
  65. * in 2000 - many ideas in the current implementation are derived from
  66. * his patch.
  67. *
  68. * Further notes from the original documentation:
  69. *
  70. * 11 April '97. Started multi-threading - markhe
  71. * The global cache-chain is protected by the mutex 'cache_chain_mutex'.
  72. * The mutex is only needed when accessing/extending the cache-chain, which
  73. * can never happen inside an interrupt (kmem_cache_create(),
  74. * kmem_cache_shrink() and kmem_cache_reap()).
  75. *
  76. * At present, each engine can be growing a cache. This should be blocked.
  77. *
  78. * 15 March 2005. NUMA slab allocator.
  79. * Shai Fultheim <shai@scalex86.org>.
  80. * Shobhit Dayal <shobhit@calsoftinc.com>
  81. * Alok N Kataria <alokk@calsoftinc.com>
  82. * Christoph Lameter <christoph@lameter.com>
  83. *
  84. * Modified the slab allocator to be node aware on NUMA systems.
  85. * Each node has its own list of partial, free and full slabs.
  86. * All object allocations for a node occur from node specific slab lists.
  87. */
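/*
 * A minimal usage sketch of the external interface implemented below
 * ("foo_cache" and struct foo are hypothetical, error handling omitted):
 *
 *	static struct kmem_cache *foo_cache;
 *	struct foo *f;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *	                              SLAB_HWCACHE_ALIGN, NULL, NULL);
 *	f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 *	kmem_cache_destroy(foo_cache);
 */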
  88. #include <linux/config.h>
  89. #include <linux/slab.h>
  90. #include <linux/mm.h>
  91. #include <linux/swap.h>
  92. #include <linux/cache.h>
  93. #include <linux/interrupt.h>
  94. #include <linux/init.h>
  95. #include <linux/compiler.h>
  96. #include <linux/seq_file.h>
  97. #include <linux/notifier.h>
  98. #include <linux/kallsyms.h>
  99. #include <linux/cpu.h>
  100. #include <linux/sysctl.h>
  101. #include <linux/module.h>
  102. #include <linux/rcupdate.h>
  103. #include <linux/string.h>
  104. #include <linux/nodemask.h>
  105. #include <linux/mempolicy.h>
  106. #include <linux/mutex.h>
  107. #include <asm/uaccess.h>
  108. #include <asm/cacheflush.h>
  109. #include <asm/tlbflush.h>
  110. #include <asm/page.h>
  111. /*
  112. * DEBUG - 1 for kmem_cache_create() to honour; SLAB_DEBUG_INITIAL,
  113. * SLAB_RED_ZONE & SLAB_POISON.
  114. * 0 for faster, smaller code (especially in the critical paths).
  115. *
  116. * STATS - 1 to collect stats for /proc/slabinfo.
  117. * 0 for faster, smaller code (especially in the critical paths).
  118. *
  119. * FORCED_DEBUG - 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
  120. */
  121. #ifdef CONFIG_DEBUG_SLAB
  122. #define DEBUG 1
  123. #define STATS 1
  124. #define FORCED_DEBUG 1
  125. #else
  126. #define DEBUG 0
  127. #define STATS 0
  128. #define FORCED_DEBUG 0
  129. #endif
  130. /* Shouldn't this be in a header file somewhere? */
  131. #define BYTES_PER_WORD sizeof(void *)
  132. #ifndef cache_line_size
  133. #define cache_line_size() L1_CACHE_BYTES
  134. #endif
  135. #ifndef ARCH_KMALLOC_MINALIGN
  136. /*
  137. * Enforce a minimum alignment for the kmalloc caches.
  138. * Usually, the kmalloc caches are cache_line_size() aligned, except when
  139. * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
  140. * Some archs want to perform DMA into kmalloc caches and need a guaranteed
  141. * alignment larger than BYTES_PER_WORD. ARCH_KMALLOC_MINALIGN allows that.
  142. * Note that this flag disables some debug features.
  143. */
  144. #define ARCH_KMALLOC_MINALIGN 0
  145. #endif
  146. #ifndef ARCH_SLAB_MINALIGN
  147. /*
  148. * Enforce a minimum alignment for all caches.
  149. * Intended for archs that get misalignment faults even for BYTES_PER_WORD
  150. * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
  151. * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
  152. * some debug features.
  153. */
  154. #define ARCH_SLAB_MINALIGN 0
  155. #endif
  156. #ifndef ARCH_KMALLOC_FLAGS
  157. #define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
  158. #endif
  159. /* Legal flag mask for kmem_cache_create(). */
  160. #if DEBUG
  161. # define CREATE_MASK (SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
  162. SLAB_POISON | SLAB_HWCACHE_ALIGN | \
  163. SLAB_NO_REAP | SLAB_CACHE_DMA | \
  164. SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
  165. SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
  166. SLAB_DESTROY_BY_RCU)
  167. #else
  168. # define CREATE_MASK (SLAB_HWCACHE_ALIGN | SLAB_NO_REAP | \
  169. SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
  170. SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
  171. SLAB_DESTROY_BY_RCU)
  172. #endif
  173. /*
  174. * kmem_bufctl_t:
  175. *
  176. * Bufctl's are used for linking objs within a slab, via
  177. * linked offsets.
  178. *
  179. * This implementation relies on "struct page" for locating the cache &
  180. * slab an object belongs to.
  181. * This allows the bufctl structure to be small (one int), but limits
  182. * the number of objects a slab (not a cache) can contain when off-slab
  183. * bufctls are used. The limit is the size of the largest general cache
  184. * that does not use off-slab slabs.
  185. * For 32bit archs with 4 kB pages, this is 56.
  186. * This is not serious, as it is only for large objects, when it is unwise
  187. * to have too many per slab.
  188. * Note: This limit can be raised by introducing a general cache whose size
  189. * is less than 512 (PAGE_SIZE<<3), but greater than 256.
  190. */
  191. typedef unsigned int kmem_bufctl_t;
  192. #define BUFCTL_END (((kmem_bufctl_t)(~0U))-0)
  193. #define BUFCTL_FREE (((kmem_bufctl_t)(~0U))-1)
  194. #define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-2)
  195. /* Max number of objs-per-slab for caches which use off-slab slabs.
  196. * Needed to avoid a possible looping condition in cache_grow().
  197. */
  198. static unsigned long offslab_limit;
  199. /*
  200. * struct slab
  201. *
  202. * Manages the objs in a slab. Placed either at the beginning of mem allocated
  203. * for a slab, or allocated from a general cache.
  204. * Slabs are chained into three lists: fully used, partial, fully free slabs.
  205. */
  206. struct slab {
  207. struct list_head list;
  208. unsigned long colouroff;
  209. void *s_mem; /* including colour offset */
  210. unsigned int inuse; /* num of objs active in slab */
  211. kmem_bufctl_t free;
  212. unsigned short nodeid;
  213. };
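/*
 * Sketch of how the on-slab free-object chain uses these fields: the
 * kmem_bufctl_t array sits directly after struct slab, slabp->free holds the
 * index of the first free object, and each array entry holds the index of
 * the next free one (index arithmetic only, local names are illustrative):
 *
 *	kmem_bufctl_t *bufctl = (kmem_bufctl_t *)(slabp + 1);
 *	void *objp = slabp->s_mem + slabp->free * cachep->buffer_size;
 *	slabp->free = bufctl[slabp->free];	(next free index, or BUFCTL_END)
 */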
  214. /*
  215. * struct slab_rcu
  216. *
  217. * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
  218. * arrange for kmem_freepages to be called via RCU. This is useful if
  219. * we need to approach a kernel structure obliquely, from its address
  220. * obtained without the usual locking. We can lock the structure to
  221. * stabilize it and check it's still at the given address, only if we
  222. * can be sure that the memory has not been meanwhile reused for some
  223. * other kind of object (which our subsystem's lock might corrupt).
  224. *
  225. * rcu_read_lock before reading the address, then rcu_read_unlock after
  226. * taking the spinlock within the structure expected at that address.
  227. *
  228. * We assume struct slab_rcu can overlay struct slab when destroying.
  229. */
  230. struct slab_rcu {
  231. struct rcu_head head;
  232. struct kmem_cache *cachep;
  233. void *addr;
  234. };
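/*
 * Rough sketch of the lookup pattern described in the comment above
 * (lookup_by_address(), obj->lock and obj->self are hypothetical subsystem
 * members, not part of the slab API):
 *
 *	rcu_read_lock();
 *	obj = lookup_by_address(addr);
 *	spin_lock(&obj->lock);
 *	if (obj->self != addr) {		(memory was reused; back off)
 *		spin_unlock(&obj->lock);
 *		obj = NULL;
 *	}
 *	rcu_read_unlock();
 */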
  235. /*
  236. * struct array_cache
  237. *
  238. * Purpose:
  239. * - LIFO ordering, to hand out cache-warm objects from _alloc
  240. * - reduce the number of linked list operations
  241. * - reduce spinlock operations
  242. *
  243. * The limit is stored in the per-cpu structure to reduce the data cache
  244. * footprint.
  245. *
  246. */
  247. struct array_cache {
  248. unsigned int avail;
  249. unsigned int limit;
  250. unsigned int batchcount;
  251. unsigned int touched;
  252. spinlock_t lock;
  253. void *entry[0]; /*
  254. * Must have this definition in here for the proper
  255. * alignment of array_cache. Also simplifies accessing
  256. * the entries.
  257. * [0] is for gcc 2.95. It should really be [].
  258. */
  259. };
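/*
 * The LIFO fast paths in a nutshell (a sketch of what the allocation and
 * free hot paths do with this structure):
 *
 *	free:   ac->entry[ac->avail++] = objp;		(while avail < limit)
 *	alloc:  objp = ac->entry[--ac->avail];		(while avail > 0)
 *
 * Only when the array runs full (or empty) does the slow path fall back to
 * the per-node lists, batchcount objects at a time.
 */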
  260. /* bootstrap: The caches do not work without cpuarrays anymore,
  261. * but the cpuarrays are allocated from the generic caches...
  262. */
  263. #define BOOT_CPUCACHE_ENTRIES 1
  264. struct arraycache_init {
  265. struct array_cache cache;
  266. void *entries[BOOT_CPUCACHE_ENTRIES];
  267. };
  268. /*
  269. * The slab lists for all objects.
  270. */
  271. struct kmem_list3 {
  272. struct list_head slabs_partial; /* partial list first, better asm code */
  273. struct list_head slabs_full;
  274. struct list_head slabs_free;
  275. unsigned long free_objects;
  276. unsigned long next_reap;
  277. int free_touched;
  278. unsigned int free_limit;
  279. spinlock_t list_lock;
  280. struct array_cache *shared; /* shared per node */
  281. struct array_cache **alien; /* on other nodes */
  282. };
  283. /*
  284. * Need this for bootstrapping a per node allocator.
  285. */
  286. #define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1)
  287. struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
  288. #define CACHE_CACHE 0
  289. #define SIZE_AC 1
  290. #define SIZE_L3 (1 + MAX_NUMNODES)
  291. /*
  292. * This function must be completely optimized away if
  293. * a constant is passed to it. Mostly the same as
  294. * what is in linux/slab.h except it returns an
  295. * index.
  296. */
  297. static __always_inline int index_of(const size_t size)
  298. {
  299. extern void __bad_size(void);
  300. if (__builtin_constant_p(size)) {
  301. int i = 0;
  302. #define CACHE(x) \
  303. if (size <=x) \
  304. return i; \
  305. else \
  306. i++;
  307. #include <linux/kmalloc_sizes.h>
  308. #undef CACHE
  309. __bad_size();
  310. } else
  311. __bad_size();
  312. return 0;
  313. }
  314. #define INDEX_AC index_of(sizeof(struct arraycache_init))
  315. #define INDEX_L3 index_of(sizeof(struct kmem_list3))
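/*
 * Worked example: with a typical kmalloc size table (32, 64, 96, 128, ...),
 * index_of(100) folds to the constant 3 (the 128-byte cache) at compile
 * time, so INDEX_AC and INDEX_L3 are plain integer constants. A non-constant
 * argument ends up calling the undefined __bad_size() and breaks the link.
 */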
  316. static void kmem_list3_init(struct kmem_list3 *parent)
  317. {
  318. INIT_LIST_HEAD(&parent->slabs_full);
  319. INIT_LIST_HEAD(&parent->slabs_partial);
  320. INIT_LIST_HEAD(&parent->slabs_free);
  321. parent->shared = NULL;
  322. parent->alien = NULL;
  323. spin_lock_init(&parent->list_lock);
  324. parent->free_objects = 0;
  325. parent->free_touched = 0;
  326. }
  327. #define MAKE_LIST(cachep, listp, slab, nodeid) \
  328. do { \
  329. INIT_LIST_HEAD(listp); \
  330. list_splice(&(cachep->nodelists[nodeid]->slab), listp); \
  331. } while (0)
  332. #define MAKE_ALL_LISTS(cachep, ptr, nodeid) \
  333. do { \
  334. MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \
  335. MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
  336. MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
  337. } while (0)
  338. /*
  339. * struct kmem_cache
  340. *
  341. * manages a cache.
  342. */
  343. struct kmem_cache {
  344. /* 1) per-cpu data, touched during every alloc/free */
  345. struct array_cache *array[NR_CPUS];
  346. unsigned int batchcount;
  347. unsigned int limit;
  348. unsigned int shared;
  349. unsigned int buffer_size;
  350. /* 2) touched by every alloc & free from the backend */
  351. struct kmem_list3 *nodelists[MAX_NUMNODES];
  352. unsigned int flags; /* constant flags */
  353. unsigned int num; /* # of objs per slab */
  354. spinlock_t spinlock;
  355. /* 3) cache_grow/shrink */
  356. /* order of pgs per slab (2^n) */
  357. unsigned int gfporder;
  358. /* force GFP flags, e.g. GFP_DMA */
  359. gfp_t gfpflags;
  360. size_t colour; /* cache colouring range */
  361. unsigned int colour_off; /* colour offset */
  362. unsigned int colour_next; /* cache colouring */
  363. struct kmem_cache *slabp_cache;
  364. unsigned int slab_size;
  365. unsigned int dflags; /* dynamic flags */
  366. /* constructor func */
  367. void (*ctor) (void *, struct kmem_cache *, unsigned long);
  368. /* de-constructor func */
  369. void (*dtor) (void *, struct kmem_cache *, unsigned long);
  370. /* 4) cache creation/removal */
  371. const char *name;
  372. struct list_head next;
  373. /* 5) statistics */
  374. #if STATS
  375. unsigned long num_active;
  376. unsigned long num_allocations;
  377. unsigned long high_mark;
  378. unsigned long grown;
  379. unsigned long reaped;
  380. unsigned long errors;
  381. unsigned long max_freeable;
  382. unsigned long node_allocs;
  383. unsigned long node_frees;
  384. atomic_t allochit;
  385. atomic_t allocmiss;
  386. atomic_t freehit;
  387. atomic_t freemiss;
  388. #endif
  389. #if DEBUG
  390. /*
  391. * If debugging is enabled, then the allocator can add additional
  392. * fields and/or padding to every object. buffer_size contains the total
  393. * object size including these internal fields, the following two
  394. * variables contain the offset to the user object and its size.
  395. */
  396. int obj_offset;
  397. int obj_size;
  398. #endif
  399. };
  400. #define CFLGS_OFF_SLAB (0x80000000UL)
  401. #define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)
  402. #define BATCHREFILL_LIMIT 16
  403. /* Optimization question: fewer reaps means less
  404. * probability for unnecessary cpucache drain/refill cycles.
  405. *
  406. * OTOH the cpuarrays can contain lots of objects,
  407. * which could lock up otherwise freeable slabs.
  408. */
  409. #define REAPTIMEOUT_CPUC (2*HZ)
  410. #define REAPTIMEOUT_LIST3 (4*HZ)
  411. #if STATS
  412. #define STATS_INC_ACTIVE(x) ((x)->num_active++)
  413. #define STATS_DEC_ACTIVE(x) ((x)->num_active--)
  414. #define STATS_INC_ALLOCED(x) ((x)->num_allocations++)
  415. #define STATS_INC_GROWN(x) ((x)->grown++)
  416. #define STATS_INC_REAPED(x) ((x)->reaped++)
  417. #define STATS_SET_HIGH(x) do { if ((x)->num_active > (x)->high_mark) \
  418. (x)->high_mark = (x)->num_active; \
  419. } while (0)
  420. #define STATS_INC_ERR(x) ((x)->errors++)
  421. #define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++)
  422. #define STATS_INC_NODEFREES(x) ((x)->node_frees++)
  423. #define STATS_SET_FREEABLE(x, i) \
  424. do { if ((x)->max_freeable < i) \
  425. (x)->max_freeable = i; \
  426. } while (0)
  427. #define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
  428. #define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
  429. #define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
  430. #define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
  431. #else
  432. #define STATS_INC_ACTIVE(x) do { } while (0)
  433. #define STATS_DEC_ACTIVE(x) do { } while (0)
  434. #define STATS_INC_ALLOCED(x) do { } while (0)
  435. #define STATS_INC_GROWN(x) do { } while (0)
  436. #define STATS_INC_REAPED(x) do { } while (0)
  437. #define STATS_SET_HIGH(x) do { } while (0)
  438. #define STATS_INC_ERR(x) do { } while (0)
  439. #define STATS_INC_NODEALLOCS(x) do { } while (0)
  440. #define STATS_INC_NODEFREES(x) do { } while (0)
  441. #define STATS_SET_FREEABLE(x, i) \
  442. do { } while (0)
  443. #define STATS_INC_ALLOCHIT(x) do { } while (0)
  444. #define STATS_INC_ALLOCMISS(x) do { } while (0)
  445. #define STATS_INC_FREEHIT(x) do { } while (0)
  446. #define STATS_INC_FREEMISS(x) do { } while (0)
  447. #endif
  448. #if DEBUG
  449. /* Magic nums for obj red zoning.
  450. * Placed in the first word before and the first word after an obj.
  451. */
  452. #define RED_INACTIVE 0x5A2CF071UL /* when obj is inactive */
  453. #define RED_ACTIVE 0x170FC2A5UL /* when obj is active */
  454. /* ...and for poisoning */
  455. #define POISON_INUSE 0x5a /* for use-uninitialised poisoning */
  456. #define POISON_FREE 0x6b /* for use-after-free poisoning */
  457. #define POISON_END 0xa5 /* end-byte of poisoning */
  458. /* memory layout of objects:
  459. * 0 : objp
  460. * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
  461. * the end of an object is aligned with the end of the real
  462. * allocation. Catches writes behind the end of the allocation.
  463. * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
  464. * redzone word.
  465. * cachep->obj_offset: The real object.
  466. * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
  467. * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
  468. */
  469. static int obj_offset(struct kmem_cache *cachep)
  470. {
  471. return cachep->obj_offset;
  472. }
  473. static int obj_size(struct kmem_cache *cachep)
  474. {
  475. return cachep->obj_size;
  476. }
  477. static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
  478. {
  479. BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
  480. return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD);
  481. }
  482. static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
  483. {
  484. BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
  485. if (cachep->flags & SLAB_STORE_USER)
  486. return (unsigned long *)(objp + cachep->buffer_size -
  487. 2 * BYTES_PER_WORD);
  488. return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD);
  489. }
  490. static void **dbg_userword(struct kmem_cache *cachep, void *objp)
  491. {
  492. BUG_ON(!(cachep->flags & SLAB_STORE_USER));
  493. return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
  494. }
  495. #else
  496. #define obj_offset(x) 0
  497. #define obj_size(cachep) (cachep->buffer_size)
  498. #define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long *)NULL;})
  499. #define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long *)NULL;})
  500. #define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})
  501. #endif
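/*
 * Layout sketch for a debug build with SLAB_RED_ZONE | SLAB_STORE_USER and
 * a 16-byte object on a 32-bit machine (BYTES_PER_WORD == 4, assuming no
 * extra alignment padding):
 *
 *	objp + 0  .. objp + 3  : redzone word 1 (RED_INACTIVE / RED_ACTIVE)
 *	objp + 4  .. objp + 19 : the object itself	(obj_offset == 4)
 *	objp + 20 .. objp + 23 : redzone word 2
 *	objp + 24 .. objp + 27 : last caller address	(buffer_size == 28)
 */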
  502. /*
  503. * Maximum size of an obj (in 2^order pages)
  504. * and absolute limit for the gfp order.
  505. */
  506. #if defined(CONFIG_LARGE_ALLOCS)
  507. #define MAX_OBJ_ORDER 13 /* up to 32Mb */
  508. #define MAX_GFP_ORDER 13 /* up to 32Mb */
  509. #elif defined(CONFIG_MMU)
  510. #define MAX_OBJ_ORDER 5 /* 32 pages */
  511. #define MAX_GFP_ORDER 5 /* 32 pages */
  512. #else
  513. #define MAX_OBJ_ORDER 8 /* up to 1Mb */
  514. #define MAX_GFP_ORDER 8 /* up to 1Mb */
  515. #endif
  516. /*
  517. * Do not go above this order unless 0 objects fit into the slab.
  518. */
  519. #define BREAK_GFP_ORDER_HI 1
  520. #define BREAK_GFP_ORDER_LO 0
  521. static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
  522. /* Functions for storing/retrieving the cachep and/or slab from the
  523. * global 'mem_map'. These are used to find the slab an obj belongs to.
  524. * With kfree(), these are used to find the cache which an obj belongs to.
  525. */
  526. static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
  527. {
  528. page->lru.next = (struct list_head *)cache;
  529. }
  530. static inline struct kmem_cache *page_get_cache(struct page *page)
  531. {
  532. return (struct kmem_cache *)page->lru.next;
  533. }
  534. static inline void page_set_slab(struct page *page, struct slab *slab)
  535. {
  536. page->lru.prev = (struct list_head *)slab;
  537. }
  538. static inline struct slab *page_get_slab(struct page *page)
  539. {
  540. return (struct slab *)page->lru.prev;
  541. }
  542. static inline struct kmem_cache *virt_to_cache(const void *obj)
  543. {
  544. struct page *page = virt_to_page(obj);
  545. return page_get_cache(page);
  546. }
  547. static inline struct slab *virt_to_slab(const void *obj)
  548. {
  549. struct page *page = virt_to_page(obj);
  550. return page_get_slab(page);
  551. }
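/*
 * This is how the free paths get from a bare pointer back to its metadata,
 * sketched for any address objp inside a slab-allocated object:
 *
 *	struct page *page         = virt_to_page(objp);
 *	struct kmem_cache *cachep = page_get_cache(page);	(page->lru.next)
 *	struct slab *slabp        = page_get_slab(page);	(page->lru.prev)
 */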
  552. /* These are the default caches for kmalloc. Custom caches can have other sizes. */
  553. struct cache_sizes malloc_sizes[] = {
  554. #define CACHE(x) { .cs_size = (x) },
  555. #include <linux/kmalloc_sizes.h>
  556. CACHE(ULONG_MAX)
  557. #undef CACHE
  558. };
  559. EXPORT_SYMBOL(malloc_sizes);
  560. /* Must match cache_sizes above. Out of line to keep cache footprint low. */
  561. struct cache_names {
  562. char *name;
  563. char *name_dma;
  564. };
  565. static struct cache_names __initdata cache_names[] = {
  566. #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
  567. #include <linux/kmalloc_sizes.h>
  568. {NULL,}
  569. #undef CACHE
  570. };
  571. static struct arraycache_init initarray_cache __initdata =
  572. { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
  573. static struct arraycache_init initarray_generic =
  574. { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
  575. /* internal cache of cache description objs */
  576. static struct kmem_cache cache_cache = {
  577. .batchcount = 1,
  578. .limit = BOOT_CPUCACHE_ENTRIES,
  579. .shared = 1,
  580. .buffer_size = sizeof(struct kmem_cache),
  581. .flags = SLAB_NO_REAP,
  582. .spinlock = SPIN_LOCK_UNLOCKED,
  583. .name = "kmem_cache",
  584. #if DEBUG
  585. .obj_size = sizeof(struct kmem_cache),
  586. #endif
  587. };
  588. /* Guard access to the cache-chain. */
  589. static DEFINE_MUTEX(cache_chain_mutex);
  590. static struct list_head cache_chain;
  591. /*
  592. * vm_enough_memory() looks at this to determine how many
  593. * slab-allocated pages are possibly freeable under pressure
  594. *
  595. * SLAB_RECLAIM_ACCOUNT turns this on per-slab
  596. */
  597. atomic_t slab_reclaim_pages;
  598. /*
  599. * chicken and egg problem: delay the per-cpu array allocation
  600. * until the general caches are up.
  601. */
  602. static enum {
  603. NONE,
  604. PARTIAL_AC,
  605. PARTIAL_L3,
  606. FULL
  607. } g_cpucache_up;
  608. static DEFINE_PER_CPU(struct work_struct, reap_work);
  609. static void free_block(struct kmem_cache *cachep, void **objpp, int len, int node);
  610. static void enable_cpucache(struct kmem_cache *cachep);
  611. static void cache_reap(void *unused);
  612. static int __node_shrink(struct kmem_cache *cachep, int node);
  613. static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
  614. {
  615. return cachep->array[smp_processor_id()];
  616. }
  617. static inline struct kmem_cache *__find_general_cachep(size_t size, gfp_t gfpflags)
  618. {
  619. struct cache_sizes *csizep = malloc_sizes;
  620. #if DEBUG
  621. /* This happens if someone tries to call
  622. * kmem_cache_create(), or __kmalloc(), before
  623. * the generic caches are initialized.
  624. */
  625. BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
  626. #endif
  627. while (size > csizep->cs_size)
  628. csizep++;
  629. /*
  630. * Really subtle: The last entry with cs->cs_size==ULONG_MAX
  631. * has cs_{dma,}cachep==NULL. Thus no special case
  632. * for large kmalloc calls required.
  633. */
  634. if (unlikely(gfpflags & GFP_DMA))
  635. return csizep->cs_dmacachep;
  636. return csizep->cs_cachep;
  637. }
  638. struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
  639. {
  640. return __find_general_cachep(size, gfpflags);
  641. }
  642. EXPORT_SYMBOL(kmem_find_general_cachep);
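/*
 * Worked example: kmalloc(100, GFP_KERNEL) walks malloc_sizes[] until
 * cs_size >= 100 and allocates from that general cache ("size-128" with the
 * usual size table); kmalloc(100, GFP_DMA) picks the matching
 * "size-128(DMA)" cache instead.
 */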
  643. static size_t slab_mgmt_size(size_t nr_objs, size_t align)
  644. {
  645. return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
  646. }
  647. /* Calculate the number of objects and left-over bytes for a given
  648. buffer size. */
  649. static void cache_estimate(unsigned long gfporder, size_t buffer_size,
  650. size_t align, int flags, size_t *left_over,
  651. unsigned int *num)
  652. {
  653. int nr_objs;
  654. size_t mgmt_size;
  655. size_t slab_size = PAGE_SIZE << gfporder;
  656. /*
  657. * The slab management structure can be either off the slab or
  658. * on it. For the latter case, the memory allocated for a
  659. * slab is used for:
  660. *
  661. * - The struct slab
  662. * - One kmem_bufctl_t for each object
  663. * - Padding to respect alignment of @align
  664. * - @buffer_size bytes for each object
  665. *
  666. * If the slab management structure is off the slab, then the
  667. * alignment will already be calculated into the size. Because
  668. * the slabs are all pages aligned, the objects will be at the
  669. * correct alignment when allocated.
  670. */
  671. if (flags & CFLGS_OFF_SLAB) {
  672. mgmt_size = 0;
  673. nr_objs = slab_size / buffer_size;
  674. if (nr_objs > SLAB_LIMIT)
  675. nr_objs = SLAB_LIMIT;
  676. } else {
  677. /*
  678. * Ignore padding for the initial guess. The padding
  679. * is at most @align-1 bytes, and @buffer_size is at
  680. * least @align. In the worst case, this result will
  681. * be one greater than the number of objects that fit
  682. * into the memory allocation when taking the padding
  683. * into account.
  684. */
  685. nr_objs = (slab_size - sizeof(struct slab)) /
  686. (buffer_size + sizeof(kmem_bufctl_t));
  687. /*
  688. * This calculated number will be either the right
  689. * amount, or one greater than what we want.
  690. */
  691. if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
  692. > slab_size)
  693. nr_objs--;
  694. if (nr_objs > SLAB_LIMIT)
  695. nr_objs = SLAB_LIMIT;
  696. mgmt_size = slab_mgmt_size(nr_objs, align);
  697. }
  698. *num = nr_objs;
  699. *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
  700. }
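/*
 * Worked example of the on-slab case (order 0, 4 kB page, buffer_size 256,
 * align 32, assuming sizeof(struct slab) == 28 and 4-byte kmem_bufctl_t):
 *
 *	initial guess: (4096 - 28) / (256 + 4)       = 15 objects
 *	mgmt_size:     ALIGN(28 + 15 * 4, 32)        = 96 bytes
 *	check:         96 + 15 * 256 = 3936 <= 4096  (15 objects stands)
 *	left_over:     4096 - 15 * 256 - 96          = 160 bytes for colouring
 */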
  701. #define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)
  702. static void __slab_error(const char *function, struct kmem_cache *cachep, char *msg)
  703. {
  704. printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
  705. function, cachep->name, msg);
  706. dump_stack();
  707. }
  708. /*
  709. * Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz
  710. * via the workqueue/eventd.
  711. * Add the CPU number into the expiration time to minimize the possibility of
  712. * the CPUs getting into lockstep and contending for the global cache chain
  713. * lock.
  714. */
  715. static void __devinit start_cpu_timer(int cpu)
  716. {
  717. struct work_struct *reap_work = &per_cpu(reap_work, cpu);
  718. /*
  719. * When this gets called from do_initcalls via cpucache_init(),
  720. * init_workqueues() has already run, so keventd will be setup
  721. * at that time.
  722. */
  723. if (keventd_up() && reap_work->func == NULL) {
  724. INIT_WORK(reap_work, cache_reap, NULL);
  725. schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
  726. }
  727. }
  728. static struct array_cache *alloc_arraycache(int node, int entries,
  729. int batchcount)
  730. {
  731. int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
  732. struct array_cache *nc = NULL;
  733. nc = kmalloc_node(memsize, GFP_KERNEL, node);
  734. if (nc) {
  735. nc->avail = 0;
  736. nc->limit = entries;
  737. nc->batchcount = batchcount;
  738. nc->touched = 0;
  739. spin_lock_init(&nc->lock);
  740. }
  741. return nc;
  742. }
  743. #ifdef CONFIG_NUMA
  744. static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
  745. static struct array_cache **alloc_alien_cache(int node, int limit)
  746. {
  747. struct array_cache **ac_ptr;
  748. int memsize = sizeof(void *) * MAX_NUMNODES;
  749. int i;
  750. if (limit > 1)
  751. limit = 12;
  752. ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
  753. if (ac_ptr) {
  754. for_each_node(i) {
  755. if (i == node || !node_online(i)) {
  756. ac_ptr[i] = NULL;
  757. continue;
  758. }
  759. ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
  760. if (!ac_ptr[i]) {
  761. for (i--; i >= 0; i--)
  762. kfree(ac_ptr[i]);
  763. kfree(ac_ptr);
  764. return NULL;
  765. }
  766. }
  767. }
  768. return ac_ptr;
  769. }
  770. static void free_alien_cache(struct array_cache **ac_ptr)
  771. {
  772. int i;
  773. if (!ac_ptr)
  774. return;
  775. for_each_node(i)
  776. kfree(ac_ptr[i]);
  777. kfree(ac_ptr);
  778. }
  779. static void __drain_alien_cache(struct kmem_cache *cachep,
  780. struct array_cache *ac, int node)
  781. {
  782. struct kmem_list3 *rl3 = cachep->nodelists[node];
  783. if (ac->avail) {
  784. spin_lock(&rl3->list_lock);
  785. free_block(cachep, ac->entry, ac->avail, node);
  786. ac->avail = 0;
  787. spin_unlock(&rl3->list_lock);
  788. }
  789. }
  790. static void drain_alien_cache(struct kmem_cache *cachep, struct kmem_list3 *l3)
  791. {
  792. int i = 0;
  793. struct array_cache *ac;
  794. unsigned long flags;
  795. for_each_online_node(i) {
  796. ac = l3->alien[i];
  797. if (ac) {
  798. spin_lock_irqsave(&ac->lock, flags);
  799. __drain_alien_cache(cachep, ac, i);
  800. spin_unlock_irqrestore(&ac->lock, flags);
  801. }
  802. }
  803. }
  804. #else
  805. #define alloc_alien_cache(node, limit) do { } while (0)
  806. #define free_alien_cache(ac_ptr) do { } while (0)
  807. #define drain_alien_cache(cachep, l3) do { } while (0)
  808. #endif
  809. static int __devinit cpuup_callback(struct notifier_block *nfb,
  810. unsigned long action, void *hcpu)
  811. {
  812. long cpu = (long)hcpu;
  813. struct kmem_cache *cachep;
  814. struct kmem_list3 *l3 = NULL;
  815. int node = cpu_to_node(cpu);
  816. int memsize = sizeof(struct kmem_list3);
  817. switch (action) {
  818. case CPU_UP_PREPARE:
  819. mutex_lock(&cache_chain_mutex);
  820. /* we need to do this right in the beginning since
  821. * alloc_arraycache's are going to use this list.
  822. * kmalloc_node allows us to add the slab to the right
  823. * kmem_list3 and not this cpu's kmem_list3
  824. */
  825. list_for_each_entry(cachep, &cache_chain, next) {
  826. /* setup the size64 kmemlist for cpu before we can
  827. * begin anything. Make sure some other cpu on this
  828. * node has not already allocated this
  829. */
  830. if (!cachep->nodelists[node]) {
  831. if (!(l3 = kmalloc_node(memsize,
  832. GFP_KERNEL, node)))
  833. goto bad;
  834. kmem_list3_init(l3);
  835. l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
  836. ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
  837. cachep->nodelists[node] = l3;
  838. }
  839. spin_lock_irq(&cachep->nodelists[node]->list_lock);
  840. cachep->nodelists[node]->free_limit =
  841. (1 + nr_cpus_node(node)) *
  842. cachep->batchcount + cachep->num;
  843. spin_unlock_irq(&cachep->nodelists[node]->list_lock);
  844. }
  845. /* Now we can go ahead with allocating the shared arrays
  846. & array caches */
  847. list_for_each_entry(cachep, &cache_chain, next) {
  848. struct array_cache *nc;
  849. nc = alloc_arraycache(node, cachep->limit,
  850. cachep->batchcount);
  851. if (!nc)
  852. goto bad;
  853. cachep->array[cpu] = nc;
  854. l3 = cachep->nodelists[node];
  855. BUG_ON(!l3);
  856. if (!l3->shared) {
  857. if (!(nc = alloc_arraycache(node,
  858. cachep->shared *
  859. cachep->batchcount,
  860. 0xbaadf00d)))
  861. goto bad;
  862. /* we are serialised from CPU_DEAD or
  863. CPU_UP_CANCELLED by the cpucontrol lock */
  864. l3->shared = nc;
  865. }
  866. }
  867. mutex_unlock(&cache_chain_mutex);
  868. break;
  869. case CPU_ONLINE:
  870. start_cpu_timer(cpu);
  871. break;
  872. #ifdef CONFIG_HOTPLUG_CPU
  873. case CPU_DEAD:
  874. /* fall thru */
  875. case CPU_UP_CANCELED:
  876. mutex_lock(&cache_chain_mutex);
  877. list_for_each_entry(cachep, &cache_chain, next) {
  878. struct array_cache *nc;
  879. cpumask_t mask;
  880. mask = node_to_cpumask(node);
  881. spin_lock_irq(&cachep->spinlock);
  882. /* cpu is dead; no one can alloc from it. */
  883. nc = cachep->array[cpu];
  884. cachep->array[cpu] = NULL;
  885. l3 = cachep->nodelists[node];
  886. if (!l3)
  887. goto unlock_cache;
  888. spin_lock(&l3->list_lock);
  889. /* Free limit for this kmem_list3 */
  890. l3->free_limit -= cachep->batchcount;
  891. if (nc)
  892. free_block(cachep, nc->entry, nc->avail, node);
  893. if (!cpus_empty(mask)) {
  894. spin_unlock(&l3->list_lock);
  895. goto unlock_cache;
  896. }
  897. if (l3->shared) {
  898. free_block(cachep, l3->shared->entry,
  899. l3->shared->avail, node);
  900. kfree(l3->shared);
  901. l3->shared = NULL;
  902. }
  903. if (l3->alien) {
  904. drain_alien_cache(cachep, l3);
  905. free_alien_cache(l3->alien);
  906. l3->alien = NULL;
  907. }
  908. /* free slabs belonging to this node */
  909. if (__node_shrink(cachep, node)) {
  910. cachep->nodelists[node] = NULL;
  911. spin_unlock(&l3->list_lock);
  912. kfree(l3);
  913. } else {
  914. spin_unlock(&l3->list_lock);
  915. }
  916. unlock_cache:
  917. spin_unlock_irq(&cachep->spinlock);
  918. kfree(nc);
  919. }
  920. mutex_unlock(&cache_chain_mutex);
  921. break;
  922. #endif
  923. }
  924. return NOTIFY_OK;
  925. bad:
  926. mutex_unlock(&cache_chain_mutex);
  927. return NOTIFY_BAD;
  928. }
  929. static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };
  930. /*
  931. * swap the static kmem_list3 with kmalloced memory
  932. */
  933. static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list, int nodeid)
  934. {
  935. struct kmem_list3 *ptr;
  936. BUG_ON(cachep->nodelists[nodeid] != list);
  937. ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
  938. BUG_ON(!ptr);
  939. local_irq_disable();
  940. memcpy(ptr, list, sizeof(struct kmem_list3));
  941. MAKE_ALL_LISTS(cachep, ptr, nodeid);
  942. cachep->nodelists[nodeid] = ptr;
  943. local_irq_enable();
  944. }
  945. /* Initialisation.
  946. * Called after the gfp() functions have been enabled, and before smp_init().
  947. */
  948. void __init kmem_cache_init(void)
  949. {
  950. size_t left_over;
  951. struct cache_sizes *sizes;
  952. struct cache_names *names;
  953. int i;
  954. for (i = 0; i < NUM_INIT_LISTS; i++) {
  955. kmem_list3_init(&initkmem_list3[i]);
  956. if (i < MAX_NUMNODES)
  957. cache_cache.nodelists[i] = NULL;
  958. }
  959. /*
  960. * Fragmentation resistance on low memory - only use bigger
  961. * page orders on machines with more than 32MB of memory.
  962. */
  963. if (num_physpages > (32 << 20) >> PAGE_SHIFT)
  964. slab_break_gfp_order = BREAK_GFP_ORDER_HI;
  965. /* Bootstrap is tricky, because several objects are allocated
  966. * from caches that do not exist yet:
  967. * 1) initialize the cache_cache cache: it contains the struct kmem_cache
  968. * structures of all caches, except cache_cache itself: cache_cache
  969. * is statically allocated.
  970. * Initially an __init data area is used for the head array and the
  971. * kmem_list3 structures, it's replaced with a kmalloc allocated
  972. * array at the end of the bootstrap.
  973. * 2) Create the first kmalloc cache.
  974. * The struct kmem_cache for the new cache is allocated normally.
  975. * An __init data area is used for the head array.
  976. * 3) Create the remaining kmalloc caches, with minimally sized
  977. * head arrays.
  978. * 4) Replace the __init data head arrays for cache_cache and the first
  979. * kmalloc cache with kmalloc allocated arrays.
  980. * 5) Replace the __init data for kmem_list3 for cache_cache and
  981. * the other caches with kmalloc allocated memory.
  982. * 6) Resize the head arrays of the kmalloc caches to their final sizes.
  983. */
  984. /* 1) create the cache_cache */
  985. INIT_LIST_HEAD(&cache_chain);
  986. list_add(&cache_cache.next, &cache_chain);
  987. cache_cache.colour_off = cache_line_size();
  988. cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
  989. cache_cache.nodelists[numa_node_id()] = &initkmem_list3[CACHE_CACHE];
  990. cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, cache_line_size());
  991. cache_estimate(0, cache_cache.buffer_size, cache_line_size(), 0,
  992. &left_over, &cache_cache.num);
  993. if (!cache_cache.num)
  994. BUG();
  995. cache_cache.colour = left_over / cache_cache.colour_off;
  996. cache_cache.colour_next = 0;
  997. cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
  998. sizeof(struct slab), cache_line_size());
  999. /* 2+3) create the kmalloc caches */
  1000. sizes = malloc_sizes;
  1001. names = cache_names;
  1002. /* Initialize the caches that provide memory for the array cache
  1003. * and the kmem_list3 structures first.
  1004. * Without this, further allocations will BUG().
  1005. */
  1006. sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
  1007. sizes[INDEX_AC].cs_size,
  1008. ARCH_KMALLOC_MINALIGN,
  1009. (ARCH_KMALLOC_FLAGS |
  1010. SLAB_PANIC), NULL, NULL);
  1011. if (INDEX_AC != INDEX_L3)
  1012. sizes[INDEX_L3].cs_cachep =
  1013. kmem_cache_create(names[INDEX_L3].name,
  1014. sizes[INDEX_L3].cs_size,
  1015. ARCH_KMALLOC_MINALIGN,
  1016. (ARCH_KMALLOC_FLAGS | SLAB_PANIC), NULL,
  1017. NULL);
  1018. while (sizes->cs_size != ULONG_MAX) {
  1019. /*
  1020. * For performance, all the general caches are L1 aligned.
  1021. * This should be particularly beneficial on SMP boxes, as it
  1022. * eliminates "false sharing".
  1023. * Note for systems short on memory removing the alignment will
  1024. * allow tighter packing of the smaller caches.
  1025. */
  1026. if (!sizes->cs_cachep)
  1027. sizes->cs_cachep = kmem_cache_create(names->name,
  1028. sizes->cs_size,
  1029. ARCH_KMALLOC_MINALIGN,
  1030. (ARCH_KMALLOC_FLAGS
  1031. | SLAB_PANIC),
  1032. NULL, NULL);
  1033. /* Inc off-slab bufctl limit until the ceiling is hit. */
  1034. if (!(OFF_SLAB(sizes->cs_cachep))) {
  1035. offslab_limit = sizes->cs_size - sizeof(struct slab);
  1036. offslab_limit /= sizeof(kmem_bufctl_t);
  1037. }
  1038. sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
  1039. sizes->cs_size,
  1040. ARCH_KMALLOC_MINALIGN,
  1041. (ARCH_KMALLOC_FLAGS |
  1042. SLAB_CACHE_DMA |
  1043. SLAB_PANIC), NULL,
  1044. NULL);
  1045. sizes++;
  1046. names++;
  1047. }
  1048. /* 4) Replace the bootstrap head arrays */
  1049. {
  1050. void *ptr;
  1051. ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
  1052. local_irq_disable();
  1053. BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
  1054. memcpy(ptr, cpu_cache_get(&cache_cache),
  1055. sizeof(struct arraycache_init));
  1056. cache_cache.array[smp_processor_id()] = ptr;
  1057. local_irq_enable();
  1058. ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
  1059. local_irq_disable();
  1060. BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
  1061. != &initarray_generic.cache);
  1062. memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
  1063. sizeof(struct arraycache_init));
  1064. malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
  1065. ptr;
  1066. local_irq_enable();
  1067. }
  1068. /* 5) Replace the bootstrap kmem_list3's */
  1069. {
  1070. int node;
  1071. /* Replace the static kmem_list3 structures for the boot cpu */
  1072. init_list(&cache_cache, &initkmem_list3[CACHE_CACHE],
  1073. numa_node_id());
  1074. for_each_online_node(node) {
  1075. init_list(malloc_sizes[INDEX_AC].cs_cachep,
  1076. &initkmem_list3[SIZE_AC + node], node);
  1077. if (INDEX_AC != INDEX_L3) {
  1078. init_list(malloc_sizes[INDEX_L3].cs_cachep,
  1079. &initkmem_list3[SIZE_L3 + node],
  1080. node);
  1081. }
  1082. }
  1083. }
  1084. /* 6) resize the head arrays to their final sizes */
  1085. {
  1086. struct kmem_cache *cachep;
  1087. mutex_lock(&cache_chain_mutex);
  1088. list_for_each_entry(cachep, &cache_chain, next)
  1089. enable_cpucache(cachep);
  1090. mutex_unlock(&cache_chain_mutex);
  1091. }
  1092. /* Done! */
  1093. g_cpucache_up = FULL;
  1094. /* Register a cpu startup notifier callback
  1095. * that initializes cpu_cache_get for all new cpus
  1096. */
  1097. register_cpu_notifier(&cpucache_notifier);
  1098. /* The reap timers are started later, with a module init call:
  1099. * That part of the kernel is not yet operational.
  1100. */
  1101. }
  1102. static int __init cpucache_init(void)
  1103. {
  1104. int cpu;
  1105. /*
  1106. * Register the timers that return unneeded
  1107. * pages to gfp.
  1108. */
  1109. for_each_online_cpu(cpu)
  1110. start_cpu_timer(cpu);
  1111. return 0;
  1112. }
  1113. __initcall(cpucache_init);
  1114. /*
  1115. * Interface to system's page allocator. No need to hold the cache-lock.
  1116. *
  1117. * If we requested dmaable memory, we will get it. Even if we
  1118. * did not request dmaable memory, we might get it, but that
  1119. * would be relatively rare and ignorable.
  1120. */
  1121. static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
  1122. {
  1123. struct page *page;
  1124. void *addr;
  1125. int i;
  1126. flags |= cachep->gfpflags;
  1127. page = alloc_pages_node(nodeid, flags, cachep->gfporder);
  1128. if (!page)
  1129. return NULL;
  1130. addr = page_address(page);
  1131. i = (1 << cachep->gfporder);
  1132. if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
  1133. atomic_add(i, &slab_reclaim_pages);
  1134. add_page_state(nr_slab, i);
  1135. while (i--) {
  1136. SetPageSlab(page);
  1137. page++;
  1138. }
  1139. return addr;
  1140. }
  1141. /*
  1142. * Interface to system's page release.
  1143. */
  1144. static void kmem_freepages(struct kmem_cache *cachep, void *addr)
  1145. {
  1146. unsigned long i = (1 << cachep->gfporder);
  1147. struct page *page = virt_to_page(addr);
  1148. const unsigned long nr_freed = i;
  1149. while (i--) {
  1150. if (!TestClearPageSlab(page))
  1151. BUG();
  1152. page++;
  1153. }
  1154. sub_page_state(nr_slab, nr_freed);
  1155. if (current->reclaim_state)
  1156. current->reclaim_state->reclaimed_slab += nr_freed;
  1157. free_pages((unsigned long)addr, cachep->gfporder);
  1158. if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
  1159. atomic_sub(1 << cachep->gfporder, &slab_reclaim_pages);
  1160. }
  1161. static void kmem_rcu_free(struct rcu_head *head)
  1162. {
  1163. struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
  1164. struct kmem_cache *cachep = slab_rcu->cachep;
  1165. kmem_freepages(cachep, slab_rcu->addr);
  1166. if (OFF_SLAB(cachep))
  1167. kmem_cache_free(cachep->slabp_cache, slab_rcu);
  1168. }
  1169. #if DEBUG
  1170. #ifdef CONFIG_DEBUG_PAGEALLOC
  1171. static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
  1172. unsigned long caller)
  1173. {
  1174. int size = obj_size(cachep);
  1175. addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
  1176. if (size < 5 * sizeof(unsigned long))
  1177. return;
  1178. *addr++ = 0x12345678;
  1179. *addr++ = caller;
  1180. *addr++ = smp_processor_id();
  1181. size -= 3 * sizeof(unsigned long);
  1182. {
  1183. unsigned long *sptr = &caller;
  1184. unsigned long svalue;
  1185. while (!kstack_end(sptr)) {
  1186. svalue = *sptr++;
  1187. if (kernel_text_address(svalue)) {
  1188. *addr++ = svalue;
  1189. size -= sizeof(unsigned long);
  1190. if (size <= sizeof(unsigned long))
  1191. break;
  1192. }
  1193. }
  1194. }
  1195. *addr++ = 0x87654321;
  1196. }
  1197. #endif
  1198. static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
  1199. {
  1200. int size = obj_size(cachep);
  1201. addr = &((char *)addr)[obj_offset(cachep)];
  1202. memset(addr, val, size);
  1203. *(unsigned char *)(addr + size - 1) = POISON_END;
  1204. }
  1205. static void dump_line(char *data, int offset, int limit)
  1206. {
  1207. int i;
  1208. printk(KERN_ERR "%03x:", offset);
  1209. for (i = 0; i < limit; i++) {
  1210. printk(" %02x", (unsigned char)data[offset + i]);
  1211. }
  1212. printk("\n");
  1213. }
  1214. #endif
  1215. #if DEBUG
  1216. static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
  1217. {
  1218. int i, size;
  1219. char *realobj;
  1220. if (cachep->flags & SLAB_RED_ZONE) {
  1221. printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n",
  1222. *dbg_redzone1(cachep, objp),
  1223. *dbg_redzone2(cachep, objp));
  1224. }
  1225. if (cachep->flags & SLAB_STORE_USER) {
  1226. printk(KERN_ERR "Last user: [<%p>]",
  1227. *dbg_userword(cachep, objp));
  1228. print_symbol("(%s)",
  1229. (unsigned long)*dbg_userword(cachep, objp));
  1230. printk("\n");
  1231. }
  1232. realobj = (char *)objp + obj_offset(cachep);
  1233. size = obj_size(cachep);
  1234. for (i = 0; i < size && lines; i += 16, lines--) {
  1235. int limit;
  1236. limit = 16;
  1237. if (i + limit > size)
  1238. limit = size - i;
  1239. dump_line(realobj, i, limit);
  1240. }
  1241. }
  1242. static void check_poison_obj(struct kmem_cache *cachep, void *objp)
  1243. {
  1244. char *realobj;
  1245. int size, i;
  1246. int lines = 0;
  1247. realobj = (char *)objp + obj_offset(cachep);
  1248. size = obj_size(cachep);
  1249. for (i = 0; i < size; i++) {
  1250. char exp = POISON_FREE;
  1251. if (i == size - 1)
  1252. exp = POISON_END;
  1253. if (realobj[i] != exp) {
  1254. int limit;
  1255. /* Mismatch ! */
  1256. /* Print header */
  1257. if (lines == 0) {
  1258. printk(KERN_ERR
  1259. "Slab corruption: start=%p, len=%d\n",
  1260. realobj, size);
  1261. print_objinfo(cachep, objp, 0);
  1262. }
  1263. /* Hexdump the affected line */
  1264. i = (i / 16) * 16;
  1265. limit = 16;
  1266. if (i + limit > size)
  1267. limit = size - i;
  1268. dump_line(realobj, i, limit);
  1269. i += 16;
  1270. lines++;
  1271. /* Limit to 5 lines */
  1272. if (lines > 5)
  1273. break;
  1274. }
  1275. }
  1276. if (lines != 0) {
  1277. /* Print some data about the neighboring objects, if they
  1278. * exist:
  1279. */
  1280. struct slab *slabp = virt_to_slab(objp);
  1281. int objnr;
  1282. objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
  1283. if (objnr) {
  1284. objp = slabp->s_mem + (objnr - 1) * cachep->buffer_size;
  1285. realobj = (char *)objp + obj_offset(cachep);
  1286. printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
  1287. realobj, size);
  1288. print_objinfo(cachep, objp, 2);
  1289. }
  1290. if (objnr + 1 < cachep->num) {
  1291. objp = slabp->s_mem + (objnr + 1) * cachep->buffer_size;
  1292. realobj = (char *)objp + obj_offset(cachep);
  1293. printk(KERN_ERR "Next obj: start=%p, len=%d\n",
  1294. realobj, size);
  1295. print_objinfo(cachep, objp, 2);
  1296. }
  1297. }
  1298. }
  1299. #endif
  1300. #if DEBUG
  1301. /**
  1302. * slab_destroy_objs - call the registered destructor for each object in
  1303. * a slab that is to be destroyed.
  1304. */
  1305. static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
  1306. {
  1307. int i;
  1308. for (i = 0; i < cachep->num; i++) {
  1309. void *objp = slabp->s_mem + cachep->buffer_size * i;
  1310. if (cachep->flags & SLAB_POISON) {
  1311. #ifdef CONFIG_DEBUG_PAGEALLOC
  1312. if ((cachep->buffer_size % PAGE_SIZE) == 0
  1313. && OFF_SLAB(cachep))
  1314. kernel_map_pages(virt_to_page(objp),
  1315. cachep->buffer_size / PAGE_SIZE,
  1316. 1);
  1317. else
  1318. check_poison_obj(cachep, objp);
  1319. #else
  1320. check_poison_obj(cachep, objp);
  1321. #endif
  1322. }
  1323. if (cachep->flags & SLAB_RED_ZONE) {
  1324. if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
  1325. slab_error(cachep, "start of a freed object "
  1326. "was overwritten");
  1327. if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
  1328. slab_error(cachep, "end of a freed object "
  1329. "was overwritten");
  1330. }
  1331. if (cachep->dtor && !(cachep->flags & SLAB_POISON))
  1332. (cachep->dtor) (objp + obj_offset(cachep), cachep, 0);
  1333. }
  1334. }
  1335. #else
  1336. static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
  1337. {
  1338. if (cachep->dtor) {
  1339. int i;
  1340. for (i = 0; i < cachep->num; i++) {
  1341. void *objp = slabp->s_mem + cachep->buffer_size * i;
  1342. (cachep->dtor) (objp, cachep, 0);
  1343. }
  1344. }
  1345. }
  1346. #endif
  1347. /**
1348. * Destroy all the objs in a slab, and release the memory back to the system.
1349. * Before calling, the slab must have been unlinked from the cache.
  1350. * The cache-lock is not held/needed.
  1351. */
  1352. static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
  1353. {
  1354. void *addr = slabp->s_mem - slabp->colouroff;
  1355. slab_destroy_objs(cachep, slabp);
  1356. if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
  1357. struct slab_rcu *slab_rcu;
  1358. slab_rcu = (struct slab_rcu *)slabp;
  1359. slab_rcu->cachep = cachep;
  1360. slab_rcu->addr = addr;
  1361. call_rcu(&slab_rcu->head, kmem_rcu_free);
  1362. } else {
  1363. kmem_freepages(cachep, addr);
  1364. if (OFF_SLAB(cachep))
  1365. kmem_cache_free(cachep->slabp_cache, slabp);
  1366. }
  1367. }
1368. /* For setting up all the kmem_list3s for caches whose buffer_size is the
1369. same as the size of kmem_list3. */
  1370. static void set_up_list3s(struct kmem_cache *cachep, int index)
  1371. {
  1372. int node;
  1373. for_each_online_node(node) {
  1374. cachep->nodelists[node] = &initkmem_list3[index + node];
  1375. cachep->nodelists[node]->next_reap = jiffies +
  1376. REAPTIMEOUT_LIST3 +
  1377. ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
  1378. }
  1379. }
  1380. /**
  1381. * calculate_slab_order - calculate size (page order) of slabs and the number
  1382. * of objects per slab.
  1383. *
  1384. * This could be made much more intelligent. For now, try to avoid using
  1385. * high order pages for slabs. When the gfp() functions are more friendly
  1386. * towards high-order requests, this should be changed.
  1387. */
  1388. static inline size_t calculate_slab_order(struct kmem_cache *cachep, size_t size,
  1389. size_t align, gfp_t flags)
  1390. {
  1391. size_t left_over = 0;
  1392. for (;; cachep->gfporder++) {
  1393. unsigned int num;
  1394. size_t remainder;
  1395. if (cachep->gfporder > MAX_GFP_ORDER) {
  1396. cachep->num = 0;
  1397. break;
  1398. }
  1399. cache_estimate(cachep->gfporder, size, align, flags,
  1400. &remainder, &num);
  1401. if (!num)
  1402. continue;
  1403. /* More than offslab_limit objects will cause problems */
  1404. if (flags & CFLGS_OFF_SLAB && cachep->num > offslab_limit)
  1405. break;
  1406. cachep->num = num;
  1407. left_over = remainder;
  1408. /*
  1409. * Large number of objects is good, but very large slabs are
  1410. * currently bad for the gfp()s.
  1411. */
  1412. if (cachep->gfporder >= slab_break_gfp_order)
  1413. break;
  1414. if ((left_over * 8) <= (PAGE_SIZE << cachep->gfporder))
  1415. /* Acceptable internal fragmentation */
  1416. break;
  1417. }
  1418. return left_over;
  1419. }
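/*
 * Illustrative worked example of the acceptance test above (not from this
 * file; assumes 4096-byte pages and ignores the on-slab management overhead
 * for simplicity): for 1500-byte objects at order 0, num = 2 with about
 * 1096 bytes left over, and 1096 * 8 > 4096, so the loop moves on to
 * order 1, where num = 5 with about 692 bytes left over; 692 * 8 <= 8192,
 * so order 1 is accepted as having acceptable internal fragmentation.
 */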
  1420. /**
  1421. * kmem_cache_create - Create a cache.
  1422. * @name: A string which is used in /proc/slabinfo to identify this cache.
  1423. * @size: The size of objects to be created in this cache.
  1424. * @align: The required alignment for the objects.
  1425. * @flags: SLAB flags
  1426. * @ctor: A constructor for the objects.
  1427. * @dtor: A destructor for the objects.
  1428. *
  1429. * Returns a ptr to the cache on success, NULL on failure.
1430. * Cannot be called within an interrupt, but can be interrupted.
  1431. * The @ctor is run when new pages are allocated by the cache
  1432. * and the @dtor is run before the pages are handed back.
  1433. *
  1434. * @name must be valid until the cache is destroyed. This implies that
  1435. * the module calling this has to destroy the cache before getting
  1436. * unloaded.
  1437. *
  1438. * The flags are
  1439. *
  1440. * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
  1441. * to catch references to uninitialised memory.
  1442. *
  1443. * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
  1444. * for buffer overruns.
  1445. *
  1446. * %SLAB_NO_REAP - Don't automatically reap this cache when we're under
  1447. * memory pressure.
  1448. *
  1449. * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
  1450. * cacheline. This can be beneficial if you're counting cycles as closely
  1451. * as davem.
  1452. */
  1453. struct kmem_cache *
  1454. kmem_cache_create (const char *name, size_t size, size_t align,
  1455. unsigned long flags, void (*ctor)(void*, struct kmem_cache *, unsigned long),
  1456. void (*dtor)(void*, struct kmem_cache *, unsigned long))
  1457. {
  1458. size_t left_over, slab_size, ralign;
  1459. struct kmem_cache *cachep = NULL;
  1460. struct list_head *p;
  1461. /*
  1462. * Sanity checks... these are all serious usage bugs.
  1463. */
  1464. if ((!name) ||
  1465. in_interrupt() ||
  1466. (size < BYTES_PER_WORD) ||
  1467. (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) {
  1468. printk(KERN_ERR "%s: Early error in slab %s\n",
  1469. __FUNCTION__, name);
  1470. BUG();
  1471. }
  1472. mutex_lock(&cache_chain_mutex);
  1473. list_for_each(p, &cache_chain) {
  1474. struct kmem_cache *pc = list_entry(p, struct kmem_cache, next);
  1475. mm_segment_t old_fs = get_fs();
  1476. char tmp;
  1477. int res;
  1478. /*
  1479. * This happens when the module gets unloaded and doesn't
1480. * destroy its slab cache and no one else reuses the vmalloc
  1481. * area of the module. Print a warning.
  1482. */
  1483. set_fs(KERNEL_DS);
  1484. res = __get_user(tmp, pc->name);
  1485. set_fs(old_fs);
  1486. if (res) {
  1487. printk("SLAB: cache with size %d has lost its name\n",
  1488. pc->buffer_size);
  1489. continue;
  1490. }
  1491. if (!strcmp(pc->name, name)) {
  1492. printk("kmem_cache_create: duplicate cache %s\n", name);
  1493. dump_stack();
  1494. goto oops;
  1495. }
  1496. }
  1497. #if DEBUG
  1498. WARN_ON(strchr(name, ' ')); /* It confuses parsers */
  1499. if ((flags & SLAB_DEBUG_INITIAL) && !ctor) {
1500. /* No constructor, but initial state check requested */
  1501. printk(KERN_ERR "%s: No con, but init state check "
  1502. "requested - %s\n", __FUNCTION__, name);
  1503. flags &= ~SLAB_DEBUG_INITIAL;
  1504. }
  1505. #if FORCED_DEBUG
  1506. /*
  1507. * Enable redzoning and last user accounting, except for caches with
  1508. * large objects, if the increased size would increase the object size
  1509. * above the next power of two: caches with object sizes just above a
  1510. * power of two have a significant amount of internal fragmentation.
  1511. */
  1512. if ((size < 4096
  1513. || fls(size - 1) == fls(size - 1 + 3 * BYTES_PER_WORD)))
  1514. flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
  1515. if (!(flags & SLAB_DESTROY_BY_RCU))
  1516. flags |= SLAB_POISON;
  1517. #endif
  1518. if (flags & SLAB_DESTROY_BY_RCU)
  1519. BUG_ON(flags & SLAB_POISON);
  1520. #endif
  1521. if (flags & SLAB_DESTROY_BY_RCU)
  1522. BUG_ON(dtor);
  1523. /*
1524. * Always check flags; a caller might be expecting debug
  1525. * support which isn't available.
  1526. */
  1527. if (flags & ~CREATE_MASK)
  1528. BUG();
  1529. /* Check that size is in terms of words. This is needed to avoid
  1530. * unaligned accesses for some archs when redzoning is used, and makes
  1531. * sure any on-slab bufctl's are also correctly aligned.
  1532. */
  1533. if (size & (BYTES_PER_WORD - 1)) {
  1534. size += (BYTES_PER_WORD - 1);
  1535. size &= ~(BYTES_PER_WORD - 1);
  1536. }
1537. /* Calculate the final buffer alignment: */
  1538. /* 1) arch recommendation: can be overridden for debug */
  1539. if (flags & SLAB_HWCACHE_ALIGN) {
  1540. /* Default alignment: as specified by the arch code.
  1541. * Except if an object is really small, then squeeze multiple
  1542. * objects into one cacheline.
  1543. */
  1544. ralign = cache_line_size();
  1545. while (size <= ralign / 2)
  1546. ralign /= 2;
  1547. } else {
  1548. ralign = BYTES_PER_WORD;
  1549. }
  1550. /* 2) arch mandated alignment: disables debug if necessary */
  1551. if (ralign < ARCH_SLAB_MINALIGN) {
  1552. ralign = ARCH_SLAB_MINALIGN;
  1553. if (ralign > BYTES_PER_WORD)
  1554. flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
  1555. }
  1556. /* 3) caller mandated alignment: disables debug if necessary */
  1557. if (ralign < align) {
  1558. ralign = align;
  1559. if (ralign > BYTES_PER_WORD)
  1560. flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
  1561. }
  1562. /* 4) Store it. Note that the debug code below can reduce
  1563. * the alignment to BYTES_PER_WORD.
  1564. */
  1565. align = ralign;
  1566. /* Get cache's description obj. */
  1567. cachep = kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
  1568. if (!cachep)
  1569. goto oops;
  1570. memset(cachep, 0, sizeof(struct kmem_cache));
  1571. #if DEBUG
  1572. cachep->obj_size = size;
  1573. if (flags & SLAB_RED_ZONE) {
  1574. /* redzoning only works with word aligned caches */
  1575. align = BYTES_PER_WORD;
  1576. /* add space for red zone words */
  1577. cachep->obj_offset += BYTES_PER_WORD;
  1578. size += 2 * BYTES_PER_WORD;
  1579. }
  1580. if (flags & SLAB_STORE_USER) {
  1581. /* user store requires word alignment and
  1582. * one word storage behind the end of the real
  1583. * object.
  1584. */
  1585. align = BYTES_PER_WORD;
  1586. size += BYTES_PER_WORD;
  1587. }
  1588. #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
  1589. if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
  1590. && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
  1591. cachep->obj_offset += PAGE_SIZE - size;
  1592. size = PAGE_SIZE;
  1593. }
  1594. #endif
  1595. #endif
  1596. /* Determine if the slab management is 'on' or 'off' slab. */
  1597. if (size >= (PAGE_SIZE >> 3))
  1598. /*
  1599. * Size is large, assume best to place the slab management obj
  1600. * off-slab (should allow better packing of objs).
  1601. */
  1602. flags |= CFLGS_OFF_SLAB;
  1603. size = ALIGN(size, align);
  1604. if ((flags & SLAB_RECLAIM_ACCOUNT) && size <= PAGE_SIZE) {
  1605. /*
  1606. * A VFS-reclaimable slab tends to have most allocations
  1607. * as GFP_NOFS and we really don't want to have to be allocating
  1608. * higher-order pages when we are unable to shrink dcache.
  1609. */
  1610. cachep->gfporder = 0;
  1611. cache_estimate(cachep->gfporder, size, align, flags,
  1612. &left_over, &cachep->num);
  1613. } else
  1614. left_over = calculate_slab_order(cachep, size, align, flags);
  1615. if (!cachep->num) {
  1616. printk("kmem_cache_create: couldn't create cache %s.\n", name);
  1617. kmem_cache_free(&cache_cache, cachep);
  1618. cachep = NULL;
  1619. goto oops;
  1620. }
  1621. slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
  1622. + sizeof(struct slab), align);
  1623. /*
  1624. * If the slab has been placed off-slab, and we have enough space then
  1625. * move it on-slab. This is at the expense of any extra colouring.
  1626. */
  1627. if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
  1628. flags &= ~CFLGS_OFF_SLAB;
  1629. left_over -= slab_size;
  1630. }
  1631. if (flags & CFLGS_OFF_SLAB) {
  1632. /* really off slab. No need for manual alignment */
  1633. slab_size =
  1634. cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
  1635. }
  1636. cachep->colour_off = cache_line_size();
  1637. /* Offset must be a multiple of the alignment. */
  1638. if (cachep->colour_off < align)
  1639. cachep->colour_off = align;
  1640. cachep->colour = left_over / cachep->colour_off;
  1641. cachep->slab_size = slab_size;
  1642. cachep->flags = flags;
  1643. cachep->gfpflags = 0;
  1644. if (flags & SLAB_CACHE_DMA)
  1645. cachep->gfpflags |= GFP_DMA;
  1646. spin_lock_init(&cachep->spinlock);
  1647. cachep->buffer_size = size;
  1648. if (flags & CFLGS_OFF_SLAB)
  1649. cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
  1650. cachep->ctor = ctor;
  1651. cachep->dtor = dtor;
  1652. cachep->name = name;
1653. /* Don't let CPUs come and go */
  1654. lock_cpu_hotplug();
  1655. if (g_cpucache_up == FULL) {
  1656. enable_cpucache(cachep);
  1657. } else {
  1658. if (g_cpucache_up == NONE) {
  1659. /* Note: the first kmem_cache_create must create
  1660. * the cache that's used by kmalloc(24), otherwise
  1661. * the creation of further caches will BUG().
  1662. */
  1663. cachep->array[smp_processor_id()] =
  1664. &initarray_generic.cache;
  1665. /* If the cache that's used by
  1666. * kmalloc(sizeof(kmem_list3)) is the first cache,
  1667. * then we need to set up all its list3s, otherwise
  1668. * the creation of further caches will BUG().
  1669. */
  1670. set_up_list3s(cachep, SIZE_AC);
  1671. if (INDEX_AC == INDEX_L3)
  1672. g_cpucache_up = PARTIAL_L3;
  1673. else
  1674. g_cpucache_up = PARTIAL_AC;
  1675. } else {
  1676. cachep->array[smp_processor_id()] =
  1677. kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
  1678. if (g_cpucache_up == PARTIAL_AC) {
  1679. set_up_list3s(cachep, SIZE_L3);
  1680. g_cpucache_up = PARTIAL_L3;
  1681. } else {
  1682. int node;
  1683. for_each_online_node(node) {
  1684. cachep->nodelists[node] =
  1685. kmalloc_node(sizeof
  1686. (struct kmem_list3),
  1687. GFP_KERNEL, node);
  1688. BUG_ON(!cachep->nodelists[node]);
  1689. kmem_list3_init(cachep->
  1690. nodelists[node]);
  1691. }
  1692. }
  1693. }
  1694. cachep->nodelists[numa_node_id()]->next_reap =
  1695. jiffies + REAPTIMEOUT_LIST3 +
  1696. ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
  1697. BUG_ON(!cpu_cache_get(cachep));
  1698. cpu_cache_get(cachep)->avail = 0;
  1699. cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
  1700. cpu_cache_get(cachep)->batchcount = 1;
  1701. cpu_cache_get(cachep)->touched = 0;
  1702. cachep->batchcount = 1;
  1703. cachep->limit = BOOT_CPUCACHE_ENTRIES;
  1704. }
  1705. /* cache setup completed, link it into the list */
  1706. list_add(&cachep->next, &cache_chain);
  1707. unlock_cpu_hotplug();
  1708. oops:
  1709. if (!cachep && (flags & SLAB_PANIC))
  1710. panic("kmem_cache_create(): failed to create slab `%s'\n",
  1711. name);
  1712. mutex_unlock(&cache_chain_mutex);
  1713. return cachep;
  1714. }
  1715. EXPORT_SYMBOL(kmem_cache_create);
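/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * a cache is created once and objects are then allocated from and freed
 * back to it.  "struct my_obj" and "my_cache" are invented names.
 *
 *	static struct kmem_cache *my_cache;
 *
 *	my_cache = kmem_cache_create("my_obj", sizeof(struct my_obj), 0,
 *				     SLAB_HWCACHE_ALIGN, NULL, NULL);
 *	if (!my_cache)
 *		return -ENOMEM;
 *
 *	obj = kmem_cache_alloc(my_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(my_cache, obj);
 */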
  1716. #if DEBUG
  1717. static void check_irq_off(void)
  1718. {
  1719. BUG_ON(!irqs_disabled());
  1720. }
  1721. static void check_irq_on(void)
  1722. {
  1723. BUG_ON(irqs_disabled());
  1724. }
  1725. static void check_spinlock_acquired(struct kmem_cache *cachep)
  1726. {
  1727. #ifdef CONFIG_SMP
  1728. check_irq_off();
  1729. assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock);
  1730. #endif
  1731. }
  1732. static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
  1733. {
  1734. #ifdef CONFIG_SMP
  1735. check_irq_off();
  1736. assert_spin_locked(&cachep->nodelists[node]->list_lock);
  1737. #endif
  1738. }
  1739. #else
  1740. #define check_irq_off() do { } while(0)
  1741. #define check_irq_on() do { } while(0)
  1742. #define check_spinlock_acquired(x) do { } while(0)
  1743. #define check_spinlock_acquired_node(x, y) do { } while(0)
  1744. #endif
  1745. /*
  1746. * Waits for all CPUs to execute func().
  1747. */
  1748. static void smp_call_function_all_cpus(void (*func)(void *arg), void *arg)
  1749. {
  1750. check_irq_on();
  1751. preempt_disable();
  1752. local_irq_disable();
  1753. func(arg);
  1754. local_irq_enable();
  1755. if (smp_call_function(func, arg, 1, 1))
  1756. BUG();
  1757. preempt_enable();
  1758. }
  1759. static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
  1760. int force, int node);
  1761. static void do_drain(void *arg)
  1762. {
  1763. struct kmem_cache *cachep = (struct kmem_cache *) arg;
  1764. struct array_cache *ac;
  1765. int node = numa_node_id();
  1766. check_irq_off();
  1767. ac = cpu_cache_get(cachep);
  1768. spin_lock(&cachep->nodelists[node]->list_lock);
  1769. free_block(cachep, ac->entry, ac->avail, node);
  1770. spin_unlock(&cachep->nodelists[node]->list_lock);
  1771. ac->avail = 0;
  1772. }
  1773. static void drain_cpu_caches(struct kmem_cache *cachep)
  1774. {
  1775. struct kmem_list3 *l3;
  1776. int node;
  1777. smp_call_function_all_cpus(do_drain, cachep);
  1778. check_irq_on();
  1779. spin_lock_irq(&cachep->spinlock);
  1780. for_each_online_node(node) {
  1781. l3 = cachep->nodelists[node];
  1782. if (l3) {
  1783. spin_lock(&l3->list_lock);
  1784. drain_array_locked(cachep, l3->shared, 1, node);
  1785. spin_unlock(&l3->list_lock);
  1786. if (l3->alien)
  1787. drain_alien_cache(cachep, l3);
  1788. }
  1789. }
  1790. spin_unlock_irq(&cachep->spinlock);
  1791. }
  1792. static int __node_shrink(struct kmem_cache *cachep, int node)
  1793. {
  1794. struct slab *slabp;
  1795. struct kmem_list3 *l3 = cachep->nodelists[node];
  1796. int ret;
  1797. for (;;) {
  1798. struct list_head *p;
  1799. p = l3->slabs_free.prev;
  1800. if (p == &l3->slabs_free)
  1801. break;
  1802. slabp = list_entry(l3->slabs_free.prev, struct slab, list);
  1803. #if DEBUG
  1804. if (slabp->inuse)
  1805. BUG();
  1806. #endif
  1807. list_del(&slabp->list);
  1808. l3->free_objects -= cachep->num;
  1809. spin_unlock_irq(&l3->list_lock);
  1810. slab_destroy(cachep, slabp);
  1811. spin_lock_irq(&l3->list_lock);
  1812. }
  1813. ret = !list_empty(&l3->slabs_full) || !list_empty(&l3->slabs_partial);
  1814. return ret;
  1815. }
  1816. static int __cache_shrink(struct kmem_cache *cachep)
  1817. {
  1818. int ret = 0, i = 0;
  1819. struct kmem_list3 *l3;
  1820. drain_cpu_caches(cachep);
  1821. check_irq_on();
  1822. for_each_online_node(i) {
  1823. l3 = cachep->nodelists[i];
  1824. if (l3) {
  1825. spin_lock_irq(&l3->list_lock);
  1826. ret += __node_shrink(cachep, i);
  1827. spin_unlock_irq(&l3->list_lock);
  1828. }
  1829. }
  1830. return (ret ? 1 : 0);
  1831. }
  1832. /**
  1833. * kmem_cache_shrink - Shrink a cache.
  1834. * @cachep: The cache to shrink.
  1835. *
  1836. * Releases as many slabs as possible for a cache.
  1837. * To help debugging, a zero exit status indicates all slabs were released.
  1838. */
  1839. int kmem_cache_shrink(struct kmem_cache *cachep)
  1840. {
  1841. if (!cachep || in_interrupt())
  1842. BUG();
  1843. return __cache_shrink(cachep);
  1844. }
  1845. EXPORT_SYMBOL(kmem_cache_shrink);
  1846. /**
  1847. * kmem_cache_destroy - delete a cache
  1848. * @cachep: the cache to destroy
  1849. *
  1850. * Remove a struct kmem_cache object from the slab cache.
  1851. * Returns 0 on success.
  1852. *
  1853. * It is expected this function will be called by a module when it is
  1854. * unloaded. This will remove the cache completely, and avoid a duplicate
  1855. * cache being allocated each time a module is loaded and unloaded, if the
  1856. * module doesn't have persistent in-kernel storage across loads and unloads.
  1857. *
  1858. * The cache must be empty before calling this function.
  1859. *
1860. * The caller must guarantee that no one will allocate memory from the cache
  1861. * during the kmem_cache_destroy().
  1862. */
  1863. int kmem_cache_destroy(struct kmem_cache *cachep)
  1864. {
  1865. int i;
  1866. struct kmem_list3 *l3;
  1867. if (!cachep || in_interrupt())
  1868. BUG();
1869. /* Don't let CPUs come and go */
  1870. lock_cpu_hotplug();
  1871. /* Find the cache in the chain of caches. */
  1872. mutex_lock(&cache_chain_mutex);
  1873. /*
  1874. * the chain is never empty, cache_cache is never destroyed
  1875. */
  1876. list_del(&cachep->next);
  1877. mutex_unlock(&cache_chain_mutex);
  1878. if (__cache_shrink(cachep)) {
  1879. slab_error(cachep, "Can't free all objects");
  1880. mutex_lock(&cache_chain_mutex);
  1881. list_add(&cachep->next, &cache_chain);
  1882. mutex_unlock(&cache_chain_mutex);
  1883. unlock_cpu_hotplug();
  1884. return 1;
  1885. }
  1886. if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
  1887. synchronize_rcu();
  1888. for_each_online_cpu(i)
  1889. kfree(cachep->array[i]);
  1890. /* NUMA: free the list3 structures */
  1891. for_each_online_node(i) {
  1892. if ((l3 = cachep->nodelists[i])) {
  1893. kfree(l3->shared);
  1894. free_alien_cache(l3->alien);
  1895. kfree(l3);
  1896. }
  1897. }
  1898. kmem_cache_free(&cache_cache, cachep);
  1899. unlock_cpu_hotplug();
  1900. return 0;
  1901. }
  1902. EXPORT_SYMBOL(kmem_cache_destroy);
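/*
 * Illustrative teardown sketch (hypothetical module exit path, not part of
 * this file): every object must have been returned with kmem_cache_free()
 * before the cache is destroyed, otherwise the shrink above fails and the
 * cache is relinked into the chain.
 *
 *	kmem_cache_free(my_cache, obj);
 *	kmem_cache_destroy(my_cache);
 */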
  1903. /* Get the memory for a slab management obj. */
  1904. static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
  1905. int colour_off, gfp_t local_flags)
  1906. {
  1907. struct slab *slabp;
  1908. if (OFF_SLAB(cachep)) {
  1909. /* Slab management obj is off-slab. */
  1910. slabp = kmem_cache_alloc(cachep->slabp_cache, local_flags);
  1911. if (!slabp)
  1912. return NULL;
  1913. } else {
  1914. slabp = objp + colour_off;
  1915. colour_off += cachep->slab_size;
  1916. }
  1917. slabp->inuse = 0;
  1918. slabp->colouroff = colour_off;
  1919. slabp->s_mem = objp + colour_off;
  1920. return slabp;
  1921. }
  1922. static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
  1923. {
  1924. return (kmem_bufctl_t *) (slabp + 1);
  1925. }
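/*
 * cache_init_objs() initializes every object of a freshly grown slab
 * (constructor and/or debug poisoning/redzoning) and chains the
 * kmem_bufctl_t entries into an index-based free list: entry i points to
 * i + 1, the last entry is terminated with BUFCTL_END, and slabp->free
 * starts at object 0.
 */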
  1926. static void cache_init_objs(struct kmem_cache *cachep,
  1927. struct slab *slabp, unsigned long ctor_flags)
  1928. {
  1929. int i;
  1930. for (i = 0; i < cachep->num; i++) {
  1931. void *objp = slabp->s_mem + cachep->buffer_size * i;
  1932. #if DEBUG
  1933. /* need to poison the objs? */
  1934. if (cachep->flags & SLAB_POISON)
  1935. poison_obj(cachep, objp, POISON_FREE);
  1936. if (cachep->flags & SLAB_STORE_USER)
  1937. *dbg_userword(cachep, objp) = NULL;
  1938. if (cachep->flags & SLAB_RED_ZONE) {
  1939. *dbg_redzone1(cachep, objp) = RED_INACTIVE;
  1940. *dbg_redzone2(cachep, objp) = RED_INACTIVE;
  1941. }
  1942. /*
  1943. * Constructors are not allowed to allocate memory from
  1944. * the same cache which they are a constructor for.
  1945. * Otherwise, deadlock. They must also be threaded.
  1946. */
  1947. if (cachep->ctor && !(cachep->flags & SLAB_POISON))
  1948. cachep->ctor(objp + obj_offset(cachep), cachep,
  1949. ctor_flags);
  1950. if (cachep->flags & SLAB_RED_ZONE) {
  1951. if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
  1952. slab_error(cachep, "constructor overwrote the"
  1953. " end of an object");
  1954. if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
  1955. slab_error(cachep, "constructor overwrote the"
  1956. " start of an object");
  1957. }
  1958. if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)
  1959. && cachep->flags & SLAB_POISON)
  1960. kernel_map_pages(virt_to_page(objp),
  1961. cachep->buffer_size / PAGE_SIZE, 0);
  1962. #else
  1963. if (cachep->ctor)
  1964. cachep->ctor(objp, cachep, ctor_flags);
  1965. #endif
  1966. slab_bufctl(slabp)[i] = i + 1;
  1967. }
  1968. slab_bufctl(slabp)[i - 1] = BUFCTL_END;
  1969. slabp->free = 0;
  1970. }
  1971. static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
  1972. {
  1973. if (flags & SLAB_DMA) {
  1974. if (!(cachep->gfpflags & GFP_DMA))
  1975. BUG();
  1976. } else {
  1977. if (cachep->gfpflags & GFP_DMA)
  1978. BUG();
  1979. }
  1980. }
  1981. static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, int nodeid)
  1982. {
  1983. void *objp = slabp->s_mem + (slabp->free * cachep->buffer_size);
  1984. kmem_bufctl_t next;
  1985. slabp->inuse++;
  1986. next = slab_bufctl(slabp)[slabp->free];
  1987. #if DEBUG
  1988. slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
  1989. WARN_ON(slabp->nodeid != nodeid);
  1990. #endif
  1991. slabp->free = next;
  1992. return objp;
  1993. }
  1994. static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, void *objp,
  1995. int nodeid)
  1996. {
  1997. unsigned int objnr = (unsigned)(objp-slabp->s_mem) / cachep->buffer_size;
  1998. #if DEBUG
  1999. /* Verify that the slab belongs to the intended node */
  2000. WARN_ON(slabp->nodeid != nodeid);
  2001. if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
  2002. printk(KERN_ERR "slab: double free detected in cache "
  2003. "'%s', objp %p\n", cachep->name, objp);
  2004. BUG();
  2005. }
  2006. #endif
  2007. slab_bufctl(slabp)[objnr] = slabp->free;
  2008. slabp->free = objnr;
  2009. slabp->inuse--;
  2010. }
  2011. static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp, void *objp)
  2012. {
  2013. int i;
  2014. struct page *page;
  2015. /* Nasty!!!!!! I hope this is OK. */
  2016. i = 1 << cachep->gfporder;
  2017. page = virt_to_page(objp);
  2018. do {
  2019. page_set_cache(page, cachep);
  2020. page_set_slab(page, slabp);
  2021. page++;
  2022. } while (--i);
  2023. }
  2024. /*
  2025. * Grow (by 1) the number of slabs within a cache. This is called by
  2026. * kmem_cache_alloc() when there are no active objs left in a cache.
  2027. */
  2028. static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
  2029. {
  2030. struct slab *slabp;
  2031. void *objp;
  2032. size_t offset;
  2033. gfp_t local_flags;
  2034. unsigned long ctor_flags;
  2035. struct kmem_list3 *l3;
  2036. /* Be lazy and only check for valid flags here,
  2037. * keeping it out of the critical path in kmem_cache_alloc().
  2038. */
  2039. if (flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW))
  2040. BUG();
  2041. if (flags & SLAB_NO_GROW)
  2042. return 0;
  2043. ctor_flags = SLAB_CTOR_CONSTRUCTOR;
  2044. local_flags = (flags & SLAB_LEVEL_MASK);
  2045. if (!(local_flags & __GFP_WAIT))
  2046. /*
  2047. * Not allowed to sleep. Need to tell a constructor about
  2048. * this - it might need to know...
  2049. */
  2050. ctor_flags |= SLAB_CTOR_ATOMIC;
  2051. /* About to mess with non-constant members - lock. */
  2052. check_irq_off();
  2053. spin_lock(&cachep->spinlock);
2054. /* Get colour for the slab, and calculate the next value. */
  2055. offset = cachep->colour_next;
  2056. cachep->colour_next++;
  2057. if (cachep->colour_next >= cachep->colour)
  2058. cachep->colour_next = 0;
  2059. offset *= cachep->colour_off;
  2060. spin_unlock(&cachep->spinlock);
  2061. check_irq_off();
  2062. if (local_flags & __GFP_WAIT)
  2063. local_irq_enable();
  2064. /*
  2065. * The test for missing atomic flag is performed here, rather than
  2066. * the more obvious place, simply to reduce the critical path length
2067. * in kmem_cache_alloc(). If a caller is seriously misbehaving they
  2068. * will eventually be caught here (where it matters).
  2069. */
  2070. kmem_flagcheck(cachep, flags);
  2071. /* Get mem for the objs.
2072. * Attempt to allocate a physical page from 'nodeid'.
  2073. */
  2074. if (!(objp = kmem_getpages(cachep, flags, nodeid)))
  2075. goto failed;
  2076. /* Get slab management. */
  2077. if (!(slabp = alloc_slabmgmt(cachep, objp, offset, local_flags)))
  2078. goto opps1;
  2079. slabp->nodeid = nodeid;
  2080. set_slab_attr(cachep, slabp, objp);
  2081. cache_init_objs(cachep, slabp, ctor_flags);
  2082. if (local_flags & __GFP_WAIT)
  2083. local_irq_disable();
  2084. check_irq_off();
  2085. l3 = cachep->nodelists[nodeid];
  2086. spin_lock(&l3->list_lock);
  2087. /* Make slab active. */
  2088. list_add_tail(&slabp->list, &(l3->slabs_free));
  2089. STATS_INC_GROWN(cachep);
  2090. l3->free_objects += cachep->num;
  2091. spin_unlock(&l3->list_lock);
  2092. return 1;
  2093. opps1:
  2094. kmem_freepages(cachep, objp);
  2095. failed:
  2096. if (local_flags & __GFP_WAIT)
  2097. local_irq_disable();
  2098. return 0;
  2099. }
  2100. #if DEBUG
  2101. /*
  2102. * Perform extra freeing checks:
  2103. * - detect bad pointers.
  2104. * - POISON/RED_ZONE checking
  2105. * - destructor calls, for caches with POISON+dtor
  2106. */
  2107. static void kfree_debugcheck(const void *objp)
  2108. {
  2109. struct page *page;
  2110. if (!virt_addr_valid(objp)) {
  2111. printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
  2112. (unsigned long)objp);
  2113. BUG();
  2114. }
  2115. page = virt_to_page(objp);
  2116. if (!PageSlab(page)) {
  2117. printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n",
  2118. (unsigned long)objp);
  2119. BUG();
  2120. }
  2121. }
  2122. static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
  2123. void *caller)
  2124. {
  2125. struct page *page;
  2126. unsigned int objnr;
  2127. struct slab *slabp;
  2128. objp -= obj_offset(cachep);
  2129. kfree_debugcheck(objp);
  2130. page = virt_to_page(objp);
  2131. if (page_get_cache(page) != cachep) {
  2132. printk(KERN_ERR
  2133. "mismatch in kmem_cache_free: expected cache %p, got %p\n",
  2134. page_get_cache(page), cachep);
  2135. printk(KERN_ERR "%p is %s.\n", cachep, cachep->name);
  2136. printk(KERN_ERR "%p is %s.\n", page_get_cache(page),
  2137. page_get_cache(page)->name);
  2138. WARN_ON(1);
  2139. }
  2140. slabp = page_get_slab(page);
  2141. if (cachep->flags & SLAB_RED_ZONE) {
  2142. if (*dbg_redzone1(cachep, objp) != RED_ACTIVE
  2143. || *dbg_redzone2(cachep, objp) != RED_ACTIVE) {
  2144. slab_error(cachep,
  2145. "double free, or memory outside"
  2146. " object was overwritten");
  2147. printk(KERN_ERR
  2148. "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
  2149. objp, *dbg_redzone1(cachep, objp),
  2150. *dbg_redzone2(cachep, objp));
  2151. }
  2152. *dbg_redzone1(cachep, objp) = RED_INACTIVE;
  2153. *dbg_redzone2(cachep, objp) = RED_INACTIVE;
  2154. }
  2155. if (cachep->flags & SLAB_STORE_USER)
  2156. *dbg_userword(cachep, objp) = caller;
  2157. objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
  2158. BUG_ON(objnr >= cachep->num);
  2159. BUG_ON(objp != slabp->s_mem + objnr * cachep->buffer_size);
  2160. if (cachep->flags & SLAB_DEBUG_INITIAL) {
  2161. /* Need to call the slab's constructor so the
2162. * caller can verify its state (debugging).
  2163. * Called without the cache-lock held.
  2164. */
  2165. cachep->ctor(objp + obj_offset(cachep),
  2166. cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY);
  2167. }
  2168. if (cachep->flags & SLAB_POISON && cachep->dtor) {
  2169. /* we want to cache poison the object,
  2170. * call the destruction callback
  2171. */
  2172. cachep->dtor(objp + obj_offset(cachep), cachep, 0);
  2173. }
  2174. if (cachep->flags & SLAB_POISON) {
  2175. #ifdef CONFIG_DEBUG_PAGEALLOC
  2176. if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
  2177. store_stackinfo(cachep, objp, (unsigned long)caller);
  2178. kernel_map_pages(virt_to_page(objp),
  2179. cachep->buffer_size / PAGE_SIZE, 0);
  2180. } else {
  2181. poison_obj(cachep, objp, POISON_FREE);
  2182. }
  2183. #else
  2184. poison_obj(cachep, objp, POISON_FREE);
  2185. #endif
  2186. }
  2187. return objp;
  2188. }
  2189. static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
  2190. {
  2191. kmem_bufctl_t i;
  2192. int entries = 0;
  2193. /* Check slab's freelist to see if this obj is there. */
  2194. for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
  2195. entries++;
  2196. if (entries > cachep->num || i >= cachep->num)
  2197. goto bad;
  2198. }
  2199. if (entries != cachep->num - slabp->inuse) {
  2200. bad:
  2201. printk(KERN_ERR
  2202. "slab: Internal list corruption detected in cache '%s'(%d), slabp %p(%d). Hexdump:\n",
  2203. cachep->name, cachep->num, slabp, slabp->inuse);
  2204. for (i = 0;
  2205. i < sizeof(slabp) + cachep->num * sizeof(kmem_bufctl_t);
  2206. i++) {
  2207. if ((i % 16) == 0)
  2208. printk("\n%03x:", i);
  2209. printk(" %02x", ((unsigned char *)slabp)[i]);
  2210. }
  2211. printk("\n");
  2212. BUG();
  2213. }
  2214. }
  2215. #else
  2216. #define kfree_debugcheck(x) do { } while(0)
  2217. #define cache_free_debugcheck(x,objp,z) (objp)
  2218. #define check_slabp(x,y) do { } while(0)
  2219. #endif
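/*
 * cache_alloc_refill() is the slow path behind ____cache_alloc(): it refills
 * the per-cpu array_cache, first from the node's shared array, then by
 * pulling objects off partial and free slabs, and finally by growing the
 * cache with cache_grow() when nothing else is available.
 */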
  2220. static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
  2221. {
  2222. int batchcount;
  2223. struct kmem_list3 *l3;
  2224. struct array_cache *ac;
  2225. check_irq_off();
  2226. ac = cpu_cache_get(cachep);
  2227. retry:
  2228. batchcount = ac->batchcount;
  2229. if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
  2230. /* if there was little recent activity on this
  2231. * cache, then perform only a partial refill.
  2232. * Otherwise we could generate refill bouncing.
  2233. */
  2234. batchcount = BATCHREFILL_LIMIT;
  2235. }
  2236. l3 = cachep->nodelists[numa_node_id()];
  2237. BUG_ON(ac->avail > 0 || !l3);
  2238. spin_lock(&l3->list_lock);
  2239. if (l3->shared) {
  2240. struct array_cache *shared_array = l3->shared;
  2241. if (shared_array->avail) {
  2242. if (batchcount > shared_array->avail)
  2243. batchcount = shared_array->avail;
  2244. shared_array->avail -= batchcount;
  2245. ac->avail = batchcount;
  2246. memcpy(ac->entry,
  2247. &(shared_array->entry[shared_array->avail]),
  2248. sizeof(void *) * batchcount);
  2249. shared_array->touched = 1;
  2250. goto alloc_done;
  2251. }
  2252. }
  2253. while (batchcount > 0) {
  2254. struct list_head *entry;
  2255. struct slab *slabp;
2256. /* Get the slab the allocation is to come from. */
  2257. entry = l3->slabs_partial.next;
  2258. if (entry == &l3->slabs_partial) {
  2259. l3->free_touched = 1;
  2260. entry = l3->slabs_free.next;
  2261. if (entry == &l3->slabs_free)
  2262. goto must_grow;
  2263. }
  2264. slabp = list_entry(entry, struct slab, list);
  2265. check_slabp(cachep, slabp);
  2266. check_spinlock_acquired(cachep);
  2267. while (slabp->inuse < cachep->num && batchcount--) {
  2268. STATS_INC_ALLOCED(cachep);
  2269. STATS_INC_ACTIVE(cachep);
  2270. STATS_SET_HIGH(cachep);
  2271. ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
  2272. numa_node_id());
  2273. }
  2274. check_slabp(cachep, slabp);
  2275. /* move slabp to correct slabp list: */
  2276. list_del(&slabp->list);
  2277. if (slabp->free == BUFCTL_END)
  2278. list_add(&slabp->list, &l3->slabs_full);
  2279. else
  2280. list_add(&slabp->list, &l3->slabs_partial);
  2281. }
  2282. must_grow:
  2283. l3->free_objects -= ac->avail;
  2284. alloc_done:
  2285. spin_unlock(&l3->list_lock);
  2286. if (unlikely(!ac->avail)) {
  2287. int x;
  2288. x = cache_grow(cachep, flags, numa_node_id());
  2289. // cache_grow can reenable interrupts, then ac could change.
  2290. ac = cpu_cache_get(cachep);
  2291. if (!x && ac->avail == 0) // no objects in sight? abort
  2292. return NULL;
  2293. if (!ac->avail) // objects refilled by interrupt?
  2294. goto retry;
  2295. }
  2296. ac->touched = 1;
  2297. return ac->entry[--ac->avail];
  2298. }
  2299. static inline void
  2300. cache_alloc_debugcheck_before(struct kmem_cache *cachep, gfp_t flags)
  2301. {
  2302. might_sleep_if(flags & __GFP_WAIT);
  2303. #if DEBUG
  2304. kmem_flagcheck(cachep, flags);
  2305. #endif
  2306. }
  2307. #if DEBUG
  2308. static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, gfp_t flags,
  2309. void *objp, void *caller)
  2310. {
  2311. if (!objp)
  2312. return objp;
  2313. if (cachep->flags & SLAB_POISON) {
  2314. #ifdef CONFIG_DEBUG_PAGEALLOC
  2315. if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
  2316. kernel_map_pages(virt_to_page(objp),
  2317. cachep->buffer_size / PAGE_SIZE, 1);
  2318. else
  2319. check_poison_obj(cachep, objp);
  2320. #else
  2321. check_poison_obj(cachep, objp);
  2322. #endif
  2323. poison_obj(cachep, objp, POISON_INUSE);
  2324. }
  2325. if (cachep->flags & SLAB_STORE_USER)
  2326. *dbg_userword(cachep, objp) = caller;
  2327. if (cachep->flags & SLAB_RED_ZONE) {
  2328. if (*dbg_redzone1(cachep, objp) != RED_INACTIVE
  2329. || *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
  2330. slab_error(cachep,
  2331. "double free, or memory outside"
  2332. " object was overwritten");
  2333. printk(KERN_ERR
  2334. "%p: redzone 1: 0x%lx, redzone 2: 0x%lx.\n",
  2335. objp, *dbg_redzone1(cachep, objp),
  2336. *dbg_redzone2(cachep, objp));
  2337. }
  2338. *dbg_redzone1(cachep, objp) = RED_ACTIVE;
  2339. *dbg_redzone2(cachep, objp) = RED_ACTIVE;
  2340. }
  2341. objp += obj_offset(cachep);
  2342. if (cachep->ctor && cachep->flags & SLAB_POISON) {
  2343. unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;
  2344. if (!(flags & __GFP_WAIT))
  2345. ctor_flags |= SLAB_CTOR_ATOMIC;
  2346. cachep->ctor(objp, cachep, ctor_flags);
  2347. }
  2348. return objp;
  2349. }
  2350. #else
  2351. #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
  2352. #endif
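/*
 * Fast path: with interrupts disabled, objects are handed out straight from
 * the per-cpu array_cache without taking any locks; only on a miss does the
 * allocation fall back to cache_alloc_refill() (or to __cache_alloc_node()
 * when a NUMA memory policy points at another node).
 */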
  2353. static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
  2354. {
  2355. void *objp;
  2356. struct array_cache *ac;
  2357. #ifdef CONFIG_NUMA
  2358. if (unlikely(current->mempolicy && !in_interrupt())) {
  2359. int nid = slab_node(current->mempolicy);
  2360. if (nid != numa_node_id())
  2361. return __cache_alloc_node(cachep, flags, nid);
  2362. }
  2363. #endif
  2364. check_irq_off();
  2365. ac = cpu_cache_get(cachep);
  2366. if (likely(ac->avail)) {
  2367. STATS_INC_ALLOCHIT(cachep);
  2368. ac->touched = 1;
  2369. objp = ac->entry[--ac->avail];
  2370. } else {
  2371. STATS_INC_ALLOCMISS(cachep);
  2372. objp = cache_alloc_refill(cachep, flags);
  2373. }
  2374. return objp;
  2375. }
  2376. static __always_inline void *
  2377. __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
  2378. {
  2379. unsigned long save_flags;
  2380. void *objp;
  2381. cache_alloc_debugcheck_before(cachep, flags);
  2382. local_irq_save(save_flags);
  2383. objp = ____cache_alloc(cachep, flags);
  2384. local_irq_restore(save_flags);
  2385. objp = cache_alloc_debugcheck_after(cachep, flags, objp,
  2386. caller);
  2387. prefetchw(objp);
  2388. return objp;
  2389. }
  2390. #ifdef CONFIG_NUMA
  2391. /*
2392. * An interface to enable slab creation on nodeid
  2393. */
  2394. static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
  2395. {
  2396. struct list_head *entry;
  2397. struct slab *slabp;
  2398. struct kmem_list3 *l3;
  2399. void *obj;
  2400. int x;
  2401. l3 = cachep->nodelists[nodeid];
  2402. BUG_ON(!l3);
  2403. retry:
  2404. spin_lock(&l3->list_lock);
  2405. entry = l3->slabs_partial.next;
  2406. if (entry == &l3->slabs_partial) {
  2407. l3->free_touched = 1;
  2408. entry = l3->slabs_free.next;
  2409. if (entry == &l3->slabs_free)
  2410. goto must_grow;
  2411. }
  2412. slabp = list_entry(entry, struct slab, list);
  2413. check_spinlock_acquired_node(cachep, nodeid);
  2414. check_slabp(cachep, slabp);
  2415. STATS_INC_NODEALLOCS(cachep);
  2416. STATS_INC_ACTIVE(cachep);
  2417. STATS_SET_HIGH(cachep);
  2418. BUG_ON(slabp->inuse == cachep->num);
  2419. obj = slab_get_obj(cachep, slabp, nodeid);
  2420. check_slabp(cachep, slabp);
  2421. l3->free_objects--;
  2422. /* move slabp to correct slabp list: */
  2423. list_del(&slabp->list);
  2424. if (slabp->free == BUFCTL_END) {
  2425. list_add(&slabp->list, &l3->slabs_full);
  2426. } else {
  2427. list_add(&slabp->list, &l3->slabs_partial);
  2428. }
  2429. spin_unlock(&l3->list_lock);
  2430. goto done;
  2431. must_grow:
  2432. spin_unlock(&l3->list_lock);
  2433. x = cache_grow(cachep, flags, nodeid);
  2434. if (!x)
  2435. return NULL;
  2436. goto retry;
  2437. done:
  2438. return obj;
  2439. }
  2440. #endif
  2441. /*
2442. * Caller needs to acquire the correct kmem_list3's list_lock
  2443. */
  2444. static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
  2445. int node)
  2446. {
  2447. int i;
  2448. struct kmem_list3 *l3;
  2449. for (i = 0; i < nr_objects; i++) {
  2450. void *objp = objpp[i];
  2451. struct slab *slabp;
  2452. slabp = virt_to_slab(objp);
  2453. l3 = cachep->nodelists[node];
  2454. list_del(&slabp->list);
  2455. check_spinlock_acquired_node(cachep, node);
  2456. check_slabp(cachep, slabp);
  2457. slab_put_obj(cachep, slabp, objp, node);
  2458. STATS_DEC_ACTIVE(cachep);
  2459. l3->free_objects++;
  2460. check_slabp(cachep, slabp);
  2461. /* fixup slab chains */
  2462. if (slabp->inuse == 0) {
  2463. if (l3->free_objects > l3->free_limit) {
  2464. l3->free_objects -= cachep->num;
  2465. slab_destroy(cachep, slabp);
  2466. } else {
  2467. list_add(&slabp->list, &l3->slabs_free);
  2468. }
  2469. } else {
  2470. /* Unconditionally move a slab to the end of the
  2471. * partial list on free - maximum time for the
  2472. * other objects to be freed, too.
  2473. */
  2474. list_add_tail(&slabp->list, &l3->slabs_partial);
  2475. }
  2476. }
  2477. }
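/*
 * cache_flusharray() drains batchcount objects from a full per-cpu
 * array_cache: they are pushed into the node's shared array if there is
 * room, otherwise handed back to their slabs via free_block(), and the
 * remaining entries are shifted to the front of the array.
 */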
  2478. static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
  2479. {
  2480. int batchcount;
  2481. struct kmem_list3 *l3;
  2482. int node = numa_node_id();
  2483. batchcount = ac->batchcount;
  2484. #if DEBUG
  2485. BUG_ON(!batchcount || batchcount > ac->avail);
  2486. #endif
  2487. check_irq_off();
  2488. l3 = cachep->nodelists[node];
  2489. spin_lock(&l3->list_lock);
  2490. if (l3->shared) {
  2491. struct array_cache *shared_array = l3->shared;
  2492. int max = shared_array->limit - shared_array->avail;
  2493. if (max) {
  2494. if (batchcount > max)
  2495. batchcount = max;
  2496. memcpy(&(shared_array->entry[shared_array->avail]),
  2497. ac->entry, sizeof(void *) * batchcount);
  2498. shared_array->avail += batchcount;
  2499. goto free_done;
  2500. }
  2501. }
  2502. free_block(cachep, ac->entry, batchcount, node);
  2503. free_done:
  2504. #if STATS
  2505. {
  2506. int i = 0;
  2507. struct list_head *p;
  2508. p = l3->slabs_free.next;
  2509. while (p != &(l3->slabs_free)) {
  2510. struct slab *slabp;
  2511. slabp = list_entry(p, struct slab, list);
  2512. BUG_ON(slabp->inuse);
  2513. i++;
  2514. p = p->next;
  2515. }
  2516. STATS_SET_FREEABLE(cachep, i);
  2517. }
  2518. #endif
  2519. spin_unlock(&l3->list_lock);
  2520. ac->avail -= batchcount;
  2521. memmove(ac->entry, &(ac->entry[batchcount]),
  2522. sizeof(void *) * ac->avail);
  2523. }
  2524. /*
  2525. * __cache_free
  2526. * Release an obj back to its cache. If the obj has a constructed
  2527. * state, it must be in this state _before_ it is released.
  2528. *
  2529. * Called with disabled ints.
  2530. */
  2531. static inline void __cache_free(struct kmem_cache *cachep, void *objp)
  2532. {
  2533. struct array_cache *ac = cpu_cache_get(cachep);
  2534. check_irq_off();
  2535. objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
2536. /* Make sure we are not freeing an object from another
  2537. * node to the array cache on this cpu.
  2538. */
  2539. #ifdef CONFIG_NUMA
  2540. {
  2541. struct slab *slabp;
  2542. slabp = virt_to_slab(objp);
  2543. if (unlikely(slabp->nodeid != numa_node_id())) {
  2544. struct array_cache *alien = NULL;
  2545. int nodeid = slabp->nodeid;
  2546. struct kmem_list3 *l3 =
  2547. cachep->nodelists[numa_node_id()];
  2548. STATS_INC_NODEFREES(cachep);
  2549. if (l3->alien && l3->alien[nodeid]) {
  2550. alien = l3->alien[nodeid];
  2551. spin_lock(&alien->lock);
  2552. if (unlikely(alien->avail == alien->limit))
  2553. __drain_alien_cache(cachep,
  2554. alien, nodeid);
  2555. alien->entry[alien->avail++] = objp;
  2556. spin_unlock(&alien->lock);
  2557. } else {
  2558. spin_lock(&(cachep->nodelists[nodeid])->
  2559. list_lock);
  2560. free_block(cachep, &objp, 1, nodeid);
  2561. spin_unlock(&(cachep->nodelists[nodeid])->
  2562. list_lock);
  2563. }
  2564. return;
  2565. }
  2566. }
  2567. #endif
  2568. if (likely(ac->avail < ac->limit)) {
  2569. STATS_INC_FREEHIT(cachep);
  2570. ac->entry[ac->avail++] = objp;
  2571. return;
  2572. } else {
  2573. STATS_INC_FREEMISS(cachep);
  2574. cache_flusharray(cachep, ac);
  2575. ac->entry[ac->avail++] = objp;
  2576. }
  2577. }
  2578. /**
  2579. * kmem_cache_alloc - Allocate an object
  2580. * @cachep: The cache to allocate from.
  2581. * @flags: See kmalloc().
  2582. *
  2583. * Allocate an object from this cache. The flags are only relevant
  2584. * if the cache has no available objects.
  2585. */
  2586. void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
  2587. {
  2588. return __cache_alloc(cachep, flags, __builtin_return_address(0));
  2589. }
  2590. EXPORT_SYMBOL(kmem_cache_alloc);
  2591. /**
  2592. * kmem_ptr_validate - check if an untrusted pointer might
  2593. * be a slab entry.
  2594. * @cachep: the cache we're checking against
  2595. * @ptr: pointer to validate
  2596. *
  2597. * This verifies that the untrusted pointer looks sane:
  2598. * it is _not_ a guarantee that the pointer is actually
  2599. * part of the slab cache in question, but it at least
  2600. * validates that the pointer can be dereferenced and
  2601. * looks half-way sane.
  2602. *
  2603. * Currently only used for dentry validation.
  2604. */
  2605. int fastcall kmem_ptr_validate(struct kmem_cache *cachep, void *ptr)
  2606. {
  2607. unsigned long addr = (unsigned long)ptr;
  2608. unsigned long min_addr = PAGE_OFFSET;
  2609. unsigned long align_mask = BYTES_PER_WORD - 1;
  2610. unsigned long size = cachep->buffer_size;
  2611. struct page *page;
  2612. if (unlikely(addr < min_addr))
  2613. goto out;
  2614. if (unlikely(addr > (unsigned long)high_memory - size))
  2615. goto out;
  2616. if (unlikely(addr & align_mask))
  2617. goto out;
  2618. if (unlikely(!kern_addr_valid(addr)))
  2619. goto out;
  2620. if (unlikely(!kern_addr_valid(addr + size - 1)))
  2621. goto out;
  2622. page = virt_to_page(ptr);
  2623. if (unlikely(!PageSlab(page)))
  2624. goto out;
  2625. if (unlikely(page_get_cache(page) != cachep))
  2626. goto out;
  2627. return 1;
  2628. out:
  2629. return 0;
  2630. }
  2631. #ifdef CONFIG_NUMA
  2632. /**
  2633. * kmem_cache_alloc_node - Allocate an object on the specified node
  2634. * @cachep: The cache to allocate from.
  2635. * @flags: See kmalloc().
  2636. * @nodeid: node number of the target node.
  2637. *
  2638. * Identical to kmem_cache_alloc, except that this function is slow
2639. * and can sleep. It will allocate memory on the given node, which
2640. * can improve the performance for cpu-bound structures.
  2641. * New and improved: it will now make sure that the object gets
  2642. * put on the correct node list so that there is no false sharing.
  2643. */
  2644. void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
  2645. {
  2646. unsigned long save_flags;
  2647. void *ptr;
  2648. cache_alloc_debugcheck_before(cachep, flags);
  2649. local_irq_save(save_flags);
  2650. if (nodeid == -1 || nodeid == numa_node_id() ||
  2651. !cachep->nodelists[nodeid])
  2652. ptr = ____cache_alloc(cachep, flags);
  2653. else
  2654. ptr = __cache_alloc_node(cachep, flags, nodeid);
  2655. local_irq_restore(save_flags);
  2656. ptr = cache_alloc_debugcheck_after(cachep, flags, ptr,
  2657. __builtin_return_address(0));
  2658. return ptr;
  2659. }
  2660. EXPORT_SYMBOL(kmem_cache_alloc_node);
  2661. void *kmalloc_node(size_t size, gfp_t flags, int node)
  2662. {
  2663. struct kmem_cache *cachep;
  2664. cachep = kmem_find_general_cachep(size, flags);
  2665. if (unlikely(cachep == NULL))
  2666. return NULL;
  2667. return kmem_cache_alloc_node(cachep, flags, node);
  2668. }
  2669. EXPORT_SYMBOL(kmalloc_node);
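/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * node-local allocations help for data mostly touched by CPUs on one node.
 *
 *	data = kmalloc_node(sizeof(*data), GFP_KERNEL, cpu_to_node(cpu));
 *	if (!data)
 *		data = kmalloc(sizeof(*data), GFP_KERNEL);
 */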
  2670. #endif
  2671. /**
  2672. * kmalloc - allocate memory
  2673. * @size: how many bytes of memory are required.
  2674. * @flags: the type of memory to allocate.
  2675. *
  2676. * kmalloc is the normal method of allocating memory
  2677. * in the kernel.
  2678. *
  2679. * The @flags argument may be one of:
  2680. *
  2681. * %GFP_USER - Allocate memory on behalf of user. May sleep.
  2682. *
  2683. * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
  2684. *
  2685. * %GFP_ATOMIC - Allocation will not sleep. Use inside interrupt handlers.
  2686. *
  2687. * Additionally, the %GFP_DMA flag may be set to indicate the memory
  2688. * must be suitable for DMA. This can mean different things on different
  2689. * platforms. For example, on i386, it means that the memory must come
  2690. * from the first 16MB.
  2691. */
  2692. static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
  2693. void *caller)
  2694. {
  2695. struct kmem_cache *cachep;
  2696. /* If you want to save a few bytes .text space: replace
  2697. * __ with kmem_.
  2698. * Then kmalloc uses the uninlined functions instead of the inline
  2699. * functions.
  2700. */
  2701. cachep = __find_general_cachep(size, flags);
  2702. if (unlikely(cachep == NULL))
  2703. return NULL;
  2704. return __cache_alloc(cachep, flags, caller);
  2705. }
  2706. #ifndef CONFIG_DEBUG_SLAB
  2707. void *__kmalloc(size_t size, gfp_t flags)
  2708. {
  2709. return __do_kmalloc(size, flags, NULL);
  2710. }
  2711. EXPORT_SYMBOL(__kmalloc);
  2712. #else
  2713. void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
  2714. {
  2715. return __do_kmalloc(size, flags, caller);
  2716. }
  2717. EXPORT_SYMBOL(__kmalloc_track_caller);
  2718. #endif
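/*
 * Illustrative kmalloc()/kfree() sketch (hypothetical caller, not part of
 * this file): the GFP flags documented above select the allocation context;
 * GFP_KERNEL may sleep, GFP_ATOMIC must be used from interrupt context.
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kfree(buf);
 */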
#ifdef CONFIG_SMP
/**
 * __alloc_percpu - allocate one copy of the object for every present
 * cpu in the system, zeroing them.
 * Objects should be dereferenced using the per_cpu_ptr macro only.
 *
 * @size: how many bytes of memory are required.
 */
void *__alloc_percpu(size_t size)
{
	int i;
	struct percpu_data *pdata = kmalloc(sizeof(*pdata), GFP_KERNEL);

	if (!pdata)
		return NULL;

	/*
	 * Cannot use for_each_online_cpu since a cpu may come online
	 * later, and there is no way to grow the array we have already
	 * allocated at that point.
	 */
	for_each_cpu(i) {
		int node = cpu_to_node(i);

		if (node_online(node))
			pdata->ptrs[i] = kmalloc_node(size, GFP_KERNEL, node);
		else
			pdata->ptrs[i] = kmalloc(size, GFP_KERNEL);

		if (!pdata->ptrs[i])
			goto unwind_oom;
		memset(pdata->ptrs[i], 0, size);
	}

	/* Catch derefs w/o wrappers */
	return (void *)(~(unsigned long)pdata);

unwind_oom:
	while (--i >= 0) {
		if (!cpu_possible(i))
			continue;
		kfree(pdata->ptrs[i]);
	}
	kfree(pdata);
	return NULL;
}
EXPORT_SYMBOL(__alloc_percpu);
#endif
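/*
 * Illustrative example (not part of the original file): allocating per-cpu
 * counters with __alloc_percpu() (normally via the alloc_percpu() wrapper),
 * dereferencing them only through per_cpu_ptr() as the comment above
 * requires, and releasing them again with free_percpu(). The function and
 * variable names are hypothetical.
 */
#if 0
static void example_percpu_counters(void)
{
	unsigned long *counters;
	int cpu;

	counters = __alloc_percpu(sizeof(unsigned long));
	if (!counters)
		return;

	/* Never dereference the returned pointer directly. */
	for_each_online_cpu(cpu)
		*per_cpu_ptr(counters, cpu) = 0;

	free_percpu(counters);
}
#endif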
/**
 * kmem_cache_free - Deallocate an object
 * @cachep: The cache the allocation was from.
 * @objp: The previously allocated object.
 *
 * Free an object which was previously allocated from this
 * cache.
 */
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	unsigned long flags;

	local_irq_save(flags);
	__cache_free(cachep, objp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(kmem_cache_free);
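/*
 * Illustrative example (not part of the original file): the usual
 * kmem_cache_alloc()/kmem_cache_free() pairing against a dedicated cache.
 * The cache, structure and function names are hypothetical; the cache is
 * assumed to have been set up elsewhere with kmem_cache_create().
 */
#if 0
static struct kmem_cache *example_cachep;

struct example_obj {
	int id;
};

static void example_cache_roundtrip(void)
{
	struct example_obj *obj;

	obj = kmem_cache_alloc(example_cachep, GFP_KERNEL);
	if (!obj)
		return;
	obj->id = 1;
	/* Always free back to the cache the object came from. */
	kmem_cache_free(example_cachep, obj);
}
#endif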
/**
 * kfree - free previously allocated memory
 * @objp: pointer returned by kmalloc.
 *
 * If @objp is NULL, no operation is performed.
 *
 * Don't free memory not originally allocated by kmalloc()
 * or you will run into trouble.
 */
void kfree(const void *objp)
{
	struct kmem_cache *c;
	unsigned long flags;

	if (unlikely(!objp))
		return;
	local_irq_save(flags);
	kfree_debugcheck(objp);
	c = virt_to_cache(objp);
	mutex_debug_check_no_locks_freed(objp, obj_size(c));
	__cache_free(c, (void *)objp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(kfree);
#ifdef CONFIG_SMP
/**
 * free_percpu - free previously allocated percpu memory
 * @objp: pointer returned by alloc_percpu.
 *
 * Don't free memory not originally allocated by alloc_percpu();
 * the complemented objp is used to check for that.
 */
void free_percpu(const void *objp)
{
	int i;
	struct percpu_data *p = (struct percpu_data *)(~(unsigned long)objp);

	/*
	 * We allocated for all cpus, so we cannot restrict this walk
	 * to online cpus.
	 */
	for_each_cpu(i)
		kfree(p->ptrs[i]);
	kfree(p);
}
EXPORT_SYMBOL(free_percpu);
#endif
unsigned int kmem_cache_size(struct kmem_cache *cachep)
{
	return obj_size(cachep);
}
EXPORT_SYMBOL(kmem_cache_size);

const char *kmem_cache_name(struct kmem_cache *cachep)
{
	return cachep->name;
}
EXPORT_SYMBOL_GPL(kmem_cache_name);
/*
 * This initializes kmem_list3 for all nodes.
 */
static int alloc_kmemlist(struct kmem_cache *cachep)
{
	int node;
	struct kmem_list3 *l3;
	int err = 0;

	for_each_online_node(node) {
		struct array_cache *nc = NULL, *new;
		struct array_cache **new_alien = NULL;
#ifdef CONFIG_NUMA
		if (!(new_alien = alloc_alien_cache(node, cachep->limit)))
			goto fail;
#endif
		if (!(new = alloc_arraycache(node, (cachep->shared *
						    cachep->batchcount),
					     0xbaadf00d)))
			goto fail;
		if ((l3 = cachep->nodelists[node])) {
			spin_lock_irq(&l3->list_lock);

			if ((nc = cachep->nodelists[node]->shared))
				free_block(cachep, nc->entry, nc->avail, node);

			l3->shared = new;
			if (!cachep->nodelists[node]->alien) {
				l3->alien = new_alien;
				new_alien = NULL;
			}
			l3->free_limit = (1 + nr_cpus_node(node)) *
			    cachep->batchcount + cachep->num;
			spin_unlock_irq(&l3->list_lock);

			kfree(nc);
			free_alien_cache(new_alien);
			continue;
		}
		if (!(l3 = kmalloc_node(sizeof(struct kmem_list3),
					GFP_KERNEL, node)))
			goto fail;

		kmem_list3_init(l3);
		l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
		    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
		l3->shared = new;
		l3->alien = new_alien;
		l3->free_limit = (1 + nr_cpus_node(node)) *
		    cachep->batchcount + cachep->num;
		cachep->nodelists[node] = l3;
	}
	return err;
fail:
	err = -ENOMEM;
	return err;
}
struct ccupdate_struct {
	struct kmem_cache *cachep;
	struct array_cache *new[NR_CPUS];
};

static void do_ccupdate_local(void *info)
{
	struct ccupdate_struct *new = (struct ccupdate_struct *)info;
	struct array_cache *old;

	check_irq_off();
	old = cpu_cache_get(new->cachep);

	new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
	new->new[smp_processor_id()] = old;
}
static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
			    int batchcount, int shared)
{
	struct ccupdate_struct new;
	int i, err;

	memset(&new.new, 0, sizeof(new.new));
	for_each_online_cpu(i) {
		new.new[i] = alloc_arraycache(cpu_to_node(i), limit,
					      batchcount);
		if (!new.new[i]) {
			for (i--; i >= 0; i--)
				kfree(new.new[i]);
			return -ENOMEM;
		}
	}
	new.cachep = cachep;

	smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);

	check_irq_on();
	spin_lock_irq(&cachep->spinlock);
	cachep->batchcount = batchcount;
	cachep->limit = limit;
	cachep->shared = shared;
	spin_unlock_irq(&cachep->spinlock);

	for_each_online_cpu(i) {
		struct array_cache *ccold = new.new[i];
		if (!ccold)
			continue;
		spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
		free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
		spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
		kfree(ccold);
	}

	err = alloc_kmemlist(cachep);
	if (err) {
		printk(KERN_ERR "alloc_kmemlist failed for %s, error %d.\n",
		       cachep->name, -err);
		BUG();
	}
	return 0;
}
static void enable_cpucache(struct kmem_cache *cachep)
{
	int err;
	int limit, shared;

	/* The head array serves three purposes:
	 * - create a LIFO ordering, i.e. return objects that are cache-warm
	 * - reduce the number of spinlock operations.
	 * - reduce the number of linked list operations on the slab and
	 *   bufctl chains: array operations are cheaper.
	 * The numbers are a guess; we should auto-tune as described by
	 * Bonwick.
	 */
	if (cachep->buffer_size > 131072)
		limit = 1;
	else if (cachep->buffer_size > PAGE_SIZE)
		limit = 8;
	else if (cachep->buffer_size > 1024)
		limit = 24;
	else if (cachep->buffer_size > 256)
		limit = 54;
	else
		limit = 120;

	/* CPU-bound tasks (e.g. network routing) can exhibit cpu-bound
	 * allocation behaviour: most allocs on one cpu, most free operations
	 * on another cpu. For these cases, an efficient object passing
	 * between cpus is necessary. This is provided by a shared array.
	 * The array replaces Bonwick's magazine layer.
	 * On uniprocessor, it's functionally equivalent (but less efficient)
	 * to a larger limit. Thus it is disabled by default.
	 */
	shared = 0;
#ifdef CONFIG_SMP
	if (cachep->buffer_size <= PAGE_SIZE)
		shared = 8;
#endif

#if DEBUG
	/* With debugging enabled, large batchcounts lead to excessively
	 * long periods with local interrupts disabled. Limit the
	 * batchcount.
	 */
	if (limit > 32)
		limit = 32;
#endif
	err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
	if (err)
		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
		       cachep->name, -err);
}
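/*
 * Worked example (illustrative, not from the original source): under the
 * heuristic above, a cache with 512-byte objects gets limit = 54, hence
 * batchcount = (54 + 1) / 2 = 27, and on SMP shared = 8 because the object
 * fits within PAGE_SIZE. The resulting tuning call is equivalent to:
 */
#if 0
	do_tune_cpucache(cachep, 54, 27, 8);
#endif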
static void drain_array_locked(struct kmem_cache *cachep,
			       struct array_cache *ac, int force, int node)
{
	int tofree;

	check_spinlock_acquired_node(cachep, node);
	if (ac->touched && !force) {
		ac->touched = 0;
	} else if (ac->avail) {
		tofree = force ? ac->avail : (ac->limit + 4) / 5;
		if (tofree > ac->avail)
			tofree = (ac->avail + 1) / 2;
		free_block(cachep, ac->entry, tofree, node);
		ac->avail -= tofree;
		memmove(ac->entry, &(ac->entry[tofree]),
			sizeof(void *) * ac->avail);
	}
}
/**
 * cache_reap - Reclaim memory from caches.
 * @unused: unused parameter
 *
 * Called from workqueue/eventd every few seconds.
 * Purpose:
 * - clear the per-cpu caches for this CPU.
 * - return freeable pages to the main free memory pool.
 *
 * If we cannot acquire the cache chain mutex then just give up - we'll
 * try again on the next iteration.
 */
static void cache_reap(void *unused)
{
	struct list_head *walk;
	struct kmem_list3 *l3;

	if (!mutex_trylock(&cache_chain_mutex)) {
		/* Give up. Setup the next iteration. */
		schedule_delayed_work(&__get_cpu_var(reap_work),
				      REAPTIMEOUT_CPUC);
		return;
	}

	list_for_each(walk, &cache_chain) {
		struct kmem_cache *searchp;
		struct list_head *p;
		int tofree;
		struct slab *slabp;

		searchp = list_entry(walk, struct kmem_cache, next);

		if (searchp->flags & SLAB_NO_REAP)
			goto next;

		check_irq_on();

		l3 = searchp->nodelists[numa_node_id()];
		if (l3->alien)
			drain_alien_cache(searchp, l3);
		spin_lock_irq(&l3->list_lock);

		drain_array_locked(searchp, cpu_cache_get(searchp), 0,
				   numa_node_id());

		if (time_after(l3->next_reap, jiffies))
			goto next_unlock;

		l3->next_reap = jiffies + REAPTIMEOUT_LIST3;

		if (l3->shared)
			drain_array_locked(searchp, l3->shared, 0,
					   numa_node_id());

		if (l3->free_touched) {
			l3->free_touched = 0;
			goto next_unlock;
		}

		tofree = (l3->free_limit + 5 * searchp->num - 1) /
			 (5 * searchp->num);
		do {
			p = l3->slabs_free.next;
			if (p == &(l3->slabs_free))
				break;

			slabp = list_entry(p, struct slab, list);
			BUG_ON(slabp->inuse);
			list_del(&slabp->list);
			STATS_INC_REAPED(searchp);

			/* Safe to drop the lock. The slab is no longer
			 * linked to the cache. searchp cannot disappear,
			 * we hold cache_chain_mutex.
			 */
			l3->free_objects -= searchp->num;
			spin_unlock_irq(&l3->list_lock);
			slab_destroy(searchp, slabp);
			spin_lock_irq(&l3->list_lock);
		} while (--tofree > 0);
next_unlock:
		spin_unlock_irq(&l3->list_lock);
next:
		cond_resched();
	}
	check_irq_on();
	mutex_unlock(&cache_chain_mutex);
	drain_remote_pages();
	/* Setup the next iteration */
	schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
}
#ifdef CONFIG_PROC_FS

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#if STATS
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
		 "<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#if STATS
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
		 "<error> <maxfreeable> <nodeallocs> <remotefrees>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}
static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	struct list_head *p;

	mutex_lock(&cache_chain_mutex);
	if (!n)
		print_slabinfo_header(m);
	p = cache_chain.next;
	while (n--) {
		p = p->next;
		if (p == &cache_chain)
			return NULL;
	}
	return list_entry(p, struct kmem_cache, next);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct kmem_cache *cachep = p;

	++*pos;
	return cachep->next.next == &cache_chain ? NULL
	    : list_entry(cachep->next.next, struct kmem_cache, next);
}

static void s_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&cache_chain_mutex);
}
static int s_show(struct seq_file *m, void *p)
{
	struct kmem_cache *cachep = p;
	struct list_head *q;
	struct slab *slabp;
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs = 0;
	unsigned long num_slabs, free_objects = 0, shared_avail = 0;
	const char *name;
	char *error = NULL;
	int node;
	struct kmem_list3 *l3;

	check_irq_on();
	spin_lock_irq(&cachep->spinlock);
	active_objs = 0;
	num_slabs = 0;
	for_each_online_node(node) {
		l3 = cachep->nodelists[node];
		if (!l3)
			continue;

		spin_lock(&l3->list_lock);

		list_for_each(q, &l3->slabs_full) {
			slabp = list_entry(q, struct slab, list);
			if (slabp->inuse != cachep->num && !error)
				error = "slabs_full accounting error";
			active_objs += cachep->num;
			active_slabs++;
		}
		list_for_each(q, &l3->slabs_partial) {
			slabp = list_entry(q, struct slab, list);
			if (slabp->inuse == cachep->num && !error)
				error = "slabs_partial inuse accounting error";
			if (!slabp->inuse && !error)
				error = "slabs_partial/inuse accounting error";
			active_objs += slabp->inuse;
			active_slabs++;
		}
		list_for_each(q, &l3->slabs_free) {
			slabp = list_entry(q, struct slab, list);
			if (slabp->inuse && !error)
				error = "slabs_free/inuse accounting error";
			num_slabs++;
		}
		free_objects += l3->free_objects;
		shared_avail += l3->shared->avail;

		spin_unlock(&l3->list_lock);
	}
	num_slabs += active_slabs;
	num_objs = num_slabs * cachep->num;
	if (num_objs - active_objs != free_objects && !error)
		error = "free_objects accounting error";

	name = cachep->name;
	if (error)
		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   name, active_objs, num_objs, cachep->buffer_size,
		   cachep->num, (1 << cachep->gfporder));
	seq_printf(m, " : tunables %4u %4u %4u",
		   cachep->limit, cachep->batchcount, cachep->shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   active_slabs, num_slabs, shared_avail);
#if STATS
	{			/* list3 stats */
		unsigned long high = cachep->high_mark;
		unsigned long allocs = cachep->num_allocations;
		unsigned long grown = cachep->grown;
		unsigned long reaped = cachep->reaped;
		unsigned long errors = cachep->errors;
		unsigned long max_freeable = cachep->max_freeable;
		unsigned long node_allocs = cachep->node_allocs;
		unsigned long node_frees = cachep->node_frees;

		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
			   "%4lu %4lu %4lu %4lu",
			   allocs, high, grown, reaped, errors,
			   max_freeable, node_allocs, node_frees);
	}
	/* cpu stats */
	{
		unsigned long allochit = atomic_read(&cachep->allochit);
		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
		unsigned long freehit = atomic_read(&cachep->freehit);
		unsigned long freemiss = atomic_read(&cachep->freemiss);

		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
			   allochit, allocmiss, freehit, freemiss);
	}
#endif
	seq_putc(m, '\n');
	spin_unlock_irq(&cachep->spinlock);
	return 0;
}
/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
struct seq_operations slabinfo_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};
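/*
 * Illustrative example (not part of the original file): a /proc/slabinfo
 * line, as produced by s_show() above, might look like the following
 * (the cache name and all values are made up):
 *
 *   dentry_cache  20412  20445    136   29    1 : tunables  120   60    8 : slabdata    705    705      0
 *
 * i.e. 20412 active objects out of 20445, 136 bytes per object, 29 objects
 * per slab, 1 page per slab, followed by the limit/batchcount/shared
 * tunables and the slabdata counts of active slabs, total slabs and
 * shared avail.
 */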
#define MAX_SLABINFO_WRITE 128
/**
 * slabinfo_write - Tuning for the slab allocator
 * @file: unused
 * @buffer: user buffer
 * @count: data length
 * @ppos: unused
 */
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
{
	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
	int limit, batchcount, shared, res;
	struct list_head *p;

	if (count > MAX_SLABINFO_WRITE)
		return -EINVAL;
	if (copy_from_user(&kbuf, buffer, count))
		return -EFAULT;
	kbuf[MAX_SLABINFO_WRITE] = '\0';

	tmp = strchr(kbuf, ' ');
	if (!tmp)
		return -EINVAL;
	*tmp = '\0';
	tmp++;
	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
		return -EINVAL;

	/* Find the cache in the chain of caches. */
	mutex_lock(&cache_chain_mutex);
	res = -EINVAL;
	list_for_each(p, &cache_chain) {
		struct kmem_cache *cachep = list_entry(p, struct kmem_cache,
						       next);

		if (!strcmp(cachep->name, kbuf)) {
			if (limit < 1 ||
			    batchcount < 1 ||
			    batchcount > limit || shared < 0) {
				res = 0;
			} else {
				res = do_tune_cpucache(cachep, limit,
						       batchcount, shared);
			}
			break;
		}
	}
	mutex_unlock(&cache_chain_mutex);
	if (res >= 0)
		res = count;
	return res;
}
#endif
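/*
 * Illustrative example (not part of the original file): slabinfo_write()
 * implements the tuning side of /proc/slabinfo, so the per-cache tunables
 * can be changed from userspace by writing "name limit batchcount shared",
 * e.g. (the cache name is chosen arbitrarily):
 *
 *   echo "dentry_cache 120 60 8" > /proc/slabinfo
 *
 * If the values fail the sanity checks (limit < 1, batchcount < 1,
 * batchcount > limit, or shared < 0) the write is accepted but no tuning
 * is applied.
 */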
/**
 * ksize - get the actual amount of memory allocated for a given object
 * @objp: Pointer to the object
 *
 * kmalloc may internally round up allocations and return more memory
 * than requested. ksize() can be used to determine the actual amount of
 * memory allocated. The caller may use this additional memory, even though
 * a smaller amount of memory was initially specified with the kmalloc call.
 * The caller must guarantee that objp points to a valid object previously
 * allocated with either kmalloc() or kmem_cache_alloc(). The object
 * must not be freed during the duration of the call.
 */
unsigned int ksize(const void *objp)
{
	if (unlikely(objp == NULL))
		return 0;

	return obj_size(virt_to_cache(objp));
}
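/*
 * Illustrative example (not part of the original file): kmalloc() rounds the
 * request up to the nearest general cache size, and ksize() reports that
 * rounded size, so the extra bytes may legitimately be used. The function
 * below is hypothetical.
 */
#if 0
static void example_use_full_allocation(void)
{
	char *buf = kmalloc(100, GFP_KERNEL);
	size_t real;

	if (!buf)
		return;
	/* On most configurations a 100-byte request is served from the
	 * 128-byte general cache, so real would be 128 here. */
	real = ksize(buf);
	memset(buf, 0, real);
	kfree(buf);
}
#endif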