slub.c

  1. /*
  2. * SLUB: A slab allocator that limits cache line use instead of queuing
  3. * objects in per cpu and per node lists.
  4. *
  5. * The allocator synchronizes using per slab locks and only
  6. * uses a centralized lock to manage a pool of partial slabs.
  7. *
  8. * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
  9. */
  10. #include <linux/mm.h>
  11. #include <linux/module.h>
  12. #include <linux/bit_spinlock.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/bitops.h>
  15. #include <linux/slab.h>
  16. #include <linux/seq_file.h>
  17. #include <linux/cpu.h>
  18. #include <linux/cpuset.h>
  19. #include <linux/mempolicy.h>
  20. #include <linux/ctype.h>
  21. #include <linux/kallsyms.h>
  22. /*
  23. * Lock order:
  24. * 1. slab_lock(page)
  25. * 2. node->list_lock
  26. *
  27. * The slab_lock protects operations on the objects of a particular
  28. * slab and its metadata in the page struct. If the slab lock
  29. * has been taken then no allocations nor frees can be performed
  30. * on the objects in the slab nor can the slab be added or removed
  31. * from the partial or full lists since this would mean modifying
  32. * the page_struct of the slab.
  33. *
  34. * The list_lock protects the partial and full list on each node and
  35. * the partial slab counter. If taken then no new slabs may be added or
  36. * removed from the lists nor can the number of partial slabs be modified.
  37. * (Note that the total number of slabs is an atomic value that may be
  38. * modified without taking the list lock).
  39. *
  40. * The list_lock is a centralized lock and thus we avoid taking it as
  41. * much as possible. As long as SLUB does not have to handle partial
  42. * slabs, operations can continue without any centralized lock. F.e.
  43. * allocating a long series of objects that fill up slabs does not require
  44. * the list lock.
  45. *
  46. * The lock order is sometimes inverted when we are trying to get a slab
  47. * off a list. We take the list_lock and then look for a page on the list
  48. * to use. While we do that objects in the slabs may be freed. We can
  49. * only operate on the slab if we have also taken the slab_lock. So we use
  50. * a slab_trylock() on the slab. If trylock was successful then no frees
  51. * can occur anymore and we can use the slab for allocations etc. If the
  52. * slab_trylock() does not succeed then frees are in progress in the slab and
  53. * we must stay away from it for a while since we may cause a bouncing
  54. * cacheline if we try to acquire the lock. So go onto the next slab.
  55. * If all pages are busy then we may allocate a new slab instead of reusing
  56. * a partial slab. A new slab has no one operating on it and thus there is
  57. * no danger of cacheline contention.
  58. *
  59. * Interrupts are disabled during allocation and deallocation in order to
  60. * make the slab allocator safe to use in the context of an irq. In addition
  61. * interrupts are disabled to ensure that the processor does not change
  62. * while handling per_cpu slabs, due to kernel preemption.
  63. *
  64. * SLUB assigns one slab for allocation to each processor.
  65. * Allocations only occur from these slabs called cpu slabs.
  66. *
  67. * Slabs with free elements are kept on a partial list and during regular
  68. * operations no list for full slabs is used. If an object in a full slab is
  69. * freed then the slab will show up again on the partial lists.
  70. * We track full slabs for debugging purposes though because otherwise we
  71. * cannot scan all objects.
  72. *
  73. * Slabs are freed when they become empty. Teardown and setup is
  74. * minimal so we rely on the page allocator's per cpu caches for
  75. * fast frees and allocs.
  76. *
  77. * Overloading of page flags that are otherwise used for LRU management.
  78. *
  79. * PageActive The slab is used as a cpu cache. Allocations
  80. * may be performed from the slab. The slab is not
  81. * on any slab list and cannot be moved onto one.
  82. *
  83. * PageError Slab requires special handling due to debug
  84. * options set. This moves slab handling out of
  85. * the fast path.
  86. */
  87. static inline int SlabDebug(struct page *page)
  88. {
  89. #ifdef CONFIG_SLUB_DEBUG
  90. return PageError(page);
  91. #else
  92. return 0;
  93. #endif
  94. }
  95. static inline void SetSlabDebug(struct page *page)
  96. {
  97. #ifdef CONFIG_SLUB_DEBUG
  98. SetPageError(page);
  99. #endif
  100. }
  101. static inline void ClearSlabDebug(struct page *page)
  102. {
  103. #ifdef CONFIG_SLUB_DEBUG
  104. ClearPageError(page);
  105. #endif
  106. }
  107. /*
  108. * Issues still to be resolved:
  109. *
  110. * - The per cpu array is updated for each new slab and is a remote
  111. * cacheline for most nodes. This could become a bouncing cacheline given
  112. * enough frequent updates. There are 16 pointers in a cacheline, so at
  113. * max 16 cpus could compete for the cacheline which may be okay.
  114. *
  115. * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
  116. *
  117. * - Variable sizing of the per node arrays
  118. */
  119. /* Enable to test recovery from slab corruption on boot */
  120. #undef SLUB_RESILIENCY_TEST
  121. #if PAGE_SHIFT <= 12
  122. /*
  123. * Small page size. Make sure that we do not fragment memory
  124. */
  125. #define DEFAULT_MAX_ORDER 1
  126. #define DEFAULT_MIN_OBJECTS 4
  127. #else
  128. /*
  129. * Large page machines are customarily able to handle larger
  130. * page orders.
  131. */
  132. #define DEFAULT_MAX_ORDER 2
  133. #define DEFAULT_MIN_OBJECTS 8
  134. #endif
  135. /*
  136. * Minimum number of partial slabs. These will be left on the partial
  137. * lists even if they are empty. kmem_cache_shrink may reclaim them.
  138. */
  139. #define MIN_PARTIAL 2
  140. /*
  141. * Maximum number of desirable partial slabs.
  142. * The existence of more partial slabs makes kmem_cache_shrink
  143. * sort the partial list by the number of objects in use.
  144. */
  145. #define MAX_PARTIAL 10
  146. #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
  147. SLAB_POISON | SLAB_STORE_USER)
  148. /*
  149. * Set of flags that will prevent slab merging
  150. */
  151. #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
  152. SLAB_TRACE | SLAB_DESTROY_BY_RCU)
  153. #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
  154. SLAB_CACHE_DMA)
  155. #ifndef ARCH_KMALLOC_MINALIGN
  156. #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
  157. #endif
  158. #ifndef ARCH_SLAB_MINALIGN
  159. #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
  160. #endif
  161. /* Internal SLUB flags */
  162. #define __OBJECT_POISON 0x80000000 /* Poison object */
  163. /* Not all arches define cache_line_size */
  164. #ifndef cache_line_size
  165. #define cache_line_size() L1_CACHE_BYTES
  166. #endif
  167. static int kmem_size = sizeof(struct kmem_cache);
  168. #ifdef CONFIG_SMP
  169. static struct notifier_block slab_notifier;
  170. #endif
  171. static enum {
  172. DOWN, /* No slab functionality available */
  173. PARTIAL, /* kmem_cache_open() works but kmalloc does not */
  174. UP, /* Everything works but does not show up in sysfs */
  175. SYSFS /* Sysfs up */
  176. } slab_state = DOWN;
  177. /* A list of all slab caches on the system */
  178. static DECLARE_RWSEM(slub_lock);
  179. LIST_HEAD(slab_caches);
  180. /*
  181. * Tracking user of a slab.
  182. */
  183. struct track {
  184. void *addr; /* Called from address */
  185. int cpu; /* Was running on cpu */
  186. int pid; /* Pid context */
  187. unsigned long when; /* When did the operation occur */
  188. };
  189. enum track_item { TRACK_ALLOC, TRACK_FREE };
  190. #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
  191. static int sysfs_slab_add(struct kmem_cache *);
  192. static int sysfs_slab_alias(struct kmem_cache *, const char *);
  193. static void sysfs_slab_remove(struct kmem_cache *);
  194. #else
  195. static int sysfs_slab_add(struct kmem_cache *s) { return 0; }
  196. static int sysfs_slab_alias(struct kmem_cache *s, const char *p) { return 0; }
  197. static void sysfs_slab_remove(struct kmem_cache *s) {}
  198. #endif
  199. /********************************************************************
  200. * Core slab cache functions
  201. *******************************************************************/
  202. int slab_is_available(void)
  203. {
  204. return slab_state >= UP;
  205. }
  206. static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
  207. {
  208. #ifdef CONFIG_NUMA
  209. return s->node[node];
  210. #else
  211. return &s->local_node;
  212. #endif
  213. }
  214. static inline int check_valid_pointer(struct kmem_cache *s,
  215. struct page *page, const void *object)
  216. {
  217. void *base;
  218. if (!object)
  219. return 1;
  220. base = page_address(page);
  221. if (object < base || object >= base + s->objects * s->size ||
  222. (object - base) % s->size) {
  223. return 0;
  224. }
  225. return 1;
  226. }
  227. /*
  228. * Slow version of get and set free pointer.
  229. *
  230. * This version requires touching the cache lines of kmem_cache which
  231. * we avoid doing in the fast alloc/free paths. There we obtain the offset
  232. * from the page struct.
  233. */
  234. static inline void *get_freepointer(struct kmem_cache *s, void *object)
  235. {
  236. return *(void **)(object + s->offset);
  237. }
  238. static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
  239. {
  240. *(void **)(object + s->offset) = fp;
  241. }
  242. /* Loop over all objects in a slab */
  243. #define for_each_object(__p, __s, __addr) \
  244. for (__p = (__addr); __p < (__addr) + (__s)->objects * (__s)->size;\
  245. __p += (__s)->size)
  246. /* Scan freelist */
  247. #define for_each_free_object(__p, __s, __free) \
  248. for (__p = (__free); __p; __p = get_freepointer((__s), __p))
  249. /* Determine object index from a given position */
  250. static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
  251. {
  252. return (p - addr) / s->size;
  253. }
  254. #ifdef CONFIG_SLUB_DEBUG
  255. /*
  256. * Debug settings:
  257. */
  258. static int slub_debug;
  259. static char *slub_debug_slabs;
  260. /*
  261. * Object debugging
  262. */
  263. static void print_section(char *text, u8 *addr, unsigned int length)
  264. {
  265. int i, offset;
  266. int newline = 1;
  267. char ascii[17];
  268. ascii[16] = 0;
  269. for (i = 0; i < length; i++) {
  270. if (newline) {
  271. printk(KERN_ERR "%10s 0x%p: ", text, addr + i);
  272. newline = 0;
  273. }
  274. printk(" %02x", addr[i]);
  275. offset = i % 16;
  276. ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
  277. if (offset == 15) {
  278. printk(" %s\n",ascii);
  279. newline = 1;
  280. }
  281. }
  282. if (!newline) {
  283. i %= 16;
  284. while (i < 16) {
  285. printk(" ");
  286. ascii[i] = ' ';
  287. i++;
  288. }
  289. printk(" %s\n", ascii);
  290. }
  291. }
  292. static struct track *get_track(struct kmem_cache *s, void *object,
  293. enum track_item alloc)
  294. {
  295. struct track *p;
  296. if (s->offset)
  297. p = object + s->offset + sizeof(void *);
  298. else
  299. p = object + s->inuse;
  300. return p + alloc;
  301. }
  302. static void set_track(struct kmem_cache *s, void *object,
  303. enum track_item alloc, void *addr)
  304. {
  305. struct track *p;
  306. if (s->offset)
  307. p = object + s->offset + sizeof(void *);
  308. else
  309. p = object + s->inuse;
  310. p += alloc;
  311. if (addr) {
  312. p->addr = addr;
  313. p->cpu = smp_processor_id();
  314. p->pid = current ? current->pid : -1;
  315. p->when = jiffies;
  316. } else
  317. memset(p, 0, sizeof(struct track));
  318. }
  319. static void init_tracking(struct kmem_cache *s, void *object)
  320. {
  321. if (s->flags & SLAB_STORE_USER) {
  322. set_track(s, object, TRACK_FREE, NULL);
  323. set_track(s, object, TRACK_ALLOC, NULL);
  324. }
  325. }
  326. static void print_track(const char *s, struct track *t)
  327. {
  328. if (!t->addr)
  329. return;
  330. printk(KERN_ERR "%s: ", s);
  331. __print_symbol("%s", (unsigned long)t->addr);
  332. printk(" jiffies_ago=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid);
  333. }
  334. static void print_trailer(struct kmem_cache *s, u8 *p)
  335. {
  336. unsigned int off; /* Offset of last byte */
  337. if (s->flags & SLAB_RED_ZONE)
  338. print_section("Redzone", p + s->objsize,
  339. s->inuse - s->objsize);
  340. printk(KERN_ERR "FreePointer 0x%p -> 0x%p\n",
  341. p + s->offset,
  342. get_freepointer(s, p));
  343. if (s->offset)
  344. off = s->offset + sizeof(void *);
  345. else
  346. off = s->inuse;
  347. if (s->flags & SLAB_STORE_USER) {
  348. print_track("Last alloc", get_track(s, p, TRACK_ALLOC));
  349. print_track("Last free ", get_track(s, p, TRACK_FREE));
  350. off += 2 * sizeof(struct track);
  351. }
  352. if (off != s->size)
  353. /* Beginning of the filler is the free pointer */
  354. print_section("Filler", p + off, s->size - off);
  355. }
  356. static void object_err(struct kmem_cache *s, struct page *page,
  357. u8 *object, char *reason)
  358. {
  359. u8 *addr = page_address(page);
  360. printk(KERN_ERR "*** SLUB %s: %s@0x%p slab 0x%p\n",
  361. s->name, reason, object, page);
  362. printk(KERN_ERR " offset=%tu flags=0x%04lx inuse=%u freelist=0x%p\n",
  363. object - addr, page->flags, page->inuse, page->freelist);
  364. if (object > addr + 16)
  365. print_section("Bytes b4", object - 16, 16);
  366. print_section("Object", object, min(s->objsize, 128));
  367. print_trailer(s, object);
  368. dump_stack();
  369. }
  370. static void slab_err(struct kmem_cache *s, struct page *page, char *reason, ...)
  371. {
  372. va_list args;
  373. char buf[100];
  374. va_start(args, reason);
  375. vsnprintf(buf, sizeof(buf), reason, args);
  376. va_end(args);
  377. printk(KERN_ERR "*** SLUB %s: %s in slab @0x%p\n", s->name, buf,
  378. page);
  379. dump_stack();
  380. }
  381. static void init_object(struct kmem_cache *s, void *object, int active)
  382. {
  383. u8 *p = object;
  384. if (s->flags & __OBJECT_POISON) {
  385. memset(p, POISON_FREE, s->objsize - 1);
  386. p[s->objsize -1] = POISON_END;
  387. }
  388. if (s->flags & SLAB_RED_ZONE)
  389. memset(p + s->objsize,
  390. active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
  391. s->inuse - s->objsize);
  392. }
  393. static int check_bytes(u8 *start, unsigned int value, unsigned int bytes)
  394. {
  395. while (bytes) {
  396. if (*start != (u8)value)
  397. return 0;
  398. start++;
  399. bytes--;
  400. }
  401. return 1;
  402. }
  403. /*
  404. * Object layout:
  405. *
  406. * object address
  407. * Bytes of the object to be managed.
  408. * If the freepointer may overlay the object then the free
  409. * pointer is the first word of the object.
  410. *
  411. * Poisoning uses 0x6b (POISON_FREE) and the last byte is
  412. * 0xa5 (POISON_END)
  413. *
  414. * object + s->objsize
  415. * Padding to reach word boundary. This is also used for Redzoning.
  416. * Padding is extended by another word if Redzoning is enabled and
  417. * objsize == inuse.
  418. *
  419. * We fill with 0xbb (RED_INACTIVE) for inactive objects and with
  420. * 0xcc (RED_ACTIVE) for objects in use.
  421. *
  422. * object + s->inuse
  423. * Meta data starts here.
  424. *
  425. * A. Free pointer (if we cannot overwrite object on free)
  426. * B. Tracking data for SLAB_STORE_USER
  427. * C. Padding to reach required alignment boundary or at minimum
  428. * one word if debugging is on, to be able to detect writes
  429. * before the word boundary.
  430. *
  431. * Padding is done using 0x5a (POISON_INUSE)
  432. *
  433. * object + s->size
  434. * Nothing is used beyond s->size.
  435. *
  436. * If slabcaches are merged then the objsize and inuse boundaries are mostly
  437. * ignored. And therefore no slab options that rely on these boundaries
  438. * may be used with merged slabcaches.
  439. */
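/*
 * A compact map of the layout described above (offsets relative to the
 * object address):
 *
 *	[0, objsize)		object payload, poisoned while free if
 *				poisoning is enabled
 *	[objsize, inuse)	red zone, or alignment padding filled with
 *				POISON_INUSE
 *	[inuse, size)		free pointer (only when s->offset != 0),
 *				then two struct track entries when
 *				SLAB_STORE_USER is set, then POISON_INUSE
 *				padding up to the next object
 */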
  440. static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
  441. void *from, void *to)
  442. {
  443. printk(KERN_ERR "@@@ SLUB %s: Restoring %s (0x%x) from 0x%p-0x%p\n",
  444. s->name, message, data, from, to - 1);
  445. memset(from, data, to - from);
  446. }
  447. static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
  448. {
  449. unsigned long off = s->inuse; /* The end of info */
  450. if (s->offset)
  451. /* Freepointer is placed after the object. */
  452. off += sizeof(void *);
  453. if (s->flags & SLAB_STORE_USER)
  454. /* We also have user information there */
  455. off += 2 * sizeof(struct track);
  456. if (s->size == off)
  457. return 1;
  458. if (check_bytes(p + off, POISON_INUSE, s->size - off))
  459. return 1;
  460. object_err(s, page, p, "Object padding check fails");
  461. /*
  462. * Restore padding
  463. */
  464. restore_bytes(s, "object padding", POISON_INUSE, p + off, p + s->size);
  465. return 0;
  466. }
  467. static int slab_pad_check(struct kmem_cache *s, struct page *page)
  468. {
  469. u8 *p;
  470. int length, remainder;
  471. if (!(s->flags & SLAB_POISON))
  472. return 1;
  473. p = page_address(page);
  474. length = s->objects * s->size;
  475. remainder = (PAGE_SIZE << s->order) - length;
  476. if (!remainder)
  477. return 1;
  478. if (!check_bytes(p + length, POISON_INUSE, remainder)) {
  479. slab_err(s, page, "Padding check failed");
  480. restore_bytes(s, "slab padding", POISON_INUSE, p + length,
  481. p + length + remainder);
  482. return 0;
  483. }
  484. return 1;
  485. }
  486. static int check_object(struct kmem_cache *s, struct page *page,
  487. void *object, int active)
  488. {
  489. u8 *p = object;
  490. u8 *endobject = object + s->objsize;
  491. if (s->flags & SLAB_RED_ZONE) {
  492. unsigned int red =
  493. active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;
  494. if (!check_bytes(endobject, red, s->inuse - s->objsize)) {
  495. object_err(s, page, object,
  496. active ? "Redzone Active" : "Redzone Inactive");
  497. restore_bytes(s, "redzone", red,
  498. endobject, object + s->inuse);
  499. return 0;
  500. }
  501. } else {
  502. if ((s->flags & SLAB_POISON) && s->objsize < s->inuse &&
  503. !check_bytes(endobject, POISON_INUSE,
  504. s->inuse - s->objsize)) {
  505. object_err(s, page, p, "Alignment padding check fails");
  506. /*
  507. * Fix it so that there will not be another report.
  508. *
  509. * Hmmm... We may be corrupting an object that now expects
  510. * to be longer than allowed.
  511. */
  512. restore_bytes(s, "alignment padding", POISON_INUSE,
  513. endobject, object + s->inuse);
  514. }
  515. }
  516. if (s->flags & SLAB_POISON) {
  517. if (!active && (s->flags & __OBJECT_POISON) &&
  518. (!check_bytes(p, POISON_FREE, s->objsize - 1) ||
  519. p[s->objsize - 1] != POISON_END)) {
  520. object_err(s, page, p, "Poison check failed");
  521. restore_bytes(s, "Poison", POISON_FREE,
  522. p, p + s->objsize -1);
  523. restore_bytes(s, "Poison", POISON_END,
  524. p + s->objsize - 1, p + s->objsize);
  525. return 0;
  526. }
  527. /*
  528. * check_pad_bytes cleans up on its own.
  529. */
  530. check_pad_bytes(s, page, p);
  531. }
  532. if (!s->offset && active)
  533. /*
  534. * Object and freepointer overlap. Cannot check
  535. * freepointer while object is allocated.
  536. */
  537. return 1;
  538. /* Check free pointer validity */
  539. if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
  540. object_err(s, page, p, "Freepointer corrupt");
  541. /*
  542. * No choice but to zap it and thus lose the remainder
  543. * of the free objects in this slab. May cause
  544. * another error because the object count is now wrong.
  545. */
  546. set_freepointer(s, p, NULL);
  547. return 0;
  548. }
  549. return 1;
  550. }
  551. static int check_slab(struct kmem_cache *s, struct page *page)
  552. {
  553. VM_BUG_ON(!irqs_disabled());
  554. if (!PageSlab(page)) {
  555. slab_err(s, page, "Not a valid slab page flags=%lx "
  556. "mapping=0x%p count=%d", page->flags, page->mapping,
  557. page_count(page));
  558. return 0;
  559. }
  560. if (page->offset * sizeof(void *) != s->offset) {
  561. slab_err(s, page, "Corrupted offset %lu flags=0x%lx "
  562. "mapping=0x%p count=%d",
  563. (unsigned long)(page->offset * sizeof(void *)),
  564. page->flags,
  565. page->mapping,
  566. page_count(page));
  567. return 0;
  568. }
  569. if (page->inuse > s->objects) {
  570. slab_err(s, page, "inuse %u > max %u @0x%p flags=%lx "
  571. "mapping=0x%p count=%d",
  572. page->inuse, s->objects, page, page->flags,
  573. page->mapping, page_count(page));
  574. return 0;
  575. }
  576. /* Slab_pad_check fixes things up after itself */
  577. slab_pad_check(s, page);
  578. return 1;
  579. }
  580. /*
  581. * Determine if a certain object on a page is on the freelist. Must hold the
  582. * slab lock to guarantee that the chains are in a consistent state.
  583. */
  584. static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
  585. {
  586. int nr = 0;
  587. void *fp = page->freelist;
  588. void *object = NULL;
  589. while (fp && nr <= s->objects) {
  590. if (fp == search)
  591. return 1;
  592. if (!check_valid_pointer(s, page, fp)) {
  593. if (object) {
  594. object_err(s, page, object,
  595. "Freechain corrupt");
  596. set_freepointer(s, object, NULL);
  597. break;
  598. } else {
  599. slab_err(s, page, "Freepointer 0x%p corrupt",
  600. fp);
  601. page->freelist = NULL;
  602. page->inuse = s->objects;
  603. printk(KERN_ERR "@@@ SLUB %s: Freelist "
  604. "cleared. Slab 0x%p\n",
  605. s->name, page);
  606. return 0;
  607. }
  608. break;
  609. }
  610. object = fp;
  611. fp = get_freepointer(s, object);
  612. nr++;
  613. }
  614. if (page->inuse != s->objects - nr) {
  615. slab_err(s, page, "Wrong object count. Counter is %d but "
  616. "counted were %d", s, page, page->inuse,
  617. s->objects - nr);
  618. page->inuse = s->objects - nr;
  619. printk(KERN_ERR "@@@ SLUB %s: Object count adjusted. "
  620. "Slab @0x%p\n", s->name, page);
  621. }
  622. return search == NULL;
  623. }
  624. /*
  625. * Tracking of fully allocated slabs for debugging purposes.
  626. */
  627. static void add_full(struct kmem_cache_node *n, struct page *page)
  628. {
  629. spin_lock(&n->list_lock);
  630. list_add(&page->lru, &n->full);
  631. spin_unlock(&n->list_lock);
  632. }
  633. static void remove_full(struct kmem_cache *s, struct page *page)
  634. {
  635. struct kmem_cache_node *n;
  636. if (!(s->flags & SLAB_STORE_USER))
  637. return;
  638. n = get_node(s, page_to_nid(page));
  639. spin_lock(&n->list_lock);
  640. list_del(&page->lru);
  641. spin_unlock(&n->list_lock);
  642. }
  643. static int alloc_object_checks(struct kmem_cache *s, struct page *page,
  644. void *object)
  645. {
  646. if (!check_slab(s, page))
  647. goto bad;
  648. if (object && !on_freelist(s, page, object)) {
  649. slab_err(s, page, "Object 0x%p already allocated", object);
  650. goto bad;
  651. }
  652. if (!check_valid_pointer(s, page, object)) {
  653. object_err(s, page, object, "Freelist Pointer check fails");
  654. goto bad;
  655. }
  656. if (!object)
  657. return 1;
  658. if (!check_object(s, page, object, 0))
  659. goto bad;
  660. return 1;
  661. bad:
  662. if (PageSlab(page)) {
  663. /*
  664. * If this is a slab page then let's do the best we can
  665. * to avoid issues in the future. Marking all objects
  666. * as used avoids touching the remaining objects.
  667. */
  668. printk(KERN_ERR "@@@ SLUB: %s slab 0x%p. Marking all objects used.\n",
  669. s->name, page);
  670. page->inuse = s->objects;
  671. page->freelist = NULL;
  672. /* Fix up fields that may be corrupted */
  673. page->offset = s->offset / sizeof(void *);
  674. }
  675. return 0;
  676. }
  677. static int free_object_checks(struct kmem_cache *s, struct page *page,
  678. void *object)
  679. {
  680. if (!check_slab(s, page))
  681. goto fail;
  682. if (!check_valid_pointer(s, page, object)) {
  683. slab_err(s, page, "Invalid object pointer 0x%p", object);
  684. goto fail;
  685. }
  686. if (on_freelist(s, page, object)) {
  687. slab_err(s, page, "Object 0x%p already free", object);
  688. goto fail;
  689. }
  690. if (!check_object(s, page, object, 1))
  691. return 0;
  692. if (unlikely(s != page->slab)) {
  693. if (!PageSlab(page))
  694. slab_err(s, page, "Attempt to free object(0x%p) "
  695. "outside of slab", object);
  696. else
  697. if (!page->slab) {
  698. printk(KERN_ERR
  699. "SLUB <none>: no slab for object 0x%p.\n",
  700. object);
  701. dump_stack();
  702. }
  703. else
  704. slab_err(s, page, "object at 0x%p belongs "
  705. "to slab %s", object, page->slab->name);
  706. goto fail;
  707. }
  708. return 1;
  709. fail:
  710. printk(KERN_ERR "@@@ SLUB: %s slab 0x%p object at 0x%p not freed.\n",
  711. s->name, page, object);
  712. return 0;
  713. }
  714. static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
  715. {
  716. if (s->flags & SLAB_TRACE) {
  717. printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
  718. s->name,
  719. alloc ? "alloc" : "free",
  720. object, page->inuse,
  721. page->freelist);
  722. if (!alloc)
  723. print_section("Object", (void *)object, s->objsize);
  724. dump_stack();
  725. }
  726. }
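/*
 * Boot parameter parsing. "slub_debug" alone enables the default debug
 * options for all caches; "slub_debug=<flags>" selects individual options
 * from the letters handled below; "slub_debug=<flags>,<name>" restricts
 * debugging to caches whose name starts with <name>, for example
 * "slub_debug=ZPU,dentry" (the cache name is only illustrative).
 */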
  727. static int __init setup_slub_debug(char *str)
  728. {
  729. if (!str || *str != '=')
  730. slub_debug = DEBUG_DEFAULT_FLAGS;
  731. else {
  732. str++;
  733. if (*str == 0 || *str == ',')
  734. slub_debug = DEBUG_DEFAULT_FLAGS;
  735. else
  736. for( ;*str && *str != ','; str++)
  737. switch (*str) {
  738. case 'f' : case 'F' :
  739. slub_debug |= SLAB_DEBUG_FREE;
  740. break;
  741. case 'z' : case 'Z' :
  742. slub_debug |= SLAB_RED_ZONE;
  743. break;
  744. case 'p' : case 'P' :
  745. slub_debug |= SLAB_POISON;
  746. break;
  747. case 'u' : case 'U' :
  748. slub_debug |= SLAB_STORE_USER;
  749. break;
  750. case 't' : case 'T' :
  751. slub_debug |= SLAB_TRACE;
  752. break;
  753. default:
  754. printk(KERN_ERR "slub_debug option '%c' "
  755. "unknown. skipped\n",*str);
  756. }
  757. }
  758. if (*str == ',')
  759. slub_debug_slabs = str + 1;
  760. return 1;
  761. }
  762. __setup("slub_debug", setup_slub_debug);
  763. static void kmem_cache_open_debug_check(struct kmem_cache *s)
  764. {
  765. /*
  766. * The page->offset field is only 16 bit wide. This is an offset
  767. * in units of words from the beginning of an object. If the slab
  768. * size is bigger than that, we cannot move the free pointer behind the
  769. * object anymore.
  770. *
  771. * On 32 bit platforms the limit is 256k. On 64bit platforms
  772. * the limit is 512k.
  773. *
  774. * Debugging or ctor/dtors may create a need to move the free
  775. * pointer. Fail if this happens.
  776. */
  777. if (s->size >= 65535 * sizeof(void *)) {
  778. BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON |
  779. SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
  780. BUG_ON(s->ctor || s->dtor);
  781. }
  782. else
  783. /*
  784. * Enable debugging if selected on the kernel commandline.
  785. */
  786. if (slub_debug && (!slub_debug_slabs ||
  787. strncmp(slub_debug_slabs, s->name,
  788. strlen(slub_debug_slabs)) == 0))
  789. s->flags |= slub_debug;
  790. }
  791. #else
  792. static inline int alloc_object_checks(struct kmem_cache *s,
  793. struct page *page, void *object) { return 0; }
  794. static inline int free_object_checks(struct kmem_cache *s,
  795. struct page *page, void *object) { return 0; }
  796. static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
  797. static inline void remove_full(struct kmem_cache *s, struct page *page) {}
  798. static inline void trace(struct kmem_cache *s, struct page *page,
  799. void *object, int alloc) {}
  800. static inline void init_object(struct kmem_cache *s,
  801. void *object, int active) {}
  802. static inline void init_tracking(struct kmem_cache *s, void *object) {}
  803. static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
  804. { return 1; }
  805. static inline int check_object(struct kmem_cache *s, struct page *page,
  806. void *object, int active) { return 1; }
  807. static inline void set_track(struct kmem_cache *s, void *object,
  808. enum track_item alloc, void *addr) {}
  809. static inline void kmem_cache_open_debug_check(struct kmem_cache *s) {}
  810. #define slub_debug 0
  811. #endif
  812. /*
  813. * Slab allocation and freeing
  814. */
  815. static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
  816. {
  817. struct page * page;
  818. int pages = 1 << s->order;
  819. if (s->order)
  820. flags |= __GFP_COMP;
  821. if (s->flags & SLAB_CACHE_DMA)
  822. flags |= SLUB_DMA;
  823. if (node == -1)
  824. page = alloc_pages(flags, s->order);
  825. else
  826. page = alloc_pages_node(node, flags, s->order);
  827. if (!page)
  828. return NULL;
  829. mod_zone_page_state(page_zone(page),
  830. (s->flags & SLAB_RECLAIM_ACCOUNT) ?
  831. NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
  832. pages);
  833. return page;
  834. }
  835. static void setup_object(struct kmem_cache *s, struct page *page,
  836. void *object)
  837. {
  838. if (SlabDebug(page)) {
  839. init_object(s, object, 0);
  840. init_tracking(s, object);
  841. }
  842. if (unlikely(s->ctor))
  843. s->ctor(object, s, SLAB_CTOR_CONSTRUCTOR);
  844. }
  845. static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
  846. {
  847. struct page *page;
  848. struct kmem_cache_node *n;
  849. void *start;
  850. void *end;
  851. void *last;
  852. void *p;
  853. BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
  854. if (flags & __GFP_WAIT)
  855. local_irq_enable();
  856. page = allocate_slab(s, flags & GFP_LEVEL_MASK, node);
  857. if (!page)
  858. goto out;
  859. n = get_node(s, page_to_nid(page));
  860. if (n)
  861. atomic_long_inc(&n->nr_slabs);
  862. page->offset = s->offset / sizeof(void *);
  863. page->slab = s;
  864. page->flags |= 1 << PG_slab;
  865. if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
  866. SLAB_STORE_USER | SLAB_TRACE))
  867. SetSlabDebug(page);
  868. start = page_address(page);
  869. end = start + s->objects * s->size;
  870. if (unlikely(s->flags & SLAB_POISON))
  871. memset(start, POISON_INUSE, PAGE_SIZE << s->order);
  872. last = start;
  873. for_each_object(p, s, start) {
  874. setup_object(s, page, last);
  875. set_freepointer(s, last, p);
  876. last = p;
  877. }
  878. setup_object(s, page, last);
  879. set_freepointer(s, last, NULL);
  880. page->freelist = start;
  881. page->inuse = 0;
  882. out:
  883. if (flags & __GFP_WAIT)
  884. local_irq_disable();
  885. return page;
  886. }
  887. static void __free_slab(struct kmem_cache *s, struct page *page)
  888. {
  889. int pages = 1 << s->order;
  890. if (unlikely(SlabDebug(page) || s->dtor)) {
  891. void *p;
  892. slab_pad_check(s, page);
  893. for_each_object(p, s, page_address(page)) {
  894. if (s->dtor)
  895. s->dtor(p, s, 0);
  896. check_object(s, page, p, 0);
  897. }
  898. }
  899. mod_zone_page_state(page_zone(page),
  900. (s->flags & SLAB_RECLAIM_ACCOUNT) ?
  901. NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
  902. - pages);
  903. page->mapping = NULL;
  904. __free_pages(page, s->order);
  905. }
  906. static void rcu_free_slab(struct rcu_head *h)
  907. {
  908. struct page *page;
  909. page = container_of((struct list_head *)h, struct page, lru);
  910. __free_slab(page->slab, page);
  911. }
  912. static void free_slab(struct kmem_cache *s, struct page *page)
  913. {
  914. if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
  915. /*
  916. * RCU free overloads the RCU head over the LRU
  917. */
  918. struct rcu_head *head = (void *)&page->lru;
  919. call_rcu(head, rcu_free_slab);
  920. } else
  921. __free_slab(s, page);
  922. }
  923. static void discard_slab(struct kmem_cache *s, struct page *page)
  924. {
  925. struct kmem_cache_node *n = get_node(s, page_to_nid(page));
  926. atomic_long_dec(&n->nr_slabs);
  927. reset_page_mapcount(page);
  928. ClearSlabDebug(page);
  929. __ClearPageSlab(page);
  930. free_slab(s, page);
  931. }
  932. /*
  933. * Per slab locking using the pagelock
  934. */
  935. static __always_inline void slab_lock(struct page *page)
  936. {
  937. bit_spin_lock(PG_locked, &page->flags);
  938. }
  939. static __always_inline void slab_unlock(struct page *page)
  940. {
  941. bit_spin_unlock(PG_locked, &page->flags);
  942. }
  943. static __always_inline int slab_trylock(struct page *page)
  944. {
  945. int rc = 1;
  946. rc = bit_spin_trylock(PG_locked, &page->flags);
  947. return rc;
  948. }
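/*
 * Note that the slab lock is just a bit spinlock on PG_locked in
 * page->flags, so it needs no storage of its own and only touches the
 * page struct cacheline that the fast paths write anyway.
 */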
  949. /*
  950. * Management of partially allocated slabs
  951. */
  952. static void add_partial_tail(struct kmem_cache_node *n, struct page *page)
  953. {
  954. spin_lock(&n->list_lock);
  955. n->nr_partial++;
  956. list_add_tail(&page->lru, &n->partial);
  957. spin_unlock(&n->list_lock);
  958. }
  959. static void add_partial(struct kmem_cache_node *n, struct page *page)
  960. {
  961. spin_lock(&n->list_lock);
  962. n->nr_partial++;
  963. list_add(&page->lru, &n->partial);
  964. spin_unlock(&n->list_lock);
  965. }
  966. static void remove_partial(struct kmem_cache *s,
  967. struct page *page)
  968. {
  969. struct kmem_cache_node *n = get_node(s, page_to_nid(page));
  970. spin_lock(&n->list_lock);
  971. list_del(&page->lru);
  972. n->nr_partial--;
  973. spin_unlock(&n->list_lock);
  974. }
  975. /*
  976. * Lock slab and remove from the partial list.
  977. *
  978. * Must hold list_lock.
  979. */
  980. static int lock_and_del_slab(struct kmem_cache_node *n, struct page *page)
  981. {
  982. if (slab_trylock(page)) {
  983. list_del(&page->lru);
  984. n->nr_partial--;
  985. return 1;
  986. }
  987. return 0;
  988. }
  989. /*
  990. * Try to allocate a partial slab from a specific node.
  991. */
  992. static struct page *get_partial_node(struct kmem_cache_node *n)
  993. {
  994. struct page *page;
  995. /*
  996. * Racy check. If we mistakenly see no partial slabs then we
  997. * just allocate an empty slab. If we mistakenly try to get a
  998. * partial slab and there is none available then get_partial_node()
  999. * will return NULL.
  1000. */
  1001. if (!n || !n->nr_partial)
  1002. return NULL;
  1003. spin_lock(&n->list_lock);
  1004. list_for_each_entry(page, &n->partial, lru)
  1005. if (lock_and_del_slab(n, page))
  1006. goto out;
  1007. page = NULL;
  1008. out:
  1009. spin_unlock(&n->list_lock);
  1010. return page;
  1011. }
  1012. /*
  1013. * Get a page from somewhere. Search in increasing NUMA distances.
  1014. */
  1015. static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
  1016. {
  1017. #ifdef CONFIG_NUMA
  1018. struct zonelist *zonelist;
  1019. struct zone **z;
  1020. struct page *page;
  1021. /*
  1022. * The defrag ratio allows a configuration of the tradeoffs between
  1023. * inter node defragmentation and node local allocations. A lower
  1024. * defrag_ratio increases the tendency to do local allocations
  1025. * instead of attempting to obtain partial slabs from other nodes.
  1026. *
  1027. * If the defrag_ratio is set to 0 then kmalloc() always
  1028. * returns node local objects. If the ratio is higher then kmalloc()
  1029. * may return off node objects because partial slabs are obtained
  1030. * from other nodes and filled up.
  1031. *
  1032. * If /sys/slab/xx/defrag_ratio is set to 100 (which makes
  1033. * defrag_ratio = 1000) then every (well almost) allocation will
  1034. * first attempt to defrag slab caches on other nodes. This means
  1035. * scanning over all nodes to look for partial slabs which may be
  1036. * expensive if we do it every time we are trying to find a slab
  1037. * with available objects.
  1038. */
  1039. if (!s->defrag_ratio || get_cycles() % 1024 > s->defrag_ratio)
  1040. return NULL;
  1041. zonelist = &NODE_DATA(slab_node(current->mempolicy))
  1042. ->node_zonelists[gfp_zone(flags)];
  1043. for (z = zonelist->zones; *z; z++) {
  1044. struct kmem_cache_node *n;
  1045. n = get_node(s, zone_to_nid(*z));
  1046. if (n && cpuset_zone_allowed_hardwall(*z, flags) &&
  1047. n->nr_partial > MIN_PARTIAL) {
  1048. page = get_partial_node(n);
  1049. if (page)
  1050. return page;
  1051. }
  1052. }
  1053. #endif
  1054. return NULL;
  1055. }
  1056. /*
  1057. * Get a partial page, lock it and return it.
  1058. */
  1059. static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
  1060. {
  1061. struct page *page;
  1062. int searchnode = (node == -1) ? numa_node_id() : node;
  1063. page = get_partial_node(get_node(s, searchnode));
  1064. if (page || (flags & __GFP_THISNODE))
  1065. return page;
  1066. return get_any_partial(s, flags);
  1067. }
  1068. /*
  1069. * Move a page back to the lists.
  1070. *
  1071. * Must be called with the slab lock held.
  1072. *
  1073. * On exit the slab lock will have been dropped.
  1074. */
  1075. static void putback_slab(struct kmem_cache *s, struct page *page)
  1076. {
  1077. struct kmem_cache_node *n = get_node(s, page_to_nid(page));
  1078. if (page->inuse) {
  1079. if (page->freelist)
  1080. add_partial(n, page);
  1081. else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
  1082. add_full(n, page);
  1083. slab_unlock(page);
  1084. } else {
  1085. if (n->nr_partial < MIN_PARTIAL) {
  1086. /*
  1087. * Adding an empty slab to the partial slabs in order
  1088. * to avoid page allocator overhead. This slab needs
  1089. * to come after the other slabs with objects in
  1090. * order to fill them up. That way the size of the
  1091. * partial list stays small. kmem_cache_shrink can
  1092. * reclaim empty slabs from the partial list.
  1093. */
  1094. add_partial_tail(n, page);
  1095. slab_unlock(page);
  1096. } else {
  1097. slab_unlock(page);
  1098. discard_slab(s, page);
  1099. }
  1100. }
  1101. }
  1102. /*
  1103. * Remove the cpu slab
  1104. */
  1105. static void deactivate_slab(struct kmem_cache *s, struct page *page, int cpu)
  1106. {
  1107. s->cpu_slab[cpu] = NULL;
  1108. ClearPageActive(page);
  1109. putback_slab(s, page);
  1110. }
  1111. static void flush_slab(struct kmem_cache *s, struct page *page, int cpu)
  1112. {
  1113. slab_lock(page);
  1114. deactivate_slab(s, page, cpu);
  1115. }
  1116. /*
  1117. * Flush cpu slab.
  1118. * Called from IPI handler with interrupts disabled.
  1119. */
  1120. static void __flush_cpu_slab(struct kmem_cache *s, int cpu)
  1121. {
  1122. struct page *page = s->cpu_slab[cpu];
  1123. if (likely(page))
  1124. flush_slab(s, page, cpu);
  1125. }
  1126. static void flush_cpu_slab(void *d)
  1127. {
  1128. struct kmem_cache *s = d;
  1129. int cpu = smp_processor_id();
  1130. __flush_cpu_slab(s, cpu);
  1131. }
  1132. static void flush_all(struct kmem_cache *s)
  1133. {
  1134. #ifdef CONFIG_SMP
  1135. on_each_cpu(flush_cpu_slab, s, 1, 1);
  1136. #else
  1137. unsigned long flags;
  1138. local_irq_save(flags);
  1139. flush_cpu_slab(s);
  1140. local_irq_restore(flags);
  1141. #endif
  1142. }
  1143. /*
  1144. * slab_alloc is optimized to only modify two cachelines on the fast path
  1145. * (aside from the stack):
  1146. *
  1147. * 1. The page struct
  1148. * 2. The first cacheline of the object to be allocated.
  1149. *
  1150. * The only other cacheline that is read (apart from code) is the
  1151. * per cpu array in the kmem_cache struct.
  1152. *
  1153. * Fastpath is not possible if we need to get a new slab or have
  1154. * debugging enabled (which means all slabs are marked with SlabDebug)
  1155. */
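/*
 * Rough control flow of slab_alloc(): the fast path pops the first object
 * off the cpu slab's freelist. If there is no cpu slab, the slab is on the
 * wrong node, its freelist is empty, or SlabDebug is set, we fall into the
 * slow path: deactivate the current cpu slab, try to grab a partial slab
 * via get_partial() and, as a last resort, allocate a new slab.
 */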
  1156. static void *slab_alloc(struct kmem_cache *s,
  1157. gfp_t gfpflags, int node, void *addr)
  1158. {
  1159. struct page *page;
  1160. void **object;
  1161. unsigned long flags;
  1162. int cpu;
  1163. local_irq_save(flags);
  1164. cpu = smp_processor_id();
  1165. page = s->cpu_slab[cpu];
  1166. if (!page)
  1167. goto new_slab;
  1168. slab_lock(page);
  1169. if (unlikely(node != -1 && page_to_nid(page) != node))
  1170. goto another_slab;
  1171. redo:
  1172. object = page->freelist;
  1173. if (unlikely(!object))
  1174. goto another_slab;
  1175. if (unlikely(SlabDebug(page)))
  1176. goto debug;
  1177. have_object:
  1178. page->inuse++;
  1179. page->freelist = object[page->offset];
  1180. slab_unlock(page);
  1181. local_irq_restore(flags);
  1182. return object;
  1183. another_slab:
  1184. deactivate_slab(s, page, cpu);
  1185. new_slab:
  1186. page = get_partial(s, gfpflags, node);
  1187. if (likely(page)) {
  1188. have_slab:
  1189. s->cpu_slab[cpu] = page;
  1190. SetPageActive(page);
  1191. goto redo;
  1192. }
  1193. page = new_slab(s, gfpflags, node);
  1194. if (page) {
  1195. cpu = smp_processor_id();
  1196. if (s->cpu_slab[cpu]) {
  1197. /*
  1198. * Someone else populated the cpu_slab while we
  1199. * enabled interrupts, or we have gotten scheduled
  1200. * on another cpu. The page may not be on the
  1201. * requested node even if __GFP_THISNODE was
  1202. * specified. So we need to recheck.
  1203. */
  1204. if (node == -1 ||
  1205. page_to_nid(s->cpu_slab[cpu]) == node) {
  1206. /*
  1207. * Current cpuslab is acceptable and we
  1208. * want the current one since it is cache hot
  1209. */
  1210. discard_slab(s, page);
  1211. page = s->cpu_slab[cpu];
  1212. slab_lock(page);
  1213. goto redo;
  1214. }
  1215. /* New slab does not fit our expectations */
  1216. flush_slab(s, s->cpu_slab[cpu], cpu);
  1217. }
  1218. slab_lock(page);
  1219. goto have_slab;
  1220. }
  1221. local_irq_restore(flags);
  1222. return NULL;
  1223. debug:
  1224. if (!alloc_object_checks(s, page, object))
  1225. goto another_slab;
  1226. if (s->flags & SLAB_STORE_USER)
  1227. set_track(s, object, TRACK_ALLOC, addr);
  1228. trace(s, page, object, 1);
  1229. init_object(s, object, 1);
  1230. goto have_object;
  1231. }
  1232. void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
  1233. {
  1234. return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
  1235. }
  1236. EXPORT_SYMBOL(kmem_cache_alloc);
  1237. #ifdef CONFIG_NUMA
  1238. void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
  1239. {
  1240. return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
  1241. }
  1242. EXPORT_SYMBOL(kmem_cache_alloc_node);
  1243. #endif
  1244. /*
  1245. * The fastpath only writes the cacheline of the page struct and the first
  1246. * cacheline of the object.
  1247. *
  1248. * We read the cpu_slab cacheline to check if the slab is the per cpu
  1249. * slab for this processor.
  1250. */
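/*
 * Rough control flow of slab_free(): the object is pushed back onto the
 * slab's freelist. If the slab is the active cpu slab nothing else needs
 * to be done. If the slab was full (no prior freelist) it is put back on
 * the partial list, and if this free emptied the slab it is removed from
 * the partial list and discarded.
 */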
  1251. static void slab_free(struct kmem_cache *s, struct page *page,
  1252. void *x, void *addr)
  1253. {
  1254. void *prior;
  1255. void **object = (void *)x;
  1256. unsigned long flags;
  1257. local_irq_save(flags);
  1258. slab_lock(page);
  1259. if (unlikely(SlabDebug(page)))
  1260. goto debug;
  1261. checks_ok:
  1262. prior = object[page->offset] = page->freelist;
  1263. page->freelist = object;
  1264. page->inuse--;
  1265. if (unlikely(PageActive(page)))
  1266. /*
  1267. * Cpu slabs are never on partial lists and are
  1268. * never freed.
  1269. */
  1270. goto out_unlock;
  1271. if (unlikely(!page->inuse))
  1272. goto slab_empty;
  1273. /*
  1274. * Objects left in the slab. If it
  1275. * was not on the partial list before
  1276. * then add it.
  1277. */
  1278. if (unlikely(!prior))
  1279. add_partial(get_node(s, page_to_nid(page)), page);
  1280. out_unlock:
  1281. slab_unlock(page);
  1282. local_irq_restore(flags);
  1283. return;
  1284. slab_empty:
  1285. if (prior)
  1286. /*
  1287. * Slab still on the partial list.
  1288. */
  1289. remove_partial(s, page);
  1290. slab_unlock(page);
  1291. discard_slab(s, page);
  1292. local_irq_restore(flags);
  1293. return;
  1294. debug:
  1295. if (!free_object_checks(s, page, x))
  1296. goto out_unlock;
  1297. if (!PageActive(page) && !page->freelist)
  1298. remove_full(s, page);
  1299. if (s->flags & SLAB_STORE_USER)
  1300. set_track(s, x, TRACK_FREE, addr);
  1301. trace(s, page, object, 0);
  1302. init_object(s, object, 0);
  1303. goto checks_ok;
  1304. }
  1305. void kmem_cache_free(struct kmem_cache *s, void *x)
  1306. {
  1307. struct page *page;
  1308. page = virt_to_head_page(x);
  1309. slab_free(s, page, x, __builtin_return_address(0));
  1310. }
  1311. EXPORT_SYMBOL(kmem_cache_free);
1312. /* Figure out on which slab page the object resides */
  1313. static struct page *get_object_page(const void *x)
  1314. {
  1315. struct page *page = virt_to_head_page(x);
  1316. if (!PageSlab(page))
  1317. return NULL;
  1318. return page;
  1319. }
  1320. /*
  1321. * Object placement in a slab is made very easy because we always start at
  1322. * offset 0. If we tune the size of the object to the alignment then we can
  1323. * get the required alignment by putting one properly sized object after
  1324. * another.
  1325. *
  1326. * Notice that the allocation order determines the sizes of the per cpu
1327. * caches. Each processor always has one slab available for allocations.
  1328. * Increasing the allocation order reduces the number of times that slabs
  1329. * must be moved on and off the partial lists and is therefore a factor in
  1330. * locking overhead.
  1331. */
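/*
 * Illustrative example (assumed numbers, not derived from this file): with
 * 8 byte alignment a 44 byte object is sized up to 48 bytes, so objects sit
 * at offsets 0, 48, 96, ... from the start of the slab and every object is
 * naturally aligned without any per-object padding bookkeeping.
 */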
  1332. /*
1333. * Minimum / Maximum order of slab pages. This influences locking overhead
  1334. * and slab fragmentation. A higher order reduces the number of partial slabs
  1335. * and increases the number of allocations possible without having to
  1336. * take the list_lock.
  1337. */
  1338. static int slub_min_order;
  1339. static int slub_max_order = DEFAULT_MAX_ORDER;
  1340. static int slub_min_objects = DEFAULT_MIN_OBJECTS;
  1341. /*
  1342. * Merge control. If this is set then no merging of slab caches will occur.
  1343. * (Could be removed. This was introduced to pacify the merge skeptics.)
  1344. */
  1345. static int slub_nomerge;
  1346. /*
1347. * Calculate the order of allocation given a slab object size.
  1348. *
  1349. * The order of allocation has significant impact on performance and other
  1350. * system components. Generally order 0 allocations should be preferred since
1351. * order 0 does not cause fragmentation in the page allocator. Larger objects
1352. * can be problematic to put into order 0 slabs because there may be too much
  1353. * unused space left. We go to a higher order if more than 1/8th of the slab
  1354. * would be wasted.
  1355. *
  1356. * In order to reach satisfactory performance we must ensure that a minimum
  1357. * number of objects is in one slab. Otherwise we may generate too much
  1358. * activity on the partial lists which requires taking the list_lock. This is
1359. * less of a concern for large slabs though, which are rarely used.
  1360. *
  1361. * slub_max_order specifies the order where we begin to stop considering the
  1362. * number of objects in a slab as critical. If we reach slub_max_order then
  1363. * we try to keep the page order as low as possible. So we accept more waste
  1364. * of space in favor of a small page order.
  1365. *
  1366. * Higher order allocations also allow the placement of more objects in a
  1367. * slab and thereby reduce object handling overhead. If the user has
1368. * requested a higher minimum order then we start with that one instead of
  1369. * the smallest order which will fit the object.
  1370. */
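/*
 * Worked example (assuming PAGE_SIZE == 4096): for size == 192 and
 * min_objects == 8, the 1536 bytes needed fit into an order 0 page. The
 * leftover of 4096 % 192 == 64 bytes is below 4096 / 8 == 512, so
 * slab_order() settles on order 0 with 21 objects per slab.
 */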
  1371. static inline int slab_order(int size, int min_objects,
  1372. int max_order, int fract_leftover)
  1373. {
  1374. int order;
  1375. int rem;
  1376. for (order = max(slub_min_order,
  1377. fls(min_objects * size - 1) - PAGE_SHIFT);
  1378. order <= max_order; order++) {
  1379. unsigned long slab_size = PAGE_SIZE << order;
  1380. if (slab_size < min_objects * size)
  1381. continue;
  1382. rem = slab_size % size;
  1383. if (rem <= slab_size / fract_leftover)
  1384. break;
  1385. }
  1386. return order;
  1387. }
  1388. static inline int calculate_order(int size)
  1389. {
  1390. int order;
  1391. int min_objects;
  1392. int fraction;
  1393. /*
  1394. * Attempt to find best configuration for a slab. This
  1395. * works by first attempting to generate a layout with
  1396. * the best configuration and backing off gradually.
  1397. *
  1398. * First we reduce the acceptable waste in a slab. Then
  1399. * we reduce the minimum objects required in a slab.
  1400. */
  1401. min_objects = slub_min_objects;
  1402. while (min_objects > 1) {
  1403. fraction = 8;
  1404. while (fraction >= 4) {
  1405. order = slab_order(size, min_objects,
  1406. slub_max_order, fraction);
  1407. if (order <= slub_max_order)
  1408. return order;
  1409. fraction /= 2;
  1410. }
  1411. min_objects /= 2;
  1412. }
  1413. /*
  1414. * We were unable to place multiple objects in a slab. Now
1415. * let's see if we can place a single object there.
  1416. */
  1417. order = slab_order(size, 1, slub_max_order, 1);
  1418. if (order <= slub_max_order)
  1419. return order;
  1420. /*
1421. * Doh, this slab cannot be placed using slub_max_order.
  1422. */
  1423. order = slab_order(size, 1, MAX_ORDER, 1);
  1424. if (order <= MAX_ORDER)
  1425. return order;
  1426. return -ENOSYS;
  1427. }
  1428. /*
  1429. * Figure out what the alignment of the objects will be.
  1430. */
  1431. static unsigned long calculate_alignment(unsigned long flags,
  1432. unsigned long align, unsigned long size)
  1433. {
  1434. /*
  1435. * If the user wants hardware cache aligned objects then
  1436. * follow that suggestion if the object is sufficiently
  1437. * large.
  1438. *
  1439. * The hardware cache alignment cannot override the
1440. * specified alignment though. If that is greater,
1441. * use it.
  1442. */
  1443. if ((flags & SLAB_HWCACHE_ALIGN) &&
  1444. size > cache_line_size() / 2)
  1445. return max_t(unsigned long, align, cache_line_size());
  1446. if (align < ARCH_SLAB_MINALIGN)
  1447. return ARCH_SLAB_MINALIGN;
  1448. return ALIGN(align, sizeof(void *));
  1449. }
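/*
 * Example (assuming a 64 byte cache line): a 100 byte object created with
 * SLAB_HWCACHE_ALIGN is aligned to 64 bytes because it is larger than half
 * a cache line, while a 24 byte object keeps the requested alignment
 * (at least ARCH_SLAB_MINALIGN) since padding it out to a full cache line
 * would waste most of the line.
 */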
  1450. static void init_kmem_cache_node(struct kmem_cache_node *n)
  1451. {
  1452. n->nr_partial = 0;
  1453. atomic_long_set(&n->nr_slabs, 0);
  1454. spin_lock_init(&n->list_lock);
  1455. INIT_LIST_HEAD(&n->partial);
  1456. INIT_LIST_HEAD(&n->full);
  1457. }
  1458. #ifdef CONFIG_NUMA
  1459. /*
  1460. * No kmalloc_node yet so do it by hand. We know that this is the first
  1461. * slab on the node for this slabcache. There are no concurrent accesses
  1462. * possible.
  1463. *
  1464. * Note that this function only works on the kmalloc_node_cache
  1465. * when allocating for the kmalloc_node_cache.
  1466. */
  1467. static struct kmem_cache_node * __init early_kmem_cache_node_alloc(gfp_t gfpflags,
  1468. int node)
  1469. {
  1470. struct page *page;
  1471. struct kmem_cache_node *n;
  1472. BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
  1473. page = new_slab(kmalloc_caches, gfpflags | GFP_THISNODE, node);
1474. /* new_slab() disables interrupts */
  1475. local_irq_enable();
  1476. BUG_ON(!page);
  1477. n = page->freelist;
  1478. BUG_ON(!n);
  1479. page->freelist = get_freepointer(kmalloc_caches, n);
  1480. page->inuse++;
  1481. kmalloc_caches->node[node] = n;
  1482. init_object(kmalloc_caches, n, 1);
  1483. init_kmem_cache_node(n);
  1484. atomic_long_inc(&n->nr_slabs);
  1485. add_partial(n, page);
  1486. return n;
  1487. }
  1488. static void free_kmem_cache_nodes(struct kmem_cache *s)
  1489. {
  1490. int node;
  1491. for_each_online_node(node) {
  1492. struct kmem_cache_node *n = s->node[node];
  1493. if (n && n != &s->local_node)
  1494. kmem_cache_free(kmalloc_caches, n);
  1495. s->node[node] = NULL;
  1496. }
  1497. }
  1498. static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
  1499. {
  1500. int node;
  1501. int local_node;
  1502. if (slab_state >= UP)
  1503. local_node = page_to_nid(virt_to_page(s));
  1504. else
  1505. local_node = 0;
  1506. for_each_online_node(node) {
  1507. struct kmem_cache_node *n;
  1508. if (local_node == node)
  1509. n = &s->local_node;
  1510. else {
  1511. if (slab_state == DOWN) {
  1512. n = early_kmem_cache_node_alloc(gfpflags,
  1513. node);
  1514. continue;
  1515. }
  1516. n = kmem_cache_alloc_node(kmalloc_caches,
  1517. gfpflags, node);
  1518. if (!n) {
  1519. free_kmem_cache_nodes(s);
  1520. return 0;
  1521. }
  1522. }
  1523. s->node[node] = n;
  1524. init_kmem_cache_node(n);
  1525. }
  1526. return 1;
  1527. }
  1528. #else
  1529. static void free_kmem_cache_nodes(struct kmem_cache *s)
  1530. {
  1531. }
  1532. static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
  1533. {
  1534. init_kmem_cache_node(&s->local_node);
  1535. return 1;
  1536. }
  1537. #endif
  1538. /*
  1539. * calculate_sizes() determines the order and the distribution of data within
  1540. * a slab object.
  1541. */
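/*
 * Worked example (an illustration assuming 8 byte pointers, 4 KiB pages and
 * no debug flags): an objsize of 44 with a constructor is first rounded up
 * to 48 bytes. Because a constructor is present the free pointer may not
 * share the first word of the object, so it is placed at offset 48 and the
 * object grows to 56 bytes. With 8 byte alignment s->size stays at 56 and
 * an order 0 slab holds 4096 / 56 == 73 objects with 8 bytes left over.
 */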
  1542. static int calculate_sizes(struct kmem_cache *s)
  1543. {
  1544. unsigned long flags = s->flags;
  1545. unsigned long size = s->objsize;
  1546. unsigned long align = s->align;
  1547. /*
  1548. * Determine if we can poison the object itself. If the user of
  1549. * the slab may touch the object after free or before allocation
  1550. * then we should never poison the object itself.
  1551. */
  1552. if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
  1553. !s->ctor && !s->dtor)
  1554. s->flags |= __OBJECT_POISON;
  1555. else
  1556. s->flags &= ~__OBJECT_POISON;
  1557. /*
  1558. * Round up object size to the next word boundary. We can only
  1559. * place the free pointer at word boundaries and this determines
  1560. * the possible location of the free pointer.
  1561. */
  1562. size = ALIGN(size, sizeof(void *));
  1563. #ifdef CONFIG_SLUB_DEBUG
  1564. /*
  1565. * If we are Redzoning then check if there is some space between the
  1566. * end of the object and the free pointer. If not then add an
  1567. * additional word to have some bytes to store Redzone information.
  1568. */
  1569. if ((flags & SLAB_RED_ZONE) && size == s->objsize)
  1570. size += sizeof(void *);
  1571. #endif
  1572. /*
  1573. * With that we have determined the number of bytes in actual use
  1574. * by the object. This is the potential offset to the free pointer.
  1575. */
  1576. s->inuse = size;
  1577. #ifdef CONFIG_SLUB_DEBUG
  1578. if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
  1579. s->ctor || s->dtor)) {
  1580. /*
  1581. * Relocate free pointer after the object if it is not
  1582. * permitted to overwrite the first word of the object on
  1583. * kmem_cache_free.
  1584. *
  1585. * This is the case if we do RCU, have a constructor or
  1586. * destructor or are poisoning the objects.
  1587. */
  1588. s->offset = size;
  1589. size += sizeof(void *);
  1590. }
  1591. if (flags & SLAB_STORE_USER)
  1592. /*
  1593. * Need to store information about allocs and frees after
  1594. * the object.
  1595. */
  1596. size += 2 * sizeof(struct track);
  1597. if (flags & SLAB_RED_ZONE)
  1598. /*
  1599. * Add some empty padding so that we can catch
  1600. * overwrites from earlier objects rather than let
  1601. * tracking information or the free pointer be
1602. * corrupted if a user writes before the start
  1603. * of the object.
  1604. */
  1605. size += sizeof(void *);
  1606. #endif
  1607. /*
  1608. * Determine the alignment based on various parameters that the
  1609. * user specified and the dynamic determination of cache line size
  1610. * on bootup.
  1611. */
  1612. align = calculate_alignment(flags, align, s->objsize);
  1613. /*
  1614. * SLUB stores one object immediately after another beginning from
  1615. * offset 0. In order to align the objects we have to simply size
  1616. * each object to conform to the alignment.
  1617. */
  1618. size = ALIGN(size, align);
  1619. s->size = size;
  1620. s->order = calculate_order(size);
  1621. if (s->order < 0)
  1622. return 0;
  1623. /*
  1624. * Determine the number of objects per slab
  1625. */
  1626. s->objects = (PAGE_SIZE << s->order) / size;
  1627. /*
  1628. * Verify that the number of objects is within permitted limits.
  1629. * The page->inuse field is only 16 bit wide! So we cannot have
  1630. * more than 64k objects per slab.
  1631. */
  1632. if (!s->objects || s->objects > 65535)
  1633. return 0;
  1634. return 1;
  1635. }
  1636. static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
  1637. const char *name, size_t size,
  1638. size_t align, unsigned long flags,
  1639. void (*ctor)(void *, struct kmem_cache *, unsigned long),
  1640. void (*dtor)(void *, struct kmem_cache *, unsigned long))
  1641. {
  1642. memset(s, 0, kmem_size);
  1643. s->name = name;
  1644. s->ctor = ctor;
  1645. s->dtor = dtor;
  1646. s->objsize = size;
  1647. s->flags = flags;
  1648. s->align = align;
  1649. kmem_cache_open_debug_check(s);
  1650. if (!calculate_sizes(s))
  1651. goto error;
  1652. s->refcount = 1;
  1653. #ifdef CONFIG_NUMA
  1654. s->defrag_ratio = 100;
  1655. #endif
  1656. if (init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
  1657. return 1;
  1658. error:
  1659. if (flags & SLAB_PANIC)
  1660. panic("Cannot create slab %s size=%lu realsize=%u "
  1661. "order=%u offset=%u flags=%lx\n",
  1662. s->name, (unsigned long)size, s->size, s->order,
  1663. s->offset, flags);
  1664. return 0;
  1665. }
  1666. EXPORT_SYMBOL(kmem_cache_open);
  1667. /*
  1668. * Check if a given pointer is valid
  1669. */
  1670. int kmem_ptr_validate(struct kmem_cache *s, const void *object)
  1671. {
  1672. struct page * page;
  1673. page = get_object_page(object);
  1674. if (!page || s != page->slab)
  1675. /* No slab or wrong slab */
  1676. return 0;
  1677. if (!check_valid_pointer(s, page, object))
  1678. return 0;
  1679. /*
1680. * We could also check if the object is on the slab's freelist.
  1681. * But this would be too expensive and it seems that the main
  1682. * purpose of kmem_ptr_valid is to check if the object belongs
  1683. * to a certain slab.
  1684. */
  1685. return 1;
  1686. }
  1687. EXPORT_SYMBOL(kmem_ptr_validate);
  1688. /*
  1689. * Determine the size of a slab object
  1690. */
  1691. unsigned int kmem_cache_size(struct kmem_cache *s)
  1692. {
  1693. return s->objsize;
  1694. }
  1695. EXPORT_SYMBOL(kmem_cache_size);
  1696. const char *kmem_cache_name(struct kmem_cache *s)
  1697. {
  1698. return s->name;
  1699. }
  1700. EXPORT_SYMBOL(kmem_cache_name);
  1701. /*
  1702. * Attempt to free all slabs on a node. Return the number of slabs we
  1703. * were unable to free.
  1704. */
  1705. static int free_list(struct kmem_cache *s, struct kmem_cache_node *n,
  1706. struct list_head *list)
  1707. {
  1708. int slabs_inuse = 0;
  1709. unsigned long flags;
  1710. struct page *page, *h;
  1711. spin_lock_irqsave(&n->list_lock, flags);
  1712. list_for_each_entry_safe(page, h, list, lru)
  1713. if (!page->inuse) {
  1714. list_del(&page->lru);
  1715. discard_slab(s, page);
  1716. } else
  1717. slabs_inuse++;
  1718. spin_unlock_irqrestore(&n->list_lock, flags);
  1719. return slabs_inuse;
  1720. }
  1721. /*
  1722. * Release all resources used by a slab cache.
  1723. */
  1724. static int kmem_cache_close(struct kmem_cache *s)
  1725. {
  1726. int node;
  1727. flush_all(s);
  1728. /* Attempt to free all objects */
  1729. for_each_online_node(node) {
  1730. struct kmem_cache_node *n = get_node(s, node);
  1731. n->nr_partial -= free_list(s, n, &n->partial);
  1732. if (atomic_long_read(&n->nr_slabs))
  1733. return 1;
  1734. }
  1735. free_kmem_cache_nodes(s);
  1736. return 0;
  1737. }
  1738. /*
  1739. * Close a cache and release the kmem_cache structure
  1740. * (must be used for caches created using kmem_cache_create)
  1741. */
  1742. void kmem_cache_destroy(struct kmem_cache *s)
  1743. {
  1744. down_write(&slub_lock);
  1745. s->refcount--;
  1746. if (!s->refcount) {
  1747. list_del(&s->list);
  1748. if (kmem_cache_close(s))
  1749. WARN_ON(1);
  1750. sysfs_slab_remove(s);
  1751. kfree(s);
  1752. }
  1753. up_write(&slub_lock);
  1754. }
  1755. EXPORT_SYMBOL(kmem_cache_destroy);
  1756. /********************************************************************
  1757. * Kmalloc subsystem
  1758. *******************************************************************/
  1759. struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1] __cacheline_aligned;
  1760. EXPORT_SYMBOL(kmalloc_caches);
  1761. #ifdef CONFIG_ZONE_DMA
  1762. static struct kmem_cache *kmalloc_caches_dma[KMALLOC_SHIFT_HIGH + 1];
  1763. #endif
  1764. static int __init setup_slub_min_order(char *str)
  1765. {
  1766. get_option (&str, &slub_min_order);
  1767. return 1;
  1768. }
  1769. __setup("slub_min_order=", setup_slub_min_order);
  1770. static int __init setup_slub_max_order(char *str)
  1771. {
  1772. get_option (&str, &slub_max_order);
  1773. return 1;
  1774. }
  1775. __setup("slub_max_order=", setup_slub_max_order);
  1776. static int __init setup_slub_min_objects(char *str)
  1777. {
  1778. get_option (&str, &slub_min_objects);
  1779. return 1;
  1780. }
  1781. __setup("slub_min_objects=", setup_slub_min_objects);
  1782. static int __init setup_slub_nomerge(char *str)
  1783. {
  1784. slub_nomerge = 1;
  1785. return 1;
  1786. }
  1787. __setup("slub_nomerge", setup_slub_nomerge);
  1788. static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
  1789. const char *name, int size, gfp_t gfp_flags)
  1790. {
  1791. unsigned int flags = 0;
  1792. if (gfp_flags & SLUB_DMA)
  1793. flags = SLAB_CACHE_DMA;
  1794. down_write(&slub_lock);
  1795. if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
  1796. flags, NULL, NULL))
  1797. goto panic;
  1798. list_add(&s->list, &slab_caches);
  1799. up_write(&slub_lock);
  1800. if (sysfs_slab_add(s))
  1801. goto panic;
  1802. return s;
  1803. panic:
  1804. panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
  1805. }
  1806. static struct kmem_cache *get_slab(size_t size, gfp_t flags)
  1807. {
  1808. int index = kmalloc_index(size);
  1809. if (!index)
  1810. return NULL;
  1811. /* Allocation too large? */
  1812. BUG_ON(index < 0);
  1813. #ifdef CONFIG_ZONE_DMA
  1814. if ((flags & SLUB_DMA)) {
  1815. struct kmem_cache *s;
  1816. struct kmem_cache *x;
  1817. char *text;
  1818. size_t realsize;
  1819. s = kmalloc_caches_dma[index];
  1820. if (s)
  1821. return s;
  1822. /* Dynamically create dma cache */
  1823. x = kmalloc(kmem_size, flags & ~SLUB_DMA);
  1824. if (!x)
  1825. panic("Unable to allocate memory for dma cache\n");
  1826. if (index <= KMALLOC_SHIFT_HIGH)
  1827. realsize = 1 << index;
  1828. else {
  1829. if (index == 1)
  1830. realsize = 96;
  1831. else
  1832. realsize = 192;
  1833. }
  1834. text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
  1835. (unsigned int)realsize);
  1836. s = create_kmalloc_cache(x, text, realsize, flags);
  1837. kmalloc_caches_dma[index] = s;
  1838. return s;
  1839. }
  1840. #endif
  1841. return &kmalloc_caches[index];
  1842. }
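/*
 * Example: kmalloc(100, GFP_KERNEL) is served from the kmalloc-128 cache
 * while kmalloc(70, GFP_KERNEL) falls into the odd-sized kmalloc-96 cache
 * set up in kmem_cache_init(). A DMA request (SLUB_DMA set) gets a matching
 * DMA cache created on first use by the code above.
 */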
  1843. void *__kmalloc(size_t size, gfp_t flags)
  1844. {
  1845. struct kmem_cache *s = get_slab(size, flags);
  1846. if (s)
  1847. return slab_alloc(s, flags, -1, __builtin_return_address(0));
  1848. return NULL;
  1849. }
  1850. EXPORT_SYMBOL(__kmalloc);
  1851. #ifdef CONFIG_NUMA
  1852. void *__kmalloc_node(size_t size, gfp_t flags, int node)
  1853. {
  1854. struct kmem_cache *s = get_slab(size, flags);
  1855. if (s)
  1856. return slab_alloc(s, flags, node, __builtin_return_address(0));
  1857. return NULL;
  1858. }
  1859. EXPORT_SYMBOL(__kmalloc_node);
  1860. #endif
  1861. size_t ksize(const void *object)
  1862. {
  1863. struct page *page = get_object_page(object);
  1864. struct kmem_cache *s;
  1865. BUG_ON(!page);
  1866. s = page->slab;
  1867. BUG_ON(!s);
  1868. /*
1869. * Debugging requires use of the padding between the object
  1870. * and whatever may come after it.
  1871. */
  1872. if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
  1873. return s->objsize;
  1874. /*
  1875. * If we have the need to store the freelist pointer
  1876. * back there or track user information then we can
  1877. * only use the space before that information.
  1878. */
  1879. if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
  1880. return s->inuse;
  1881. /*
  1882. * Else we can use all the padding etc for the allocation
  1883. */
  1884. return s->size;
  1885. }
  1886. EXPORT_SYMBOL(ksize);
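/*
 * Example (illustrative): for a cache with a 100 byte objsize, 8 byte
 * alignment and no debug flags, s->size is 104 and ksize() reports 104
 * usable bytes. Enabling SLAB_RED_ZONE or SLAB_POISON shrinks the answer
 * to the 100 byte objsize because the padding is then needed for debugging.
 */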
  1887. void kfree(const void *x)
  1888. {
  1889. struct kmem_cache *s;
  1890. struct page *page;
  1891. if (!x)
  1892. return;
  1893. page = virt_to_head_page(x);
  1894. s = page->slab;
  1895. slab_free(s, page, (void *)x, __builtin_return_address(0));
  1896. }
  1897. EXPORT_SYMBOL(kfree);
  1898. /*
  1899. * kmem_cache_shrink removes empty slabs from the partial lists and sorts
  1900. * the remaining slabs by the number of items in use. The slabs with the
  1901. * most items in use come first. New allocations will then fill those up
  1902. * and thus they can be removed from the partial lists.
  1903. *
  1904. * The slabs with the least items are placed last. This results in them
1905. * being allocated from last, increasing the chance that the last objects
  1906. * are freed in them.
  1907. */
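/*
 * Example (illustrative): with partial slabs holding 1, 6 and 3 objects,
 * the rebuilt partial list orders them 6, 3, 1 so that new allocations
 * refill the fullest slabs first, while a slab whose inuse count has
 * dropped to zero is discarded outright.
 */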
  1908. int kmem_cache_shrink(struct kmem_cache *s)
  1909. {
  1910. int node;
  1911. int i;
  1912. struct kmem_cache_node *n;
  1913. struct page *page;
  1914. struct page *t;
  1915. struct list_head *slabs_by_inuse =
  1916. kmalloc(sizeof(struct list_head) * s->objects, GFP_KERNEL);
  1917. unsigned long flags;
  1918. if (!slabs_by_inuse)
  1919. return -ENOMEM;
  1920. flush_all(s);
  1921. for_each_online_node(node) {
  1922. n = get_node(s, node);
  1923. if (!n->nr_partial)
  1924. continue;
  1925. for (i = 0; i < s->objects; i++)
  1926. INIT_LIST_HEAD(slabs_by_inuse + i);
  1927. spin_lock_irqsave(&n->list_lock, flags);
  1928. /*
  1929. * Build lists indexed by the items in use in each slab.
  1930. *
  1931. * Note that concurrent frees may occur while we hold the
  1932. * list_lock. page->inuse here is the upper limit.
  1933. */
  1934. list_for_each_entry_safe(page, t, &n->partial, lru) {
  1935. if (!page->inuse && slab_trylock(page)) {
  1936. /*
  1937. * Must hold slab lock here because slab_free
  1938. * may have freed the last object and be
  1939. * waiting to release the slab.
  1940. */
  1941. list_del(&page->lru);
  1942. n->nr_partial--;
  1943. slab_unlock(page);
  1944. discard_slab(s, page);
  1945. } else {
  1946. if (n->nr_partial > MAX_PARTIAL)
  1947. list_move(&page->lru,
  1948. slabs_by_inuse + page->inuse);
  1949. }
  1950. }
  1951. if (n->nr_partial <= MAX_PARTIAL)
  1952. goto out;
  1953. /*
  1954. * Rebuild the partial list with the slabs filled up most
  1955. * first and the least used slabs at the end.
  1956. */
  1957. for (i = s->objects - 1; i >= 0; i--)
  1958. list_splice(slabs_by_inuse + i, n->partial.prev);
  1959. out:
  1960. spin_unlock_irqrestore(&n->list_lock, flags);
  1961. }
  1962. kfree(slabs_by_inuse);
  1963. return 0;
  1964. }
  1965. EXPORT_SYMBOL(kmem_cache_shrink);
  1966. /**
  1967. * krealloc - reallocate memory. The contents will remain unchanged.
  1968. * @p: object to reallocate memory for.
  1969. * @new_size: how many bytes of memory are required.
  1970. * @flags: the type of memory to allocate.
  1971. *
  1972. * The contents of the object pointed to are preserved up to the
  1973. * lesser of the new and old sizes. If @p is %NULL, krealloc()
1974. * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
  1975. * %NULL pointer, the object pointed to is freed.
  1976. */
  1977. void *krealloc(const void *p, size_t new_size, gfp_t flags)
  1978. {
  1979. void *ret;
  1980. size_t ks;
  1981. if (unlikely(!p))
  1982. return kmalloc(new_size, flags);
  1983. if (unlikely(!new_size)) {
  1984. kfree(p);
  1985. return NULL;
  1986. }
  1987. ks = ksize(p);
  1988. if (ks >= new_size)
  1989. return (void *)p;
  1990. ret = kmalloc(new_size, flags);
  1991. if (ret) {
  1992. memcpy(ret, p, min(new_size, ks));
  1993. kfree(p);
  1994. }
  1995. return ret;
  1996. }
  1997. EXPORT_SYMBOL(krealloc);
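/*
 * Typical usage (a sketch, not taken from a caller in this file). On
 * failure the old buffer is left untouched, so keep the old pointer:
 *
 *	new = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!new)
 *		return -ENOMEM;
 *	buf = new;
 */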
  1998. /********************************************************************
  1999. * Basic setup of slabs
  2000. *******************************************************************/
  2001. void __init kmem_cache_init(void)
  2002. {
  2003. int i;
  2004. #ifdef CONFIG_NUMA
  2005. /*
  2006. * Must first have the slab cache available for the allocations of the
  2007. * struct kmem_cache_node's. There is special bootstrap code in
  2008. * kmem_cache_open for slab_state == DOWN.
  2009. */
  2010. create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
  2011. sizeof(struct kmem_cache_node), GFP_KERNEL);
  2012. #endif
  2013. /* Able to allocate the per node structures */
  2014. slab_state = PARTIAL;
  2015. /* Caches that are not of the two-to-the-power-of size */
  2016. create_kmalloc_cache(&kmalloc_caches[1],
  2017. "kmalloc-96", 96, GFP_KERNEL);
  2018. create_kmalloc_cache(&kmalloc_caches[2],
  2019. "kmalloc-192", 192, GFP_KERNEL);
  2020. for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
  2021. create_kmalloc_cache(&kmalloc_caches[i],
  2022. "kmalloc", 1 << i, GFP_KERNEL);
  2023. slab_state = UP;
  2024. /* Provide the correct kmalloc names now that the caches are up */
  2025. for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
2026. kmalloc_caches[i].name =
  2027. kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
  2028. #ifdef CONFIG_SMP
  2029. register_cpu_notifier(&slab_notifier);
  2030. #endif
  2031. if (nr_cpu_ids) /* Remove when nr_cpu_ids is fixed upstream ! */
  2032. kmem_size = offsetof(struct kmem_cache, cpu_slab)
  2033. + nr_cpu_ids * sizeof(struct page *);
  2034. printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
  2035. " Processors=%d, Nodes=%d\n",
  2036. KMALLOC_SHIFT_HIGH, cache_line_size(),
  2037. slub_min_order, slub_max_order, slub_min_objects,
  2038. nr_cpu_ids, nr_node_ids);
  2039. }
  2040. /*
  2041. * Find a mergeable slab cache
  2042. */
  2043. static int slab_unmergeable(struct kmem_cache *s)
  2044. {
  2045. if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
  2046. return 1;
  2047. if (s->ctor || s->dtor)
  2048. return 1;
  2049. return 0;
  2050. }
  2051. static struct kmem_cache *find_mergeable(size_t size,
  2052. size_t align, unsigned long flags,
  2053. void (*ctor)(void *, struct kmem_cache *, unsigned long),
  2054. void (*dtor)(void *, struct kmem_cache *, unsigned long))
  2055. {
  2056. struct list_head *h;
  2057. if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
  2058. return NULL;
  2059. if (ctor || dtor)
  2060. return NULL;
  2061. size = ALIGN(size, sizeof(void *));
  2062. align = calculate_alignment(flags, align, size);
  2063. size = ALIGN(size, align);
  2064. list_for_each(h, &slab_caches) {
  2065. struct kmem_cache *s =
  2066. container_of(h, struct kmem_cache, list);
  2067. if (slab_unmergeable(s))
  2068. continue;
  2069. if (size > s->size)
  2070. continue;
  2071. if (((flags | slub_debug) & SLUB_MERGE_SAME) !=
  2072. (s->flags & SLUB_MERGE_SAME))
  2073. continue;
  2074. /*
  2075. * Check if alignment is compatible.
  2076. * Courtesy of Adrian Drzewiecki
  2077. */
  2078. if ((s->size & ~(align -1)) != s->size)
  2079. continue;
  2080. if (s->size - size >= sizeof(void *))
  2081. continue;
  2082. return s;
  2083. }
  2084. return NULL;
  2085. }
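/*
 * Worked example (illustrative): a request for 60 byte objects with 8 byte
 * alignment, default flags and no ctor/dtor is first rounded up to 64
 * bytes. An existing 64 byte cache with the same debug flags then passes
 * every check above (64 <= 64, the size is a multiple of the alignment and
 * the size difference is below sizeof(void *)), so the new cache simply
 * becomes an alias of it.
 */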
  2086. struct kmem_cache *kmem_cache_create(const char *name, size_t size,
  2087. size_t align, unsigned long flags,
  2088. void (*ctor)(void *, struct kmem_cache *, unsigned long),
  2089. void (*dtor)(void *, struct kmem_cache *, unsigned long))
  2090. {
  2091. struct kmem_cache *s;
  2092. down_write(&slub_lock);
2093. s = find_mergeable(size, align, flags, ctor, dtor);
  2094. if (s) {
  2095. s->refcount++;
  2096. /*
  2097. * Adjust the object sizes so that we clear
  2098. * the complete object on kzalloc.
  2099. */
  2100. s->objsize = max(s->objsize, (int)size);
  2101. s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
  2102. if (sysfs_slab_alias(s, name))
  2103. goto err;
  2104. } else {
  2105. s = kmalloc(kmem_size, GFP_KERNEL);
  2106. if (s && kmem_cache_open(s, GFP_KERNEL, name,
  2107. size, align, flags, ctor, dtor)) {
  2108. if (sysfs_slab_add(s)) {
  2109. kfree(s);
  2110. goto err;
  2111. }
  2112. list_add(&s->list, &slab_caches);
  2113. } else
  2114. kfree(s);
  2115. }
  2116. up_write(&slub_lock);
  2117. return s;
  2118. err:
  2119. up_write(&slub_lock);
  2120. if (flags & SLAB_PANIC)
  2121. panic("Cannot create slabcache %s\n", name);
  2122. else
  2123. s = NULL;
  2124. return s;
  2125. }
  2126. EXPORT_SYMBOL(kmem_cache_create);
  2127. void *kmem_cache_zalloc(struct kmem_cache *s, gfp_t flags)
  2128. {
  2129. void *x;
  2130. x = slab_alloc(s, flags, -1, __builtin_return_address(0));
  2131. if (x)
  2132. memset(x, 0, s->objsize);
  2133. return x;
  2134. }
  2135. EXPORT_SYMBOL(kmem_cache_zalloc);
  2136. #ifdef CONFIG_SMP
  2137. static void for_all_slabs(void (*func)(struct kmem_cache *, int), int cpu)
  2138. {
  2139. struct list_head *h;
  2140. down_read(&slub_lock);
  2141. list_for_each(h, &slab_caches) {
  2142. struct kmem_cache *s =
  2143. container_of(h, struct kmem_cache, list);
  2144. func(s, cpu);
  2145. }
  2146. up_read(&slub_lock);
  2147. }
  2148. /*
2149. * Use the cpu notifier to ensure that the cpu slabs are flushed when
  2150. * necessary.
  2151. */
  2152. static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
  2153. unsigned long action, void *hcpu)
  2154. {
  2155. long cpu = (long)hcpu;
  2156. switch (action) {
  2157. case CPU_UP_CANCELED:
  2158. case CPU_UP_CANCELED_FROZEN:
  2159. case CPU_DEAD:
  2160. case CPU_DEAD_FROZEN:
  2161. for_all_slabs(__flush_cpu_slab, cpu);
  2162. break;
  2163. default:
  2164. break;
  2165. }
  2166. return NOTIFY_OK;
  2167. }
  2168. static struct notifier_block __cpuinitdata slab_notifier =
  2169. { &slab_cpuup_callback, NULL, 0 };
  2170. #endif
  2171. void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
  2172. {
  2173. struct kmem_cache *s = get_slab(size, gfpflags);
  2174. if (!s)
  2175. return NULL;
  2176. return slab_alloc(s, gfpflags, -1, caller);
  2177. }
  2178. void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
  2179. int node, void *caller)
  2180. {
  2181. struct kmem_cache *s = get_slab(size, gfpflags);
  2182. if (!s)
  2183. return NULL;
  2184. return slab_alloc(s, gfpflags, node, caller);
  2185. }
  2186. #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
  2187. static int validate_slab(struct kmem_cache *s, struct page *page)
  2188. {
  2189. void *p;
  2190. void *addr = page_address(page);
  2191. DECLARE_BITMAP(map, s->objects);
  2192. if (!check_slab(s, page) ||
  2193. !on_freelist(s, page, NULL))
  2194. return 0;
  2195. /* Now we know that a valid freelist exists */
  2196. bitmap_zero(map, s->objects);
  2197. for_each_free_object(p, s, page->freelist) {
  2198. set_bit(slab_index(p, s, addr), map);
  2199. if (!check_object(s, page, p, 0))
  2200. return 0;
  2201. }
  2202. for_each_object(p, s, addr)
  2203. if (!test_bit(slab_index(p, s, addr), map))
  2204. if (!check_object(s, page, p, 1))
  2205. return 0;
  2206. return 1;
  2207. }
  2208. static void validate_slab_slab(struct kmem_cache *s, struct page *page)
  2209. {
  2210. if (slab_trylock(page)) {
  2211. validate_slab(s, page);
  2212. slab_unlock(page);
  2213. } else
  2214. printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
  2215. s->name, page);
  2216. if (s->flags & DEBUG_DEFAULT_FLAGS) {
  2217. if (!SlabDebug(page))
  2218. printk(KERN_ERR "SLUB %s: SlabDebug not set "
  2219. "on slab 0x%p\n", s->name, page);
  2220. } else {
  2221. if (SlabDebug(page))
  2222. printk(KERN_ERR "SLUB %s: SlabDebug set on "
  2223. "slab 0x%p\n", s->name, page);
  2224. }
  2225. }
  2226. static int validate_slab_node(struct kmem_cache *s, struct kmem_cache_node *n)
  2227. {
  2228. unsigned long count = 0;
  2229. struct page *page;
  2230. unsigned long flags;
  2231. spin_lock_irqsave(&n->list_lock, flags);
  2232. list_for_each_entry(page, &n->partial, lru) {
  2233. validate_slab_slab(s, page);
  2234. count++;
  2235. }
  2236. if (count != n->nr_partial)
  2237. printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
  2238. "counter=%ld\n", s->name, count, n->nr_partial);
  2239. if (!(s->flags & SLAB_STORE_USER))
  2240. goto out;
  2241. list_for_each_entry(page, &n->full, lru) {
  2242. validate_slab_slab(s, page);
  2243. count++;
  2244. }
  2245. if (count != atomic_long_read(&n->nr_slabs))
  2246. printk(KERN_ERR "SLUB: %s %ld slabs counted but "
  2247. "counter=%ld\n", s->name, count,
  2248. atomic_long_read(&n->nr_slabs));
  2249. out:
  2250. spin_unlock_irqrestore(&n->list_lock, flags);
  2251. return count;
  2252. }
  2253. static unsigned long validate_slab_cache(struct kmem_cache *s)
  2254. {
  2255. int node;
  2256. unsigned long count = 0;
  2257. flush_all(s);
  2258. for_each_online_node(node) {
  2259. struct kmem_cache_node *n = get_node(s, node);
  2260. count += validate_slab_node(s, n);
  2261. }
  2262. return count;
  2263. }
  2264. #ifdef SLUB_RESILIENCY_TEST
  2265. static void resiliency_test(void)
  2266. {
  2267. u8 *p;
  2268. printk(KERN_ERR "SLUB resiliency testing\n");
  2269. printk(KERN_ERR "-----------------------\n");
  2270. printk(KERN_ERR "A. Corruption after allocation\n");
  2271. p = kzalloc(16, GFP_KERNEL);
  2272. p[16] = 0x12;
  2273. printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
  2274. " 0x12->0x%p\n\n", p + 16);
  2275. validate_slab_cache(kmalloc_caches + 4);
  2276. /* Hmmm... The next two are dangerous */
  2277. p = kzalloc(32, GFP_KERNEL);
  2278. p[32 + sizeof(void *)] = 0x34;
  2279. printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
  2280. " 0x34 -> -0x%p\n", p);
  2281. printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
  2282. validate_slab_cache(kmalloc_caches + 5);
  2283. p = kzalloc(64, GFP_KERNEL);
  2284. p += 64 + (get_cycles() & 0xff) * sizeof(void *);
  2285. *p = 0x56;
  2286. printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
  2287. p);
  2288. printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
  2289. validate_slab_cache(kmalloc_caches + 6);
  2290. printk(KERN_ERR "\nB. Corruption after free\n");
  2291. p = kzalloc(128, GFP_KERNEL);
  2292. kfree(p);
  2293. *p = 0x78;
  2294. printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
  2295. validate_slab_cache(kmalloc_caches + 7);
  2296. p = kzalloc(256, GFP_KERNEL);
  2297. kfree(p);
  2298. p[50] = 0x9a;
  2299. printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
  2300. validate_slab_cache(kmalloc_caches + 8);
  2301. p = kzalloc(512, GFP_KERNEL);
  2302. kfree(p);
  2303. p[512] = 0xab;
  2304. printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
  2305. validate_slab_cache(kmalloc_caches + 9);
  2306. }
  2307. #else
2308. static void resiliency_test(void) {}
  2309. #endif
  2310. /*
  2311. * Generate lists of code addresses where slabcache objects are allocated
  2312. * and freed.
  2313. */
  2314. struct location {
  2315. unsigned long count;
  2316. void *addr;
  2317. long long sum_time;
  2318. long min_time;
  2319. long max_time;
  2320. long min_pid;
  2321. long max_pid;
  2322. cpumask_t cpus;
  2323. nodemask_t nodes;
  2324. };
  2325. struct loc_track {
  2326. unsigned long max;
  2327. unsigned long count;
  2328. struct location *loc;
  2329. };
  2330. static void free_loc_track(struct loc_track *t)
  2331. {
  2332. if (t->max)
  2333. free_pages((unsigned long)t->loc,
  2334. get_order(sizeof(struct location) * t->max));
  2335. }
  2336. static int alloc_loc_track(struct loc_track *t, unsigned long max)
  2337. {
  2338. struct location *l;
  2339. int order;
  2340. if (!max)
  2341. max = PAGE_SIZE / sizeof(struct location);
  2342. order = get_order(sizeof(struct location) * max);
  2343. l = (void *)__get_free_pages(GFP_KERNEL, order);
  2344. if (!l)
  2345. return 0;
  2346. if (t->count) {
  2347. memcpy(l, t->loc, sizeof(struct location) * t->count);
  2348. free_loc_track(t);
  2349. }
  2350. t->max = max;
  2351. t->loc = l;
  2352. return 1;
  2353. }
  2354. static int add_location(struct loc_track *t, struct kmem_cache *s,
  2355. const struct track *track)
  2356. {
  2357. long start, end, pos;
  2358. struct location *l;
  2359. void *caddr;
  2360. unsigned long age = jiffies - track->when;
  2361. start = -1;
  2362. end = t->count;
  2363. for ( ; ; ) {
  2364. pos = start + (end - start + 1) / 2;
  2365. /*
  2366. * There is nothing at "end". If we end up there
  2367. * we need to add something to before end.
  2368. */
  2369. if (pos == end)
  2370. break;
  2371. caddr = t->loc[pos].addr;
  2372. if (track->addr == caddr) {
  2373. l = &t->loc[pos];
  2374. l->count++;
  2375. if (track->when) {
  2376. l->sum_time += age;
  2377. if (age < l->min_time)
  2378. l->min_time = age;
  2379. if (age > l->max_time)
  2380. l->max_time = age;
  2381. if (track->pid < l->min_pid)
  2382. l->min_pid = track->pid;
  2383. if (track->pid > l->max_pid)
  2384. l->max_pid = track->pid;
  2385. cpu_set(track->cpu, l->cpus);
  2386. }
  2387. node_set(page_to_nid(virt_to_page(track)), l->nodes);
  2388. return 1;
  2389. }
  2390. if (track->addr < caddr)
  2391. end = pos;
  2392. else
  2393. start = pos;
  2394. }
  2395. /*
  2396. * Not found. Insert new tracking element.
  2397. */
  2398. if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max))
  2399. return 0;
  2400. l = t->loc + pos;
  2401. if (pos < t->count)
  2402. memmove(l + 1, l,
  2403. (t->count - pos) * sizeof(struct location));
  2404. t->count++;
  2405. l->count = 1;
  2406. l->addr = track->addr;
  2407. l->sum_time = age;
  2408. l->min_time = age;
  2409. l->max_time = age;
  2410. l->min_pid = track->pid;
  2411. l->max_pid = track->pid;
  2412. cpus_clear(l->cpus);
  2413. cpu_set(track->cpu, l->cpus);
  2414. nodes_clear(l->nodes);
  2415. node_set(page_to_nid(virt_to_page(track)), l->nodes);
  2416. return 1;
  2417. }
  2418. static void process_slab(struct loc_track *t, struct kmem_cache *s,
  2419. struct page *page, enum track_item alloc)
  2420. {
  2421. void *addr = page_address(page);
  2422. DECLARE_BITMAP(map, s->objects);
  2423. void *p;
  2424. bitmap_zero(map, s->objects);
  2425. for_each_free_object(p, s, page->freelist)
  2426. set_bit(slab_index(p, s, addr), map);
  2427. for_each_object(p, s, addr)
  2428. if (!test_bit(slab_index(p, s, addr), map))
  2429. add_location(t, s, get_track(s, p, alloc));
  2430. }
  2431. static int list_locations(struct kmem_cache *s, char *buf,
  2432. enum track_item alloc)
  2433. {
  2434. int n = 0;
  2435. unsigned long i;
  2436. struct loc_track t;
  2437. int node;
  2438. t.count = 0;
  2439. t.max = 0;
  2440. /* Push back cpu slabs */
  2441. flush_all(s);
  2442. for_each_online_node(node) {
  2443. struct kmem_cache_node *n = get_node(s, node);
  2444. unsigned long flags;
  2445. struct page *page;
  2446. if (!atomic_read(&n->nr_slabs))
  2447. continue;
  2448. spin_lock_irqsave(&n->list_lock, flags);
  2449. list_for_each_entry(page, &n->partial, lru)
  2450. process_slab(&t, s, page, alloc);
  2451. list_for_each_entry(page, &n->full, lru)
  2452. process_slab(&t, s, page, alloc);
  2453. spin_unlock_irqrestore(&n->list_lock, flags);
  2454. }
  2455. for (i = 0; i < t.count; i++) {
  2456. struct location *l = &t.loc[i];
  2457. if (n > PAGE_SIZE - 100)
  2458. break;
  2459. n += sprintf(buf + n, "%7ld ", l->count);
  2460. if (l->addr)
  2461. n += sprint_symbol(buf + n, (unsigned long)l->addr);
  2462. else
  2463. n += sprintf(buf + n, "<not-available>");
  2464. if (l->sum_time != l->min_time) {
  2465. unsigned long remainder;
  2466. n += sprintf(buf + n, " age=%ld/%ld/%ld",
  2467. l->min_time,
  2468. div_long_long_rem(l->sum_time, l->count, &remainder),
  2469. l->max_time);
  2470. } else
  2471. n += sprintf(buf + n, " age=%ld",
  2472. l->min_time);
  2473. if (l->min_pid != l->max_pid)
  2474. n += sprintf(buf + n, " pid=%ld-%ld",
  2475. l->min_pid, l->max_pid);
  2476. else
  2477. n += sprintf(buf + n, " pid=%ld",
  2478. l->min_pid);
  2479. if (num_online_cpus() > 1 && !cpus_empty(l->cpus)) {
  2480. n += sprintf(buf + n, " cpus=");
  2481. n += cpulist_scnprintf(buf + n, PAGE_SIZE - n - 50,
  2482. l->cpus);
  2483. }
  2484. if (num_online_nodes() > 1 && !nodes_empty(l->nodes)) {
  2485. n += sprintf(buf + n, " nodes=");
  2486. n += nodelist_scnprintf(buf + n, PAGE_SIZE - n - 50,
  2487. l->nodes);
  2488. }
  2489. n += sprintf(buf + n, "\n");
  2490. }
  2491. free_loc_track(&t);
  2492. if (!t.count)
  2493. n += sprintf(buf, "No data\n");
  2494. return n;
  2495. }
  2496. static unsigned long count_partial(struct kmem_cache_node *n)
  2497. {
  2498. unsigned long flags;
  2499. unsigned long x = 0;
  2500. struct page *page;
  2501. spin_lock_irqsave(&n->list_lock, flags);
  2502. list_for_each_entry(page, &n->partial, lru)
  2503. x += page->inuse;
  2504. spin_unlock_irqrestore(&n->list_lock, flags);
  2505. return x;
  2506. }
  2507. enum slab_stat_type {
  2508. SL_FULL,
  2509. SL_PARTIAL,
  2510. SL_CPU,
  2511. SL_OBJECTS
  2512. };
  2513. #define SO_FULL (1 << SL_FULL)
  2514. #define SO_PARTIAL (1 << SL_PARTIAL)
  2515. #define SO_CPU (1 << SL_CPU)
  2516. #define SO_OBJECTS (1 << SL_OBJECTS)
  2517. static unsigned long slab_objects(struct kmem_cache *s,
  2518. char *buf, unsigned long flags)
  2519. {
  2520. unsigned long total = 0;
  2521. int cpu;
  2522. int node;
  2523. int x;
  2524. unsigned long *nodes;
  2525. unsigned long *per_cpu;
  2526. nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
  2527. per_cpu = nodes + nr_node_ids;
  2528. for_each_possible_cpu(cpu) {
  2529. struct page *page = s->cpu_slab[cpu];
  2530. int node;
  2531. if (page) {
  2532. node = page_to_nid(page);
  2533. if (flags & SO_CPU) {
  2534. int x = 0;
  2535. if (flags & SO_OBJECTS)
  2536. x = page->inuse;
  2537. else
  2538. x = 1;
  2539. total += x;
  2540. nodes[node] += x;
  2541. }
  2542. per_cpu[node]++;
  2543. }
  2544. }
  2545. for_each_online_node(node) {
  2546. struct kmem_cache_node *n = get_node(s, node);
  2547. if (flags & SO_PARTIAL) {
  2548. if (flags & SO_OBJECTS)
  2549. x = count_partial(n);
  2550. else
  2551. x = n->nr_partial;
  2552. total += x;
  2553. nodes[node] += x;
  2554. }
  2555. if (flags & SO_FULL) {
  2556. int full_slabs = atomic_read(&n->nr_slabs)
  2557. - per_cpu[node]
  2558. - n->nr_partial;
  2559. if (flags & SO_OBJECTS)
  2560. x = full_slabs * s->objects;
  2561. else
  2562. x = full_slabs;
  2563. total += x;
  2564. nodes[node] += x;
  2565. }
  2566. }
  2567. x = sprintf(buf, "%lu", total);
  2568. #ifdef CONFIG_NUMA
  2569. for_each_online_node(node)
  2570. if (nodes[node])
  2571. x += sprintf(buf + x, " N%d=%lu",
  2572. node, nodes[node]);
  2573. #endif
  2574. kfree(nodes);
  2575. return x + sprintf(buf + x, "\n");
  2576. }
  2577. static int any_slab_objects(struct kmem_cache *s)
  2578. {
  2579. int node;
  2580. int cpu;
  2581. for_each_possible_cpu(cpu)
  2582. if (s->cpu_slab[cpu])
  2583. return 1;
  2584. for_each_node(node) {
  2585. struct kmem_cache_node *n = get_node(s, node);
  2586. if (n->nr_partial || atomic_read(&n->nr_slabs))
  2587. return 1;
  2588. }
  2589. return 0;
  2590. }
  2591. #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
2592. #define to_slab(n) container_of(n, struct kmem_cache, kobj)
  2593. struct slab_attribute {
  2594. struct attribute attr;
  2595. ssize_t (*show)(struct kmem_cache *s, char *buf);
  2596. ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
  2597. };
  2598. #define SLAB_ATTR_RO(_name) \
  2599. static struct slab_attribute _name##_attr = __ATTR_RO(_name)
  2600. #define SLAB_ATTR(_name) \
  2601. static struct slab_attribute _name##_attr = \
  2602. __ATTR(_name, 0644, _name##_show, _name##_store)
  2603. static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
  2604. {
  2605. return sprintf(buf, "%d\n", s->size);
  2606. }
  2607. SLAB_ATTR_RO(slab_size);
  2608. static ssize_t align_show(struct kmem_cache *s, char *buf)
  2609. {
  2610. return sprintf(buf, "%d\n", s->align);
  2611. }
  2612. SLAB_ATTR_RO(align);
  2613. static ssize_t object_size_show(struct kmem_cache *s, char *buf)
  2614. {
  2615. return sprintf(buf, "%d\n", s->objsize);
  2616. }
  2617. SLAB_ATTR_RO(object_size);
  2618. static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
  2619. {
  2620. return sprintf(buf, "%d\n", s->objects);
  2621. }
  2622. SLAB_ATTR_RO(objs_per_slab);
  2623. static ssize_t order_show(struct kmem_cache *s, char *buf)
  2624. {
  2625. return sprintf(buf, "%d\n", s->order);
  2626. }
  2627. SLAB_ATTR_RO(order);
  2628. static ssize_t ctor_show(struct kmem_cache *s, char *buf)
  2629. {
  2630. if (s->ctor) {
  2631. int n = sprint_symbol(buf, (unsigned long)s->ctor);
  2632. return n + sprintf(buf + n, "\n");
  2633. }
  2634. return 0;
  2635. }
  2636. SLAB_ATTR_RO(ctor);
  2637. static ssize_t dtor_show(struct kmem_cache *s, char *buf)
  2638. {
  2639. if (s->dtor) {
  2640. int n = sprint_symbol(buf, (unsigned long)s->dtor);
  2641. return n + sprintf(buf + n, "\n");
  2642. }
  2643. return 0;
  2644. }
  2645. SLAB_ATTR_RO(dtor);
  2646. static ssize_t aliases_show(struct kmem_cache *s, char *buf)
  2647. {
  2648. return sprintf(buf, "%d\n", s->refcount - 1);
  2649. }
  2650. SLAB_ATTR_RO(aliases);
  2651. static ssize_t slabs_show(struct kmem_cache *s, char *buf)
  2652. {
  2653. return slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU);
  2654. }
  2655. SLAB_ATTR_RO(slabs);
  2656. static ssize_t partial_show(struct kmem_cache *s, char *buf)
  2657. {
  2658. return slab_objects(s, buf, SO_PARTIAL);
  2659. }
  2660. SLAB_ATTR_RO(partial);
  2661. static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
  2662. {
  2663. return slab_objects(s, buf, SO_CPU);
  2664. }
  2665. SLAB_ATTR_RO(cpu_slabs);
  2666. static ssize_t objects_show(struct kmem_cache *s, char *buf)
  2667. {
  2668. return slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU|SO_OBJECTS);
  2669. }
  2670. SLAB_ATTR_RO(objects);
  2671. static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
  2672. {
  2673. return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
  2674. }
  2675. static ssize_t sanity_checks_store(struct kmem_cache *s,
  2676. const char *buf, size_t length)
  2677. {
  2678. s->flags &= ~SLAB_DEBUG_FREE;
  2679. if (buf[0] == '1')
  2680. s->flags |= SLAB_DEBUG_FREE;
  2681. return length;
  2682. }
  2683. SLAB_ATTR(sanity_checks);
  2684. static ssize_t trace_show(struct kmem_cache *s, char *buf)
  2685. {
  2686. return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
  2687. }
  2688. static ssize_t trace_store(struct kmem_cache *s, const char *buf,
  2689. size_t length)
  2690. {
  2691. s->flags &= ~SLAB_TRACE;
  2692. if (buf[0] == '1')
  2693. s->flags |= SLAB_TRACE;
  2694. return length;
  2695. }
  2696. SLAB_ATTR(trace);
  2697. static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
  2698. {
  2699. return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
  2700. }
  2701. static ssize_t reclaim_account_store(struct kmem_cache *s,
  2702. const char *buf, size_t length)
  2703. {
  2704. s->flags &= ~SLAB_RECLAIM_ACCOUNT;
  2705. if (buf[0] == '1')
  2706. s->flags |= SLAB_RECLAIM_ACCOUNT;
  2707. return length;
  2708. }
  2709. SLAB_ATTR(reclaim_account);
  2710. static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
  2711. {
  2712. return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
  2713. }
  2714. SLAB_ATTR_RO(hwcache_align);
  2715. #ifdef CONFIG_ZONE_DMA
  2716. static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
  2717. {
  2718. return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
  2719. }
  2720. SLAB_ATTR_RO(cache_dma);
  2721. #endif
  2722. static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
  2723. {
  2724. return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
  2725. }
  2726. SLAB_ATTR_RO(destroy_by_rcu);
  2727. static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
  2728. {
  2729. return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
  2730. }
  2731. static ssize_t red_zone_store(struct kmem_cache *s,
  2732. const char *buf, size_t length)
  2733. {
  2734. if (any_slab_objects(s))
  2735. return -EBUSY;
  2736. s->flags &= ~SLAB_RED_ZONE;
  2737. if (buf[0] == '1')
  2738. s->flags |= SLAB_RED_ZONE;
  2739. calculate_sizes(s);
  2740. return length;
  2741. }
  2742. SLAB_ATTR(red_zone);
  2743. static ssize_t poison_show(struct kmem_cache *s, char *buf)
  2744. {
  2745. return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
  2746. }
  2747. static ssize_t poison_store(struct kmem_cache *s,
  2748. const char *buf, size_t length)
  2749. {
  2750. if (any_slab_objects(s))
  2751. return -EBUSY;
  2752. s->flags &= ~SLAB_POISON;
  2753. if (buf[0] == '1')
  2754. s->flags |= SLAB_POISON;
  2755. calculate_sizes(s);
  2756. return length;
  2757. }
  2758. SLAB_ATTR(poison);
  2759. static ssize_t store_user_show(struct kmem_cache *s, char *buf)
  2760. {
  2761. return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
  2762. }
  2763. static ssize_t store_user_store(struct kmem_cache *s,
  2764. const char *buf, size_t length)
  2765. {
  2766. if (any_slab_objects(s))
  2767. return -EBUSY;
  2768. s->flags &= ~SLAB_STORE_USER;
  2769. if (buf[0] == '1')
  2770. s->flags |= SLAB_STORE_USER;
  2771. calculate_sizes(s);
  2772. return length;
  2773. }
  2774. SLAB_ATTR(store_user);
  2775. static ssize_t validate_show(struct kmem_cache *s, char *buf)
  2776. {
  2777. return 0;
  2778. }
  2779. static ssize_t validate_store(struct kmem_cache *s,
  2780. const char *buf, size_t length)
  2781. {
  2782. if (buf[0] == '1')
  2783. validate_slab_cache(s);
  2784. else
  2785. return -EINVAL;
  2786. return length;
  2787. }
  2788. SLAB_ATTR(validate);
  2789. static ssize_t shrink_show(struct kmem_cache *s, char *buf)
  2790. {
  2791. return 0;
  2792. }
  2793. static ssize_t shrink_store(struct kmem_cache *s,
  2794. const char *buf, size_t length)
  2795. {
  2796. if (buf[0] == '1') {
  2797. int rc = kmem_cache_shrink(s);
  2798. if (rc)
  2799. return rc;
  2800. } else
  2801. return -EINVAL;
  2802. return length;
  2803. }
  2804. SLAB_ATTR(shrink);
  2805. static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
  2806. {
  2807. if (!(s->flags & SLAB_STORE_USER))
  2808. return -ENOSYS;
  2809. return list_locations(s, buf, TRACK_ALLOC);
  2810. }
  2811. SLAB_ATTR_RO(alloc_calls);
  2812. static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
  2813. {
  2814. if (!(s->flags & SLAB_STORE_USER))
  2815. return -ENOSYS;
  2816. return list_locations(s, buf, TRACK_FREE);
  2817. }
  2818. SLAB_ATTR_RO(free_calls);
  2819. #ifdef CONFIG_NUMA
  2820. static ssize_t defrag_ratio_show(struct kmem_cache *s, char *buf)
  2821. {
  2822. return sprintf(buf, "%d\n", s->defrag_ratio / 10);
  2823. }
  2824. static ssize_t defrag_ratio_store(struct kmem_cache *s,
  2825. const char *buf, size_t length)
  2826. {
  2827. int n = simple_strtoul(buf, NULL, 10);
  2828. if (n < 100)
  2829. s->defrag_ratio = n * 10;
  2830. return length;
  2831. }
  2832. SLAB_ATTR(defrag_ratio);
  2833. #endif
  2834. static struct attribute * slab_attrs[] = {
  2835. &slab_size_attr.attr,
  2836. &object_size_attr.attr,
  2837. &objs_per_slab_attr.attr,
  2838. &order_attr.attr,
  2839. &objects_attr.attr,
  2840. &slabs_attr.attr,
  2841. &partial_attr.attr,
  2842. &cpu_slabs_attr.attr,
  2843. &ctor_attr.attr,
  2844. &dtor_attr.attr,
  2845. &aliases_attr.attr,
  2846. &align_attr.attr,
  2847. &sanity_checks_attr.attr,
  2848. &trace_attr.attr,
  2849. &hwcache_align_attr.attr,
  2850. &reclaim_account_attr.attr,
  2851. &destroy_by_rcu_attr.attr,
  2852. &red_zone_attr.attr,
  2853. &poison_attr.attr,
  2854. &store_user_attr.attr,
  2855. &validate_attr.attr,
  2856. &shrink_attr.attr,
  2857. &alloc_calls_attr.attr,
  2858. &free_calls_attr.attr,
  2859. #ifdef CONFIG_ZONE_DMA
  2860. &cache_dma_attr.attr,
  2861. #endif
  2862. #ifdef CONFIG_NUMA
  2863. &defrag_ratio_attr.attr,
  2864. #endif
  2865. NULL
  2866. };
  2867. static struct attribute_group slab_attr_group = {
  2868. .attrs = slab_attrs,
  2869. };
  2870. static ssize_t slab_attr_show(struct kobject *kobj,
  2871. struct attribute *attr,
  2872. char *buf)
  2873. {
  2874. struct slab_attribute *attribute;
  2875. struct kmem_cache *s;
  2876. int err;
  2877. attribute = to_slab_attr(attr);
  2878. s = to_slab(kobj);
  2879. if (!attribute->show)
  2880. return -EIO;
  2881. err = attribute->show(s, buf);
  2882. return err;
  2883. }
  2884. static ssize_t slab_attr_store(struct kobject *kobj,
  2885. struct attribute *attr,
  2886. const char *buf, size_t len)
  2887. {
  2888. struct slab_attribute *attribute;
  2889. struct kmem_cache *s;
  2890. int err;
  2891. attribute = to_slab_attr(attr);
  2892. s = to_slab(kobj);
  2893. if (!attribute->store)
  2894. return -EIO;
  2895. err = attribute->store(s, buf, len);
  2896. return err;
  2897. }
  2898. static struct sysfs_ops slab_sysfs_ops = {
  2899. .show = slab_attr_show,
  2900. .store = slab_attr_store,
  2901. };
  2902. static struct kobj_type slab_ktype = {
  2903. .sysfs_ops = &slab_sysfs_ops,
  2904. };
  2905. static int uevent_filter(struct kset *kset, struct kobject *kobj)
  2906. {
  2907. struct kobj_type *ktype = get_ktype(kobj);
  2908. if (ktype == &slab_ktype)
  2909. return 1;
  2910. return 0;
  2911. }
  2912. static struct kset_uevent_ops slab_uevent_ops = {
  2913. .filter = uevent_filter,
  2914. };
  2915. decl_subsys(slab, &slab_ktype, &slab_uevent_ops);
  2916. #define ID_STR_LENGTH 64
  2917. /* Create a unique string id for a slab cache:
  2918. * format
  2919. * :[flags-]size:[memory address of kmemcache]
  2920. */
  2921. static char *create_unique_id(struct kmem_cache *s)
  2922. {
  2923. char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
  2924. char *p = name;
  2925. BUG_ON(!name);
  2926. *p++ = ':';
  2927. /*
  2928. * First flags affecting slabcache operations. We will only
  2929. * get here for aliasable slabs so we do not need to support
  2930. * too many flags. The flags here must cover all flags that
  2931. * are matched during merging to guarantee that the id is
  2932. * unique.
  2933. */
  2934. if (s->flags & SLAB_CACHE_DMA)
  2935. *p++ = 'd';
  2936. if (s->flags & SLAB_RECLAIM_ACCOUNT)
  2937. *p++ = 'a';
  2938. if (s->flags & SLAB_DEBUG_FREE)
  2939. *p++ = 'F';
  2940. if (p != name + 1)
  2941. *p++ = '-';
  2942. p += sprintf(p, "%07d", s->size);
  2943. BUG_ON(p > name + ID_STR_LENGTH - 1);
  2944. return name;
  2945. }
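/*
 * Example: a mergeable DMA cache of size 192 gets the id ":d-0000192",
 * while a plain cache of size 4096 becomes ":0004096" (no flag characters,
 * hence no '-').
 */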
  2946. static int sysfs_slab_add(struct kmem_cache *s)
  2947. {
  2948. int err;
  2949. const char *name;
  2950. int unmergeable;
  2951. if (slab_state < SYSFS)
  2952. /* Defer until later */
  2953. return 0;
  2954. unmergeable = slab_unmergeable(s);
  2955. if (unmergeable) {
  2956. /*
  2957. * Slabcache can never be merged so we can use the name proper.
  2958. * This is typically the case for debug situations. In that
  2959. * case we can catch duplicate names easily.
  2960. */
  2961. sysfs_remove_link(&slab_subsys.kobj, s->name);
  2962. name = s->name;
  2963. } else {
  2964. /*
  2965. * Create a unique name for the slab as a target
  2966. * for the symlinks.
  2967. */
  2968. name = create_unique_id(s);
  2969. }
  2970. kobj_set_kset_s(s, slab_subsys);
  2971. kobject_set_name(&s->kobj, name);
  2972. kobject_init(&s->kobj);
  2973. err = kobject_add(&s->kobj);
  2974. if (err)
  2975. return err;
  2976. err = sysfs_create_group(&s->kobj, &slab_attr_group);
  2977. if (err)
  2978. return err;
  2979. kobject_uevent(&s->kobj, KOBJ_ADD);
  2980. if (!unmergeable) {
  2981. /* Setup first alias */
  2982. sysfs_slab_alias(s, s->name);
  2983. kfree(name);
  2984. }
  2985. return 0;
  2986. }
  2987. static void sysfs_slab_remove(struct kmem_cache *s)
  2988. {
  2989. kobject_uevent(&s->kobj, KOBJ_REMOVE);
  2990. kobject_del(&s->kobj);
  2991. }
  2992. /*
  2993. * Need to buffer aliases during bootup until sysfs becomes
2994. * available lest we lose that information.
  2995. */
  2996. struct saved_alias {
  2997. struct kmem_cache *s;
  2998. const char *name;
  2999. struct saved_alias *next;
  3000. };
  3001. struct saved_alias *alias_list;
  3002. static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
  3003. {
  3004. struct saved_alias *al;
  3005. if (slab_state == SYSFS) {
  3006. /*
  3007. * If we have a leftover link then remove it.
  3008. */
  3009. sysfs_remove_link(&slab_subsys.kobj, name);
  3010. return sysfs_create_link(&slab_subsys.kobj,
  3011. &s->kobj, name);
  3012. }
  3013. al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
  3014. if (!al)
  3015. return -ENOMEM;
  3016. al->s = s;
  3017. al->name = name;
  3018. al->next = alias_list;
  3019. alias_list = al;
  3020. return 0;
  3021. }
  3022. static int __init slab_sysfs_init(void)
  3023. {
  3024. struct list_head *h;
  3025. int err;
  3026. err = subsystem_register(&slab_subsys);
  3027. if (err) {
  3028. printk(KERN_ERR "Cannot register slab subsystem.\n");
  3029. return -ENOSYS;
  3030. }
  3031. slab_state = SYSFS;
  3032. list_for_each(h, &slab_caches) {
  3033. struct kmem_cache *s =
  3034. container_of(h, struct kmem_cache, list);
  3035. err = sysfs_slab_add(s);
  3036. BUG_ON(err);
  3037. }
  3038. while (alias_list) {
  3039. struct saved_alias *al = alias_list;
  3040. alias_list = alias_list->next;
  3041. err = sysfs_slab_alias(al->s, al->name);
  3042. BUG_ON(err);
  3043. kfree(al);
  3044. }
  3045. resiliency_test();
  3046. return 0;
  3047. }
  3048. __initcall(slab_sysfs_init);
  3049. #endif