slub.c 89 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552255325542555255625572558255925602561256225632564256525662567256825692570257125722573257425752576257725782579258025812582258325842585258625872588258925902591259225932594259525962597259825992600260126022603260426052606260726082609261026112612261326142615261626172618261926202621262226232624262526262627262826292630263126322633263426352636263726382639264026412642264326442645264626472648264926502651265226532654265526562657265826592660266126622663266426652666266726682669267026712672267326742675267626772678267926802681268226832684268526862687268826892690269126922693269426952696269726982699270027012702270327042705270627072708270927102711271227132714271527162717271827192720272127222723272427252726272727282729273027312732273327342735273627372738273927402741274227432744274527462747274827492750275127522753275427552756275727582759276027612762276327642765276627672768276927702771277227732774277527762777277827792780278127822783278427852786278727882789279027912792279327942795279627972798279928002801280228032804280528062807280828092810281128122813281428152816281728182819282028212822282328242825282628272828282928302831283228332834283528362837283828392840284128422843284428452846284728482849285028512852285328542855285628572858285928602861286228632864286528662867286828692870287128722873287428752876287728782879288028812882288328842885288628872888288928902891289228932894289528962897289828992900290129022903290429052906290729082909291029112912291329142915291629172918291929202921292229232924292529262927292829292930293129322933293429352936293729382939294029412942294329442945294629472948294929502951295229532954295529562957295829592960296129622963296429652966296729682969297029712972297329742975297629772978297929802981298229832984298529862987298829892990299129922993299429952996299729982999300030013002300330043005300630073008300930103011301230133014301530163017301830193020302130223023302430253026302730283029303030313032303330343035303630373038303930403041304230433044304530463047304830493050305130523053305430553056305730583059306030613062306330643065306630673068306930703071307230733074307530763077307830793080308130823083308430853086308730883089309030913092309330943095309630973098309931003101310231033104310531063107310831093110311131123113311431153116311731183119312031213122312331243125312631273128312931303131313231333134313531363137313831393140314131423143314431453146314731483149315031513152315331543155315631573158315931603161316231633164316531663167316831693170317131723173317431753176317731783179318031813182318331843185318631873188318931903191319231933194319531963197319831993200320132023203320432053206320732083209321032113212321332143215321632173218321932203221322232233224322532263227322832293230323132323233323432353236323732383239324032413242324332443245324632473248324932503251325232533254325532563257325832593260326132623263326432653266326732683269327032713272327332743275327632773278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377737783779378037813782378337843785378637873788378937903791379237933794379537963797
  1. /*
  2. * SLUB: A slab allocator that limits cache line use instead of queuing
  3. * objects in per cpu and per node lists.
  4. *
  5. * The allocator synchronizes using per slab locks and only
  6. * uses a centralized lock to manage a pool of partial slabs.
  7. *
  8. * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
  9. */
  10. #include <linux/mm.h>
  11. #include <linux/module.h>
  12. #include <linux/bit_spinlock.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/bitops.h>
  15. #include <linux/slab.h>
  16. #include <linux/seq_file.h>
  17. #include <linux/cpu.h>
  18. #include <linux/cpuset.h>
  19. #include <linux/mempolicy.h>
  20. #include <linux/ctype.h>
  21. #include <linux/kallsyms.h>
  22. /*
  23. * Lock order:
  24. * 1. slab_lock(page)
  25. * 2. slab->list_lock
  26. *
  27. * The slab_lock protects operations on the object of a particular
  28. * slab and its metadata in the page struct. If the slab lock
  29. * has been taken then no allocations nor frees can be performed
  30. * on the objects in the slab nor can the slab be added or removed
  31. * from the partial or full lists since this would mean modifying
  32. * the page_struct of the slab.
  33. *
  34. * The list_lock protects the partial and full list on each node and
  35. * the partial slab counter. If taken then no new slabs may be added or
  36. * removed from the lists nor make the number of partial slabs be modified.
  37. * (Note that the total number of slabs is an atomic value that may be
  38. * modified without taking the list lock).
  39. *
  40. * The list_lock is a centralized lock and thus we avoid taking it as
  41. * much as possible. As long as SLUB does not have to handle partial
  42. * slabs, operations can continue without any centralized lock. F.e.
  43. * allocating a long series of objects that fill up slabs does not require
  44. * the list lock.
  45. *
  46. * The lock order is sometimes inverted when we are trying to get a slab
  47. * off a list. We take the list_lock and then look for a page on the list
  48. * to use. While we do that objects in the slabs may be freed. We can
  49. * only operate on the slab if we have also taken the slab_lock. So we use
  50. * a slab_trylock() on the slab. If trylock was successful then no frees
  51. * can occur anymore and we can use the slab for allocations etc. If the
  52. * slab_trylock() does not succeed then frees are in progress in the slab and
  53. * we must stay away from it for a while since we may cause a bouncing
  54. * cacheline if we try to acquire the lock. So go onto the next slab.
  55. * If all pages are busy then we may allocate a new slab instead of reusing
  56. * a partial slab. A new slab has noone operating on it and thus there is
  57. * no danger of cacheline contention.
  58. *
  59. * Interrupts are disabled during allocation and deallocation in order to
  60. * make the slab allocator safe to use in the context of an irq. In addition
  61. * interrupts are disabled to ensure that the processor does not change
  62. * while handling per_cpu slabs, due to kernel preemption.
  63. *
  64. * SLUB assigns one slab for allocation to each processor.
  65. * Allocations only occur from these slabs called cpu slabs.
  66. *
  67. * Slabs with free elements are kept on a partial list and during regular
  68. * operations no list for full slabs is used. If an object in a full slab is
  69. * freed then the slab will show up again on the partial lists.
  70. * We track full slabs for debugging purposes though because otherwise we
  71. * cannot scan all objects.
  72. *
  73. * Slabs are freed when they become empty. Teardown and setup is
  74. * minimal so we rely on the page allocators per cpu caches for
  75. * fast frees and allocs.
  76. *
  77. * Overloading of page flags that are otherwise used for LRU management.
  78. *
  79. * PageActive The slab is frozen and exempt from list processing.
  80. * This means that the slab is dedicated to a purpose
  81. * such as satisfying allocations for a specific
  82. * processor. Objects may be freed in the slab while
  83. * it is frozen but slab_free will then skip the usual
  84. * list operations. It is up to the processor holding
  85. * the slab to integrate the slab into the slab lists
  86. * when the slab is no longer needed.
  87. *
  88. * One use of this flag is to mark slabs that are
  89. * used for allocations. Then such a slab becomes a cpu
  90. * slab. The cpu slab may be equipped with an additional
  91. * lockless_freelist that allows lockless access to
  92. * free objects in addition to the regular freelist
  93. * that requires the slab lock.
  94. *
  95. * PageError Slab requires special handling due to debug
  96. * options set. This moves slab handling out of
  97. * the fast path and disables lockless freelists.
  98. */
  99. #define FROZEN (1 << PG_active)
  100. #ifdef CONFIG_SLUB_DEBUG
  101. #define SLABDEBUG (1 << PG_error)
  102. #else
  103. #define SLABDEBUG 0
  104. #endif
  105. static inline int SlabFrozen(struct page *page)
  106. {
  107. return page->flags & FROZEN;
  108. }
  109. static inline void SetSlabFrozen(struct page *page)
  110. {
  111. page->flags |= FROZEN;
  112. }
  113. static inline void ClearSlabFrozen(struct page *page)
  114. {
  115. page->flags &= ~FROZEN;
  116. }
  117. static inline int SlabDebug(struct page *page)
  118. {
  119. return page->flags & SLABDEBUG;
  120. }
  121. static inline void SetSlabDebug(struct page *page)
  122. {
  123. page->flags |= SLABDEBUG;
  124. }
  125. static inline void ClearSlabDebug(struct page *page)
  126. {
  127. page->flags &= ~SLABDEBUG;
  128. }
  129. /*
  130. * Issues still to be resolved:
  131. *
  132. * - The per cpu array is updated for each new slab and and is a remote
  133. * cacheline for most nodes. This could become a bouncing cacheline given
  134. * enough frequent updates. There are 16 pointers in a cacheline, so at
  135. * max 16 cpus could compete for the cacheline which may be okay.
  136. *
  137. * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
  138. *
  139. * - Variable sizing of the per node arrays
  140. */
  141. /* Enable to test recovery from slab corruption on boot */
  142. #undef SLUB_RESILIENCY_TEST
  143. #if PAGE_SHIFT <= 12
  144. /*
  145. * Small page size. Make sure that we do not fragment memory
  146. */
  147. #define DEFAULT_MAX_ORDER 1
  148. #define DEFAULT_MIN_OBJECTS 4
  149. #else
  150. /*
  151. * Large page machines are customarily able to handle larger
  152. * page orders.
  153. */
  154. #define DEFAULT_MAX_ORDER 2
  155. #define DEFAULT_MIN_OBJECTS 8
  156. #endif
  157. /*
  158. * Mininum number of partial slabs. These will be left on the partial
  159. * lists even if they are empty. kmem_cache_shrink may reclaim them.
  160. */
  161. #define MIN_PARTIAL 2
  162. /*
  163. * Maximum number of desirable partial slabs.
  164. * The existence of more partial slabs makes kmem_cache_shrink
  165. * sort the partial list by the number of objects in the.
  166. */
  167. #define MAX_PARTIAL 10
  168. #define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
  169. SLAB_POISON | SLAB_STORE_USER)
  170. /*
  171. * Set of flags that will prevent slab merging
  172. */
  173. #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
  174. SLAB_TRACE | SLAB_DESTROY_BY_RCU)
  175. #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
  176. SLAB_CACHE_DMA)
  177. #ifndef ARCH_KMALLOC_MINALIGN
  178. #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
  179. #endif
  180. #ifndef ARCH_SLAB_MINALIGN
  181. #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
  182. #endif
  183. /*
  184. * The page->inuse field is 16 bit thus we have this limitation
  185. */
  186. #define MAX_OBJECTS_PER_SLAB 65535
  187. /* Internal SLUB flags */
  188. #define __OBJECT_POISON 0x80000000 /* Poison object */
  189. /* Not all arches define cache_line_size */
  190. #ifndef cache_line_size
  191. #define cache_line_size() L1_CACHE_BYTES
  192. #endif
  193. static int kmem_size = sizeof(struct kmem_cache);
  194. #ifdef CONFIG_SMP
  195. static struct notifier_block slab_notifier;
  196. #endif
  197. static enum {
  198. DOWN, /* No slab functionality available */
  199. PARTIAL, /* kmem_cache_open() works but kmalloc does not */
  200. UP, /* Everything works but does not show up in sysfs */
  201. SYSFS /* Sysfs up */
  202. } slab_state = DOWN;
  203. /* A list of all slab caches on the system */
  204. static DECLARE_RWSEM(slub_lock);
  205. static LIST_HEAD(slab_caches);
  206. /*
  207. * Tracking user of a slab.
  208. */
  209. struct track {
  210. void *addr; /* Called from address */
  211. int cpu; /* Was running on cpu */
  212. int pid; /* Pid context */
  213. unsigned long when; /* When did the operation occur */
  214. };
  215. enum track_item { TRACK_ALLOC, TRACK_FREE };
  216. #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
  217. static int sysfs_slab_add(struct kmem_cache *);
  218. static int sysfs_slab_alias(struct kmem_cache *, const char *);
  219. static void sysfs_slab_remove(struct kmem_cache *);
  220. #else
  221. static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
  222. static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
  223. { return 0; }
  224. static inline void sysfs_slab_remove(struct kmem_cache *s) {}
  225. #endif
  226. /********************************************************************
  227. * Core slab cache functions
  228. *******************************************************************/
  229. int slab_is_available(void)
  230. {
  231. return slab_state >= UP;
  232. }
  233. static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
  234. {
  235. #ifdef CONFIG_NUMA
  236. return s->node[node];
  237. #else
  238. return &s->local_node;
  239. #endif
  240. }
  241. static inline int check_valid_pointer(struct kmem_cache *s,
  242. struct page *page, const void *object)
  243. {
  244. void *base;
  245. if (!object)
  246. return 1;
  247. base = page_address(page);
  248. if (object < base || object >= base + s->objects * s->size ||
  249. (object - base) % s->size) {
  250. return 0;
  251. }
  252. return 1;
  253. }
  254. /*
  255. * Slow version of get and set free pointer.
  256. *
  257. * This version requires touching the cache lines of kmem_cache which
  258. * we avoid to do in the fast alloc free paths. There we obtain the offset
  259. * from the page struct.
  260. */
  261. static inline void *get_freepointer(struct kmem_cache *s, void *object)
  262. {
  263. return *(void **)(object + s->offset);
  264. }
  265. static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
  266. {
  267. *(void **)(object + s->offset) = fp;
  268. }
  269. /* Loop over all objects in a slab */
  270. #define for_each_object(__p, __s, __addr) \
  271. for (__p = (__addr); __p < (__addr) + (__s)->objects * (__s)->size;\
  272. __p += (__s)->size)
  273. /* Scan freelist */
  274. #define for_each_free_object(__p, __s, __free) \
  275. for (__p = (__free); __p; __p = get_freepointer((__s), __p))
  276. /* Determine object index from a given position */
  277. static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
  278. {
  279. return (p - addr) / s->size;
  280. }
  281. #ifdef CONFIG_SLUB_DEBUG
  282. /*
  283. * Debug settings:
  284. */
  285. #ifdef CONFIG_SLUB_DEBUG_ON
  286. static int slub_debug = DEBUG_DEFAULT_FLAGS;
  287. #else
  288. static int slub_debug;
  289. #endif
  290. static char *slub_debug_slabs;
  291. /*
  292. * Object debugging
  293. */
  294. static void print_section(char *text, u8 *addr, unsigned int length)
  295. {
  296. int i, offset;
  297. int newline = 1;
  298. char ascii[17];
  299. ascii[16] = 0;
  300. for (i = 0; i < length; i++) {
  301. if (newline) {
  302. printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
  303. newline = 0;
  304. }
  305. printk(" %02x", addr[i]);
  306. offset = i % 16;
  307. ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
  308. if (offset == 15) {
  309. printk(" %s\n",ascii);
  310. newline = 1;
  311. }
  312. }
  313. if (!newline) {
  314. i %= 16;
  315. while (i < 16) {
  316. printk(" ");
  317. ascii[i] = ' ';
  318. i++;
  319. }
  320. printk(" %s\n", ascii);
  321. }
  322. }
  323. static struct track *get_track(struct kmem_cache *s, void *object,
  324. enum track_item alloc)
  325. {
  326. struct track *p;
  327. if (s->offset)
  328. p = object + s->offset + sizeof(void *);
  329. else
  330. p = object + s->inuse;
  331. return p + alloc;
  332. }
  333. static void set_track(struct kmem_cache *s, void *object,
  334. enum track_item alloc, void *addr)
  335. {
  336. struct track *p;
  337. if (s->offset)
  338. p = object + s->offset + sizeof(void *);
  339. else
  340. p = object + s->inuse;
  341. p += alloc;
  342. if (addr) {
  343. p->addr = addr;
  344. p->cpu = smp_processor_id();
  345. p->pid = current ? current->pid : -1;
  346. p->when = jiffies;
  347. } else
  348. memset(p, 0, sizeof(struct track));
  349. }
  350. static void init_tracking(struct kmem_cache *s, void *object)
  351. {
  352. if (!(s->flags & SLAB_STORE_USER))
  353. return;
  354. set_track(s, object, TRACK_FREE, NULL);
  355. set_track(s, object, TRACK_ALLOC, NULL);
  356. }
  357. static void print_track(const char *s, struct track *t)
  358. {
  359. if (!t->addr)
  360. return;
  361. printk(KERN_ERR "INFO: %s in ", s);
  362. __print_symbol("%s", (unsigned long)t->addr);
  363. printk(" age=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid);
  364. }
  365. static void print_tracking(struct kmem_cache *s, void *object)
  366. {
  367. if (!(s->flags & SLAB_STORE_USER))
  368. return;
  369. print_track("Allocated", get_track(s, object, TRACK_ALLOC));
  370. print_track("Freed", get_track(s, object, TRACK_FREE));
  371. }
  372. static void print_page_info(struct page *page)
  373. {
  374. printk(KERN_ERR "INFO: Slab 0x%p used=%u fp=0x%p flags=0x%04lx\n",
  375. page, page->inuse, page->freelist, page->flags);
  376. }
  377. static void slab_bug(struct kmem_cache *s, char *fmt, ...)
  378. {
  379. va_list args;
  380. char buf[100];
  381. va_start(args, fmt);
  382. vsnprintf(buf, sizeof(buf), fmt, args);
  383. va_end(args);
  384. printk(KERN_ERR "========================================"
  385. "=====================================\n");
  386. printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
  387. printk(KERN_ERR "----------------------------------------"
  388. "-------------------------------------\n\n");
  389. }
  390. static void slab_fix(struct kmem_cache *s, char *fmt, ...)
  391. {
  392. va_list args;
  393. char buf[100];
  394. va_start(args, fmt);
  395. vsnprintf(buf, sizeof(buf), fmt, args);
  396. va_end(args);
  397. printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
  398. }
  399. static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
  400. {
  401. unsigned int off; /* Offset of last byte */
  402. u8 *addr = page_address(page);
  403. print_tracking(s, p);
  404. print_page_info(page);
  405. printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
  406. p, p - addr, get_freepointer(s, p));
  407. if (p > addr + 16)
  408. print_section("Bytes b4", p - 16, 16);
  409. print_section("Object", p, min(s->objsize, 128));
  410. if (s->flags & SLAB_RED_ZONE)
  411. print_section("Redzone", p + s->objsize,
  412. s->inuse - s->objsize);
  413. if (s->offset)
  414. off = s->offset + sizeof(void *);
  415. else
  416. off = s->inuse;
  417. if (s->flags & SLAB_STORE_USER)
  418. off += 2 * sizeof(struct track);
  419. if (off != s->size)
  420. /* Beginning of the filler is the free pointer */
  421. print_section("Padding", p + off, s->size - off);
  422. dump_stack();
  423. }
  424. static void object_err(struct kmem_cache *s, struct page *page,
  425. u8 *object, char *reason)
  426. {
  427. slab_bug(s, reason);
  428. print_trailer(s, page, object);
  429. }
  430. static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
  431. {
  432. va_list args;
  433. char buf[100];
  434. va_start(args, fmt);
  435. vsnprintf(buf, sizeof(buf), fmt, args);
  436. va_end(args);
  437. slab_bug(s, fmt);
  438. print_page_info(page);
  439. dump_stack();
  440. }
  441. static void init_object(struct kmem_cache *s, void *object, int active)
  442. {
  443. u8 *p = object;
  444. if (s->flags & __OBJECT_POISON) {
  445. memset(p, POISON_FREE, s->objsize - 1);
  446. p[s->objsize -1] = POISON_END;
  447. }
  448. if (s->flags & SLAB_RED_ZONE)
  449. memset(p + s->objsize,
  450. active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
  451. s->inuse - s->objsize);
  452. }
  453. static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
  454. {
  455. while (bytes) {
  456. if (*start != (u8)value)
  457. return start;
  458. start++;
  459. bytes--;
  460. }
  461. return NULL;
  462. }
  463. static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
  464. void *from, void *to)
  465. {
  466. slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
  467. memset(from, data, to - from);
  468. }
  469. static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
  470. u8 *object, char *what,
  471. u8* start, unsigned int value, unsigned int bytes)
  472. {
  473. u8 *fault;
  474. u8 *end;
  475. fault = check_bytes(start, value, bytes);
  476. if (!fault)
  477. return 1;
  478. end = start + bytes;
  479. while (end > fault && end[-1] == value)
  480. end--;
  481. slab_bug(s, "%s overwritten", what);
  482. printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
  483. fault, end - 1, fault[0], value);
  484. print_trailer(s, page, object);
  485. restore_bytes(s, what, value, fault, end);
  486. return 0;
  487. }
  488. /*
  489. * Object layout:
  490. *
  491. * object address
  492. * Bytes of the object to be managed.
  493. * If the freepointer may overlay the object then the free
  494. * pointer is the first word of the object.
  495. *
  496. * Poisoning uses 0x6b (POISON_FREE) and the last byte is
  497. * 0xa5 (POISON_END)
  498. *
  499. * object + s->objsize
  500. * Padding to reach word boundary. This is also used for Redzoning.
  501. * Padding is extended by another word if Redzoning is enabled and
  502. * objsize == inuse.
  503. *
  504. * We fill with 0xbb (RED_INACTIVE) for inactive objects and with
  505. * 0xcc (RED_ACTIVE) for objects in use.
  506. *
  507. * object + s->inuse
  508. * Meta data starts here.
  509. *
  510. * A. Free pointer (if we cannot overwrite object on free)
  511. * B. Tracking data for SLAB_STORE_USER
  512. * C. Padding to reach required alignment boundary or at mininum
  513. * one word if debuggin is on to be able to detect writes
  514. * before the word boundary.
  515. *
  516. * Padding is done using 0x5a (POISON_INUSE)
  517. *
  518. * object + s->size
  519. * Nothing is used beyond s->size.
  520. *
  521. * If slabcaches are merged then the objsize and inuse boundaries are mostly
  522. * ignored. And therefore no slab options that rely on these boundaries
  523. * may be used with merged slabcaches.
  524. */
  525. static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
  526. {
  527. unsigned long off = s->inuse; /* The end of info */
  528. if (s->offset)
  529. /* Freepointer is placed after the object. */
  530. off += sizeof(void *);
  531. if (s->flags & SLAB_STORE_USER)
  532. /* We also have user information there */
  533. off += 2 * sizeof(struct track);
  534. if (s->size == off)
  535. return 1;
  536. return check_bytes_and_report(s, page, p, "Object padding",
  537. p + off, POISON_INUSE, s->size - off);
  538. }
  539. static int slab_pad_check(struct kmem_cache *s, struct page *page)
  540. {
  541. u8 *start;
  542. u8 *fault;
  543. u8 *end;
  544. int length;
  545. int remainder;
  546. if (!(s->flags & SLAB_POISON))
  547. return 1;
  548. start = page_address(page);
  549. end = start + (PAGE_SIZE << s->order);
  550. length = s->objects * s->size;
  551. remainder = end - (start + length);
  552. if (!remainder)
  553. return 1;
  554. fault = check_bytes(start + length, POISON_INUSE, remainder);
  555. if (!fault)
  556. return 1;
  557. while (end > fault && end[-1] == POISON_INUSE)
  558. end--;
  559. slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
  560. print_section("Padding", start, length);
  561. restore_bytes(s, "slab padding", POISON_INUSE, start, end);
  562. return 0;
  563. }
  564. static int check_object(struct kmem_cache *s, struct page *page,
  565. void *object, int active)
  566. {
  567. u8 *p = object;
  568. u8 *endobject = object + s->objsize;
  569. if (s->flags & SLAB_RED_ZONE) {
  570. unsigned int red =
  571. active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;
  572. if (!check_bytes_and_report(s, page, object, "Redzone",
  573. endobject, red, s->inuse - s->objsize))
  574. return 0;
  575. } else {
  576. if ((s->flags & SLAB_POISON) && s->objsize < s->inuse)
  577. check_bytes_and_report(s, page, p, "Alignment padding", endobject,
  578. POISON_INUSE, s->inuse - s->objsize);
  579. }
  580. if (s->flags & SLAB_POISON) {
  581. if (!active && (s->flags & __OBJECT_POISON) &&
  582. (!check_bytes_and_report(s, page, p, "Poison", p,
  583. POISON_FREE, s->objsize - 1) ||
  584. !check_bytes_and_report(s, page, p, "Poison",
  585. p + s->objsize -1, POISON_END, 1)))
  586. return 0;
  587. /*
  588. * check_pad_bytes cleans up on its own.
  589. */
  590. check_pad_bytes(s, page, p);
  591. }
  592. if (!s->offset && active)
  593. /*
  594. * Object and freepointer overlap. Cannot check
  595. * freepointer while object is allocated.
  596. */
  597. return 1;
  598. /* Check free pointer validity */
  599. if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
  600. object_err(s, page, p, "Freepointer corrupt");
  601. /*
  602. * No choice but to zap it and thus loose the remainder
  603. * of the free objects in this slab. May cause
  604. * another error because the object count is now wrong.
  605. */
  606. set_freepointer(s, p, NULL);
  607. return 0;
  608. }
  609. return 1;
  610. }
  611. static int check_slab(struct kmem_cache *s, struct page *page)
  612. {
  613. VM_BUG_ON(!irqs_disabled());
  614. if (!PageSlab(page)) {
  615. slab_err(s, page, "Not a valid slab page");
  616. return 0;
  617. }
  618. if (page->offset * sizeof(void *) != s->offset) {
  619. slab_err(s, page, "Corrupted offset %lu",
  620. (unsigned long)(page->offset * sizeof(void *)));
  621. return 0;
  622. }
  623. if (page->inuse > s->objects) {
  624. slab_err(s, page, "inuse %u > max %u",
  625. s->name, page->inuse, s->objects);
  626. return 0;
  627. }
  628. /* Slab_pad_check fixes things up after itself */
  629. slab_pad_check(s, page);
  630. return 1;
  631. }
  632. /*
  633. * Determine if a certain object on a page is on the freelist. Must hold the
  634. * slab lock to guarantee that the chains are in a consistent state.
  635. */
  636. static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
  637. {
  638. int nr = 0;
  639. void *fp = page->freelist;
  640. void *object = NULL;
  641. while (fp && nr <= s->objects) {
  642. if (fp == search)
  643. return 1;
  644. if (!check_valid_pointer(s, page, fp)) {
  645. if (object) {
  646. object_err(s, page, object,
  647. "Freechain corrupt");
  648. set_freepointer(s, object, NULL);
  649. break;
  650. } else {
  651. slab_err(s, page, "Freepointer corrupt");
  652. page->freelist = NULL;
  653. page->inuse = s->objects;
  654. slab_fix(s, "Freelist cleared");
  655. return 0;
  656. }
  657. break;
  658. }
  659. object = fp;
  660. fp = get_freepointer(s, object);
  661. nr++;
  662. }
  663. if (page->inuse != s->objects - nr) {
  664. slab_err(s, page, "Wrong object count. Counter is %d but "
  665. "counted were %d", page->inuse, s->objects - nr);
  666. page->inuse = s->objects - nr;
  667. slab_fix(s, "Object count adjusted.");
  668. }
  669. return search == NULL;
  670. }
  671. static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
  672. {
  673. if (s->flags & SLAB_TRACE) {
  674. printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
  675. s->name,
  676. alloc ? "alloc" : "free",
  677. object, page->inuse,
  678. page->freelist);
  679. if (!alloc)
  680. print_section("Object", (void *)object, s->objsize);
  681. dump_stack();
  682. }
  683. }
  684. /*
  685. * Tracking of fully allocated slabs for debugging purposes.
  686. */
  687. static void add_full(struct kmem_cache_node *n, struct page *page)
  688. {
  689. spin_lock(&n->list_lock);
  690. list_add(&page->lru, &n->full);
  691. spin_unlock(&n->list_lock);
  692. }
  693. static void remove_full(struct kmem_cache *s, struct page *page)
  694. {
  695. struct kmem_cache_node *n;
  696. if (!(s->flags & SLAB_STORE_USER))
  697. return;
  698. n = get_node(s, page_to_nid(page));
  699. spin_lock(&n->list_lock);
  700. list_del(&page->lru);
  701. spin_unlock(&n->list_lock);
  702. }
  703. static void setup_object_debug(struct kmem_cache *s, struct page *page,
  704. void *object)
  705. {
  706. if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
  707. return;
  708. init_object(s, object, 0);
  709. init_tracking(s, object);
  710. }
  711. static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
  712. void *object, void *addr)
  713. {
  714. if (!check_slab(s, page))
  715. goto bad;
  716. if (object && !on_freelist(s, page, object)) {
  717. object_err(s, page, object, "Object already allocated");
  718. goto bad;
  719. }
  720. if (!check_valid_pointer(s, page, object)) {
  721. object_err(s, page, object, "Freelist Pointer check fails");
  722. goto bad;
  723. }
  724. if (object && !check_object(s, page, object, 0))
  725. goto bad;
  726. /* Success perform special debug activities for allocs */
  727. if (s->flags & SLAB_STORE_USER)
  728. set_track(s, object, TRACK_ALLOC, addr);
  729. trace(s, page, object, 1);
  730. init_object(s, object, 1);
  731. return 1;
  732. bad:
  733. if (PageSlab(page)) {
  734. /*
  735. * If this is a slab page then lets do the best we can
  736. * to avoid issues in the future. Marking all objects
  737. * as used avoids touching the remaining objects.
  738. */
  739. slab_fix(s, "Marking all objects used");
  740. page->inuse = s->objects;
  741. page->freelist = NULL;
  742. /* Fix up fields that may be corrupted */
  743. page->offset = s->offset / sizeof(void *);
  744. }
  745. return 0;
  746. }
  747. static int free_debug_processing(struct kmem_cache *s, struct page *page,
  748. void *object, void *addr)
  749. {
  750. if (!check_slab(s, page))
  751. goto fail;
  752. if (!check_valid_pointer(s, page, object)) {
  753. slab_err(s, page, "Invalid object pointer 0x%p", object);
  754. goto fail;
  755. }
  756. if (on_freelist(s, page, object)) {
  757. object_err(s, page, object, "Object already free");
  758. goto fail;
  759. }
  760. if (!check_object(s, page, object, 1))
  761. return 0;
  762. if (unlikely(s != page->slab)) {
  763. if (!PageSlab(page))
  764. slab_err(s, page, "Attempt to free object(0x%p) "
  765. "outside of slab", object);
  766. else
  767. if (!page->slab) {
  768. printk(KERN_ERR
  769. "SLUB <none>: no slab for object 0x%p.\n",
  770. object);
  771. dump_stack();
  772. }
  773. else
  774. object_err(s, page, object,
  775. "page slab pointer corrupt.");
  776. goto fail;
  777. }
  778. /* Special debug activities for freeing objects */
  779. if (!SlabFrozen(page) && !page->freelist)
  780. remove_full(s, page);
  781. if (s->flags & SLAB_STORE_USER)
  782. set_track(s, object, TRACK_FREE, addr);
  783. trace(s, page, object, 0);
  784. init_object(s, object, 0);
  785. return 1;
  786. fail:
  787. slab_fix(s, "Object at 0x%p not freed", object);
  788. return 0;
  789. }
  790. static int __init setup_slub_debug(char *str)
  791. {
  792. slub_debug = DEBUG_DEFAULT_FLAGS;
  793. if (*str++ != '=' || !*str)
  794. /*
  795. * No options specified. Switch on full debugging.
  796. */
  797. goto out;
  798. if (*str == ',')
  799. /*
  800. * No options but restriction on slabs. This means full
  801. * debugging for slabs matching a pattern.
  802. */
  803. goto check_slabs;
  804. slub_debug = 0;
  805. if (*str == '-')
  806. /*
  807. * Switch off all debugging measures.
  808. */
  809. goto out;
  810. /*
  811. * Determine which debug features should be switched on
  812. */
  813. for ( ;*str && *str != ','; str++) {
  814. switch (tolower(*str)) {
  815. case 'f':
  816. slub_debug |= SLAB_DEBUG_FREE;
  817. break;
  818. case 'z':
  819. slub_debug |= SLAB_RED_ZONE;
  820. break;
  821. case 'p':
  822. slub_debug |= SLAB_POISON;
  823. break;
  824. case 'u':
  825. slub_debug |= SLAB_STORE_USER;
  826. break;
  827. case 't':
  828. slub_debug |= SLAB_TRACE;
  829. break;
  830. default:
  831. printk(KERN_ERR "slub_debug option '%c' "
  832. "unknown. skipped\n",*str);
  833. }
  834. }
  835. check_slabs:
  836. if (*str == ',')
  837. slub_debug_slabs = str + 1;
  838. out:
  839. return 1;
  840. }
  841. __setup("slub_debug", setup_slub_debug);
  842. static void kmem_cache_open_debug_check(struct kmem_cache *s)
  843. {
  844. /*
  845. * The page->offset field is only 16 bit wide. This is an offset
  846. * in units of words from the beginning of an object. If the slab
  847. * size is bigger then we cannot move the free pointer behind the
  848. * object anymore.
  849. *
  850. * On 32 bit platforms the limit is 256k. On 64bit platforms
  851. * the limit is 512k.
  852. *
  853. * Debugging or ctor may create a need to move the free
  854. * pointer. Fail if this happens.
  855. */
  856. if (s->objsize >= 65535 * sizeof(void *)) {
  857. BUG_ON(s->flags & (SLAB_RED_ZONE | SLAB_POISON |
  858. SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
  859. BUG_ON(s->ctor);
  860. }
  861. else
  862. /*
  863. * Enable debugging if selected on the kernel commandline.
  864. */
  865. if (slub_debug && (!slub_debug_slabs ||
  866. strncmp(slub_debug_slabs, s->name,
  867. strlen(slub_debug_slabs)) == 0))
  868. s->flags |= slub_debug;
  869. }
  870. #else
  871. static inline void setup_object_debug(struct kmem_cache *s,
  872. struct page *page, void *object) {}
  873. static inline int alloc_debug_processing(struct kmem_cache *s,
  874. struct page *page, void *object, void *addr) { return 0; }
  875. static inline int free_debug_processing(struct kmem_cache *s,
  876. struct page *page, void *object, void *addr) { return 0; }
  877. static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
  878. { return 1; }
  879. static inline int check_object(struct kmem_cache *s, struct page *page,
  880. void *object, int active) { return 1; }
  881. static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
  882. static inline void kmem_cache_open_debug_check(struct kmem_cache *s) {}
  883. #define slub_debug 0
  884. #endif
  885. /*
  886. * Slab allocation and freeing
  887. */
  888. static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
  889. {
  890. struct page * page;
  891. int pages = 1 << s->order;
  892. if (s->order)
  893. flags |= __GFP_COMP;
  894. if (s->flags & SLAB_CACHE_DMA)
  895. flags |= SLUB_DMA;
  896. if (node == -1)
  897. page = alloc_pages(flags, s->order);
  898. else
  899. page = alloc_pages_node(node, flags, s->order);
  900. if (!page)
  901. return NULL;
  902. mod_zone_page_state(page_zone(page),
  903. (s->flags & SLAB_RECLAIM_ACCOUNT) ?
  904. NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
  905. pages);
  906. return page;
  907. }
  908. static void setup_object(struct kmem_cache *s, struct page *page,
  909. void *object)
  910. {
  911. setup_object_debug(s, page, object);
  912. if (unlikely(s->ctor))
  913. s->ctor(object, s, 0);
  914. }
  915. static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
  916. {
  917. struct page *page;
  918. struct kmem_cache_node *n;
  919. void *start;
  920. void *end;
  921. void *last;
  922. void *p;
  923. BUG_ON(flags & ~(GFP_DMA | __GFP_ZERO | GFP_LEVEL_MASK));
  924. if (flags & __GFP_WAIT)
  925. local_irq_enable();
  926. page = allocate_slab(s, flags & GFP_LEVEL_MASK, node);
  927. if (!page)
  928. goto out;
  929. n = get_node(s, page_to_nid(page));
  930. if (n)
  931. atomic_long_inc(&n->nr_slabs);
  932. page->offset = s->offset / sizeof(void *);
  933. page->slab = s;
  934. page->flags |= 1 << PG_slab;
  935. if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
  936. SLAB_STORE_USER | SLAB_TRACE))
  937. SetSlabDebug(page);
  938. start = page_address(page);
  939. end = start + s->objects * s->size;
  940. if (unlikely(s->flags & SLAB_POISON))
  941. memset(start, POISON_INUSE, PAGE_SIZE << s->order);
  942. last = start;
  943. for_each_object(p, s, start) {
  944. setup_object(s, page, last);
  945. set_freepointer(s, last, p);
  946. last = p;
  947. }
  948. setup_object(s, page, last);
  949. set_freepointer(s, last, NULL);
  950. page->freelist = start;
  951. page->lockless_freelist = NULL;
  952. page->inuse = 0;
  953. out:
  954. if (flags & __GFP_WAIT)
  955. local_irq_disable();
  956. return page;
  957. }
  958. static void __free_slab(struct kmem_cache *s, struct page *page)
  959. {
  960. int pages = 1 << s->order;
  961. if (unlikely(SlabDebug(page))) {
  962. void *p;
  963. slab_pad_check(s, page);
  964. for_each_object(p, s, page_address(page))
  965. check_object(s, page, p, 0);
  966. }
  967. mod_zone_page_state(page_zone(page),
  968. (s->flags & SLAB_RECLAIM_ACCOUNT) ?
  969. NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
  970. - pages);
  971. page->mapping = NULL;
  972. __free_pages(page, s->order);
  973. }
  974. static void rcu_free_slab(struct rcu_head *h)
  975. {
  976. struct page *page;
  977. page = container_of((struct list_head *)h, struct page, lru);
  978. __free_slab(page->slab, page);
  979. }
  980. static void free_slab(struct kmem_cache *s, struct page *page)
  981. {
  982. if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
  983. /*
  984. * RCU free overloads the RCU head over the LRU
  985. */
  986. struct rcu_head *head = (void *)&page->lru;
  987. call_rcu(head, rcu_free_slab);
  988. } else
  989. __free_slab(s, page);
  990. }
  991. static void discard_slab(struct kmem_cache *s, struct page *page)
  992. {
  993. struct kmem_cache_node *n = get_node(s, page_to_nid(page));
  994. atomic_long_dec(&n->nr_slabs);
  995. reset_page_mapcount(page);
  996. ClearSlabDebug(page);
  997. __ClearPageSlab(page);
  998. free_slab(s, page);
  999. }
  1000. /*
  1001. * Per slab locking using the pagelock
  1002. */
  1003. static __always_inline void slab_lock(struct page *page)
  1004. {
  1005. bit_spin_lock(PG_locked, &page->flags);
  1006. }
  1007. static __always_inline void slab_unlock(struct page *page)
  1008. {
  1009. bit_spin_unlock(PG_locked, &page->flags);
  1010. }
  1011. static __always_inline int slab_trylock(struct page *page)
  1012. {
  1013. int rc = 1;
  1014. rc = bit_spin_trylock(PG_locked, &page->flags);
  1015. return rc;
  1016. }
  1017. /*
  1018. * Management of partially allocated slabs
  1019. */
  1020. static void add_partial_tail(struct kmem_cache_node *n, struct page *page)
  1021. {
  1022. spin_lock(&n->list_lock);
  1023. n->nr_partial++;
  1024. list_add_tail(&page->lru, &n->partial);
  1025. spin_unlock(&n->list_lock);
  1026. }
  1027. static void add_partial(struct kmem_cache_node *n, struct page *page)
  1028. {
  1029. spin_lock(&n->list_lock);
  1030. n->nr_partial++;
  1031. list_add(&page->lru, &n->partial);
  1032. spin_unlock(&n->list_lock);
  1033. }
  1034. static void remove_partial(struct kmem_cache *s,
  1035. struct page *page)
  1036. {
  1037. struct kmem_cache_node *n = get_node(s, page_to_nid(page));
  1038. spin_lock(&n->list_lock);
  1039. list_del(&page->lru);
  1040. n->nr_partial--;
  1041. spin_unlock(&n->list_lock);
  1042. }
  1043. /*
  1044. * Lock slab and remove from the partial list.
  1045. *
  1046. * Must hold list_lock.
  1047. */
  1048. static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page)
  1049. {
  1050. if (slab_trylock(page)) {
  1051. list_del(&page->lru);
  1052. n->nr_partial--;
  1053. SetSlabFrozen(page);
  1054. return 1;
  1055. }
  1056. return 0;
  1057. }
  1058. /*
  1059. * Try to allocate a partial slab from a specific node.
  1060. */
  1061. static struct page *get_partial_node(struct kmem_cache_node *n)
  1062. {
  1063. struct page *page;
  1064. /*
  1065. * Racy check. If we mistakenly see no partial slabs then we
  1066. * just allocate an empty slab. If we mistakenly try to get a
  1067. * partial slab and there is none available then get_partials()
  1068. * will return NULL.
  1069. */
  1070. if (!n || !n->nr_partial)
  1071. return NULL;
  1072. spin_lock(&n->list_lock);
  1073. list_for_each_entry(page, &n->partial, lru)
  1074. if (lock_and_freeze_slab(n, page))
  1075. goto out;
  1076. page = NULL;
  1077. out:
  1078. spin_unlock(&n->list_lock);
  1079. return page;
  1080. }
  1081. /*
  1082. * Get a page from somewhere. Search in increasing NUMA distances.
  1083. */
  1084. static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
  1085. {
  1086. #ifdef CONFIG_NUMA
  1087. struct zonelist *zonelist;
  1088. struct zone **z;
  1089. struct page *page;
  1090. /*
  1091. * The defrag ratio allows a configuration of the tradeoffs between
  1092. * inter node defragmentation and node local allocations. A lower
  1093. * defrag_ratio increases the tendency to do local allocations
  1094. * instead of attempting to obtain partial slabs from other nodes.
  1095. *
  1096. * If the defrag_ratio is set to 0 then kmalloc() always
  1097. * returns node local objects. If the ratio is higher then kmalloc()
  1098. * may return off node objects because partial slabs are obtained
  1099. * from other nodes and filled up.
  1100. *
  1101. * If /sys/slab/xx/defrag_ratio is set to 100 (which makes
  1102. * defrag_ratio = 1000) then every (well almost) allocation will
  1103. * first attempt to defrag slab caches on other nodes. This means
  1104. * scanning over all nodes to look for partial slabs which may be
  1105. * expensive if we do it every time we are trying to find a slab
  1106. * with available objects.
  1107. */
  1108. if (!s->defrag_ratio || get_cycles() % 1024 > s->defrag_ratio)
  1109. return NULL;
  1110. zonelist = &NODE_DATA(slab_node(current->mempolicy))
  1111. ->node_zonelists[gfp_zone(flags)];
  1112. for (z = zonelist->zones; *z; z++) {
  1113. struct kmem_cache_node *n;
  1114. n = get_node(s, zone_to_nid(*z));
  1115. if (n && cpuset_zone_allowed_hardwall(*z, flags) &&
  1116. n->nr_partial > MIN_PARTIAL) {
  1117. page = get_partial_node(n);
  1118. if (page)
  1119. return page;
  1120. }
  1121. }
  1122. #endif
  1123. return NULL;
  1124. }
  1125. /*
  1126. * Get a partial page, lock it and return it.
  1127. */
  1128. static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
  1129. {
  1130. struct page *page;
  1131. int searchnode = (node == -1) ? numa_node_id() : node;
  1132. page = get_partial_node(get_node(s, searchnode));
  1133. if (page || (flags & __GFP_THISNODE))
  1134. return page;
  1135. return get_any_partial(s, flags);
  1136. }
  1137. /*
  1138. * Move a page back to the lists.
  1139. *
  1140. * Must be called with the slab lock held.
  1141. *
  1142. * On exit the slab lock will have been dropped.
  1143. */
  1144. static void unfreeze_slab(struct kmem_cache *s, struct page *page)
  1145. {
  1146. struct kmem_cache_node *n = get_node(s, page_to_nid(page));
  1147. ClearSlabFrozen(page);
  1148. if (page->inuse) {
  1149. if (page->freelist)
  1150. add_partial(n, page);
  1151. else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
  1152. add_full(n, page);
  1153. slab_unlock(page);
  1154. } else {
  1155. if (n->nr_partial < MIN_PARTIAL) {
  1156. /*
  1157. * Adding an empty slab to the partial slabs in order
  1158. * to avoid page allocator overhead. This slab needs
  1159. * to come after the other slabs with objects in
  1160. * order to fill them up. That way the size of the
  1161. * partial list stays small. kmem_cache_shrink can
  1162. * reclaim empty slabs from the partial list.
  1163. */
  1164. add_partial_tail(n, page);
  1165. slab_unlock(page);
  1166. } else {
  1167. slab_unlock(page);
  1168. discard_slab(s, page);
  1169. }
  1170. }
  1171. }
  1172. /*
  1173. * Remove the cpu slab
  1174. */
  1175. static void deactivate_slab(struct kmem_cache *s, struct page *page, int cpu)
  1176. {
  1177. /*
  1178. * Merge cpu freelist into freelist. Typically we get here
  1179. * because both freelists are empty. So this is unlikely
  1180. * to occur.
  1181. */
  1182. while (unlikely(page->lockless_freelist)) {
  1183. void **object;
  1184. /* Retrieve object from cpu_freelist */
  1185. object = page->lockless_freelist;
  1186. page->lockless_freelist = page->lockless_freelist[page->offset];
  1187. /* And put onto the regular freelist */
  1188. object[page->offset] = page->freelist;
  1189. page->freelist = object;
  1190. page->inuse--;
  1191. }
  1192. s->cpu_slab[cpu] = NULL;
  1193. unfreeze_slab(s, page);
  1194. }
  1195. static inline void flush_slab(struct kmem_cache *s, struct page *page, int cpu)
  1196. {
  1197. slab_lock(page);
  1198. deactivate_slab(s, page, cpu);
  1199. }
  1200. /*
  1201. * Flush cpu slab.
  1202. * Called from IPI handler with interrupts disabled.
  1203. */
  1204. static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
  1205. {
  1206. struct page *page = s->cpu_slab[cpu];
  1207. if (likely(page))
  1208. flush_slab(s, page, cpu);
  1209. }
  1210. static void flush_cpu_slab(void *d)
  1211. {
  1212. struct kmem_cache *s = d;
  1213. int cpu = smp_processor_id();
  1214. __flush_cpu_slab(s, cpu);
  1215. }
  1216. static void flush_all(struct kmem_cache *s)
  1217. {
  1218. #ifdef CONFIG_SMP
  1219. on_each_cpu(flush_cpu_slab, s, 1, 1);
  1220. #else
  1221. unsigned long flags;
  1222. local_irq_save(flags);
  1223. flush_cpu_slab(s);
  1224. local_irq_restore(flags);
  1225. #endif
  1226. }
  1227. /*
  1228. * Slow path. The lockless freelist is empty or we need to perform
  1229. * debugging duties.
  1230. *
  1231. * Interrupts are disabled.
  1232. *
  1233. * Processing is still very fast if new objects have been freed to the
  1234. * regular freelist. In that case we simply take over the regular freelist
  1235. * as the lockless freelist and zap the regular freelist.
  1236. *
  1237. * If that is not working then we fall back to the partial lists. We take the
  1238. * first element of the freelist as the object to allocate now and move the
  1239. * rest of the freelist to the lockless freelist.
  1240. *
  1241. * And if we were unable to get a new slab from the partial slab lists then
  1242. * we need to allocate a new slab. This is slowest path since we may sleep.
  1243. */
  1244. static void *__slab_alloc(struct kmem_cache *s,
  1245. gfp_t gfpflags, int node, void *addr, struct page *page)
  1246. {
  1247. void **object;
  1248. int cpu = smp_processor_id();
  1249. if (!page)
  1250. goto new_slab;
  1251. slab_lock(page);
  1252. if (unlikely(node != -1 && page_to_nid(page) != node))
  1253. goto another_slab;
  1254. load_freelist:
  1255. object = page->freelist;
  1256. if (unlikely(!object))
  1257. goto another_slab;
  1258. if (unlikely(SlabDebug(page)))
  1259. goto debug;
  1260. object = page->freelist;
  1261. page->lockless_freelist = object[page->offset];
  1262. page->inuse = s->objects;
  1263. page->freelist = NULL;
  1264. slab_unlock(page);
  1265. return object;
  1266. another_slab:
  1267. deactivate_slab(s, page, cpu);
  1268. new_slab:
  1269. page = get_partial(s, gfpflags, node);
  1270. if (page) {
  1271. s->cpu_slab[cpu] = page;
  1272. goto load_freelist;
  1273. }
  1274. page = new_slab(s, gfpflags, node);
  1275. if (page) {
  1276. cpu = smp_processor_id();
  1277. if (s->cpu_slab[cpu]) {
  1278. /*
  1279. * Someone else populated the cpu_slab while we
  1280. * enabled interrupts, or we have gotten scheduled
  1281. * on another cpu. The page may not be on the
  1282. * requested node even if __GFP_THISNODE was
  1283. * specified. So we need to recheck.
  1284. */
  1285. if (node == -1 ||
  1286. page_to_nid(s->cpu_slab[cpu]) == node) {
  1287. /*
  1288. * Current cpuslab is acceptable and we
  1289. * want the current one since its cache hot
  1290. */
  1291. discard_slab(s, page);
  1292. page = s->cpu_slab[cpu];
  1293. slab_lock(page);
  1294. goto load_freelist;
  1295. }
  1296. /* New slab does not fit our expectations */
  1297. flush_slab(s, s->cpu_slab[cpu], cpu);
  1298. }
  1299. slab_lock(page);
  1300. SetSlabFrozen(page);
  1301. s->cpu_slab[cpu] = page;
  1302. goto load_freelist;
  1303. }
  1304. return NULL;
  1305. debug:
  1306. object = page->freelist;
  1307. if (!alloc_debug_processing(s, page, object, addr))
  1308. goto another_slab;
  1309. page->inuse++;
  1310. page->freelist = object[page->offset];
  1311. slab_unlock(page);
  1312. return object;
  1313. }
  1314. /*
  1315. * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
  1316. * have the fastpath folded into their functions. So no function call
  1317. * overhead for requests that can be satisfied on the fastpath.
  1318. *
  1319. * The fastpath works by first checking if the lockless freelist can be used.
  1320. * If not then __slab_alloc is called for slow processing.
  1321. *
  1322. * Otherwise we can simply pick the next object from the lockless free list.
  1323. */
  1324. static void __always_inline *slab_alloc(struct kmem_cache *s,
  1325. gfp_t gfpflags, int node, void *addr)
  1326. {
  1327. struct page *page;
  1328. void **object;
  1329. unsigned long flags;
  1330. local_irq_save(flags);
  1331. page = s->cpu_slab[smp_processor_id()];
  1332. if (unlikely(!page || !page->lockless_freelist ||
  1333. (node != -1 && page_to_nid(page) != node)))
  1334. object = __slab_alloc(s, gfpflags, node, addr, page);
  1335. else {
  1336. object = page->lockless_freelist;
  1337. page->lockless_freelist = object[page->offset];
  1338. }
  1339. local_irq_restore(flags);
  1340. if (unlikely((gfpflags & __GFP_ZERO) && object))
  1341. memset(object, 0, s->objsize);
  1342. return object;
  1343. }
  1344. void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
  1345. {
  1346. return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
  1347. }
  1348. EXPORT_SYMBOL(kmem_cache_alloc);
  1349. #ifdef CONFIG_NUMA
  1350. void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
  1351. {
  1352. return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
  1353. }
  1354. EXPORT_SYMBOL(kmem_cache_alloc_node);
  1355. #endif
  1356. /*
  1357. * Slow patch handling. This may still be called frequently since objects
  1358. * have a longer lifetime than the cpu slabs in most processing loads.
  1359. *
  1360. * So we still attempt to reduce cache line usage. Just take the slab
  1361. * lock and free the item. If there is no additional partial page
  1362. * handling required then we can return immediately.
  1363. */
  1364. static void __slab_free(struct kmem_cache *s, struct page *page,
  1365. void *x, void *addr)
  1366. {
  1367. void *prior;
  1368. void **object = (void *)x;
  1369. slab_lock(page);
  1370. if (unlikely(SlabDebug(page)))
  1371. goto debug;
  1372. checks_ok:
  1373. prior = object[page->offset] = page->freelist;
  1374. page->freelist = object;
  1375. page->inuse--;
  1376. if (unlikely(SlabFrozen(page)))
  1377. goto out_unlock;
  1378. if (unlikely(!page->inuse))
  1379. goto slab_empty;
  1380. /*
  1381. * Objects left in the slab. If it
  1382. * was not on the partial list before
  1383. * then add it.
  1384. */
  1385. if (unlikely(!prior))
  1386. add_partial(get_node(s, page_to_nid(page)), page);
  1387. out_unlock:
  1388. slab_unlock(page);
  1389. return;
  1390. slab_empty:
  1391. if (prior)
  1392. /*
  1393. * Slab still on the partial list.
  1394. */
  1395. remove_partial(s, page);
  1396. slab_unlock(page);
  1397. discard_slab(s, page);
  1398. return;
  1399. debug:
  1400. if (!free_debug_processing(s, page, x, addr))
  1401. goto out_unlock;
  1402. goto checks_ok;
  1403. }
  1404. /*
  1405. * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
  1406. * can perform fastpath freeing without additional function calls.
  1407. *
  1408. * The fastpath is only possible if we are freeing to the current cpu slab
  1409. * of this processor. This typically the case if we have just allocated
  1410. * the item before.
  1411. *
  1412. * If fastpath is not possible then fall back to __slab_free where we deal
  1413. * with all sorts of special processing.
  1414. */
  1415. static void __always_inline slab_free(struct kmem_cache *s,
  1416. struct page *page, void *x, void *addr)
  1417. {
  1418. void **object = (void *)x;
  1419. unsigned long flags;
  1420. local_irq_save(flags);
  1421. if (likely(page == s->cpu_slab[smp_processor_id()] &&
  1422. !SlabDebug(page))) {
  1423. object[page->offset] = page->lockless_freelist;
  1424. page->lockless_freelist = object;
  1425. } else
  1426. __slab_free(s, page, x, addr);
  1427. local_irq_restore(flags);
  1428. }
  1429. void kmem_cache_free(struct kmem_cache *s, void *x)
  1430. {
  1431. struct page *page;
  1432. page = virt_to_head_page(x);
  1433. slab_free(s, page, x, __builtin_return_address(0));
  1434. }
  1435. EXPORT_SYMBOL(kmem_cache_free);
  1436. /* Figure out on which slab object the object resides */
  1437. static struct page *get_object_page(const void *x)
  1438. {
  1439. struct page *page = virt_to_head_page(x);
  1440. if (!PageSlab(page))
  1441. return NULL;
  1442. return page;
  1443. }
  1444. /*
  1445. * Object placement in a slab is made very easy because we always start at
  1446. * offset 0. If we tune the size of the object to the alignment then we can
  1447. * get the required alignment by putting one properly sized object after
  1448. * another.
  1449. *
  1450. * Notice that the allocation order determines the sizes of the per cpu
  1451. * caches. Each processor has always one slab available for allocations.
  1452. * Increasing the allocation order reduces the number of times that slabs
  1453. * must be moved on and off the partial lists and is therefore a factor in
  1454. * locking overhead.
  1455. */
  1456. /*
  1457. * Mininum / Maximum order of slab pages. This influences locking overhead
  1458. * and slab fragmentation. A higher order reduces the number of partial slabs
  1459. * and increases the number of allocations possible without having to
  1460. * take the list_lock.
  1461. */
  1462. static int slub_min_order;
  1463. static int slub_max_order = DEFAULT_MAX_ORDER;
  1464. static int slub_min_objects = DEFAULT_MIN_OBJECTS;
  1465. /*
  1466. * Merge control. If this is set then no merging of slab caches will occur.
  1467. * (Could be removed. This was introduced to pacify the merge skeptics.)
  1468. */
  1469. static int slub_nomerge;
  1470. /*
  1471. * Calculate the order of allocation given an slab object size.
  1472. *
  1473. * The order of allocation has significant impact on performance and other
  1474. * system components. Generally order 0 allocations should be preferred since
  1475. * order 0 does not cause fragmentation in the page allocator. Larger objects
  1476. * be problematic to put into order 0 slabs because there may be too much
  1477. * unused space left. We go to a higher order if more than 1/8th of the slab
  1478. * would be wasted.
  1479. *
  1480. * In order to reach satisfactory performance we must ensure that a minimum
  1481. * number of objects is in one slab. Otherwise we may generate too much
  1482. * activity on the partial lists which requires taking the list_lock. This is
  1483. * less a concern for large slabs though which are rarely used.
  1484. *
  1485. * slub_max_order specifies the order where we begin to stop considering the
  1486. * number of objects in a slab as critical. If we reach slub_max_order then
  1487. * we try to keep the page order as low as possible. So we accept more waste
  1488. * of space in favor of a small page order.
  1489. *
  1490. * Higher order allocations also allow the placement of more objects in a
  1491. * slab and thereby reduce object handling overhead. If the user has
  1492. * requested a higher mininum order then we start with that one instead of
  1493. * the smallest order which will fit the object.
  1494. */
  1495. static inline int slab_order(int size, int min_objects,
  1496. int max_order, int fract_leftover)
  1497. {
  1498. int order;
  1499. int rem;
  1500. int min_order = slub_min_order;
  1501. /*
  1502. * If we would create too many object per slab then reduce
  1503. * the slab order even if it goes below slub_min_order.
  1504. */
  1505. while (min_order > 0 &&
  1506. (PAGE_SIZE << min_order) >= MAX_OBJECTS_PER_SLAB * size)
  1507. min_order--;
  1508. for (order = max(min_order,
  1509. fls(min_objects * size - 1) - PAGE_SHIFT);
  1510. order <= max_order; order++) {
  1511. unsigned long slab_size = PAGE_SIZE << order;
  1512. if (slab_size < min_objects * size)
  1513. continue;
  1514. rem = slab_size % size;
  1515. if (rem <= slab_size / fract_leftover)
  1516. break;
  1517. /* If the next size is too high then exit now */
  1518. if (slab_size * 2 >= MAX_OBJECTS_PER_SLAB * size)
  1519. break;
  1520. }
  1521. return order;
  1522. }
  1523. static inline int calculate_order(int size)
  1524. {
  1525. int order;
  1526. int min_objects;
  1527. int fraction;
  1528. /*
  1529. * Attempt to find best configuration for a slab. This
  1530. * works by first attempting to generate a layout with
  1531. * the best configuration and backing off gradually.
  1532. *
  1533. * First we reduce the acceptable waste in a slab. Then
  1534. * we reduce the minimum objects required in a slab.
  1535. */
  1536. min_objects = slub_min_objects;
  1537. while (min_objects > 1) {
  1538. fraction = 8;
  1539. while (fraction >= 4) {
  1540. order = slab_order(size, min_objects,
  1541. slub_max_order, fraction);
  1542. if (order <= slub_max_order)
  1543. return order;
  1544. fraction /= 2;
  1545. }
  1546. min_objects /= 2;
  1547. }
  1548. /*
  1549. * We were unable to place multiple objects in a slab. Now
  1550. * lets see if we can place a single object there.
  1551. */
  1552. order = slab_order(size, 1, slub_max_order, 1);
  1553. if (order <= slub_max_order)
  1554. return order;
  1555. /*
  1556. * Doh this slab cannot be placed using slub_max_order.
  1557. */
  1558. order = slab_order(size, 1, MAX_ORDER, 1);
  1559. if (order <= MAX_ORDER)
  1560. return order;
  1561. return -ENOSYS;
  1562. }
  1563. /*
  1564. * Figure out what the alignment of the objects will be.
  1565. */
  1566. static unsigned long calculate_alignment(unsigned long flags,
  1567. unsigned long align, unsigned long size)
  1568. {
  1569. /*
  1570. * If the user wants hardware cache aligned objects then
  1571. * follow that suggestion if the object is sufficiently
  1572. * large.
  1573. *
  1574. * The hardware cache alignment cannot override the
  1575. * specified alignment though. If that is greater
  1576. * then use it.
  1577. */
  1578. if ((flags & SLAB_HWCACHE_ALIGN) &&
  1579. size > cache_line_size() / 2)
  1580. return max_t(unsigned long, align, cache_line_size());
  1581. if (align < ARCH_SLAB_MINALIGN)
  1582. return ARCH_SLAB_MINALIGN;
  1583. return ALIGN(align, sizeof(void *));
  1584. }
  1585. static void init_kmem_cache_node(struct kmem_cache_node *n)
  1586. {
  1587. n->nr_partial = 0;
  1588. atomic_long_set(&n->nr_slabs, 0);
  1589. spin_lock_init(&n->list_lock);
  1590. INIT_LIST_HEAD(&n->partial);
  1591. INIT_LIST_HEAD(&n->full);
  1592. }
  1593. #ifdef CONFIG_NUMA
  1594. /*
  1595. * No kmalloc_node yet so do it by hand. We know that this is the first
  1596. * slab on the node for this slabcache. There are no concurrent accesses
  1597. * possible.
  1598. *
  1599. * Note that this function only works on the kmalloc_node_cache
  1600. * when allocating for the kmalloc_node_cache.
  1601. */
  1602. static struct kmem_cache_node * __init early_kmem_cache_node_alloc(gfp_t gfpflags,
  1603. int node)
  1604. {
  1605. struct page *page;
  1606. struct kmem_cache_node *n;
  1607. BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
  1608. page = new_slab(kmalloc_caches, gfpflags | GFP_THISNODE, node);
  1609. BUG_ON(!page);
  1610. n = page->freelist;
  1611. BUG_ON(!n);
  1612. page->freelist = get_freepointer(kmalloc_caches, n);
  1613. page->inuse++;
  1614. kmalloc_caches->node[node] = n;
  1615. init_object(kmalloc_caches, n, 1);
  1616. init_tracking(kmalloc_caches, n);
  1617. init_kmem_cache_node(n);
  1618. atomic_long_inc(&n->nr_slabs);
  1619. add_partial(n, page);
  1620. /*
  1621. * new_slab() disables interupts. If we do not reenable interrupts here
  1622. * then bootup would continue with interrupts disabled.
  1623. */
  1624. local_irq_enable();
  1625. return n;
  1626. }
  1627. static void free_kmem_cache_nodes(struct kmem_cache *s)
  1628. {
  1629. int node;
  1630. for_each_online_node(node) {
  1631. struct kmem_cache_node *n = s->node[node];
  1632. if (n && n != &s->local_node)
  1633. kmem_cache_free(kmalloc_caches, n);
  1634. s->node[node] = NULL;
  1635. }
  1636. }
  1637. static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
  1638. {
  1639. int node;
  1640. int local_node;
  1641. if (slab_state >= UP)
  1642. local_node = page_to_nid(virt_to_page(s));
  1643. else
  1644. local_node = 0;
  1645. for_each_online_node(node) {
  1646. struct kmem_cache_node *n;
  1647. if (local_node == node)
  1648. n = &s->local_node;
  1649. else {
  1650. if (slab_state == DOWN) {
  1651. n = early_kmem_cache_node_alloc(gfpflags,
  1652. node);
  1653. continue;
  1654. }
  1655. n = kmem_cache_alloc_node(kmalloc_caches,
  1656. gfpflags, node);
  1657. if (!n) {
  1658. free_kmem_cache_nodes(s);
  1659. return 0;
  1660. }
  1661. }
  1662. s->node[node] = n;
  1663. init_kmem_cache_node(n);
  1664. }
  1665. return 1;
  1666. }
  1667. #else
  1668. static void free_kmem_cache_nodes(struct kmem_cache *s)
  1669. {
  1670. }
  1671. static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
  1672. {
  1673. init_kmem_cache_node(&s->local_node);
  1674. return 1;
  1675. }
  1676. #endif
  1677. /*
  1678. * calculate_sizes() determines the order and the distribution of data within
  1679. * a slab object.
  1680. */
  1681. static int calculate_sizes(struct kmem_cache *s)
  1682. {
  1683. unsigned long flags = s->flags;
  1684. unsigned long size = s->objsize;
  1685. unsigned long align = s->align;
  1686. /*
  1687. * Determine if we can poison the object itself. If the user of
  1688. * the slab may touch the object after free or before allocation
  1689. * then we should never poison the object itself.
  1690. */
  1691. if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
  1692. !s->ctor)
  1693. s->flags |= __OBJECT_POISON;
  1694. else
  1695. s->flags &= ~__OBJECT_POISON;
  1696. /*
  1697. * Round up object size to the next word boundary. We can only
  1698. * place the free pointer at word boundaries and this determines
  1699. * the possible location of the free pointer.
  1700. */
  1701. size = ALIGN(size, sizeof(void *));
  1702. #ifdef CONFIG_SLUB_DEBUG
  1703. /*
  1704. * If we are Redzoning then check if there is some space between the
  1705. * end of the object and the free pointer. If not then add an
  1706. * additional word to have some bytes to store Redzone information.
  1707. */
  1708. if ((flags & SLAB_RED_ZONE) && size == s->objsize)
  1709. size += sizeof(void *);
  1710. #endif
  1711. /*
  1712. * With that we have determined the number of bytes in actual use
  1713. * by the object. This is the potential offset to the free pointer.
  1714. */
  1715. s->inuse = size;
  1716. if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
  1717. s->ctor)) {
  1718. /*
  1719. * Relocate free pointer after the object if it is not
  1720. * permitted to overwrite the first word of the object on
  1721. * kmem_cache_free.
  1722. *
  1723. * This is the case if we do RCU, have a constructor or
  1724. * destructor or are poisoning the objects.
  1725. */
  1726. s->offset = size;
  1727. size += sizeof(void *);
  1728. }
  1729. #ifdef CONFIG_SLUB_DEBUG
  1730. if (flags & SLAB_STORE_USER)
  1731. /*
  1732. * Need to store information about allocs and frees after
  1733. * the object.
  1734. */
  1735. size += 2 * sizeof(struct track);
  1736. if (flags & SLAB_RED_ZONE)
  1737. /*
  1738. * Add some empty padding so that we can catch
  1739. * overwrites from earlier objects rather than let
  1740. * tracking information or the free pointer be
  1741. * corrupted if an user writes before the start
  1742. * of the object.
  1743. */
  1744. size += sizeof(void *);
  1745. #endif
  1746. /*
  1747. * Determine the alignment based on various parameters that the
  1748. * user specified and the dynamic determination of cache line size
  1749. * on bootup.
  1750. */
  1751. align = calculate_alignment(flags, align, s->objsize);
  1752. /*
  1753. * SLUB stores one object immediately after another beginning from
  1754. * offset 0. In order to align the objects we have to simply size
  1755. * each object to conform to the alignment.
  1756. */
  1757. size = ALIGN(size, align);
  1758. s->size = size;
  1759. s->order = calculate_order(size);
  1760. if (s->order < 0)
  1761. return 0;
  1762. /*
  1763. * Determine the number of objects per slab
  1764. */
  1765. s->objects = (PAGE_SIZE << s->order) / size;
  1766. /*
  1767. * Verify that the number of objects is within permitted limits.
  1768. * The page->inuse field is only 16 bit wide! So we cannot have
  1769. * more than 64k objects per slab.
  1770. */
  1771. if (!s->objects || s->objects > MAX_OBJECTS_PER_SLAB)
  1772. return 0;
  1773. return 1;
  1774. }
  1775. static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
  1776. const char *name, size_t size,
  1777. size_t align, unsigned long flags,
  1778. void (*ctor)(void *, struct kmem_cache *, unsigned long))
  1779. {
  1780. memset(s, 0, kmem_size);
  1781. s->name = name;
  1782. s->ctor = ctor;
  1783. s->objsize = size;
  1784. s->flags = flags;
  1785. s->align = align;
  1786. kmem_cache_open_debug_check(s);
  1787. if (!calculate_sizes(s))
  1788. goto error;
  1789. s->refcount = 1;
  1790. #ifdef CONFIG_NUMA
  1791. s->defrag_ratio = 100;
  1792. #endif
  1793. if (init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
  1794. return 1;
  1795. error:
  1796. if (flags & SLAB_PANIC)
  1797. panic("Cannot create slab %s size=%lu realsize=%u "
  1798. "order=%u offset=%u flags=%lx\n",
  1799. s->name, (unsigned long)size, s->size, s->order,
  1800. s->offset, flags);
  1801. return 0;
  1802. }
  1803. /*
  1804. * Check if a given pointer is valid
  1805. */
  1806. int kmem_ptr_validate(struct kmem_cache *s, const void *object)
  1807. {
  1808. struct page * page;
  1809. page = get_object_page(object);
  1810. if (!page || s != page->slab)
  1811. /* No slab or wrong slab */
  1812. return 0;
  1813. if (!check_valid_pointer(s, page, object))
  1814. return 0;
  1815. /*
  1816. * We could also check if the object is on the slabs freelist.
  1817. * But this would be too expensive and it seems that the main
  1818. * purpose of kmem_ptr_valid is to check if the object belongs
  1819. * to a certain slab.
  1820. */
  1821. return 1;
  1822. }
  1823. EXPORT_SYMBOL(kmem_ptr_validate);
  1824. /*
  1825. * Determine the size of a slab object
  1826. */
  1827. unsigned int kmem_cache_size(struct kmem_cache *s)
  1828. {
  1829. return s->objsize;
  1830. }
  1831. EXPORT_SYMBOL(kmem_cache_size);
  1832. const char *kmem_cache_name(struct kmem_cache *s)
  1833. {
  1834. return s->name;
  1835. }
  1836. EXPORT_SYMBOL(kmem_cache_name);
  1837. /*
  1838. * Attempt to free all slabs on a node. Return the number of slabs we
  1839. * were unable to free.
  1840. */
  1841. static int free_list(struct kmem_cache *s, struct kmem_cache_node *n,
  1842. struct list_head *list)
  1843. {
  1844. int slabs_inuse = 0;
  1845. unsigned long flags;
  1846. struct page *page, *h;
  1847. spin_lock_irqsave(&n->list_lock, flags);
  1848. list_for_each_entry_safe(page, h, list, lru)
  1849. if (!page->inuse) {
  1850. list_del(&page->lru);
  1851. discard_slab(s, page);
  1852. } else
  1853. slabs_inuse++;
  1854. spin_unlock_irqrestore(&n->list_lock, flags);
  1855. return slabs_inuse;
  1856. }
  1857. /*
  1858. * Release all resources used by a slab cache.
  1859. */
  1860. static inline int kmem_cache_close(struct kmem_cache *s)
  1861. {
  1862. int node;
  1863. flush_all(s);
  1864. /* Attempt to free all objects */
  1865. for_each_online_node(node) {
  1866. struct kmem_cache_node *n = get_node(s, node);
  1867. n->nr_partial -= free_list(s, n, &n->partial);
  1868. if (atomic_long_read(&n->nr_slabs))
  1869. return 1;
  1870. }
  1871. free_kmem_cache_nodes(s);
  1872. return 0;
  1873. }
  1874. /*
  1875. * Close a cache and release the kmem_cache structure
  1876. * (must be used for caches created using kmem_cache_create)
  1877. */
  1878. void kmem_cache_destroy(struct kmem_cache *s)
  1879. {
  1880. down_write(&slub_lock);
  1881. s->refcount--;
  1882. if (!s->refcount) {
  1883. list_del(&s->list);
  1884. up_write(&slub_lock);
  1885. if (kmem_cache_close(s))
  1886. WARN_ON(1);
  1887. sysfs_slab_remove(s);
  1888. kfree(s);
  1889. } else
  1890. up_write(&slub_lock);
  1891. }
  1892. EXPORT_SYMBOL(kmem_cache_destroy);
  1893. /********************************************************************
  1894. * Kmalloc subsystem
  1895. *******************************************************************/
  1896. struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1] __cacheline_aligned;
  1897. EXPORT_SYMBOL(kmalloc_caches);
  1898. #ifdef CONFIG_ZONE_DMA
  1899. static struct kmem_cache *kmalloc_caches_dma[KMALLOC_SHIFT_HIGH + 1];
  1900. #endif
  1901. static int __init setup_slub_min_order(char *str)
  1902. {
  1903. get_option (&str, &slub_min_order);
  1904. return 1;
  1905. }
  1906. __setup("slub_min_order=", setup_slub_min_order);
  1907. static int __init setup_slub_max_order(char *str)
  1908. {
  1909. get_option (&str, &slub_max_order);
  1910. return 1;
  1911. }
  1912. __setup("slub_max_order=", setup_slub_max_order);
  1913. static int __init setup_slub_min_objects(char *str)
  1914. {
  1915. get_option (&str, &slub_min_objects);
  1916. return 1;
  1917. }
  1918. __setup("slub_min_objects=", setup_slub_min_objects);
  1919. static int __init setup_slub_nomerge(char *str)
  1920. {
  1921. slub_nomerge = 1;
  1922. return 1;
  1923. }
  1924. __setup("slub_nomerge", setup_slub_nomerge);
  1925. static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
  1926. const char *name, int size, gfp_t gfp_flags)
  1927. {
  1928. unsigned int flags = 0;
  1929. if (gfp_flags & SLUB_DMA)
  1930. flags = SLAB_CACHE_DMA;
  1931. down_write(&slub_lock);
  1932. if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
  1933. flags, NULL))
  1934. goto panic;
  1935. list_add(&s->list, &slab_caches);
  1936. up_write(&slub_lock);
  1937. if (sysfs_slab_add(s))
  1938. goto panic;
  1939. return s;
  1940. panic:
  1941. panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
  1942. }
  1943. #ifdef CONFIG_ZONE_DMA
  1944. static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
  1945. {
  1946. struct kmem_cache *s;
  1947. struct kmem_cache *x;
  1948. char *text;
  1949. size_t realsize;
  1950. s = kmalloc_caches_dma[index];
  1951. if (s)
  1952. return s;
  1953. /* Dynamically create dma cache */
  1954. x = kmalloc(kmem_size, flags & ~SLUB_DMA);
  1955. if (!x)
  1956. panic("Unable to allocate memory for dma cache\n");
  1957. realsize = kmalloc_caches[index].objsize;
  1958. text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
  1959. (unsigned int)realsize);
  1960. s = create_kmalloc_cache(x, text, realsize, flags);
  1961. down_write(&slub_lock);
  1962. if (!kmalloc_caches_dma[index]) {
  1963. kmalloc_caches_dma[index] = s;
  1964. up_write(&slub_lock);
  1965. return s;
  1966. }
  1967. up_write(&slub_lock);
  1968. kmem_cache_destroy(s);
  1969. return kmalloc_caches_dma[index];
  1970. }
  1971. #endif
  1972. /*
  1973. * Conversion table for small slabs sizes / 8 to the index in the
  1974. * kmalloc array. This is necessary for slabs < 192 since we have non power
  1975. * of two cache sizes there. The size of larger slabs can be determined using
  1976. * fls.
  1977. */
  1978. static s8 size_index[24] = {
  1979. 3, /* 8 */
  1980. 4, /* 16 */
  1981. 5, /* 24 */
  1982. 5, /* 32 */
  1983. 6, /* 40 */
  1984. 6, /* 48 */
  1985. 6, /* 56 */
  1986. 6, /* 64 */
  1987. 1, /* 72 */
  1988. 1, /* 80 */
  1989. 1, /* 88 */
  1990. 1, /* 96 */
  1991. 7, /* 104 */
  1992. 7, /* 112 */
  1993. 7, /* 120 */
  1994. 7, /* 128 */
  1995. 2, /* 136 */
  1996. 2, /* 144 */
  1997. 2, /* 152 */
  1998. 2, /* 160 */
  1999. 2, /* 168 */
  2000. 2, /* 176 */
  2001. 2, /* 184 */
  2002. 2 /* 192 */
  2003. };
  2004. static struct kmem_cache *get_slab(size_t size, gfp_t flags)
  2005. {
  2006. int index;
  2007. if (size <= 192) {
  2008. if (!size)
  2009. return ZERO_SIZE_PTR;
  2010. index = size_index[(size - 1) / 8];
  2011. } else {
  2012. if (size > KMALLOC_MAX_SIZE)
  2013. return NULL;
  2014. index = fls(size - 1);
  2015. }
  2016. #ifdef CONFIG_ZONE_DMA
  2017. if (unlikely((flags & SLUB_DMA)))
  2018. return dma_kmalloc_cache(index, flags);
  2019. #endif
  2020. return &kmalloc_caches[index];
  2021. }
  2022. void *__kmalloc(size_t size, gfp_t flags)
  2023. {
  2024. struct kmem_cache *s = get_slab(size, flags);
  2025. if (ZERO_OR_NULL_PTR(s))
  2026. return s;
  2027. return slab_alloc(s, flags, -1, __builtin_return_address(0));
  2028. }
  2029. EXPORT_SYMBOL(__kmalloc);
  2030. #ifdef CONFIG_NUMA
  2031. void *__kmalloc_node(size_t size, gfp_t flags, int node)
  2032. {
  2033. struct kmem_cache *s = get_slab(size, flags);
  2034. if (ZERO_OR_NULL_PTR(s))
  2035. return s;
  2036. return slab_alloc(s, flags, node, __builtin_return_address(0));
  2037. }
  2038. EXPORT_SYMBOL(__kmalloc_node);
  2039. #endif
  2040. size_t ksize(const void *object)
  2041. {
  2042. struct page *page;
  2043. struct kmem_cache *s;
  2044. if (object == ZERO_SIZE_PTR)
  2045. return 0;
  2046. page = get_object_page(object);
  2047. BUG_ON(!page);
  2048. s = page->slab;
  2049. BUG_ON(!s);
  2050. /*
  2051. * Debugging requires use of the padding between object
  2052. * and whatever may come after it.
  2053. */
  2054. if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
  2055. return s->objsize;
  2056. /*
  2057. * If we have the need to store the freelist pointer
  2058. * back there or track user information then we can
  2059. * only use the space before that information.
  2060. */
  2061. if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
  2062. return s->inuse;
  2063. /*
  2064. * Else we can use all the padding etc for the allocation
  2065. */
  2066. return s->size;
  2067. }
  2068. EXPORT_SYMBOL(ksize);
  2069. void kfree(const void *x)
  2070. {
  2071. struct kmem_cache *s;
  2072. struct page *page;
  2073. /*
  2074. * This has to be an unsigned comparison. According to Linus
  2075. * some gcc version treat a pointer as a signed entity. Then
  2076. * this comparison would be true for all "negative" pointers
  2077. * (which would cover the whole upper half of the address space).
  2078. */
  2079. if (ZERO_OR_NULL_PTR(x))
  2080. return;
  2081. page = virt_to_head_page(x);
  2082. s = page->slab;
  2083. slab_free(s, page, (void *)x, __builtin_return_address(0));
  2084. }
  2085. EXPORT_SYMBOL(kfree);
  2086. /*
  2087. * kmem_cache_shrink removes empty slabs from the partial lists and sorts
  2088. * the remaining slabs by the number of items in use. The slabs with the
  2089. * most items in use come first. New allocations will then fill those up
  2090. * and thus they can be removed from the partial lists.
  2091. *
  2092. * The slabs with the least items are placed last. This results in them
  2093. * being allocated from last increasing the chance that the last objects
  2094. * are freed in them.
  2095. */
  2096. int kmem_cache_shrink(struct kmem_cache *s)
  2097. {
  2098. int node;
  2099. int i;
  2100. struct kmem_cache_node *n;
  2101. struct page *page;
  2102. struct page *t;
  2103. struct list_head *slabs_by_inuse =
  2104. kmalloc(sizeof(struct list_head) * s->objects, GFP_KERNEL);
  2105. unsigned long flags;
  2106. if (!slabs_by_inuse)
  2107. return -ENOMEM;
  2108. flush_all(s);
  2109. for_each_online_node(node) {
  2110. n = get_node(s, node);
  2111. if (!n->nr_partial)
  2112. continue;
  2113. for (i = 0; i < s->objects; i++)
  2114. INIT_LIST_HEAD(slabs_by_inuse + i);
  2115. spin_lock_irqsave(&n->list_lock, flags);
  2116. /*
  2117. * Build lists indexed by the items in use in each slab.
  2118. *
  2119. * Note that concurrent frees may occur while we hold the
  2120. * list_lock. page->inuse here is the upper limit.
  2121. */
  2122. list_for_each_entry_safe(page, t, &n->partial, lru) {
  2123. if (!page->inuse && slab_trylock(page)) {
  2124. /*
  2125. * Must hold slab lock here because slab_free
  2126. * may have freed the last object and be
  2127. * waiting to release the slab.
  2128. */
  2129. list_del(&page->lru);
  2130. n->nr_partial--;
  2131. slab_unlock(page);
  2132. discard_slab(s, page);
  2133. } else {
  2134. if (n->nr_partial > MAX_PARTIAL)
  2135. list_move(&page->lru,
  2136. slabs_by_inuse + page->inuse);
  2137. }
  2138. }
  2139. if (n->nr_partial <= MAX_PARTIAL)
  2140. goto out;
  2141. /*
  2142. * Rebuild the partial list with the slabs filled up most
  2143. * first and the least used slabs at the end.
  2144. */
  2145. for (i = s->objects - 1; i >= 0; i--)
  2146. list_splice(slabs_by_inuse + i, n->partial.prev);
  2147. out:
  2148. spin_unlock_irqrestore(&n->list_lock, flags);
  2149. }
  2150. kfree(slabs_by_inuse);
  2151. return 0;
  2152. }
  2153. EXPORT_SYMBOL(kmem_cache_shrink);
  2154. /********************************************************************
  2155. * Basic setup of slabs
  2156. *******************************************************************/
  2157. void __init kmem_cache_init(void)
  2158. {
  2159. int i;
  2160. int caches = 0;
  2161. #ifdef CONFIG_NUMA
  2162. /*
  2163. * Must first have the slab cache available for the allocations of the
  2164. * struct kmem_cache_node's. There is special bootstrap code in
  2165. * kmem_cache_open for slab_state == DOWN.
  2166. */
  2167. create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
  2168. sizeof(struct kmem_cache_node), GFP_KERNEL);
  2169. kmalloc_caches[0].refcount = -1;
  2170. caches++;
  2171. #endif
  2172. /* Able to allocate the per node structures */
  2173. slab_state = PARTIAL;
  2174. /* Caches that are not of the two-to-the-power-of size */
  2175. if (KMALLOC_MIN_SIZE <= 64) {
  2176. create_kmalloc_cache(&kmalloc_caches[1],
  2177. "kmalloc-96", 96, GFP_KERNEL);
  2178. caches++;
  2179. }
  2180. if (KMALLOC_MIN_SIZE <= 128) {
  2181. create_kmalloc_cache(&kmalloc_caches[2],
  2182. "kmalloc-192", 192, GFP_KERNEL);
  2183. caches++;
  2184. }
  2185. for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
  2186. create_kmalloc_cache(&kmalloc_caches[i],
  2187. "kmalloc", 1 << i, GFP_KERNEL);
  2188. caches++;
  2189. }
  2190. /*
  2191. * Patch up the size_index table if we have strange large alignment
  2192. * requirements for the kmalloc array. This is only the case for
  2193. * mips it seems. The standard arches will not generate any code here.
  2194. *
  2195. * Largest permitted alignment is 256 bytes due to the way we
  2196. * handle the index determination for the smaller caches.
  2197. *
  2198. * Make sure that nothing crazy happens if someone starts tinkering
  2199. * around with ARCH_KMALLOC_MINALIGN
  2200. */
  2201. BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
  2202. (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
  2203. for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
  2204. size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
  2205. slab_state = UP;
  2206. /* Provide the correct kmalloc names now that the caches are up */
  2207. for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
  2208. kmalloc_caches[i]. name =
  2209. kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
  2210. #ifdef CONFIG_SMP
  2211. register_cpu_notifier(&slab_notifier);
  2212. #endif
  2213. kmem_size = offsetof(struct kmem_cache, cpu_slab) +
  2214. nr_cpu_ids * sizeof(struct page *);
  2215. printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
  2216. " CPUs=%d, Nodes=%d\n",
  2217. caches, cache_line_size(),
  2218. slub_min_order, slub_max_order, slub_min_objects,
  2219. nr_cpu_ids, nr_node_ids);
  2220. }
  2221. /*
  2222. * Find a mergeable slab cache
  2223. */
  2224. static int slab_unmergeable(struct kmem_cache *s)
  2225. {
  2226. if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
  2227. return 1;
  2228. if (s->ctor)
  2229. return 1;
  2230. /*
  2231. * We may have set a slab to be unmergeable during bootstrap.
  2232. */
  2233. if (s->refcount < 0)
  2234. return 1;
  2235. return 0;
  2236. }
  2237. static struct kmem_cache *find_mergeable(size_t size,
  2238. size_t align, unsigned long flags,
  2239. void (*ctor)(void *, struct kmem_cache *, unsigned long))
  2240. {
  2241. struct kmem_cache *s;
  2242. if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
  2243. return NULL;
  2244. if (ctor)
  2245. return NULL;
  2246. size = ALIGN(size, sizeof(void *));
  2247. align = calculate_alignment(flags, align, size);
  2248. size = ALIGN(size, align);
  2249. list_for_each_entry(s, &slab_caches, list) {
  2250. if (slab_unmergeable(s))
  2251. continue;
  2252. if (size > s->size)
  2253. continue;
  2254. if (((flags | slub_debug) & SLUB_MERGE_SAME) !=
  2255. (s->flags & SLUB_MERGE_SAME))
  2256. continue;
  2257. /*
  2258. * Check if alignment is compatible.
  2259. * Courtesy of Adrian Drzewiecki
  2260. */
  2261. if ((s->size & ~(align -1)) != s->size)
  2262. continue;
  2263. if (s->size - size >= sizeof(void *))
  2264. continue;
  2265. return s;
  2266. }
  2267. return NULL;
  2268. }
  2269. struct kmem_cache *kmem_cache_create(const char *name, size_t size,
  2270. size_t align, unsigned long flags,
  2271. void (*ctor)(void *, struct kmem_cache *, unsigned long),
  2272. void (*dtor)(void *, struct kmem_cache *, unsigned long))
  2273. {
  2274. struct kmem_cache *s;
  2275. BUG_ON(dtor);
  2276. down_write(&slub_lock);
  2277. s = find_mergeable(size, align, flags, ctor);
  2278. if (s) {
  2279. s->refcount++;
  2280. /*
  2281. * Adjust the object sizes so that we clear
  2282. * the complete object on kzalloc.
  2283. */
  2284. s->objsize = max(s->objsize, (int)size);
  2285. s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
  2286. up_write(&slub_lock);
  2287. if (sysfs_slab_alias(s, name))
  2288. goto err;
  2289. return s;
  2290. }
  2291. s = kmalloc(kmem_size, GFP_KERNEL);
  2292. if (s) {
  2293. if (kmem_cache_open(s, GFP_KERNEL, name,
  2294. size, align, flags, ctor)) {
  2295. list_add(&s->list, &slab_caches);
  2296. up_write(&slub_lock);
  2297. if (sysfs_slab_add(s))
  2298. goto err;
  2299. return s;
  2300. }
  2301. kfree(s);
  2302. }
  2303. up_write(&slub_lock);
  2304. err:
  2305. if (flags & SLAB_PANIC)
  2306. panic("Cannot create slabcache %s\n", name);
  2307. else
  2308. s = NULL;
  2309. return s;
  2310. }
  2311. EXPORT_SYMBOL(kmem_cache_create);
  2312. #ifdef CONFIG_SMP
  2313. /*
  2314. * Use the cpu notifier to insure that the cpu slabs are flushed when
  2315. * necessary.
  2316. */
  2317. static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
  2318. unsigned long action, void *hcpu)
  2319. {
  2320. long cpu = (long)hcpu;
  2321. struct kmem_cache *s;
  2322. unsigned long flags;
  2323. switch (action) {
  2324. case CPU_UP_CANCELED:
  2325. case CPU_UP_CANCELED_FROZEN:
  2326. case CPU_DEAD:
  2327. case CPU_DEAD_FROZEN:
  2328. down_read(&slub_lock);
  2329. list_for_each_entry(s, &slab_caches, list) {
  2330. local_irq_save(flags);
  2331. __flush_cpu_slab(s, cpu);
  2332. local_irq_restore(flags);
  2333. }
  2334. up_read(&slub_lock);
  2335. break;
  2336. default:
  2337. break;
  2338. }
  2339. return NOTIFY_OK;
  2340. }
  2341. static struct notifier_block __cpuinitdata slab_notifier =
  2342. { &slab_cpuup_callback, NULL, 0 };
  2343. #endif
  2344. void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
  2345. {
  2346. struct kmem_cache *s = get_slab(size, gfpflags);
  2347. if (ZERO_OR_NULL_PTR(s))
  2348. return s;
  2349. return slab_alloc(s, gfpflags, -1, caller);
  2350. }
  2351. void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
  2352. int node, void *caller)
  2353. {
  2354. struct kmem_cache *s = get_slab(size, gfpflags);
  2355. if (ZERO_OR_NULL_PTR(s))
  2356. return s;
  2357. return slab_alloc(s, gfpflags, node, caller);
  2358. }
  2359. #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
  2360. static int validate_slab(struct kmem_cache *s, struct page *page,
  2361. unsigned long *map)
  2362. {
  2363. void *p;
  2364. void *addr = page_address(page);
  2365. if (!check_slab(s, page) ||
  2366. !on_freelist(s, page, NULL))
  2367. return 0;
  2368. /* Now we know that a valid freelist exists */
  2369. bitmap_zero(map, s->objects);
  2370. for_each_free_object(p, s, page->freelist) {
  2371. set_bit(slab_index(p, s, addr), map);
  2372. if (!check_object(s, page, p, 0))
  2373. return 0;
  2374. }
  2375. for_each_object(p, s, addr)
  2376. if (!test_bit(slab_index(p, s, addr), map))
  2377. if (!check_object(s, page, p, 1))
  2378. return 0;
  2379. return 1;
  2380. }
  2381. static void validate_slab_slab(struct kmem_cache *s, struct page *page,
  2382. unsigned long *map)
  2383. {
  2384. if (slab_trylock(page)) {
  2385. validate_slab(s, page, map);
  2386. slab_unlock(page);
  2387. } else
  2388. printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
  2389. s->name, page);
  2390. if (s->flags & DEBUG_DEFAULT_FLAGS) {
  2391. if (!SlabDebug(page))
  2392. printk(KERN_ERR "SLUB %s: SlabDebug not set "
  2393. "on slab 0x%p\n", s->name, page);
  2394. } else {
  2395. if (SlabDebug(page))
  2396. printk(KERN_ERR "SLUB %s: SlabDebug set on "
  2397. "slab 0x%p\n", s->name, page);
  2398. }
  2399. }
  2400. static int validate_slab_node(struct kmem_cache *s,
  2401. struct kmem_cache_node *n, unsigned long *map)
  2402. {
  2403. unsigned long count = 0;
  2404. struct page *page;
  2405. unsigned long flags;
  2406. spin_lock_irqsave(&n->list_lock, flags);
  2407. list_for_each_entry(page, &n->partial, lru) {
  2408. validate_slab_slab(s, page, map);
  2409. count++;
  2410. }
  2411. if (count != n->nr_partial)
  2412. printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
  2413. "counter=%ld\n", s->name, count, n->nr_partial);
  2414. if (!(s->flags & SLAB_STORE_USER))
  2415. goto out;
  2416. list_for_each_entry(page, &n->full, lru) {
  2417. validate_slab_slab(s, page, map);
  2418. count++;
  2419. }
  2420. if (count != atomic_long_read(&n->nr_slabs))
  2421. printk(KERN_ERR "SLUB: %s %ld slabs counted but "
  2422. "counter=%ld\n", s->name, count,
  2423. atomic_long_read(&n->nr_slabs));
  2424. out:
  2425. spin_unlock_irqrestore(&n->list_lock, flags);
  2426. return count;
  2427. }
  2428. static long validate_slab_cache(struct kmem_cache *s)
  2429. {
  2430. int node;
  2431. unsigned long count = 0;
  2432. unsigned long *map = kmalloc(BITS_TO_LONGS(s->objects) *
  2433. sizeof(unsigned long), GFP_KERNEL);
  2434. if (!map)
  2435. return -ENOMEM;
  2436. flush_all(s);
  2437. for_each_online_node(node) {
  2438. struct kmem_cache_node *n = get_node(s, node);
  2439. count += validate_slab_node(s, n, map);
  2440. }
  2441. kfree(map);
  2442. return count;
  2443. }
  2444. #ifdef SLUB_RESILIENCY_TEST
  2445. static void resiliency_test(void)
  2446. {
  2447. u8 *p;
  2448. printk(KERN_ERR "SLUB resiliency testing\n");
  2449. printk(KERN_ERR "-----------------------\n");
  2450. printk(KERN_ERR "A. Corruption after allocation\n");
  2451. p = kzalloc(16, GFP_KERNEL);
  2452. p[16] = 0x12;
  2453. printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
  2454. " 0x12->0x%p\n\n", p + 16);
  2455. validate_slab_cache(kmalloc_caches + 4);
  2456. /* Hmmm... The next two are dangerous */
  2457. p = kzalloc(32, GFP_KERNEL);
  2458. p[32 + sizeof(void *)] = 0x34;
  2459. printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
  2460. " 0x34 -> -0x%p\n", p);
  2461. printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
  2462. validate_slab_cache(kmalloc_caches + 5);
  2463. p = kzalloc(64, GFP_KERNEL);
  2464. p += 64 + (get_cycles() & 0xff) * sizeof(void *);
  2465. *p = 0x56;
  2466. printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
  2467. p);
  2468. printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
  2469. validate_slab_cache(kmalloc_caches + 6);
  2470. printk(KERN_ERR "\nB. Corruption after free\n");
  2471. p = kzalloc(128, GFP_KERNEL);
  2472. kfree(p);
  2473. *p = 0x78;
  2474. printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
  2475. validate_slab_cache(kmalloc_caches + 7);
  2476. p = kzalloc(256, GFP_KERNEL);
  2477. kfree(p);
  2478. p[50] = 0x9a;
  2479. printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
  2480. validate_slab_cache(kmalloc_caches + 8);
  2481. p = kzalloc(512, GFP_KERNEL);
  2482. kfree(p);
  2483. p[512] = 0xab;
  2484. printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
  2485. validate_slab_cache(kmalloc_caches + 9);
  2486. }
  2487. #else
  2488. static void resiliency_test(void) {};
  2489. #endif
  2490. /*
  2491. * Generate lists of code addresses where slabcache objects are allocated
  2492. * and freed.
  2493. */
  2494. struct location {
  2495. unsigned long count;
  2496. void *addr;
  2497. long long sum_time;
  2498. long min_time;
  2499. long max_time;
  2500. long min_pid;
  2501. long max_pid;
  2502. cpumask_t cpus;
  2503. nodemask_t nodes;
  2504. };
  2505. struct loc_track {
  2506. unsigned long max;
  2507. unsigned long count;
  2508. struct location *loc;
  2509. };
  2510. static void free_loc_track(struct loc_track *t)
  2511. {
  2512. if (t->max)
  2513. free_pages((unsigned long)t->loc,
  2514. get_order(sizeof(struct location) * t->max));
  2515. }
  2516. static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
  2517. {
  2518. struct location *l;
  2519. int order;
  2520. order = get_order(sizeof(struct location) * max);
  2521. l = (void *)__get_free_pages(flags, order);
  2522. if (!l)
  2523. return 0;
  2524. if (t->count) {
  2525. memcpy(l, t->loc, sizeof(struct location) * t->count);
  2526. free_loc_track(t);
  2527. }
  2528. t->max = max;
  2529. t->loc = l;
  2530. return 1;
  2531. }
  2532. static int add_location(struct loc_track *t, struct kmem_cache *s,
  2533. const struct track *track)
  2534. {
  2535. long start, end, pos;
  2536. struct location *l;
  2537. void *caddr;
  2538. unsigned long age = jiffies - track->when;
  2539. start = -1;
  2540. end = t->count;
  2541. for ( ; ; ) {
  2542. pos = start + (end - start + 1) / 2;
  2543. /*
  2544. * There is nothing at "end". If we end up there
  2545. * we need to add something to before end.
  2546. */
  2547. if (pos == end)
  2548. break;
  2549. caddr = t->loc[pos].addr;
  2550. if (track->addr == caddr) {
  2551. l = &t->loc[pos];
  2552. l->count++;
  2553. if (track->when) {
  2554. l->sum_time += age;
  2555. if (age < l->min_time)
  2556. l->min_time = age;
  2557. if (age > l->max_time)
  2558. l->max_time = age;
  2559. if (track->pid < l->min_pid)
  2560. l->min_pid = track->pid;
  2561. if (track->pid > l->max_pid)
  2562. l->max_pid = track->pid;
  2563. cpu_set(track->cpu, l->cpus);
  2564. }
  2565. node_set(page_to_nid(virt_to_page(track)), l->nodes);
  2566. return 1;
  2567. }
  2568. if (track->addr < caddr)
  2569. end = pos;
  2570. else
  2571. start = pos;
  2572. }
  2573. /*
  2574. * Not found. Insert new tracking element.
  2575. */
  2576. if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
  2577. return 0;
  2578. l = t->loc + pos;
  2579. if (pos < t->count)
  2580. memmove(l + 1, l,
  2581. (t->count - pos) * sizeof(struct location));
  2582. t->count++;
  2583. l->count = 1;
  2584. l->addr = track->addr;
  2585. l->sum_time = age;
  2586. l->min_time = age;
  2587. l->max_time = age;
  2588. l->min_pid = track->pid;
  2589. l->max_pid = track->pid;
  2590. cpus_clear(l->cpus);
  2591. cpu_set(track->cpu, l->cpus);
  2592. nodes_clear(l->nodes);
  2593. node_set(page_to_nid(virt_to_page(track)), l->nodes);
  2594. return 1;
  2595. }
  2596. static void process_slab(struct loc_track *t, struct kmem_cache *s,
  2597. struct page *page, enum track_item alloc)
  2598. {
  2599. void *addr = page_address(page);
  2600. DECLARE_BITMAP(map, s->objects);
  2601. void *p;
  2602. bitmap_zero(map, s->objects);
  2603. for_each_free_object(p, s, page->freelist)
  2604. set_bit(slab_index(p, s, addr), map);
  2605. for_each_object(p, s, addr)
  2606. if (!test_bit(slab_index(p, s, addr), map))
  2607. add_location(t, s, get_track(s, p, alloc));
  2608. }
  2609. static int list_locations(struct kmem_cache *s, char *buf,
  2610. enum track_item alloc)
  2611. {
  2612. int n = 0;
  2613. unsigned long i;
  2614. struct loc_track t = { 0, 0, NULL };
  2615. int node;
  2616. if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
  2617. GFP_KERNEL))
  2618. return sprintf(buf, "Out of memory\n");
  2619. /* Push back cpu slabs */
  2620. flush_all(s);
  2621. for_each_online_node(node) {
  2622. struct kmem_cache_node *n = get_node(s, node);
  2623. unsigned long flags;
  2624. struct page *page;
  2625. if (!atomic_read(&n->nr_slabs))
  2626. continue;
  2627. spin_lock_irqsave(&n->list_lock, flags);
  2628. list_for_each_entry(page, &n->partial, lru)
  2629. process_slab(&t, s, page, alloc);
  2630. list_for_each_entry(page, &n->full, lru)
  2631. process_slab(&t, s, page, alloc);
  2632. spin_unlock_irqrestore(&n->list_lock, flags);
  2633. }
  2634. for (i = 0; i < t.count; i++) {
  2635. struct location *l = &t.loc[i];
  2636. if (n > PAGE_SIZE - 100)
  2637. break;
  2638. n += sprintf(buf + n, "%7ld ", l->count);
  2639. if (l->addr)
  2640. n += sprint_symbol(buf + n, (unsigned long)l->addr);
  2641. else
  2642. n += sprintf(buf + n, "<not-available>");
  2643. if (l->sum_time != l->min_time) {
  2644. unsigned long remainder;
  2645. n += sprintf(buf + n, " age=%ld/%ld/%ld",
  2646. l->min_time,
  2647. div_long_long_rem(l->sum_time, l->count, &remainder),
  2648. l->max_time);
  2649. } else
  2650. n += sprintf(buf + n, " age=%ld",
  2651. l->min_time);
  2652. if (l->min_pid != l->max_pid)
  2653. n += sprintf(buf + n, " pid=%ld-%ld",
  2654. l->min_pid, l->max_pid);
  2655. else
  2656. n += sprintf(buf + n, " pid=%ld",
  2657. l->min_pid);
  2658. if (num_online_cpus() > 1 && !cpus_empty(l->cpus) &&
  2659. n < PAGE_SIZE - 60) {
  2660. n += sprintf(buf + n, " cpus=");
  2661. n += cpulist_scnprintf(buf + n, PAGE_SIZE - n - 50,
  2662. l->cpus);
  2663. }
  2664. if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
  2665. n < PAGE_SIZE - 60) {
  2666. n += sprintf(buf + n, " nodes=");
  2667. n += nodelist_scnprintf(buf + n, PAGE_SIZE - n - 50,
  2668. l->nodes);
  2669. }
  2670. n += sprintf(buf + n, "\n");
  2671. }
  2672. free_loc_track(&t);
  2673. if (!t.count)
  2674. n += sprintf(buf, "No data\n");
  2675. return n;
  2676. }
  2677. static unsigned long count_partial(struct kmem_cache_node *n)
  2678. {
  2679. unsigned long flags;
  2680. unsigned long x = 0;
  2681. struct page *page;
  2682. spin_lock_irqsave(&n->list_lock, flags);
  2683. list_for_each_entry(page, &n->partial, lru)
  2684. x += page->inuse;
  2685. spin_unlock_irqrestore(&n->list_lock, flags);
  2686. return x;
  2687. }
  2688. enum slab_stat_type {
  2689. SL_FULL,
  2690. SL_PARTIAL,
  2691. SL_CPU,
  2692. SL_OBJECTS
  2693. };
  2694. #define SO_FULL (1 << SL_FULL)
  2695. #define SO_PARTIAL (1 << SL_PARTIAL)
  2696. #define SO_CPU (1 << SL_CPU)
  2697. #define SO_OBJECTS (1 << SL_OBJECTS)
  2698. static unsigned long slab_objects(struct kmem_cache *s,
  2699. char *buf, unsigned long flags)
  2700. {
  2701. unsigned long total = 0;
  2702. int cpu;
  2703. int node;
  2704. int x;
  2705. unsigned long *nodes;
  2706. unsigned long *per_cpu;
  2707. nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
  2708. per_cpu = nodes + nr_node_ids;
  2709. for_each_possible_cpu(cpu) {
  2710. struct page *page = s->cpu_slab[cpu];
  2711. int node;
  2712. if (page) {
  2713. node = page_to_nid(page);
  2714. if (flags & SO_CPU) {
  2715. int x = 0;
  2716. if (flags & SO_OBJECTS)
  2717. x = page->inuse;
  2718. else
  2719. x = 1;
  2720. total += x;
  2721. nodes[node] += x;
  2722. }
  2723. per_cpu[node]++;
  2724. }
  2725. }
  2726. for_each_online_node(node) {
  2727. struct kmem_cache_node *n = get_node(s, node);
  2728. if (flags & SO_PARTIAL) {
  2729. if (flags & SO_OBJECTS)
  2730. x = count_partial(n);
  2731. else
  2732. x = n->nr_partial;
  2733. total += x;
  2734. nodes[node] += x;
  2735. }
  2736. if (flags & SO_FULL) {
  2737. int full_slabs = atomic_read(&n->nr_slabs)
  2738. - per_cpu[node]
  2739. - n->nr_partial;
  2740. if (flags & SO_OBJECTS)
  2741. x = full_slabs * s->objects;
  2742. else
  2743. x = full_slabs;
  2744. total += x;
  2745. nodes[node] += x;
  2746. }
  2747. }
  2748. x = sprintf(buf, "%lu", total);
  2749. #ifdef CONFIG_NUMA
  2750. for_each_online_node(node)
  2751. if (nodes[node])
  2752. x += sprintf(buf + x, " N%d=%lu",
  2753. node, nodes[node]);
  2754. #endif
  2755. kfree(nodes);
  2756. return x + sprintf(buf + x, "\n");
  2757. }
  2758. static int any_slab_objects(struct kmem_cache *s)
  2759. {
  2760. int node;
  2761. int cpu;
  2762. for_each_possible_cpu(cpu)
  2763. if (s->cpu_slab[cpu])
  2764. return 1;
  2765. for_each_node(node) {
  2766. struct kmem_cache_node *n = get_node(s, node);
  2767. if (n->nr_partial || atomic_read(&n->nr_slabs))
  2768. return 1;
  2769. }
  2770. return 0;
  2771. }
  2772. #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
  2773. #define to_slab(n) container_of(n, struct kmem_cache, kobj);
  2774. struct slab_attribute {
  2775. struct attribute attr;
  2776. ssize_t (*show)(struct kmem_cache *s, char *buf);
  2777. ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
  2778. };
  2779. #define SLAB_ATTR_RO(_name) \
  2780. static struct slab_attribute _name##_attr = __ATTR_RO(_name)
  2781. #define SLAB_ATTR(_name) \
  2782. static struct slab_attribute _name##_attr = \
  2783. __ATTR(_name, 0644, _name##_show, _name##_store)
  2784. static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
  2785. {
  2786. return sprintf(buf, "%d\n", s->size);
  2787. }
  2788. SLAB_ATTR_RO(slab_size);
  2789. static ssize_t align_show(struct kmem_cache *s, char *buf)
  2790. {
  2791. return sprintf(buf, "%d\n", s->align);
  2792. }
  2793. SLAB_ATTR_RO(align);
  2794. static ssize_t object_size_show(struct kmem_cache *s, char *buf)
  2795. {
  2796. return sprintf(buf, "%d\n", s->objsize);
  2797. }
  2798. SLAB_ATTR_RO(object_size);
  2799. static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
  2800. {
  2801. return sprintf(buf, "%d\n", s->objects);
  2802. }
  2803. SLAB_ATTR_RO(objs_per_slab);
  2804. static ssize_t order_show(struct kmem_cache *s, char *buf)
  2805. {
  2806. return sprintf(buf, "%d\n", s->order);
  2807. }
  2808. SLAB_ATTR_RO(order);
  2809. static ssize_t ctor_show(struct kmem_cache *s, char *buf)
  2810. {
  2811. if (s->ctor) {
  2812. int n = sprint_symbol(buf, (unsigned long)s->ctor);
  2813. return n + sprintf(buf + n, "\n");
  2814. }
  2815. return 0;
  2816. }
  2817. SLAB_ATTR_RO(ctor);
  2818. static ssize_t aliases_show(struct kmem_cache *s, char *buf)
  2819. {
  2820. return sprintf(buf, "%d\n", s->refcount - 1);
  2821. }
  2822. SLAB_ATTR_RO(aliases);
  2823. static ssize_t slabs_show(struct kmem_cache *s, char *buf)
  2824. {
  2825. return slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU);
  2826. }
  2827. SLAB_ATTR_RO(slabs);
  2828. static ssize_t partial_show(struct kmem_cache *s, char *buf)
  2829. {
  2830. return slab_objects(s, buf, SO_PARTIAL);
  2831. }
  2832. SLAB_ATTR_RO(partial);
  2833. static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
  2834. {
  2835. return slab_objects(s, buf, SO_CPU);
  2836. }
  2837. SLAB_ATTR_RO(cpu_slabs);
  2838. static ssize_t objects_show(struct kmem_cache *s, char *buf)
  2839. {
  2840. return slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU|SO_OBJECTS);
  2841. }
  2842. SLAB_ATTR_RO(objects);
  2843. static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
  2844. {
  2845. return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
  2846. }
  2847. static ssize_t sanity_checks_store(struct kmem_cache *s,
  2848. const char *buf, size_t length)
  2849. {
  2850. s->flags &= ~SLAB_DEBUG_FREE;
  2851. if (buf[0] == '1')
  2852. s->flags |= SLAB_DEBUG_FREE;
  2853. return length;
  2854. }
  2855. SLAB_ATTR(sanity_checks);
  2856. static ssize_t trace_show(struct kmem_cache *s, char *buf)
  2857. {
  2858. return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
  2859. }
  2860. static ssize_t trace_store(struct kmem_cache *s, const char *buf,
  2861. size_t length)
  2862. {
  2863. s->flags &= ~SLAB_TRACE;
  2864. if (buf[0] == '1')
  2865. s->flags |= SLAB_TRACE;
  2866. return length;
  2867. }
  2868. SLAB_ATTR(trace);
  2869. static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
  2870. {
  2871. return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
  2872. }
  2873. static ssize_t reclaim_account_store(struct kmem_cache *s,
  2874. const char *buf, size_t length)
  2875. {
  2876. s->flags &= ~SLAB_RECLAIM_ACCOUNT;
  2877. if (buf[0] == '1')
  2878. s->flags |= SLAB_RECLAIM_ACCOUNT;
  2879. return length;
  2880. }
  2881. SLAB_ATTR(reclaim_account);
  2882. static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
  2883. {
  2884. return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
  2885. }
  2886. SLAB_ATTR_RO(hwcache_align);
  2887. #ifdef CONFIG_ZONE_DMA
  2888. static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
  2889. {
  2890. return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
  2891. }
  2892. SLAB_ATTR_RO(cache_dma);
  2893. #endif
  2894. static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
  2895. {
  2896. return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
  2897. }
  2898. SLAB_ATTR_RO(destroy_by_rcu);
  2899. static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
  2900. {
  2901. return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
  2902. }
  2903. static ssize_t red_zone_store(struct kmem_cache *s,
  2904. const char *buf, size_t length)
  2905. {
  2906. if (any_slab_objects(s))
  2907. return -EBUSY;
  2908. s->flags &= ~SLAB_RED_ZONE;
  2909. if (buf[0] == '1')
  2910. s->flags |= SLAB_RED_ZONE;
  2911. calculate_sizes(s);
  2912. return length;
  2913. }
  2914. SLAB_ATTR(red_zone);
  2915. static ssize_t poison_show(struct kmem_cache *s, char *buf)
  2916. {
  2917. return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
  2918. }
  2919. static ssize_t poison_store(struct kmem_cache *s,
  2920. const char *buf, size_t length)
  2921. {
  2922. if (any_slab_objects(s))
  2923. return -EBUSY;
  2924. s->flags &= ~SLAB_POISON;
  2925. if (buf[0] == '1')
  2926. s->flags |= SLAB_POISON;
  2927. calculate_sizes(s);
  2928. return length;
  2929. }
  2930. SLAB_ATTR(poison);
  2931. static ssize_t store_user_show(struct kmem_cache *s, char *buf)
  2932. {
  2933. return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
  2934. }
  2935. static ssize_t store_user_store(struct kmem_cache *s,
  2936. const char *buf, size_t length)
  2937. {
  2938. if (any_slab_objects(s))
  2939. return -EBUSY;
  2940. s->flags &= ~SLAB_STORE_USER;
  2941. if (buf[0] == '1')
  2942. s->flags |= SLAB_STORE_USER;
  2943. calculate_sizes(s);
  2944. return length;
  2945. }
  2946. SLAB_ATTR(store_user);
  2947. static ssize_t validate_show(struct kmem_cache *s, char *buf)
  2948. {
  2949. return 0;
  2950. }
  2951. static ssize_t validate_store(struct kmem_cache *s,
  2952. const char *buf, size_t length)
  2953. {
  2954. int ret = -EINVAL;
  2955. if (buf[0] == '1') {
  2956. ret = validate_slab_cache(s);
  2957. if (ret >= 0)
  2958. ret = length;
  2959. }
  2960. return ret;
  2961. }
  2962. SLAB_ATTR(validate);
  2963. static ssize_t shrink_show(struct kmem_cache *s, char *buf)
  2964. {
  2965. return 0;
  2966. }
  2967. static ssize_t shrink_store(struct kmem_cache *s,
  2968. const char *buf, size_t length)
  2969. {
  2970. if (buf[0] == '1') {
  2971. int rc = kmem_cache_shrink(s);
  2972. if (rc)
  2973. return rc;
  2974. } else
  2975. return -EINVAL;
  2976. return length;
  2977. }
  2978. SLAB_ATTR(shrink);
  2979. static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
  2980. {
  2981. if (!(s->flags & SLAB_STORE_USER))
  2982. return -ENOSYS;
  2983. return list_locations(s, buf, TRACK_ALLOC);
  2984. }
  2985. SLAB_ATTR_RO(alloc_calls);
  2986. static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
  2987. {
  2988. if (!(s->flags & SLAB_STORE_USER))
  2989. return -ENOSYS;
  2990. return list_locations(s, buf, TRACK_FREE);
  2991. }
  2992. SLAB_ATTR_RO(free_calls);
  2993. #ifdef CONFIG_NUMA
  2994. static ssize_t defrag_ratio_show(struct kmem_cache *s, char *buf)
  2995. {
  2996. return sprintf(buf, "%d\n", s->defrag_ratio / 10);
  2997. }
  2998. static ssize_t defrag_ratio_store(struct kmem_cache *s,
  2999. const char *buf, size_t length)
  3000. {
  3001. int n = simple_strtoul(buf, NULL, 10);
  3002. if (n < 100)
  3003. s->defrag_ratio = n * 10;
  3004. return length;
  3005. }
  3006. SLAB_ATTR(defrag_ratio);
  3007. #endif
  3008. static struct attribute * slab_attrs[] = {
  3009. &slab_size_attr.attr,
  3010. &object_size_attr.attr,
  3011. &objs_per_slab_attr.attr,
  3012. &order_attr.attr,
  3013. &objects_attr.attr,
  3014. &slabs_attr.attr,
  3015. &partial_attr.attr,
  3016. &cpu_slabs_attr.attr,
  3017. &ctor_attr.attr,
  3018. &aliases_attr.attr,
  3019. &align_attr.attr,
  3020. &sanity_checks_attr.attr,
  3021. &trace_attr.attr,
  3022. &hwcache_align_attr.attr,
  3023. &reclaim_account_attr.attr,
  3024. &destroy_by_rcu_attr.attr,
  3025. &red_zone_attr.attr,
  3026. &poison_attr.attr,
  3027. &store_user_attr.attr,
  3028. &validate_attr.attr,
  3029. &shrink_attr.attr,
  3030. &alloc_calls_attr.attr,
  3031. &free_calls_attr.attr,
  3032. #ifdef CONFIG_ZONE_DMA
  3033. &cache_dma_attr.attr,
  3034. #endif
  3035. #ifdef CONFIG_NUMA
  3036. &defrag_ratio_attr.attr,
  3037. #endif
  3038. NULL
  3039. };
  3040. static struct attribute_group slab_attr_group = {
  3041. .attrs = slab_attrs,
  3042. };
  3043. static ssize_t slab_attr_show(struct kobject *kobj,
  3044. struct attribute *attr,
  3045. char *buf)
  3046. {
  3047. struct slab_attribute *attribute;
  3048. struct kmem_cache *s;
  3049. int err;
  3050. attribute = to_slab_attr(attr);
  3051. s = to_slab(kobj);
  3052. if (!attribute->show)
  3053. return -EIO;
  3054. err = attribute->show(s, buf);
  3055. return err;
  3056. }
  3057. static ssize_t slab_attr_store(struct kobject *kobj,
  3058. struct attribute *attr,
  3059. const char *buf, size_t len)
  3060. {
  3061. struct slab_attribute *attribute;
  3062. struct kmem_cache *s;
  3063. int err;
  3064. attribute = to_slab_attr(attr);
  3065. s = to_slab(kobj);
  3066. if (!attribute->store)
  3067. return -EIO;
  3068. err = attribute->store(s, buf, len);
  3069. return err;
  3070. }
  3071. static struct sysfs_ops slab_sysfs_ops = {
  3072. .show = slab_attr_show,
  3073. .store = slab_attr_store,
  3074. };
  3075. static struct kobj_type slab_ktype = {
  3076. .sysfs_ops = &slab_sysfs_ops,
  3077. };
  3078. static int uevent_filter(struct kset *kset, struct kobject *kobj)
  3079. {
  3080. struct kobj_type *ktype = get_ktype(kobj);
  3081. if (ktype == &slab_ktype)
  3082. return 1;
  3083. return 0;
  3084. }
  3085. static struct kset_uevent_ops slab_uevent_ops = {
  3086. .filter = uevent_filter,
  3087. };
  3088. static decl_subsys(slab, &slab_ktype, &slab_uevent_ops);
  3089. #define ID_STR_LENGTH 64
  3090. /* Create a unique string id for a slab cache:
  3091. * format
  3092. * :[flags-]size:[memory address of kmemcache]
  3093. */
  3094. static char *create_unique_id(struct kmem_cache *s)
  3095. {
  3096. char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
  3097. char *p = name;
  3098. BUG_ON(!name);
  3099. *p++ = ':';
  3100. /*
  3101. * First flags affecting slabcache operations. We will only
  3102. * get here for aliasable slabs so we do not need to support
  3103. * too many flags. The flags here must cover all flags that
  3104. * are matched during merging to guarantee that the id is
  3105. * unique.
  3106. */
  3107. if (s->flags & SLAB_CACHE_DMA)
  3108. *p++ = 'd';
  3109. if (s->flags & SLAB_RECLAIM_ACCOUNT)
  3110. *p++ = 'a';
  3111. if (s->flags & SLAB_DEBUG_FREE)
  3112. *p++ = 'F';
  3113. if (p != name + 1)
  3114. *p++ = '-';
  3115. p += sprintf(p, "%07d", s->size);
  3116. BUG_ON(p > name + ID_STR_LENGTH - 1);
  3117. return name;
  3118. }
  3119. static int sysfs_slab_add(struct kmem_cache *s)
  3120. {
  3121. int err;
  3122. const char *name;
  3123. int unmergeable;
  3124. if (slab_state < SYSFS)
  3125. /* Defer until later */
  3126. return 0;
  3127. unmergeable = slab_unmergeable(s);
  3128. if (unmergeable) {
  3129. /*
  3130. * Slabcache can never be merged so we can use the name proper.
  3131. * This is typically the case for debug situations. In that
  3132. * case we can catch duplicate names easily.
  3133. */
  3134. sysfs_remove_link(&slab_subsys.kobj, s->name);
  3135. name = s->name;
  3136. } else {
  3137. /*
  3138. * Create a unique name for the slab as a target
  3139. * for the symlinks.
  3140. */
  3141. name = create_unique_id(s);
  3142. }
  3143. kobj_set_kset_s(s, slab_subsys);
  3144. kobject_set_name(&s->kobj, name);
  3145. kobject_init(&s->kobj);
  3146. err = kobject_add(&s->kobj);
  3147. if (err)
  3148. return err;
  3149. err = sysfs_create_group(&s->kobj, &slab_attr_group);
  3150. if (err)
  3151. return err;
  3152. kobject_uevent(&s->kobj, KOBJ_ADD);
  3153. if (!unmergeable) {
  3154. /* Setup first alias */
  3155. sysfs_slab_alias(s, s->name);
  3156. kfree(name);
  3157. }
  3158. return 0;
  3159. }
  3160. static void sysfs_slab_remove(struct kmem_cache *s)
  3161. {
  3162. kobject_uevent(&s->kobj, KOBJ_REMOVE);
  3163. kobject_del(&s->kobj);
  3164. }
  3165. /*
  3166. * Need to buffer aliases during bootup until sysfs becomes
  3167. * available lest we loose that information.
  3168. */
  3169. struct saved_alias {
  3170. struct kmem_cache *s;
  3171. const char *name;
  3172. struct saved_alias *next;
  3173. };
  3174. static struct saved_alias *alias_list;
  3175. static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
  3176. {
  3177. struct saved_alias *al;
  3178. if (slab_state == SYSFS) {
  3179. /*
  3180. * If we have a leftover link then remove it.
  3181. */
  3182. sysfs_remove_link(&slab_subsys.kobj, name);
  3183. return sysfs_create_link(&slab_subsys.kobj,
  3184. &s->kobj, name);
  3185. }
  3186. al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
  3187. if (!al)
  3188. return -ENOMEM;
  3189. al->s = s;
  3190. al->name = name;
  3191. al->next = alias_list;
  3192. alias_list = al;
  3193. return 0;
  3194. }
  3195. static int __init slab_sysfs_init(void)
  3196. {
  3197. struct kmem_cache *s;
  3198. int err;
  3199. err = subsystem_register(&slab_subsys);
  3200. if (err) {
  3201. printk(KERN_ERR "Cannot register slab subsystem.\n");
  3202. return -ENOSYS;
  3203. }
  3204. slab_state = SYSFS;
  3205. list_for_each_entry(s, &slab_caches, list) {
  3206. err = sysfs_slab_add(s);
  3207. BUG_ON(err);
  3208. }
  3209. while (alias_list) {
  3210. struct saved_alias *al = alias_list;
  3211. alias_list = alias_list->next;
  3212. err = sysfs_slab_alias(al->s, al->name);
  3213. BUG_ON(err);
  3214. kfree(al);
  3215. }
  3216. resiliency_test();
  3217. return 0;
  3218. }
  3219. __initcall(slab_sysfs_init);
  3220. #endif