/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks and only
 * uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
/*
 * Lock order:
 *   1. slab_lock(page)
 *   2. slab->list_lock
 *
 * The slab_lock protects operations on the objects of a particular
 * slab and its metadata in the page struct. If the slab lock
 * has been taken then no allocations nor frees can be performed
 * on the objects in the slab nor can the slab be added or removed
 * from the partial or full lists since this would mean modifying
 * the page struct of the slab.
 *
 * The list_lock protects the partial and full list on each node and
 * the partial slab counter. If taken then no new slabs may be added or
 * removed from the lists nor may the number of partial slabs be modified.
 * (Note that the total number of slabs is an atomic value that may be
 * modified without taking the list lock.)
 *
 * The list_lock is a centralized lock and thus we avoid taking it as
 * much as possible. As long as SLUB does not have to handle partial
 * slabs, operations can continue without any centralized lock. E.g.
 * allocating a long series of objects that fill up slabs does not require
 * the list lock.
 *
 * The lock order is sometimes inverted when we are trying to get a slab
 * off a list. We take the list_lock and then look for a page on the list
 * to use. While we do that, objects in the slabs may be freed. We can
 * only operate on the slab if we have also taken the slab_lock. So we use
 * a slab_trylock() on the slab. If the trylock was successful then no frees
 * can occur anymore and we can use the slab for allocations etc. If the
 * slab_trylock() does not succeed then frees are in progress in the slab and
 * we must stay away from it for a while since we may cause a bouncing
 * cacheline if we try to acquire the lock. So go on to the next slab.
 * If all pages are busy then we may allocate a new slab instead of reusing
 * a partial slab. A new slab has no one operating on it and thus there is
 * no danger of cacheline contention.
 *
 * Interrupts are disabled during allocation and deallocation in order to
 * make the slab allocator safe to use in the context of an irq. In addition
 * interrupts are disabled to ensure that the processor does not change
 * while handling per_cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocator's per cpu caches for
 * fast frees and allocs.
 *
 * Overloading of page flags that are otherwise used for LRU management:
 *
 * PageActive           The slab is frozen and exempt from list processing.
 *                      This means that the slab is dedicated to a purpose
 *                      such as satisfying allocations for a specific
 *                      processor. Objects may be freed in the slab while
 *                      it is frozen but slab_free will then skip the usual
 *                      list operations. It is up to the processor holding
 *                      the slab to integrate the slab into the slab lists
 *                      when the slab is no longer needed.
 *
 *                      One use of this flag is to mark slabs that are
 *                      used for allocations. Then such a slab becomes a cpu
 *                      slab. The cpu slab may be equipped with an additional
 *                      freelist that allows lockless access to
 *                      free objects in addition to the regular freelist
 *                      that requires the slab lock.
 *
 * PageError            Slab requires special handling due to debug
 *                      options set. This moves slab handling out of
 *                      the fast path and disables lockless freelists.
 */
#define FROZEN (1 << PG_active)

#ifdef CONFIG_SLUB_DEBUG
#define SLABDEBUG (1 << PG_error)
#else
#define SLABDEBUG 0
#endif

static inline int SlabFrozen(struct page *page)
{
        return page->flags & FROZEN;
}

static inline void SetSlabFrozen(struct page *page)
{
        page->flags |= FROZEN;
}

static inline void ClearSlabFrozen(struct page *page)
{
        page->flags &= ~FROZEN;
}

static inline int SlabDebug(struct page *page)
{
        return page->flags & SLABDEBUG;
}

static inline void SetSlabDebug(struct page *page)
{
        page->flags |= SLABDEBUG;
}

static inline void ClearSlabDebug(struct page *page)
{
        page->flags &= ~SLABDEBUG;
}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

#if PAGE_SHIFT <= 12

/*
 * Small page size. Make sure that we do not fragment memory.
 */
#define DEFAULT_MAX_ORDER 1
#define DEFAULT_MIN_OBJECTS 4

#else

/*
 * Large page machines are customarily able to handle larger
 * page orders.
 */
#define DEFAULT_MAX_ORDER 2
#define DEFAULT_MIN_OBJECTS 8

#endif

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in use.
 */
#define MAX_PARTIAL 10
#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
                                SLAB_POISON | SLAB_STORE_USER)

/*
 * Set of flags that will prevent slab merging
 */
#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
                SLAB_TRACE | SLAB_DESTROY_BY_RCU)

#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
                SLAB_CACHE_DMA)

#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/* Internal SLUB flags */
#define __OBJECT_POISON         0x80000000 /* Poison object */
#define __SYSFS_ADD_DEFERRED    0x40000000 /* Not yet visible via sysfs */
#define __KMALLOC_CACHE         0x20000000 /* objects freed using kfree */
#define __PAGE_ALLOC_FALLBACK   0x10000000 /* Allow fallback to page alloc */

/* Not all arches define cache_line_size */
#ifndef cache_line_size
#define cache_line_size() L1_CACHE_BYTES
#endif

static int kmem_size = sizeof(struct kmem_cache);

#ifdef CONFIG_SMP
static struct notifier_block slab_notifier;
#endif

static enum {
        DOWN,           /* No slab functionality available */
        PARTIAL,        /* kmem_cache_open() works but kmalloc does not */
        UP,             /* Everything works but does not show up in sysfs */
        SYSFS           /* Sysfs up */
} slab_state = DOWN;

/* A list of all slab caches on the system */
static DECLARE_RWSEM(slub_lock);
static LIST_HEAD(slab_caches);

/*
 * Tracking user of a slab.
 */
struct track {
        void *addr;             /* Called from address */
        int cpu;                /* Was running on cpu */
        int pid;                /* Pid context */
        unsigned long when;     /* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void sysfs_slab_remove(struct kmem_cache *);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
                                                        { return 0; }
static inline void sysfs_slab_remove(struct kmem_cache *s)
{
        kfree(s);
}
#endif

static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
        c->stat[si]++;
#endif
}
/********************************************************************
 *                      Core slab cache functions
 *******************************************************************/

int slab_is_available(void)
{
        return slab_state >= UP;
}

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
#ifdef CONFIG_NUMA
        return s->node[node];
#else
        return &s->local_node;
#endif
}

static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
{
#ifdef CONFIG_SMP
        return s->cpu_slab[cpu];
#else
        return &s->cpu_slab;
#endif
}

static inline int check_valid_pointer(struct kmem_cache *s,
                                struct page *page, const void *object)
{
        void *base;

        if (!object)
                return 1;

        base = page_address(page);
        if (object < base || object >= base + s->objects * s->size ||
                (object - base) % s->size) {
                return 0;
        }

        return 1;
}

/*
 * Slow version of get and set free pointer.
 *
 * This version requires touching the cache lines of kmem_cache, which
 * we avoid in the fast alloc/free paths. There we obtain the offset
 * from the page struct.
 */
static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
        return *(void **)(object + s->offset);
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
        *(void **)(object + s->offset) = fp;
}

/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr) \
        for (__p = (__addr); __p < (__addr) + (__s)->objects * (__s)->size;\
                        __p += (__s)->size)

/* Scan freelist */
#define for_each_free_object(__p, __s, __free) \
        for (__p = (__free); __p; __p = get_freepointer((__s), __p))
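
/*
 * Editor's illustration (not part of the original file): a minimal sketch
 * of how the freelist iterator above is meant to be used. The helper name
 * count_free_objects() is hypothetical; it assumes the caller holds the
 * slab_lock so the freelist chain cannot change underneath it.
 */
static inline int count_free_objects(struct kmem_cache *s, struct page *page)
{
        int count = 0;
        void *p;

        /* Walk the chained free pointers starting at page->freelist */
        for_each_free_object(p, s, page->freelist)
                count++;
        return count;
}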
/* Determine object index from a given position */
static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
{
        return (p - addr) / s->size;
}

#ifdef CONFIG_SLUB_DEBUG
/*
 * Debug settings:
 */
#ifdef CONFIG_SLUB_DEBUG_ON
static int slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static int slub_debug;
#endif

static char *slub_debug_slabs;

/*
 * Object debugging
 */
static void print_section(char *text, u8 *addr, unsigned int length)
{
        int i, offset;
        int newline = 1;
        char ascii[17];

        ascii[16] = 0;

        for (i = 0; i < length; i++) {
                if (newline) {
                        printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
                        newline = 0;
                }
                printk(KERN_CONT " %02x", addr[i]);
                offset = i % 16;
                ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
                if (offset == 15) {
                        printk(KERN_CONT " %s\n", ascii);
                        newline = 1;
                }
        }
        if (!newline) {
                i %= 16;
                while (i < 16) {
                        /* Three spaces stand in for each missing " %02x" */
                        printk(KERN_CONT "   ");
                        ascii[i] = ' ';
                        i++;
                }
                printk(KERN_CONT " %s\n", ascii);
        }
}
static struct track *get_track(struct kmem_cache *s, void *object,
        enum track_item alloc)
{
        struct track *p;

        if (s->offset)
                p = object + s->offset + sizeof(void *);
        else
                p = object + s->inuse;

        return p + alloc;
}

static void set_track(struct kmem_cache *s, void *object,
                                enum track_item alloc, void *addr)
{
        struct track *p;

        if (s->offset)
                p = object + s->offset + sizeof(void *);
        else
                p = object + s->inuse;

        p += alloc;
        if (addr) {
                p->addr = addr;
                p->cpu = smp_processor_id();
                p->pid = current ? current->pid : -1;
                p->when = jiffies;
        } else
                memset(p, 0, sizeof(struct track));
}

static void init_tracking(struct kmem_cache *s, void *object)
{
        if (!(s->flags & SLAB_STORE_USER))
                return;

        set_track(s, object, TRACK_FREE, NULL);
        set_track(s, object, TRACK_ALLOC, NULL);
}

static void print_track(const char *s, struct track *t)
{
        if (!t->addr)
                return;

        printk(KERN_ERR "INFO: %s in ", s);
        __print_symbol("%s", (unsigned long)t->addr);
        printk(" age=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid);
}

static void print_tracking(struct kmem_cache *s, void *object)
{
        if (!(s->flags & SLAB_STORE_USER))
                return;

        print_track("Allocated", get_track(s, object, TRACK_ALLOC));
        print_track("Freed", get_track(s, object, TRACK_FREE));
}

static void print_page_info(struct page *page)
{
        printk(KERN_ERR "INFO: Slab 0x%p used=%u fp=0x%p flags=0x%04lx\n",
                page, page->inuse, page->freelist, page->flags);
}

static void slab_bug(struct kmem_cache *s, char *fmt, ...)
{
        va_list args;
        char buf[100];

        va_start(args, fmt);
        vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);
        printk(KERN_ERR "========================================"
                        "=====================================\n");
        printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
        printk(KERN_ERR "----------------------------------------"
                        "-------------------------------------\n\n");
}
static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
        va_list args;
        char buf[100];

        va_start(args, fmt);
        vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);
        printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
}

static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
{
        unsigned int off;       /* Offset of last byte */
        u8 *addr = page_address(page);

        print_tracking(s, p);

        print_page_info(page);

        printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
                        p, p - addr, get_freepointer(s, p));

        if (p > addr + 16)
                print_section("Bytes b4", p - 16, 16);

        print_section("Object", p, min(s->objsize, 128));

        if (s->flags & SLAB_RED_ZONE)
                print_section("Redzone", p + s->objsize,
                        s->inuse - s->objsize);

        if (s->offset)
                off = s->offset + sizeof(void *);
        else
                off = s->inuse;

        if (s->flags & SLAB_STORE_USER)
                off += 2 * sizeof(struct track);

        if (off != s->size)
                /* Beginning of the filler is the free pointer */
                print_section("Padding", p + off, s->size - off);

        dump_stack();
}

static void object_err(struct kmem_cache *s, struct page *page,
                        u8 *object, char *reason)
{
        slab_bug(s, reason);
        print_trailer(s, page, object);
}

static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
{
        va_list args;
        char buf[100];

        va_start(args, fmt);
        vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);
        /* Pass the formatted buffer, not the raw format string */
        slab_bug(s, "%s", buf);
        print_page_info(page);
        dump_stack();
}
static void init_object(struct kmem_cache *s, void *object, int active)
{
        u8 *p = object;

        if (s->flags & __OBJECT_POISON) {
                memset(p, POISON_FREE, s->objsize - 1);
                p[s->objsize - 1] = POISON_END;
        }

        if (s->flags & SLAB_RED_ZONE)
                memset(p + s->objsize,
                        active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
                        s->inuse - s->objsize);
}

static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
{
        while (bytes) {
                if (*start != (u8)value)
                        return start;
                start++;
                bytes--;
        }
        return NULL;
}

static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
                                                void *from, void *to)
{
        slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
        memset(from, data, to - from);
}

static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
                        u8 *object, char *what,
                        u8 *start, unsigned int value, unsigned int bytes)
{
        u8 *fault;
        u8 *end;

        fault = check_bytes(start, value, bytes);
        if (!fault)
                return 1;

        end = start + bytes;
        while (end > fault && end[-1] == value)
                end--;

        slab_bug(s, "%s overwritten", what);
        printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
                                        fault, end - 1, fault[0], value);
        print_trailer(s, page, object);

        restore_bytes(s, what, value, fault, end);
        return 0;
}
/*
 * Object layout:
 *
 * object address
 *      Bytes of the object to be managed.
 *      If the freepointer may overlay the object then the free
 *      pointer is the first word of the object.
 *
 *      Poisoning uses 0x6b (POISON_FREE) and the last byte is
 *      0xa5 (POISON_END)
 *
 * object + s->objsize
 *      Padding to reach word boundary. This is also used for Redzoning.
 *      Padding is extended by another word if Redzoning is enabled and
 *      objsize == inuse.
 *
 *      We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 *      0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 *      Meta data starts here.
 *
 *      A. Free pointer (if we cannot overwrite object on free)
 *      B. Tracking data for SLAB_STORE_USER
 *      C. Padding to reach required alignment boundary or at minimum
 *              one word if debugging is on to be able to detect writes
 *              before the word boundary.
 *
 *      Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 *      Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the objsize and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */
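
/*
 * Worked example (editor's illustration, not in the original source): with
 * SLAB_RED_ZONE and SLAB_STORE_USER on a cache whose free pointer cannot
 * overlay the object, the layout from the object address upward is:
 *
 *      [0, objsize)                    object bytes (0x6b poison,
 *                                      0xa5 in the last byte when free)
 *      [objsize, inuse)                red zone fill (0xbb free / 0xcc active)
 *      [inuse, inuse + sizeof(void *)) free pointer (s->offset == inuse)
 *      ...                             2 * sizeof(struct track) of tracking
 *      [..., size)                     0x5a padding up to s->size
 *
 * check_pad_bytes() below recomputes exactly this end-of-metadata offset.
 */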
static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
{
        unsigned long off = s->inuse;   /* The end of info */

        if (s->offset)
                /* Freepointer is placed after the object. */
                off += sizeof(void *);

        if (s->flags & SLAB_STORE_USER)
                /* We also have user information there */
                off += 2 * sizeof(struct track);

        if (s->size == off)
                return 1;

        return check_bytes_and_report(s, page, p, "Object padding",
                                p + off, POISON_INUSE, s->size - off);
}

static int slab_pad_check(struct kmem_cache *s, struct page *page)
{
        u8 *start;
        u8 *fault;
        u8 *end;
        int length;
        int remainder;

        if (!(s->flags & SLAB_POISON))
                return 1;

        start = page_address(page);
        end = start + (PAGE_SIZE << s->order);
        length = s->objects * s->size;
        remainder = end - (start + length);
        if (!remainder)
                return 1;

        fault = check_bytes(start + length, POISON_INUSE, remainder);
        if (!fault)
                return 1;
        while (end > fault && end[-1] == POISON_INUSE)
                end--;

        slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
        /* Dump and restore the padding area itself, not the objects before it */
        print_section("Padding", start + length, remainder);

        restore_bytes(s, "slab padding", POISON_INUSE, start + length, end);
        return 0;
}
static int check_object(struct kmem_cache *s, struct page *page,
                                        void *object, int active)
{
        u8 *p = object;
        u8 *endobject = object + s->objsize;

        if (s->flags & SLAB_RED_ZONE) {
                unsigned int red =
                        active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;

                if (!check_bytes_and_report(s, page, object, "Redzone",
                        endobject, red, s->inuse - s->objsize))
                        return 0;
        } else {
                if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
                        check_bytes_and_report(s, page, p, "Alignment padding",
                                endobject, POISON_INUSE, s->inuse - s->objsize);
                }
        }

        if (s->flags & SLAB_POISON) {
                if (!active && (s->flags & __OBJECT_POISON) &&
                        (!check_bytes_and_report(s, page, p, "Poison", p,
                                        POISON_FREE, s->objsize - 1) ||
                         !check_bytes_and_report(s, page, p, "Poison",
                                p + s->objsize - 1, POISON_END, 1)))
                        return 0;
                /*
                 * check_pad_bytes cleans up on its own.
                 */
                check_pad_bytes(s, page, p);
        }

        if (!s->offset && active)
                /*
                 * Object and freepointer overlap. Cannot check
                 * freepointer while object is allocated.
                 */
                return 1;

        /* Check free pointer validity */
        if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
                object_err(s, page, p, "Freepointer corrupt");
                /*
                 * No choice but to zap it and thus lose the remainder
                 * of the free objects in this slab. May cause
                 * another error because the object count is now wrong.
                 */
                set_freepointer(s, p, NULL);
                return 0;
        }
        return 1;
}
static int check_slab(struct kmem_cache *s, struct page *page)
{
        VM_BUG_ON(!irqs_disabled());

        if (!PageSlab(page)) {
                slab_err(s, page, "Not a valid slab page");
                return 0;
        }
        if (page->inuse > s->objects) {
                /* The format string takes only the two counters */
                slab_err(s, page, "inuse %u > max %u",
                        page->inuse, s->objects);
                return 0;
        }
        /* Slab_pad_check fixes things up after itself */
        slab_pad_check(s, page);
        return 1;
}
/*
 * Determine if a certain object on a page is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 */
static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
{
        int nr = 0;
        void *fp = page->freelist;
        void *object = NULL;

        while (fp && nr <= s->objects) {
                if (fp == search)
                        return 1;
                if (!check_valid_pointer(s, page, fp)) {
                        if (object) {
                                object_err(s, page, object,
                                        "Freechain corrupt");
                                set_freepointer(s, object, NULL);
                                break;
                        } else {
                                slab_err(s, page, "Freepointer corrupt");
                                page->freelist = NULL;
                                page->inuse = s->objects;
                                slab_fix(s, "Freelist cleared");
                                return 0;
                        }
                }
                object = fp;
                fp = get_freepointer(s, object);
                nr++;
        }

        if (page->inuse != s->objects - nr) {
                slab_err(s, page, "Wrong object count. Counter is %d but "
                        "counted were %d", page->inuse, s->objects - nr);
                page->inuse = s->objects - nr;
                slab_fix(s, "Object count adjusted.");
        }
        return search == NULL;
}
static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
{
        if (s->flags & SLAB_TRACE) {
                printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
                        s->name,
                        alloc ? "alloc" : "free",
                        object, page->inuse,
                        page->freelist);

                if (!alloc)
                        print_section("Object", (void *)object, s->objsize);

                dump_stack();
        }
}

/*
 * Tracking of fully allocated slabs for debugging purposes.
 */
static void add_full(struct kmem_cache_node *n, struct page *page)
{
        spin_lock(&n->list_lock);
        list_add(&page->lru, &n->full);
        spin_unlock(&n->list_lock);
}

static void remove_full(struct kmem_cache *s, struct page *page)
{
        struct kmem_cache_node *n;

        if (!(s->flags & SLAB_STORE_USER))
                return;

        n = get_node(s, page_to_nid(page));

        spin_lock(&n->list_lock);
        list_del(&page->lru);
        spin_unlock(&n->list_lock);
}

static void setup_object_debug(struct kmem_cache *s, struct page *page,
                                                                void *object)
{
        if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
                return;

        init_object(s, object, 0);
        init_tracking(s, object);
}

static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
                                                void *object, void *addr)
{
        if (!check_slab(s, page))
                goto bad;

        if (object && !on_freelist(s, page, object)) {
                object_err(s, page, object, "Object already allocated");
                goto bad;
        }

        if (!check_valid_pointer(s, page, object)) {
                object_err(s, page, object, "Freelist Pointer check fails");
                goto bad;
        }

        if (object && !check_object(s, page, object, 0))
                goto bad;

        /* Success. Perform the special debug activities for allocs. */
        if (s->flags & SLAB_STORE_USER)
                set_track(s, object, TRACK_ALLOC, addr);
        trace(s, page, object, 1);
        init_object(s, object, 1);
        return 1;

bad:
        if (PageSlab(page)) {
                /*
                 * If this is a slab page then lets do the best we can
                 * to avoid issues in the future. Marking all objects
                 * as used avoids touching the remaining objects.
                 */
                slab_fix(s, "Marking all objects used");
                page->inuse = s->objects;
                page->freelist = NULL;
        }
        return 0;
}
static int free_debug_processing(struct kmem_cache *s, struct page *page,
                                                void *object, void *addr)
{
        if (!check_slab(s, page))
                goto fail;

        if (!check_valid_pointer(s, page, object)) {
                slab_err(s, page, "Invalid object pointer 0x%p", object);
                goto fail;
        }

        if (on_freelist(s, page, object)) {
                object_err(s, page, object, "Object already free");
                goto fail;
        }

        if (!check_object(s, page, object, 1))
                return 0;

        if (unlikely(s != page->slab)) {
                if (!PageSlab(page)) {
                        slab_err(s, page, "Attempt to free object(0x%p) "
                                "outside of slab", object);
                } else if (!page->slab) {
                        printk(KERN_ERR
                                "SLUB <none>: no slab for object 0x%p.\n",
                                                object);
                        dump_stack();
                } else
                        object_err(s, page, object,
                                        "page slab pointer corrupt.");
                goto fail;
        }

        /* Special debug activities for freeing objects */
        if (!SlabFrozen(page) && !page->freelist)
                remove_full(s, page);
        if (s->flags & SLAB_STORE_USER)
                set_track(s, object, TRACK_FREE, addr);
        trace(s, page, object, 0);
        init_object(s, object, 0);
        return 1;

fail:
        slab_fix(s, "Object at 0x%p not freed", object);
        return 0;
}
static int __init setup_slub_debug(char *str)
{
        slub_debug = DEBUG_DEFAULT_FLAGS;
        if (*str++ != '=' || !*str)
                /*
                 * No options specified. Switch on full debugging.
                 */
                goto out;

        if (*str == ',')
                /*
                 * No options but restriction on slabs. This means full
                 * debugging for slabs matching a pattern.
                 */
                goto check_slabs;

        slub_debug = 0;
        if (*str == '-')
                /*
                 * Switch off all debugging measures.
                 */
                goto out;

        /*
         * Determine which debug features should be switched on
         */
        for (; *str && *str != ','; str++) {
                switch (tolower(*str)) {
                case 'f':
                        slub_debug |= SLAB_DEBUG_FREE;
                        break;
                case 'z':
                        slub_debug |= SLAB_RED_ZONE;
                        break;
                case 'p':
                        slub_debug |= SLAB_POISON;
                        break;
                case 'u':
                        slub_debug |= SLAB_STORE_USER;
                        break;
                case 't':
                        slub_debug |= SLAB_TRACE;
                        break;
                default:
                        printk(KERN_ERR "slub_debug option '%c' "
                                "unknown. skipped\n", *str);
                }
        }

check_slabs:
        if (*str == ',')
                slub_debug_slabs = str + 1;
out:
        return 1;
}

__setup("slub_debug", setup_slub_debug);
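
/*
 * Usage examples (editor's note, matching the parsing above): an optional
 * option string, then an optional ','-separated slab name pattern:
 *
 *      slub_debug              full debugging for every slab
 *      slub_debug=FZ           sanity checks plus red zoning for all slabs
 *      slub_debug=,dentry      full debugging, but only for caches whose
 *                              name starts with "dentry"
 *      slub_debug=-            switch all debugging off
 */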
static unsigned long kmem_cache_flags(unsigned long objsize,
        unsigned long flags, const char *name,
        void (*ctor)(struct kmem_cache *, void *))
{
        /*
         * The page->offset field is only 16 bit wide. This is an offset
         * in units of words from the beginning of an object. If the slab
         * size is bigger than that, we cannot move the free pointer
         * behind the object anymore.
         *
         * On 32 bit platforms the limit is 256k. On 64 bit platforms
         * the limit is 512k.
         *
         * Debugging or ctor may create a need to move the free
         * pointer. Fail if this happens.
         */
        if (objsize >= 65535 * sizeof(void *)) {
                BUG_ON(flags & (SLAB_RED_ZONE | SLAB_POISON |
                                SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
                BUG_ON(ctor);
        } else {
                /*
                 * Enable debugging if selected on the kernel commandline.
                 */
                if (slub_debug && (!slub_debug_slabs ||
                    strncmp(slub_debug_slabs, name,
                        strlen(slub_debug_slabs)) == 0))
                        flags |= slub_debug;
        }

        return flags;
}
#else
static inline void setup_object_debug(struct kmem_cache *s,
                        struct page *page, void *object) {}

static inline int alloc_debug_processing(struct kmem_cache *s,
        struct page *page, void *object, void *addr) { return 0; }

static inline int free_debug_processing(struct kmem_cache *s,
        struct page *page, void *object, void *addr) { return 0; }

static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
                        { return 1; }
static inline int check_object(struct kmem_cache *s, struct page *page,
                        void *object, int active) { return 1; }
static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
static inline unsigned long kmem_cache_flags(unsigned long objsize,
        unsigned long flags, const char *name,
        void (*ctor)(struct kmem_cache *, void *))
{
        return flags;
}
#define slub_debug 0
#endif

/*
 * Slab allocation and freeing
 */
static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
        struct page *page;
        int pages = 1 << s->order;

        flags |= s->allocflags;

        if (node == -1)
                page = alloc_pages(flags, s->order);
        else
                page = alloc_pages_node(node, flags, s->order);

        if (!page)
                return NULL;

        mod_zone_page_state(page_zone(page),
                (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
                pages);

        return page;
}

static void setup_object(struct kmem_cache *s, struct page *page,
                                void *object)
{
        setup_object_debug(s, page, object);
        if (unlikely(s->ctor))
                s->ctor(s, object);
}

static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
{
        struct page *page;
        struct kmem_cache_node *n;
        void *start;
        void *last;
        void *p;

        BUG_ON(flags & GFP_SLAB_BUG_MASK);

        page = allocate_slab(s,
                flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
        if (!page)
                goto out;

        n = get_node(s, page_to_nid(page));
        if (n)
                atomic_long_inc(&n->nr_slabs);
        page->slab = s;
        page->flags |= 1 << PG_slab;
        if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
                        SLAB_STORE_USER | SLAB_TRACE))
                SetSlabDebug(page);

        start = page_address(page);

        if (unlikely(s->flags & SLAB_POISON))
                memset(start, POISON_INUSE, PAGE_SIZE << s->order);

        last = start;
        for_each_object(p, s, start) {
                setup_object(s, page, last);
                set_freepointer(s, last, p);
                last = p;
        }
        setup_object(s, page, last);
        set_freepointer(s, last, NULL);

        page->freelist = start;
        page->inuse = 0;
out:
        return page;
}
static void __free_slab(struct kmem_cache *s, struct page *page)
{
        int pages = 1 << s->order;

        if (unlikely(SlabDebug(page))) {
                void *p;

                slab_pad_check(s, page);
                for_each_object(p, s, page_address(page))
                        check_object(s, page, p, 0);
                ClearSlabDebug(page);
        }

        mod_zone_page_state(page_zone(page),
                (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
                -pages);

        __free_pages(page, s->order);
}

static void rcu_free_slab(struct rcu_head *h)
{
        struct page *page;

        page = container_of((struct list_head *)h, struct page, lru);
        __free_slab(page->slab, page);
}

static void free_slab(struct kmem_cache *s, struct page *page)
{
        if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
                /*
                 * RCU free overloads the RCU head over the LRU
                 */
                struct rcu_head *head = (void *)&page->lru;

                call_rcu(head, rcu_free_slab);
        } else
                __free_slab(s, page);
}

static void discard_slab(struct kmem_cache *s, struct page *page)
{
        struct kmem_cache_node *n = get_node(s, page_to_nid(page));

        atomic_long_dec(&n->nr_slabs);
        reset_page_mapcount(page);
        __ClearPageSlab(page);
        free_slab(s, page);
}
/*
 * Per slab locking using the pagelock
 */
static __always_inline void slab_lock(struct page *page)
{
        bit_spin_lock(PG_locked, &page->flags);
}

static __always_inline void slab_unlock(struct page *page)
{
        __bit_spin_unlock(PG_locked, &page->flags);
}

static __always_inline int slab_trylock(struct page *page)
{
        return bit_spin_trylock(PG_locked, &page->flags);
}
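
/*
 * Illustrative pattern (editor's sketch, not part of the original file):
 * this is how the lock-order inversion described at the top of the file is
 * resolved in practice. While holding a node's list_lock, a slab may only
 * be taken with slab_trylock(); on failure the scan simply moves on:
 *
 *      spin_lock(&n->list_lock);
 *      list_for_each_entry(page, &n->partial, lru)
 *              if (slab_trylock(page)) {
 *                      ... use the slab ...
 *                      break;
 *              }
 *      spin_unlock(&n->list_lock);
 *
 * lock_and_freeze_slab() and get_partial_node() below implement exactly
 * this pattern.
 */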
/*
 * Management of partially allocated slabs
 */
static void add_partial(struct kmem_cache_node *n,
                                struct page *page, int tail)
{
        spin_lock(&n->list_lock);
        n->nr_partial++;
        if (tail)
                list_add_tail(&page->lru, &n->partial);
        else
                list_add(&page->lru, &n->partial);
        spin_unlock(&n->list_lock);
}

static void remove_partial(struct kmem_cache *s,
                                                struct page *page)
{
        struct kmem_cache_node *n = get_node(s, page_to_nid(page));

        spin_lock(&n->list_lock);
        list_del(&page->lru);
        n->nr_partial--;
        spin_unlock(&n->list_lock);
}

/*
 * Lock slab and remove from the partial list.
 *
 * Must hold list_lock.
 */
static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page)
{
        if (slab_trylock(page)) {
                list_del(&page->lru);
                n->nr_partial--;
                SetSlabFrozen(page);
                return 1;
        }
        return 0;
}
/*
 * Try to allocate a partial slab from a specific node.
 */
static struct page *get_partial_node(struct kmem_cache_node *n)
{
        struct page *page;

        /*
         * Racy check. If we mistakenly see no partial slabs then we
         * just allocate an empty slab. If we mistakenly try to get a
         * partial slab and there is none available then get_partial()
         * will return NULL.
         */
        if (!n || !n->nr_partial)
                return NULL;

        spin_lock(&n->list_lock);
        list_for_each_entry(page, &n->partial, lru)
                if (lock_and_freeze_slab(n, page))
                        goto out;
        page = NULL;
out:
        spin_unlock(&n->list_lock);
        return page;
}
/*
 * Get a page from somewhere. Search in increasing NUMA distances.
 */
static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
{
#ifdef CONFIG_NUMA
        struct zonelist *zonelist;
        struct zone **z;
        struct page *page;

        /*
         * The defrag ratio allows a configuration of the tradeoffs between
         * inter node defragmentation and node local allocations. A lower
         * defrag_ratio increases the tendency to do local allocations
         * instead of attempting to obtain partial slabs from other nodes.
         *
         * If the defrag_ratio is set to 0 then kmalloc() always
         * returns node local objects. If the ratio is higher then kmalloc()
         * may return off node objects because partial slabs are obtained
         * from other nodes and filled up.
         *
         * If /sys/slab/xx/defrag_ratio is set to 100 (which makes
         * defrag_ratio = 1000) then every (well almost) allocation will
         * first attempt to defrag slab caches on other nodes. This means
         * scanning over all nodes to look for partial slabs, which may be
         * expensive if we do it every time we are trying to find a slab
         * with available objects.
         */
        if (!s->remote_node_defrag_ratio ||
                        get_cycles() % 1024 > s->remote_node_defrag_ratio)
                return NULL;

        zonelist = &NODE_DATA(
                slab_node(current->mempolicy))->node_zonelists[gfp_zone(flags)];
        for (z = zonelist->zones; *z; z++) {
                struct kmem_cache_node *n;

                n = get_node(s, zone_to_nid(*z));

                if (n && cpuset_zone_allowed_hardwall(*z, flags) &&
                                n->nr_partial > MIN_PARTIAL) {
                        page = get_partial_node(n);
                        if (page)
                                return page;
                }
        }
#endif
        return NULL;
}
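
/*
 * Worked example (editor's illustration, derived from the comment above):
 * the sysfs file stores ten times the percentage, so writing 50 to
 * /sys/slab/<cache>/defrag_ratio sets remote_node_defrag_ratio to 500.
 * The check above then skips the remote scan whenever
 * get_cycles() % 1024 > 500, i.e. roughly half of the attempts fall back
 * to allocating a fresh local slab instead of searching other nodes.
 */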
/*
 * Get a partial page, lock it and return it.
 */
static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
{
        struct page *page;
        int searchnode = (node == -1) ? numa_node_id() : node;

        page = get_partial_node(get_node(s, searchnode));
        if (page || (flags & __GFP_THISNODE))
                return page;

        return get_any_partial(s, flags);
}
/*
 * Move a page back to the lists.
 *
 * Must be called with the slab lock held.
 *
 * On exit the slab lock will have been dropped.
 */
static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
{
        struct kmem_cache_node *n = get_node(s, page_to_nid(page));
        struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id());

        ClearSlabFrozen(page);
        if (page->inuse) {

                if (page->freelist) {
                        add_partial(n, page, tail);
                        stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
                } else {
                        stat(c, DEACTIVATE_FULL);
                        if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
                                add_full(n, page);
                }
                slab_unlock(page);
        } else {
                stat(c, DEACTIVATE_EMPTY);
                if (n->nr_partial < MIN_PARTIAL) {
                        /*
                         * Adding an empty slab to the partial slabs in order
                         * to avoid page allocator overhead. This slab needs
                         * to come after the other slabs with objects in
                         * order to fill them up. That way the size of the
                         * partial list stays small. kmem_cache_shrink can
                         * reclaim empty slabs from the partial list.
                         */
                        add_partial(n, page, 1);
                        slab_unlock(page);
                } else {
                        slab_unlock(page);
                        stat(get_cpu_slab(s, raw_smp_processor_id()), FREE_SLAB);
                        discard_slab(s, page);
                }
        }
}
  1178. /*
  1179. * Remove the cpu slab
  1180. */
  1181. static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
  1182. {
  1183. struct page *page = c->page;
  1184. int tail = 1;
if (page->freelist)
  1186. stat(c, DEACTIVATE_REMOTE_FREES);
  1187. /*
* Merge the cpu freelist into the slab freelist. Typically we get here
  1189. * because both freelists are empty. So this is unlikely
  1190. * to occur.
  1191. */
  1192. while (unlikely(c->freelist)) {
  1193. void **object;
  1194. tail = 0; /* Hot objects. Put the slab first */
  1195. /* Retrieve object from cpu_freelist */
  1196. object = c->freelist;
  1197. c->freelist = c->freelist[c->offset];
  1198. /* And put onto the regular freelist */
  1199. object[c->offset] = page->freelist;
  1200. page->freelist = object;
  1201. page->inuse--;
  1202. }
  1203. c->page = NULL;
  1204. unfreeze_slab(s, page, tail);
  1205. }
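/*
* Flush the current cpu slab: take the slab lock and let
* deactivate_slab() move the page back to the appropriate list. The
* slab lock is dropped by unfreeze_slab() on the way out.
*/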
  1206. static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
  1207. {
  1208. stat(c, CPUSLAB_FLUSH);
  1209. slab_lock(c->page);
  1210. deactivate_slab(s, c);
  1211. }
  1212. /*
  1213. * Flush cpu slab.
  1214. * Called from IPI handler with interrupts disabled.
  1215. */
  1216. static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
  1217. {
  1218. struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
  1219. if (likely(c && c->page))
  1220. flush_slab(s, c);
  1221. }
  1222. static void flush_cpu_slab(void *d)
  1223. {
  1224. struct kmem_cache *s = d;
  1225. __flush_cpu_slab(s, smp_processor_id());
  1226. }
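/*
* Flush the cpu slabs of all processors. On SMP this runs
* flush_cpu_slab() on every cpu via IPI; on UP it is enough to flush
* the local cpu with interrupts disabled.
*/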
  1227. static void flush_all(struct kmem_cache *s)
  1228. {
  1229. #ifdef CONFIG_SMP
  1230. on_each_cpu(flush_cpu_slab, s, 1, 1);
  1231. #else
  1232. unsigned long flags;
  1233. local_irq_save(flags);
  1234. flush_cpu_slab(s);
  1235. local_irq_restore(flags);
  1236. #endif
  1237. }
  1238. /*
  1239. * Check if the objects in a per cpu structure fit numa
  1240. * locality expectations.
  1241. */
  1242. static inline int node_match(struct kmem_cache_cpu *c, int node)
  1243. {
  1244. #ifdef CONFIG_NUMA
  1245. if (node != -1 && c->node != node)
  1246. return 0;
  1247. #endif
  1248. return 1;
  1249. }
  1250. /*
  1251. * Slow path. The lockless freelist is empty or we need to perform
  1252. * debugging duties.
  1253. *
  1254. * Interrupts are disabled.
  1255. *
  1256. * Processing is still very fast if new objects have been freed to the
  1257. * regular freelist. In that case we simply take over the regular freelist
  1258. * as the lockless freelist and zap the regular freelist.
  1259. *
  1260. * If that is not working then we fall back to the partial lists. We take the
  1261. * first element of the freelist as the object to allocate now and move the
  1262. * rest of the freelist to the lockless freelist.
  1263. *
  1264. * And if we were unable to get a new slab from the partial slab lists then
* we need to allocate a new slab. This is the slowest path since we may sleep.
  1266. */
  1267. static void *__slab_alloc(struct kmem_cache *s,
  1268. gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c)
  1269. {
  1270. void **object;
  1271. struct page *new;
  1272. if (!c->page)
  1273. goto new_slab;
  1274. slab_lock(c->page);
  1275. if (unlikely(!node_match(c, node)))
  1276. goto another_slab;
  1277. stat(c, ALLOC_REFILL);
  1278. load_freelist:
  1279. object = c->page->freelist;
  1280. if (unlikely(!object))
  1281. goto another_slab;
  1282. if (unlikely(SlabDebug(c->page)))
  1283. goto debug;
c->freelist = object[c->offset];
  1286. c->page->inuse = s->objects;
  1287. c->page->freelist = NULL;
  1288. c->node = page_to_nid(c->page);
  1289. unlock_out:
  1290. slab_unlock(c->page);
  1291. stat(c, ALLOC_SLOWPATH);
  1292. return object;
  1293. another_slab:
  1294. deactivate_slab(s, c);
  1295. new_slab:
  1296. new = get_partial(s, gfpflags, node);
  1297. if (new) {
  1298. c->page = new;
  1299. stat(c, ALLOC_FROM_PARTIAL);
  1300. goto load_freelist;
  1301. }
  1302. if (gfpflags & __GFP_WAIT)
  1303. local_irq_enable();
  1304. new = new_slab(s, gfpflags, node);
  1305. if (gfpflags & __GFP_WAIT)
  1306. local_irq_disable();
  1307. if (new) {
  1308. c = get_cpu_slab(s, smp_processor_id());
  1309. stat(c, ALLOC_SLAB);
  1310. if (c->page)
  1311. flush_slab(s, c);
  1312. slab_lock(new);
  1313. SetSlabFrozen(new);
  1314. c->page = new;
  1315. goto load_freelist;
  1316. }
  1317. /*
  1318. * No memory available.
  1319. *
  1320. * If the slab uses higher order allocs but the object is
* smaller than a page size then we can fall back in emergencies
  1322. * to the page allocator via kmalloc_large. The page allocator may
  1323. * have failed to obtain a higher order page and we can try to
  1324. * allocate a single page if the object fits into a single page.
  1325. * That is only possible if certain conditions are met that are being
  1326. * checked when a slab is created.
  1327. */
  1328. if (!(gfpflags & __GFP_NORETRY) && (s->flags & __PAGE_ALLOC_FALLBACK))
  1329. return kmalloc_large(s->objsize, gfpflags);
  1330. return NULL;
  1331. debug:
if (!alloc_debug_processing(s, c->page, object, addr))
  1334. goto another_slab;
  1335. c->page->inuse++;
  1336. c->page->freelist = object[c->offset];
  1337. c->node = -1;
  1338. goto unlock_out;
  1339. }
  1340. /*
  1341. * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
  1342. * have the fastpath folded into their functions. So no function call
  1343. * overhead for requests that can be satisfied on the fastpath.
  1344. *
  1345. * The fastpath works by first checking if the lockless freelist can be used.
  1346. * If not then __slab_alloc is called for slow processing.
  1347. *
  1348. * Otherwise we can simply pick the next object from the lockless free list.
  1349. */
  1350. static __always_inline void *slab_alloc(struct kmem_cache *s,
  1351. gfp_t gfpflags, int node, void *addr)
  1352. {
  1353. void **object;
  1354. struct kmem_cache_cpu *c;
  1355. unsigned long flags;
  1356. local_irq_save(flags);
  1357. c = get_cpu_slab(s, smp_processor_id());
  1358. if (unlikely(!c->freelist || !node_match(c, node)))
  1359. object = __slab_alloc(s, gfpflags, node, addr, c);
  1360. else {
  1361. object = c->freelist;
  1362. c->freelist = object[c->offset];
  1363. stat(c, ALLOC_FASTPATH);
  1364. }
  1365. local_irq_restore(flags);
  1366. if (unlikely((gfpflags & __GFP_ZERO) && object))
  1367. memset(object, 0, c->objsize);
  1368. return object;
  1369. }
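/*
* Exported allocation entry point. Node -1 means that any node is
* acceptable; the caller's return address is passed along for the
* debug tracking done in the slow path.
*/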
  1370. void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
  1371. {
  1372. return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
  1373. }
  1374. EXPORT_SYMBOL(kmem_cache_alloc);
  1375. #ifdef CONFIG_NUMA
  1376. void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
  1377. {
  1378. return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
  1379. }
  1380. EXPORT_SYMBOL(kmem_cache_alloc_node);
  1381. #endif
  1382. /*
* Slow path handling. This may still be called frequently since objects
  1384. * have a longer lifetime than the cpu slabs in most processing loads.
  1385. *
  1386. * So we still attempt to reduce cache line usage. Just take the slab
  1387. * lock and free the item. If there is no additional partial page
  1388. * handling required then we can return immediately.
  1389. */
  1390. static void __slab_free(struct kmem_cache *s, struct page *page,
  1391. void *x, void *addr, unsigned int offset)
  1392. {
  1393. void *prior;
  1394. void **object = (void *)x;
  1395. struct kmem_cache_cpu *c;
  1396. c = get_cpu_slab(s, raw_smp_processor_id());
  1397. stat(c, FREE_SLOWPATH);
  1398. slab_lock(page);
  1399. if (unlikely(SlabDebug(page)))
  1400. goto debug;
  1401. checks_ok:
  1402. prior = object[offset] = page->freelist;
  1403. page->freelist = object;
  1404. page->inuse--;
  1405. if (unlikely(SlabFrozen(page))) {
  1406. stat(c, FREE_FROZEN);
  1407. goto out_unlock;
  1408. }
  1409. if (unlikely(!page->inuse))
  1410. goto slab_empty;
  1411. /*
  1412. * Objects left in the slab. If it
  1413. * was not on the partial list before
  1414. * then add it.
  1415. */
  1416. if (unlikely(!prior)) {
  1417. add_partial(get_node(s, page_to_nid(page)), page, 1);
  1418. stat(c, FREE_ADD_PARTIAL);
  1419. }
  1420. out_unlock:
  1421. slab_unlock(page);
  1422. return;
  1423. slab_empty:
  1424. if (prior) {
  1425. /*
  1426. * Slab still on the partial list.
  1427. */
  1428. remove_partial(s, page);
  1429. stat(c, FREE_REMOVE_PARTIAL);
  1430. }
  1431. slab_unlock(page);
  1432. stat(c, FREE_SLAB);
  1433. discard_slab(s, page);
  1434. return;
  1435. debug:
  1436. if (!free_debug_processing(s, page, x, addr))
  1437. goto out_unlock;
  1438. goto checks_ok;
  1439. }
  1440. /*
  1441. * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
  1442. * can perform fastpath freeing without additional function calls.
  1443. *
  1444. * The fastpath is only possible if we are freeing to the current cpu slab
* of this processor. This is typically the case if we have just allocated
  1446. * the item before.
  1447. *
  1448. * If fastpath is not possible then fall back to __slab_free where we deal
  1449. * with all sorts of special processing.
  1450. */
  1451. static __always_inline void slab_free(struct kmem_cache *s,
  1452. struct page *page, void *x, void *addr)
  1453. {
  1454. void **object = (void *)x;
  1455. struct kmem_cache_cpu *c;
  1456. unsigned long flags;
  1457. local_irq_save(flags);
  1458. debug_check_no_locks_freed(object, s->objsize);
  1459. c = get_cpu_slab(s, smp_processor_id());
  1460. if (likely(page == c->page && c->node >= 0)) {
  1461. object[c->offset] = c->freelist;
  1462. c->freelist = object;
  1463. stat(c, FREE_FASTPATH);
  1464. } else
  1465. __slab_free(s, page, x, addr, c->offset);
  1466. local_irq_restore(flags);
  1467. }
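/*
* Exported free entry point: resolve the slab page from the object
* address and hand both to slab_free().
*/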
  1468. void kmem_cache_free(struct kmem_cache *s, void *x)
  1469. {
  1470. struct page *page;
  1471. page = virt_to_head_page(x);
  1472. slab_free(s, page, x, __builtin_return_address(0));
  1473. }
  1474. EXPORT_SYMBOL(kmem_cache_free);
  1475. /* Figure out on which slab object the object resides */
  1476. static struct page *get_object_page(const void *x)
  1477. {
  1478. struct page *page = virt_to_head_page(x);
  1479. if (!PageSlab(page))
  1480. return NULL;
  1481. return page;
  1482. }
  1483. /*
  1484. * Object placement in a slab is made very easy because we always start at
  1485. * offset 0. If we tune the size of the object to the alignment then we can
  1486. * get the required alignment by putting one properly sized object after
  1487. * another.
  1488. *
  1489. * Notice that the allocation order determines the sizes of the per cpu
* caches. Each processor always has one slab available for allocations.
  1491. * Increasing the allocation order reduces the number of times that slabs
  1492. * must be moved on and off the partial lists and is therefore a factor in
  1493. * locking overhead.
  1494. */
  1495. /*
* Minimum / Maximum order of slab pages. This influences locking overhead
  1497. * and slab fragmentation. A higher order reduces the number of partial slabs
  1498. * and increases the number of allocations possible without having to
  1499. * take the list_lock.
  1500. */
  1501. static int slub_min_order;
  1502. static int slub_max_order = DEFAULT_MAX_ORDER;
  1503. static int slub_min_objects = DEFAULT_MIN_OBJECTS;
  1504. /*
  1505. * Merge control. If this is set then no merging of slab caches will occur.
  1506. * (Could be removed. This was introduced to pacify the merge skeptics.)
  1507. */
  1508. static int slub_nomerge;
  1509. /*
* Calculate the order of allocation given a slab object size.
  1511. *
  1512. * The order of allocation has significant impact on performance and other
  1513. * system components. Generally order 0 allocations should be preferred since
  1514. * order 0 does not cause fragmentation in the page allocator. Larger objects
  1515. * be problematic to put into order 0 slabs because there may be too much
  1516. * unused space left. We go to a higher order if more than 1/8th of the slab
  1517. * would be wasted.
  1518. *
  1519. * In order to reach satisfactory performance we must ensure that a minimum
  1520. * number of objects is in one slab. Otherwise we may generate too much
  1521. * activity on the partial lists which requires taking the list_lock. This is
  1522. * less a concern for large slabs though which are rarely used.
  1523. *
  1524. * slub_max_order specifies the order where we begin to stop considering the
  1525. * number of objects in a slab as critical. If we reach slub_max_order then
  1526. * we try to keep the page order as low as possible. So we accept more waste
  1527. * of space in favor of a small page order.
  1528. *
  1529. * Higher order allocations also allow the placement of more objects in a
  1530. * slab and thereby reduce object handling overhead. If the user has
* requested a higher minimum order then we start with that one instead of
  1532. * the smallest order which will fit the object.
  1533. */
  1534. static inline int slab_order(int size, int min_objects,
  1535. int max_order, int fract_leftover)
  1536. {
  1537. int order;
  1538. int rem;
  1539. int min_order = slub_min_order;
  1540. for (order = max(min_order,
  1541. fls(min_objects * size - 1) - PAGE_SHIFT);
  1542. order <= max_order; order++) {
  1543. unsigned long slab_size = PAGE_SIZE << order;
  1544. if (slab_size < min_objects * size)
  1545. continue;
  1546. rem = slab_size % size;
  1547. if (rem <= slab_size / fract_leftover)
  1548. break;
  1549. }
  1550. return order;
  1551. }
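/*
* Worked example for slab_order() above (assuming 4KB pages): for
* size = 192, min_objects = 4 and fract_leftover = 8 the search starts
* at order 0, where 21 objects fit with 64 bytes left over. Since
* 64 <= 4096 / 8 the leftover is acceptable and order 0 is returned.
*/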
  1552. static inline int calculate_order(int size)
  1553. {
  1554. int order;
  1555. int min_objects;
  1556. int fraction;
  1557. /*
  1558. * Attempt to find best configuration for a slab. This
  1559. * works by first attempting to generate a layout with
  1560. * the best configuration and backing off gradually.
  1561. *
  1562. * First we reduce the acceptable waste in a slab. Then
  1563. * we reduce the minimum objects required in a slab.
  1564. */
  1565. min_objects = slub_min_objects;
  1566. while (min_objects > 1) {
  1567. fraction = 8;
  1568. while (fraction >= 4) {
  1569. order = slab_order(size, min_objects,
  1570. slub_max_order, fraction);
  1571. if (order <= slub_max_order)
  1572. return order;
  1573. fraction /= 2;
  1574. }
  1575. min_objects /= 2;
  1576. }
  1577. /*
  1578. * We were unable to place multiple objects in a slab. Now
* let's see if we can place a single object there.
  1580. */
  1581. order = slab_order(size, 1, slub_max_order, 1);
  1582. if (order <= slub_max_order)
  1583. return order;
  1584. /*
  1585. * Doh this slab cannot be placed using slub_max_order.
  1586. */
  1587. order = slab_order(size, 1, MAX_ORDER, 1);
  1588. if (order <= MAX_ORDER)
  1589. return order;
  1590. return -ENOSYS;
  1591. }
  1592. /*
  1593. * Figure out what the alignment of the objects will be.
  1594. */
  1595. static unsigned long calculate_alignment(unsigned long flags,
  1596. unsigned long align, unsigned long size)
  1597. {
  1598. /*
  1599. * If the user wants hardware cache aligned objects then
  1600. * follow that suggestion if the object is sufficiently
  1601. * large.
  1602. *
  1603. * The hardware cache alignment cannot override the
  1604. * specified alignment though. If that is greater
  1605. * then use it.
  1606. */
  1607. if ((flags & SLAB_HWCACHE_ALIGN) &&
  1608. size > cache_line_size() / 2)
  1609. return max_t(unsigned long, align, cache_line_size());
  1610. if (align < ARCH_SLAB_MINALIGN)
  1611. return ARCH_SLAB_MINALIGN;
  1612. return ALIGN(align, sizeof(void *));
  1613. }
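/*
* Illustrative example for calculate_alignment() above: with 64 byte
* cache lines a 100 byte object created with SLAB_HWCACHE_ALIGN gets
* at least 64 byte alignment because it is larger than half a cache
* line, while a 24 byte object keeps its smaller requested alignment.
*/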
  1614. static void init_kmem_cache_cpu(struct kmem_cache *s,
  1615. struct kmem_cache_cpu *c)
  1616. {
  1617. c->page = NULL;
  1618. c->freelist = NULL;
  1619. c->node = 0;
  1620. c->offset = s->offset / sizeof(void *);
  1621. c->objsize = s->objsize;
  1622. }
  1623. static void init_kmem_cache_node(struct kmem_cache_node *n)
  1624. {
  1625. n->nr_partial = 0;
  1626. atomic_long_set(&n->nr_slabs, 0);
  1627. spin_lock_init(&n->list_lock);
  1628. INIT_LIST_HEAD(&n->partial);
  1629. #ifdef CONFIG_SLUB_DEBUG
  1630. INIT_LIST_HEAD(&n->full);
  1631. #endif
  1632. }
  1633. #ifdef CONFIG_SMP
  1634. /*
  1635. * Per cpu array for per cpu structures.
  1636. *
  1637. * The per cpu array places all kmem_cache_cpu structures from one processor
  1638. * close together meaning that it becomes possible that multiple per cpu
  1639. * structures are contained in one cacheline. This may be particularly
  1640. * beneficial for the kmalloc caches.
  1641. *
  1642. * A desktop system typically has around 60-80 slabs. With 100 here we are
  1643. * likely able to get per cpu structures for all caches from the array defined
  1644. * here. We must be able to cover all kmalloc caches during bootstrap.
  1645. *
  1646. * If the per cpu array is exhausted then fall back to kmalloc
  1647. * of individual cachelines. No sharing is possible then.
  1648. */
  1649. #define NR_KMEM_CACHE_CPU 100
  1650. static DEFINE_PER_CPU(struct kmem_cache_cpu,
  1651. kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
  1652. static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
  1653. static cpumask_t kmem_cach_cpu_free_init_once = CPU_MASK_NONE;
  1654. static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
  1655. int cpu, gfp_t flags)
  1656. {
  1657. struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu);
  1658. if (c)
  1659. per_cpu(kmem_cache_cpu_free, cpu) =
  1660. (void *)c->freelist;
  1661. else {
  1662. /* Table overflow: So allocate ourselves */
  1663. c = kmalloc_node(
  1664. ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()),
  1665. flags, cpu_to_node(cpu));
  1666. if (!c)
  1667. return NULL;
  1668. }
  1669. init_kmem_cache_cpu(s, c);
  1670. return c;
  1671. }
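/*
* Return a kmem_cache_cpu structure. If it lies within the static per
* cpu array it goes back on that cpu's freelist; otherwise it was
* kmalloced and is freed normally.
*/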
  1672. static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu)
  1673. {
  1674. if (c < per_cpu(kmem_cache_cpu, cpu) ||
  1675. c > per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
  1676. kfree(c);
  1677. return;
  1678. }
  1679. c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu);
  1680. per_cpu(kmem_cache_cpu_free, cpu) = c;
  1681. }
  1682. static void free_kmem_cache_cpus(struct kmem_cache *s)
  1683. {
  1684. int cpu;
  1685. for_each_online_cpu(cpu) {
  1686. struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
  1687. if (c) {
  1688. s->cpu_slab[cpu] = NULL;
  1689. free_kmem_cache_cpu(c, cpu);
  1690. }
  1691. }
  1692. }
  1693. static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
  1694. {
  1695. int cpu;
  1696. for_each_online_cpu(cpu) {
  1697. struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
  1698. if (c)
  1699. continue;
  1700. c = alloc_kmem_cache_cpu(s, cpu, flags);
  1701. if (!c) {
  1702. free_kmem_cache_cpus(s);
  1703. return 0;
  1704. }
  1705. s->cpu_slab[cpu] = c;
  1706. }
  1707. return 1;
  1708. }
  1709. /*
  1710. * Initialize the per cpu array.
  1711. */
  1712. static void init_alloc_cpu_cpu(int cpu)
  1713. {
  1714. int i;
  1715. if (cpu_isset(cpu, kmem_cach_cpu_free_init_once))
  1716. return;
  1717. for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
  1718. free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
  1719. cpu_set(cpu, kmem_cach_cpu_free_init_once);
  1720. }
  1721. static void __init init_alloc_cpu(void)
  1722. {
  1723. int cpu;
  1724. for_each_online_cpu(cpu)
  1725. init_alloc_cpu_cpu(cpu);
  1726. }
  1727. #else
  1728. static inline void free_kmem_cache_cpus(struct kmem_cache *s) {}
  1729. static inline void init_alloc_cpu(void) {}
  1730. static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
  1731. {
  1732. init_kmem_cache_cpu(s, &s->cpu_slab);
  1733. return 1;
  1734. }
  1735. #endif
  1736. #ifdef CONFIG_NUMA
  1737. /*
  1738. * No kmalloc_node yet so do it by hand. We know that this is the first
  1739. * slab on the node for this slabcache. There are no concurrent accesses
  1740. * possible.
  1741. *
  1742. * Note that this function only works on the kmalloc_node_cache
  1743. * when allocating for the kmalloc_node_cache. This is used for bootstrapping
  1744. * memory on a fresh node that has no slab structures yet.
  1745. */
  1746. static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
  1747. int node)
  1748. {
  1749. struct page *page;
  1750. struct kmem_cache_node *n;
  1751. unsigned long flags;
  1752. BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
  1753. page = new_slab(kmalloc_caches, gfpflags, node);
  1754. BUG_ON(!page);
  1755. if (page_to_nid(page) != node) {
  1756. printk(KERN_ERR "SLUB: Unable to allocate memory from "
  1757. "node %d\n", node);
  1758. printk(KERN_ERR "SLUB: Allocating a useless per node structure "
  1759. "in order to be able to continue\n");
  1760. }
  1761. n = page->freelist;
  1762. BUG_ON(!n);
  1763. page->freelist = get_freepointer(kmalloc_caches, n);
  1764. page->inuse++;
  1765. kmalloc_caches->node[node] = n;
  1766. #ifdef CONFIG_SLUB_DEBUG
  1767. init_object(kmalloc_caches, n, 1);
  1768. init_tracking(kmalloc_caches, n);
  1769. #endif
  1770. init_kmem_cache_node(n);
  1771. atomic_long_inc(&n->nr_slabs);
  1772. /*
  1773. * lockdep requires consistent irq usage for each lock
  1774. * so even though there cannot be a race this early in
  1775. * the boot sequence, we still disable irqs.
  1776. */
  1777. local_irq_save(flags);
  1778. add_partial(n, page, 0);
  1779. local_irq_restore(flags);
  1780. return n;
  1781. }
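/*
* Release the per node structures of a cache. The structure embedded
* in the kmem_cache itself (local_node) is skipped.
*/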
  1782. static void free_kmem_cache_nodes(struct kmem_cache *s)
  1783. {
  1784. int node;
  1785. for_each_node_state(node, N_NORMAL_MEMORY) {
  1786. struct kmem_cache_node *n = s->node[node];
  1787. if (n && n != &s->local_node)
  1788. kmem_cache_free(kmalloc_caches, n);
  1789. s->node[node] = NULL;
  1790. }
  1791. }
  1792. static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
  1793. {
  1794. int node;
  1795. int local_node;
  1796. if (slab_state >= UP)
  1797. local_node = page_to_nid(virt_to_page(s));
  1798. else
  1799. local_node = 0;
  1800. for_each_node_state(node, N_NORMAL_MEMORY) {
  1801. struct kmem_cache_node *n;
  1802. if (local_node == node)
  1803. n = &s->local_node;
  1804. else {
  1805. if (slab_state == DOWN) {
  1806. n = early_kmem_cache_node_alloc(gfpflags,
  1807. node);
  1808. continue;
  1809. }
  1810. n = kmem_cache_alloc_node(kmalloc_caches,
  1811. gfpflags, node);
  1812. if (!n) {
  1813. free_kmem_cache_nodes(s);
  1814. return 0;
  1815. }
  1816. }
  1817. s->node[node] = n;
  1818. init_kmem_cache_node(n);
  1819. }
  1820. return 1;
  1821. }
  1822. #else
  1823. static void free_kmem_cache_nodes(struct kmem_cache *s)
  1824. {
  1825. }
  1826. static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
  1827. {
  1828. init_kmem_cache_node(&s->local_node);
  1829. return 1;
  1830. }
  1831. #endif
  1832. /*
  1833. * calculate_sizes() determines the order and the distribution of data within
  1834. * a slab object.
  1835. */
  1836. static int calculate_sizes(struct kmem_cache *s)
  1837. {
  1838. unsigned long flags = s->flags;
  1839. unsigned long size = s->objsize;
  1840. unsigned long align = s->align;
  1841. /*
  1842. * Determine if we can poison the object itself. If the user of
  1843. * the slab may touch the object after free or before allocation
  1844. * then we should never poison the object itself.
  1845. */
  1846. if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
  1847. !s->ctor)
  1848. s->flags |= __OBJECT_POISON;
  1849. else
  1850. s->flags &= ~__OBJECT_POISON;
  1851. /*
  1852. * Round up object size to the next word boundary. We can only
  1853. * place the free pointer at word boundaries and this determines
  1854. * the possible location of the free pointer.
  1855. */
  1856. size = ALIGN(size, sizeof(void *));
  1857. #ifdef CONFIG_SLUB_DEBUG
  1858. /*
  1859. * If we are Redzoning then check if there is some space between the
  1860. * end of the object and the free pointer. If not then add an
  1861. * additional word to have some bytes to store Redzone information.
  1862. */
  1863. if ((flags & SLAB_RED_ZONE) && size == s->objsize)
  1864. size += sizeof(void *);
  1865. #endif
  1866. /*
  1867. * With that we have determined the number of bytes in actual use
  1868. * by the object. This is the potential offset to the free pointer.
  1869. */
  1870. s->inuse = size;
  1871. if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
  1872. s->ctor)) {
  1873. /*
  1874. * Relocate free pointer after the object if it is not
  1875. * permitted to overwrite the first word of the object on
  1876. * kmem_cache_free.
  1877. *
  1878. * This is the case if we do RCU, have a constructor or
  1879. * destructor or are poisoning the objects.
  1880. */
  1881. s->offset = size;
  1882. size += sizeof(void *);
  1883. }
  1884. #ifdef CONFIG_SLUB_DEBUG
  1885. if (flags & SLAB_STORE_USER)
  1886. /*
  1887. * Need to store information about allocs and frees after
  1888. * the object.
  1889. */
  1890. size += 2 * sizeof(struct track);
  1891. if (flags & SLAB_RED_ZONE)
  1892. /*
  1893. * Add some empty padding so that we can catch
  1894. * overwrites from earlier objects rather than let
  1895. * tracking information or the free pointer be
* corrupted if a user writes before the start
  1897. * of the object.
  1898. */
  1899. size += sizeof(void *);
  1900. #endif
  1901. /*
  1902. * Determine the alignment based on various parameters that the
  1903. * user specified and the dynamic determination of cache line size
  1904. * on bootup.
  1905. */
  1906. align = calculate_alignment(flags, align, s->objsize);
  1907. /*
  1908. * SLUB stores one object immediately after another beginning from
  1909. * offset 0. In order to align the objects we have to simply size
  1910. * each object to conform to the alignment.
  1911. */
  1912. size = ALIGN(size, align);
  1913. s->size = size;
/*
* Initialize allocflags before the fallback check so that the
* __GFP_NOWARN set for fallback caches below is not clobbered
* afterwards.
*/
s->allocflags = 0;
if ((flags & __KMALLOC_CACHE) &&
PAGE_SIZE / size < slub_min_objects) {
/*
* Kmalloc cache that would not have enough objects in
* an order 0 page. Kmalloc slabs can fall back to order 0
* page allocator allocations so take a reasonably large
* order that allows us a good number of objects.
*/
s->order = max(slub_max_order, PAGE_ALLOC_COSTLY_ORDER);
s->flags |= __PAGE_ALLOC_FALLBACK;
s->allocflags |= __GFP_NOWARN;
} else
s->order = calculate_order(size);
if (s->order < 0)
return 0;
  1930. if (s->order)
  1931. s->allocflags |= __GFP_COMP;
  1932. if (s->flags & SLAB_CACHE_DMA)
  1933. s->allocflags |= SLUB_DMA;
  1934. if (s->flags & SLAB_RECLAIM_ACCOUNT)
  1935. s->allocflags |= __GFP_RECLAIMABLE;
  1936. /*
  1937. * Determine the number of objects per slab
  1938. */
  1939. s->objects = (PAGE_SIZE << s->order) / size;
  1940. return !!s->objects;
  1941. }
  1942. static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
  1943. const char *name, size_t size,
  1944. size_t align, unsigned long flags,
  1945. void (*ctor)(struct kmem_cache *, void *))
  1946. {
  1947. memset(s, 0, kmem_size);
  1948. s->name = name;
  1949. s->ctor = ctor;
  1950. s->objsize = size;
  1951. s->align = align;
  1952. s->flags = kmem_cache_flags(size, flags, name, ctor);
  1953. if (!calculate_sizes(s))
  1954. goto error;
  1955. s->refcount = 1;
  1956. #ifdef CONFIG_NUMA
  1957. s->remote_node_defrag_ratio = 100;
  1958. #endif
  1959. if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
  1960. goto error;
  1961. if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA))
  1962. return 1;
  1963. free_kmem_cache_nodes(s);
  1964. error:
  1965. if (flags & SLAB_PANIC)
  1966. panic("Cannot create slab %s size=%lu realsize=%u "
  1967. "order=%u offset=%u flags=%lx\n",
  1968. s->name, (unsigned long)size, s->size, s->order,
  1969. s->offset, flags);
  1970. return 0;
  1971. }
  1972. /*
  1973. * Check if a given pointer is valid
  1974. */
  1975. int kmem_ptr_validate(struct kmem_cache *s, const void *object)
  1976. {
  1977. struct page *page;
  1978. page = get_object_page(object);
  1979. if (!page || s != page->slab)
  1980. /* No slab or wrong slab */
  1981. return 0;
  1982. if (!check_valid_pointer(s, page, object))
  1983. return 0;
  1984. /*
  1985. * We could also check if the object is on the slabs freelist.
  1986. * But this would be too expensive and it seems that the main
* purpose of kmem_ptr_validate is to check if the object belongs
  1988. * to a certain slab.
  1989. */
  1990. return 1;
  1991. }
  1992. EXPORT_SYMBOL(kmem_ptr_validate);
  1993. /*
  1994. * Determine the size of a slab object
  1995. */
  1996. unsigned int kmem_cache_size(struct kmem_cache *s)
  1997. {
  1998. return s->objsize;
  1999. }
  2000. EXPORT_SYMBOL(kmem_cache_size);
  2001. const char *kmem_cache_name(struct kmem_cache *s)
  2002. {
  2003. return s->name;
  2004. }
  2005. EXPORT_SYMBOL(kmem_cache_name);
  2006. /*
  2007. * Attempt to free all slabs on a node. Return the number of slabs we
  2008. * were unable to free.
  2009. */
  2010. static int free_list(struct kmem_cache *s, struct kmem_cache_node *n,
  2011. struct list_head *list)
  2012. {
  2013. int slabs_inuse = 0;
  2014. unsigned long flags;
  2015. struct page *page, *h;
  2016. spin_lock_irqsave(&n->list_lock, flags);
  2017. list_for_each_entry_safe(page, h, list, lru)
  2018. if (!page->inuse) {
  2019. list_del(&page->lru);
  2020. discard_slab(s, page);
  2021. } else
  2022. slabs_inuse++;
  2023. spin_unlock_irqrestore(&n->list_lock, flags);
  2024. return slabs_inuse;
  2025. }
  2026. /*
  2027. * Release all resources used by a slab cache.
  2028. */
  2029. static inline int kmem_cache_close(struct kmem_cache *s)
  2030. {
  2031. int node;
  2032. flush_all(s);
  2033. /* Attempt to free all objects */
  2034. free_kmem_cache_cpus(s);
  2035. for_each_node_state(node, N_NORMAL_MEMORY) {
  2036. struct kmem_cache_node *n = get_node(s, node);
  2037. n->nr_partial -= free_list(s, n, &n->partial);
  2038. if (atomic_long_read(&n->nr_slabs))
  2039. return 1;
  2040. }
  2041. free_kmem_cache_nodes(s);
  2042. return 0;
  2043. }
  2044. /*
  2045. * Close a cache and release the kmem_cache structure
  2046. * (must be used for caches created using kmem_cache_create)
  2047. */
  2048. void kmem_cache_destroy(struct kmem_cache *s)
  2049. {
  2050. down_write(&slub_lock);
  2051. s->refcount--;
  2052. if (!s->refcount) {
  2053. list_del(&s->list);
  2054. up_write(&slub_lock);
  2055. if (kmem_cache_close(s))
  2056. WARN_ON(1);
  2057. sysfs_slab_remove(s);
  2058. } else
  2059. up_write(&slub_lock);
  2060. }
  2061. EXPORT_SYMBOL(kmem_cache_destroy);
  2062. /********************************************************************
  2063. * Kmalloc subsystem
  2064. *******************************************************************/
  2065. struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
  2066. EXPORT_SYMBOL(kmalloc_caches);
  2067. #ifdef CONFIG_ZONE_DMA
  2068. static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
  2069. #endif
  2070. static int __init setup_slub_min_order(char *str)
  2071. {
  2072. get_option(&str, &slub_min_order);
  2073. return 1;
  2074. }
  2075. __setup("slub_min_order=", setup_slub_min_order);
  2076. static int __init setup_slub_max_order(char *str)
  2077. {
  2078. get_option(&str, &slub_max_order);
  2079. return 1;
  2080. }
  2081. __setup("slub_max_order=", setup_slub_max_order);
  2082. static int __init setup_slub_min_objects(char *str)
  2083. {
  2084. get_option(&str, &slub_min_objects);
  2085. return 1;
  2086. }
  2087. __setup("slub_min_objects=", setup_slub_min_objects);
  2088. static int __init setup_slub_nomerge(char *str)
  2089. {
  2090. slub_nomerge = 1;
  2091. return 1;
  2092. }
  2093. __setup("slub_nomerge", setup_slub_nomerge);
  2094. static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
  2095. const char *name, int size, gfp_t gfp_flags)
  2096. {
  2097. unsigned int flags = 0;
  2098. if (gfp_flags & SLUB_DMA)
  2099. flags = SLAB_CACHE_DMA;
  2100. down_write(&slub_lock);
  2101. if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
  2102. flags | __KMALLOC_CACHE, NULL))
  2103. goto panic;
  2104. list_add(&s->list, &slab_caches);
  2105. up_write(&slub_lock);
  2106. if (sysfs_slab_add(s))
  2107. goto panic;
  2108. return s;
  2109. panic:
  2110. panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
  2111. }
  2112. #ifdef CONFIG_ZONE_DMA
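/*
* DMA kmalloc caches may be created from contexts that cannot sleep,
* so their sysfs registration is deferred to this work function.
*/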
  2113. static void sysfs_add_func(struct work_struct *w)
  2114. {
  2115. struct kmem_cache *s;
  2116. down_write(&slub_lock);
  2117. list_for_each_entry(s, &slab_caches, list) {
  2118. if (s->flags & __SYSFS_ADD_DEFERRED) {
  2119. s->flags &= ~__SYSFS_ADD_DEFERRED;
  2120. sysfs_slab_add(s);
  2121. }
  2122. }
  2123. up_write(&slub_lock);
  2124. }
  2125. static DECLARE_WORK(sysfs_add_work, sysfs_add_func);
  2126. static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
  2127. {
  2128. struct kmem_cache *s;
  2129. char *text;
  2130. size_t realsize;
  2131. s = kmalloc_caches_dma[index];
  2132. if (s)
  2133. return s;
  2134. /* Dynamically create dma cache */
  2135. if (flags & __GFP_WAIT)
  2136. down_write(&slub_lock);
  2137. else {
  2138. if (!down_write_trylock(&slub_lock))
  2139. goto out;
  2140. }
  2141. if (kmalloc_caches_dma[index])
  2142. goto unlock_out;
  2143. realsize = kmalloc_caches[index].objsize;
  2144. text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
  2145. (unsigned int)realsize);
  2146. s = kmalloc(kmem_size, flags & ~SLUB_DMA);
  2147. if (!s || !text || !kmem_cache_open(s, flags, text,
  2148. realsize, ARCH_KMALLOC_MINALIGN,
  2149. SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
  2150. kfree(s);
  2151. kfree(text);
  2152. goto unlock_out;
  2153. }
  2154. list_add(&s->list, &slab_caches);
  2155. kmalloc_caches_dma[index] = s;
  2156. schedule_work(&sysfs_add_work);
  2157. unlock_out:
  2158. up_write(&slub_lock);
  2159. out:
  2160. return kmalloc_caches_dma[index];
  2161. }
  2162. #endif
  2163. /*
* Conversion table for small slab sizes / 8 to the index in the
* kmalloc array. This is necessary for slabs < 192 since we have
* non-power-of-two cache sizes there. The size of larger slabs can be
* determined using fls.
  2168. */
  2169. static s8 size_index[24] = {
  2170. 3, /* 8 */
  2171. 4, /* 16 */
  2172. 5, /* 24 */
  2173. 5, /* 32 */
  2174. 6, /* 40 */
  2175. 6, /* 48 */
  2176. 6, /* 56 */
  2177. 6, /* 64 */
  2178. 1, /* 72 */
  2179. 1, /* 80 */
  2180. 1, /* 88 */
  2181. 1, /* 96 */
  2182. 7, /* 104 */
  2183. 7, /* 112 */
  2184. 7, /* 120 */
  2185. 7, /* 128 */
  2186. 2, /* 136 */
  2187. 2, /* 144 */
  2188. 2, /* 152 */
  2189. 2, /* 160 */
  2190. 2, /* 168 */
  2191. 2, /* 176 */
  2192. 2, /* 184 */
  2193. 2 /* 192 */
  2194. };
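/*
* Example lookup: kmalloc(100) uses size_index[(100 - 1) / 8] =
* size_index[12] = 7, i.e. the kmalloc-128 cache. Sizes above 192 use
* fls() instead, so a 300 byte request maps to index 9 (kmalloc-512).
*/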
  2195. static struct kmem_cache *get_slab(size_t size, gfp_t flags)
  2196. {
  2197. int index;
  2198. if (size <= 192) {
  2199. if (!size)
  2200. return ZERO_SIZE_PTR;
  2201. index = size_index[(size - 1) / 8];
  2202. } else
  2203. index = fls(size - 1);
  2204. #ifdef CONFIG_ZONE_DMA
  2205. if (unlikely((flags & SLUB_DMA)))
  2206. return dma_kmalloc_cache(index, flags);
  2207. #endif
  2208. return &kmalloc_caches[index];
  2209. }
  2210. void *__kmalloc(size_t size, gfp_t flags)
  2211. {
  2212. struct kmem_cache *s;
  2213. if (unlikely(size > PAGE_SIZE))
  2214. return kmalloc_large(size, flags);
  2215. s = get_slab(size, flags);
  2216. if (unlikely(ZERO_OR_NULL_PTR(s)))
  2217. return s;
  2218. return slab_alloc(s, flags, -1, __builtin_return_address(0));
  2219. }
  2220. EXPORT_SYMBOL(__kmalloc);
  2221. #ifdef CONFIG_NUMA
  2222. void *__kmalloc_node(size_t size, gfp_t flags, int node)
  2223. {
  2224. struct kmem_cache *s;
  2225. if (unlikely(size > PAGE_SIZE))
  2226. return kmalloc_large(size, flags);
  2227. s = get_slab(size, flags);
  2228. if (unlikely(ZERO_OR_NULL_PTR(s)))
  2229. return s;
  2230. return slab_alloc(s, flags, node, __builtin_return_address(0));
  2231. }
  2232. EXPORT_SYMBOL(__kmalloc_node);
  2233. #endif
  2234. size_t ksize(const void *object)
  2235. {
  2236. struct page *page;
  2237. struct kmem_cache *s;
  2238. BUG_ON(!object);
  2239. if (unlikely(object == ZERO_SIZE_PTR))
  2240. return 0;
  2241. page = virt_to_head_page(object);
  2242. BUG_ON(!page);
  2243. if (unlikely(!PageSlab(page)))
  2244. return PAGE_SIZE << compound_order(page);
  2245. s = page->slab;
  2246. BUG_ON(!s);
  2247. /*
  2248. * Debugging requires use of the padding between object
  2249. * and whatever may come after it.
  2250. */
  2251. if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
  2252. return s->objsize;
  2253. /*
  2254. * If we have the need to store the freelist pointer
  2255. * back there or track user information then we can
  2256. * only use the space before that information.
  2257. */
  2258. if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
  2259. return s->inuse;
  2260. /*
  2261. * Else we can use all the padding etc for the allocation
  2262. */
  2263. return s->size;
  2264. }
  2265. EXPORT_SYMBOL(ksize);
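/*
* kfree() also handles allocations that bypassed the slab layer:
* compound pages from kmalloc_large() are not PageSlab and are
* released with put_page().
*/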
  2266. void kfree(const void *x)
  2267. {
  2268. struct page *page;
  2269. void *object = (void *)x;
  2270. if (unlikely(ZERO_OR_NULL_PTR(x)))
  2271. return;
  2272. page = virt_to_head_page(x);
  2273. if (unlikely(!PageSlab(page))) {
  2274. put_page(page);
  2275. return;
  2276. }
  2277. slab_free(page->slab, page, object, __builtin_return_address(0));
  2278. }
  2279. EXPORT_SYMBOL(kfree);
  2280. static unsigned long count_partial(struct kmem_cache_node *n)
  2281. {
  2282. unsigned long flags;
  2283. unsigned long x = 0;
  2284. struct page *page;
  2285. spin_lock_irqsave(&n->list_lock, flags);
  2286. list_for_each_entry(page, &n->partial, lru)
  2287. x += page->inuse;
  2288. spin_unlock_irqrestore(&n->list_lock, flags);
  2289. return x;
  2290. }
  2291. /*
  2292. * kmem_cache_shrink removes empty slabs from the partial lists and sorts
  2293. * the remaining slabs by the number of items in use. The slabs with the
  2294. * most items in use come first. New allocations will then fill those up
  2295. * and thus they can be removed from the partial lists.
  2296. *
  2297. * The slabs with the least items are placed last. This results in them
* being allocated last, increasing the chance that their remaining
* objects are freed and the slabs can be discarded.
  2300. */
  2301. int kmem_cache_shrink(struct kmem_cache *s)
  2302. {
  2303. int node;
  2304. int i;
  2305. struct kmem_cache_node *n;
  2306. struct page *page;
  2307. struct page *t;
  2308. struct list_head *slabs_by_inuse =
  2309. kmalloc(sizeof(struct list_head) * s->objects, GFP_KERNEL);
  2310. unsigned long flags;
  2311. if (!slabs_by_inuse)
  2312. return -ENOMEM;
  2313. flush_all(s);
  2314. for_each_node_state(node, N_NORMAL_MEMORY) {
  2315. n = get_node(s, node);
  2316. if (!n->nr_partial)
  2317. continue;
  2318. for (i = 0; i < s->objects; i++)
  2319. INIT_LIST_HEAD(slabs_by_inuse + i);
  2320. spin_lock_irqsave(&n->list_lock, flags);
  2321. /*
  2322. * Build lists indexed by the items in use in each slab.
  2323. *
  2324. * Note that concurrent frees may occur while we hold the
  2325. * list_lock. page->inuse here is the upper limit.
  2326. */
  2327. list_for_each_entry_safe(page, t, &n->partial, lru) {
  2328. if (!page->inuse && slab_trylock(page)) {
  2329. /*
  2330. * Must hold slab lock here because slab_free
  2331. * may have freed the last object and be
  2332. * waiting to release the slab.
  2333. */
  2334. list_del(&page->lru);
  2335. n->nr_partial--;
  2336. slab_unlock(page);
  2337. discard_slab(s, page);
  2338. } else {
  2339. list_move(&page->lru,
  2340. slabs_by_inuse + page->inuse);
  2341. }
  2342. }
  2343. /*
  2344. * Rebuild the partial list with the slabs filled up most
  2345. * first and the least used slabs at the end.
  2346. */
  2347. for (i = s->objects - 1; i >= 0; i--)
  2348. list_splice(slabs_by_inuse + i, n->partial.prev);
  2349. spin_unlock_irqrestore(&n->list_lock, flags);
  2350. }
  2351. kfree(slabs_by_inuse);
  2352. return 0;
  2353. }
  2354. EXPORT_SYMBOL(kmem_cache_shrink);
  2355. #if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
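/*
* Before memory goes offline, shrink all caches so that empty slabs
* on the affected node are handed back to the page allocator.
*/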
  2356. static int slab_mem_going_offline_callback(void *arg)
  2357. {
  2358. struct kmem_cache *s;
  2359. down_read(&slub_lock);
  2360. list_for_each_entry(s, &slab_caches, list)
  2361. kmem_cache_shrink(s);
  2362. up_read(&slub_lock);
  2363. return 0;
  2364. }
  2365. static void slab_mem_offline_callback(void *arg)
  2366. {
  2367. struct kmem_cache_node *n;
  2368. struct kmem_cache *s;
  2369. struct memory_notify *marg = arg;
  2370. int offline_node;
  2371. offline_node = marg->status_change_nid;
  2372. /*
* If the node still has available memory then we still need the
* kmem_cache_node structure for it.
  2375. */
  2376. if (offline_node < 0)
  2377. return;
  2378. down_read(&slub_lock);
  2379. list_for_each_entry(s, &slab_caches, list) {
  2380. n = get_node(s, offline_node);
  2381. if (n) {
  2382. /*
  2383. * if n->nr_slabs > 0, slabs still exist on the node
  2384. * that is going down. We were unable to free them,
* and the offline_pages() function shouldn't have called this
  2386. * callback. So, we must fail.
  2387. */
  2388. BUG_ON(atomic_long_read(&n->nr_slabs));
  2389. s->node[offline_node] = NULL;
  2390. kmem_cache_free(kmalloc_caches, n);
  2391. }
  2392. }
  2393. up_read(&slub_lock);
  2394. }
  2395. static int slab_mem_going_online_callback(void *arg)
  2396. {
  2397. struct kmem_cache_node *n;
  2398. struct kmem_cache *s;
  2399. struct memory_notify *marg = arg;
  2400. int nid = marg->status_change_nid;
  2401. int ret = 0;
  2402. /*
  2403. * If the node's memory is already available, then kmem_cache_node is
  2404. * already created. Nothing to do.
  2405. */
  2406. if (nid < 0)
  2407. return 0;
  2408. /*
* We are bringing a node online. No memory is available yet. We must
  2410. * allocate a kmem_cache_node structure in order to bring the node
  2411. * online.
  2412. */
  2413. down_read(&slub_lock);
  2414. list_for_each_entry(s, &slab_caches, list) {
  2415. /*
* XXX: kmem_cache_alloc_node will fall back to other nodes
  2417. * since memory is not yet available from the node that
  2418. * is brought up.
  2419. */
  2420. n = kmem_cache_alloc(kmalloc_caches, GFP_KERNEL);
  2421. if (!n) {
  2422. ret = -ENOMEM;
  2423. goto out;
  2424. }
  2425. init_kmem_cache_node(n);
  2426. s->node[nid] = n;
  2427. }
  2428. out:
  2429. up_read(&slub_lock);
  2430. return ret;
  2431. }
  2432. static int slab_memory_callback(struct notifier_block *self,
  2433. unsigned long action, void *arg)
  2434. {
  2435. int ret = 0;
  2436. switch (action) {
  2437. case MEM_GOING_ONLINE:
  2438. ret = slab_mem_going_online_callback(arg);
  2439. break;
  2440. case MEM_GOING_OFFLINE:
  2441. ret = slab_mem_going_offline_callback(arg);
  2442. break;
  2443. case MEM_OFFLINE:
  2444. case MEM_CANCEL_ONLINE:
  2445. slab_mem_offline_callback(arg);
  2446. break;
  2447. case MEM_ONLINE:
  2448. case MEM_CANCEL_OFFLINE:
  2449. break;
  2450. }
  2451. ret = notifier_from_errno(ret);
  2452. return ret;
  2453. }
#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
  2455. /********************************************************************
  2456. * Basic setup of slabs
  2457. *******************************************************************/
  2458. void __init kmem_cache_init(void)
  2459. {
  2460. int i;
  2461. int caches = 0;
  2462. init_alloc_cpu();
  2463. #ifdef CONFIG_NUMA
  2464. /*
  2465. * Must first have the slab cache available for the allocations of the
  2466. * struct kmem_cache_node's. There is special bootstrap code in
  2467. * kmem_cache_open for slab_state == DOWN.
  2468. */
  2469. create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
  2470. sizeof(struct kmem_cache_node), GFP_KERNEL);
  2471. kmalloc_caches[0].refcount = -1;
  2472. caches++;
  2473. hotplug_memory_notifier(slab_memory_callback, 1);
  2474. #endif
  2475. /* Able to allocate the per node structures */
  2476. slab_state = PARTIAL;
  2477. /* Caches that are not of the two-to-the-power-of size */
  2478. if (KMALLOC_MIN_SIZE <= 64) {
  2479. create_kmalloc_cache(&kmalloc_caches[1],
  2480. "kmalloc-96", 96, GFP_KERNEL);
  2481. caches++;
  2482. }
  2483. if (KMALLOC_MIN_SIZE <= 128) {
  2484. create_kmalloc_cache(&kmalloc_caches[2],
  2485. "kmalloc-192", 192, GFP_KERNEL);
  2486. caches++;
  2487. }
  2488. for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
  2489. create_kmalloc_cache(&kmalloc_caches[i],
  2490. "kmalloc", 1 << i, GFP_KERNEL);
  2491. caches++;
  2492. }
  2493. /*
  2494. * Patch up the size_index table if we have strange large alignment
  2495. * requirements for the kmalloc array. This is only the case for
  2496. * mips it seems. The standard arches will not generate any code here.
  2497. *
  2498. * Largest permitted alignment is 256 bytes due to the way we
  2499. * handle the index determination for the smaller caches.
  2500. *
  2501. * Make sure that nothing crazy happens if someone starts tinkering
  2502. * around with ARCH_KMALLOC_MINALIGN
  2503. */
  2504. BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
  2505. (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
  2506. for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
  2507. size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
  2508. slab_state = UP;
  2509. /* Provide the correct kmalloc names now that the caches are up */
  2510. for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
kmalloc_caches[i].name =
  2512. kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
  2513. #ifdef CONFIG_SMP
  2514. register_cpu_notifier(&slab_notifier);
  2515. kmem_size = offsetof(struct kmem_cache, cpu_slab) +
  2516. nr_cpu_ids * sizeof(struct kmem_cache_cpu *);
  2517. #else
  2518. kmem_size = sizeof(struct kmem_cache);
  2519. #endif
  2520. printk(KERN_INFO
  2521. "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
  2522. " CPUs=%d, Nodes=%d\n",
  2523. caches, cache_line_size(),
  2524. slub_min_order, slub_max_order, slub_min_objects,
  2525. nr_cpu_ids, nr_node_ids);
  2526. }
  2527. /*
  2528. * Find a mergeable slab cache
  2529. */
  2530. static int slab_unmergeable(struct kmem_cache *s)
  2531. {
  2532. if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
  2533. return 1;
  2534. if ((s->flags & __PAGE_ALLOC_FALLBACK))
  2535. return 1;
  2536. if (s->ctor)
  2537. return 1;
  2538. /*
  2539. * We may have set a slab to be unmergeable during bootstrap.
  2540. */
  2541. if (s->refcount < 0)
  2542. return 1;
  2543. return 0;
  2544. }
  2545. static struct kmem_cache *find_mergeable(size_t size,
  2546. size_t align, unsigned long flags, const char *name,
  2547. void (*ctor)(struct kmem_cache *, void *))
  2548. {
  2549. struct kmem_cache *s;
  2550. if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
  2551. return NULL;
  2552. if (ctor)
  2553. return NULL;
  2554. size = ALIGN(size, sizeof(void *));
  2555. align = calculate_alignment(flags, align, size);
  2556. size = ALIGN(size, align);
  2557. flags = kmem_cache_flags(size, flags, name, NULL);
  2558. list_for_each_entry(s, &slab_caches, list) {
  2559. if (slab_unmergeable(s))
  2560. continue;
  2561. if (size > s->size)
  2562. continue;
  2563. if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
  2564. continue;
  2565. /*
  2566. * Check if alignment is compatible.
  2567. * Courtesy of Adrian Drzewiecki
  2568. */
  2569. if ((s->size & ~(align - 1)) != s->size)
  2570. continue;
  2571. if (s->size - size >= sizeof(void *))
  2572. continue;
  2573. return s;
  2574. }
  2575. return NULL;
  2576. }
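/*
* Illustrative example for find_mergeable() above: a request for a
* 96 byte cache with default flags and no constructor can typically
* be satisfied by an existing cache of equal or marginally larger
* size (less than sizeof(void *) of slack), such as kmalloc-96.
*/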
  2577. struct kmem_cache *kmem_cache_create(const char *name, size_t size,
  2578. size_t align, unsigned long flags,
  2579. void (*ctor)(struct kmem_cache *, void *))
  2580. {
  2581. struct kmem_cache *s;
  2582. down_write(&slub_lock);
  2583. s = find_mergeable(size, align, flags, name, ctor);
  2584. if (s) {
  2585. int cpu;
  2586. s->refcount++;
  2587. /*
  2588. * Adjust the object sizes so that we clear
  2589. * the complete object on kzalloc.
  2590. */
  2591. s->objsize = max(s->objsize, (int)size);
  2592. /*
  2593. * And then we need to update the object size in the
  2594. * per cpu structures
  2595. */
  2596. for_each_online_cpu(cpu)
  2597. get_cpu_slab(s, cpu)->objsize = s->objsize;
  2598. s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
  2599. up_write(&slub_lock);
  2600. if (sysfs_slab_alias(s, name))
  2601. goto err;
  2602. return s;
  2603. }
  2604. s = kmalloc(kmem_size, GFP_KERNEL);
  2605. if (s) {
  2606. if (kmem_cache_open(s, GFP_KERNEL, name,
  2607. size, align, flags, ctor)) {
  2608. list_add(&s->list, &slab_caches);
  2609. up_write(&slub_lock);
  2610. if (sysfs_slab_add(s))
  2611. goto err;
  2612. return s;
  2613. }
  2614. kfree(s);
  2615. }
  2616. up_write(&slub_lock);
  2617. err:
  2618. if (flags & SLAB_PANIC)
  2619. panic("Cannot create slabcache %s\n", name);
  2620. else
  2621. s = NULL;
  2622. return s;
  2623. }
  2624. EXPORT_SYMBOL(kmem_cache_create);
  2625. #ifdef CONFIG_SMP
  2626. /*
* Use the cpu notifier to ensure that the cpu slabs are flushed when
  2628. * necessary.
  2629. */
  2630. static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
  2631. unsigned long action, void *hcpu)
  2632. {
  2633. long cpu = (long)hcpu;
  2634. struct kmem_cache *s;
  2635. unsigned long flags;
  2636. switch (action) {
  2637. case CPU_UP_PREPARE:
  2638. case CPU_UP_PREPARE_FROZEN:
  2639. init_alloc_cpu_cpu(cpu);
  2640. down_read(&slub_lock);
  2641. list_for_each_entry(s, &slab_caches, list)
  2642. s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu,
  2643. GFP_KERNEL);
  2644. up_read(&slub_lock);
  2645. break;
  2646. case CPU_UP_CANCELED:
  2647. case CPU_UP_CANCELED_FROZEN:
  2648. case CPU_DEAD:
  2649. case CPU_DEAD_FROZEN:
  2650. down_read(&slub_lock);
  2651. list_for_each_entry(s, &slab_caches, list) {
  2652. struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
  2653. local_irq_save(flags);
  2654. __flush_cpu_slab(s, cpu);
  2655. local_irq_restore(flags);
  2656. free_kmem_cache_cpu(c, cpu);
  2657. s->cpu_slab[cpu] = NULL;
  2658. }
  2659. up_read(&slub_lock);
  2660. break;
  2661. default:
  2662. break;
  2663. }
  2664. return NOTIFY_OK;
  2665. }
  2666. static struct notifier_block __cpuinitdata slab_notifier = {
  2667. .notifier_call = slab_cpuup_callback
  2668. };
  2669. #endif
  2670. void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
  2671. {
  2672. struct kmem_cache *s;
  2673. if (unlikely(size > PAGE_SIZE))
  2674. return kmalloc_large(size, gfpflags);
  2675. s = get_slab(size, gfpflags);
  2676. if (unlikely(ZERO_OR_NULL_PTR(s)))
  2677. return s;
  2678. return slab_alloc(s, gfpflags, -1, caller);
  2679. }
  2680. void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
  2681. int node, void *caller)
  2682. {
  2683. struct kmem_cache *s;
  2684. if (unlikely(size > PAGE_SIZE))
  2685. return kmalloc_large(size, gfpflags);
  2686. s = get_slab(size, gfpflags);
  2687. if (unlikely(ZERO_OR_NULL_PTR(s)))
  2688. return s;
  2689. return slab_alloc(s, gfpflags, node, caller);
  2690. }
  2691. #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
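/*
* Validation walks each slab and checks that every object is
* accounted for: objects on the freelist must check out as free and
* all remaining objects as allocated.
*/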
  2692. static int validate_slab(struct kmem_cache *s, struct page *page,
  2693. unsigned long *map)
  2694. {
  2695. void *p;
  2696. void *addr = page_address(page);
  2697. if (!check_slab(s, page) ||
  2698. !on_freelist(s, page, NULL))
  2699. return 0;
  2700. /* Now we know that a valid freelist exists */
  2701. bitmap_zero(map, s->objects);
  2702. for_each_free_object(p, s, page->freelist) {
  2703. set_bit(slab_index(p, s, addr), map);
  2704. if (!check_object(s, page, p, 0))
  2705. return 0;
  2706. }
  2707. for_each_object(p, s, addr)
  2708. if (!test_bit(slab_index(p, s, addr), map))
  2709. if (!check_object(s, page, p, 1))
  2710. return 0;
  2711. return 1;
  2712. }
  2713. static void validate_slab_slab(struct kmem_cache *s, struct page *page,
  2714. unsigned long *map)
  2715. {
  2716. if (slab_trylock(page)) {
  2717. validate_slab(s, page, map);
  2718. slab_unlock(page);
  2719. } else
  2720. printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
  2721. s->name, page);
  2722. if (s->flags & DEBUG_DEFAULT_FLAGS) {
  2723. if (!SlabDebug(page))
  2724. printk(KERN_ERR "SLUB %s: SlabDebug not set "
  2725. "on slab 0x%p\n", s->name, page);
  2726. } else {
  2727. if (SlabDebug(page))
  2728. printk(KERN_ERR "SLUB %s: SlabDebug set on "
  2729. "slab 0x%p\n", s->name, page);
  2730. }
  2731. }
  2732. static int validate_slab_node(struct kmem_cache *s,
  2733. struct kmem_cache_node *n, unsigned long *map)
  2734. {
  2735. unsigned long count = 0;
  2736. struct page *page;
  2737. unsigned long flags;
  2738. spin_lock_irqsave(&n->list_lock, flags);
  2739. list_for_each_entry(page, &n->partial, lru) {
  2740. validate_slab_slab(s, page, map);
  2741. count++;
  2742. }
  2743. if (count != n->nr_partial)
  2744. printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
  2745. "counter=%ld\n", s->name, count, n->nr_partial);
  2746. if (!(s->flags & SLAB_STORE_USER))
  2747. goto out;
  2748. list_for_each_entry(page, &n->full, lru) {
  2749. validate_slab_slab(s, page, map);
  2750. count++;
  2751. }
  2752. if (count != atomic_long_read(&n->nr_slabs))
  2753. printk(KERN_ERR "SLUB: %s %ld slabs counted but "
  2754. "counter=%ld\n", s->name, count,
  2755. atomic_long_read(&n->nr_slabs));
  2756. out:
  2757. spin_unlock_irqrestore(&n->list_lock, flags);
  2758. return count;
  2759. }
  2760. static long validate_slab_cache(struct kmem_cache *s)
  2761. {
  2762. int node;
  2763. unsigned long count = 0;
  2764. unsigned long *map = kmalloc(BITS_TO_LONGS(s->objects) *
  2765. sizeof(unsigned long), GFP_KERNEL);
  2766. if (!map)
  2767. return -ENOMEM;
  2768. flush_all(s);
  2769. for_each_node_state(node, N_NORMAL_MEMORY) {
  2770. struct kmem_cache_node *n = get_node(s, node);
  2771. count += validate_slab_node(s, n, map);
  2772. }
  2773. kfree(map);
  2774. return count;
  2775. }
#ifdef SLUB_RESILIENCY_TEST
static void resiliency_test(void)
{
	u8 *p;

	printk(KERN_ERR "SLUB resiliency testing\n");
	printk(KERN_ERR "-----------------------\n");
	printk(KERN_ERR "A. Corruption after allocation\n");

	p = kzalloc(16, GFP_KERNEL);
	p[16] = 0x12;
	printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
			" 0x12->0x%p\n\n", p + 16);

	validate_slab_cache(kmalloc_caches + 4);

	/* Hmmm... The next two are dangerous */
	p = kzalloc(32, GFP_KERNEL);
	p[32 + sizeof(void *)] = 0x34;
	printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
			" 0x34 -> 0x%p\n", p);
	printk(KERN_ERR
		"If allocated object is overwritten then not detectable\n\n");

	validate_slab_cache(kmalloc_caches + 5);
	p = kzalloc(64, GFP_KERNEL);
	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
	*p = 0x56;
	printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
									p);
	printk(KERN_ERR
		"If allocated object is overwritten then not detectable\n\n");
	validate_slab_cache(kmalloc_caches + 6);

	printk(KERN_ERR "\nB. Corruption after free\n");
	p = kzalloc(128, GFP_KERNEL);
	kfree(p);
	*p = 0x78;
	printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
	validate_slab_cache(kmalloc_caches + 7);

	p = kzalloc(256, GFP_KERNEL);
	kfree(p);
	p[50] = 0x9a;
	printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
			p);
	validate_slab_cache(kmalloc_caches + 8);

	p = kzalloc(512, GFP_KERNEL);
	kfree(p);
	p[512] = 0xab;
	printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
	validate_slab_cache(kmalloc_caches + 9);
}
#else
static void resiliency_test(void) {}
#endif
/*
 * Generate lists of code addresses where slabcache objects are allocated
 * and freed.
 */

struct location {
	unsigned long count;
	void *addr;
	long long sum_time;
	long min_time;
	long max_time;
	long min_pid;
	long max_pid;
	cpumask_t cpus;
	nodemask_t nodes;
};

struct loc_track {
	unsigned long max;
	unsigned long count;
	struct location *loc;
};
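
/*
 * The tracking data is collected into a flat array of struct location,
 * kept sorted by call address. "max" is the capacity in elements; the
 * array is grown by doubling (see add_location() below), which must use
 * GFP_ATOMIC because growth can happen while a node's list_lock is held.
 */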
static void free_loc_track(struct loc_track *t)
{
	if (t->max)
		free_pages((unsigned long)t->loc,
			get_order(sizeof(struct location) * t->max));
}

static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
{
	struct location *l;
	int order;

	order = get_order(sizeof(struct location) * max);

	l = (void *)__get_free_pages(flags, order);
	if (!l)
		return 0;

	if (t->count) {
		memcpy(l, t->loc, sizeof(struct location) * t->count);
		free_loc_track(t);
	}
	t->max = max;
	t->loc = l;
	return 1;
}
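
/*
 * add_location() folds one alloc/free event into the sorted array. The
 * binary search either finds an existing entry for track->addr, whose
 * counters are then updated in place, or terminates with "pos" at the
 * slot where a new entry keeps the array ordered. Illustration (addresses
 * are hypothetical): with entries for {A, C, D}, an event for B ends the
 * search with pos at C's index, and C and D are shifted up by the
 * memmove() below before B is written.
 */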
static int add_location(struct loc_track *t, struct kmem_cache *s,
				const struct track *track)
{
	long start, end, pos;
	struct location *l;
	void *caddr;
	unsigned long age = jiffies - track->when;

	start = -1;
	end = t->count;

	for ( ; ; ) {
		pos = start + (end - start + 1) / 2;

		/*
		 * There is nothing at "end". If we end up there
		 * we need to add something to before end.
		 */
		if (pos == end)
			break;

		caddr = t->loc[pos].addr;
		if (track->addr == caddr) {

			l = &t->loc[pos];
			l->count++;
			if (track->when) {
				l->sum_time += age;
				if (age < l->min_time)
					l->min_time = age;
				if (age > l->max_time)
					l->max_time = age;

				if (track->pid < l->min_pid)
					l->min_pid = track->pid;
				if (track->pid > l->max_pid)
					l->max_pid = track->pid;

				cpu_set(track->cpu, l->cpus);
			}
			node_set(page_to_nid(virt_to_page(track)), l->nodes);
			return 1;
		}

		if (track->addr < caddr)
			end = pos;
		else
			start = pos;
	}

	/*
	 * Not found. Insert new tracking element.
	 */
	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
		return 0;

	l = t->loc + pos;
	if (pos < t->count)
		memmove(l + 1, l,
			(t->count - pos) * sizeof(struct location));
	t->count++;
	l->count = 1;
	l->addr = track->addr;
	l->sum_time = age;
	l->min_time = age;
	l->max_time = age;
	l->min_pid = track->pid;
	l->max_pid = track->pid;
	cpus_clear(l->cpus);
	cpu_set(track->cpu, l->cpus);
	nodes_clear(l->nodes);
	node_set(page_to_nid(virt_to_page(track)), l->nodes);
	return 1;
}
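
/*
 * process_slab() uses the same bitmap trick as validate_slab(): mark the
 * index of every object on the freelist, then every object whose bit is
 * still clear must be allocated, so the track stored in it is live and
 * can be merged into the location list.
 */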
static void process_slab(struct loc_track *t, struct kmem_cache *s,
		struct page *page, enum track_item alloc)
{
	void *addr = page_address(page);
	DECLARE_BITMAP(map, s->objects);
	void *p;

	bitmap_zero(map, s->objects);
	for_each_free_object(p, s, page->freelist)
		set_bit(slab_index(p, s, addr), map);

	for_each_object(p, s, addr)
		if (!test_bit(slab_index(p, s, addr), map))
			add_location(t, s, get_track(s, p, alloc));
}
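
/*
 * Each location is reported on one line; the age/pid/cpus/nodes fields
 * only carry ranges when the recorded events actually differed. An
 * illustrative line (values not from a real run):
 *
 *	 4895 alloc_buffer_head+0x21/0x60 age=0/583/1204 pid=1-1521 cpus=0-3
 */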
static int list_locations(struct kmem_cache *s, char *buf,
					enum track_item alloc)
{
	int len = 0;
	unsigned long i;
	struct loc_track t = { 0, 0, NULL };
	int node;

	if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
			GFP_TEMPORARY))
		return sprintf(buf, "Out of memory\n");

	/* Push back cpu slabs */
	flush_all(s);

	for_each_node_state(node, N_NORMAL_MEMORY) {
		struct kmem_cache_node *n = get_node(s, node);
		unsigned long flags;
		struct page *page;

		if (!atomic_long_read(&n->nr_slabs))
			continue;

		spin_lock_irqsave(&n->list_lock, flags);
		list_for_each_entry(page, &n->partial, lru)
			process_slab(&t, s, page, alloc);
		list_for_each_entry(page, &n->full, lru)
			process_slab(&t, s, page, alloc);
		spin_unlock_irqrestore(&n->list_lock, flags);
	}

	for (i = 0; i < t.count; i++) {
		struct location *l = &t.loc[i];

		if (len > PAGE_SIZE - 100)
			break;
		len += sprintf(buf + len, "%7ld ", l->count);

		if (l->addr)
			len += sprint_symbol(buf + len, (unsigned long)l->addr);
		else
			len += sprintf(buf + len, "<not-available>");

		if (l->sum_time != l->min_time) {
			unsigned long remainder;

			len += sprintf(buf + len, " age=%ld/%ld/%ld",
			l->min_time,
			div_long_long_rem(l->sum_time, l->count, &remainder),
			l->max_time);
		} else
			len += sprintf(buf + len, " age=%ld",
				l->min_time);

		if (l->min_pid != l->max_pid)
			len += sprintf(buf + len, " pid=%ld-%ld",
				l->min_pid, l->max_pid);
		else
			len += sprintf(buf + len, " pid=%ld",
				l->min_pid);

		if (num_online_cpus() > 1 && !cpus_empty(l->cpus) &&
				len < PAGE_SIZE - 60) {
			len += sprintf(buf + len, " cpus=");
			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
					l->cpus);
		}

		if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
				len < PAGE_SIZE - 60) {
			len += sprintf(buf + len, " nodes=");
			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
					l->nodes);
		}

		len += sprintf(buf + len, "\n");
	}

	free_loc_track(&t);
	if (!t.count)
		len += sprintf(buf, "No data\n");
	return len;
}
enum slab_stat_type {
	SL_FULL,
	SL_PARTIAL,
	SL_CPU,
	SL_OBJECTS
};

#define SO_FULL		(1 << SL_FULL)
#define SO_PARTIAL	(1 << SL_PARTIAL)
#define SO_CPU		(1 << SL_CPU)
#define SO_OBJECTS	(1 << SL_OBJECTS)
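
/*
 * SO_FULL, SO_PARTIAL and SO_CPU select which slabs to count; SO_OBJECTS
 * switches the unit from slabs to objects. For example, SO_PARTIAL alone
 * counts the partial slabs, while SO_PARTIAL|SO_OBJECTS counts the
 * objects in use on them.
 */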
static ssize_t show_slab_objects(struct kmem_cache *s,
			char *buf, unsigned long flags)
{
	unsigned long total = 0;
	int cpu;
	int node;
	int x;
	unsigned long *nodes;
	unsigned long *per_cpu;

	nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
	if (!nodes)
		return -ENOMEM;
	per_cpu = nodes + nr_node_ids;

	for_each_possible_cpu(cpu) {
		struct page *page;
		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);

		if (!c)
			continue;

		page = c->page;
		node = c->node;
		if (node < 0)
			continue;
		if (page) {
			if (flags & SO_CPU) {
				if (flags & SO_OBJECTS)
					x = page->inuse;
				else
					x = 1;
				total += x;
				nodes[node] += x;
			}
			per_cpu[node]++;
		}
	}

	for_each_node_state(node, N_NORMAL_MEMORY) {
		struct kmem_cache_node *n = get_node(s, node);

		if (flags & SO_PARTIAL) {
			if (flags & SO_OBJECTS)
				x = count_partial(n);
			else
				x = n->nr_partial;
			total += x;
			nodes[node] += x;
		}

		if (flags & SO_FULL) {
			int full_slabs = atomic_long_read(&n->nr_slabs)
					- per_cpu[node]
					- n->nr_partial;

			if (flags & SO_OBJECTS)
				x = full_slabs * s->objects;
			else
				x = full_slabs;
			total += x;
			nodes[node] += x;
		}
	}

	x = sprintf(buf, "%lu", total);
#ifdef CONFIG_NUMA
	for_each_node_state(node, N_NORMAL_MEMORY)
		if (nodes[node])
			x += sprintf(buf + x, " N%d=%lu",
					node, nodes[node]);
#endif
	kfree(nodes);
	return x + sprintf(buf + x, "\n");
}
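
/*
 * Output is the total followed, on NUMA, by the nonzero per-node
 * contributions, e.g. (illustrative): "4200 N0=2100 N1=2100".
 */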
static int any_slab_objects(struct kmem_cache *s)
{
	int node;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);

		if (c && c->page)
			return 1;
	}

	for_each_online_node(node) {
		struct kmem_cache_node *n = get_node(s, node);

		if (!n)
			continue;

		if (n->nr_partial || atomic_long_read(&n->nr_slabs))
			return 1;
	}
	return 0;
}
#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
#define to_slab(n) container_of(n, struct kmem_cache, kobj)

struct slab_attribute {
	struct attribute attr;
	ssize_t (*show)(struct kmem_cache *s, char *buf);
	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
};

#define SLAB_ATTR_RO(_name) \
	static struct slab_attribute _name##_attr = __ATTR_RO(_name)

#define SLAB_ATTR(_name) \
	static struct slab_attribute _name##_attr =  \
	__ATTR(_name, 0644, _name##_show, _name##_store)
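
/*
 * For instance, SLAB_ATTR(trace) below expands (roughly) to
 *
 *	static struct slab_attribute trace_attr =
 *		__ATTR(trace, 0644, trace_show, trace_store);
 *
 * i.e. a mode 0644 sysfs attribute wired to the show/store pair of the
 * same name; SLAB_ATTR_RO() creates the read-only variant.
 */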
static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->size);
}
SLAB_ATTR_RO(slab_size);

static ssize_t align_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->align);
}
SLAB_ATTR_RO(align);

static ssize_t object_size_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->objsize);
}
SLAB_ATTR_RO(object_size);

static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->objects);
}
SLAB_ATTR_RO(objs_per_slab);

static ssize_t order_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->order);
}
SLAB_ATTR_RO(order);

static ssize_t ctor_show(struct kmem_cache *s, char *buf)
{
	if (s->ctor) {
		int n = sprint_symbol(buf, (unsigned long)s->ctor);

		return n + sprintf(buf + n, "\n");
	}
	return 0;
}
SLAB_ATTR_RO(ctor);

static ssize_t aliases_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->refcount - 1);
}
SLAB_ATTR_RO(aliases);

static ssize_t slabs_show(struct kmem_cache *s, char *buf)
{
	return show_slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU);
}
SLAB_ATTR_RO(slabs);

static ssize_t partial_show(struct kmem_cache *s, char *buf)
{
	return show_slab_objects(s, buf, SO_PARTIAL);
}
SLAB_ATTR_RO(partial);

static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
{
	return show_slab_objects(s, buf, SO_CPU);
}
SLAB_ATTR_RO(cpu_slabs);

static ssize_t objects_show(struct kmem_cache *s, char *buf)
{
	return show_slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU|SO_OBJECTS);
}
SLAB_ATTR_RO(objects);
static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
}

static ssize_t sanity_checks_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	s->flags &= ~SLAB_DEBUG_FREE;
	if (buf[0] == '1')
		s->flags |= SLAB_DEBUG_FREE;
	return length;
}
SLAB_ATTR(sanity_checks);

static ssize_t trace_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
}

static ssize_t trace_store(struct kmem_cache *s, const char *buf,
							size_t length)
{
	s->flags &= ~SLAB_TRACE;
	if (buf[0] == '1')
		s->flags |= SLAB_TRACE;
	return length;
}
SLAB_ATTR(trace);

static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
}

static ssize_t reclaim_account_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
	if (buf[0] == '1')
		s->flags |= SLAB_RECLAIM_ACCOUNT;
	return length;
}
SLAB_ATTR(reclaim_account);

static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
}
SLAB_ATTR_RO(hwcache_align);
#ifdef CONFIG_ZONE_DMA
static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
}
SLAB_ATTR_RO(cache_dma);
#endif

static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
}
SLAB_ATTR_RO(destroy_by_rcu);

static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
}

static ssize_t red_zone_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	if (any_slab_objects(s))
		return -EBUSY;

	s->flags &= ~SLAB_RED_ZONE;
	if (buf[0] == '1')
		s->flags |= SLAB_RED_ZONE;
	calculate_sizes(s);
	return length;
}
SLAB_ATTR(red_zone);

static ssize_t poison_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
}

static ssize_t poison_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	if (any_slab_objects(s))
		return -EBUSY;

	s->flags &= ~SLAB_POISON;
	if (buf[0] == '1')
		s->flags |= SLAB_POISON;
	calculate_sizes(s);
	return length;
}
SLAB_ATTR(poison);

static ssize_t store_user_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
}

static ssize_t store_user_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	if (any_slab_objects(s))
		return -EBUSY;

	s->flags &= ~SLAB_STORE_USER;
	if (buf[0] == '1')
		s->flags |= SLAB_STORE_USER;
	calculate_sizes(s);
	return length;
}
SLAB_ATTR(store_user);
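
/*
 * Note that red_zone, poison and store_user all change the object layout,
 * so they may only be flipped while the cache holds no objects at all
 * (hence the any_slab_objects() checks); calculate_sizes() then
 * recomputes the metadata layout. On a busy cache a write such as
 *
 *	echo 1 > /sys/kernel/slab/<cache>/poison
 *
 * fails with EBUSY.
 */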
static ssize_t validate_show(struct kmem_cache *s, char *buf)
{
	return 0;
}

static ssize_t validate_store(struct kmem_cache *s,
			const char *buf, size_t length)
{
	int ret = -EINVAL;

	if (buf[0] == '1') {
		ret = validate_slab_cache(s);
		if (ret >= 0)
			ret = length;
	}
	return ret;
}
SLAB_ATTR(validate);

static ssize_t shrink_show(struct kmem_cache *s, char *buf)
{
	return 0;
}

static ssize_t shrink_store(struct kmem_cache *s,
			const char *buf, size_t length)
{
	if (buf[0] == '1') {
		int rc = kmem_cache_shrink(s);

		if (rc)
			return rc;
	} else
		return -EINVAL;
	return length;
}
SLAB_ATTR(shrink);
static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
{
	if (!(s->flags & SLAB_STORE_USER))
		return -ENOSYS;
	return list_locations(s, buf, TRACK_ALLOC);
}
SLAB_ATTR_RO(alloc_calls);

static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
{
	if (!(s->flags & SLAB_STORE_USER))
		return -ENOSYS;
	return list_locations(s, buf, TRACK_FREE);
}
SLAB_ATTR_RO(free_calls);

#ifdef CONFIG_NUMA
static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
}

static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	int n = simple_strtoul(buf, NULL, 10);

	if (n < 100)
		s->remote_node_defrag_ratio = n * 10;
	return length;
}
SLAB_ATTR(remote_node_defrag_ratio);
#endif
#ifdef CONFIG_SLUB_STATS
static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
{
	unsigned long sum = 0;
	int cpu;
	int len;
	int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	for_each_online_cpu(cpu) {
		unsigned x = get_cpu_slab(s, cpu)->stat[si];

		data[cpu] = x;
		sum += x;
	}

	len = sprintf(buf, "%lu", sum);

	for_each_online_cpu(cpu) {
		if (data[cpu] && len < PAGE_SIZE - 20)
			len += sprintf(buf + len, " c%d=%u", cpu, data[cpu]);
	}
	kfree(data);
	return len + sprintf(buf + len, "\n");
}
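
/*
 * A statistics attribute reads as the total followed by the nonzero
 * per-cpu counts, e.g. (illustrative): "42113 c0=22090 c1=20023".
 */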
#define STAT_ATTR(si, text)					\
static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
{								\
	return show_stat(s, buf, si);				\
}								\
SLAB_ATTR_RO(text)

STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
STAT_ATTR(FREE_FASTPATH, free_fastpath);
STAT_ATTR(FREE_SLOWPATH, free_slowpath);
STAT_ATTR(FREE_FROZEN, free_frozen);
STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
STAT_ATTR(ALLOC_SLAB, alloc_slab);
STAT_ATTR(ALLOC_REFILL, alloc_refill);
STAT_ATTR(FREE_SLAB, free_slab);
STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
#endif
static struct attribute *slab_attrs[] = {
	&slab_size_attr.attr,
	&object_size_attr.attr,
	&objs_per_slab_attr.attr,
	&order_attr.attr,
	&objects_attr.attr,
	&slabs_attr.attr,
	&partial_attr.attr,
	&cpu_slabs_attr.attr,
	&ctor_attr.attr,
	&aliases_attr.attr,
	&align_attr.attr,
	&sanity_checks_attr.attr,
	&trace_attr.attr,
	&hwcache_align_attr.attr,
	&reclaim_account_attr.attr,
	&destroy_by_rcu_attr.attr,
	&red_zone_attr.attr,
	&poison_attr.attr,
	&store_user_attr.attr,
	&validate_attr.attr,
	&shrink_attr.attr,
	&alloc_calls_attr.attr,
	&free_calls_attr.attr,
#ifdef CONFIG_ZONE_DMA
	&cache_dma_attr.attr,
#endif
#ifdef CONFIG_NUMA
	&remote_node_defrag_ratio_attr.attr,
#endif
#ifdef CONFIG_SLUB_STATS
	&alloc_fastpath_attr.attr,
	&alloc_slowpath_attr.attr,
	&free_fastpath_attr.attr,
	&free_slowpath_attr.attr,
	&free_frozen_attr.attr,
	&free_add_partial_attr.attr,
	&free_remove_partial_attr.attr,
	&alloc_from_partial_attr.attr,
	&alloc_slab_attr.attr,
	&alloc_refill_attr.attr,
	&free_slab_attr.attr,
	&cpuslab_flush_attr.attr,
	&deactivate_full_attr.attr,
	&deactivate_empty_attr.attr,
	&deactivate_to_head_attr.attr,
	&deactivate_to_tail_attr.attr,
	&deactivate_remote_frees_attr.attr,
#endif
	NULL
};

static struct attribute_group slab_attr_group = {
	.attrs = slab_attrs,
};
static ssize_t slab_attr_show(struct kobject *kobj,
				struct attribute *attr,
				char *buf)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->show)
		return -EIO;

	err = attribute->show(s, buf);

	return err;
}

static ssize_t slab_attr_store(struct kobject *kobj,
				struct attribute *attr,
				const char *buf, size_t len)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->store)
		return -EIO;

	err = attribute->store(s, buf, len);

	return err;
}
static void kmem_cache_release(struct kobject *kobj)
{
	struct kmem_cache *s = to_slab(kobj);

	kfree(s);
}

static struct sysfs_ops slab_sysfs_ops = {
	.show = slab_attr_show,
	.store = slab_attr_store,
};

static struct kobj_type slab_ktype = {
	.sysfs_ops = &slab_sysfs_ops,
	.release = kmem_cache_release
};

static int uevent_filter(struct kset *kset, struct kobject *kobj)
{
	struct kobj_type *ktype = get_ktype(kobj);

	if (ktype == &slab_ktype)
		return 1;
	return 0;
}

static struct kset_uevent_ops slab_uevent_ops = {
	.filter = uevent_filter,
};
static struct kset *slab_kset;

#define ID_STR_LENGTH 64

/*
 * Create a unique string id for a slab cache:
 *
 * Format	:[flags-]size
 */
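
/*
 * For example, a hypothetical mergeable cache of size 192 with
 * SLAB_CACHE_DMA and SLAB_RECLAIM_ACCOUNT set would get the id
 * ":da-0000192". The leading ':' keeps the generated ids visibly
 * distinct from the names chosen by cache creators.
 */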
static char *create_unique_id(struct kmem_cache *s)
{
	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
	char *p = name;

	BUG_ON(!name);

	*p++ = ':';
	/*
	 * First flags affecting slabcache operations. We will only
	 * get here for aliasable slabs so we do not need to support
	 * too many flags. The flags here must cover all flags that
	 * are matched during merging to guarantee that the id is
	 * unique.
	 */
	if (s->flags & SLAB_CACHE_DMA)
		*p++ = 'd';
	if (s->flags & SLAB_RECLAIM_ACCOUNT)
		*p++ = 'a';
	if (s->flags & SLAB_DEBUG_FREE)
		*p++ = 'F';
	if (p != name + 1)
		*p++ = '-';
	p += sprintf(p, "%07d", s->size);
	BUG_ON(p > name + ID_STR_LENGTH - 1);
	return name;
}
static int sysfs_slab_add(struct kmem_cache *s)
{
	int err;
	const char *name;
	int unmergeable;

	if (slab_state < SYSFS)
		/* Defer until later */
		return 0;

	unmergeable = slab_unmergeable(s);
	if (unmergeable) {
		/*
		 * Slabcache can never be merged so we can use the name proper.
		 * This is typically the case for debug situations. In that
		 * case we can catch duplicate names easily.
		 */
		sysfs_remove_link(&slab_kset->kobj, s->name);
		name = s->name;
	} else {
		/*
		 * Create a unique name for the slab as a target
		 * for the symlinks.
		 */
		name = create_unique_id(s);
	}

	s->kobj.kset = slab_kset;
	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
	if (err) {
		kobject_put(&s->kobj);
		return err;
	}

	err = sysfs_create_group(&s->kobj, &slab_attr_group);
	if (err) {
		/*
		 * Drop the kobject again; leaving it registered without
		 * its attribute group would leak it.
		 */
		kobject_del(&s->kobj);
		kobject_put(&s->kobj);
		return err;
	}
	kobject_uevent(&s->kobj, KOBJ_ADD);
	if (!unmergeable) {
		/* Setup first alias */
		sysfs_slab_alias(s, s->name);
		kfree(name);
	}
	return 0;
}
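
/*
 * A merged cache thus shows up in sysfs once under its generated unique
 * id, with one symlink per alias, along the lines of (hypothetical
 * listing):
 *
 *	/sys/kernel/slab/:0000192		<- the real kobject
 *	/sys/kernel/slab/some_cache -> :0000192	<- alias symlink
 */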
static void sysfs_slab_remove(struct kmem_cache *s)
{
	kobject_uevent(&s->kobj, KOBJ_REMOVE);
	kobject_del(&s->kobj);
	kobject_put(&s->kobj);
}

/*
 * Need to buffer aliases during bootup until sysfs becomes
 * available lest we lose that information.
 */
struct saved_alias {
	struct kmem_cache *s;
	const char *name;
	struct saved_alias *next;
};

static struct saved_alias *alias_list;
static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
	struct saved_alias *al;

	if (slab_state == SYSFS) {
		/*
		 * If we have a leftover link then remove it.
		 */
		sysfs_remove_link(&slab_kset->kobj, name);
		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
	}

	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
	if (!al)
		return -ENOMEM;

	al->s = s;
	al->name = name;
	al->next = alias_list;
	alias_list = al;
	return 0;
}
static int __init slab_sysfs_init(void)
{
	struct kmem_cache *s;
	int err;

	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
	if (!slab_kset) {
		printk(KERN_ERR "Cannot register slab subsystem.\n");
		return -ENOSYS;
	}

	slab_state = SYSFS;

	list_for_each_entry(s, &slab_caches, list) {
		err = sysfs_slab_add(s);
		if (err)
			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
						" to sysfs\n", s->name);
	}

	while (alias_list) {
		struct saved_alias *al = alias_list;

		alias_list = alias_list->next;
		err = sysfs_slab_alias(al->s, al->name);
		if (err)
			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
					" %s to sysfs\n", al->name);
		kfree(al);
	}

	resiliency_test();
	return 0;
}

__initcall(slab_sysfs_init);
#endif
/*
 * The /proc/slabinfo ABI
 */
#ifdef CONFIG_SLABINFO

ssize_t slabinfo_write(struct file *file, const char __user * buffer,
                       size_t count, loff_t *ppos)
{
	return -EINVAL;
}

static void print_slabinfo_header(struct seq_file *m)
{
	seq_puts(m, "slabinfo - version: 2.1\n");
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
		 "<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
	seq_putc(m, '\n');
}
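
/*
 * A line emitted by s_show() below then reads, for example (illustrative
 * numbers):
 *
 * kmalloc-64      4380  4480    64   64    1 : tunables 0 0 0 : slabdata 70 70 0
 *
 * The tunables are always reported as zero: SLUB has none of SLAB's
 * per-cpu queue knobs, and slabinfo_write() rejects attempts to set them
 * with -EINVAL.
 */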
static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;

	down_read(&slub_lock);
	if (!n)
		print_slabinfo_header(m);

	return seq_list_start(&slab_caches, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

static void s_stop(struct seq_file *m, void *p)
{
	up_read(&slub_lock);
}

static int s_show(struct seq_file *m, void *p)
{
	unsigned long nr_partials = 0;
	unsigned long nr_slabs = 0;
	unsigned long nr_inuse = 0;
	unsigned long nr_objs;
	struct kmem_cache *s;
	int node;

	s = list_entry(p, struct kmem_cache, list);

	for_each_online_node(node) {
		struct kmem_cache_node *n = get_node(s, node);

		if (!n)
			continue;

		nr_partials += n->nr_partial;
		nr_slabs += atomic_long_read(&n->nr_slabs);
		nr_inuse += count_partial(n);
	}

	nr_objs = nr_slabs * s->objects;
	nr_inuse += (nr_slabs - nr_partials) * s->objects;

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
		   nr_objs, s->size, s->objects, (1 << s->order));
	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
		   0UL);
	seq_putc(m, '\n');
	return 0;
}

const struct seq_operations slabinfo_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

#endif /* CONFIG_SLABINFO */