slub.c
/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks and only
 * uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>

/*
 * Lock order:
 *   1. slab_lock(page)
 *   2. slab->list_lock
 *
 * The slab_lock protects operations on the objects of a particular
 * slab and its metadata in the page struct. If the slab lock
 * has been taken then no allocations nor frees can be performed
 * on the objects in the slab nor can the slab be added or removed
 * from the partial or full lists since this would mean modifying
 * the page struct of the slab.
 *
 * The list_lock protects the partial and full list on each node and
 * the partial slab counter. If taken then no new slabs may be added or
 * removed from the lists nor may the number of partial slabs be modified.
 * (Note that the total number of slabs is an atomic value that may be
 * modified without taking the list lock).
 *
 * The list_lock is a centralized lock and thus we avoid taking it as
 * much as possible. As long as SLUB does not have to handle partial
 * slabs, operations can continue without any centralized lock. F.e.
 * allocating a long series of objects that fill up slabs does not require
 * the list lock.
 *
 * The lock order is sometimes inverted when we are trying to get a slab
 * off a list. We take the list_lock and then look for a page on the list
 * to use. While we do that objects in the slabs may be freed. We can
 * only operate on the slab if we have also taken the slab_lock. So we use
 * a slab_trylock() on the slab. If trylock was successful then no frees
 * can occur anymore and we can use the slab for allocations etc. If the
 * slab_trylock() does not succeed then frees are in progress in the slab and
 * we must stay away from it for a while since we may cause a bouncing
 * cacheline if we try to acquire the lock. So go onto the next slab.
 * If all pages are busy then we may allocate a new slab instead of reusing
 * a partial slab. A new slab has no one operating on it and thus there is
 * no danger of cacheline contention.
 *
 * Interrupts are disabled during allocation and deallocation in order to
 * make the slab allocator safe to use in the context of an irq. In addition
 * interrupts are disabled to ensure that the processor does not change
 * while handling per_cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocator's per cpu caches for
 * fast frees and allocs.
 *
 * Overloading of page flags that are otherwise used for LRU management.
 *
 * PageActive		The slab is frozen and exempt from list processing.
 *			This means that the slab is dedicated to a purpose
 *			such as satisfying allocations for a specific
 *			processor. Objects may be freed in the slab while
 *			it is frozen but slab_free will then skip the usual
 *			list operations. It is up to the processor holding
 *			the slab to integrate the slab into the slab lists
 *			when the slab is no longer needed.
 *
 *			One use of this flag is to mark slabs that are
 *			used for allocations. Then such a slab becomes a cpu
 *			slab. The cpu slab may be equipped with an additional
 *			freelist that allows lockless access to
 *			free objects in addition to the regular freelist
 *			that requires the slab lock.
 *
 * PageError		Slab requires special handling due to debug
 *			options set. This moves slab handling out of
 *			the fast path and disables lockless freelists.
 */
#define FROZEN (1 << PG_active)

#ifdef CONFIG_SLUB_DEBUG
#define SLABDEBUG (1 << PG_error)
#else
#define SLABDEBUG 0
#endif

static inline int SlabFrozen(struct page *page)
{
	return page->flags & FROZEN;
}

static inline void SetSlabFrozen(struct page *page)
{
	page->flags |= FROZEN;
}

static inline void ClearSlabFrozen(struct page *page)
{
	page->flags &= ~FROZEN;
}

static inline int SlabDebug(struct page *page)
{
	return page->flags & SLABDEBUG;
}

static inline void SetSlabDebug(struct page *page)
{
	page->flags |= SLABDEBUG;
}

static inline void ClearSlabDebug(struct page *page)
{
	page->flags &= ~SLABDEBUG;
}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

/*
 * Currently the fastpath is not supported if preemption is enabled.
 */
#if defined(CONFIG_FAST_CMPXCHG_LOCAL) && !defined(CONFIG_PREEMPT)
#define SLUB_FASTPATH
#endif

#if PAGE_SHIFT <= 12

/*
 * Small page size. Make sure that we do not fragment memory
 */
#define DEFAULT_MAX_ORDER 1
#define DEFAULT_MIN_OBJECTS 4

#else

/*
 * Large page machines are customarily able to handle larger
 * page orders.
 */
#define DEFAULT_MAX_ORDER 2
#define DEFAULT_MIN_OBJECTS 8

#endif

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in use.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * Set of flags that will prevent slab merging
 */
#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_DESTROY_BY_RCU)

#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
		SLAB_CACHE_DMA)

#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/* Internal SLUB flags */
#define __OBJECT_POISON		0x80000000 /* Poison object */
#define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */

/* Not all arches define cache_line_size */
#ifndef cache_line_size
#define cache_line_size()	L1_CACHE_BYTES
#endif
static int kmem_size = sizeof(struct kmem_cache);

#ifdef CONFIG_SMP
static struct notifier_block slab_notifier;
#endif

static enum {
	DOWN,		/* No slab functionality available */
	PARTIAL,	/* kmem_cache_open() works but kmalloc does not */
	UP,		/* Everything works but does not show up in sysfs */
	SYSFS		/* Sysfs up */
} slab_state = DOWN;

/* A list of all slab caches on the system */
static DECLARE_RWSEM(slub_lock);
static LIST_HEAD(slab_caches);

/*
 * Tracking user of a slab.
 */
struct track {
	void *addr;		/* Called from address */
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void sysfs_slab_remove(struct kmem_cache *);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
static inline void sysfs_slab_remove(struct kmem_cache *s)
{
	kfree(s);
}
#endif

static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	c->stat[si]++;
#endif
}

/********************************************************************
 *			Core slab cache functions
 *******************************************************************/

int slab_is_available(void)
{
	return slab_state >= UP;
}

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
#ifdef CONFIG_NUMA
	return s->node[node];
#else
	return &s->local_node;
#endif
}

static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
{
#ifdef CONFIG_SMP
	return s->cpu_slab[cpu];
#else
	return &s->cpu_slab;
#endif
}

/*
 * The end pointer in a slab is special. It points to the first object in the
 * slab but has bit 0 set to mark it.
 *
 * Note that SLUB relies on page_mapping returning NULL for pages with bit 0
 * in the mapping set.
 */
static inline int is_end(void *addr)
{
	return (unsigned long)addr & PAGE_MAPPING_ANON;
}

void *slab_address(struct page *page)
{
	return page->end - PAGE_MAPPING_ANON;
}
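
/*
 * For example, for a slab whose memory starts at a word aligned address
 * "base", page->end is set to base + 1 (see new_slab() below). Bit 0
 * (PAGE_MAPPING_ANON) then distinguishes the end marker from any real
 * object pointer:
 *
 *	is_end(page->end)		-> nonzero, the marker has bit 0 set
 *	is_end(slab_address(page))	-> 0, base itself is word aligned
 */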
static inline int check_valid_pointer(struct kmem_cache *s,
				struct page *page, const void *object)
{
	void *base;

	if (object == page->end)
		return 1;

	base = slab_address(page);
	if (object < base || object >= base + s->objects * s->size ||
		(object - base) % s->size) {
		return 0;
	}

	return 1;
}

/*
 * Slow version of get and set free pointer.
 *
 * This version requires touching the cache lines of kmem_cache, which
 * we avoid in the fast alloc/free paths. There we obtain the offset
 * from the page struct.
 */
static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	return *(void **)(object + s->offset);
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	*(void **)(object + s->offset) = fp;
}

/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr) \
	for (__p = (__addr); __p < (__addr) + (__s)->objects * (__s)->size;\
			__p += (__s)->size)

/* Scan freelist */
#define for_each_free_object(__p, __s, __free) \
	for (__p = (__free); (__p) != page->end; __p = get_freepointer((__s),\
			__p))
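
/*
 * A minimal usage sketch for the freelist scan macro. Note that
 * for_each_free_object hardcodes "page" as the name of the variable
 * holding the slab's page struct, so it only works where such a
 * variable is in scope (as in on_freelist() below):
 *
 *	void *p;
 *	int nr_free = 0;
 *
 *	for_each_free_object(p, s, page->freelist)
 *		nr_free++;
 */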
  308. /* Determine object index from a given position */
  309. static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
  310. {
  311. return (p - addr) / s->size;
  312. }
  313. #ifdef CONFIG_SLUB_DEBUG
  314. /*
  315. * Debug settings:
  316. */
  317. #ifdef CONFIG_SLUB_DEBUG_ON
  318. static int slub_debug = DEBUG_DEFAULT_FLAGS;
  319. #else
  320. static int slub_debug;
  321. #endif
  322. static char *slub_debug_slabs;
  323. /*
  324. * Object debugging
  325. */
  326. static void print_section(char *text, u8 *addr, unsigned int length)
  327. {
  328. int i, offset;
  329. int newline = 1;
  330. char ascii[17];
  331. ascii[16] = 0;
  332. for (i = 0; i < length; i++) {
  333. if (newline) {
  334. printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
  335. newline = 0;
  336. }
  337. printk(KERN_CONT " %02x", addr[i]);
  338. offset = i % 16;
  339. ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
  340. if (offset == 15) {
  341. printk(KERN_CONT " %s\n", ascii);
  342. newline = 1;
  343. }
  344. }
  345. if (!newline) {
  346. i %= 16;
  347. while (i < 16) {
  348. printk(KERN_CONT " ");
  349. ascii[i] = ' ';
  350. i++;
  351. }
  352. printk(KERN_CONT " %s\n", ascii);
  353. }
  354. }
  355. static struct track *get_track(struct kmem_cache *s, void *object,
  356. enum track_item alloc)
  357. {
  358. struct track *p;
  359. if (s->offset)
  360. p = object + s->offset + sizeof(void *);
  361. else
  362. p = object + s->inuse;
  363. return p + alloc;
  364. }
  365. static void set_track(struct kmem_cache *s, void *object,
  366. enum track_item alloc, void *addr)
  367. {
  368. struct track *p;
  369. if (s->offset)
  370. p = object + s->offset + sizeof(void *);
  371. else
  372. p = object + s->inuse;
  373. p += alloc;
  374. if (addr) {
  375. p->addr = addr;
  376. p->cpu = smp_processor_id();
  377. p->pid = current ? current->pid : -1;
  378. p->when = jiffies;
  379. } else
  380. memset(p, 0, sizeof(struct track));
  381. }
  382. static void init_tracking(struct kmem_cache *s, void *object)
  383. {
  384. if (!(s->flags & SLAB_STORE_USER))
  385. return;
  386. set_track(s, object, TRACK_FREE, NULL);
  387. set_track(s, object, TRACK_ALLOC, NULL);
  388. }
  389. static void print_track(const char *s, struct track *t)
  390. {
  391. if (!t->addr)
  392. return;
  393. printk(KERN_ERR "INFO: %s in ", s);
  394. __print_symbol("%s", (unsigned long)t->addr);
  395. printk(" age=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid);
  396. }
  397. static void print_tracking(struct kmem_cache *s, void *object)
  398. {
  399. if (!(s->flags & SLAB_STORE_USER))
  400. return;
  401. print_track("Allocated", get_track(s, object, TRACK_ALLOC));
  402. print_track("Freed", get_track(s, object, TRACK_FREE));
  403. }
  404. static void print_page_info(struct page *page)
  405. {
  406. printk(KERN_ERR "INFO: Slab 0x%p used=%u fp=0x%p flags=0x%04lx\n",
  407. page, page->inuse, page->freelist, page->flags);
  408. }
  409. static void slab_bug(struct kmem_cache *s, char *fmt, ...)
  410. {
  411. va_list args;
  412. char buf[100];
  413. va_start(args, fmt);
  414. vsnprintf(buf, sizeof(buf), fmt, args);
  415. va_end(args);
  416. printk(KERN_ERR "========================================"
  417. "=====================================\n");
  418. printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
  419. printk(KERN_ERR "----------------------------------------"
  420. "-------------------------------------\n\n");
  421. }
  422. static void slab_fix(struct kmem_cache *s, char *fmt, ...)
  423. {
  424. va_list args;
  425. char buf[100];
  426. va_start(args, fmt);
  427. vsnprintf(buf, sizeof(buf), fmt, args);
  428. va_end(args);
  429. printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
  430. }
  431. static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
  432. {
  433. unsigned int off; /* Offset of last byte */
  434. u8 *addr = slab_address(page);
  435. print_tracking(s, p);
  436. print_page_info(page);
  437. printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
  438. p, p - addr, get_freepointer(s, p));
  439. if (p > addr + 16)
  440. print_section("Bytes b4", p - 16, 16);
  441. print_section("Object", p, min(s->objsize, 128));
  442. if (s->flags & SLAB_RED_ZONE)
  443. print_section("Redzone", p + s->objsize,
  444. s->inuse - s->objsize);
  445. if (s->offset)
  446. off = s->offset + sizeof(void *);
  447. else
  448. off = s->inuse;
  449. if (s->flags & SLAB_STORE_USER)
  450. off += 2 * sizeof(struct track);
  451. if (off != s->size)
  452. /* Beginning of the filler is the free pointer */
  453. print_section("Padding", p + off, s->size - off);
  454. dump_stack();
  455. }
  456. static void object_err(struct kmem_cache *s, struct page *page,
  457. u8 *object, char *reason)
  458. {
  459. slab_bug(s, reason);
  460. print_trailer(s, page, object);
  461. }
  462. static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
  463. {
  464. va_list args;
  465. char buf[100];
  466. va_start(args, fmt);
  467. vsnprintf(buf, sizeof(buf), fmt, args);
  468. va_end(args);
  469. slab_bug(s, fmt);
  470. print_page_info(page);
  471. dump_stack();
  472. }
  473. static void init_object(struct kmem_cache *s, void *object, int active)
  474. {
  475. u8 *p = object;
  476. if (s->flags & __OBJECT_POISON) {
  477. memset(p, POISON_FREE, s->objsize - 1);
  478. p[s->objsize - 1] = POISON_END;
  479. }
  480. if (s->flags & SLAB_RED_ZONE)
  481. memset(p + s->objsize,
  482. active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
  483. s->inuse - s->objsize);
  484. }
  485. static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
  486. {
  487. while (bytes) {
  488. if (*start != (u8)value)
  489. return start;
  490. start++;
  491. bytes--;
  492. }
  493. return NULL;
  494. }
  495. static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
  496. void *from, void *to)
  497. {
  498. slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
  499. memset(from, data, to - from);
  500. }
  501. static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
  502. u8 *object, char *what,
  503. u8 *start, unsigned int value, unsigned int bytes)
  504. {
  505. u8 *fault;
  506. u8 *end;
  507. fault = check_bytes(start, value, bytes);
  508. if (!fault)
  509. return 1;
  510. end = start + bytes;
  511. while (end > fault && end[-1] == value)
  512. end--;
  513. slab_bug(s, "%s overwritten", what);
  514. printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
  515. fault, end - 1, fault[0], value);
  516. print_trailer(s, page, object);
  517. restore_bytes(s, what, value, fault, end);
  518. return 0;
  519. }
/*
 * Object layout:
 *
 * object address
 *	Bytes of the object to be managed.
 *	If the freepointer may overlay the object then the free
 *	pointer is the first word of the object.
 *
 *	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 *	0xa5 (POISON_END)
 *
 * object + s->objsize
 *	Padding to reach word boundary. This is also used for Redzoning.
 *	Padding is extended by another word if Redzoning is enabled and
 *	objsize == inuse.
 *
 *	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 *	0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 *	Meta data starts here.
 *
 *	A. Free pointer (if we cannot overwrite object on free)
 *	B. Tracking data for SLAB_STORE_USER
 *	C. Padding to reach required alignment boundary or at minimum
 *		one word if debugging is on to be able to detect writes
 *		before the word boundary.
 *
 *	Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 *	Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the objsize and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */
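
/*
 * Worked example (illustrative only, assuming 64 bit and
 * sizeof(void *) == 8): a cache with objsize = 20, SLAB_RED_ZONE,
 * SLAB_STORE_USER and a constructor, so the free pointer cannot
 * overlay the object:
 *
 *	bytes  0..19	the object (poisoned 0x6b/0xa5 while free)
 *	bytes 20..23	red zone filling up to inuse = 24 (0xbb or 0xcc)
 *	bytes 24..31	free pointer stored behind the object
 *	bytes 32..	two struct track entries, then 0x5a padding
 *			up to s->size
 */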
static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned long off = s->inuse;	/* The end of info */

	if (s->offset)
		/* Freepointer is placed after the object. */
		off += sizeof(void *);

	if (s->flags & SLAB_STORE_USER)
		/* We also have user information there */
		off += 2 * sizeof(struct track);

	if (s->size == off)
		return 1;

	return check_bytes_and_report(s, page, p, "Object padding",
				p + off, POISON_INUSE, s->size - off);
}

static int slab_pad_check(struct kmem_cache *s, struct page *page)
{
	u8 *start;
	u8 *fault;
	u8 *end;
	int length;
	int remainder;

	if (!(s->flags & SLAB_POISON))
		return 1;

	start = slab_address(page);
	end = start + (PAGE_SIZE << s->order);
	length = s->objects * s->size;
	remainder = end - (start + length);
	if (!remainder)
		return 1;

	fault = check_bytes(start + length, POISON_INUSE, remainder);
	if (!fault)
		return 1;
	while (end > fault && end[-1] == POISON_INUSE)
		end--;

	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
	print_section("Padding", start, length);

	restore_bytes(s, "slab padding", POISON_INUSE, start, end);
	return 0;
}

static int check_object(struct kmem_cache *s, struct page *page,
					void *object, int active)
{
	u8 *p = object;
	u8 *endobject = object + s->objsize;

	if (s->flags & SLAB_RED_ZONE) {
		unsigned int red =
			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;

		if (!check_bytes_and_report(s, page, object, "Redzone",
			endobject, red, s->inuse - s->objsize))
			return 0;
	} else {
		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
			check_bytes_and_report(s, page, p, "Alignment padding",
				endobject, POISON_INUSE, s->inuse - s->objsize);
		}
	}

	if (s->flags & SLAB_POISON) {
		if (!active && (s->flags & __OBJECT_POISON) &&
			(!check_bytes_and_report(s, page, p, "Poison", p,
					POISON_FREE, s->objsize - 1) ||
			 !check_bytes_and_report(s, page, p, "Poison",
				p + s->objsize - 1, POISON_END, 1)))
			return 0;
		/*
		 * check_pad_bytes cleans up on its own.
		 */
		check_pad_bytes(s, page, p);
	}

	if (!s->offset && active)
		/*
		 * Object and freepointer overlap. Cannot check
		 * freepointer while object is allocated.
		 */
		return 1;

	/* Check free pointer validity */
	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
		object_err(s, page, p, "Freepointer corrupt");
		/*
		 * No choice but to zap it and thus lose the remainder
		 * of the free objects in this slab. May cause
		 * another error because the object count is now wrong.
		 */
		set_freepointer(s, p, page->end);
		return 0;
	}
	return 1;
}
static int check_slab(struct kmem_cache *s, struct page *page)
{
	VM_BUG_ON(!irqs_disabled());

	if (!PageSlab(page)) {
		slab_err(s, page, "Not a valid slab page");
		return 0;
	}
	if (page->inuse > s->objects) {
		/* Drop the stray s->name argument; the format has two %u */
		slab_err(s, page, "inuse %u > max %u",
			page->inuse, s->objects);
		return 0;
	}
	/* Slab_pad_check fixes things up after itself */
	slab_pad_check(s, page);
	return 1;
}
/*
 * Determine if a certain object on a page is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 */
static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
{
	int nr = 0;
	void *fp = page->freelist;
	void *object = NULL;

	while (fp != page->end && nr <= s->objects) {
		if (fp == search)
			return 1;
		if (!check_valid_pointer(s, page, fp)) {
			if (object) {
				object_err(s, page, object,
					"Freechain corrupt");
				set_freepointer(s, object, page->end);
			} else {
				slab_err(s, page, "Freepointer corrupt");
				page->freelist = page->end;
				page->inuse = s->objects;
				slab_fix(s, "Freelist cleared");
				return 0;
			}
			break;
		}
		object = fp;
		fp = get_freepointer(s, object);
		nr++;
	}

	if (page->inuse != s->objects - nr) {
		slab_err(s, page, "Wrong object count. Counter is %d but "
			"%d were counted", page->inuse, s->objects - nr);
		page->inuse = s->objects - nr;
		slab_fix(s, "Object count adjusted.");
	}
	return search == NULL;
}

static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
{
	if (s->flags & SLAB_TRACE) {
		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
			s->name,
			alloc ? "alloc" : "free",
			object, page->inuse,
			page->freelist);

		if (!alloc)
			print_section("Object", (void *)object, s->objsize);

		dump_stack();
	}
}

/*
 * Tracking of fully allocated slabs for debugging purposes.
 */
static void add_full(struct kmem_cache_node *n, struct page *page)
{
	spin_lock(&n->list_lock);
	list_add(&page->lru, &n->full);
	spin_unlock(&n->list_lock);
}

static void remove_full(struct kmem_cache *s, struct page *page)
{
	struct kmem_cache_node *n;

	if (!(s->flags & SLAB_STORE_USER))
		return;

	n = get_node(s, page_to_nid(page));

	spin_lock(&n->list_lock);
	list_del(&page->lru);
	spin_unlock(&n->list_lock);
}

static void setup_object_debug(struct kmem_cache *s, struct page *page,
								void *object)
{
	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
		return;

	init_object(s, object, 0);
	init_tracking(s, object);
}

static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
						void *object, void *addr)
{
	if (!check_slab(s, page))
		goto bad;

	if (object && !on_freelist(s, page, object)) {
		object_err(s, page, object, "Object already allocated");
		goto bad;
	}

	if (!check_valid_pointer(s, page, object)) {
		object_err(s, page, object, "Freelist Pointer check fails");
		goto bad;
	}

	if (object && !check_object(s, page, object, 0))
		goto bad;

	/* Success. Perform special debug activities for allocs. */
	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_ALLOC, addr);
	trace(s, page, object, 1);
	init_object(s, object, 1);
	return 1;

bad:
	if (PageSlab(page)) {
		/*
		 * If this is a slab page then lets do the best we can
		 * to avoid issues in the future. Marking all objects
		 * as used avoids touching the remaining objects.
		 */
		slab_fix(s, "Marking all objects used");
		page->inuse = s->objects;
		page->freelist = page->end;
	}
	return 0;
}

static int free_debug_processing(struct kmem_cache *s, struct page *page,
						void *object, void *addr)
{
	if (!check_slab(s, page))
		goto fail;

	if (!check_valid_pointer(s, page, object)) {
		slab_err(s, page, "Invalid object pointer 0x%p", object);
		goto fail;
	}

	if (on_freelist(s, page, object)) {
		object_err(s, page, object, "Object already free");
		goto fail;
	}

	if (!check_object(s, page, object, 1))
		return 0;

	if (unlikely(s != page->slab)) {
		if (!PageSlab(page)) {
			slab_err(s, page, "Attempt to free object(0x%p) "
				"outside of slab", object);
		} else if (!page->slab) {
			printk(KERN_ERR
				"SLUB <none>: no slab for object 0x%p.\n",
						object);
			dump_stack();
		} else
			object_err(s, page, object,
					"page slab pointer corrupt.");
		goto fail;
	}

	/* Special debug activities for freeing objects */
	if (!SlabFrozen(page) && page->freelist == page->end)
		remove_full(s, page);
	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_FREE, addr);
	trace(s, page, object, 0);
	init_object(s, object, 0);
	return 1;

fail:
	slab_fix(s, "Object at 0x%p not freed", object);
	return 0;
}
static int __init setup_slub_debug(char *str)
{
	slub_debug = DEBUG_DEFAULT_FLAGS;
	if (*str++ != '=' || !*str)
		/*
		 * No options specified. Switch on full debugging.
		 */
		goto out;

	if (*str == ',')
		/*
		 * No options but restriction on slabs. This means full
		 * debugging for slabs matching a pattern.
		 */
		goto check_slabs;

	slub_debug = 0;
	if (*str == '-')
		/*
		 * Switch off all debugging measures.
		 */
		goto out;

	/*
	 * Determine which debug features should be switched on
	 */
	for (; *str && *str != ','; str++) {
		switch (tolower(*str)) {
		case 'f':
			slub_debug |= SLAB_DEBUG_FREE;
			break;
		case 'z':
			slub_debug |= SLAB_RED_ZONE;
			break;
		case 'p':
			slub_debug |= SLAB_POISON;
			break;
		case 'u':
			slub_debug |= SLAB_STORE_USER;
			break;
		case 't':
			slub_debug |= SLAB_TRACE;
			break;
		default:
			printk(KERN_ERR "slub_debug option '%c' "
				"unknown. skipped\n", *str);
		}
	}

check_slabs:
	if (*str == ',')
		slub_debug_slabs = str + 1;
out:
	return 1;
}

__setup("slub_debug", setup_slub_debug);
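
/*
 * For example, booting with "slub_debug=ZP,dentry" enables red zoning
 * and poisoning only for caches whose name begins with "dentry", while
 * a bare "slub_debug" switches on the full DEBUG_DEFAULT_FLAGS set for
 * every slab cache.
 */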
static unsigned long kmem_cache_flags(unsigned long objsize,
	unsigned long flags, const char *name,
	void (*ctor)(struct kmem_cache *, void *))
{
	/*
	 * The page->offset field is only 16 bit wide. This is an offset
	 * in units of words from the beginning of an object. If the slab
	 * size is bigger than that then we cannot move the free pointer
	 * behind the object anymore.
	 *
	 * On 32 bit platforms the limit is 256k. On 64 bit platforms
	 * the limit is 512k.
	 *
	 * Debugging or ctor may create a need to move the free
	 * pointer. Fail if this happens.
	 */
	if (objsize >= 65535 * sizeof(void *)) {
		BUG_ON(flags & (SLAB_RED_ZONE | SLAB_POISON |
				SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
		BUG_ON(ctor);
	} else {
		/*
		 * Enable debugging if selected on the kernel commandline.
		 */
		if (slub_debug && (!slub_debug_slabs ||
		    strncmp(slub_debug_slabs, name,
			strlen(slub_debug_slabs)) == 0))
				flags |= slub_debug;
	}

	return flags;
}
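
/*
 * The size check above follows directly from the 16 bit offset field:
 * it can address at most 65535 words, i.e. 65535 * 4 bytes (~256 KB)
 * with 32 bit pointers and 65535 * 8 bytes (~512 KB) with 64 bit
 * pointers, which matches the limits quoted in the comment.
 */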
#else
static inline void setup_object_debug(struct kmem_cache *s,
			struct page *page, void *object) {}

static inline int alloc_debug_processing(struct kmem_cache *s,
	struct page *page, void *object, void *addr) { return 0; }

static inline int free_debug_processing(struct kmem_cache *s,
	struct page *page, void *object, void *addr) { return 0; }

static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
			{ return 1; }
static inline int check_object(struct kmem_cache *s, struct page *page,
			void *object, int active) { return 1; }
static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
static inline unsigned long kmem_cache_flags(unsigned long objsize,
	unsigned long flags, const char *name,
	void (*ctor)(struct kmem_cache *, void *))
{
	return flags;
}
#define slub_debug 0
#endif

/*
 * Slab allocation and freeing
 */
static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
	struct page *page;
	int pages = 1 << s->order;

	if (s->order)
		flags |= __GFP_COMP;

	if (s->flags & SLAB_CACHE_DMA)
		flags |= SLUB_DMA;

	if (s->flags & SLAB_RECLAIM_ACCOUNT)
		flags |= __GFP_RECLAIMABLE;

	if (node == -1)
		page = alloc_pages(flags, s->order);
	else
		page = alloc_pages_node(node, flags, s->order);

	if (!page)
		return NULL;

	mod_zone_page_state(page_zone(page),
		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
		pages);

	return page;
}

static void setup_object(struct kmem_cache *s, struct page *page,
				void *object)
{
	setup_object_debug(s, page, object);
	if (unlikely(s->ctor))
		s->ctor(s, object);
}

static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
{
	struct page *page;
	struct kmem_cache_node *n;
	void *start;
	void *last;
	void *p;

	BUG_ON(flags & GFP_SLAB_BUG_MASK);

	page = allocate_slab(s,
		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
	if (!page)
		goto out;

	n = get_node(s, page_to_nid(page));
	if (n)
		atomic_long_inc(&n->nr_slabs);
	page->slab = s;
	page->flags |= 1 << PG_slab;
	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
			SLAB_STORE_USER | SLAB_TRACE))
		SetSlabDebug(page);

	start = page_address(page);
	page->end = start + 1;

	if (unlikely(s->flags & SLAB_POISON))
		memset(start, POISON_INUSE, PAGE_SIZE << s->order);

	last = start;
	for_each_object(p, s, start) {
		setup_object(s, page, last);
		set_freepointer(s, last, p);
		last = p;
	}
	setup_object(s, page, last);
	set_freepointer(s, last, page->end);

	page->freelist = start;
	page->inuse = 0;
out:
	return page;
}

static void __free_slab(struct kmem_cache *s, struct page *page)
{
	int pages = 1 << s->order;

	if (unlikely(SlabDebug(page))) {
		void *p;

		slab_pad_check(s, page);
		for_each_object(p, s, slab_address(page))
			check_object(s, page, p, 0);
		ClearSlabDebug(page);
	}

	mod_zone_page_state(page_zone(page),
		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
		-pages);

	page->mapping = NULL;
	__free_pages(page, s->order);
}

static void rcu_free_slab(struct rcu_head *h)
{
	struct page *page;

	page = container_of((struct list_head *)h, struct page, lru);
	__free_slab(page->slab, page);
}

static void free_slab(struct kmem_cache *s, struct page *page)
{
	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
		/*
		 * RCU free overloads the RCU head over the LRU
		 */
		struct rcu_head *head = (void *)&page->lru;

		call_rcu(head, rcu_free_slab);
	} else
		__free_slab(s, page);
}

static void discard_slab(struct kmem_cache *s, struct page *page)
{
	struct kmem_cache_node *n = get_node(s, page_to_nid(page));

	atomic_long_dec(&n->nr_slabs);
	reset_page_mapcount(page);
	__ClearPageSlab(page);
	free_slab(s, page);
}

/*
 * Per slab locking using the pagelock
 */
static __always_inline void slab_lock(struct page *page)
{
	bit_spin_lock(PG_locked, &page->flags);
}

static __always_inline void slab_unlock(struct page *page)
{
	__bit_spin_unlock(PG_locked, &page->flags);
}

static __always_inline int slab_trylock(struct page *page)
{
	return bit_spin_trylock(PG_locked, &page->flags);
}
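
/*
 * Usage sketch: slab_trylock() is what enables the inverted lock order
 * described at the top of this file. A scan of the partial list holds
 * the node's list_lock and merely trylocks each candidate slab:
 *
 *	if (slab_trylock(page)) {
 *		... operate on the slab ...
 *		slab_unlock(page);
 *	}
 *
 * See lock_and_freeze_slab() below for the real user.
 */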
/*
 * Management of partially allocated slabs
 */
static void add_partial(struct kmem_cache_node *n,
				struct page *page, int tail)
{
	spin_lock(&n->list_lock);
	n->nr_partial++;
	if (tail)
		list_add_tail(&page->lru, &n->partial);
	else
		list_add(&page->lru, &n->partial);
	spin_unlock(&n->list_lock);
}

static void remove_partial(struct kmem_cache *s,
						struct page *page)
{
	struct kmem_cache_node *n = get_node(s, page_to_nid(page));

	spin_lock(&n->list_lock);
	list_del(&page->lru);
	n->nr_partial--;
	spin_unlock(&n->list_lock);
}

/*
 * Lock slab and remove from the partial list.
 *
 * Must hold list_lock.
 */
static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page)
{
	if (slab_trylock(page)) {
		list_del(&page->lru);
		n->nr_partial--;
		SetSlabFrozen(page);
		return 1;
	}
	return 0;
}
/*
 * Try to allocate a partial slab from a specific node.
 */
static struct page *get_partial_node(struct kmem_cache_node *n)
{
	struct page *page;

	/*
	 * Racy check. If we mistakenly see no partial slabs then we
	 * just allocate an empty slab. If we mistakenly try to get a
	 * partial slab and there is none available then this function
	 * will return NULL.
	 */
	if (!n || !n->nr_partial)
		return NULL;

	spin_lock(&n->list_lock);
	list_for_each_entry(page, &n->partial, lru)
		if (lock_and_freeze_slab(n, page))
			goto out;
	page = NULL;
out:
	spin_unlock(&n->list_lock);
	return page;
}

/*
 * Get a page from somewhere. Search in increasing NUMA distances.
 */
static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
{
#ifdef CONFIG_NUMA
	struct zonelist *zonelist;
	struct zone **z;
	struct page *page;

	/*
	 * The defrag ratio allows a configuration of the tradeoffs between
	 * inter node defragmentation and node local allocations. A lower
	 * defrag_ratio increases the tendency to do local allocations
	 * instead of attempting to obtain partial slabs from other nodes.
	 *
	 * If the defrag_ratio is set to 0 then kmalloc() always
	 * returns node local objects. If the ratio is higher then kmalloc()
	 * may return off node objects because partial slabs are obtained
	 * from other nodes and filled up.
	 *
	 * If /sys/slab/xx/defrag_ratio is set to 100 (which makes
	 * defrag_ratio = 1000) then every (well almost) allocation will
	 * first attempt to defrag slab caches on other nodes. This means
	 * scanning over all nodes to look for partial slabs which may be
	 * expensive if we do it every time we are trying to find a slab
	 * with available objects.
	 */
	if (!s->remote_node_defrag_ratio ||
			get_cycles() % 1024 > s->remote_node_defrag_ratio)
		return NULL;

	zonelist = &NODE_DATA(
		slab_node(current->mempolicy))->node_zonelists[gfp_zone(flags)];
	for (z = zonelist->zones; *z; z++) {
		struct kmem_cache_node *n;

		n = get_node(s, zone_to_nid(*z));

		if (n && cpuset_zone_allowed_hardwall(*z, flags) &&
				n->nr_partial > MIN_PARTIAL) {
			page = get_partial_node(n);
			if (page)
				return page;
		}
	}
#endif
	return NULL;
}
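
/*
 * Example of the throttling above: writing 10 to the sysfs ratio file
 * stores 100 in s->remote_node_defrag_ratio. Since get_cycles() % 1024
 * exceeds 100 roughly 90% of the time, about nine out of ten
 * allocations bail out early and stay node local rather than scanning
 * remote partial lists.
 */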
/*
 * Get a partial page, lock it and return it.
 */
static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
{
        struct page *page;
        int searchnode = (node == -1) ? numa_node_id() : node;

        page = get_partial_node(get_node(s, searchnode));
        if (page || (flags & __GFP_THISNODE))
                return page;

        return get_any_partial(s, flags);
}
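
/*
 * Summary of the fallback order implemented above: try the partial list of
 * the requested (or local) node first; if that fails and __GFP_THISNODE
 * does not pin the allocation, scan other nodes in increasing NUMA
 * distance, throttled by the remote_node_defrag_ratio check.
 */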
/*
 * Move a page back to the lists.
 *
 * Must be called with the slab lock held.
 *
 * On exit the slab lock will have been dropped.
 */
static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
{
        struct kmem_cache_node *n = get_node(s, page_to_nid(page));
        struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id());

        ClearSlabFrozen(page);
        if (page->inuse) {

                if (page->freelist != page->end) {
                        add_partial(n, page, tail);
                        stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
                } else {
                        stat(c, DEACTIVATE_FULL);
                        if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
                                add_full(n, page);
                }
                slab_unlock(page);
        } else {
                stat(c, DEACTIVATE_EMPTY);
                if (n->nr_partial < MIN_PARTIAL) {
                        /*
                         * Adding an empty slab to the partial slabs in order
                         * to avoid page allocator overhead. This slab needs
                         * to come after the other slabs with objects in
                         * order to fill them up. That way the size of the
                         * partial list stays small. kmem_cache_shrink can
                         * reclaim empty slabs from the partial list.
                         */
                        add_partial(n, page, 1);
                        slab_unlock(page);
                } else {
                        slab_unlock(page);
                        stat(get_cpu_slab(s, raw_smp_processor_id()), FREE_SLAB);
                        discard_slab(s, page);
                }
        }
}
/*
 * Remove the cpu slab
 */
static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
        struct page *page = c->page;
        int tail = 1;

        if (c->freelist)
                stat(c, DEACTIVATE_REMOTE_FREES);
        /*
         * Merge the cpu freelist into the slab freelist. Typically we
         * get here because both freelists are empty, so this is unlikely
         * to occur.
         *
         * We need to use is_end() here because deactivate_slab() may
         * be called for a debug slab. Then c->freelist may contain
         * a dummy pointer.
         */
        while (unlikely(!is_end(c->freelist))) {
                void **object;

                tail = 0;       /* Hot objects. Put the slab first */

                /* Retrieve object from cpu_freelist */
                object = c->freelist;
                c->freelist = c->freelist[c->offset];

                /* And put onto the regular freelist */
                object[c->offset] = page->freelist;
                page->freelist = object;
                page->inuse--;
        }
        c->page = NULL;
        unfreeze_slab(s, page, tail);
}
static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
        stat(c, CPUSLAB_FLUSH);
        slab_lock(c->page);
        deactivate_slab(s, c);
}

/*
 * Flush cpu slab.
 * Called from IPI handler with interrupts disabled.
 */
static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
{
        struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);

        if (likely(c && c->page))
                flush_slab(s, c);
}

static void flush_cpu_slab(void *d)
{
        struct kmem_cache *s = d;

        __flush_cpu_slab(s, smp_processor_id());
}

static void flush_all(struct kmem_cache *s)
{
#ifdef CONFIG_SMP
        on_each_cpu(flush_cpu_slab, s, 1, 1);
#else
        unsigned long flags;

        local_irq_save(flags);
        flush_cpu_slab(s);
        local_irq_restore(flags);
#endif
}
/*
 * Check if the objects in a per cpu structure fit numa
 * locality expectations.
 */
static inline int node_match(struct kmem_cache_cpu *c, int node)
{
#ifdef CONFIG_NUMA
        if (node != -1 && c->node != node)
                return 0;
#endif
        return 1;
}
/*
 * Slow path. The lockless freelist is empty or we need to perform
 * debugging duties.
 *
 * Interrupts are disabled.
 *
 * Processing is still very fast if new objects have been freed to the
 * regular freelist. In that case we simply take over the regular freelist
 * as the lockless freelist and zap the regular freelist.
 *
 * If that is not working then we fall back to the partial lists. We take the
 * first element of the freelist as the object to allocate now and move the
 * rest of the freelist to the lockless freelist.
 *
 * And if we were unable to get a new slab from the partial slab lists then
 * we need to allocate a new slab. This is the slowest path since we may sleep.
 */
static void *__slab_alloc(struct kmem_cache *s,
                gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c)
{
        void **object;
        struct page *new;
#ifdef SLUB_FASTPATH
        unsigned long flags;

        local_irq_save(flags);
#endif
        if (!c->page)
                goto new_slab;

        slab_lock(c->page);
        if (unlikely(!node_match(c, node)))
                goto another_slab;

        stat(c, ALLOC_REFILL);

load_freelist:
        object = c->page->freelist;
        if (unlikely(object == c->page->end))
                goto another_slab;
        if (unlikely(SlabDebug(c->page)))
                goto debug;

        c->freelist = object[c->offset];
        c->page->inuse = s->objects;
        c->page->freelist = c->page->end;
        c->node = page_to_nid(c->page);
unlock_out:
        slab_unlock(c->page);
        stat(c, ALLOC_SLOWPATH);
out:
#ifdef SLUB_FASTPATH
        local_irq_restore(flags);
#endif
        return object;

another_slab:
        deactivate_slab(s, c);

new_slab:
        new = get_partial(s, gfpflags, node);
        if (new) {
                c->page = new;
                stat(c, ALLOC_FROM_PARTIAL);
                goto load_freelist;
        }

        if (gfpflags & __GFP_WAIT)
                local_irq_enable();

        new = new_slab(s, gfpflags, node);

        if (gfpflags & __GFP_WAIT)
                local_irq_disable();

        if (new) {
                c = get_cpu_slab(s, smp_processor_id());
                stat(c, ALLOC_SLAB);
                if (c->page)
                        flush_slab(s, c);
                slab_lock(new);
                SetSlabFrozen(new);
                c->page = new;
                goto load_freelist;
        }
        object = NULL;
        goto out;
debug:
        object = c->page->freelist;
        if (!alloc_debug_processing(s, c->page, object, addr))
                goto another_slab;

        c->page->inuse++;
        c->page->freelist = object[c->offset];
        c->node = -1;
        goto unlock_out;
}
/*
 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
 * have the fastpath folded into their functions. So no function call
 * overhead for requests that can be satisfied on the fastpath.
 *
 * The fastpath works by first checking if the lockless freelist can be used.
 * If not then __slab_alloc is called for slow processing.
 *
 * Otherwise we can simply pick the next object from the lockless free list.
 */
static __always_inline void *slab_alloc(struct kmem_cache *s,
                gfp_t gfpflags, int node, void *addr)
{
        void **object;
        struct kmem_cache_cpu *c;

        /*
         * The SLUB_FASTPATH path is provisional and is currently disabled if
         * the kernel is compiled with preemption or if the arch does not
         * support fast cmpxchg operations. There are a couple of coming
         * changes that will simplify matters and allow preemption.
         * Ultimately we may end up making SLUB_FASTPATH the default.
         *
         * 1. The introduction of the per cpu allocator will avoid array
         *    lookups through get_cpu_slab(). A special register can be used
         *    instead.
         *
         * 2. The introduction of per cpu atomic operations (cpu_ops) means
         *    that we can realize the logic here entirely with per cpu
         *    atomics. The per cpu atomic ops will take care of the
         *    preemption issues.
         */
#ifdef SLUB_FASTPATH
        c = get_cpu_slab(s, raw_smp_processor_id());
        do {
                object = c->freelist;
                if (unlikely(is_end(object) || !node_match(c, node))) {
                        object = __slab_alloc(s, gfpflags, node, addr, c);
                        break;
                }
                stat(c, ALLOC_FASTPATH);
        } while (cmpxchg_local(&c->freelist, object, object[c->offset])
                                                                != object);
#else
        unsigned long flags;

        local_irq_save(flags);
        c = get_cpu_slab(s, smp_processor_id());
        if (unlikely(is_end(c->freelist) || !node_match(c, node)))
                object = __slab_alloc(s, gfpflags, node, addr, c);
        else {
                object = c->freelist;
                c->freelist = object[c->offset];
                stat(c, ALLOC_FASTPATH);
        }
        local_irq_restore(flags);
#endif

        if (unlikely((gfpflags & __GFP_ZERO) && object))
                memset(object, 0, c->objsize);

        return object;
}
void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
        return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
}
EXPORT_SYMBOL(kmem_cache_alloc);

#ifdef CONFIG_NUMA
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
{
        return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
#endif
/*
 * Slow path handling. This may still be called frequently since objects
 * have a longer lifetime than the cpu slabs in most processing loads.
 *
 * So we still attempt to reduce cache line usage. Just take the slab
 * lock and free the item. If there is no additional partial page
 * handling required then we can return immediately.
 */
static void __slab_free(struct kmem_cache *s, struct page *page,
                                void *x, void *addr, unsigned int offset)
{
        void *prior;
        void **object = (void *)x;
        struct kmem_cache_cpu *c;

#ifdef SLUB_FASTPATH
        unsigned long flags;

        local_irq_save(flags);
#endif
        c = get_cpu_slab(s, raw_smp_processor_id());
        stat(c, FREE_SLOWPATH);
        slab_lock(page);

        if (unlikely(SlabDebug(page)))
                goto debug;
checks_ok:
        prior = object[offset] = page->freelist;
        page->freelist = object;
        page->inuse--;

        if (unlikely(SlabFrozen(page))) {
                stat(c, FREE_FROZEN);
                goto out_unlock;
        }

        if (unlikely(!page->inuse))
                goto slab_empty;

        /*
         * Objects left in the slab. If it
         * was not on the partial list before
         * then add it.
         */
        if (unlikely(prior == page->end)) {
                add_partial(get_node(s, page_to_nid(page)), page, 1);
                stat(c, FREE_ADD_PARTIAL);
        }

out_unlock:
        slab_unlock(page);
#ifdef SLUB_FASTPATH
        local_irq_restore(flags);
#endif
        return;

slab_empty:
        if (prior != page->end) {
                /*
                 * Slab still on the partial list.
                 */
                remove_partial(s, page);
                stat(c, FREE_REMOVE_PARTIAL);
        }
        slab_unlock(page);
        stat(c, FREE_SLAB);
#ifdef SLUB_FASTPATH
        local_irq_restore(flags);
#endif
        discard_slab(s, page);
        return;

debug:
        if (!free_debug_processing(s, page, x, addr))
                goto out_unlock;
        goto checks_ok;
}
/*
 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
 * can perform fastpath freeing without additional function calls.
 *
 * The fastpath is only possible if we are freeing to the current cpu slab
 * of this processor. This is typically the case if we have just allocated
 * the item before.
 *
 * If fastpath is not possible then fall back to __slab_free where we deal
 * with all sorts of special processing.
 */
static __always_inline void slab_free(struct kmem_cache *s,
                        struct page *page, void *x, void *addr)
{
        void **object = (void *)x;
        struct kmem_cache_cpu *c;

#ifdef SLUB_FASTPATH
        void **freelist;

        c = get_cpu_slab(s, raw_smp_processor_id());
        debug_check_no_locks_freed(object, s->objsize);
        do {
                freelist = c->freelist;
                barrier();
                /*
                 * If the compiler would reorder the retrieval of c->page to
                 * come before c->freelist then an interrupt could
                 * change the cpu slab before we retrieve c->freelist. We
                 * could be matching on a page no longer active and put the
                 * object onto the freelist of the wrong slab.
                 *
                 * On the other hand: If we already have the freelist pointer
                 * then any change of cpu_slab will cause the cmpxchg to fail
                 * since the freelist pointers are unique per slab.
                 */
                if (unlikely(page != c->page || c->node < 0)) {
                        __slab_free(s, page, x, addr, c->offset);
                        break;
                }
                object[c->offset] = freelist;
                stat(c, FREE_FASTPATH);
        } while (cmpxchg_local(&c->freelist, freelist, object) != freelist);
#else
        unsigned long flags;

        local_irq_save(flags);
        debug_check_no_locks_freed(object, s->objsize);
        c = get_cpu_slab(s, smp_processor_id());
        if (likely(page == c->page && c->node >= 0)) {
                object[c->offset] = c->freelist;
                c->freelist = object;
                stat(c, FREE_FASTPATH);
        } else
                __slab_free(s, page, x, addr, c->offset);
        local_irq_restore(flags);
#endif
}
void kmem_cache_free(struct kmem_cache *s, void *x)
{
        struct page *page;

        page = virt_to_head_page(x);

        slab_free(s, page, x, __builtin_return_address(0));
}
EXPORT_SYMBOL(kmem_cache_free);
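
/*
 * A minimal usage sketch of the allocation API above (illustrative only;
 * "struct foo", "foo_cache" and the callers are hypothetical, not part of
 * this file). Compiled out via #if 0.
 */
#if 0
struct foo {
        int a;
        struct list_head list;
};

static struct kmem_cache *foo_cache;

static int __init foo_init(void)
{
        /* One cache, hardware cacheline aligned, no constructor. */
        foo_cache = kmem_cache_create("foo", sizeof(struct foo),
                                        0, SLAB_HWCACHE_ALIGN, NULL);
        if (!foo_cache)
                return -ENOMEM;
        return 0;
}

static void foo_use(void)
{
        /* Fastpath allocation from the current cpu slab when possible. */
        struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);

        if (!f)
                return;
        /* ... use f ... */
        kmem_cache_free(foo_cache, f);  /* fastpath if still the cpu slab */
}
#endif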
/* Figure out on which slab object the object resides */
static struct page *get_object_page(const void *x)
{
        struct page *page = virt_to_head_page(x);

        if (!PageSlab(page))
                return NULL;

        return page;
}
/*
 * Object placement in a slab is made very easy because we always start at
 * offset 0. If we tune the size of the object to the alignment then we can
 * get the required alignment by putting one properly sized object after
 * another.
 *
 * Notice that the allocation order determines the sizes of the per cpu
 * caches. Each processor has always one slab available for allocations.
 * Increasing the allocation order reduces the number of times that slabs
 * must be moved on and off the partial lists and is therefore a factor in
 * locking overhead.
 */

/*
 * Minimum / Maximum order of slab pages. This influences locking overhead
 * and slab fragmentation. A higher order reduces the number of partial slabs
 * and increases the number of allocations possible without having to
 * take the list_lock.
 */
static int slub_min_order;
static int slub_max_order = DEFAULT_MAX_ORDER;
static int slub_min_objects = DEFAULT_MIN_OBJECTS;

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 * (Could be removed. This was introduced to pacify the merge skeptics.)
 */
static int slub_nomerge;
/*
 * Calculate the order of allocation given a slab object size.
 *
 * The order of allocation has significant impact on performance and other
 * system components. Generally order 0 allocations should be preferred since
 * order 0 does not cause fragmentation in the page allocator. Larger objects
 * can be problematic to put into order 0 slabs because there may be too much
 * unused space left. We go to a higher order if more than 1/8th of the slab
 * would be wasted.
 *
 * In order to reach satisfactory performance we must ensure that a minimum
 * number of objects is in one slab. Otherwise we may generate too much
 * activity on the partial lists which requires taking the list_lock. This is
 * less a concern for large slabs though which are rarely used.
 *
 * slub_max_order specifies the order where we begin to stop considering the
 * number of objects in a slab as critical. If we reach slub_max_order then
 * we try to keep the page order as low as possible. So we accept more waste
 * of space in favor of a small page order.
 *
 * Higher order allocations also allow the placement of more objects in a
 * slab and thereby reduce object handling overhead. If the user has
 * requested a higher minimum order then we start with that one instead of
 * the smallest order which will fit the object.
 */
static inline int slab_order(int size, int min_objects,
                                int max_order, int fract_leftover)
{
        int order;
        int rem;
        int min_order = slub_min_order;

        for (order = max(min_order,
                                fls(min_objects * size - 1) - PAGE_SHIFT);
                        order <= max_order; order++) {

                unsigned long slab_size = PAGE_SIZE << order;

                if (slab_size < min_objects * size)
                        continue;

                rem = slab_size % size;

                if (rem <= slab_size / fract_leftover)
                        break;

        }

        return order;
}
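
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096 and
 * slub_min_order == 0): size = 520, min_objects = 4, fract_leftover = 8.
 *
 *      start: fls(4 * 520 - 1) - PAGE_SHIFT = fls(2079) - 12 = 12 - 12 = 0
 *      order 0: slab_size = 4096 >= 4 * 520 = 2080, so it is considered
 *      rem = 4096 % 520 = 456 <= 4096 / 8 = 512 -> order 0 is accepted
 *
 * An order 0 slab then holds 7 objects of 520 bytes with 456 bytes left
 * over; had the waste exceeded 512 bytes (1/8 of the slab), the loop
 * would have tried order 1 next.
 */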
static inline int calculate_order(int size)
{
        int order;
        int min_objects;
        int fraction;

        /*
         * Attempt to find best configuration for a slab. This
         * works by first attempting to generate a layout with
         * the best configuration and backing off gradually.
         *
         * First we reduce the acceptable waste in a slab. Then
         * we reduce the minimum objects required in a slab.
         */
        min_objects = slub_min_objects;
        while (min_objects > 1) {
                fraction = 8;
                while (fraction >= 4) {
                        order = slab_order(size, min_objects,
                                                slub_max_order, fraction);
                        if (order <= slub_max_order)
                                return order;
                        fraction /= 2;
                }
                min_objects /= 2;
        }

        /*
         * We were unable to place multiple objects in a slab. Now
         * let's see if we can place a single object there.
         */
        order = slab_order(size, 1, slub_max_order, 1);
        if (order <= slub_max_order)
                return order;

        /*
         * Doh, this slab cannot be placed using slub_max_order.
         */
        order = slab_order(size, 1, MAX_ORDER, 1);
        if (order <= MAX_ORDER)
                return order;
        return -ENOSYS;
}
/*
 * Figure out what the alignment of the objects will be.
 */
static unsigned long calculate_alignment(unsigned long flags,
                unsigned long align, unsigned long size)
{
        /*
         * If the user wants hardware cache aligned objects then
         * follow that suggestion if the object is sufficiently
         * large.
         *
         * The hardware cache alignment cannot override the
         * specified alignment though. If the specified alignment
         * is greater then it is used.
         */
        if ((flags & SLAB_HWCACHE_ALIGN) &&
                        size > cache_line_size() / 2)
                return max_t(unsigned long, align, cache_line_size());

        if (align < ARCH_SLAB_MINALIGN)
                return ARCH_SLAB_MINALIGN;

        return ALIGN(align, sizeof(void *));
}
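
/*
 * Illustrative example (assuming cache_line_size() == 64 and
 * ARCH_SLAB_MINALIGN == 8): a cache created with SLAB_HWCACHE_ALIGN and
 * 100 byte objects satisfies size > 64 / 2, so the alignment becomes
 * max(align, 64). The same flag on a 24 byte object falls through, and a
 * requested alignment of 0 is then raised to ARCH_SLAB_MINALIGN instead.
 */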
static void init_kmem_cache_cpu(struct kmem_cache *s,
                        struct kmem_cache_cpu *c)
{
        c->page = NULL;
        c->freelist = (void *)PAGE_MAPPING_ANON;
        c->node = 0;
        c->offset = s->offset / sizeof(void *);
        c->objsize = s->objsize;
}

static void init_kmem_cache_node(struct kmem_cache_node *n)
{
        n->nr_partial = 0;
        atomic_long_set(&n->nr_slabs, 0);
        spin_lock_init(&n->list_lock);
        INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
        INIT_LIST_HEAD(&n->full);
#endif
}
#ifdef CONFIG_SMP
/*
 * Per cpu array for per cpu structures.
 *
 * The per cpu array places all kmem_cache_cpu structures from one processor
 * close together meaning that it becomes possible that multiple per cpu
 * structures are contained in one cacheline. This may be particularly
 * beneficial for the kmalloc caches.
 *
 * A desktop system typically has around 60-80 slabs. With 100 here we are
 * likely able to get per cpu structures for all caches from the array defined
 * here. We must be able to cover all kmalloc caches during bootstrap.
 *
 * If the per cpu array is exhausted then fall back to kmalloc
 * of individual cachelines. No sharing is possible then.
 */
#define NR_KMEM_CACHE_CPU 100

static DEFINE_PER_CPU(struct kmem_cache_cpu,
                                kmem_cache_cpu)[NR_KMEM_CACHE_CPU];

static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
static cpumask_t kmem_cach_cpu_free_init_once = CPU_MASK_NONE;
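
/*
 * Note: the free entries of the per cpu array are chained through their
 * ->freelist field, with kmem_cache_cpu_free pointing at the head.
 * alloc_kmem_cache_cpu() below pops from this list and
 * free_kmem_cache_cpu() pushes back onto it; a structure is kfree()d
 * only if it lies outside this cpu's static array.
 */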
static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
                                                        int cpu, gfp_t flags)
{
        struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu);

        if (c)
                per_cpu(kmem_cache_cpu_free, cpu) =
                                (void *)c->freelist;
        else {
                /* Table overflow: So allocate ourselves */
                c = kmalloc_node(
                        ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()),
                        flags, cpu_to_node(cpu));
                if (!c)
                        return NULL;
        }

        init_kmem_cache_cpu(s, c);
        return c;
}

static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu)
{
        if (c < per_cpu(kmem_cache_cpu, cpu) ||
                        c > per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
                kfree(c);
                return;
        }
        c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu);
        per_cpu(kmem_cache_cpu_free, cpu) = c;
}
static void free_kmem_cache_cpus(struct kmem_cache *s)
{
        int cpu;

        for_each_online_cpu(cpu) {
                struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);

                if (c) {
                        s->cpu_slab[cpu] = NULL;
                        free_kmem_cache_cpu(c, cpu);
                }
        }
}

static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
{
        int cpu;

        for_each_online_cpu(cpu) {
                struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);

                if (c)
                        continue;

                c = alloc_kmem_cache_cpu(s, cpu, flags);
                if (!c) {
                        free_kmem_cache_cpus(s);
                        return 0;
                }
                s->cpu_slab[cpu] = c;
        }
        return 1;
}
/*
 * Initialize the per cpu array.
 */
static void init_alloc_cpu_cpu(int cpu)
{
        int i;

        if (cpu_isset(cpu, kmem_cach_cpu_free_init_once))
                return;

        for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
                free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);

        cpu_set(cpu, kmem_cach_cpu_free_init_once);
}

static void __init init_alloc_cpu(void)
{
        int cpu;

        for_each_online_cpu(cpu)
                init_alloc_cpu_cpu(cpu);
}

#else
static inline void free_kmem_cache_cpus(struct kmem_cache *s) {}
static inline void init_alloc_cpu(void) {}

static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
{
        init_kmem_cache_cpu(s, &s->cpu_slab);
        return 1;
}
#endif
#ifdef CONFIG_NUMA
/*
 * No kmalloc_node yet so do it by hand. We know that this is the first
 * slab on the node for this slabcache. There are no concurrent accesses
 * possible.
 *
 * Note that this function only works on the kmalloc_node_cache
 * when allocating for the kmalloc_node_cache. This is used for bootstrapping
 * memory on a fresh node that has no slab structures yet.
 */
static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
                                                           int node)
{
        struct page *page;
        struct kmem_cache_node *n;
        unsigned long flags;

        BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));

        page = new_slab(kmalloc_caches, gfpflags, node);

        BUG_ON(!page);
        if (page_to_nid(page) != node) {
                printk(KERN_ERR "SLUB: Unable to allocate memory from "
                                "node %d\n", node);
                printk(KERN_ERR "SLUB: Allocating a useless per node structure "
                                "in order to be able to continue\n");
        }

        n = page->freelist;
        BUG_ON(!n);
        page->freelist = get_freepointer(kmalloc_caches, n);
        page->inuse++;
        kmalloc_caches->node[node] = n;
#ifdef CONFIG_SLUB_DEBUG
        init_object(kmalloc_caches, n, 1);
        init_tracking(kmalloc_caches, n);
#endif
        init_kmem_cache_node(n);
        atomic_long_inc(&n->nr_slabs);
        /*
         * lockdep requires consistent irq usage for each lock
         * so even though there cannot be a race this early in
         * the boot sequence, we still disable irqs.
         */
        local_irq_save(flags);
        add_partial(n, page, 0);
        local_irq_restore(flags);
        return n;
}
static void free_kmem_cache_nodes(struct kmem_cache *s)
{
        int node;

        for_each_node_state(node, N_NORMAL_MEMORY) {
                struct kmem_cache_node *n = s->node[node];
                if (n && n != &s->local_node)
                        kmem_cache_free(kmalloc_caches, n);
                s->node[node] = NULL;
        }
}

static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
{
        int node;
        int local_node;

        if (slab_state >= UP)
                local_node = page_to_nid(virt_to_page(s));
        else
                local_node = 0;

        for_each_node_state(node, N_NORMAL_MEMORY) {
                struct kmem_cache_node *n;

                if (local_node == node)
                        n = &s->local_node;
                else {
                        if (slab_state == DOWN) {
                                n = early_kmem_cache_node_alloc(gfpflags,
                                                                node);
                                continue;
                        }
                        n = kmem_cache_alloc_node(kmalloc_caches,
                                                        gfpflags, node);

                        if (!n) {
                                free_kmem_cache_nodes(s);
                                return 0;
                        }

                }
                s->node[node] = n;
                init_kmem_cache_node(n);
        }
        return 1;
}
#else
static void free_kmem_cache_nodes(struct kmem_cache *s)
{
}

static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
{
        init_kmem_cache_node(&s->local_node);
        return 1;
}
#endif
/*
 * calculate_sizes() determines the order and the distribution of data within
 * a slab object.
 */
static int calculate_sizes(struct kmem_cache *s)
{
        unsigned long flags = s->flags;
        unsigned long size = s->objsize;
        unsigned long align = s->align;

        /*
         * Determine if we can poison the object itself. If the user of
         * the slab may touch the object after free or before allocation
         * then we should never poison the object itself.
         */
        if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
                        !s->ctor)
                s->flags |= __OBJECT_POISON;
        else
                s->flags &= ~__OBJECT_POISON;

        /*
         * Round up object size to the next word boundary. We can only
         * place the free pointer at word boundaries and this determines
         * the possible location of the free pointer.
         */
        size = ALIGN(size, sizeof(void *));

#ifdef CONFIG_SLUB_DEBUG
        /*
         * If we are Redzoning then check if there is some space between the
         * end of the object and the free pointer. If not then add an
         * additional word to have some bytes to store Redzone information.
         */
        if ((flags & SLAB_RED_ZONE) && size == s->objsize)
                size += sizeof(void *);
#endif

        /*
         * With that we have determined the number of bytes in actual use
         * by the object. This is the potential offset to the free pointer.
         */
        s->inuse = size;

        if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
                s->ctor)) {
                /*
                 * Relocate free pointer after the object if it is not
                 * permitted to overwrite the first word of the object on
                 * kmem_cache_free.
                 *
                 * This is the case if we do RCU, have a constructor or
                 * destructor or are poisoning the objects.
                 */
                s->offset = size;
                size += sizeof(void *);
        }

#ifdef CONFIG_SLUB_DEBUG
        if (flags & SLAB_STORE_USER)
                /*
                 * Need to store information about allocs and frees after
                 * the object.
                 */
                size += 2 * sizeof(struct track);

        if (flags & SLAB_RED_ZONE)
                /*
                 * Add some empty padding so that we can catch
                 * overwrites from earlier objects rather than let
                 * tracking information or the free pointer be
                 * corrupted if a user writes before the start
                 * of the object.
                 */
                size += sizeof(void *);
#endif

        /*
         * Determine the alignment based on various parameters that the
         * user specified and the dynamic determination of cache line size
         * on bootup.
         */
        align = calculate_alignment(flags, align, s->objsize);

        /*
         * SLUB stores one object immediately after another beginning from
         * offset 0. In order to align the objects we have to simply size
         * each object to conform to the alignment.
         */
        size = ALIGN(size, align);
        s->size = size;

        s->order = calculate_order(size);
        if (s->order < 0)
                return 0;

        /*
         * Determine the number of objects per slab
         */
        s->objects = (PAGE_SIZE << s->order) / size;

        return !!s->objects;
}
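
/*
 * Worked layout example (illustrative, assuming 64-bit pointers,
 * PAGE_SIZE == 4096, 8 byte alignment and no debug flags): a cache with
 * objsize = 20, no constructor and no SLAB_DESTROY_BY_RCU:
 *
 *      size  = ALIGN(20, 8) = 24
 *      inuse = 24, offset stays 0 (the freelist pointer may overlay
 *              the first word of a free object)
 *      size  = ALIGN(24, 8) = 24
 *      order 0 -> objects = 4096 / 24 = 170 per slab
 */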
static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
                const char *name, size_t size,
                size_t align, unsigned long flags,
                void (*ctor)(struct kmem_cache *, void *))
{
        memset(s, 0, kmem_size);
        s->name = name;
        s->ctor = ctor;
        s->objsize = size;
        s->align = align;
        s->flags = kmem_cache_flags(size, flags, name, ctor);

        if (!calculate_sizes(s))
                goto error;

        s->refcount = 1;
#ifdef CONFIG_NUMA
        s->remote_node_defrag_ratio = 100;
#endif
        if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
                goto error;

        if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA))
                return 1;
        free_kmem_cache_nodes(s);
error:
        if (flags & SLAB_PANIC)
                panic("Cannot create slab %s size=%lu realsize=%u "
                        "order=%u offset=%u flags=%lx\n",
                        s->name, (unsigned long)size, s->size, s->order,
                        s->offset, flags);
        return 0;
}
/*
 * Check if a given pointer is valid
 */
int kmem_ptr_validate(struct kmem_cache *s, const void *object)
{
        struct page *page;

        page = get_object_page(object);

        if (!page || s != page->slab)
                /* No slab or wrong slab */
                return 0;

        if (!check_valid_pointer(s, page, object))
                return 0;

        /*
         * We could also check if the object is on the slab's freelist.
         * But this would be too expensive and it seems that the main
         * purpose of kmem_ptr_validate() is to check if the object belongs
         * to a certain slab.
         */
        return 1;
}
EXPORT_SYMBOL(kmem_ptr_validate);

/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
        return s->objsize;
}
EXPORT_SYMBOL(kmem_cache_size);

const char *kmem_cache_name(struct kmem_cache *s)
{
        return s->name;
}
EXPORT_SYMBOL(kmem_cache_name);
/*
 * Attempt to free all slabs on a node. Return the number of slabs we
 * were unable to free.
 */
static int free_list(struct kmem_cache *s, struct kmem_cache_node *n,
                        struct list_head *list)
{
        int slabs_inuse = 0;
        unsigned long flags;
        struct page *page, *h;

        spin_lock_irqsave(&n->list_lock, flags);
        list_for_each_entry_safe(page, h, list, lru)
                if (!page->inuse) {
                        list_del(&page->lru);
                        discard_slab(s, page);
                } else
                        slabs_inuse++;
        spin_unlock_irqrestore(&n->list_lock, flags);
        return slabs_inuse;
}

/*
 * Release all resources used by a slab cache.
 */
static inline int kmem_cache_close(struct kmem_cache *s)
{
        int node;

        flush_all(s);

        /* Attempt to free all objects */
        free_kmem_cache_cpus(s);
        for_each_node_state(node, N_NORMAL_MEMORY) {
                struct kmem_cache_node *n = get_node(s, node);

                n->nr_partial -= free_list(s, n, &n->partial);
                if (atomic_long_read(&n->nr_slabs))
                        return 1;
        }
        free_kmem_cache_nodes(s);
        return 0;
}

/*
 * Close a cache and release the kmem_cache structure
 * (must be used for caches created using kmem_cache_create)
 */
void kmem_cache_destroy(struct kmem_cache *s)
{
        down_write(&slub_lock);
        s->refcount--;
        if (!s->refcount) {
                list_del(&s->list);
                up_write(&slub_lock);
                if (kmem_cache_close(s))
                        WARN_ON(1);
                sysfs_slab_remove(s);
        } else
                up_write(&slub_lock);
}
EXPORT_SYMBOL(kmem_cache_destroy);
/********************************************************************
 *              Kmalloc subsystem
 *******************************************************************/

struct kmem_cache kmalloc_caches[PAGE_SHIFT] __cacheline_aligned;
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_ZONE_DMA
static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT];
#endif

static int __init setup_slub_min_order(char *str)
{
        get_option(&str, &slub_min_order);

        return 1;
}

__setup("slub_min_order=", setup_slub_min_order);

static int __init setup_slub_max_order(char *str)
{
        get_option(&str, &slub_max_order);

        return 1;
}

__setup("slub_max_order=", setup_slub_max_order);

static int __init setup_slub_min_objects(char *str)
{
        get_option(&str, &slub_min_objects);

        return 1;
}

__setup("slub_min_objects=", setup_slub_min_objects);

static int __init setup_slub_nomerge(char *str)
{
        slub_nomerge = 1;
        return 1;
}

__setup("slub_nomerge", setup_slub_nomerge);
static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
                const char *name, int size, gfp_t gfp_flags)
{
        unsigned int flags = 0;

        if (gfp_flags & SLUB_DMA)
                flags = SLAB_CACHE_DMA;

        down_write(&slub_lock);
        if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
                        flags, NULL))
                goto panic;

        list_add(&s->list, &slab_caches);
        up_write(&slub_lock);
        if (sysfs_slab_add(s))
                goto panic;
        return s;

panic:
        panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
}
#ifdef CONFIG_ZONE_DMA

static void sysfs_add_func(struct work_struct *w)
{
        struct kmem_cache *s;

        down_write(&slub_lock);
        list_for_each_entry(s, &slab_caches, list) {
                if (s->flags & __SYSFS_ADD_DEFERRED) {
                        s->flags &= ~__SYSFS_ADD_DEFERRED;
                        sysfs_slab_add(s);
                }
        }
        up_write(&slub_lock);
}

static DECLARE_WORK(sysfs_add_work, sysfs_add_func);

static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
{
        struct kmem_cache *s;
        char *text;
        size_t realsize;

        s = kmalloc_caches_dma[index];
        if (s)
                return s;

        /* Dynamically create dma cache */
        if (flags & __GFP_WAIT)
                down_write(&slub_lock);
        else {
                if (!down_write_trylock(&slub_lock))
                        goto out;
        }

        if (kmalloc_caches_dma[index])
                goto unlock_out;

        realsize = kmalloc_caches[index].objsize;
        text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
                         (unsigned int)realsize);
        s = kmalloc(kmem_size, flags & ~SLUB_DMA);

        if (!s || !text || !kmem_cache_open(s, flags, text,
                        realsize, ARCH_KMALLOC_MINALIGN,
                        SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
                kfree(s);
                kfree(text);
                goto unlock_out;
        }

        list_add(&s->list, &slab_caches);
        kmalloc_caches_dma[index] = s;

        schedule_work(&sysfs_add_work);

unlock_out:
        up_write(&slub_lock);
out:
        return kmalloc_caches_dma[index];
}
#endif
/*
 * Conversion table for small slab sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
static s8 size_index[24] = {
        3,      /* 8 */
        4,      /* 16 */
        5,      /* 24 */
        5,      /* 32 */
        6,      /* 40 */
        6,      /* 48 */
        6,      /* 56 */
        6,      /* 64 */
        1,      /* 72 */
        1,      /* 80 */
        1,      /* 88 */
        1,      /* 96 */
        7,      /* 104 */
        7,      /* 112 */
        7,      /* 120 */
        7,      /* 128 */
        2,      /* 136 */
        2,      /* 144 */
        2,      /* 152 */
        2,      /* 160 */
        2,      /* 168 */
        2,      /* 176 */
        2,      /* 184 */
        2       /* 192 */
};
static struct kmem_cache *get_slab(size_t size, gfp_t flags)
{
        int index;

        if (size <= 192) {
                if (!size)
                        return ZERO_SIZE_PTR;

                index = size_index[(size - 1) / 8];
        } else
                index = fls(size - 1);

#ifdef CONFIG_ZONE_DMA
        if (unlikely((flags & SLUB_DMA)))
                return dma_kmalloc_cache(index, flags);

#endif
        return &kmalloc_caches[index];
}
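
/*
 * Index arithmetic examples (illustrative): a request for 100 bytes yields
 * size_index[(100 - 1) / 8] = size_index[12] = 7, i.e. the kmalloc-128
 * cache; a request for 1000 bytes is above 192, so fls(999) = 10 selects
 * the kmalloc-1024 cache.
 */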
void *__kmalloc(size_t size, gfp_t flags)
{
        struct kmem_cache *s;

        if (unlikely(size > PAGE_SIZE / 2))
                return (void *)__get_free_pages(flags | __GFP_COMP,
                                                        get_order(size));

        s = get_slab(size, flags);

        if (unlikely(ZERO_OR_NULL_PTR(s)))
                return s;

        return slab_alloc(s, flags, -1, __builtin_return_address(0));
}
EXPORT_SYMBOL(__kmalloc);
#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
        struct kmem_cache *s;

        if (unlikely(size > PAGE_SIZE / 2))
                return (void *)__get_free_pages(flags | __GFP_COMP,
                                                        get_order(size));

        s = get_slab(size, flags);

        if (unlikely(ZERO_OR_NULL_PTR(s)))
                return s;

        return slab_alloc(s, flags, node, __builtin_return_address(0));
}
EXPORT_SYMBOL(__kmalloc_node);
#endif

size_t ksize(const void *object)
{
        struct page *page;
        struct kmem_cache *s;

        BUG_ON(!object);
        if (unlikely(object == ZERO_SIZE_PTR))
                return 0;

        page = virt_to_head_page(object);
        BUG_ON(!page);

        if (unlikely(!PageSlab(page)))
                return PAGE_SIZE << compound_order(page);

        s = page->slab;
        BUG_ON(!s);

        /*
         * Debugging requires use of the padding between object
         * and whatever may come after it.
         */
        if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
                return s->objsize;

        /*
         * If we have the need to store the freelist pointer
         * back there or track user information then we can
         * only use the space before that information.
         */
        if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
                return s->inuse;

        /*
         * Else we can use all the padding etc for the allocation
         */
        return s->size;
}
EXPORT_SYMBOL(ksize);
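
/*
 * Illustrative only (not part of the original file): ksize() reports the
 * usable size of an allocation, which may exceed what was asked for
 * because requests are rounded up to a kmalloc cache size. Compiled out
 * via #if 0.
 */
#if 0
static void ksize_demo(void)
{
        void *p = kmalloc(100, GFP_KERNEL);     /* served from kmalloc-128 */

        if (!p)
                return;
        /* ksize(p) is at least 100 here; typically 128 in this setup. */
        printk(KERN_DEBUG "requested 100, usable %zu\n", ksize(p));
        kfree(p);
}
#endif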
void kfree(const void *x)
{
        struct page *page;
        void *object = (void *)x;

        if (unlikely(ZERO_OR_NULL_PTR(x)))
                return;

        page = virt_to_head_page(x);
        if (unlikely(!PageSlab(page))) {
                put_page(page);
                return;
        }
        slab_free(page->slab, page, object, __builtin_return_address(0));
}
EXPORT_SYMBOL(kfree);

static unsigned long count_partial(struct kmem_cache_node *n)
{
        unsigned long flags;
        unsigned long x = 0;
        struct page *page;

        spin_lock_irqsave(&n->list_lock, flags);
        list_for_each_entry(page, &n->partial, lru)
                x += page->inuse;
        spin_unlock_irqrestore(&n->list_lock, flags);
        return x;
}
/*
 * kmem_cache_shrink removes empty slabs from the partial lists and sorts
 * the remaining slabs by the number of items in use. The slabs with the
 * most items in use come first. New allocations will then fill those up
 * and thus they can be removed from the partial lists.
 *
 * The slabs with the least items are placed last. This results in them
 * being allocated from last, increasing the chance that their remaining
 * objects are freed and the slabs become empty.
 */
int kmem_cache_shrink(struct kmem_cache *s)
{
        int node;
        int i;
        struct kmem_cache_node *n;
        struct page *page;
        struct page *t;
        struct list_head *slabs_by_inuse =
                kmalloc(sizeof(struct list_head) * s->objects, GFP_KERNEL);
        unsigned long flags;

        if (!slabs_by_inuse)
                return -ENOMEM;

        flush_all(s);
        for_each_node_state(node, N_NORMAL_MEMORY) {
                n = get_node(s, node);

                if (!n->nr_partial)
                        continue;

                for (i = 0; i < s->objects; i++)
                        INIT_LIST_HEAD(slabs_by_inuse + i);

                spin_lock_irqsave(&n->list_lock, flags);

                /*
                 * Build lists indexed by the items in use in each slab.
                 *
                 * Note that concurrent frees may occur while we hold the
                 * list_lock. page->inuse here is the upper limit.
                 */
                list_for_each_entry_safe(page, t, &n->partial, lru) {
                        if (!page->inuse && slab_trylock(page)) {
                                /*
                                 * Must hold slab lock here because slab_free
                                 * may have freed the last object and be
                                 * waiting to release the slab.
                                 */
                                list_del(&page->lru);
                                n->nr_partial--;
                                slab_unlock(page);
                                discard_slab(s, page);
                        } else {
                                list_move(&page->lru,
                                        slabs_by_inuse + page->inuse);
                        }
                }

                /*
                 * Rebuild the partial list with the slabs filled up most
                 * first and the least used slabs at the end.
                 */
                for (i = s->objects - 1; i >= 0; i--)
                        list_splice(slabs_by_inuse + i, n->partial.prev);

                spin_unlock_irqrestore(&n->list_lock, flags);
        }

        kfree(slabs_by_inuse);
        return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);
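
/*
 * Illustrative only: a subsystem that has just freed many objects can
 * nudge SLUB into handing empty slabs back to the page allocator. The
 * cache pointer name is hypothetical. Compiled out via #if 0.
 */
#if 0
static void my_cache_trim(struct kmem_cache *my_cache)
{
        /* Drops empty partial slabs and sorts the rest by utilization. */
        if (kmem_cache_shrink(my_cache))
                printk(KERN_WARNING "shrink failed (out of memory?)\n");
}
#endif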
#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
static int slab_mem_going_offline_callback(void *arg)
{
        struct kmem_cache *s;

        down_read(&slub_lock);
        list_for_each_entry(s, &slab_caches, list)
                kmem_cache_shrink(s);
        up_read(&slub_lock);

        return 0;
}

static void slab_mem_offline_callback(void *arg)
{
        struct kmem_cache_node *n;
        struct kmem_cache *s;
        struct memory_notify *marg = arg;
        int offline_node;

        offline_node = marg->status_change_nid;

        /*
         * If the node still has available memory then we still need its
         * kmem_cache_node, so there is nothing to tear down.
         */
        if (offline_node < 0)
                return;

        down_read(&slub_lock);
        list_for_each_entry(s, &slab_caches, list) {
                n = get_node(s, offline_node);
                if (n) {
                        /*
                         * If n->nr_slabs > 0, slabs still exist on the node
                         * that is going down. We were unable to free them,
                         * and offline_pages() shouldn't have called this
                         * callback. So, we must fail.
                         */
                        BUG_ON(atomic_long_read(&n->nr_slabs));

                        s->node[offline_node] = NULL;
                        kmem_cache_free(kmalloc_caches, n);
                }
        }
        up_read(&slub_lock);
}

static int slab_mem_going_online_callback(void *arg)
{
        struct kmem_cache_node *n;
        struct kmem_cache *s;
        struct memory_notify *marg = arg;
        int nid = marg->status_change_nid;
        int ret = 0;

        /*
         * If the node's memory is already available, then kmem_cache_node is
         * already created. Nothing to do.
         */
        if (nid < 0)
                return 0;

        /*
         * We are bringing a node online. No memory is available yet. We must
         * allocate a kmem_cache_node structure in order to bring the node
         * online.
         */
        down_read(&slub_lock);
        list_for_each_entry(s, &slab_caches, list) {
                /*
                 * XXX: kmem_cache_alloc_node will fallback to other nodes
                 *      since memory is not yet available from the node that
                 *      is brought up.
                 */
                n = kmem_cache_alloc(kmalloc_caches, GFP_KERNEL);
                if (!n) {
                        ret = -ENOMEM;
                        goto out;
                }
                init_kmem_cache_node(n);
                s->node[nid] = n;
        }
out:
        up_read(&slub_lock);
        return ret;
}

static int slab_memory_callback(struct notifier_block *self,
                                unsigned long action, void *arg)
{
        int ret = 0;

        switch (action) {
        case MEM_GOING_ONLINE:
                ret = slab_mem_going_online_callback(arg);
                break;
        case MEM_GOING_OFFLINE:
                ret = slab_mem_going_offline_callback(arg);
                break;
        case MEM_OFFLINE:
        case MEM_CANCEL_ONLINE:
                slab_mem_offline_callback(arg);
                break;
        case MEM_ONLINE:
        case MEM_CANCEL_OFFLINE:
                break;
        }

        ret = notifier_from_errno(ret);
        return ret;
}

#endif /* CONFIG_MEMORY_HOTPLUG */
/********************************************************************
 *                      Basic setup of slabs
 *******************************************************************/

void __init kmem_cache_init(void)
{
        int i;
        int caches = 0;

        init_alloc_cpu();

#ifdef CONFIG_NUMA
        /*
         * Must first have the slab cache available for the allocations of the
         * struct kmem_cache_node's. There is special bootstrap code in
         * kmem_cache_open for slab_state == DOWN.
         */
        create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
                sizeof(struct kmem_cache_node), GFP_KERNEL);
        kmalloc_caches[0].refcount = -1;
        caches++;

        hotplug_memory_notifier(slab_memory_callback, 1);
#endif

        /* Able to allocate the per node structures */
        slab_state = PARTIAL;

        /* Caches that are not of the two-to-the-power-of size */
        if (KMALLOC_MIN_SIZE <= 64) {
                create_kmalloc_cache(&kmalloc_caches[1],
                                "kmalloc-96", 96, GFP_KERNEL);
                caches++;
        }
        if (KMALLOC_MIN_SIZE <= 128) {
                create_kmalloc_cache(&kmalloc_caches[2],
                                "kmalloc-192", 192, GFP_KERNEL);
                caches++;
        }

        for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++) {
                create_kmalloc_cache(&kmalloc_caches[i],
                        "kmalloc", 1 << i, GFP_KERNEL);
                caches++;
        }

        /*
         * Patch up the size_index table if we have strange large alignment
         * requirements for the kmalloc array. This is only the case for
         * mips it seems. The standard arches will not generate any code here.
         *
         * Largest permitted alignment is 256 bytes due to the way we
         * handle the index determination for the smaller caches.
         *
         * Make sure that nothing crazy happens if someone starts tinkering
         * around with ARCH_KMALLOC_MINALIGN
         */
        BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
                (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));

        for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
                size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;

        slab_state = UP;

        /* Provide the correct kmalloc names now that the caches are up */
        for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++)
                kmalloc_caches[i].name =
                        kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);

#ifdef CONFIG_SMP
        register_cpu_notifier(&slab_notifier);
        kmem_size = offsetof(struct kmem_cache, cpu_slab) +
                                nr_cpu_ids * sizeof(struct kmem_cache_cpu *);
#else
        kmem_size = sizeof(struct kmem_cache);
#endif

        printk(KERN_INFO
                "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
                " CPUs=%d, Nodes=%d\n",
                caches, cache_line_size(),
                slub_min_order, slub_max_order, slub_min_objects,
                nr_cpu_ids, nr_node_ids);
}
/*
 * Find a mergeable slab cache
 */
static int slab_unmergeable(struct kmem_cache *s)
{
        if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
                return 1;

        if (s->ctor)
                return 1;

        /*
         * We may have set a slab to be unmergeable during bootstrap.
         */
        if (s->refcount < 0)
                return 1;

        return 0;
}

static struct kmem_cache *find_mergeable(size_t size,
                size_t align, unsigned long flags, const char *name,
                void (*ctor)(struct kmem_cache *, void *))
{
        struct kmem_cache *s;

        if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
                return NULL;

        if (ctor)
                return NULL;

        size = ALIGN(size, sizeof(void *));
        align = calculate_alignment(flags, align, size);
        size = ALIGN(size, align);
        flags = kmem_cache_flags(size, flags, name, NULL);

        list_for_each_entry(s, &slab_caches, list) {
                if (slab_unmergeable(s))
                        continue;

                if (size > s->size)
                        continue;

                if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
                        continue;
                /*
                 * Check if alignment is compatible.
                 * Courtesy of Adrian Drzewiecki
                 */
                if ((s->size & ~(align - 1)) != s->size)
                        continue;

                if (s->size - size >= sizeof(void *))
                        continue;

                return s;
        }
        return NULL;
}
struct kmem_cache *kmem_cache_create(const char *name, size_t size,
                size_t align, unsigned long flags,
                void (*ctor)(struct kmem_cache *, void *))
{
        struct kmem_cache *s;

        down_write(&slub_lock);
        s = find_mergeable(size, align, flags, name, ctor);
        if (s) {
                int cpu;

                s->refcount++;
                /*
                 * Adjust the object sizes so that we clear
                 * the complete object on kzalloc.
                 */
                s->objsize = max(s->objsize, (int)size);

                /*
                 * And then we need to update the object size in the
                 * per cpu structures
                 */
                for_each_online_cpu(cpu)
                        get_cpu_slab(s, cpu)->objsize = s->objsize;
                s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
                up_write(&slub_lock);
                if (sysfs_slab_alias(s, name))
                        goto err;
                return s;
        }
        s = kmalloc(kmem_size, GFP_KERNEL);
        if (s) {
                if (kmem_cache_open(s, GFP_KERNEL, name,
                                size, align, flags, ctor)) {
                        list_add(&s->list, &slab_caches);
                        up_write(&slub_lock);
                        if (sysfs_slab_add(s))
                                goto err;
                        return s;
                }
                kfree(s);
        }
        up_write(&slub_lock);

err:
        if (flags & SLAB_PANIC)
                panic("Cannot create slabcache %s\n", name);
        else
                s = NULL;
        return s;
}
EXPORT_SYMBOL(kmem_cache_create);
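
/*
 * Illustrative only: caches without a constructor may be merged with a
 * compatible existing cache (see find_mergeable() above), so the returned
 * pointer can alias another cache. A constructor forces a distinct cache.
 * The names and the constructor body are hypothetical. Compiled out via
 * #if 0.
 */
#if 0
static void bar_ctor(struct kmem_cache *s, void *obj)
{
        memset(obj, 0, 64);     /* runs for each object of a new slab */
}

static void merge_demo(void)
{
        /* Likely merged into an existing 64-byte cache (e.g. kmalloc-64). */
        struct kmem_cache *plain = kmem_cache_create("bar-plain", 64, 0,
                                                        0, NULL);
        /* Never merged: slab_unmergeable() rejects caches with a ctor. */
        struct kmem_cache *with_ctor = kmem_cache_create("bar-ctor", 64, 0,
                                                        0, bar_ctor);
}
#endif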
#ifdef CONFIG_SMP
/*
 * Use the cpu notifier to ensure that the cpu slabs are flushed when
 * necessary.
 */
static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
                unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;
        struct kmem_cache *s;
        unsigned long flags;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                init_alloc_cpu_cpu(cpu);
                down_read(&slub_lock);
                list_for_each_entry(s, &slab_caches, list)
                        s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu,
                                                        GFP_KERNEL);
                up_read(&slub_lock);
                break;

        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                down_read(&slub_lock);
                list_for_each_entry(s, &slab_caches, list) {
                        struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);

                        local_irq_save(flags);
                        __flush_cpu_slab(s, cpu);
                        local_irq_restore(flags);
                        free_kmem_cache_cpu(c, cpu);
                        s->cpu_slab[cpu] = NULL;
                }
                up_read(&slub_lock);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata slab_notifier = {
        .notifier_call = slab_cpuup_callback
};

#endif
void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
{
        struct kmem_cache *s;

        if (unlikely(size > PAGE_SIZE / 2))
                return (void *)__get_free_pages(gfpflags | __GFP_COMP,
                                                        get_order(size));
        s = get_slab(size, gfpflags);

        if (unlikely(ZERO_OR_NULL_PTR(s)))
                return s;

        return slab_alloc(s, gfpflags, -1, caller);
}

void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
                                        int node, void *caller)
{
        struct kmem_cache *s;

        if (unlikely(size > PAGE_SIZE / 2))
                return (void *)__get_free_pages(gfpflags | __GFP_COMP,
                                                        get_order(size));
        s = get_slab(size, gfpflags);

        if (unlikely(ZERO_OR_NULL_PTR(s)))
                return s;

        return slab_alloc(s, gfpflags, node, caller);
}
  2765. #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
  2766. static int validate_slab(struct kmem_cache *s, struct page *page,
  2767. unsigned long *map)
  2768. {
  2769. void *p;
  2770. void *addr = slab_address(page);
  2771. if (!check_slab(s, page) ||
  2772. !on_freelist(s, page, NULL))
  2773. return 0;
  2774. /* Now we know that a valid freelist exists */
  2775. bitmap_zero(map, s->objects);
  2776. for_each_free_object(p, s, page->freelist) {
  2777. set_bit(slab_index(p, s, addr), map);
  2778. if (!check_object(s, page, p, 0))
  2779. return 0;
  2780. }
  2781. for_each_object(p, s, addr)
  2782. if (!test_bit(slab_index(p, s, addr), map))
  2783. if (!check_object(s, page, p, 1))
  2784. return 0;
  2785. return 1;
  2786. }
static void validate_slab_slab(struct kmem_cache *s, struct page *page,
						unsigned long *map)
{
	if (slab_trylock(page)) {
		validate_slab(s, page, map);
		slab_unlock(page);
	} else
		printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
			s->name, page);

	if (s->flags & DEBUG_DEFAULT_FLAGS) {
		if (!SlabDebug(page))
			printk(KERN_ERR "SLUB %s: SlabDebug not set "
				"on slab 0x%p\n", s->name, page);
	} else {
		if (SlabDebug(page))
			printk(KERN_ERR "SLUB %s: SlabDebug set on "
				"slab 0x%p\n", s->name, page);
	}
}
static int validate_slab_node(struct kmem_cache *s,
		struct kmem_cache_node *n, unsigned long *map)
{
	unsigned long count = 0;
	struct page *page;
	unsigned long flags;

	spin_lock_irqsave(&n->list_lock, flags);

	list_for_each_entry(page, &n->partial, lru) {
		validate_slab_slab(s, page, map);
		count++;
	}
	if (count != n->nr_partial)
		printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
			"counter=%ld\n", s->name, count, n->nr_partial);

	if (!(s->flags & SLAB_STORE_USER))
		goto out;

	list_for_each_entry(page, &n->full, lru) {
		validate_slab_slab(s, page, map);
		count++;
	}
	if (count != atomic_long_read(&n->nr_slabs))
		printk(KERN_ERR "SLUB: %s %ld slabs counted but "
			"counter=%ld\n", s->name, count,
			atomic_long_read(&n->nr_slabs));

out:
	spin_unlock_irqrestore(&n->list_lock, flags);
	return count;
}
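/*
 * Validate all slabs of a cache. Triggered from sysfs, for example
 * (the path assumes CONFIG_SYSFS; the cache name varies):
 *
 *	echo 1 > /sys/kernel/slab/kmalloc-64/validate
 *
 * Returns the number of slabs examined or -ENOMEM if the scratch
 * bitmap cannot be allocated.
 */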
static long validate_slab_cache(struct kmem_cache *s)
{
	int node;
	unsigned long count = 0;
	unsigned long *map = kmalloc(BITS_TO_LONGS(s->objects) *
				sizeof(unsigned long), GFP_KERNEL);

	if (!map)
		return -ENOMEM;

	flush_all(s);
	for_each_node_state(node, N_NORMAL_MEMORY) {
		struct kmem_cache_node *n = get_node(s, node);

		count += validate_slab_node(s, n, map);
	}
	kfree(map);
	return count;
}
#ifdef SLUB_RESILIENCY_TEST
static void resiliency_test(void)
{
	u8 *p;

	printk(KERN_ERR "SLUB resiliency testing\n");
	printk(KERN_ERR "-----------------------\n");
	printk(KERN_ERR "A. Corruption after allocation\n");

	p = kzalloc(16, GFP_KERNEL);
	p[16] = 0x12;
	printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
			" 0x12->0x%p\n\n", p + 16);

	validate_slab_cache(kmalloc_caches + 4);

	/* Hmmm... The next two are dangerous */
	p = kzalloc(32, GFP_KERNEL);
	p[32 + sizeof(void *)] = 0x34;
	printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
			" 0x34 -> 0x%p\n", p);
	printk(KERN_ERR
		"If allocated object is overwritten then not detectable\n\n");

	validate_slab_cache(kmalloc_caches + 5);
	p = kzalloc(64, GFP_KERNEL);
	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
	*p = 0x56;
	printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
									p);
	printk(KERN_ERR
		"If allocated object is overwritten then not detectable\n\n");
	validate_slab_cache(kmalloc_caches + 6);

	printk(KERN_ERR "\nB. Corruption after free\n");
	p = kzalloc(128, GFP_KERNEL);
	kfree(p);
	*p = 0x78;
	printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
	validate_slab_cache(kmalloc_caches + 7);

	p = kzalloc(256, GFP_KERNEL);
	kfree(p);
	p[50] = 0x9a;
	printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
			p);
	validate_slab_cache(kmalloc_caches + 8);

	p = kzalloc(512, GFP_KERNEL);
	kfree(p);
	p[512] = 0xab;
	printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
	validate_slab_cache(kmalloc_caches + 9);
}
#else
static void resiliency_test(void) {}
#endif
/*
 * Generate lists of code addresses where slabcache objects are allocated
 * and freed.
 */

struct location {
	unsigned long count;
	void *addr;
	long long sum_time;
	long min_time;
	long max_time;
	long min_pid;
	long max_pid;
	cpumask_t cpus;
	nodemask_t nodes;
};

struct loc_track {
	unsigned long max;
	unsigned long count;
	struct location *loc;
};

static void free_loc_track(struct loc_track *t)
{
	if (t->max)
		free_pages((unsigned long)t->loc,
			get_order(sizeof(struct location) * t->max));
}

static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
{
	struct location *l;
	int order;

	order = get_order(sizeof(struct location) * max);

	l = (void *)__get_free_pages(flags, order);
	if (!l)
		return 0;

	if (t->count) {
		memcpy(l, t->loc, sizeof(struct location) * t->count);
		free_loc_track(t);
	}
	t->max = max;
	t->loc = l;
	return 1;
}
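/*
 * Merge one track record into the sorted location list. A binary
 * search locates the slot for track->addr; on a match the existing
 * entry's count, age, pid and cpu/node masks are updated, otherwise a
 * new entry is inserted at that position. The array is grown with
 * GFP_ATOMIC because we are called with the node's list_lock held and
 * interrupts off.
 */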
static int add_location(struct loc_track *t, struct kmem_cache *s,
				const struct track *track)
{
	long start, end, pos;
	struct location *l;
	void *caddr;
	unsigned long age = jiffies - track->when;

	start = -1;
	end = t->count;

	for ( ; ; ) {
		pos = start + (end - start + 1) / 2;

		/*
		 * There is nothing at "end". If we end up there
		 * we need to add something to before end.
		 */
		if (pos == end)
			break;

		caddr = t->loc[pos].addr;
		if (track->addr == caddr) {

			l = &t->loc[pos];
			l->count++;
			if (track->when) {
				l->sum_time += age;
				if (age < l->min_time)
					l->min_time = age;
				if (age > l->max_time)
					l->max_time = age;

				if (track->pid < l->min_pid)
					l->min_pid = track->pid;
				if (track->pid > l->max_pid)
					l->max_pid = track->pid;

				cpu_set(track->cpu, l->cpus);
			}
			node_set(page_to_nid(virt_to_page(track)), l->nodes);
			return 1;
		}

		if (track->addr < caddr)
			end = pos;
		else
			start = pos;
	}

	/*
	 * Not found. Insert new tracking element.
	 */
	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
		return 0;

	l = t->loc + pos;
	if (pos < t->count)
		memmove(l + 1, l,
			(t->count - pos) * sizeof(struct location));
	t->count++;
	l->count = 1;
	l->addr = track->addr;
	l->sum_time = age;
	l->min_time = age;
	l->max_time = age;
	l->min_pid = track->pid;
	l->max_pid = track->pid;
	cpus_clear(l->cpus);
	cpu_set(track->cpu, l->cpus);
	nodes_clear(l->nodes);
	node_set(page_to_nid(virt_to_page(track)), l->nodes);
	return 1;
}
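/*
 * Collect the track records for all allocated objects in one slab:
 * mark the free objects in a bitmap first, then feed every object that
 * is not marked free to add_location().
 */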
static void process_slab(struct loc_track *t, struct kmem_cache *s,
		struct page *page, enum track_item alloc)
{
	void *addr = slab_address(page);
	DECLARE_BITMAP(map, s->objects);
	void *p;

	bitmap_zero(map, s->objects);
	for_each_free_object(p, s, page->freelist)
		set_bit(slab_index(p, s, addr), map);

	for_each_object(p, s, addr)
		if (!test_bit(slab_index(p, s, addr), map))
			add_location(t, s, get_track(s, p, alloc));
}
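/*
 * Emit the alloc_calls/free_calls listing: one line per unique call
 * site with its hit count, the symbol, age statistics (min/average/max
 * in jiffies), the pid range and, where space permits, the cpu and
 * node lists. Output is clamped to a single page.
 */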
static int list_locations(struct kmem_cache *s, char *buf,
					enum track_item alloc)
{
	int len = 0;
	unsigned long i;
	struct loc_track t = { 0, 0, NULL };
	int node;

	if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
			GFP_TEMPORARY))
		return sprintf(buf, "Out of memory\n");

	/* Push back cpu slabs */
	flush_all(s);

	for_each_node_state(node, N_NORMAL_MEMORY) {
		struct kmem_cache_node *n = get_node(s, node);
		unsigned long flags;
		struct page *page;

		if (!atomic_long_read(&n->nr_slabs))
			continue;

		spin_lock_irqsave(&n->list_lock, flags);
		list_for_each_entry(page, &n->partial, lru)
			process_slab(&t, s, page, alloc);
		list_for_each_entry(page, &n->full, lru)
			process_slab(&t, s, page, alloc);
		spin_unlock_irqrestore(&n->list_lock, flags);
	}

	for (i = 0; i < t.count; i++) {
		struct location *l = &t.loc[i];

		if (len > PAGE_SIZE - 100)
			break;
		len += sprintf(buf + len, "%7ld ", l->count);

		if (l->addr)
			len += sprint_symbol(buf + len, (unsigned long)l->addr);
		else
			len += sprintf(buf + len, "<not-available>");

		if (l->sum_time != l->min_time) {
			unsigned long remainder;

			len += sprintf(buf + len, " age=%ld/%ld/%ld",
				l->min_time,
				div_long_long_rem(l->sum_time, l->count, &remainder),
				l->max_time);
		} else
			len += sprintf(buf + len, " age=%ld",
				l->min_time);

		if (l->min_pid != l->max_pid)
			len += sprintf(buf + len, " pid=%ld-%ld",
				l->min_pid, l->max_pid);
		else
			len += sprintf(buf + len, " pid=%ld",
				l->min_pid);

		if (num_online_cpus() > 1 && !cpus_empty(l->cpus) &&
				len < PAGE_SIZE - 60) {
			len += sprintf(buf + len, " cpus=");
			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
					l->cpus);
		}

		if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
				len < PAGE_SIZE - 60) {
			len += sprintf(buf + len, " nodes=");
			len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
					l->nodes);
		}

		len += sprintf(buf + len, "\n");
	}

	free_loc_track(&t);
	if (!t.count)
		len += sprintf(buf, "No data\n");
	return len;
}
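/*
 * Object/slab accounting for the sysfs files below. The SO_ flags
 * select which slab states (full, partial, per-cpu) are counted and
 * whether slabs or individual objects are reported; totals are also
 * broken down per node on NUMA builds.
 */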
enum slab_stat_type {
	SL_FULL,
	SL_PARTIAL,
	SL_CPU,
	SL_OBJECTS
};

#define SO_FULL		(1 << SL_FULL)
#define SO_PARTIAL	(1 << SL_PARTIAL)
#define SO_CPU		(1 << SL_CPU)
#define SO_OBJECTS	(1 << SL_OBJECTS)
static unsigned long slab_objects(struct kmem_cache *s,
			char *buf, unsigned long flags)
{
	unsigned long total = 0;
	int cpu;
	int node;
	int x;
	unsigned long *nodes;
	unsigned long *per_cpu;

	nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
	if (!nodes)
		return -ENOMEM;
	per_cpu = nodes + nr_node_ids;

	for_each_possible_cpu(cpu) {
		struct page *page;
		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);

		if (!c)
			continue;

		page = c->page;
		node = c->node;
		if (node < 0)
			continue;
		if (page) {
			if (flags & SO_CPU) {
				if (flags & SO_OBJECTS)
					x = page->inuse;
				else
					x = 1;
				total += x;
				nodes[node] += x;
			}
			per_cpu[node]++;
		}
	}

	for_each_node_state(node, N_NORMAL_MEMORY) {
		struct kmem_cache_node *n = get_node(s, node);

		if (flags & SO_PARTIAL) {
			if (flags & SO_OBJECTS)
				x = count_partial(n);
			else
				x = n->nr_partial;
			total += x;
			nodes[node] += x;
		}

		if (flags & SO_FULL) {
			int full_slabs = atomic_long_read(&n->nr_slabs)
					- per_cpu[node]
					- n->nr_partial;

			if (flags & SO_OBJECTS)
				x = full_slabs * s->objects;
			else
				x = full_slabs;
			total += x;
			nodes[node] += x;
		}
	}

	x = sprintf(buf, "%lu", total);
#ifdef CONFIG_NUMA
	for_each_node_state(node, N_NORMAL_MEMORY)
		if (nodes[node])
			x += sprintf(buf + x, " N%d=%lu",
					node, nodes[node]);
#endif
	kfree(nodes);
	return x + sprintf(buf + x, "\n");
}
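/*
 * Check whether the cache still holds any slabs. Used to refuse
 * changes to layout-affecting debug flags (red zoning, poisoning,
 * user tracking) once objects may exist.
 */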
static int any_slab_objects(struct kmem_cache *s)
{
	int node;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);

		if (c && c->page)
			return 1;
	}

	for_each_online_node(node) {
		struct kmem_cache_node *n = get_node(s, node);

		if (!n)
			continue;

		if (n->nr_partial || atomic_long_read(&n->nr_slabs))
			return 1;
	}
	return 0;
}
#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
#define to_slab(n) container_of(n, struct kmem_cache, kobj)

struct slab_attribute {
	struct attribute attr;
	ssize_t (*show)(struct kmem_cache *s, char *buf);
	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
};

#define SLAB_ATTR_RO(_name) \
	static struct slab_attribute _name##_attr = __ATTR_RO(_name)

#define SLAB_ATTR(_name) \
	static struct slab_attribute _name##_attr =  \
	__ATTR(_name, 0644, _name##_show, _name##_store)
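/*
 * Each SLAB_ATTR/SLAB_ATTR_RO definition below becomes one file per
 * cache in sysfs (typically under /sys/kernel/slab/<cache>/, given
 * where slab_kset is registered), for example:
 *
 *	cat /sys/kernel/slab/kmalloc-96/order
 *	echo 1 > /sys/kernel/slab/kmalloc-96/trace
 */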
static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->size);
}
SLAB_ATTR_RO(slab_size);

static ssize_t align_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->align);
}
SLAB_ATTR_RO(align);

static ssize_t object_size_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->objsize);
}
SLAB_ATTR_RO(object_size);

static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->objects);
}
SLAB_ATTR_RO(objs_per_slab);

static ssize_t order_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->order);
}
SLAB_ATTR_RO(order);

static ssize_t ctor_show(struct kmem_cache *s, char *buf)
{
	if (s->ctor) {
		int n = sprint_symbol(buf, (unsigned long)s->ctor);

		return n + sprintf(buf + n, "\n");
	}
	return 0;
}
SLAB_ATTR_RO(ctor);

static ssize_t aliases_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->refcount - 1);
}
SLAB_ATTR_RO(aliases);

static ssize_t slabs_show(struct kmem_cache *s, char *buf)
{
	return slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU);
}
SLAB_ATTR_RO(slabs);

static ssize_t partial_show(struct kmem_cache *s, char *buf)
{
	return slab_objects(s, buf, SO_PARTIAL);
}
SLAB_ATTR_RO(partial);

static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
{
	return slab_objects(s, buf, SO_CPU);
}
SLAB_ATTR_RO(cpu_slabs);

static ssize_t objects_show(struct kmem_cache *s, char *buf)
{
	return slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU|SO_OBJECTS);
}
SLAB_ATTR_RO(objects);
static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
}

static ssize_t sanity_checks_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	s->flags &= ~SLAB_DEBUG_FREE;
	if (buf[0] == '1')
		s->flags |= SLAB_DEBUG_FREE;
	return length;
}
SLAB_ATTR(sanity_checks);

static ssize_t trace_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
}

static ssize_t trace_store(struct kmem_cache *s, const char *buf,
							size_t length)
{
	s->flags &= ~SLAB_TRACE;
	if (buf[0] == '1')
		s->flags |= SLAB_TRACE;
	return length;
}
SLAB_ATTR(trace);

static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
}

static ssize_t reclaim_account_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
	if (buf[0] == '1')
		s->flags |= SLAB_RECLAIM_ACCOUNT;
	return length;
}
SLAB_ATTR(reclaim_account);

static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
}
SLAB_ATTR_RO(hwcache_align);
#ifdef CONFIG_ZONE_DMA
static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
}
SLAB_ATTR_RO(cache_dma);
#endif

static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
}
SLAB_ATTR_RO(destroy_by_rcu);

static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
}

static ssize_t red_zone_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	if (any_slab_objects(s))
		return -EBUSY;

	s->flags &= ~SLAB_RED_ZONE;
	if (buf[0] == '1')
		s->flags |= SLAB_RED_ZONE;
	calculate_sizes(s);
	return length;
}
SLAB_ATTR(red_zone);

static ssize_t poison_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
}

static ssize_t poison_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	if (any_slab_objects(s))
		return -EBUSY;

	s->flags &= ~SLAB_POISON;
	if (buf[0] == '1')
		s->flags |= SLAB_POISON;
	calculate_sizes(s);
	return length;
}
SLAB_ATTR(poison);

static ssize_t store_user_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
}

static ssize_t store_user_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	if (any_slab_objects(s))
		return -EBUSY;

	s->flags &= ~SLAB_STORE_USER;
	if (buf[0] == '1')
		s->flags |= SLAB_STORE_USER;
	calculate_sizes(s);
	return length;
}
SLAB_ATTR(store_user);
static ssize_t validate_show(struct kmem_cache *s, char *buf)
{
	return 0;
}

static ssize_t validate_store(struct kmem_cache *s,
			const char *buf, size_t length)
{
	int ret = -EINVAL;

	if (buf[0] == '1') {
		ret = validate_slab_cache(s);
		if (ret >= 0)
			ret = length;
	}
	return ret;
}
SLAB_ATTR(validate);

static ssize_t shrink_show(struct kmem_cache *s, char *buf)
{
	return 0;
}

static ssize_t shrink_store(struct kmem_cache *s,
			const char *buf, size_t length)
{
	if (buf[0] == '1') {
		int rc = kmem_cache_shrink(s);

		if (rc)
			return rc;
	} else
		return -EINVAL;
	return length;
}
SLAB_ATTR(shrink);

static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
{
	if (!(s->flags & SLAB_STORE_USER))
		return -ENOSYS;
	return list_locations(s, buf, TRACK_ALLOC);
}
SLAB_ATTR_RO(alloc_calls);

static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
{
	if (!(s->flags & SLAB_STORE_USER))
		return -ENOSYS;
	return list_locations(s, buf, TRACK_FREE);
}
SLAB_ATTR_RO(free_calls);
#ifdef CONFIG_NUMA
static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
}

static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	int n = simple_strtoul(buf, NULL, 10);

	if (n < 100)
		s->remote_node_defrag_ratio = n * 10;
	return length;
}
SLAB_ATTR(remote_node_defrag_ratio);
#endif
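/*
 * Per-cpu statistics (CONFIG_SLUB_STATS): each file shows the total
 * over all online cpus followed by the nonzero per-cpu counts, e.g.
 * "4295 c0=4090 c1=205" (values are illustrative only).
 */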
#ifdef CONFIG_SLUB_STATS
static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
{
	unsigned long sum = 0;
	int cpu;
	int len;
	int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	for_each_online_cpu(cpu) {
		unsigned x = get_cpu_slab(s, cpu)->stat[si];

		data[cpu] = x;
		sum += x;
	}

	len = sprintf(buf, "%lu", sum);

	for_each_online_cpu(cpu) {
		if (data[cpu] && len < PAGE_SIZE - 20)
			len += sprintf(buf + len, " c%d=%u", cpu, data[cpu]);
	}
	kfree(data);
	return len + sprintf(buf + len, "\n");
}

#define STAT_ATTR(si, text) 					\
static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
{								\
	return show_stat(s, buf, si);				\
}								\
SLAB_ATTR_RO(text)

STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
STAT_ATTR(FREE_FASTPATH, free_fastpath);
STAT_ATTR(FREE_SLOWPATH, free_slowpath);
STAT_ATTR(FREE_FROZEN, free_frozen);
STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
STAT_ATTR(ALLOC_SLAB, alloc_slab);
STAT_ATTR(ALLOC_REFILL, alloc_refill);
STAT_ATTR(FREE_SLAB, free_slab);
STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
#endif
static struct attribute *slab_attrs[] = {
	&slab_size_attr.attr,
	&object_size_attr.attr,
	&objs_per_slab_attr.attr,
	&order_attr.attr,
	&objects_attr.attr,
	&slabs_attr.attr,
	&partial_attr.attr,
	&cpu_slabs_attr.attr,
	&ctor_attr.attr,
	&aliases_attr.attr,
	&align_attr.attr,
	&sanity_checks_attr.attr,
	&trace_attr.attr,
	&hwcache_align_attr.attr,
	&reclaim_account_attr.attr,
	&destroy_by_rcu_attr.attr,
	&red_zone_attr.attr,
	&poison_attr.attr,
	&store_user_attr.attr,
	&validate_attr.attr,
	&shrink_attr.attr,
	&alloc_calls_attr.attr,
	&free_calls_attr.attr,
#ifdef CONFIG_ZONE_DMA
	&cache_dma_attr.attr,
#endif
#ifdef CONFIG_NUMA
	&remote_node_defrag_ratio_attr.attr,
#endif
#ifdef CONFIG_SLUB_STATS
	&alloc_fastpath_attr.attr,
	&alloc_slowpath_attr.attr,
	&free_fastpath_attr.attr,
	&free_slowpath_attr.attr,
	&free_frozen_attr.attr,
	&free_add_partial_attr.attr,
	&free_remove_partial_attr.attr,
	&alloc_from_partial_attr.attr,
	&alloc_slab_attr.attr,
	&alloc_refill_attr.attr,
	&free_slab_attr.attr,
	&cpuslab_flush_attr.attr,
	&deactivate_full_attr.attr,
	&deactivate_empty_attr.attr,
	&deactivate_to_head_attr.attr,
	&deactivate_to_tail_attr.attr,
	&deactivate_remote_frees_attr.attr,
#endif
	NULL
};

static struct attribute_group slab_attr_group = {
	.attrs = slab_attrs,
};
static ssize_t slab_attr_show(struct kobject *kobj,
				struct attribute *attr,
				char *buf)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->show)
		return -EIO;

	err = attribute->show(s, buf);

	return err;
}

static ssize_t slab_attr_store(struct kobject *kobj,
				struct attribute *attr,
				const char *buf, size_t len)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->store)
		return -EIO;

	err = attribute->store(s, buf, len);

	return err;
}

static void kmem_cache_release(struct kobject *kobj)
{
	struct kmem_cache *s = to_slab(kobj);

	kfree(s);
}

static struct sysfs_ops slab_sysfs_ops = {
	.show = slab_attr_show,
	.store = slab_attr_store,
};

static struct kobj_type slab_ktype = {
	.sysfs_ops = &slab_sysfs_ops,
	.release = kmem_cache_release
};
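/*
 * Only kobjects of slab_ktype, i.e. actual slab caches, generate
 * uevents; anything else placed in the kset is filtered out.
 */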
static int uevent_filter(struct kset *kset, struct kobject *kobj)
{
	struct kobj_type *ktype = get_ktype(kobj);

	if (ktype == &slab_ktype)
		return 1;
	return 0;
}

static struct kset_uevent_ops slab_uevent_ops = {
	.filter = uevent_filter,
};

static struct kset *slab_kset;

#define ID_STR_LENGTH 64

/* Create a unique string id for a slab cache:
 *
 * Format	:[flags-]size
 */
static char *create_unique_id(struct kmem_cache *s)
{
	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
	char *p = name;

	BUG_ON(!name);

	*p++ = ':';
	/*
	 * First flags affecting slabcache operations. We will only
	 * get here for aliasable slabs so we do not need to support
	 * too many flags. The flags here must cover all flags that
	 * are matched during merging to guarantee that the id is
	 * unique.
	 */
	if (s->flags & SLAB_CACHE_DMA)
		*p++ = 'd';
	if (s->flags & SLAB_RECLAIM_ACCOUNT)
		*p++ = 'a';
	if (s->flags & SLAB_DEBUG_FREE)
		*p++ = 'F';
	if (p != name + 1)
		*p++ = '-';
	p += sprintf(p, "%07d", s->size);
	BUG_ON(p > name + ID_STR_LENGTH - 1);
	return name;
}
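/*
 * Register a cache with sysfs. Mergeable caches are registered under
 * the unique id from create_unique_id() and get their human-readable
 * names attached as symlink aliases; unmergeable caches (the usual
 * case when debugging is enabled) use their own name directly.
 */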
static int sysfs_slab_add(struct kmem_cache *s)
{
	int err;
	const char *name;
	int unmergeable;

	if (slab_state < SYSFS)
		/* Defer until later */
		return 0;

	unmergeable = slab_unmergeable(s);
	if (unmergeable) {
		/*
		 * Slabcache can never be merged so we can use the name proper.
		 * This is typically the case for debug situations. In that
		 * case we can catch duplicate names easily.
		 */
		sysfs_remove_link(&slab_kset->kobj, s->name);
		name = s->name;
	} else {
		/*
		 * Create a unique name for the slab as a target
		 * for the symlinks.
		 */
		name = create_unique_id(s);
	}

	s->kobj.kset = slab_kset;
	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
	if (err) {
		kobject_put(&s->kobj);
		return err;
	}

	err = sysfs_create_group(&s->kobj, &slab_attr_group);
	if (err)
		return err;
	kobject_uevent(&s->kobj, KOBJ_ADD);
	if (!unmergeable) {
		/* Setup first alias */
		sysfs_slab_alias(s, s->name);
		kfree(name);
	}
	return 0;
}
static void sysfs_slab_remove(struct kmem_cache *s)
{
	kobject_uevent(&s->kobj, KOBJ_REMOVE);
	kobject_del(&s->kobj);
	kobject_put(&s->kobj);
}

/*
 * Need to buffer aliases during bootup until sysfs becomes
 * available lest we lose that information.
 */
struct saved_alias {
	struct kmem_cache *s;
	const char *name;
	struct saved_alias *next;
};
static struct saved_alias *alias_list;

static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
	struct saved_alias *al;

	if (slab_state == SYSFS) {
		/*
		 * If we have a leftover link then remove it.
		 */
		sysfs_remove_link(&slab_kset->kobj, name);
		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
	}

	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
	if (!al)
		return -ENOMEM;

	al->s = s;
	al->name = name;
	al->next = alias_list;
	alias_list = al;
	return 0;
}
static int __init slab_sysfs_init(void)
{
	struct kmem_cache *s;
	int err;

	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
	if (!slab_kset) {
		printk(KERN_ERR "Cannot register slab subsystem.\n");
		return -ENOSYS;
	}

	slab_state = SYSFS;

	list_for_each_entry(s, &slab_caches, list) {
		err = sysfs_slab_add(s);
		if (err)
			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
						" to sysfs\n", s->name);
	}

	while (alias_list) {
		struct saved_alias *al = alias_list;

		alias_list = alias_list->next;
		err = sysfs_slab_alias(al->s, al->name);
		if (err)
			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
					" %s to sysfs\n", al->name);
		kfree(al);
	}

	resiliency_test();
	return 0;
}

__initcall(slab_sysfs_init);
#endif
/*
 * The /proc/slabinfo ABI
 */
#ifdef CONFIG_SLABINFO

ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
{
	return -EINVAL;
}

static void print_slabinfo_header(struct seq_file *m)
{
	seq_puts(m, "slabinfo - version: 2.1\n");
	seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
		"<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
	seq_putc(m, '\n');
}
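/*
 * The seq_file iterator walks slab_caches under slub_lock, so caches
 * cannot be added or removed while /proc/slabinfo is being read.
 */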
static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;

	down_read(&slub_lock);
	if (!n)
		print_slabinfo_header(m);

	return seq_list_start(&slab_caches, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

static void s_stop(struct seq_file *m, void *p)
{
	up_read(&slub_lock);
}
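/*
 * One line of /proc/slabinfo per cache. SLUB has no tunables or shared
 * arrays, so those columns are zero. Active objects are an estimate:
 * partial slabs are counted exactly, full slabs are assumed fully in
 * use.
 */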
static int s_show(struct seq_file *m, void *p)
{
	unsigned long nr_partials = 0;
	unsigned long nr_slabs = 0;
	unsigned long nr_inuse = 0;
	unsigned long nr_objs;
	struct kmem_cache *s;
	int node;

	s = list_entry(p, struct kmem_cache, list);

	for_each_online_node(node) {
		struct kmem_cache_node *n = get_node(s, node);

		if (!n)
			continue;

		nr_partials += n->nr_partial;
		nr_slabs += atomic_long_read(&n->nr_slabs);
		nr_inuse += count_partial(n);
	}

	nr_objs = nr_slabs * s->objects;
	nr_inuse += (nr_slabs - nr_partials) * s->objects;

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
		   nr_objs, s->size, s->objects, (1 << s->order));
	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
		   0UL);
	seq_putc(m, '\n');
	return 0;
}

const struct seq_operations slabinfo_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

#endif /* CONFIG_SLABINFO */