vmalloc.c
  1. /*
  2. * linux/mm/vmalloc.c
  3. *
  4. * Copyright (C) 1993 Linus Torvalds
  5. * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  6. * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
  7. * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
  8. * Numa awareness, Christoph Lameter, SGI, June 2005
  9. */
  10. #include <linux/vmalloc.h>
  11. #include <linux/mm.h>
  12. #include <linux/module.h>
  13. #include <linux/highmem.h>
  14. #include <linux/sched.h>
  15. #include <linux/slab.h>
  16. #include <linux/spinlock.h>
  17. #include <linux/interrupt.h>
  18. #include <linux/proc_fs.h>
  19. #include <linux/seq_file.h>
  20. #include <linux/debugobjects.h>
  21. #include <linux/kallsyms.h>
  22. #include <linux/list.h>
  23. #include <linux/rbtree.h>
  24. #include <linux/radix-tree.h>
  25. #include <linux/rcupdate.h>
  26. #include <linux/pfn.h>
  27. #include <linux/kmemleak.h>
  28. #include <asm/atomic.h>
  29. #include <asm/uaccess.h>
  30. #include <asm/tlbflush.h>
  31. #include <asm/shmparam.h>
  32. /*** Page table manipulation functions ***/
  33. static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
  34. {
  35. pte_t *pte;
  36. pte = pte_offset_kernel(pmd, addr);
  37. do {
  38. pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
  39. WARN_ON(!pte_none(ptent) && !pte_present(ptent));
  40. } while (pte++, addr += PAGE_SIZE, addr != end);
  41. }
  42. static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
  43. {
  44. pmd_t *pmd;
  45. unsigned long next;
  46. pmd = pmd_offset(pud, addr);
  47. do {
  48. next = pmd_addr_end(addr, end);
  49. if (pmd_none_or_clear_bad(pmd))
  50. continue;
  51. vunmap_pte_range(pmd, addr, next);
  52. } while (pmd++, addr = next, addr != end);
  53. }
  54. static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
  55. {
  56. pud_t *pud;
  57. unsigned long next;
  58. pud = pud_offset(pgd, addr);
  59. do {
  60. next = pud_addr_end(addr, end);
  61. if (pud_none_or_clear_bad(pud))
  62. continue;
  63. vunmap_pmd_range(pud, addr, next);
  64. } while (pud++, addr = next, addr != end);
  65. }
  66. static void vunmap_page_range(unsigned long addr, unsigned long end)
  67. {
  68. pgd_t *pgd;
  69. unsigned long next;
  70. BUG_ON(addr >= end);
  71. pgd = pgd_offset_k(addr);
  72. do {
  73. next = pgd_addr_end(addr, end);
  74. if (pgd_none_or_clear_bad(pgd))
  75. continue;
  76. vunmap_pud_range(pgd, addr, next);
  77. } while (pgd++, addr = next, addr != end);
  78. }
  79. static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
  80. unsigned long end, pgprot_t prot, struct page **pages, int *nr)
  81. {
  82. pte_t *pte;
  83. /*
  84. * nr is a running index into the array which helps higher level
  85. * callers keep track of where we're up to.
  86. */
  87. pte = pte_alloc_kernel(pmd, addr);
  88. if (!pte)
  89. return -ENOMEM;
  90. do {
  91. struct page *page = pages[*nr];
  92. if (WARN_ON(!pte_none(*pte)))
  93. return -EBUSY;
  94. if (WARN_ON(!page))
  95. return -ENOMEM;
  96. set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
  97. (*nr)++;
  98. } while (pte++, addr += PAGE_SIZE, addr != end);
  99. return 0;
  100. }
  101. static int vmap_pmd_range(pud_t *pud, unsigned long addr,
  102. unsigned long end, pgprot_t prot, struct page **pages, int *nr)
  103. {
  104. pmd_t *pmd;
  105. unsigned long next;
  106. pmd = pmd_alloc(&init_mm, pud, addr);
  107. if (!pmd)
  108. return -ENOMEM;
  109. do {
  110. next = pmd_addr_end(addr, end);
  111. if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
  112. return -ENOMEM;
  113. } while (pmd++, addr = next, addr != end);
  114. return 0;
  115. }
  116. static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
  117. unsigned long end, pgprot_t prot, struct page **pages, int *nr)
  118. {
  119. pud_t *pud;
  120. unsigned long next;
  121. pud = pud_alloc(&init_mm, pgd, addr);
  122. if (!pud)
  123. return -ENOMEM;
  124. do {
  125. next = pud_addr_end(addr, end);
  126. if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
  127. return -ENOMEM;
  128. } while (pud++, addr = next, addr != end);
  129. return 0;
  130. }
  131. /*
  132. * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
  133. * will have pfns corresponding to the "pages" array.
  134. *
  135. * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
  136. */
  137. static int vmap_page_range_noflush(unsigned long start, unsigned long end,
  138. pgprot_t prot, struct page **pages)
  139. {
  140. pgd_t *pgd;
  141. unsigned long next;
  142. unsigned long addr = start;
  143. int err = 0;
  144. int nr = 0;
  145. BUG_ON(addr >= end);
  146. pgd = pgd_offset_k(addr);
  147. do {
  148. next = pgd_addr_end(addr, end);
  149. err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
  150. if (err)
  151. return err;
  152. } while (pgd++, addr = next, addr != end);
  153. return nr;
  154. }
  155. static int vmap_page_range(unsigned long start, unsigned long end,
  156. pgprot_t prot, struct page **pages)
  157. {
  158. int ret;
  159. ret = vmap_page_range_noflush(start, end, prot, pages);
  160. flush_cache_vmap(start, end);
  161. return ret;
  162. }
  163. int is_vmalloc_or_module_addr(const void *x)
  164. {
  165. /*
  166. * ARM, x86-64 and sparc64 put modules in a special place,
  167. * and fall back on vmalloc() if that fails. Others
  168. * just put it in the vmalloc space.
  169. */
  170. #if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
  171. unsigned long addr = (unsigned long)x;
  172. if (addr >= MODULES_VADDR && addr < MODULES_END)
  173. return 1;
  174. #endif
  175. return is_vmalloc_addr(x);
  176. }
  177. /*
  178. * Walk a vmap address to the struct page it maps.
  179. */
  180. struct page *vmalloc_to_page(const void *vmalloc_addr)
  181. {
  182. unsigned long addr = (unsigned long) vmalloc_addr;
  183. struct page *page = NULL;
  184. pgd_t *pgd = pgd_offset_k(addr);
  185. /*
  186. * XXX we might need to change this if we add VIRTUAL_BUG_ON for
  187. * architectures that do not vmalloc module space
  188. */
  189. VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
  190. if (!pgd_none(*pgd)) {
  191. pud_t *pud = pud_offset(pgd, addr);
  192. if (!pud_none(*pud)) {
  193. pmd_t *pmd = pmd_offset(pud, addr);
  194. if (!pmd_none(*pmd)) {
  195. pte_t *ptep, pte;
  196. ptep = pte_offset_map(pmd, addr);
  197. pte = *ptep;
  198. if (pte_present(pte))
  199. page = pte_page(pte);
  200. pte_unmap(ptep);
  201. }
  202. }
  203. }
  204. return page;
  205. }
  206. EXPORT_SYMBOL(vmalloc_to_page);
  207. /*
  208. * Map a vmalloc()-space virtual address to the physical page frame number.
  209. */
  210. unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
  211. {
  212. return page_to_pfn(vmalloc_to_page(vmalloc_addr));
  213. }
  214. EXPORT_SYMBOL(vmalloc_to_pfn);
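/*
 * Editorial sketch (not part of the original file): how a caller might use
 * the two lookups above on a vmalloc() buffer. The example_ name is made up.
 */
static struct page *example_lookup_backing_page(void *vbuf, unsigned long off)
{
	void *addr = (char *)vbuf + off;

	/* Both helpers walk init_mm's page tables for vmalloc/module space. */
	pr_debug("pfn behind %p is %lu\n", addr, vmalloc_to_pfn(addr));
	return vmalloc_to_page(addr);
}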
  215. /*** Global kva allocator ***/
  216. #define VM_LAZY_FREE 0x01
  217. #define VM_LAZY_FREEING 0x02
  218. #define VM_VM_AREA 0x04
  219. struct vmap_area {
  220. unsigned long va_start;
  221. unsigned long va_end;
  222. unsigned long flags;
  223. struct rb_node rb_node; /* address sorted rbtree */
  224. struct list_head list; /* address sorted list */
  225. struct list_head purge_list; /* "lazy purge" list */
  226. void *private;
  227. struct rcu_head rcu_head;
  228. };
  229. static DEFINE_SPINLOCK(vmap_area_lock);
  230. static struct rb_root vmap_area_root = RB_ROOT;
  231. static LIST_HEAD(vmap_area_list);
  232. static unsigned long vmap_area_pcpu_hole;
  233. static struct vmap_area *__find_vmap_area(unsigned long addr)
  234. {
  235. struct rb_node *n = vmap_area_root.rb_node;
  236. while (n) {
  237. struct vmap_area *va;
  238. va = rb_entry(n, struct vmap_area, rb_node);
  239. if (addr < va->va_start)
  240. n = n->rb_left;
  241. else if (addr > va->va_start)
  242. n = n->rb_right;
  243. else
  244. return va;
  245. }
  246. return NULL;
  247. }
  248. static void __insert_vmap_area(struct vmap_area *va)
  249. {
  250. struct rb_node **p = &vmap_area_root.rb_node;
  251. struct rb_node *parent = NULL;
  252. struct rb_node *tmp;
  253. while (*p) {
  254. struct vmap_area *tmp_va;
  255. parent = *p;
  256. tmp_va = rb_entry(parent, struct vmap_area, rb_node);
  257. if (va->va_start < tmp_va->va_end)
  258. p = &(*p)->rb_left;
  259. else if (va->va_end > tmp_va->va_start)
  260. p = &(*p)->rb_right;
  261. else
  262. BUG();
  263. }
  264. rb_link_node(&va->rb_node, parent, p);
  265. rb_insert_color(&va->rb_node, &vmap_area_root);
  266. /* address-sort this list so it is usable like the vmlist */
  267. tmp = rb_prev(&va->rb_node);
  268. if (tmp) {
  269. struct vmap_area *prev;
  270. prev = rb_entry(tmp, struct vmap_area, rb_node);
  271. list_add_rcu(&va->list, &prev->list);
  272. } else
  273. list_add_rcu(&va->list, &vmap_area_list);
  274. }
  275. static void purge_vmap_area_lazy(void);
  276. /*
  277. * Allocate a region of KVA of the specified size and alignment, within the
  278. * vstart and vend.
  279. */
  280. static struct vmap_area *alloc_vmap_area(unsigned long size,
  281. unsigned long align,
  282. unsigned long vstart, unsigned long vend,
  283. int node, gfp_t gfp_mask)
  284. {
  285. struct vmap_area *va;
  286. struct rb_node *n;
  287. unsigned long addr;
  288. int purged = 0;
  289. BUG_ON(!size);
  290. BUG_ON(size & ~PAGE_MASK);
  291. va = kmalloc_node(sizeof(struct vmap_area),
  292. gfp_mask & GFP_RECLAIM_MASK, node);
  293. if (unlikely(!va))
  294. return ERR_PTR(-ENOMEM);
  295. retry:
  296. addr = ALIGN(vstart, align);
  297. spin_lock(&vmap_area_lock);
  298. if (addr + size - 1 < addr)
  299. goto overflow;
  300. /* XXX: could have a last_hole cache */
  301. n = vmap_area_root.rb_node;
  302. if (n) {
  303. struct vmap_area *first = NULL;
  304. do {
  305. struct vmap_area *tmp;
  306. tmp = rb_entry(n, struct vmap_area, rb_node);
  307. if (tmp->va_end >= addr) {
  308. if (!first && tmp->va_start < addr + size)
  309. first = tmp;
  310. n = n->rb_left;
  311. } else {
  312. first = tmp;
  313. n = n->rb_right;
  314. }
  315. } while (n);
  316. if (!first)
  317. goto found;
  318. if (first->va_end < addr) {
  319. n = rb_next(&first->rb_node);
  320. if (n)
  321. first = rb_entry(n, struct vmap_area, rb_node);
  322. else
  323. goto found;
  324. }
  325. while (addr + size > first->va_start && addr + size <= vend) {
  326. addr = ALIGN(first->va_end + PAGE_SIZE, align);
  327. if (addr + size - 1 < addr)
  328. goto overflow;
  329. n = rb_next(&first->rb_node);
  330. if (n)
  331. first = rb_entry(n, struct vmap_area, rb_node);
  332. else
  333. goto found;
  334. }
  335. }
  336. found:
  337. if (addr + size > vend) {
  338. overflow:
  339. spin_unlock(&vmap_area_lock);
  340. if (!purged) {
  341. purge_vmap_area_lazy();
  342. purged = 1;
  343. goto retry;
  344. }
  345. if (printk_ratelimit())
  346. printk(KERN_WARNING
  347. "vmap allocation for size %lu failed: "
  348. "use vmalloc=<size> to increase size.\n", size);
  349. kfree(va);
  350. return ERR_PTR(-EBUSY);
  351. }
  352. BUG_ON(addr & (align-1));
  353. va->va_start = addr;
  354. va->va_end = addr + size;
  355. va->flags = 0;
  356. __insert_vmap_area(va);
  357. spin_unlock(&vmap_area_lock);
  358. return va;
  359. }
  360. static void rcu_free_va(struct rcu_head *head)
  361. {
  362. struct vmap_area *va = container_of(head, struct vmap_area, rcu_head);
  363. kfree(va);
  364. }
  365. static void __free_vmap_area(struct vmap_area *va)
  366. {
  367. BUG_ON(RB_EMPTY_NODE(&va->rb_node));
  368. rb_erase(&va->rb_node, &vmap_area_root);
  369. RB_CLEAR_NODE(&va->rb_node);
  370. list_del_rcu(&va->list);
  371. /*
  372. * Track the highest possible candidate for pcpu area
  373. * allocation. Areas outside of vmalloc area can be returned
  374. * here too, consider only end addresses which fall inside
  375. * vmalloc area proper.
  376. */
  377. if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
  378. vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);
  379. call_rcu(&va->rcu_head, rcu_free_va);
  380. }
  381. /*
  382. * Free a region of KVA allocated by alloc_vmap_area
  383. */
  384. static void free_vmap_area(struct vmap_area *va)
  385. {
  386. spin_lock(&vmap_area_lock);
  387. __free_vmap_area(va);
  388. spin_unlock(&vmap_area_lock);
  389. }
  390. /*
  391. * Clear the pagetable entries of a given vmap_area
  392. */
  393. static void unmap_vmap_area(struct vmap_area *va)
  394. {
  395. vunmap_page_range(va->va_start, va->va_end);
  396. }
  397. static void vmap_debug_free_range(unsigned long start, unsigned long end)
  398. {
  399. /*
  400. * Unmap page tables and force a TLB flush immediately if
  401. * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
  402. * bugs similarly to those in linear kernel virtual address
  403. * space after a page has been freed.
  404. *
  405. * All the lazy freeing logic is still retained, in order to
  406. * minimise intrusiveness of this debugging feature.
  407. *
  408. * This is going to be *slow* (linear kernel virtual address
  409. * debugging doesn't do a broadcast TLB flush so it is a lot
  410. * faster).
  411. */
  412. #ifdef CONFIG_DEBUG_PAGEALLOC
  413. vunmap_page_range(start, end);
  414. flush_tlb_kernel_range(start, end);
  415. #endif
  416. }
  417. /*
  418. * lazy_max_pages is the maximum amount of virtual address space we gather up
  419. * before attempting to purge with a TLB flush.
  420. *
  421. * There is a tradeoff here: a larger number will cover more kernel page tables
  422. * and take slightly longer to purge, but it will linearly reduce the number of
  423. * global TLB flushes that must be performed. It would seem natural to scale
  424. * this number up linearly with the number of CPUs (because vmapping activity
  425. * could also scale linearly with the number of CPUs), however it is likely
  426. * that in practice, workloads might be constrained in other ways that mean
  427. * vmap activity will not scale linearly with CPUs. Also, I want to be
  428. * conservative and not introduce a big latency on huge systems, so go with
  429. * a less aggressive log scale. It will still be an improvement over the old
  430. * code, and it will be simple to change the scale factor if we find that it
  431. * becomes a problem on bigger systems.
  432. */
  433. static unsigned long lazy_max_pages(void)
  434. {
  435. unsigned int log;
  436. log = fls(num_online_cpus());
  437. return log * (32UL * 1024 * 1024 / PAGE_SIZE);
  438. }
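/*
 * Editorial worked example of the scaling above: with 4 KB pages,
 * 32UL * 1024 * 1024 / PAGE_SIZE is 8192 pages. On a 16-CPU system
 * fls(16) == 5, so up to 5 * 8192 = 40960 pages (160 MB) of lazily freed
 * virtual address space may accumulate before a purge and its global
 * TLB flush.
 */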
  439. static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
  440. /* for per-CPU blocks */
  441. static void purge_fragmented_blocks_allcpus(void);
  442. /*
  443. * called before a call to iounmap() if the caller wants vm_area_struct's
  444. * immediately freed.
  445. */
  446. void set_iounmap_nonlazy(void)
  447. {
  448. atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
  449. }
  450. /*
  451. * Purges all lazily-freed vmap areas.
  452. *
  453. * If sync is 0 then don't purge if there is already a purge in progress.
  454. * If force_flush is 1, then flush kernel TLBs between *start and *end even
  455. * if we found no lazy vmap areas to unmap (callers can use this to optimise
  456. * their own TLB flushing).
  457. * Returns with *start = min(*start, lowest purged address)
  458. * *end = max(*end, highest purged address)
  459. */
  460. static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
  461. int sync, int force_flush)
  462. {
  463. static DEFINE_SPINLOCK(purge_lock);
  464. LIST_HEAD(valist);
  465. struct vmap_area *va;
  466. struct vmap_area *n_va;
  467. int nr = 0;
  468. /*
  469. * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
  470. * should not expect such behaviour. This just simplifies locking for
  471. * the case that isn't actually used at the moment anyway.
  472. */
  473. if (!sync && !force_flush) {
  474. if (!spin_trylock(&purge_lock))
  475. return;
  476. } else
  477. spin_lock(&purge_lock);
  478. if (sync)
  479. purge_fragmented_blocks_allcpus();
  480. rcu_read_lock();
  481. list_for_each_entry_rcu(va, &vmap_area_list, list) {
  482. if (va->flags & VM_LAZY_FREE) {
  483. if (va->va_start < *start)
  484. *start = va->va_start;
  485. if (va->va_end > *end)
  486. *end = va->va_end;
  487. nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
  488. list_add_tail(&va->purge_list, &valist);
  489. va->flags |= VM_LAZY_FREEING;
  490. va->flags &= ~VM_LAZY_FREE;
  491. }
  492. }
  493. rcu_read_unlock();
  494. if (nr)
  495. atomic_sub(nr, &vmap_lazy_nr);
  496. if (nr || force_flush)
  497. flush_tlb_kernel_range(*start, *end);
  498. if (nr) {
  499. spin_lock(&vmap_area_lock);
  500. list_for_each_entry_safe(va, n_va, &valist, purge_list)
  501. __free_vmap_area(va);
  502. spin_unlock(&vmap_area_lock);
  503. }
  504. spin_unlock(&purge_lock);
  505. }
  506. /*
  507. * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
  508. * is already purging.
  509. */
  510. static void try_purge_vmap_area_lazy(void)
  511. {
  512. unsigned long start = ULONG_MAX, end = 0;
  513. __purge_vmap_area_lazy(&start, &end, 0, 0);
  514. }
  515. /*
  516. * Kick off a purge of the outstanding lazy areas.
  517. */
  518. static void purge_vmap_area_lazy(void)
  519. {
  520. unsigned long start = ULONG_MAX, end = 0;
  521. __purge_vmap_area_lazy(&start, &end, 1, 0);
  522. }
  523. /*
  524. * Free a vmap area, caller ensuring that the area has been unmapped
  525. * and flush_cache_vunmap has been called for the correct range
  526. * previously.
  527. */
  528. static void free_vmap_area_noflush(struct vmap_area *va)
  529. {
  530. va->flags |= VM_LAZY_FREE;
  531. atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
  532. if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
  533. try_purge_vmap_area_lazy();
  534. }
  535. /*
  536. * Free and unmap a vmap area, caller ensuring flush_cache_vunmap has been
  537. * called for the correct range previously.
  538. */
  539. static void free_unmap_vmap_area_noflush(struct vmap_area *va)
  540. {
  541. unmap_vmap_area(va);
  542. free_vmap_area_noflush(va);
  543. }
  544. /*
  545. * Free and unmap a vmap area
  546. */
  547. static void free_unmap_vmap_area(struct vmap_area *va)
  548. {
  549. flush_cache_vunmap(va->va_start, va->va_end);
  550. free_unmap_vmap_area_noflush(va);
  551. }
  552. static struct vmap_area *find_vmap_area(unsigned long addr)
  553. {
  554. struct vmap_area *va;
  555. spin_lock(&vmap_area_lock);
  556. va = __find_vmap_area(addr);
  557. spin_unlock(&vmap_area_lock);
  558. return va;
  559. }
  560. static void free_unmap_vmap_area_addr(unsigned long addr)
  561. {
  562. struct vmap_area *va;
  563. va = find_vmap_area(addr);
  564. BUG_ON(!va);
  565. free_unmap_vmap_area(va);
  566. }
  567. /*** Per cpu kva allocator ***/
  568. /*
  569. * vmap space is limited especially on 32 bit architectures. Ensure there is
  570. * room for at least 16 percpu vmap blocks per CPU.
  571. */
  572. /*
  573. * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
  574. * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess
  575. * instead (we just need a rough idea)
  576. */
  577. #if BITS_PER_LONG == 32
  578. #define VMALLOC_SPACE (128UL*1024*1024)
  579. #else
  580. #define VMALLOC_SPACE (128UL*1024*1024*1024)
  581. #endif
  582. #define VMALLOC_PAGES (VMALLOC_SPACE / PAGE_SIZE)
  583. #define VMAP_MAX_ALLOC BITS_PER_LONG /* 256K with 4K pages */
  584. #define VMAP_BBMAP_BITS_MAX 1024 /* 4MB with 4K pages */
  585. #define VMAP_BBMAP_BITS_MIN (VMAP_MAX_ALLOC*2)
  586. #define VMAP_MIN(x, y) ((x) < (y) ? (x) : (y)) /* can't use min() */
  587. #define VMAP_MAX(x, y) ((x) > (y) ? (x) : (y)) /* can't use max() */
  588. #define VMAP_BBMAP_BITS VMAP_MIN(VMAP_BBMAP_BITS_MAX, \
  589. VMAP_MAX(VMAP_BBMAP_BITS_MIN, \
  590. VMALLOC_PAGES / NR_CPUS / 16))
  591. #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE)
  592. static bool vmap_initialized __read_mostly = false;
  593. struct vmap_block_queue {
  594. spinlock_t lock;
  595. struct list_head free;
  596. };
  597. struct vmap_block {
  598. spinlock_t lock;
  599. struct vmap_area *va;
  600. struct vmap_block_queue *vbq;
  601. unsigned long free, dirty;
  602. DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
  603. DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
  604. struct list_head free_list;
  605. struct rcu_head rcu_head;
  606. struct list_head purge;
  607. };
  608. /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
  609. static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
  610. /*
  611. * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
  612. * in the free path. Could get rid of this if we change the API to return a
  613. * "cookie" from alloc, to be passed to free. But no big deal yet.
  614. */
  615. static DEFINE_SPINLOCK(vmap_block_tree_lock);
  616. static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);
  617. /*
  618. * We should probably have a fallback mechanism to allocate virtual memory
  619. * out of partially filled vmap blocks. However vmap block sizing should be
  620. * fairly reasonable according to the vmalloc size, so it shouldn't be a
  621. * big problem.
  622. */
  623. static unsigned long addr_to_vb_idx(unsigned long addr)
  624. {
  625. addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
  626. addr /= VMAP_BLOCK_SIZE;
  627. return addr;
  628. }
  629. static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
  630. {
  631. struct vmap_block_queue *vbq;
  632. struct vmap_block *vb;
  633. struct vmap_area *va;
  634. unsigned long vb_idx;
  635. int node, err;
  636. node = numa_node_id();
  637. vb = kmalloc_node(sizeof(struct vmap_block),
  638. gfp_mask & GFP_RECLAIM_MASK, node);
  639. if (unlikely(!vb))
  640. return ERR_PTR(-ENOMEM);
  641. va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
  642. VMALLOC_START, VMALLOC_END,
  643. node, gfp_mask);
  644. if (IS_ERR(va)) {
  645. kfree(vb);
  646. return ERR_CAST(va);
  647. }
  648. err = radix_tree_preload(gfp_mask);
  649. if (unlikely(err)) {
  650. kfree(vb);
  651. free_vmap_area(va);
  652. return ERR_PTR(err);
  653. }
  654. spin_lock_init(&vb->lock);
  655. vb->va = va;
  656. vb->free = VMAP_BBMAP_BITS;
  657. vb->dirty = 0;
  658. bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
  659. bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
  660. INIT_LIST_HEAD(&vb->free_list);
  661. vb_idx = addr_to_vb_idx(va->va_start);
  662. spin_lock(&vmap_block_tree_lock);
  663. err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
  664. spin_unlock(&vmap_block_tree_lock);
  665. BUG_ON(err);
  666. radix_tree_preload_end();
  667. vbq = &get_cpu_var(vmap_block_queue);
  668. vb->vbq = vbq;
  669. spin_lock(&vbq->lock);
  670. list_add_rcu(&vb->free_list, &vbq->free);
  671. spin_unlock(&vbq->lock);
  672. put_cpu_var(vmap_block_queue);
  673. return vb;
  674. }
  675. static void rcu_free_vb(struct rcu_head *head)
  676. {
  677. struct vmap_block *vb = container_of(head, struct vmap_block, rcu_head);
  678. kfree(vb);
  679. }
  680. static void free_vmap_block(struct vmap_block *vb)
  681. {
  682. struct vmap_block *tmp;
  683. unsigned long vb_idx;
  684. vb_idx = addr_to_vb_idx(vb->va->va_start);
  685. spin_lock(&vmap_block_tree_lock);
  686. tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
  687. spin_unlock(&vmap_block_tree_lock);
  688. BUG_ON(tmp != vb);
  689. free_vmap_area_noflush(vb->va);
  690. call_rcu(&vb->rcu_head, rcu_free_vb);
  691. }
  692. static void purge_fragmented_blocks(int cpu)
  693. {
  694. LIST_HEAD(purge);
  695. struct vmap_block *vb;
  696. struct vmap_block *n_vb;
  697. struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
  698. rcu_read_lock();
  699. list_for_each_entry_rcu(vb, &vbq->free, free_list) {
  700. if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
  701. continue;
  702. spin_lock(&vb->lock);
  703. if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
  704. vb->free = 0; /* prevent further allocs after releasing lock */
  705. vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
  706. bitmap_fill(vb->alloc_map, VMAP_BBMAP_BITS);
  707. bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
  708. spin_lock(&vbq->lock);
  709. list_del_rcu(&vb->free_list);
  710. spin_unlock(&vbq->lock);
  711. spin_unlock(&vb->lock);
  712. list_add_tail(&vb->purge, &purge);
  713. } else
  714. spin_unlock(&vb->lock);
  715. }
  716. rcu_read_unlock();
  717. list_for_each_entry_safe(vb, n_vb, &purge, purge) {
  718. list_del(&vb->purge);
  719. free_vmap_block(vb);
  720. }
  721. }
  722. static void purge_fragmented_blocks_thiscpu(void)
  723. {
  724. purge_fragmented_blocks(smp_processor_id());
  725. }
  726. static void purge_fragmented_blocks_allcpus(void)
  727. {
  728. int cpu;
  729. for_each_possible_cpu(cpu)
  730. purge_fragmented_blocks(cpu);
  731. }
  732. static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
  733. {
  734. struct vmap_block_queue *vbq;
  735. struct vmap_block *vb;
  736. unsigned long addr = 0;
  737. unsigned int order;
  738. int purge = 0;
  739. BUG_ON(size & ~PAGE_MASK);
  740. BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
  741. order = get_order(size);
  742. again:
  743. rcu_read_lock();
  744. vbq = &get_cpu_var(vmap_block_queue);
  745. list_for_each_entry_rcu(vb, &vbq->free, free_list) {
  746. int i;
  747. spin_lock(&vb->lock);
  748. if (vb->free < 1UL << order)
  749. goto next;
  750. i = bitmap_find_free_region(vb->alloc_map,
  751. VMAP_BBMAP_BITS, order);
  752. if (i < 0) {
  753. if (vb->free + vb->dirty == VMAP_BBMAP_BITS) {
  754. /* fragmented and no outstanding allocations */
  755. BUG_ON(vb->dirty != VMAP_BBMAP_BITS);
  756. purge = 1;
  757. }
  758. goto next;
  759. }
  760. addr = vb->va->va_start + (i << PAGE_SHIFT);
  761. BUG_ON(addr_to_vb_idx(addr) !=
  762. addr_to_vb_idx(vb->va->va_start));
  763. vb->free -= 1UL << order;
  764. if (vb->free == 0) {
  765. spin_lock(&vbq->lock);
  766. list_del_rcu(&vb->free_list);
  767. spin_unlock(&vbq->lock);
  768. }
  769. spin_unlock(&vb->lock);
  770. break;
  771. next:
  772. spin_unlock(&vb->lock);
  773. }
  774. if (purge)
  775. purge_fragmented_blocks_thiscpu();
  776. put_cpu_var(vmap_block_queue);
  777. rcu_read_unlock();
  778. if (!addr) {
  779. vb = new_vmap_block(gfp_mask);
  780. if (IS_ERR(vb))
  781. return vb;
  782. goto again;
  783. }
  784. return (void *)addr;
  785. }
  786. static void vb_free(const void *addr, unsigned long size)
  787. {
  788. unsigned long offset;
  789. unsigned long vb_idx;
  790. unsigned int order;
  791. struct vmap_block *vb;
  792. BUG_ON(size & ~PAGE_MASK);
  793. BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
  794. flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);
  795. order = get_order(size);
  796. offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
  797. vb_idx = addr_to_vb_idx((unsigned long)addr);
  798. rcu_read_lock();
  799. vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
  800. rcu_read_unlock();
  801. BUG_ON(!vb);
  802. vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
  803. spin_lock(&vb->lock);
  804. BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));
  805. vb->dirty += 1UL << order;
  806. if (vb->dirty == VMAP_BBMAP_BITS) {
  807. BUG_ON(vb->free);
  808. spin_unlock(&vb->lock);
  809. free_vmap_block(vb);
  810. } else
  811. spin_unlock(&vb->lock);
  812. }
  813. /**
  814. * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
  815. *
  816. * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
  817. * to amortize TLB flushing overheads. What this means is that any page you
  818. * have now, may, in a former life, have been mapped into kernel virtual
  819. * address by the vmap layer and so there might be some CPUs with TLB entries
  820. * still referencing that page (additional to the regular 1:1 kernel mapping).
  821. *
  822. * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
  823. * be sure that none of the pages we have control over will have any aliases
  824. * from the vmap layer.
  825. */
  826. void vm_unmap_aliases(void)
  827. {
  828. unsigned long start = ULONG_MAX, end = 0;
  829. int cpu;
  830. int flush = 0;
  831. if (unlikely(!vmap_initialized))
  832. return;
  833. for_each_possible_cpu(cpu) {
  834. struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
  835. struct vmap_block *vb;
  836. rcu_read_lock();
  837. list_for_each_entry_rcu(vb, &vbq->free, free_list) {
  838. int i;
  839. spin_lock(&vb->lock);
  840. i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
  841. while (i < VMAP_BBMAP_BITS) {
  842. unsigned long s, e;
  843. int j;
  844. j = find_next_zero_bit(vb->dirty_map,
  845. VMAP_BBMAP_BITS, i);
  846. s = vb->va->va_start + (i << PAGE_SHIFT);
  847. e = vb->va->va_start + (j << PAGE_SHIFT);
  848. flush = 1;
  849. if (s < start)
  850. start = s;
  851. if (e > end)
  852. end = e;
  853. i = j;
  854. i = find_next_bit(vb->dirty_map,
  855. VMAP_BBMAP_BITS, i);
  856. }
  857. spin_unlock(&vb->lock);
  858. }
  859. rcu_read_unlock();
  860. }
  861. __purge_vmap_area_lazy(&start, &end, 1, flush);
  862. }
  863. EXPORT_SYMBOL_GPL(vm_unmap_aliases);
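/*
 * Editorial sketch (not part of the original file): a caller about to change
 * the attributes of pages it owns can drop any lazy vmap aliases first.
 * set_memory_uc() is only a plausible x86 example of such a caller.
 */
static int example_make_range_uncached(unsigned long addr, int numpages)
{
	vm_unmap_aliases();		/* no stale aliases may remain in TLBs */
	return set_memory_uc(addr, numpages);
}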
  864. /**
  865. * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
  866. * @mem: the pointer returned by vm_map_ram
  867. * @count: the count passed to that vm_map_ram call (cannot unmap partial)
  868. */
  869. void vm_unmap_ram(const void *mem, unsigned int count)
  870. {
  871. unsigned long size = count << PAGE_SHIFT;
  872. unsigned long addr = (unsigned long)mem;
  873. BUG_ON(!addr);
  874. BUG_ON(addr < VMALLOC_START);
  875. BUG_ON(addr > VMALLOC_END);
  876. BUG_ON(addr & (PAGE_SIZE-1));
  877. debug_check_no_locks_freed(mem, size);
  878. vmap_debug_free_range(addr, addr+size);
  879. if (likely(count <= VMAP_MAX_ALLOC))
  880. vb_free(mem, size);
  881. else
  882. free_unmap_vmap_area_addr(addr);
  883. }
  884. EXPORT_SYMBOL(vm_unmap_ram);
  885. /**
  886. * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
  887. * @pages: an array of pointers to the pages to be mapped
  888. * @count: number of pages
  889. * @node: prefer to allocate data structures on this node
  890. * @prot: memory protection to use. PAGE_KERNEL for regular RAM
  891. *
  892. * Returns: a pointer to the address that has been mapped, or %NULL on failure
  893. */
  894. void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
  895. {
  896. unsigned long size = count << PAGE_SHIFT;
  897. unsigned long addr;
  898. void *mem;
  899. if (likely(count <= VMAP_MAX_ALLOC)) {
  900. mem = vb_alloc(size, GFP_KERNEL);
  901. if (IS_ERR(mem))
  902. return NULL;
  903. addr = (unsigned long)mem;
  904. } else {
  905. struct vmap_area *va;
  906. va = alloc_vmap_area(size, PAGE_SIZE,
  907. VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
  908. if (IS_ERR(va))
  909. return NULL;
  910. addr = va->va_start;
  911. mem = (void *)addr;
  912. }
  913. if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
  914. vm_unmap_ram(mem, count);
  915. return NULL;
  916. }
  917. return mem;
  918. }
  919. EXPORT_SYMBOL(vm_map_ram);
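/*
 * Editorial sketch (not part of the original file): transiently mapping an
 * existing page array with vm_map_ram(). Counts up to VMAP_MAX_ALLOC are
 * served by the per-CPU block allocator above; larger ones fall back to
 * alloc_vmap_area().
 */
static int example_zero_pages(struct page **pages, unsigned int count)
{
	void *mem = vm_map_ram(pages, count, -1, PAGE_KERNEL);

	if (!mem)
		return -ENOMEM;
	memset(mem, 0, (size_t)count << PAGE_SHIFT);
	vm_unmap_ram(mem, count);	/* count must match the map call */
	return 0;
}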
  920. /**
  921. * vm_area_register_early - register vmap area early during boot
  922. * @vm: vm_struct to register
  923. * @align: requested alignment
  924. *
  925. * This function is used to register kernel vm area before
  926. * vmalloc_init() is called. @vm->size and @vm->flags should contain
  927. * proper values on entry and other fields should be zero. On return,
  928. * vm->addr contains the allocated address.
  929. *
  930. * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
  931. */
  932. void __init vm_area_register_early(struct vm_struct *vm, size_t align)
  933. {
  934. static size_t vm_init_off __initdata;
  935. unsigned long addr;
  936. addr = ALIGN(VMALLOC_START + vm_init_off, align);
  937. vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
  938. vm->addr = (void *)addr;
  939. vm->next = vmlist;
  940. vmlist = vm;
  941. }
  942. void __init vmalloc_init(void)
  943. {
  944. struct vmap_area *va;
  945. struct vm_struct *tmp;
  946. int i;
  947. for_each_possible_cpu(i) {
  948. struct vmap_block_queue *vbq;
  949. vbq = &per_cpu(vmap_block_queue, i);
  950. spin_lock_init(&vbq->lock);
  951. INIT_LIST_HEAD(&vbq->free);
  952. }
  953. /* Import existing vmlist entries. */
  954. for (tmp = vmlist; tmp; tmp = tmp->next) {
  955. va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
  956. va->flags = tmp->flags | VM_VM_AREA;
  957. va->va_start = (unsigned long)tmp->addr;
  958. va->va_end = va->va_start + tmp->size;
  959. __insert_vmap_area(va);
  960. }
  961. vmap_area_pcpu_hole = VMALLOC_END;
  962. vmap_initialized = true;
  963. }
  964. /**
  965. * map_kernel_range_noflush - map kernel VM area with the specified pages
  966. * @addr: start of the VM area to map
  967. * @size: size of the VM area to map
  968. * @prot: page protection flags to use
  969. * @pages: pages to map
  970. *
  971. * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size
  972. * specify should have been allocated using get_vm_area() and its
  973. * friends.
  974. *
  975. * NOTE:
  976. * This function does NOT do any cache flushing. The caller is
  977. * responsible for calling flush_cache_vmap() on to-be-mapped areas
  978. * before calling this function.
  979. *
  980. * RETURNS:
  981. * The number of pages mapped on success, -errno on failure.
  982. */
  983. int map_kernel_range_noflush(unsigned long addr, unsigned long size,
  984. pgprot_t prot, struct page **pages)
  985. {
  986. return vmap_page_range_noflush(addr, addr + size, prot, pages);
  987. }
  988. /**
  989. * unmap_kernel_range_noflush - unmap kernel VM area
  990. * @addr: start of the VM area to unmap
  991. * @size: size of the VM area to unmap
  992. *
  993. * Unmap PFN_UP(@size) pages at @addr. The VM area @addr and @size
  994. * specify should have been allocated using get_vm_area() and its
  995. * friends.
  996. *
  997. * NOTE:
  998. * This function does NOT do any cache flushing. The caller is
  999. * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
  1000. * before calling this function and flush_tlb_kernel_range() after.
  1001. */
  1002. void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
  1003. {
  1004. vunmap_page_range(addr, addr + size);
  1005. }
  1006. EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);
  1007. /**
  1008. * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
  1009. * @addr: start of the VM area to unmap
  1010. * @size: size of the VM area to unmap
  1011. *
  1012. * Similar to unmap_kernel_range_noflush() but flushes vcache before
  1013. * the unmapping and tlb after.
  1014. */
  1015. void unmap_kernel_range(unsigned long addr, unsigned long size)
  1016. {
  1017. unsigned long end = addr + size;
  1018. flush_cache_vunmap(addr, end);
  1019. vunmap_page_range(addr, end);
  1020. flush_tlb_kernel_range(addr, end);
  1021. }
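/*
 * Editorial sketch (not part of the original file): pairing the *_noflush
 * mapping call with the flushes it leaves to the caller, mirroring what
 * vmap_page_range() and unmap_kernel_range() above do.
 */
static int example_map_reserved_range(unsigned long addr, unsigned long size,
				      struct page **pages)
{
	int ret;

	ret = map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages);
	if (ret < 0)
		return ret;
	flush_cache_vmap(addr, addr + size);
	/* ... use the mapping, then tear it down with cache and TLB flushes ... */
	unmap_kernel_range(addr, size);
	return 0;
}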
  1022. int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
  1023. {
  1024. unsigned long addr = (unsigned long)area->addr;
  1025. unsigned long end = addr + area->size - PAGE_SIZE;
  1026. int err;
  1027. err = vmap_page_range(addr, end, prot, *pages);
  1028. if (err > 0) {
  1029. *pages += err;
  1030. err = 0;
  1031. }
  1032. return err;
  1033. }
  1034. EXPORT_SYMBOL_GPL(map_vm_area);
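/*
 * Editorial sketch (not part of the original file): the classic pattern of
 * reserving a VM area and backing it with pages via map_vm_area(); this is
 * essentially what vmap() below does. Note that map_vm_area() advances the
 * caller's pages pointer as it consumes the array.
 */
static void *example_map_page_array(struct page **pages, unsigned int count)
{
	struct vm_struct *area;

	area = get_vm_area(count << PAGE_SHIFT, VM_MAP);
	if (!area)
		return NULL;
	if (map_vm_area(area, PAGE_KERNEL, &pages)) {
		vunmap(area->addr);	/* removes the area again */
		return NULL;
	}
	return area->addr;		/* release later with vunmap() */
}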
  1035. /*** Old vmalloc interfaces ***/
  1036. DEFINE_RWLOCK(vmlist_lock);
  1037. struct vm_struct *vmlist;
  1038. static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
  1039. unsigned long flags, void *caller)
  1040. {
  1041. struct vm_struct *tmp, **p;
  1042. vm->flags = flags;
  1043. vm->addr = (void *)va->va_start;
  1044. vm->size = va->va_end - va->va_start;
  1045. vm->caller = caller;
  1046. va->private = vm;
  1047. va->flags |= VM_VM_AREA;
  1048. write_lock(&vmlist_lock);
  1049. for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
  1050. if (tmp->addr >= vm->addr)
  1051. break;
  1052. }
  1053. vm->next = *p;
  1054. *p = vm;
  1055. write_unlock(&vmlist_lock);
  1056. }
  1057. static struct vm_struct *__get_vm_area_node(unsigned long size,
  1058. unsigned long align, unsigned long flags, unsigned long start,
  1059. unsigned long end, int node, gfp_t gfp_mask, void *caller)
  1060. {
  1061. static struct vmap_area *va;
  1062. struct vm_struct *area;
  1063. BUG_ON(in_interrupt());
  1064. if (flags & VM_IOREMAP) {
  1065. int bit = fls(size);
  1066. if (bit > IOREMAP_MAX_ORDER)
  1067. bit = IOREMAP_MAX_ORDER;
  1068. else if (bit < PAGE_SHIFT)
  1069. bit = PAGE_SHIFT;
  1070. align = 1ul << bit;
  1071. }
  1072. size = PAGE_ALIGN(size);
  1073. if (unlikely(!size))
  1074. return NULL;
  1075. area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
  1076. if (unlikely(!area))
  1077. return NULL;
  1078. /*
  1079. * We always allocate a guard page.
  1080. */
  1081. size += PAGE_SIZE;
  1082. va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
  1083. if (IS_ERR(va)) {
  1084. kfree(area);
  1085. return NULL;
  1086. }
  1087. insert_vmalloc_vm(area, va, flags, caller);
  1088. return area;
  1089. }
  1090. struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
  1091. unsigned long start, unsigned long end)
  1092. {
  1093. return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
  1094. __builtin_return_address(0));
  1095. }
  1096. EXPORT_SYMBOL_GPL(__get_vm_area);
  1097. struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
  1098. unsigned long start, unsigned long end,
  1099. void *caller)
  1100. {
  1101. return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
  1102. caller);
  1103. }
  1104. /**
  1105. * get_vm_area - reserve a contiguous kernel virtual area
  1106. * @size: size of the area
  1107. * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
  1108. *
  1109. * Search an area of @size in the kernel virtual mapping area,
  1110. * and reserve it for our purposes. Returns the area descriptor
  1111. * on success or %NULL on failure.
  1112. */
  1113. struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
  1114. {
  1115. return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
  1116. -1, GFP_KERNEL, __builtin_return_address(0));
  1117. }
  1118. struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
  1119. void *caller)
  1120. {
  1121. return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
  1122. -1, GFP_KERNEL, caller);
  1123. }
  1124. static struct vm_struct *find_vm_area(const void *addr)
  1125. {
  1126. struct vmap_area *va;
  1127. va = find_vmap_area((unsigned long)addr);
  1128. if (va && va->flags & VM_VM_AREA)
  1129. return va->private;
  1130. return NULL;
  1131. }
  1132. /**
  1133. * remove_vm_area - find and remove a contiguous kernel virtual area
  1134. * @addr: base address
  1135. *
  1136. * Search for the kernel VM area starting at @addr, and remove it.
  1137. * This function returns the found VM area, but using it is NOT safe
  1138. * on SMP machines, except for its size or flags.
  1139. */
  1140. struct vm_struct *remove_vm_area(const void *addr)
  1141. {
  1142. struct vmap_area *va;
  1143. va = find_vmap_area((unsigned long)addr);
  1144. if (va && va->flags & VM_VM_AREA) {
  1145. struct vm_struct *vm = va->private;
  1146. struct vm_struct *tmp, **p;
  1147. /*
  1148. * remove from list and disallow access to this vm_struct
  1149. * before unmap. (address range conflict checking is handled by
  1150. * the vmap layer.)
  1151. */
  1152. write_lock(&vmlist_lock);
  1153. for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
  1154. ;
  1155. *p = tmp->next;
  1156. write_unlock(&vmlist_lock);
  1157. vmap_debug_free_range(va->va_start, va->va_end);
  1158. free_unmap_vmap_area(va);
  1159. vm->size -= PAGE_SIZE;
  1160. return vm;
  1161. }
  1162. return NULL;
  1163. }
  1164. static void __vunmap(const void *addr, int deallocate_pages)
  1165. {
  1166. struct vm_struct *area;
  1167. if (!addr)
  1168. return;
  1169. if ((PAGE_SIZE-1) & (unsigned long)addr) {
  1170. WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
  1171. return;
  1172. }
  1173. area = remove_vm_area(addr);
  1174. if (unlikely(!area)) {
  1175. WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
  1176. addr);
  1177. return;
  1178. }
  1179. debug_check_no_locks_freed(addr, area->size);
  1180. debug_check_no_obj_freed(addr, area->size);
  1181. if (deallocate_pages) {
  1182. int i;
  1183. for (i = 0; i < area->nr_pages; i++) {
  1184. struct page *page = area->pages[i];
  1185. BUG_ON(!page);
  1186. __free_page(page);
  1187. }
  1188. if (area->flags & VM_VPAGES)
  1189. vfree(area->pages);
  1190. else
  1191. kfree(area->pages);
  1192. }
  1193. kfree(area);
  1194. return;
  1195. }
  1196. /**
  1197. * vfree - release memory allocated by vmalloc()
  1198. * @addr: memory base address
  1199. *
  1200. * Free the virtually contiguous memory area starting at @addr, as
  1201. * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
  1202. * NULL, no operation is performed.
  1203. *
  1204. * Must not be called in interrupt context.
  1205. */
  1206. void vfree(const void *addr)
  1207. {
  1208. BUG_ON(in_interrupt());
  1209. kmemleak_free(addr);
  1210. __vunmap(addr, 1);
  1211. }
  1212. EXPORT_SYMBOL(vfree);
  1213. /**
  1214. * vunmap - release virtual mapping obtained by vmap()
  1215. * @addr: memory base address
  1216. *
  1217. * Free the virtually contiguous memory area starting at @addr,
  1218. * which was created from the page array passed to vmap().
  1219. *
  1220. * Must not be called in interrupt context.
  1221. */
  1222. void vunmap(const void *addr)
  1223. {
  1224. BUG_ON(in_interrupt());
  1225. might_sleep();
  1226. __vunmap(addr, 0);
  1227. }
  1228. EXPORT_SYMBOL(vunmap);
  1229. /**
  1230. * vmap - map an array of pages into virtually contiguous space
  1231. * @pages: array of page pointers
  1232. * @count: number of pages to map
  1233. * @flags: vm_area->flags
  1234. * @prot: page protection for the mapping
  1235. *
  1236. * Maps @count pages from @pages into contiguous kernel virtual
  1237. * space.
  1238. */
  1239. void *vmap(struct page **pages, unsigned int count,
  1240. unsigned long flags, pgprot_t prot)
  1241. {
  1242. struct vm_struct *area;
  1243. might_sleep();
  1244. if (count > totalram_pages)
  1245. return NULL;
  1246. area = get_vm_area_caller((count << PAGE_SHIFT), flags,
  1247. __builtin_return_address(0));
  1248. if (!area)
  1249. return NULL;
  1250. if (map_vm_area(area, prot, &pages)) {
  1251. vunmap(area->addr);
  1252. return NULL;
  1253. }
  1254. return area->addr;
  1255. }
  1256. EXPORT_SYMBOL(vmap);
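/*
 * Editorial sketch (not part of the original file): making two freshly
 * allocated pages virtually contiguous with vmap(). Error unwinding is
 * abbreviated; a real caller frees whatever it managed to allocate.
 */
static void *example_vmap_two_pages(struct page *pg[2])
{
	pg[0] = alloc_page(GFP_KERNEL);
	pg[1] = alloc_page(GFP_KERNEL);
	if (!pg[0] || !pg[1])
		return NULL;
	/* undo later with vunmap(), then __free_page() each page */
	return vmap(pg, 2, VM_MAP, PAGE_KERNEL);
}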
  1257. static void *__vmalloc_node(unsigned long size, unsigned long align,
  1258. gfp_t gfp_mask, pgprot_t prot,
  1259. int node, void *caller);
  1260. static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
  1261. pgprot_t prot, int node, void *caller)
  1262. {
  1263. struct page **pages;
  1264. unsigned int nr_pages, array_size, i;
  1265. gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
  1266. nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
  1267. array_size = (nr_pages * sizeof(struct page *));
  1268. area->nr_pages = nr_pages;
  1269. /* Please note that the recursion is strictly bounded. */
  1270. if (array_size > PAGE_SIZE) {
  1271. pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
  1272. PAGE_KERNEL, node, caller);
  1273. area->flags |= VM_VPAGES;
  1274. } else {
  1275. pages = kmalloc_node(array_size, nested_gfp, node);
  1276. }
  1277. area->pages = pages;
  1278. area->caller = caller;
  1279. if (!area->pages) {
  1280. remove_vm_area(area->addr);
  1281. kfree(area);
  1282. return NULL;
  1283. }
  1284. for (i = 0; i < area->nr_pages; i++) {
  1285. struct page *page;
  1286. if (node < 0)
  1287. page = alloc_page(gfp_mask);
  1288. else
  1289. page = alloc_pages_node(node, gfp_mask, 0);
  1290. if (unlikely(!page)) {
  1291. /* Successfully allocated i pages, free them in __vunmap() */
  1292. area->nr_pages = i;
  1293. goto fail;
  1294. }
  1295. area->pages[i] = page;
  1296. }
  1297. if (map_vm_area(area, prot, &pages))
  1298. goto fail;
  1299. return area->addr;
  1300. fail:
  1301. vfree(area->addr);
  1302. return NULL;
  1303. }
  1304. /**
  1305. * __vmalloc_node_range - allocate virtually contiguous memory
  1306. * @size: allocation size
  1307. * @align: desired alignment
  1308. * @start: vm area range start
  1309. * @end: vm area range end
  1310. * @gfp_mask: flags for the page level allocator
  1311. * @prot: protection mask for the allocated pages
  1312. * @node: node to use for allocation or -1
  1313. * @caller: caller's return address
  1314. *
  1315. * Allocate enough pages to cover @size from the page level
  1316. * allocator with @gfp_mask flags. Map them into contiguous
  1317. * kernel virtual space, using a pagetable protection of @prot.
  1318. */
  1319. void *__vmalloc_node_range(unsigned long size, unsigned long align,
  1320. unsigned long start, unsigned long end, gfp_t gfp_mask,
  1321. pgprot_t prot, int node, void *caller)
  1322. {
  1323. struct vm_struct *area;
  1324. void *addr;
  1325. unsigned long real_size = size;
  1326. size = PAGE_ALIGN(size);
  1327. if (!size || (size >> PAGE_SHIFT) > totalram_pages)
  1328. return NULL;
  1329. area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
  1330. gfp_mask, caller);
  1331. if (!area)
  1332. return NULL;
  1333. addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
  1334. /*
  1335. * A ref_count = 3 is needed because the vm_struct and vmap_area
  1336. * structures allocated in the __get_vm_area_node() function contain
  1337. * references to the virtual address of the vmalloc'ed block.
  1338. */
  1339. kmemleak_alloc(addr, real_size, 3, gfp_mask);
  1340. return addr;
  1341. }
/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @align: desired alignment
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or -1
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags. Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, unsigned long align,
			    gfp_t gfp_mask, pgprot_t prot,
			    int node, void *caller)
{
	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
				gfp_mask, prot, node, caller);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, 1, gfp_mask, prot, -1,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

static inline void *__vmalloc_node_flags(unsigned long size,
					int node, gfp_t flags)
{
	return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
					node, __builtin_return_address(0));
}

/**
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
}
EXPORT_SYMBOL(vmalloc);
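
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * typical vmalloc()/vfree() use for a large, purely kernel-internal buffer
 * where physical contiguity is not required. The table name and entry count
 * are hypothetical; vzalloc() would fold the memset into the allocation.
 */
#if 0	/* example only, never compiled */
static int example_build_table(void)
{
	unsigned long *table;
	size_t nents = 1024 * 1024;

	/* May sleep; only valid from process context. */
	table = vmalloc(nents * sizeof(*table));
	if (!table)
		return -ENOMEM;

	memset(table, 0, nents * sizeof(*table));
	/* ... use the table ... */

	vfree(table);
	return 0;
}
#endif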
/**
 * vzalloc - allocate virtually contiguous memory with zero fill
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc_node_flags(size, -1,
				GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc);

/**
 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
 * @size: allocation size
 *
 * The resulting memory area is zeroed so it can be mapped to userspace
 * without leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, SHMLBA,
			     GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			     PAGE_KERNEL, -1, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
			      node, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_node);
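
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * allocating one scratch buffer per online NUMA node with vmalloc_node(),
 * so the backing pages come from the node that will mostly touch them.
 * The array name and the unwind strategy are hypothetical.
 */
#if 0	/* example only, never compiled */
static void *example_scratch[MAX_NUMNODES];

static int example_alloc_per_node(size_t bytes)
{
	int node;

	for_each_online_node(node) {
		example_scratch[node] = vmalloc_node(bytes, node);
		if (!example_scratch[node])
			return -ENOMEM;	/* caller would unwind with vfree() */
	}
	return 0;
}
#endif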
/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc_node() instead.
 */
void *vzalloc_node(unsigned long size, int node)
{
	return __vmalloc_node_flags(size, node,
			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
			      -1, __builtin_return_address(0));
}
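
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * copying a code template into an executable vmalloc area, as a module
 * loader or simple code patcher might. The template symbol is hypothetical,
 * and a real user would also perform whatever instruction-cache maintenance
 * the architecture requires before jumping to the buffer.
 */
#if 0	/* example only, never compiled */
static void *example_install_code(const void *tmpl, size_t len)
{
	void *code = vmalloc_exec(len);

	if (!code)
		return NULL;
	memcpy(code, tmpl, len);
	/* arch-specific icache flush would go here */
	return code;
}
#endif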
#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
#else
#define GFP_VMALLOC32 GFP_KERNEL
#endif

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
			      -1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc_32);
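
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a buffer whose backing pages must all be 32-bit physically addressable,
 * e.g. for a device that can only DMA below 4GB and scatter-gathers the
 * area page by page. The function name and size are hypothetical.
 */
#if 0	/* example only, never compiled */
static void *example_alloc_low_buffer(void)
{
	/* Pages come from ZONE_DMA32/ZONE_DMA where the arch needs it. */
	return vmalloc_32(2 * 1024 * 1024);
}
#endif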
/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
			     -1, __builtin_return_address(0));
	if (ret) {
		area = find_vm_area(ret);
		area->flags |= VM_USERMAP;
	}
	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);
/*
 * Small helper routine: copy contents from addr into buf.
 * If a page is not present, the corresponding bytes are zero-filled.
 */
static int aligned_vread(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = (unsigned long)addr & ~PAGE_MASK;
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need a
		 * lock. But adding a lock here means adding the overhead
		 * of vmalloc()/vfree() calls to this _debug_ interface,
		 * which is rarely used. Instead, we use kmap() and accept
		 * a small overhead in this access function.
		 */
		if (p) {
			/*
			 * We can expect USER0 is not used (see vread/vwrite's
			 * function description).
			 */
			void *map = kmap_atomic(p, KM_USER0);
			memcpy(buf, map + offset, length);
			kunmap_atomic(map, KM_USER0);
		} else
			memset(buf, 0, length);

		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}
static int aligned_vwrite(char *buf, char *addr, unsigned long count)
{
	struct page *p;
	int copied = 0;

	while (count) {
		unsigned long offset, length;

		offset = (unsigned long)addr & ~PAGE_MASK;
		length = PAGE_SIZE - offset;
		if (length > count)
			length = count;
		p = vmalloc_to_page(addr);
		/*
		 * To do safe access to this _mapped_ area, we need a
		 * lock. But adding a lock here means adding the overhead
		 * of vmalloc()/vfree() calls to this _debug_ interface,
		 * which is rarely used. Instead, we use kmap() and accept
		 * a small overhead in this access function.
		 */
		if (p) {
			/*
			 * We can expect USER0 is not used (see vread/vwrite's
			 * function description).
			 */
			void *map = kmap_atomic(p, KM_USER0);
			memcpy(map + offset, buf, length);
			kunmap_atomic(map, KM_USER0);
		}

		addr += length;
		buf += length;
		copied += length;
		count -= length;
	}
	return copied;
}
/**
 * vread() - read vmalloc area in a safe way.
 * @buf: buffer for reading data
 * @addr: vm address.
 * @count: number of bytes to be read.
 *
 * Returns the number of bytes by which @addr and @buf should be advanced
 * (the same number as @count). Returns 0 if [addr...addr+count) does not
 * intersect any live vmalloc area.
 *
 * This function checks that @addr is a valid vmalloc'ed area and copies
 * data from that area to the given buffer. If the given memory range of
 * [addr...addr+count) includes some valid address, data is copied to the
 * proper area of @buf. If there are memory holes, they'll be zero-filled.
 * An IOREMAP area is treated as a memory hole and no copy is done.
 *
 * If [addr...addr+count) does not intersect any live vm_struct area,
 * this returns 0. @buf should be a kernel buffer. Because this function
 * uses KM_USER0, the caller should guarantee KM_USER0 is not used.
 *
 * Note: in usual ops, vread() is never necessary because the caller
 * should know the vmalloc() area is valid and can use memcpy().
 * This is for routines which have to access the vmalloc area without
 * any information, such as /dev/kmem.
 */
long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long buflen = count;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; count && tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		if (n > count)
			n = count;
		if (!(tmp->flags & VM_IOREMAP))
			aligned_vread(buf, addr, n);
		else /* IOREMAP area is treated as memory hole */
			memset(buf, 0, n);
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	read_unlock(&vmlist_lock);

	if (buf == buf_start)
		return 0;
	/* zero-fill memory holes */
	if (buf != buf_start + buflen)
		memset(buf, 0, buflen - (buf - buf_start));

	return buflen;
}
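
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how a /dev/kmem-style reader might use vread() - copy through a kernel
 * bounce buffer, then hand the result to userspace. The function name and
 * the fixed chunk size are hypothetical.
 */
#if 0	/* example only, never compiled */
static ssize_t example_read_vmalloc(char __user *ubuf, char *kaddr, size_t len)
{
	char *bounce;
	ssize_t ret = 0;

	bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bounce)
		return -ENOMEM;

	while (len) {
		size_t chunk = min_t(size_t, len, PAGE_SIZE);

		if (!vread(bounce, kaddr, chunk))	/* 0 => no live area hit */
			break;
		if (copy_to_user(ubuf, bounce, chunk)) {
			ret = -EFAULT;
			break;
		}
		ubuf += chunk;
		kaddr += chunk;
		len -= chunk;
		ret += chunk;
	}
	kfree(bounce);
	return ret;
}
#endif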
/**
 * vwrite() - write vmalloc area in a safe way.
 * @buf: buffer for source data
 * @addr: vm address.
 * @count: number of bytes to be written.
 *
 * Returns the number of bytes by which @addr and @buf should be advanced
 * (the same number as @count).
 * If [addr...addr+count) does not intersect any valid vmalloc area,
 * this returns 0.
 *
 * This function checks that @addr is a valid vmalloc'ed area and copies
 * data from a buffer to the given address. If the specified range of
 * [addr...addr+count) includes some valid address, data is copied from
 * the proper area of @buf. If there are memory holes, no copy is done
 * for them. An IOREMAP area is treated as a memory hole and no copy is
 * done.
 *
 * @buf should be a kernel buffer. Because this function uses KM_USER0,
 * the caller should guarantee KM_USER0 is not used.
 *
 * Note: in usual ops, vwrite() is never necessary because the caller
 * should know the vmalloc() area is valid and can use memcpy().
 * This is for routines which have to access the vmalloc area without
 * any information, such as /dev/kmem.
 *
 * The caller should guarantee KM_USER1 is not used.
 */
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr;
	unsigned long n, buflen;
	int copied = 0;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;
	buflen = count;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; count && tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		if (n > count)
			n = count;
		if (!(tmp->flags & VM_IOREMAP)) {
			aligned_vwrite(buf, addr, n);
			copied++;
		}
		buf += n;
		addr += n;
		count -= n;
	}
finished:
	read_unlock(&vmlist_lock);
	if (!copied)
		return 0;
	return buflen;
}
/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma: vma to cover (map full range of vma)
 * @addr: vmalloc memory
 * @pgoff: number of pages into addr before first page to map
 *
 * Returns: 0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Will return failure if
 * that criterion isn't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
			unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	area = find_vm_area(addr);
	if (!area)
		return -EINVAL;

	if (!(area->flags & VM_USERMAP))
		return -EINVAL;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		return -EINVAL;

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		int ret;

		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);
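
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the usual pairing of vmalloc_user() with remap_vmalloc_range() in a
 * character device mmap handler. vmalloc_user() sets VM_USERMAP, which is
 * exactly what remap_vmalloc_range() checks for above. The device structure
 * and its buffer are hypothetical.
 */
#if 0	/* example only, never compiled */
struct example_dev {
	void *shared;		/* allocated with vmalloc_user() at probe time */
};

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct example_dev *dev = file->private_data;

	/* Map the whole buffer, starting at the page given by vm_pgoff. */
	return remap_vmalloc_range(vma, dev->shared, vma->vm_pgoff);
}
#endif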
/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}

static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
	/* apply_to_page_range() does all the hard work. */
	return 0;
}

/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size: size of the area
 *
 * Returns: NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range. No actual mappings
 * are created. If the kernel address space is not shared
 * between processes, it syncs the pagetable across all
 * processes.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_IOREMAP,
				  __builtin_return_address(0));
	if (area == NULL)
		return NULL;

	/*
	 * This ensures that page tables are constructed for this region
	 * of kernel virtual address space and mapped into init_mm.
	 */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, f, NULL)) {
		free_vm_area(area);
		return NULL;
	}

	/* Make sure the pagetables are constructed in process kernel
	   mappings */
	vmalloc_sync_all();

	return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);

void free_vm_area(struct vm_struct *area)
{
	struct vm_struct *ret;
	ret = remove_vm_area(area->addr);
	BUG_ON(ret != area);
	kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);
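
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * alloc_vm_area() reserves kernel virtual address space with page tables
 * populated but no pages mapped - a hypervisor backend, for instance, can
 * then install mappings into that range itself. The functions below only
 * show the reserve/release lifecycle; their names are hypothetical.
 */
#if 0	/* example only, never compiled */
static struct vm_struct *example_reserve_window(size_t nr_pages)
{
	struct vm_struct *area = alloc_vm_area(nr_pages << PAGE_SHIFT);

	if (!area)
		return NULL;
	/* area->addr .. area->addr + area->size is now ours to map into */
	return area;
}

static void example_release_window(struct vm_struct *area)
{
	free_vm_area(area);	/* unhooks the area and frees the vm_struct */
}
#endif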
#ifdef CONFIG_SMP
static struct vmap_area *node_to_va(struct rb_node *n)
{
	return n ? rb_entry(n, struct vmap_area, rb_node) : NULL;
}

/**
 * pvm_find_next_prev - find the next and prev vmap_area surrounding @end
 * @end: target address
 * @pnext: out arg for the next vmap_area
 * @pprev: out arg for the previous vmap_area
 *
 * Returns: %true if either or both of next and prev are found,
 * %false if no vmap_area exists
 *
 * Find vmap_areas whose end addresses enclose @end, i.e. if not
 * NULL, *pnext->va_end > @end and *pprev->va_end <= @end.
 */
static bool pvm_find_next_prev(unsigned long end,
			       struct vmap_area **pnext,
			       struct vmap_area **pprev)
{
	struct rb_node *n = vmap_area_root.rb_node;
	struct vmap_area *va = NULL;

	while (n) {
		va = rb_entry(n, struct vmap_area, rb_node);
		if (end < va->va_end)
			n = n->rb_left;
		else if (end > va->va_end)
			n = n->rb_right;
		else
			break;
	}

	if (!va)
		return false;

	if (va->va_end > end) {
		*pnext = va;
		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
	} else {
		*pprev = va;
		*pnext = node_to_va(rb_next(&(*pprev)->rb_node));
	}
	return true;
}
/**
 * pvm_determine_end - find the highest aligned address between two vmap_areas
 * @pnext: in/out arg for the next vmap_area
 * @pprev: in/out arg for the previous vmap_area
 * @align: alignment
 *
 * Returns: determined end address
 *
 * Find the highest aligned address between *@pnext and *@pprev below
 * VMALLOC_END. *@pnext and *@pprev are adjusted so that the aligned
 * down address is between the end addresses of the two vmap_areas.
 *
 * Please note that the address returned by this function may fall
 * inside *@pnext vmap_area. The caller is responsible for checking
 * that.
 */
static unsigned long pvm_determine_end(struct vmap_area **pnext,
				       struct vmap_area **pprev,
				       unsigned long align)
{
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	unsigned long addr;

	if (*pnext)
		addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end);
	else
		addr = vmalloc_end;

	while (*pprev && (*pprev)->va_end > addr) {
		*pnext = *pprev;
		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
	}

	return addr;
}
/**
 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
 * @offsets: array containing offset of each area
 * @sizes: array containing size of each area
 * @nr_vms: the number of areas to allocate
 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
 *
 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
 * vm_structs on success, %NULL on failure
 *
 * Percpu allocator wants to use congruent vm areas so that it can
 * maintain the offsets among percpu areas. This function allocates
 * congruent vmalloc areas for it with GFP_KERNEL. These areas tend to
 * be scattered pretty far, with the distance between two areas easily
 * going up to gigabytes. To avoid interacting with regular vmallocs,
 * these areas are allocated from the top.
 *
 * Despite its complicated look, this allocator is rather simple. It
 * does everything top-down and scans areas from the end looking for a
 * matching slot. While scanning, if any of the areas overlaps with an
 * existing vmap_area, the base address is pulled down to fit the
 * area. Scanning is repeated till all the areas fit and then all
 * necessary data structures are inserted and the result is returned.
 */
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align)
{
	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
	struct vmap_area **vas, *prev, *next;
	struct vm_struct **vms;
	int area, area2, last_area, term_area;
	unsigned long base, start, end, last_end;
	bool purged = false;

	/* verify parameters and allocate data structures */
	BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align));
	for (last_area = 0, area = 0; area < nr_vms; area++) {
		start = offsets[area];
		end = start + sizes[area];

		/* is everything aligned properly? */
		BUG_ON(!IS_ALIGNED(offsets[area], align));
		BUG_ON(!IS_ALIGNED(sizes[area], align));

		/* detect the area with the highest address */
		if (start > offsets[last_area])
			last_area = area;

		for (area2 = 0; area2 < nr_vms; area2++) {
			unsigned long start2 = offsets[area2];
			unsigned long end2 = start2 + sizes[area2];

			if (area2 == area)
				continue;

			BUG_ON(start2 >= start && start2 < end);
			BUG_ON(end2 <= end && end2 > start);
		}
	}
	last_end = offsets[last_area] + sizes[last_area];

	if (vmalloc_end - vmalloc_start < last_end) {
		WARN_ON(true);
		return NULL;
	}

	vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL);
	vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL);
	if (!vas || !vms)
		goto err_free;

	for (area = 0; area < nr_vms; area++) {
		vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
		if (!vas[area] || !vms[area])
			goto err_free;
	}
retry:
	spin_lock(&vmap_area_lock);

	/* start scanning - we scan from the top, begin with the last area */
	area = term_area = last_area;
	start = offsets[area];
	end = start + sizes[area];

	if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) {
		base = vmalloc_end - last_end;
		goto found;
	}
	base = pvm_determine_end(&next, &prev, align) - end;

	while (true) {
		BUG_ON(next && next->va_end <= base + end);
		BUG_ON(prev && prev->va_end > base + end);

		/*
		 * base might have underflowed, add last_end before
		 * comparing.
		 */
		if (base + last_end < vmalloc_start + last_end) {
			spin_unlock(&vmap_area_lock);
			if (!purged) {
				purge_vmap_area_lazy();
				purged = true;
				goto retry;
			}
			goto err_free;
		}

		/*
		 * If next overlaps, move base downwards so that it's
		 * right below next and then recheck.
		 */
		if (next && next->va_start < base + end) {
			base = pvm_determine_end(&next, &prev, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * If prev overlaps, shift down next and prev and move
		 * base so that it's right below new next and then
		 * recheck.
		 */
		if (prev && prev->va_end > base + start) {
			next = prev;
			prev = node_to_va(rb_prev(&next->rb_node));
			base = pvm_determine_end(&next, &prev, align) - end;
			term_area = area;
			continue;
		}

		/*
		 * This area fits, move on to the previous one. If
		 * the previous one is the terminal one, we're done.
		 */
		area = (area + nr_vms - 1) % nr_vms;
		if (area == term_area)
			break;
		start = offsets[area];
		end = start + sizes[area];
		pvm_find_next_prev(base + end, &next, &prev);
	}
found:
	/* we've found a fitting base, insert all va's */
	for (area = 0; area < nr_vms; area++) {
		struct vmap_area *va = vas[area];

		va->va_start = base + offsets[area];
		va->va_end = va->va_start + sizes[area];
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = base + offsets[last_area];

	spin_unlock(&vmap_area_lock);

	/* insert all vm's */
	for (area = 0; area < nr_vms; area++)
		insert_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
				  pcpu_get_vm_areas);

	kfree(vas);
	return vms;

err_free:
	for (area = 0; area < nr_vms; area++) {
		if (vas)
			kfree(vas[area]);
		if (vms)
			kfree(vms[area]);
	}
	kfree(vas);
	kfree(vms);
	return NULL;
}
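
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how a percpu-style caller might request two congruent areas. Once
 * allocated, both areas share the same base, so an address in area 1 is
 * always the same fixed offset away from the corresponding address in
 * area 0. The offsets and sizes below are made up for the example.
 */
#if 0	/* example only, never compiled */
static void example_congruent_areas(void)
{
	const unsigned long offsets[] = { 0, 4 * (1 << 20) };	/* 0 and 4MB */
	const size_t sizes[] = { 1 << 20, 1 << 20 };		/* 1MB each */
	struct vm_struct **vms;

	vms = pcpu_get_vm_areas(offsets, sizes, 2, PAGE_SIZE);
	if (!vms)
		return;
	/* vms[1]->addr - vms[0]->addr == offsets[1] - offsets[0] */

	pcpu_free_vm_areas(vms, 2);
}
#endif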
/**
 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
 * @nr_vms: the number of allocated areas
 *
 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
 */
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
	int i;

	for (i = 0; i < nr_vms; i++)
		free_vm_area(vms[i]);
	kfree(vms);
}
#endif	/* CONFIG_SMP */
#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
	__acquires(&vmlist_lock)
{
	loff_t n = *pos;
	struct vm_struct *v;

	read_lock(&vmlist_lock);
	v = vmlist;
	while (n > 0 && v) {
		n--;
		v = v->next;
	}
	if (!n)
		return v;

	return NULL;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct vm_struct *v = p;

	++*pos;
	return v->next;
}

static void s_stop(struct seq_file *m, void *p)
	__releases(&vmlist_lock)
{
	read_unlock(&vmlist_lock);
}

static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
	if (NUMA_BUILD) {
		unsigned int nr, *counters = m->private;

		if (!counters)
			return;

		memset(counters, 0, nr_node_ids * sizeof(unsigned int));

		for (nr = 0; nr < v->nr_pages; nr++)
			counters[page_to_nid(v->pages[nr])]++;

		for_each_node_state(nr, N_HIGH_MEMORY)
			if (counters[nr])
				seq_printf(m, " N%u=%u", nr, counters[nr]);
	}
}

static int s_show(struct seq_file *m, void *p)
{
	struct vm_struct *v = p;

	seq_printf(m, "0x%p-0x%p %7ld",
		v->addr, v->addr + v->size, v->size);

	if (v->caller)
		seq_printf(m, " %pS", v->caller);

	if (v->nr_pages)
		seq_printf(m, " pages=%d", v->nr_pages);

	if (v->phys_addr)
		seq_printf(m, " phys=%llx", (unsigned long long)v->phys_addr);

	if (v->flags & VM_IOREMAP)
		seq_printf(m, " ioremap");

	if (v->flags & VM_ALLOC)
		seq_printf(m, " vmalloc");

	if (v->flags & VM_MAP)
		seq_printf(m, " vmap");

	if (v->flags & VM_USERMAP)
		seq_printf(m, " user");

	if (v->flags & VM_VPAGES)
		seq_printf(m, " vpages");

	show_numa_info(m, v);
	seq_putc(m, '\n');
	return 0;
}

static const struct seq_operations vmalloc_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int vmalloc_open(struct inode *inode, struct file *file)
{
	unsigned int *ptr = NULL;
	int ret;

	if (NUMA_BUILD) {
		ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
		if (ptr == NULL)
			return -ENOMEM;
	}
	ret = seq_open(file, &vmalloc_op);
	if (!ret) {
		struct seq_file *m = file->private_data;
		m->private = ptr;
	} else
		kfree(ptr);
	return ret;
}

static const struct file_operations proc_vmalloc_operations = {
	.open		= vmalloc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int __init proc_vmalloc_init(void)
{
	proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
	return 0;
}
module_init(proc_vmalloc_init);

#endif