
/*
 * linux/mm/vmalloc.c
 *
 * Copyright (C) 1993 Linus Torvalds
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 * Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>


/*** Page table manipulation functions ***/

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

static void vunmap_page_range(unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	unsigned long next;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pte_t *pte;

	/*
	 * nr is a running index into the array which helps higher level
	 * callers keep track of where we're up to.
	 */

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = pages[*nr];

		if (WARN_ON(!pte_none(*pte)))
			return -EBUSY;
		if (WARN_ON(!page))
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*nr)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

/*
 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 * will have pfns corresponding to the "pages" array.
 *
 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 */
static int vmap_page_range_noflush(unsigned long start, unsigned long end,
				   pgprot_t prot, struct page **pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = start;
	int err = 0;
	int nr = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
		if (err)
			return err;
	} while (pgd++, addr = next, addr != end);

	return nr;
}

static int vmap_page_range(unsigned long start, unsigned long end,
			   pgprot_t prot, struct page **pages)
{
	int ret;

	ret = vmap_page_range_noflush(start, end, prot, pages);
	flush_cache_vmap(start, end);
	return ret;
}

static inline int is_vmalloc_or_module_addr(const void *x)
{
	/*
	 * ARM, x86-64 and sparc64 put modules in a special place,
	 * and fall back on vmalloc() if that fails. Others
	 * just put it in the vmalloc space.
	 */
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	unsigned long addr = (unsigned long)x;
	if (addr >= MODULES_VADDR && addr < MODULES_END)
		return 1;
#endif
	return is_vmalloc_addr(x);
}

/*
 * Walk a vmap address to the struct page it maps.
 */
struct page *vmalloc_to_page(const void *vmalloc_addr)
{
	unsigned long addr = (unsigned long) vmalloc_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	/*
	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
	 * architectures that do not vmalloc module space
	 */
	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep, pte;

				ptep = pte_offset_map(pmd, addr);
				pte = *ptep;
				if (pte_present(pte))
					page = pte_page(pte);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
EXPORT_SYMBOL(vmalloc_to_page);

/*
 * Map a vmalloc()-space virtual address to the physical page frame number.
 */
unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
{
	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);
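/*
 * Usage sketch (illustrative only, not part of the original file): walking
 * a vmalloc()'ed buffer back to its backing pages, e.g. to hand them to a
 * scatter-gather API. The helper name example_fill_pages() is hypothetical.
 */
#if 0
static int example_fill_pages(void *buf, struct page **pages, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		/* each PAGE_SIZE chunk of the area maps exactly one page */
		pages[i] = vmalloc_to_page((char *)buf + i * PAGE_SIZE);
		if (!pages[i])
			return -EFAULT;
	}
	return 0;
}
#endif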
/*** Global kva allocator ***/

#define VM_LAZY_FREE	0x01
#define VM_LAZY_FREEING	0x02
#define VM_VM_AREA	0x04

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;
	unsigned long flags;
	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */
	struct list_head purge_list;	/* "lazy purge" list */
	void *private;
	struct rcu_head rcu_head;
};

static DEFINE_SPINLOCK(vmap_area_lock);
static struct rb_root vmap_area_root = RB_ROOT;
static LIST_HEAD(vmap_area_list);
static unsigned long vmap_area_pcpu_hole;

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
	struct rb_node *n = vmap_area_root.rb_node;

	while (n) {
		struct vmap_area *va;

		va = rb_entry(n, struct vmap_area, rb_node);
		if (addr < va->va_start)
			n = n->rb_left;
		else if (addr > va->va_start)
			n = n->rb_right;
		else
			return va;
	}

	return NULL;
}

static void __insert_vmap_area(struct vmap_area *va)
{
	struct rb_node **p = &vmap_area_root.rb_node;
	struct rb_node *parent = NULL;
	struct rb_node *tmp;

	while (*p) {
		struct vmap_area *tmp;

		parent = *p;
		tmp = rb_entry(parent, struct vmap_area, rb_node);
		if (va->va_start < tmp->va_end)
			p = &(*p)->rb_left;
		else if (va->va_end > tmp->va_start)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&va->rb_node, parent, p);
	rb_insert_color(&va->rb_node, &vmap_area_root);

	/* address-sort this list so it is usable like the vmlist */
	tmp = rb_prev(&va->rb_node);
	if (tmp) {
		struct vmap_area *prev;
		prev = rb_entry(tmp, struct vmap_area, rb_node);
		list_add_rcu(&va->list, &prev->list);
	} else
		list_add_rcu(&va->list, &vmap_area_list);
}

static void purge_vmap_area_lazy(void);

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
				unsigned long align,
				unsigned long vstart, unsigned long vend,
				int node, gfp_t gfp_mask)
{
	struct vmap_area *va;
	struct rb_node *n;
	unsigned long addr;
	int purged = 0;

	BUG_ON(!size);
	BUG_ON(size & ~PAGE_MASK);

	va = kmalloc_node(sizeof(struct vmap_area),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!va))
		return ERR_PTR(-ENOMEM);

retry:
	addr = ALIGN(vstart, align);

	spin_lock(&vmap_area_lock);
	if (addr + size - 1 < addr)
		goto overflow;

	/* XXX: could have a last_hole cache */
	n = vmap_area_root.rb_node;
	if (n) {
		struct vmap_area *first = NULL;

		do {
			struct vmap_area *tmp;
			tmp = rb_entry(n, struct vmap_area, rb_node);
			if (tmp->va_end >= addr) {
				if (!first && tmp->va_start < addr + size)
					first = tmp;
				n = n->rb_left;
			} else {
				first = tmp;
				n = n->rb_right;
			}
		} while (n);

		if (!first)
			goto found;

		if (first->va_end < addr) {
			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}

		while (addr + size > first->va_start && addr + size <= vend) {
			addr = ALIGN(first->va_end + PAGE_SIZE, align);
			if (addr + size - 1 < addr)
				goto overflow;

			n = rb_next(&first->rb_node);
			if (n)
				first = rb_entry(n, struct vmap_area, rb_node);
			else
				goto found;
		}
	}
found:
	if (addr + size > vend) {
overflow:
		spin_unlock(&vmap_area_lock);
		if (!purged) {
			purge_vmap_area_lazy();
			purged = 1;
			goto retry;
		}
		if (printk_ratelimit())
			printk(KERN_WARNING
				"vmap allocation for size %lu failed: "
				"use vmalloc=<size> to increase size.\n", size);
		kfree(va);
		return ERR_PTR(-EBUSY);
	}

	BUG_ON(addr & (align-1));

	va->va_start = addr;
	va->va_end = addr + size;
	va->flags = 0;
	__insert_vmap_area(va);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void rcu_free_va(struct rcu_head *head)
{
	struct vmap_area *va = container_of(head, struct vmap_area, rcu_head);

	kfree(va);
}

static void __free_vmap_area(struct vmap_area *va)
{
	BUG_ON(RB_EMPTY_NODE(&va->rb_node));
	rb_erase(&va->rb_node, &vmap_area_root);
	RB_CLEAR_NODE(&va->rb_node);
	list_del_rcu(&va->list);

	/*
	 * Track the highest possible candidate for pcpu area
	 * allocation. Areas outside of vmalloc area can be returned
	 * here too, consider only end addresses which fall inside
	 * vmalloc area proper.
	 */
	if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
		vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);

	call_rcu(&va->rcu_head, rcu_free_va);
}

/*
 * Free a region of KVA allocated by alloc_vmap_area
 */
static void free_vmap_area(struct vmap_area *va)
{
	spin_lock(&vmap_area_lock);
	__free_vmap_area(va);
	spin_unlock(&vmap_area_lock);
}

/*
 * Clear the pagetable entries of a given vmap_area
 */
static void unmap_vmap_area(struct vmap_area *va)
{
	vunmap_page_range(va->va_start, va->va_end);
}

static void vmap_debug_free_range(unsigned long start, unsigned long end)
{
	/*
	 * Unmap page tables and force a TLB flush immediately if
	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use after free
	 * bugs similarly to those in linear kernel virtual address
	 * space after a page has been freed.
	 *
	 * All the lazy freeing logic is still retained, in order to
	 * minimise intrusiveness of this debugging feature.
	 *
	 * This is going to be *slow* (linear kernel virtual address
	 * debugging doesn't do a broadcast TLB flush so it is a lot
	 * faster).
	 */
#ifdef CONFIG_DEBUG_PAGEALLOC
	vunmap_page_range(start, end);
	flush_tlb_kernel_range(start, end);
#endif
}

/*
 * lazy_max_pages is the maximum amount of virtual address space we gather up
 * before attempting to purge with a TLB flush.
 *
 * There is a tradeoff here: a larger number will cover more kernel page tables
 * and take slightly longer to purge, but it will linearly reduce the number of
 * global TLB flushes that must be performed. It would seem natural to scale
 * this number up linearly with the number of CPUs (because vmapping activity
 * could also scale linearly with the number of CPUs), however it is likely
 * that in practice, workloads might be constrained in other ways that mean
 * vmap activity will not scale linearly with CPUs. Also, I want to be
 * conservative and not introduce a big latency on huge systems, so go with
 * a less aggressive log scale. It will still be an improvement over the old
 * code, and it will be simple to change the scale factor if we find that it
 * becomes a problem on bigger systems.
 */
static unsigned long lazy_max_pages(void)
{
	unsigned int log;

	log = fls(num_online_cpus());

	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
}
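/*
 * Worked example (not in the original file): with 4 CPUs online,
 * fls(4) == 3, so lazy_max_pages() returns 3 * (32MB / PAGE_SIZE).
 * With 4K pages that is 3 * 8192 = 24576 pages, i.e. up to 96MB of
 * lazily freed virtual address space may accumulate before a purge.
 */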
static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);

/*
 * Purges all lazily-freed vmap areas.
 *
 * If sync is 0 then don't purge if there is already a purge in progress.
 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 * their own TLB flushing).
 * Returns with *start = min(*start, lowest purged address)
 *              *end = max(*end, highest purged address)
 */
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
					int sync, int force_flush)
{
	static DEFINE_SPINLOCK(purge_lock);
	LIST_HEAD(valist);
	struct vmap_area *va;
	struct vmap_area *n_va;
	int nr = 0;

	/*
	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
	 * should not expect such behaviour. This just simplifies locking for
	 * the case that isn't actually used at the moment anyway.
	 */
	if (!sync && !force_flush) {
		if (!spin_trylock(&purge_lock))
			return;
	} else
		spin_lock(&purge_lock);

	rcu_read_lock();
	list_for_each_entry_rcu(va, &vmap_area_list, list) {
		if (va->flags & VM_LAZY_FREE) {
			if (va->va_start < *start)
				*start = va->va_start;
			if (va->va_end > *end)
				*end = va->va_end;
			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
			unmap_vmap_area(va);
			list_add_tail(&va->purge_list, &valist);
			va->flags |= VM_LAZY_FREEING;
			va->flags &= ~VM_LAZY_FREE;
		}
	}
	rcu_read_unlock();

	if (nr) {
		BUG_ON(nr > atomic_read(&vmap_lazy_nr));
		atomic_sub(nr, &vmap_lazy_nr);
	}

	if (nr || force_flush)
		flush_tlb_kernel_range(*start, *end);

	if (nr) {
		spin_lock(&vmap_area_lock);
		list_for_each_entry_safe(va, n_va, &valist, purge_list)
			__free_vmap_area(va);
		spin_unlock(&vmap_area_lock);
	}
	spin_unlock(&purge_lock);
}

/*
 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 * is already purging.
 */
static void try_purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 0, 0);
}

/*
 * Kick off a purge of the outstanding lazy areas.
 */
static void purge_vmap_area_lazy(void)
{
	unsigned long start = ULONG_MAX, end = 0;

	__purge_vmap_area_lazy(&start, &end, 1, 0);
}

/*
 * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
 * called for the correct range previously.
 */
static void free_unmap_vmap_area_noflush(struct vmap_area *va)
{
	va->flags |= VM_LAZY_FREE;
	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
		try_purge_vmap_area_lazy();
}

/*
 * Free and unmap a vmap area
 */
static void free_unmap_vmap_area(struct vmap_area *va)
{
	flush_cache_vunmap(va->va_start, va->va_end);
	free_unmap_vmap_area_noflush(va);
}

static struct vmap_area *find_vmap_area(unsigned long addr)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	va = __find_vmap_area(addr);
	spin_unlock(&vmap_area_lock);

	return va;
}

static void free_unmap_vmap_area_addr(unsigned long addr)
{
	struct vmap_area *va;

	va = find_vmap_area(addr);
	BUG_ON(!va);
	free_unmap_vmap_area(va);
}


/*** Per cpu kva allocator ***/

/*
 * vmap space is limited especially on 32 bit architectures. Ensure there is
 * room for at least 16 percpu vmap blocks per CPU.
 */
/*
 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
 * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess
 * instead (we just need a rough idea)
 */
#if BITS_PER_LONG == 32
#define VMALLOC_SPACE		(128UL*1024*1024)
#else
#define VMALLOC_SPACE		(128UL*1024*1024*1024)
#endif

#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
#define VMAP_BBMAP_BITS		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
					VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
						VMALLOC_PAGES / NR_CPUS / 16))

#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)

static bool vmap_initialized __read_mostly = false;

struct vmap_block_queue {
	spinlock_t lock;
	struct list_head free;
	struct list_head dirty;
	unsigned int nr_dirty;
};

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	struct vmap_block_queue *vbq;
	unsigned long free, dirty;
	DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
	union {
		struct list_head free_list;
		struct rcu_head rcu_head;
	};
};

/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);

/*
 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 * in the free path. Could get rid of this if we change the API to return a
 * "cookie" from alloc, to be passed to free. But no big deal yet.
 */
static DEFINE_SPINLOCK(vmap_block_tree_lock);
static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);

/*
 * We should probably have a fallback mechanism to allocate virtual memory
 * out of partially filled vmap blocks. However vmap block sizing should be
 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 * big problem.
 */

static unsigned long addr_to_vb_idx(unsigned long addr)
{
	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
	addr /= VMAP_BLOCK_SIZE;
	return addr;
}
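/*
 * Worked example (not in the original file): assume VMALLOC_START is
 * VMAP_BLOCK_SIZE aligned and VMAP_BLOCK_SIZE is 4MB (VMAP_BBMAP_BITS =
 * 1024 bits of 4K pages). An address at VMALLOC_START + 9MB then yields
 * index 9MB / 4MB = 2, so vb_free() finds the owning vmap_block with a
 * single radix tree lookup on that index.
 */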
static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	struct vmap_area *va;
	unsigned long vb_idx;
	int node, err;

	node = numa_node_id();

	vb = kmalloc_node(sizeof(struct vmap_block),
			gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!vb))
		return ERR_PTR(-ENOMEM);

	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
					VMALLOC_START, VMALLOC_END,
					node, gfp_mask);
	if (unlikely(IS_ERR(va))) {
		kfree(vb);
		return ERR_PTR(PTR_ERR(va));
	}

	err = radix_tree_preload(gfp_mask);
	if (unlikely(err)) {
		kfree(vb);
		free_vmap_area(va);
		return ERR_PTR(err);
	}

	spin_lock_init(&vb->lock);
	vb->va = va;
	vb->free = VMAP_BBMAP_BITS;
	vb->dirty = 0;
	bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
	INIT_LIST_HEAD(&vb->free_list);

	vb_idx = addr_to_vb_idx(va->va_start);
	spin_lock(&vmap_block_tree_lock);
	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(err);
	radix_tree_preload_end();

	vbq = &get_cpu_var(vmap_block_queue);
	vb->vbq = vbq;
	spin_lock(&vbq->lock);
	list_add(&vb->free_list, &vbq->free);
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);
	return vb;
}

static void rcu_free_vb(struct rcu_head *head)
{
	struct vmap_block *vb = container_of(head, struct vmap_block, rcu_head);

	kfree(vb);
}

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	BUG_ON(!list_empty(&vb->free_list));

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_unmap_vmap_area_noflush(vb->va);
	call_rcu(&vb->rcu_head, rcu_free_vb);
}

static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
{
	struct vmap_block_queue *vbq;
	struct vmap_block *vb;
	unsigned long addr = 0;
	unsigned int order;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
	order = get_order(size);

again:
	rcu_read_lock();
	vbq = &get_cpu_var(vmap_block_queue);
	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
		int i;

		spin_lock(&vb->lock);
		i = bitmap_find_free_region(vb->alloc_map,
						VMAP_BBMAP_BITS, order);

		if (i >= 0) {
			addr = vb->va->va_start + (i << PAGE_SHIFT);
			BUG_ON(addr_to_vb_idx(addr) !=
					addr_to_vb_idx(vb->va->va_start));
			vb->free -= 1UL << order;
			if (vb->free == 0) {
				spin_lock(&vbq->lock);
				list_del_init(&vb->free_list);
				spin_unlock(&vbq->lock);
			}
			spin_unlock(&vb->lock);
			break;
		}
		spin_unlock(&vb->lock);
	}
	put_cpu_var(vmap_block_queue);
	rcu_read_unlock();

	if (!addr) {
		vb = new_vmap_block(gfp_mask);
		if (IS_ERR(vb))
			return vb;
		goto again;
	}

	return (void *)addr;
}

static void vb_free(const void *addr, unsigned long size)
{
	unsigned long offset;
	unsigned long vb_idx;
	unsigned int order;
	struct vmap_block *vb;

	BUG_ON(size & ~PAGE_MASK);
	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);

	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);

	order = get_order(size);

	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);

	vb_idx = addr_to_vb_idx((unsigned long)addr);
	rcu_read_lock();
	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
	rcu_read_unlock();
	BUG_ON(!vb);

	spin_lock(&vb->lock);
	bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);

	vb->dirty += 1UL << order;
	if (vb->dirty == VMAP_BBMAP_BITS) {
		BUG_ON(vb->free || !list_empty(&vb->free_list));
		spin_unlock(&vb->lock);
		free_vmap_block(vb);
	} else
		spin_unlock(&vb->lock);
}

/**
 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
 *
 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
 * to amortize TLB flushing overheads. What this means is that any page you
 * have now, may, in a former life, have been mapped into kernel virtual
 * address by the vmap layer and so there might be some CPUs with TLB entries
 * still referencing that page (additional to the regular 1:1 kernel mapping).
 *
 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
 * be sure that none of the pages we have control over will have any aliases
 * from the vmap layer.
 */
void vm_unmap_aliases(void)
{
	unsigned long start = ULONG_MAX, end = 0;
	int cpu;
	int flush = 0;

	if (unlikely(!vmap_initialized))
		return;

	for_each_possible_cpu(cpu) {
		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
		struct vmap_block *vb;

		rcu_read_lock();
		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			int i;

			spin_lock(&vb->lock);
			i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
			while (i < VMAP_BBMAP_BITS) {
				unsigned long s, e;
				int j;
				j = find_next_zero_bit(vb->dirty_map,
					VMAP_BBMAP_BITS, i);

				s = vb->va->va_start + (i << PAGE_SHIFT);
				e = vb->va->va_start + (j << PAGE_SHIFT);
				vunmap_page_range(s, e);
				flush = 1;

				if (s < start)
					start = s;
				if (e > end)
					end = e;

				i = j;
				i = find_next_bit(vb->dirty_map,
							VMAP_BBMAP_BITS, i);
			}
			spin_unlock(&vb->lock);
		}
		rcu_read_unlock();
	}

	__purge_vmap_area_lazy(&start, &end, 1, flush);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
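/*
 * Usage note (illustrative, not from the original file): a typical caller
 * is code that changes the cache attributes of pages, for example the x86
 * set_memory_uc() path, which calls vm_unmap_aliases() first so that no
 * lazily kept vmap mapping (and hence no stale TLB entry) still aliases
 * the pages with the old attributes.
 */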
/**
 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
 * @mem: the pointer returned by vm_map_ram
 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
 */
void vm_unmap_ram(const void *mem, unsigned int count)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr = (unsigned long)mem;

	BUG_ON(!addr);
	BUG_ON(addr < VMALLOC_START);
	BUG_ON(addr > VMALLOC_END);
	BUG_ON(addr & (PAGE_SIZE-1));

	debug_check_no_locks_freed(mem, size);
	vmap_debug_free_range(addr, addr+size);

	if (likely(count <= VMAP_MAX_ALLOC))
		vb_free(mem, size);
	else
		free_unmap_vmap_area_addr(addr);
}
EXPORT_SYMBOL(vm_unmap_ram);

/**
 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
 * @pages: an array of pointers to the pages to be mapped
 * @count: number of pages
 * @node: prefer to allocate data structures on this node
 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
 *
 * Returns: a pointer to the address that has been mapped, or %NULL on failure
 */
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
{
	unsigned long size = count << PAGE_SHIFT;
	unsigned long addr;
	void *mem;

	if (likely(count <= VMAP_MAX_ALLOC)) {
		mem = vb_alloc(size, GFP_KERNEL);
		if (IS_ERR(mem))
			return NULL;
		addr = (unsigned long)mem;
	} else {
		struct vmap_area *va;
		va = alloc_vmap_area(size, PAGE_SIZE,
				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
		if (IS_ERR(va))
			return NULL;

		addr = va->va_start;
		mem = (void *)addr;
	}
	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
		vm_unmap_ram(mem, count);
		return NULL;
	}
	return mem;
}
EXPORT_SYMBOL(vm_map_ram);
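/*
 * Usage sketch (illustrative only, not part of the original file): a
 * transient mapping of a small page array; the same count must be passed
 * to vm_unmap_ram(). The function and variable names are hypothetical.
 */
#if 0
static int example_copy_out(struct page **pages, unsigned int count,
			    void *dst, size_t len)
{
	void *vaddr;

	vaddr = vm_map_ram(pages, count, -1, PAGE_KERNEL);
	if (!vaddr)
		return -ENOMEM;
	memcpy(dst, vaddr, len);	/* caller ensures len <= count << PAGE_SHIFT */
	vm_unmap_ram(vaddr, count);
	return 0;
}
#endif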
/**
 * vm_area_register_early - register vmap area early during boot
 * @vm: vm_struct to register
 * @align: requested alignment
 *
 * This function is used to register kernel vm area before
 * vmalloc_init() is called. @vm->size and @vm->flags should contain
 * proper values on entry and other fields should be zero. On return,
 * vm->addr contains the allocated address.
 *
 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
 */
void __init vm_area_register_early(struct vm_struct *vm, size_t align)
{
	static size_t vm_init_off __initdata;
	unsigned long addr;

	addr = ALIGN(VMALLOC_START + vm_init_off, align);
	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;

	vm->addr = (void *)addr;

	vm->next = vmlist;
	vmlist = vm;
}

void __init vmalloc_init(void)
{
	struct vmap_area *va;
	struct vm_struct *tmp;
	int i;

	for_each_possible_cpu(i) {
		struct vmap_block_queue *vbq;

		vbq = &per_cpu(vmap_block_queue, i);
		spin_lock_init(&vbq->lock);
		INIT_LIST_HEAD(&vbq->free);
		INIT_LIST_HEAD(&vbq->dirty);
		vbq->nr_dirty = 0;
	}

	/* Import existing vmlist entries. */
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
		va->flags = tmp->flags | VM_VM_AREA;
		va->va_start = (unsigned long)tmp->addr;
		va->va_end = va->va_start + tmp->size;
		__insert_vmap_area(va);
	}

	vmap_area_pcpu_hole = VMALLOC_END;

	vmap_initialized = true;
}

/**
 * map_kernel_range_noflush - map kernel VM area with the specified pages
 * @addr: start of the VM area to map
 * @size: size of the VM area to map
 * @prot: page protection flags to use
 * @pages: pages to map
 *
 * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing. The caller is
 * responsible for calling flush_cache_vmap() on to-be-mapped areas
 * before calling this function.
 *
 * RETURNS:
 * The number of pages mapped on success, -errno on failure.
 */
int map_kernel_range_noflush(unsigned long addr, unsigned long size,
			     pgprot_t prot, struct page **pages)
{
	return vmap_page_range_noflush(addr, addr + size, prot, pages);
}

/**
 * unmap_kernel_range_noflush - unmap kernel VM area
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Unmap PFN_UP(@size) pages at @addr. The VM area @addr and @size
 * specify should have been allocated using get_vm_area() and its
 * friends.
 *
 * NOTE:
 * This function does NOT do any cache flushing. The caller is
 * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
 * before calling this function and flush_tlb_kernel_range() after.
 */
void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
	vunmap_page_range(addr, addr + size);
}

/**
 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
 * @addr: start of the VM area to unmap
 * @size: size of the VM area to unmap
 *
 * Similar to unmap_kernel_range_noflush() but flushes vcache before
 * the unmapping and tlb after.
 */
void unmap_kernel_range(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	flush_cache_vunmap(addr, end);
	vunmap_page_range(addr, end);
	flush_tlb_kernel_range(addr, end);
}
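/*
 * Usage sketch (illustrative only, not part of the original file): a
 * caller unmapping several ranges can use the _noflush variants and batch
 * the maintenance itself; unmap_kernel_range() above is exactly this
 * sequence for the single-range case.
 */
#if 0
static void example_unmap_two(unsigned long a, unsigned long b,
			      unsigned long size)
{
	flush_cache_vunmap(a, a + size);
	flush_cache_vunmap(b, b + size);
	unmap_kernel_range_noflush(a, size);
	unmap_kernel_range_noflush(b, size);
	flush_tlb_kernel_range(a, a + size);
	flush_tlb_kernel_range(b, b + size);
}
#endif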
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	err = vmap_page_range(addr, end, prot, *pages);
	if (err > 0) {
		*pages += err;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(map_vm_area);

/*** Old vmalloc interfaces ***/
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
			      unsigned long flags, void *caller)
{
	struct vm_struct *tmp, **p;

	vm->flags = flags;
	vm->addr = (void *)va->va_start;
	vm->size = va->va_end - va->va_start;
	vm->caller = caller;
	va->private = vm;
	va->flags |= VM_VM_AREA;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr >= vm->addr)
			break;
	}
	vm->next = *p;
	*p = vm;
	write_unlock(&vmlist_lock);
}

static struct vm_struct *__get_vm_area_node(unsigned long size,
		unsigned long flags, unsigned long start, unsigned long end,
		int node, gfp_t gfp_mask, void *caller)
{
	struct vmap_area *va;
	struct vm_struct *area;
	unsigned long align = 1;

	BUG_ON(in_interrupt());
	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}

	size = PAGE_ALIGN(size);
	if (unlikely(!size))
		return NULL;

	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
	if (IS_ERR(va)) {
		kfree(area);
		return NULL;
	}

	insert_vmalloc_vm(area, va, flags, caller);
	return area;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
						__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(__get_vm_area);

struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
				       unsigned long start, unsigned long end,
				       void *caller)
{
	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
				  caller);
}

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes. Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
				-1, GFP_KERNEL, __builtin_return_address(0));
}
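/*
 * Usage sketch (illustrative only, not part of the original file): the
 * classic get_vm_area() user is an ioremap() implementation, which
 * reserves virtual space here and fills in the page tables itself.
 * ioremap_page_range() and PAGE_KERNEL_NOCACHE are arch/era specific
 * names; error handling is abbreviated.
 */
#if 0
static void __iomem *example_ioremap(unsigned long phys, unsigned long size)
{
	struct vm_struct *area;
	unsigned long addr;

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	if (ioremap_page_range(addr, addr + size, phys, PAGE_KERNEL_NOCACHE)) {
		vunmap(area->addr);
		return NULL;
	}
	return (void __iomem *)addr;
}
#endif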
struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
				void *caller)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
						-1, GFP_KERNEL, caller);
}

struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
				   int node, gfp_t gfp_mask)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
				  gfp_mask, __builtin_return_address(0));
}

static struct vm_struct *find_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA)
		return va->private;

	return NULL;
}

/**
 * remove_vm_area - find and remove a continuous kernel virtual area
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(const void *addr)
{
	struct vmap_area *va;

	va = find_vmap_area((unsigned long)addr);
	if (va && va->flags & VM_VM_AREA) {
		struct vm_struct *vm = va->private;
		struct vm_struct *tmp, **p;
		/*
		 * remove from list and disallow access to this vm_struct
		 * before unmap. (address range confliction is maintained by
		 * vmap.)
		 */
		write_lock(&vmlist_lock);
		for (p = &vmlist; (tmp = *p) != vm; p = &tmp->next)
			;
		*p = tmp->next;
		write_unlock(&vmlist_lock);

		vmap_debug_free_range(va->va_start, va->va_end);
		free_unmap_vmap_area(va);
		vm->size -= PAGE_SIZE;

		return vm;
	}
	return NULL;
}

static void __vunmap(const void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);
	debug_check_no_obj_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			struct page *page = area->pages[i];

			BUG_ON(!page);
			__free_page(page);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 * vfree - release memory allocated by vmalloc()
 * @addr: memory base address
 *
 * Free the virtually continuous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
void vfree(const void *addr)
{
	BUG_ON(in_interrupt());

	kmemleak_free(addr);

	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(const void *addr)
{
	BUG_ON(in_interrupt());
	might_sleep();
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	might_sleep();

	if (count > num_physpages)
		return NULL;

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
					__builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
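/*
 * Usage sketch (illustrative only, not part of the original file):
 * gathering two scattered pages into one virtually contiguous, readable
 * and writable window; the helper name is hypothetical.
 */
#if 0
static void *example_vmap_pair(struct page *p0, struct page *p1)
{
	struct page *pages[2] = { p0, p1 };

	return vmap(pages, 2, VM_MAP, PAGE_KERNEL);
}
#endif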
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node, void *caller);
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				 pgprot_t prot, int node, void *caller)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
				PAGE_KERNEL, node, caller);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
				node);
	}
	area->pages = pages;
	area->caller = caller;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node < 0)
			page = alloc_page(gfp_mask);
		else
			page = alloc_pages_node(node, gfp_mask, 0);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	void *addr = __vmalloc_area_node(area, gfp_mask, prot, -1,
					 __builtin_return_address(0));

	/*
	 * A ref_count = 3 is needed because the vm_struct and vmap_area
	 * structures allocated in the __get_vm_area_node() function contain
	 * references to the virtual address of the vmalloc'ed block.
	 */
	kmemleak_alloc(addr, area->size - PAGE_SIZE, 3, gfp_mask);

	return addr;
}

/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or -1
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags. Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			    int node, void *caller)
{
	struct vm_struct *area;
	void *addr;
	unsigned long real_size = size;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
				  node, gfp_mask, caller);

	if (!area)
		return NULL;

	addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);

	/*
	 * A ref_count = 3 is needed because the vm_struct and vmap_area
	 * structures allocated in the __get_vm_area_node() function contain
	 * references to the virtual address of the vmalloc'ed block.
	 */
	kmemleak_alloc(addr, real_size, 3, gfp_mask);

	return addr;
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__vmalloc);

/**
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
					-1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc);
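/*
 * Usage sketch (illustrative only, not part of the original file): the
 * common pattern for a large table that need not be physically
 * contiguous; always paired with vfree() from process context.
 */
#if 0
static u32 *example_alloc_table(unsigned long entries)
{
	u32 *table = vmalloc(entries * sizeof(u32));

	if (table)
		memset(table, 0, entries * sizeof(u32));
	return table;	/* caller does vfree(table) when done */
}
#endif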
  1310. /**
  1311. * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
  1312. * @size: allocation size
  1313. *
  1314. * The resulting memory area is zeroed so it can be mapped to userspace
  1315. * without leaking data.
  1316. */
  1317. void *vmalloc_user(unsigned long size)
  1318. {
  1319. struct vm_struct *area;
  1320. void *ret;
  1321. ret = __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
  1322. PAGE_KERNEL, -1, __builtin_return_address(0));
  1323. if (ret) {
  1324. area = find_vm_area(ret);
  1325. area->flags |= VM_USERMAP;
  1326. }
  1327. return ret;
  1328. }
  1329. EXPORT_SYMBOL(vmalloc_user);
  1330. /**
  1331. * vmalloc_node - allocate memory on a specific node
  1332. * @size: allocation size
  1333. * @node: numa node
  1334. *
  1335. * Allocate enough pages to cover @size from the page level
  1336. * allocator and map them into contiguous kernel virtual space.
  1337. *
  1338. * For tight control over page level allocator and protection flags
  1339. * use __vmalloc() instead.
  1340. */
  1341. void *vmalloc_node(unsigned long size, int node)
  1342. {
  1343. return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
  1344. node, __builtin_return_address(0));
  1345. }
  1346. EXPORT_SYMBOL(vmalloc_node);
  1347. #ifndef PAGE_KERNEL_EXEC
  1348. # define PAGE_KERNEL_EXEC PAGE_KERNEL
  1349. #endif
  1350. /**
  1351. * vmalloc_exec - allocate virtually contiguous, executable memory
  1352. * @size: allocation size
  1353. *
  1354. * Kernel-internal function to allocate enough pages to cover @size
  1355. * the page level allocator and map them into contiguous and
  1356. * executable kernel virtual space.
  1357. *
  1358. * For tight control over page level allocator and protection flags
  1359. * use __vmalloc() instead.
  1360. */
  1361. void *vmalloc_exec(unsigned long size)
  1362. {
  1363. return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
  1364. -1, __builtin_return_address(0));
  1365. }
  1366. #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
  1367. #define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
  1368. #elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
  1369. #define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
  1370. #else
  1371. #define GFP_VMALLOC32 GFP_KERNEL
  1372. #endif
  1373. /**
  1374. * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
  1375. * @size: allocation size
  1376. *
  1377. * Allocate enough 32bit PA addressable pages to cover @size from the
  1378. * page level allocator and map them into contiguous kernel virtual space.
  1379. */
  1380. void *vmalloc_32(unsigned long size)
  1381. {
  1382. return __vmalloc_node(size, GFP_VMALLOC32, PAGE_KERNEL,
  1383. -1, __builtin_return_address(0));
  1384. }
  1385. EXPORT_SYMBOL(vmalloc_32);
/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
        struct vm_struct *area;
        void *ret;

        ret = __vmalloc_node(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
                             -1, __builtin_return_address(0));
        if (ret) {
                area = find_vm_area(ret);
                area->flags |= VM_USERMAP;
        }
        return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);
long vread(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        *buf = '\0';
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *buf = *addr;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}
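
/*
 * Example usage (an illustrative sketch): a debug interface that dumps
 * vmalloc space, in the style of the /proc/kcore reader, copies a
 * window of the vmalloc range out through vread():
 *
 *	char *kbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 *	long copied;
 *
 *	if (!kbuf)
 *		return -ENOMEM;
 *	copied = vread(kbuf, (char *)VMALLOC_START, PAGE_SIZE);
 */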
long vwrite(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *addr = *buf;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}
/**
 * remap_vmalloc_range - map vmalloc pages to userspace
 * @vma: vma to cover (map full range of vma)
 * @addr: vmalloc memory
 * @pgoff: number of pages into addr before first page to map
 *
 * Returns: 0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma. Returns failure if
 * those criteria aren't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c)
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
                        unsigned long pgoff)
{
        struct vm_struct *area;
        unsigned long uaddr = vma->vm_start;
        unsigned long usize = vma->vm_end - vma->vm_start;

        if ((PAGE_SIZE-1) & (unsigned long)addr)
                return -EINVAL;

        area = find_vm_area(addr);
        if (!area)
                return -EINVAL;

        if (!(area->flags & VM_USERMAP))
                return -EINVAL;

        if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
                return -EINVAL;

        addr += pgoff << PAGE_SHIFT;
        do {
                struct page *page = vmalloc_to_page(addr);
                int ret;

                ret = vm_insert_page(vma, uaddr, page);
                if (ret)
                        return ret;

                uaddr += PAGE_SIZE;
                addr += PAGE_SIZE;
                usize -= PAGE_SIZE;
        } while (usize > 0);

        /* Prevent "things" like memory migration? VM flags need a cleanup... */
        vma->vm_flags |= VM_RESERVED;

        return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);
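
/*
 * Example usage (an illustrative sketch; 'struct my_dev' and 'my_mmap'
 * are hypothetical). A driver exposes a vmalloc_user() buffer through
 * its ->mmap file operation:
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_dev *dev = file->private_data;
 *
 *		return remap_vmalloc_range(vma, dev->buf, vma->vm_pgoff);
 *	}
 */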
/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}

static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
{
        /* apply_to_page_range() does all the hard work. */
        return 0;
}
/**
 * alloc_vm_area - allocate a range of kernel address space
 * @size: size of the area
 *
 * Returns: NULL on failure, vm_struct on success
 *
 * This function reserves a range of kernel address space, and
 * allocates pagetables to map that range. No actual mappings
 * are created. If the kernel address space is not shared
 * between processes, it syncs the pagetable across all
 * processes.
 */
struct vm_struct *alloc_vm_area(size_t size)
{
        struct vm_struct *area;

        area = get_vm_area_caller(size, VM_IOREMAP,
                                  __builtin_return_address(0));
        if (area == NULL)
                return NULL;

        /*
         * This ensures that page tables are constructed for this region
         * of kernel virtual address space and mapped into init_mm.
         */
        if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
                                area->size, f, NULL)) {
                free_vm_area(area);
                return NULL;
        }

        /* Make sure the pagetables are constructed in process kernel
           mappings */
        vmalloc_sync_all();

        return area;
}
EXPORT_SYMBOL_GPL(alloc_vm_area);
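
/*
 * Example usage (an illustrative sketch; 'nr_frames' is hypothetical).
 * A paravirtualized guest, in the style of the Xen grant-table code,
 * reserves kernel address space first and points the hypervisor at it
 * afterwards:
 *
 *	struct vm_struct *area = alloc_vm_area(nr_frames * PAGE_SIZE);
 *
 *	if (!area)
 *		return -ENOMEM;
 *
 * The hypervisor is then asked to map the shared frames at area->addr.
 */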
void free_vm_area(struct vm_struct *area)
{
        struct vm_struct *ret;

        ret = remove_vm_area(area->addr);
        BUG_ON(ret != area);
        kfree(area);
}
EXPORT_SYMBOL_GPL(free_vm_area);

static struct vmap_area *node_to_va(struct rb_node *n)
{
        return n ? rb_entry(n, struct vmap_area, rb_node) : NULL;
}
/**
 * pvm_find_next_prev - find the next and prev vmap_area surrounding @end
 * @end: target address
 * @pnext: out arg for the next vmap_area
 * @pprev: out arg for the previous vmap_area
 *
 * Returns: %true if either or both of next and prev are found,
 * %false if no vmap_area exists
 *
 * Find the vmap_areas whose end addresses enclose @end, i.e. if not
 * NULL, (*pnext)->va_end > @end and (*pprev)->va_end <= @end.
 */
static bool pvm_find_next_prev(unsigned long end,
                               struct vmap_area **pnext,
                               struct vmap_area **pprev)
{
        struct rb_node *n = vmap_area_root.rb_node;
        struct vmap_area *va = NULL;

        while (n) {
                va = rb_entry(n, struct vmap_area, rb_node);
                if (end < va->va_end)
                        n = n->rb_left;
                else if (end > va->va_end)
                        n = n->rb_right;
                else
                        break;
        }

        if (!va)
                return false;

        if (va->va_end > end) {
                *pnext = va;
                *pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
        } else {
                *pprev = va;
                *pnext = node_to_va(rb_next(&(*pprev)->rb_node));
        }
        return true;
}
/**
 * pvm_determine_end - find the highest aligned address between two vmap_areas
 * @pnext: in/out arg for the next vmap_area
 * @pprev: in/out arg for the previous vmap_area
 * @align: alignment
 *
 * Returns: determined end address
 *
 * Find the highest aligned address between *@pnext and *@pprev below
 * VMALLOC_END. *@pnext and *@pprev are adjusted so that the aligned
 * down address is between the end addresses of the two vmap_areas.
 *
 * Please note that the address returned by this function may fall
 * inside the *@pnext vmap_area. The caller is responsible for checking
 * that.
 */
static unsigned long pvm_determine_end(struct vmap_area **pnext,
                                       struct vmap_area **pprev,
                                       unsigned long align)
{
        const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
        unsigned long addr;

        if (*pnext)
                addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end);
        else
                addr = vmalloc_end;

        while (*pprev && (*pprev)->va_end > addr) {
                *pnext = *pprev;
                *pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
        }

        return addr;
}
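
/*
 * Worked example (illustrative, with hypothetical addresses): with
 * align = 0x10000 and (*pnext)->va_start = 0x12345678, the candidate
 * end is 0x12345678 & ~0xffff = 0x12340000. If (*pprev)->va_end is,
 * say, 0x12348000, i.e. above the candidate, *pnext and *pprev are
 * walked one area down; 0x12340000 is still returned, now possibly
 * inside the new *pnext, which the caller must check.
 */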
/**
 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
 * @offsets: array containing offset of each area
 * @sizes: array containing size of each area
 * @nr_vms: the number of areas to allocate
 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
 * @gfp_mask: allocation mask
 *
 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
 * vm_structs on success, %NULL on failure
 *
 * Percpu allocator wants to use congruent vm areas so that it can
 * maintain the offsets among percpu areas. This function allocates
 * congruent vmalloc areas for it. These areas tend to be scattered
 * pretty far apart, with the distance between two areas easily going up
 * to gigabytes. To avoid interacting with regular vmallocs, these areas
 * are allocated from the top.
 *
 * Despite its complicated look, this allocator is rather simple. It
 * does everything top-down and scans areas from the end looking for a
 * matching slot. While scanning, if any of the areas overlaps with an
 * existing vmap_area, the base address is pulled down to fit the
 * area. Scanning is repeated until all the areas fit and then all
 * necessary data structures are inserted and the result is returned.
 */
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
                                     const size_t *sizes, int nr_vms,
                                     size_t align, gfp_t gfp_mask)
{
        const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
        const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
        struct vmap_area **vas, *prev, *next;
        struct vm_struct **vms;
        int area, area2, last_area, term_area;
        unsigned long base, start, end, last_end;
        bool purged = false;

        gfp_mask &= GFP_RECLAIM_MASK;

        /* verify parameters and allocate data structures */
        BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align));
        for (last_area = 0, area = 0; area < nr_vms; area++) {
                start = offsets[area];
                end = start + sizes[area];

                /* is everything aligned properly? */
                BUG_ON(!IS_ALIGNED(offsets[area], align));
                BUG_ON(!IS_ALIGNED(sizes[area], align));

                /* detect the area with the highest address */
                if (start > offsets[last_area])
                        last_area = area;

                for (area2 = 0; area2 < nr_vms; area2++) {
                        unsigned long start2 = offsets[area2];
                        unsigned long end2 = start2 + sizes[area2];

                        if (area2 == area)
                                continue;

                        BUG_ON(start2 >= start && start2 < end);
                        BUG_ON(end2 <= end && end2 > start);
                }
        }
        last_end = offsets[last_area] + sizes[last_area];

        if (vmalloc_end - vmalloc_start < last_end) {
                WARN_ON(true);
                return NULL;
        }

        vms = kzalloc(sizeof(vms[0]) * nr_vms, gfp_mask);
        vas = kzalloc(sizeof(vas[0]) * nr_vms, gfp_mask);
        if (!vas || !vms)
                goto err_free;

        for (area = 0; area < nr_vms; area++) {
                vas[area] = kzalloc(sizeof(struct vmap_area), gfp_mask);
                vms[area] = kzalloc(sizeof(struct vm_struct), gfp_mask);
                if (!vas[area] || !vms[area])
                        goto err_free;
        }
retry:
        spin_lock(&vmap_area_lock);

        /* start scanning - we scan from the top, begin with the last area */
        area = term_area = last_area;
        start = offsets[area];
        end = start + sizes[area];

        if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) {
                base = vmalloc_end - last_end;
                goto found;
        }
        base = pvm_determine_end(&next, &prev, align) - end;

        while (true) {
                BUG_ON(next && next->va_end <= base + end);
                BUG_ON(prev && prev->va_end > base + end);

                /*
                 * base might have underflowed, add last_end before
                 * comparing.
                 */
                if (base + last_end < vmalloc_start + last_end) {
                        spin_unlock(&vmap_area_lock);
                        if (!purged) {
                                purge_vmap_area_lazy();
                                purged = true;
                                goto retry;
                        }
                        goto err_free;
                }

                /*
                 * If next overlaps, move base downwards so that it's
                 * right below next and then recheck.
                 */
                if (next && next->va_start < base + end) {
                        base = pvm_determine_end(&next, &prev, align) - end;
                        term_area = area;
                        continue;
                }

                /*
                 * If prev overlaps, shift down next and prev and move
                 * base so that it's right below new next and then
                 * recheck.
                 */
                if (prev && prev->va_end > base + start) {
                        next = prev;
                        prev = node_to_va(rb_prev(&next->rb_node));
                        base = pvm_determine_end(&next, &prev, align) - end;
                        term_area = area;
                        continue;
                }

                /*
                 * This area fits, move on to the previous one. If
                 * the previous one is the terminal one, we're done.
                 */
                area = (area + nr_vms - 1) % nr_vms;
                if (area == term_area)
                        break;
                start = offsets[area];
                end = start + sizes[area];
                pvm_find_next_prev(base + end, &next, &prev);
        }
found:
        /* we've found a fitting base, insert all va's */
        for (area = 0; area < nr_vms; area++) {
                struct vmap_area *va = vas[area];

                va->va_start = base + offsets[area];
                va->va_end = va->va_start + sizes[area];
                __insert_vmap_area(va);
        }

        vmap_area_pcpu_hole = base + offsets[last_area];

        spin_unlock(&vmap_area_lock);

        /* insert all vm's */
        for (area = 0; area < nr_vms; area++)
                insert_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
                                  pcpu_get_vm_areas);

        kfree(vas);
        return vms;

err_free:
        for (area = 0; area < nr_vms; area++) {
                if (vas)
                        kfree(vas[area]);
                if (vms)
                        kfree(vms[area]);
        }
        kfree(vas);
        kfree(vms);
        return NULL;
}
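
/*
 * Example usage (an illustrative sketch with hypothetical values):
 * two congruent 16KB areas, the second offset 1MB above the first, as
 * the percpu first-chunk setup code might request:
 *
 *	unsigned long offsets[] = { 0, 1UL << 20 };
 *	size_t sizes[] = { 16384, 16384 };
 *	struct vm_struct **vms;
 *
 *	vms = pcpu_get_vm_areas(offsets, sizes, 2, PAGE_SIZE, GFP_KERNEL);
 *	if (!vms)
 *		return -ENOMEM;
 *	...
 *	pcpu_free_vm_areas(vms, 2);
 */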
/**
 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
 * @nr_vms: the number of allocated areas
 *
 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
 */
void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
        int i;

        for (i = 0; i < nr_vms; i++)
                free_vm_area(vms[i]);
        kfree(vms);
}
#ifdef CONFIG_PROC_FS
static void *s_start(struct seq_file *m, loff_t *pos)
{
        loff_t n = *pos;
        struct vm_struct *v;

        read_lock(&vmlist_lock);
        v = vmlist;
        while (n > 0 && v) {
                n--;
                v = v->next;
        }
        if (!n)
                return v;

        return NULL;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
        struct vm_struct *v = p;

        ++*pos;
        return v->next;
}

static void s_stop(struct seq_file *m, void *p)
{
        read_unlock(&vmlist_lock);
}
static void show_numa_info(struct seq_file *m, struct vm_struct *v)
{
        if (NUMA_BUILD) {
                unsigned int nr, *counters = m->private;

                if (!counters)
                        return;

                memset(counters, 0, nr_node_ids * sizeof(unsigned int));

                for (nr = 0; nr < v->nr_pages; nr++)
                        counters[page_to_nid(v->pages[nr])]++;

                for_each_node_state(nr, N_HIGH_MEMORY)
                        if (counters[nr])
                                seq_printf(m, " N%u=%u", nr, counters[nr]);
        }
}

static int s_show(struct seq_file *m, void *p)
{
        struct vm_struct *v = p;

        seq_printf(m, "0x%p-0x%p %7ld",
                   v->addr, v->addr + v->size, v->size);

        if (v->caller) {
                char buff[KSYM_SYMBOL_LEN];

                seq_putc(m, ' ');
                sprint_symbol(buff, (unsigned long)v->caller);
                seq_puts(m, buff);
        }

        if (v->nr_pages)
                seq_printf(m, " pages=%d", v->nr_pages);

        if (v->phys_addr)
                seq_printf(m, " phys=%lx", v->phys_addr);

        if (v->flags & VM_IOREMAP)
                seq_printf(m, " ioremap");

        if (v->flags & VM_ALLOC)
                seq_printf(m, " vmalloc");

        if (v->flags & VM_MAP)
                seq_printf(m, " vmap");

        if (v->flags & VM_USERMAP)
                seq_printf(m, " user");

        if (v->flags & VM_VPAGES)
                seq_printf(m, " vpages");

        show_numa_info(m, v);
        seq_putc(m, '\n');
        return 0;
}
static const struct seq_operations vmalloc_op = {
        .start = s_start,
        .next = s_next,
        .stop = s_stop,
        .show = s_show,
};

static int vmalloc_open(struct inode *inode, struct file *file)
{
        unsigned int *ptr = NULL;
        int ret;

        if (NUMA_BUILD)
                ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
        ret = seq_open(file, &vmalloc_op);
        if (!ret) {
                struct seq_file *m = file->private_data;
                m->private = ptr;
        } else
                kfree(ptr);
        return ret;
}

static const struct file_operations proc_vmalloc_operations = {
        .open = vmalloc_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release_private,
};

static int __init proc_vmalloc_init(void)
{
        proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
        return 0;
}
module_init(proc_vmalloc_init);
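
/*
 * Illustrative /proc/vmallocinfo line, following the s_show() format
 * above (the address range and symbol are made up):
 *
 *	0xf8a00000-0xf8a05000   20480 ioremap_caller+0x8c/0xb0 phys=dd000000 ioremap
 *
 * One line per vm_struct: the virtual range and size, the caller
 * symbol when recorded, then optional pages=/phys= fields, the area
 * type flags, and per-node page counts on NUMA builds.
 */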
#endif