/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>

DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
						unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
						unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

void unmap_vm_area(struct vm_struct *area)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range((unsigned long) area->addr, end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
{
	pte_t *pte;

	pte = pte_alloc_kernel(&init_mm, pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*pages)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, pgprot_t prot, struct page ***pages)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	spin_lock(&init_mm.page_table_lock);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	spin_unlock(&init_mm.page_table_lock);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}

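/*
 * Illustrative sketch, not part of the original file: how a caller that
 * already owns a page array might pair get_vm_area() with map_vm_area().
 * The identifiers my_pages and my_count are hypothetical.
 *
 *	struct page **pages = my_pages;
 *	struct vm_struct *area;
 *
 *	area = get_vm_area(my_count << PAGE_SHIFT, VM_ALLOC);
 *	if (!area)
 *		return NULL;
 *	if (map_vm_area(area, PAGE_KERNEL, &pages)) {
 *		vunmap(area->addr);		(tears down the partial mapping)
 *		return NULL;
 *	}
 *	return area->addr;
 *
 * map_vm_area() advances the @pages cursor as it installs PTEs, which is
 * why a local copy of the array pointer is handed in by address.
 */
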
struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	unsigned long addr;

	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}
	addr = ALIGN(start, align);
	size = PAGE_ALIGN(size);

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		return NULL;

	if (unlikely(!size)) {
		kfree(area);
		return NULL;
	}

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if ((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size +
					     (unsigned long)tmp->addr, align);
			continue;
		}
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
			goto out;
	}

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
	return NULL;
}

/**
 *	get_vm_area  -  reserve a contiguous kernel virtual area
 *	@size:		size of the area
 *	@flags:		%VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *	Search an area of @size in the kernel virtual mapping area,
 *	and reserve it for our purposes.  Returns the area descriptor
 *	on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}

/* Caller must hold vmlist_lock */
struct vm_struct *__remove_vm_area(void *addr)
{
	struct vm_struct **p, *tmp;

	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr == addr)
			goto found;
	}
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;

	/*
	 * Remove the guard page.
	 */
	tmp->size -= PAGE_SIZE;
	return tmp;
}

/**
 *	remove_vm_area  -  find and remove a contiguous kernel virtual area
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and remove it.
 *	This function returns the found VM area, but using it is NOT safe
 *	on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(void *addr)
{
	struct vm_struct *v;

	write_lock(&vmlist_lock);
	v = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);
	return v;
}

void __vunmap(void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		WARN_ON(1);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		WARN_ON(1);
		return;
	}

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			if (unlikely(!area->pages[i]))
				BUG();
			__free_page(area->pages[i]);
		}

		if (area->nr_pages > PAGE_SIZE/sizeof(struct page *))
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 *	vfree  -  release memory allocated by vmalloc()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr, as
 *	obtained from vmalloc(), vmalloc_32() or __vmalloc().
 *
 *	May not be called in interrupt context.
 */
void vfree(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 *	vunmap  -  release virtual mapping obtained by vmap()
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr,
 *	which was created from the page array passed to vmap().
 *
 *	May not be called in interrupt context.
 */
void vunmap(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 *	vmap  -  map an array of pages into virtually contiguous space
 *	@pages:		array of page pointers
 *	@count:		number of pages to map
 *	@flags:		vm_area->flags
 *	@prot:		page protection for the mapping
 *
 *	Maps @count pages from @pages into contiguous kernel virtual
 *	space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area((count << PAGE_SHIFT), flags);
	if (!area)
		return NULL;
	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);

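/*
 * Illustrative sketch, an assumption rather than code from this file:
 * mapping two freshly allocated pages into one virtually contiguous
 * buffer with vmap(), then tearing the mapping down with vunmap().
 * Error checking of alloc_page() is elided for brevity.
 *
 *	struct page *pages[2];
 *	void *buf;
 *
 *	pages[0] = alloc_page(GFP_KERNEL);
 *	pages[1] = alloc_page(GFP_KERNEL);
 *	buf = vmap(pages, 2, 0, PAGE_KERNEL);	(no special vm flags)
 *	...
 *	vunmap(buf);
 *
 * vunmap() only removes the virtual mapping; the caller still owns the
 * pages and must release them with __free_page() afterwards.
 */
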
void *__vmalloc_area(struct vm_struct *area, unsigned int __nocast gfp_mask,
			pgprot_t prot)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE)
		pages = __vmalloc(array_size, gfp_mask, PAGE_KERNEL);
	else
		pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}
	memset(area->pages, 0, array_size);

	for (i = 0; i < area->nr_pages; i++) {
		area->pages[i] = alloc_page(gfp_mask);
		if (unlikely(!area->pages[i])) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

/**
 *	__vmalloc  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags.  Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask, pgprot_t prot)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = get_vm_area(size, VM_ALLOC);
	if (!area)
		return NULL;

	return __vmalloc_area(area, gfp_mask, prot);
}
EXPORT_SYMBOL(__vmalloc);

/**
 *	vmalloc  -  allocate virtually contiguous memory
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);

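/*
 * Illustrative sketch, an assumption rather than code from this file:
 * the typical vmalloc()/vfree() pattern for a large buffer that only
 * needs to be virtually contiguous.
 *
 *	char *buf = vmalloc(1 << 20);	(1 MiB; may sleep)
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);			(never from interrupt context)
 */
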
#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
 *	@size:		allocation size
 *
 *	Kernel-internal function to allocate enough pages to cover @size
 *	from the page level allocator and map them into contiguous and
 *	executable kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 *	vmalloc_32  -  allocate virtually contiguous memory (32-bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32-bit physically addressable pages to cover @size
 *	from the page level allocator and map them into contiguous kernel
 *	virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

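/*
 * Illustrative sketch, an assumption rather than code from this file:
 * copying out of the vmalloc area with vread().  The source pointer
 * some_vmalloc_address is hypothetical.
 *
 *	char kbuf[64];
 *	long n = vread(kbuf, some_vmalloc_address, sizeof(kbuf));
 *
 * n counts every byte placed in kbuf; bytes in the gaps between vm
 * areas read back as zeroes rather than faulting, which is what makes
 * vread() usable on loosely validated kernel addresses.
 */
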
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}