/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"
int page_is_ram(unsigned long pagenr)
{
	resource_size_t addr, end;
	int i;

	/*
	 * A special case is the first 4Kb of memory;
	 * This is a BIOS owned area, not kernel ram, but generally
	 * not listed as such in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: Some BIOSen report the PC BIOS
	 * area (640->1Mb) as ram even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
	    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}
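
/*
 * Illustrative example of the rounding above (the E820 entry is
 * hypothetical): for an E820_RAM entry with addr = 0x9fc00 and
 * size = 0x400, with 4 KiB pages,
 *
 *	addr = (0x9fc00 + 0xfff) >> 12 = 0xa0	(start rounded up)
 *	end  = (0x9fc00 + 0x400) >> 12 = 0xa0	(end rounded down)
 *
 * leaves no pfn with addr <= pfn < end, so a partial page of RAM is
 * deliberately never reported as RAM.
 */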

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
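/*
 * Illustrative example of that conversion, with hypothetical numbers:
 * a request for phys_addr = 0xfee00004, size = 8 gives
 *
 *	last_addr = 0xfee0000b
 *	offset    = 0xfee00004 & ~PAGE_MASK             = 0x004
 *	phys_addr = 0xfee00004 & PAGE_MASK              = 0xfee00000
 *	size      = PAGE_ALIGN(last_addr + 1) - phys_addr = 0x1000
 *
 * and the caller gets back vaddr + 0x004, pointing at the requested
 * bytes inside the single page actually mapped.
 */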
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
	     (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
	     pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
				 prot_val, &new_prot_val);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		if (!is_new_memtype_allowed(phys_addr, size,
					    prot_val, new_prot_val)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			free_memtype(phys_addr, phys_addr + size);
			return NULL;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, prot_val)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	return ret_addr;
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
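
/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * mapping a PCI BAR. The device, BAR layout and register offsets are
 * hypothetical.
 */
#if 0
static void __iomem *regs;

static int example_setup(struct pci_dev *pdev)
{
	resource_size_t start = pci_resource_start(pdev, 0);
	resource_size_t len = pci_resource_len(pdev, 0);

	regs = ioremap_nocache(start, len);	/* UC- mapping of BAR 0 */
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x10);		/* hypothetical enable register */
	(void)readl(regs + 0x10);		/* read back to post the write */
	return 0;
}

static void example_teardown(void)
{
	iounmap(regs);				/* must pair with the ioremap */
}
#endif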

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
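
/*
 * Sketch of a typical write-combining use, a framebuffer fill. The
 * physical address and length are hypothetical; the fallback to an
 * uncached mapping when PAT is disabled is handled by ioremap_wc()
 * itself.
 */
#if 0
static void example_fill_fb(resource_size_t fb_phys, unsigned long fb_len)
{
	void __iomem *fb = ioremap_wc(fb_phys, fb_len);
	unsigned long i;

	if (!fb)
		return;
	for (i = 0; i < fb_len; i += 4)
		writel(0, fb + i);	/* streaming writes benefit from WC */
	iounmap(fb);
}
#endif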

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

static void __iomem *ioremap_default(resource_size_t phys_addr,
				     unsigned long size)
{
	unsigned long flags;
	void __iomem *ret;
	int err;

	/*
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	err = reserve_memtype(phys_addr, phys_addr + size,
			      _PAGE_CACHE_WB, &flags);
	if (err < 0)
		return NULL;

	ret = __ioremap_caller(phys_addr, size, flags,
			       __builtin_return_address(0));

	free_memtype(phys_addr, phys_addr + size);
	return ret;
}

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there
	 * isn't another iounmap for the same address in parallel. Reuse
	 * of the virtual address is prevented by leaving it in the
	 * global lists until we're done with it. cpa takes care of the
	 * direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_default(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
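
/*
 * Sketch of how the pair above is meant to be used, modelled on the
 * /dev/mem read path (the helper itself is hypothetical):
 */
#if 0
static int example_read_phys_byte(unsigned long phys, u8 *out)
{
	void *ptr = xlate_dev_mem_ptr(phys);

	if (!ptr)
		return -EFAULT;
	*out = *(u8 *)ptr;		/* RAM: direct __va; else temporary ioremap */
	unxlate_dev_mem_ptr(phys, ptr);	/* tears down the mapping if one was made */
	return 0;
}
#endif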

static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

void __init early_ioremap_init(void)
{
	pmd_t *pmd;
	int i;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));
		printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   phys_addr_t phys, pgprot_t prot)
{
	if (after_paging_init)
		__set_fixmap(idx, phys, prot);
	else
		__early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;

static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (!count)
		return 0;
	WARN(1, KERN_WARNING
	     "Debug warning: early ioremap leak of %d areas detected.\n",
	     count);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");

	return 1;
}
late_initcall(check_early_ioremap_leak);

static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx0, idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}
	if (slot < 0) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx): no free slot\n",
		       (u64)phys_addr, size);
		WARN_ON(1);
		return NULL;
	}
	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
		       (u64)phys_addr, size, slot);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}

/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
void __init __iomem *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}
	if (slot < 0) {
		printk(KERN_INFO "early_iounmap(%p, %08lx): slot not found\n",
		       addr, size);
		WARN_ON(1);
		return;
	}

	if (prev_size[slot] != size) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]: inconsistent size, mapped %08lx\n",
		       addr, size, slot, prev_size[slot]);
		WARN_ON(1);
		return;
	}
	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, slot);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	/*
	 * Count whole pages actually mapped: PAGE_ALIGN(offset + size),
	 * not PAGE_ALIGN(offset + size - 1), which would undercount by a
	 * page whenever offset + size is one byte past a page boundary
	 * and leave a stale fixmap pte behind.
	 */
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;
	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}
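
/*
 * Boot-time usage sketch: early_ioremap()/early_iounmap() bracket
 * short-lived mappings made before the normal ioremap machinery is up.
 * The firmware-table address and header size below are hypothetical;
 * the strict map/use/unmap pattern is what matters, since only
 * FIX_BTMAPS_SLOTS temporary slots exist.
 */
#if 0
static void __init example_read_firmware_table(resource_size_t table_phys)
{
	void __iomem *map = early_ioremap(table_phys, 64);
	u32 signature;

	if (!map)
		return;
	signature = readl(map);		/* inspect the table header */
	early_iounmap(map, 64);		/* size must match the map call */
}
#endif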