/*
 * arch/i386/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#define ISA_START_ADDRESS	0xa0000
#define ISA_END_ADDRESS		0x100000

static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, unsigned long phys_addr, unsigned long flags)
{
	pte_t *pte;
	unsigned long pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel(&init_mm, pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		/* The range must be unmapped on entry: install one new PTE per page. */
		BUG_ON(!pte_none(*pte));
		set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
					_PAGE_DIRTY | _PAGE_ACCESSED | flags)));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, unsigned long phys_addr, unsigned long flags)
{
	pmd_t *pmd;
	unsigned long next;

	phys_addr -= addr;
	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, flags))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, unsigned long phys_addr, unsigned long flags)
{
	pud_t *pud;
	unsigned long next;

	phys_addr -= addr;
	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, flags))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int ioremap_page_range(unsigned long addr,
		unsigned long end, unsigned long phys_addr, unsigned long flags)
{
	pgd_t *pgd;
	unsigned long next;
	int err;

	BUG_ON(addr >= end);
	flush_cache_all();
	/* Keep only the phys-virt delta; each level below recovers the
	 * physical address of its chunk as "phys_addr + addr". */
	phys_addr -= addr;
	pgd = pgd_offset_k(addr);
	spin_lock(&init_mm.page_table_lock);
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_pud_range(pgd, addr, next, phys_addr + addr, flags);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();
	return err;
}

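/*
 * Worked example of the offset arithmetic above (illustrative values only,
 * not part of the original file): to map 8 KiB of MMIO at physical
 * 0xfebf0000 into a virtual area starting at 0xe0800000,
 * ioremap_page_range() first computes phys_addr -= addr, i.e. the constant
 * delta 0xfebf0000 - 0xe0800000.  Each lower level is then handed
 * "delta + addr", which is exactly the physical address backing the chunk
 * it is filling, and the innermost loop installs pfn 0xfebf0 for the first
 * page and 0xfebf1 for the second.  Repeating the "phys_addr -= addr"
 * trick at every level keeps the per-chunk cost to a single addition.
 */
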
/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void __iomem * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (void __iomem *) phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr <= virt_to_phys(high_memory - 1)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long) addr,
			(unsigned long) addr + size, phys_addr, flags)) {
		vunmap((void __force *) addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);

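/*
 * Usage sketch (not part of the original file): drivers normally reach this
 * code through the ioremap() wrapper in <asm/io.h> rather than by calling
 * __ioremap() directly.  A PCI driver mapping BAR 0 might look roughly like
 * the following; STATUS_REG_OFFSET is a hypothetical register offset, not a
 * symbol defined by the kernel:
 *
 *	void __iomem *regs;
 *	unsigned int status;
 *
 *	regs = ioremap(pci_resource_start(pdev, 0),
 *		       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + STATUS_REG_OFFSET);
 *	iounmap(regs);
 */
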
/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
	unsigned long last_addr;
	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
	if (!p)
		return p;

	/* Guaranteed to be > phys_addr, as per __ioremap() */
	last_addr = phys_addr + size - 1;

	if (last_addr < virt_to_phys(high_memory) - 1) {
		struct page *ppage = virt_to_page(__va(phys_addr));
		unsigned long npages;

		phys_addr &= PAGE_MASK;

		/* This might overflow and become zero.. */
		last_addr = PAGE_ALIGN(last_addr);

		/* .. but that's ok, because modulo-2**n arithmetic will make
		 * the page-aligned "last - first" come out right.
		 */
		npages = (last_addr - phys_addr) >> PAGE_SHIFT;

		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
			iounmap(p);
			p = NULL;
		}
		global_flush_tlb();
	}

	return p;
}
EXPORT_SYMBOL(ioremap_nocache);

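/*
 * Usage sketch (not part of the original file): control registers that must
 * not be read-cached or write-combined are mapped with ioremap_nocache()
 * and released with iounmap().  CTRL_PHYS_BASE, CTRL_SIZE, CTRL_REG and
 * RESET_BIT below are hypothetical placeholders, not kernel symbols:
 *
 *	void __iomem *ctrl = ioremap_nocache(CTRL_PHYS_BASE, CTRL_SIZE);
 *
 *	if (!ctrl)
 *		return -ENOMEM;
 *	writel(RESET_BIT, ctrl + CTRL_REG);
 *	iounmap(ctrl);
 */
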
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space.  So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
			addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	write_lock(&vmlist_lock);
	p = __remove_vm_area((void *)(PAGE_MASK & (unsigned long __force)addr));
	if (!p) {
		printk(KERN_WARNING "iounmap: bad address %p\n", addr);
		dump_stack();
		goto out_unlock;
	}

	if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
		change_page_attr(virt_to_page(__va(p->phys_addr)),
				 p->size >> PAGE_SHIFT,
				 PAGE_KERNEL);
		global_flush_tlb();
	}
out_unlock:
	write_unlock(&vmlist_lock);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return phys_to_virt(phys_addr);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS)
		return NULL;

	/*
	 * Ok, go for it..
	 */
	idx = FIX_BTMAP_BEGIN;
	while (nrpages > 0) {
		set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
}

void __init bt_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
		return;
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN;
	while (nrpages > 0) {
		clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}

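/*
 * Usage sketch (not part of the original file): bt_ioremap()/bt_iounmap()
 * exist for early boot code that needs a temporary mapping before the
 * normal ioremap()/vmalloc machinery is available.  The mapping is carved
 * out of the small FIX_BTMAP fixmap window, so it must be short-lived and
 * at most NR_FIX_BTMAPS pages.  A caller in an __init path might look like
 * the following; TABLE_PHYS, TABLE_LEN and parse_firmware_table() are
 * hypothetical placeholders:
 *
 *	void *tbl = bt_ioremap(TABLE_PHYS, TABLE_LEN);
 *
 *	if (tbl) {
 *		parse_firmware_table(tbl, TABLE_LEN);
 *		bt_iounmap(tbl, TABLE_LEN);
 *	}
 */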