ioremap.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001, 2002 Ralf Baechle
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.  Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
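
/*
 * Example usage (a sketch, not part of this file): callers normally reach
 * this through the ioremap()/iounmap() wrappers from <asm/io.h> rather than
 * calling __ioremap() directly.  BASE, LEN and CTRL_REG are hypothetical:
 *
 *	void __iomem *regs = ioremap(BASE, LEN);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + CTRL_REG);
 *	iounmap(regs);
 */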
/*
 * True if (addr) lies below 512MB (no bits above bit 28 set) and is
 * therefore reachable through the fixed KSEG1 window.
 */
#define IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL))
void __iomem * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags)
{
	struct vm_struct *area;
	unsigned long offset;
	phys_t last_addr;
	void *addr;
	pgprot_t pgprot;

	phys_addr = fixup_bigphys_addr(phys_addr, size);

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map uncached objects in the low 512mb of address space using KSEG1,
	 * otherwise map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(last_addr) &&
	    flags == _CACHE_UNCACHED)
		return (void __iomem *) CKSEG1ADDR(phys_addr);
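
	/*
	 * Illustration (not from the original source): on a 32-bit kernel
	 * CKSEG1 is 0xa0000000, so an uncached request for physical
	 * 0x1fc00000 simply returns 0xbfc00000 here.  No page tables are
	 * set up, which is why __iounmap() below has nothing to undo for
	 * KSEG1 addresses.
	 */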
	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 * Only reserved pages (e.g. memory carved out at boot for a device)
	 * may be remapped; anything still owned by the page allocator makes
	 * us fail the request.
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}
	pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
			  | __WRITEABLE | flags);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
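
	/*
	 * Worked example (illustrative only, assuming 4KB pages): a request
	 * for phys_addr 0x10000042 with size 0x10 yields offset = 0x42,
	 * phys_addr = 0x10000000 and size = 0x1000; the offset is added
	 * back onto the returned pointer below.
	 */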
	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char *)addr);
}
/*
 * True if (addr) was handed out from the fixed KSEG1 window rather than
 * from vmalloc space.
 */
#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)
void __iounmap(const volatile void __iomem *addr)
{
	struct vm_struct *p;

	if (IS_KSEG1(addr))
		return;

	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
	if (!p)
		printk(KERN_ERR "iounmap: bad address %p\n", addr);

	kfree(p);
}
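
/*
 * Sketch of the intended pairing (illustrative only): an uncached mapping
 * in the low 512MB goes through KSEG1, so unmapping it is a no-op:
 *
 *	void __iomem *io = __ioremap(0x1fc00000, 0x1000, _CACHE_UNCACHED);
 *	__iounmap(io);
 *
 * Anything else came from get_vm_area() above, so the vm_struct is torn
 * down and freed here.
 */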

EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__iounmap);