/*
 * arch/cris/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * Needed for memory-mapped I/O devices mapped outside our normal DRAM
 * window (that is, all memory-mapped I/O devices).
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * CRIS-port by Axis Communications AB
 */

#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/arch/memmap.h>
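
/*
 * Innermost level: write one PTE per page for the part of the range that
 * falls inside a single PMD entry.  Finding an already-populated PTE here
 * means someone else mapped this virtual range before us, which is a bug.
 */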
extern inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
        unsigned long phys_addr, pgprot_t prot)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        if (address >= end)
                BUG();
        do {
                if (!pte_none(*pte)) {
                        printk("remap_area_pte: page already exists\n");
                        BUG();
                }
                set_pte(pte, mk_pte_phys(phys_addr, prot));
                address += PAGE_SIZE;
                phys_addr += PAGE_SIZE;
                pte++;
        } while (address && (address < end));
}
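
/*
 * Middle level: fill in the PTEs for the part of the range that falls
 * inside a single PGD entry, walking the PMD entries and allocating a
 * PTE page for each one as needed.
 */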
static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
        unsigned long phys_addr, pgprot_t prot)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        phys_addr -= address;
        if (address >= end)
                BUG();
        do {
                pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address, address + phys_addr, prot);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address && (address < end));
        return 0;
}
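
/*
 * Top level: map the physical range [phys_addr, phys_addr + size) at the
 * virtual range [address, address + size) in the kernel page tables
 * (init_mm), allocating the intermediate levels as needed.  Note the
 * "phys_addr -= address" bias used here and in remap_area_pmd(): the lower
 * levels recover the physical address for any virtual address simply by
 * adding the two back together.
 */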
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
                            unsigned long size, pgprot_t prot)
{
        int error;
        pgd_t * dir;
        unsigned long end = address + size;

        phys_addr -= address;
        dir = pgd_offset(&init_mm, address);
        flush_cache_all();
        if (address >= end)
                BUG();
        spin_lock(&init_mm.page_table_lock);
        do {
                pud_t *pud;
                pmd_t *pmd;

                error = -ENOMEM;
                pud = pud_alloc(&init_mm, dir, address);
                if (!pud)
                        break;
                pmd = pmd_alloc(&init_mm, pud, address);
                if (!pmd)
                        break;
                if (remap_area_pmd(pmd, address, end - address,
                                   phys_addr + address, prot))
                        break;
                error = 0;
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        spin_unlock(&init_mm.page_table_lock);
        flush_tlb_all();
        return error;
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ioremap_prot(unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
        void __iomem * addr;
        struct vm_struct * area;
        unsigned long offset, last_addr;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        addr = (void __iomem *)area->addr;
        if (remap_area_pages((unsigned long) addr, phys_addr, size, prot)) {
                vfree((void __force *)addr);
                return NULL;
        }
        return (void __iomem *) (offset + (char __iomem *)addr);
}
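
/*
 * Worked example of the alignment handling above (illustrative numbers
 * only; 4 kB pages are assumed purely for the arithmetic, the real value
 * comes from PAGE_SIZE):
 *
 *   __ioremap_prot(0x80004123, 0x10, prot)
 *     offset    = 0x80004123 & ~PAGE_MASK = 0x123
 *     phys_addr = 0x80004123 &  PAGE_MASK = 0x80004000
 *     last_addr = 0x80004123 + 0x10 - 1   = 0x80004132
 *     size      = PAGE_ALIGN(0x80004133) - 0x80004000 = 0x1000
 *
 * so one whole page is mapped and the caller gets area->addr + 0x123,
 * pointing at the exact byte that was asked for.
 */
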
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
        return __ioremap_prot(phys_addr, size,
                              __pgprot(_PAGE_PRESENT | __READABLE |
                                       __WRITEABLE | _PAGE_GLOBAL |
                                       _PAGE_KERNEL | flags));
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * Must be freed with iounmap; an illustrative usage sketch follows
 * iounmap() at the end of this file.
 */
void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
        return __ioremap(phys_addr | MEM_NON_CACHEABLE, size, 0);
}

void iounmap(volatile void __iomem *addr)
{
        /*
         * Only mappings that actually came from get_vm_area() end up above
         * high_memory; statically mapped addresses are left alone.
         */
        if (addr > high_memory)
                return vfree((void *) (PAGE_MASK & (unsigned long) addr));
}
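
/*
 * Illustrative usage sketch (not part of the original file, kept disabled):
 * how a driver might map a device register window uncached, access it, and
 * tear the mapping down again.  The bus address, window size and register
 * offsets below are made-up example values.
 */
#if 0
static void __iomem *example_regs;

static int example_setup(void)
{
        /* 0x30010000 and 0x100 are hypothetical bus address / window size */
        example_regs = ioremap_nocache(0x30010000, 0x100);
        if (!example_regs)
                return -ENOMEM;

        /* Talk to the device only through the cookie ioremap returned */
        writel(0x1, example_regs + 0x04);
        (void) readl(example_regs + 0x00);
        return 0;
}

static void example_teardown(void)
{
        /* Every successful ioremap_*() must be balanced by iounmap() */
        iounmap(example_regs);
        example_regs = NULL;
}
#endif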