/*
 * arch/s390/mm/ioremap.c
 *
 * S390 version
 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 * Derived from "arch/i386/mm/extable.c"
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 */
  15. #include <linux/vmalloc.h>
  16. #include <linux/mm.h>
  17. #include <asm/io.h>
  18. #include <asm/pgalloc.h>
  19. #include <asm/cacheflush.h>
  20. #include <asm/tlbflush.h>
  21. static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
  22. unsigned long phys_addr, unsigned long flags)
  23. {
  24. unsigned long end;
  25. unsigned long pfn;
  26. address &= ~PMD_MASK;
  27. end = address + size;
  28. if (end > PMD_SIZE)
  29. end = PMD_SIZE;
  30. if (address >= end)
  31. BUG();
  32. pfn = phys_addr >> PAGE_SHIFT;
  33. do {
  34. if (!pte_none(*pte)) {
  35. printk("remap_area_pte: page already exists\n");
  36. BUG();
  37. }
  38. set_pte(pte, pfn_pte(pfn, __pgprot(flags)));
  39. address += PAGE_SIZE;
  40. pfn++;
  41. pte++;
  42. } while (address && (address < end));
  43. }
  44. static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
  45. unsigned long phys_addr, unsigned long flags)
  46. {
  47. unsigned long end;
  48. address &= ~PGDIR_MASK;
  49. end = address + size;
  50. if (end > PGDIR_SIZE)
  51. end = PGDIR_SIZE;
  52. phys_addr -= address;
  53. if (address >= end)
  54. BUG();
  55. do {
  56. pte_t * pte = pte_alloc_kernel(pmd, address);
  57. if (!pte)
  58. return -ENOMEM;
  59. remap_area_pte(pte, address, end - address, address + phys_addr, flags);
  60. address = (address + PMD_SIZE) & PMD_MASK;
  61. pmd++;
  62. } while (address && (address < end));
  63. return 0;
  64. }
  65. static int remap_area_pages(unsigned long address, unsigned long phys_addr,
  66. unsigned long size, unsigned long flags)
  67. {
  68. int error;
  69. pgd_t * dir;
  70. unsigned long end = address + size;
  71. phys_addr -= address;
  72. dir = pgd_offset(&init_mm, address);
  73. flush_cache_all();
  74. if (address >= end)
  75. BUG();
  76. do {
  77. pmd_t *pmd;
  78. pmd = pmd_alloc(&init_mm, dir, address);
  79. error = -ENOMEM;
  80. if (!pmd)
  81. break;
  82. if (remap_area_pmd(pmd, address, end - address,
  83. phys_addr + address, flags))
  84. break;
  85. error = 0;
  86. address = (address + PGDIR_SIZE) & PGDIR_MASK;
  87. dir++;
  88. } while (address && (address < end));
  89. flush_tlb_all();
  90. return 0;
  91. }
/*
 * Generic mapping function (not visible outside):
 */
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 */
  100. void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
  101. {
  102. void * addr;
  103. struct vm_struct * area;
  104. if (phys_addr < virt_to_phys(high_memory))
  105. return phys_to_virt(phys_addr);
  106. if (phys_addr & ~PAGE_MASK)
  107. return NULL;
  108. size = PAGE_ALIGN(size);
  109. if (!size || size > phys_addr + size)
  110. return NULL;
  111. area = get_vm_area(size, VM_IOREMAP);
  112. if (!area)
  113. return NULL;
  114. addr = area->addr;
  115. if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
  116. vfree(addr);
  117. return NULL;
  118. }
  119. return addr;
  120. }
  121. void iounmap(void *addr)
  122. {
  123. if (addr > high_memory)
  124. vfree(addr);
  125. }