boot_ioremap.c

/*
 * arch/i386/mm/boot_ioremap.c
 *
 * Re-map functions for early boot-time before paging_init() when the
 * boot-time pagetables are still in use
 *
 * Written by Dave Hansen <haveblue@us.ibm.com>
 */

/*
 * We need to use the 2-level pagetable functions, but CONFIG_X86_PAE
 * keeps that from happening.  If anyone has a better way, I'm listening.
 *
 * boot_pte_t is defined only if this all works correctly
 */
#undef CONFIG_X86_PAE
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <linux/init.h>
#include <linux/stddef.h>

/*
 * I'm cheating here.  It is known that the two boot PTE pages are
 * allocated next to each other.  I'm pretending that they're just
 * one big array.
 */

#define BOOT_PTE_PTRS (PTRS_PER_PTE*2)
#define boot_pte_index(address) \
		(((address) >> PAGE_SHIFT) & (BOOT_PTE_PTRS - 1))
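
/*
 * Worked example (using the non-PAE i386 values in effect here, PAGE_SHIFT
 * = 12 and PTRS_PER_PTE = 1024, so BOOT_PTE_PTRS = 2048 and the mask is
 * 0x7ff):
 *
 *	boot_pte_index(0xc0000000) = (0xc0000000 >> 12) & 0x7ff = 0
 *	boot_pte_index(0xc0400000) = (0xc0400000 >> 12) & 0x7ff = 1024
 *
 * i.e. the low 11 bits of the page-frame index select one of the 2048 PTEs
 * spanning the two adjacent boot PTE pages treated as one array.
 */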

static inline boot_pte_t* boot_vaddr_to_pte(void *address)
{
	boot_pte_t* boot_pg = (boot_pte_t*)pg0;
	return &boot_pg[boot_pte_index((unsigned long)address)];
}

/*
 * This is only for a caller who is clever enough to page-align
 * phys_addr and virtual_source, and who also has a preference
 * about which virtual address to steal ptes from
 */
static void __boot_ioremap(unsigned long phys_addr, unsigned long nrpages,
			   void* virtual_source)
{
	boot_pte_t* pte;
	int i;
	char *vaddr = virtual_source;

	pte = boot_vaddr_to_pte(virtual_source);
	for (i = 0; i < nrpages; i++, phys_addr += PAGE_SIZE, pte++) {
		set_pte(pte, pfn_pte(phys_addr >> PAGE_SHIFT, PAGE_KERNEL));
		__flush_tlb_one(&vaddr[i*PAGE_SIZE]);
	}
}

/* the virtual space we're going to remap comes from this array */
#define BOOT_IOREMAP_PAGES 4
#define BOOT_IOREMAP_SIZE (BOOT_IOREMAP_PAGES*PAGE_SIZE)
static __initdata char boot_ioremap_space[BOOT_IOREMAP_SIZE]
		__attribute__ ((aligned (PAGE_SIZE)));

/*
 * This only applies to things which need to ioremap before paging_init();
 * bt_ioremap() and plain ioremap() are both useless at this point.
 *
 * When used, we're still using the boot-time pagetables, which only
 * have 2 PTE pages mapping the first 8MB
 *
 * There is no unmap.  The boot-time PTE pages aren't used after boot.
 * If you really want the space back, just remap it yourself:
 * boot_ioremap(&boot_ioremap_space - PAGE_OFFSET, BOOT_IOREMAP_SIZE)
 */
__init void* boot_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long last_addr, offset;
	unsigned int nrpages;

	last_addr = phys_addr + size - 1;

	/* page align the requested address */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr) - phys_addr;

	nrpages = size >> PAGE_SHIFT;
	if (nrpages > BOOT_IOREMAP_PAGES)
		return NULL;

	__boot_ioremap(phys_addr, nrpages, boot_ioremap_space);

	return &boot_ioremap_space[offset];
}
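
/*
 * A minimal usage sketch (illustration only, not part of this file): an
 * early-boot caller that wants to read a firmware table located by physical
 * address before paging_init() has run, when bt_ioremap() and ioremap() are
 * both still useless.  The early_read_table() wrapper and the
 * check_table_signature() helper are hypothetical names; only boot_ioremap(),
 * BOOT_IOREMAP_SIZE and their semantics come from this file.  Note the
 * 4-page limit on the mapping and the absence of any unmap call.
 */
#if 0	/* example only */
static int __init early_read_table(unsigned long table_phys,
				   unsigned long table_len)
{
	char *table;

	if (table_len > BOOT_IOREMAP_SIZE)
		return -1;	/* cannot fit in the 4-page remap window */

	table = boot_ioremap(table_phys, table_len);
	if (!table)
		return -1;	/* unaligned request spanned > 4 pages */

	/*
	 * The mapping is only valid while the boot-time pagetables are in
	 * use, so copy out anything that is needed after paging_init().
	 */
	return check_table_signature(table, table_len);
}
#endif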