memc.c
  1. /*
  2. * linux/arch/arm26/mm/memc.c
  3. *
  4. * Copyright (C) 1998-2000 Russell King
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 as
  8. * published by the Free Software Foundation.
  9. *
  10. * Page table sludge for older ARM processor architectures.
  11. */
  12. #include <linux/sched.h>
  13. #include <linux/mm.h>
  14. #include <linux/init.h>
  15. #include <linux/bootmem.h>
  16. #include <asm/pgtable.h>
  17. #include <asm/pgalloc.h>
  18. #include <asm/page.h>
  19. #include <asm/memory.h>
  20. #include <asm/hardware.h>
  21. #include <asm/map.h>
  22. #define MEMC_TABLE_SIZE (256*sizeof(unsigned long))
  23. kmem_cache_t *pte_cache, *pgd_cache;
  24. int page_nr;
  25. /*
  26. * Allocate space for a page table and a MEMC table.
  27. * Note that we place the MEMC
  28. * table before the page directory. This means we can
  29. * easily get to both tightly-associated data structures
  30. * with a single pointer.
  31. */
  32. static inline pgd_t *alloc_pgd_table(void)
  33. {
  34. void *pg2k = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
  35. if (pg2k)
  36. pg2k += MEMC_TABLE_SIZE;
  37. return (pgd_t *)pg2k;
  38. }
  39. /*
  40. * Free a page table. this function is the counterpart to get_pgd_slow
  41. * below, not alloc_pgd_table above.
  42. */
  43. void free_pgd_slow(pgd_t *pgd)
  44. {
  45. unsigned long tbl = (unsigned long)pgd;
  46. tbl -= MEMC_TABLE_SIZE;
  47. kmem_cache_free(pgd_cache, (void *)tbl);
  48. }
/*
 * Allocate a new pgd and fill it in ready for use
 *
 * A new tasks pgd is completely empty (all pages !present) except for:
 *
 * o The machine vectors at virtual address 0x0
 * o The vmalloc region at the top of address space
 *
 */
#define FIRST_KERNEL_PGD_NR (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)

pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	/* MEMC table + pgd come from the pgd slab cache; the returned
	 * pointer is already offset past the MEMC table. */
	new_pgd = alloc_pgd_table();
	if (!new_pgd)
		goto no_pgd;

	/*
	 * On ARM, first page must always be allocated since it contains
	 * the machine vectors.
	 */
	new_pmd = pmd_alloc(mm, new_pgd, 0);
	if (!new_pmd)
		goto no_pmd;

	new_pte = pte_alloc_map(mm, new_pmd, 0);
	if (!new_pte)
		goto no_pte;

	/* Copy the vector-page mapping (virtual address 0) from init_mm. */
	init_pgd = pgd_offset(&init_mm, 0);
	init_pmd = pmd_offset(init_pgd, 0);
	init_pte = pte_offset(init_pmd, 0);

	set_pte(new_pte, *init_pte);
	pte_unmap(new_pte);

	/*
	 * the page table entries are zeroed
	 * when the table is created. (see the cache_ctor functions below)
	 * Now we need to plonk the kernel (vmalloc) area at the end of
	 * the address space. We copy this from the init thread, just like
	 * the init_pte we copied above...
	 */
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
		(PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	/* update MEMC tables */
	cpu_memc_update_all(new_pgd);
	return new_pgd;

	/* Error unwind: release resources in reverse order of acquisition. */
no_pte:
	pmd_free(new_pmd);
no_pmd:
	free_pgd_slow(new_pgd);
no_pgd:
	return NULL;
}
/*
 * No special code is required here.
 * (On this architecture the identity mapping needed for reboot is
 * already in place, so this hook is intentionally a no-op.)
 */
void setup_mm_for_reboot(char mode)
{
}
/*
 * This contains the code to setup the memory map on an ARM2/ARM250/ARM3
 *  o swapper_pg_dir = 0x0207d000
 *  o kernel proper starts at 0x0208000
 *  o create (allocate) a pte to contain the machine vectors
 *  o populate the pte (points to 0x02078000) (FIXME - is it zeroed?)
 *  o populate the init tasks page directory (pgd) with the new pte
 *  o zero the rest of the init tasks pgdir (FIXME - what about vmalloc?!)
 */
void __init memtable_init(struct meminfo *mi)
{
	pte_t *pte;
	int i;

	/* Record the number of usable page frames for the MEMC code. */
	page_nr = max_low_pfn;

	/* Boot-time allocation of one pte table for the vector page. */
	pte = alloc_bootmem_low_pages(PTRS_PER_PTE * sizeof(pte_t));
	/* Map slot 0 read-only at PAGE_OFFSET + SCREEN_SIZE -- presumably
	 * the machine-vectors page; see the 0x02078000 note above. */
	pte[0] = mk_pte_phys(PAGE_OFFSET + SCREEN_SIZE, PAGE_READONLY);
	pmd_populate(&init_mm, pmd_offset(swapper_pg_dir, 0), pte);

	/* Clear every remaining pgd slot; entry 0 was populated above.
	 * NOTE(review): the vmalloc FIXME above suggests kernel-area
	 * entries are filled in elsewhere -- confirm against callers. */
	for (i = 1; i < PTRS_PER_PGD; i++)
		pgd_val(swapper_pg_dir[i]) = 0;
}
/*
 * No I/O mappings are needed on this architecture, so the generic
 * hook is an intentional no-op.
 */
void __init iotable_init(struct map_desc *io_desc)
{
	/* nothing to do */
}
/*
 * We never have holes in the memmap, so there is nothing to punch out.
 */
void __init create_memmap_holes(struct meminfo *mi)
{
}
  137. static void pte_cache_ctor(void *pte, kmem_cache_t *cache, unsigned long flags)
  138. {
  139. memzero(pte, sizeof(pte_t) * PTRS_PER_PTE);
  140. }
  141. static void pgd_cache_ctor(void *pgd, kmem_cache_t *cache, unsigned long flags)
  142. {
  143. memzero(pgd + MEMC_TABLE_SIZE, USER_PTRS_PER_PGD * sizeof(pgd_t));
  144. }
  145. void __init pgtable_cache_init(void)
  146. {
  147. pte_cache = kmem_cache_create("pte-cache",
  148. sizeof(pte_t) * PTRS_PER_PTE,
  149. 0, 0, pte_cache_ctor, NULL);
  150. if (!pte_cache)
  151. BUG();
  152. pgd_cache = kmem_cache_create("pgd-cache", MEMC_TABLE_SIZE +
  153. sizeof(pgd_t) * PTRS_PER_PGD,
  154. 0, 0, pgd_cache_ctor, NULL);
  155. if (!pgd_cache)
  156. BUG();
  157. }