  1. /*
  2. * linux/arch/arm26/mm/memc.c
  3. *
  4. * Copyright (C) 1998-2000 Russell King
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 as
  8. * published by the Free Software Foundation.
  9. *
  10. * Page table sludge for older ARM processor architectures.
  11. */
  12. #include <linux/sched.h>
  13. #include <linux/mm.h>
  14. #include <linux/init.h>
  15. #include <linux/bootmem.h>
  16. #include <asm/pgtable.h>
  17. #include <asm/pgalloc.h>
  18. #include <asm/page.h>
  19. #include <asm/memory.h>
  20. #include <asm/hardware.h>
  21. #include <asm/map.h>
  22. #define MEMC_TABLE_SIZE (256*sizeof(unsigned long))
  23. kmem_cache_t *pte_cache, *pgd_cache;
  24. int page_nr;
  25. /*
  26. * Allocate space for a page table and a MEMC table.
  27. * Note that we place the MEMC
  28. * table before the page directory. This means we can
  29. * easily get to both tightly-associated data structures
  30. * with a single pointer.
  31. */
  32. static inline pgd_t *alloc_pgd_table(void)
  33. {
  34. void *pg2k = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
  35. if (pg2k)
  36. pg2k += MEMC_TABLE_SIZE;
  37. return (pgd_t *)pg2k;
  38. }
  39. /*
  40. * Free a page table. this function is the counterpart to get_pgd_slow
  41. * below, not alloc_pgd_table above.
  42. */
  43. void free_pgd_slow(pgd_t *pgd)
  44. {
  45. unsigned long tbl = (unsigned long)pgd;
  46. tbl -= MEMC_TABLE_SIZE;
  47. kmem_cache_free(pgd_cache, (void *)tbl);
  48. }
/*
 * Allocate a new pgd and fill it in ready for use
 *
 * A new tasks pgd is completely empty (all pages !present) except for:
 *
 * o The machine vectors at virtual address 0x0
 * o The vmalloc region at the top of address space
 *
 */
#define FIRST_KERNEL_PGD_NR (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	/* Allocate the combined MEMC-table + pgd object; returns the pgd
	 * half (see alloc_pgd_table).  Must be freed with free_pgd_slow. */
	new_pgd = alloc_pgd_table();
	if (!new_pgd)
		goto no_pgd;

	/*
	 * This lock is here just to satisfy pmd_alloc and pte_lock
	 * FIXME: I bet we could avoid taking it pretty much altogether
	 */
	spin_lock(&mm->page_table_lock);

	/*
	 * On ARM, first page must always be allocated since it contains
	 * the machine vectors.
	 */
	new_pmd = pmd_alloc(mm, new_pgd, 0);
	if (!new_pmd)
		goto no_pmd;

	new_pte = pte_alloc_kernel(mm, new_pmd, 0);
	if (!new_pte)
		goto no_pte;

	/* Copy the machine-vectors mapping (virtual address 0) from the
	 * init_mm page tables into the new address space. */
	init_pgd = pgd_offset(&init_mm, 0);
	init_pmd = pmd_offset(init_pgd, 0);
	init_pte = pte_offset(init_pmd, 0);

	set_pte(new_pte, *init_pte);

	/*
	 * the page table entries are zeroed
	 * when the table is created. (see the cache_ctor functions below)
	 * Now we need to plonk the kernel (vmalloc) area at the end of
	 * the address space. We copy this from the init thread, just like
	 * the init_pte we copied above...
	 */
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
		(PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	spin_unlock(&mm->page_table_lock);

	/* update MEMC tables */
	cpu_memc_update_all(new_pgd);
	return new_pgd;

	/* Error unwinding: drop the lock before freeing, then release
	 * whatever was allocated up to the point of failure. */
no_pte:
	spin_unlock(&mm->page_table_lock);
	pmd_free(new_pmd);
	free_pgd_slow(new_pgd);
	return NULL;

no_pmd:
	spin_unlock(&mm->page_table_lock);
	free_pgd_slow(new_pgd);
	return NULL;

no_pgd:
	return NULL;
}
/*
 * No special code is required here.
 */
/* Called on the reboot path; on this MEMC-based hardware there is no
 * identity-mapping fixup to perform, so this is intentionally empty. */
void setup_mm_for_reboot(char mode)
{
}
/*
 * This contains the code to setup the memory map on an ARM2/ARM250/ARM3
 * o swapper_pg_dir = 0x0207d000
 * o kernel proper starts at 0x0208000
 * o create (allocate) a pte to contain the machine vectors
 * o populate the pte (points to 0x02078000) (FIXME - is it zeroed?)
 * o populate the init tasks page directory (pgd) with the new pte
 * o zero the rest of the init tasks pgdir (FIXME - what about vmalloc?!)
 */
void __init memtable_init(struct meminfo *mi)
{
	pte_t *pte;
	int i;

	/* Record the number of usable low-memory page frames for the rest
	 * of the MM code. */
	page_nr = max_low_pfn;

	/* Boot-time allocation of the first page table (slab caches are
	 * not up yet, hence bootmem rather than pte_cache). */
	pte = alloc_bootmem_low_pages(PTRS_PER_PTE * sizeof(pte_t));
	/* Map slot 0 (machine vectors) read-only.
	 * NOTE(review): mk_pte_phys is given PAGE_OFFSET + SCREEN_SIZE,
	 * which looks like a *virtual* address — confirm this is really
	 * the intended physical frame (cf. the 0x02078000 note above). */
	pte[0] = mk_pte_phys(PAGE_OFFSET + SCREEN_SIZE, PAGE_READONLY);
	pmd_populate(&init_mm, pmd_offset(swapper_pg_dir, 0), pte);

	/* Invalidate every other pgd slot; only entry 0 (the vectors
	 * mapping installed above) is live at this point. */
	for (i = 1; i < PTRS_PER_PGD; i++)
		pgd_val(swapper_pg_dir[i]) = 0;
}
/* MEMC hardware has no separate I/O mappings to establish, so this
 * architecture hook is a deliberate no-op. */
void __init iotable_init(struct map_desc *io_desc)
{
	/* nothing to do */
}
/*
 * We never have holes in the memmap
 */
/* Architecture hook: memory on these machines is contiguous, so there
 * are no mem_map holes to punch — intentionally empty. */
void __init create_memmap_holes(struct meminfo *mi)
{
}
  147. static void pte_cache_ctor(void *pte, kmem_cache_t *cache, unsigned long flags)
  148. {
  149. memzero(pte, sizeof(pte_t) * PTRS_PER_PTE);
  150. }
  151. static void pgd_cache_ctor(void *pgd, kmem_cache_t *cache, unsigned long flags)
  152. {
  153. memzero(pgd + MEMC_TABLE_SIZE, USER_PTRS_PER_PGD * sizeof(pgd_t));
  154. }
  155. void __init pgtable_cache_init(void)
  156. {
  157. pte_cache = kmem_cache_create("pte-cache",
  158. sizeof(pte_t) * PTRS_PER_PTE,
  159. 0, 0, pte_cache_ctor, NULL);
  160. if (!pte_cache)
  161. BUG();
  162. pgd_cache = kmem_cache_create("pgd-cache", MEMC_TABLE_SIZE +
  163. sizeof(pgd_t) * PTRS_PER_PGD,
  164. 0, 0, pgd_cache_ctor, NULL);
  165. if (!pgd_cache)
  166. BUG();
  167. }