generic.c

/* $Id: generic.c,v 1.14 2001/12/21 04:56:15 davem Exp $
 * generic.c: Generic Sparc mm routines that are not dependent upon
 *            MMU type but are Sparc specific.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/* Sanity check used while installing a new IO mapping: the PTE being
 * replaced must be clear; anything else means a mapping was leaked.
 */
static inline void forget_pte(pte_t page)
{
#if 0 /* old 2.4 code */
        if (pte_none(page))
                return;
        if (pte_present(page)) {
                unsigned long pfn = pte_pfn(page);
                struct page *ptpage;

                if (!pfn_valid(pfn))
                        return;
                ptpage = pfn_to_page(pfn);
                if (PageReserved(ptpage))
                        return;
                page_cache_release(ptpage);
                return;
        }
        swap_free(pte_to_swp_entry(page));
#else
        if (!pte_none(page)) {
                printk("forget_pte: old mapping existed!\n");
                BUG();
        }
#endif
}

/* Remap IO memory, the same way as remap_pfn_range(), but use
 * the obio memory space.
 *
 * These routines use a pgprot that sets PAGE_IO and does not check the
 * mem_map table as this is independent of normal memory.
 */
static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, unsigned long address, unsigned long size,
        unsigned long offset, pgprot_t prot, int space)
{
        unsigned long end;

        /* Walk the PTEs of one PMD; never run past the PMD boundary. */
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                pte_t oldpage = *pte;

                pte_clear(mm, address, pte);
                set_pte(pte, mk_pte_io(offset, prot, space));
                forget_pte(oldpage);
                address += PAGE_SIZE;
                offset += PAGE_SIZE;
                pte++;
        } while (address < end);
}

static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
        unsigned long offset, pgprot_t prot, int space)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        /* Bias offset so that (address + offset) below yields the IO offset. */
        offset -= address;
        do {
                pte_t * pte = pte_alloc_map(mm, pmd, address);
                if (!pte)
                        return -ENOMEM;
                io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
        return 0;
}

int io_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned long offset, unsigned long size, pgprot_t prot, int space)
{
        int error = 0;
        pgd_t * dir;
        unsigned long beg = from;
        unsigned long end = from + size;
        struct mm_struct *mm = vma->vm_mm;

        /* IO mappings always use the IO protection bits, whatever was passed in. */
        prot = __pgprot(pg_iobits);
        offset -= from;
        dir = pgd_offset(mm, from);
        flush_cache_range(vma, beg, end);
        spin_lock(&mm->page_table_lock);
        while (from < end) {
                pmd_t *pmd = pmd_alloc(mm, dir, from); /* vma->vm_mm, not current->mm */
                error = -ENOMEM;
                if (!pmd)
                        break;
                error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space);
                if (error)
                        break;
                from = (from + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }
        spin_unlock(&mm->page_table_lock);
        flush_tlb_range(vma, beg, end);
        return error;
}

int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
        unsigned long pfn, unsigned long size, pgprot_t prot)
{
        int error = 0;
        pgd_t * dir;
        unsigned long beg = from;
        unsigned long end = from + size;
        struct mm_struct *mm = vma->vm_mm;
        /* The pfn argument encodes both the obio space and the page frame. */
        int space = GET_IOSPACE(pfn);
        unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;

        prot = __pgprot(pg_iobits);
        offset -= from;
        dir = pgd_offset(mm, from);
        flush_cache_range(vma, beg, end);
        spin_lock(&mm->page_table_lock);
        while (from < end) {
                pmd_t *pmd = pmd_alloc(mm, dir, from); /* vma->vm_mm, not current->mm */
                error = -ENOMEM;
                if (!pmd)
                        break;
                error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space);
                if (error)
                        break;
                from = (from + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }
        spin_unlock(&mm->page_table_lock);
        flush_tlb_range(vma, beg, end);
        return error;
}
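
/*
 * Usage sketch, added for illustration only (not part of the original file):
 * a minimal, hypothetical driver mmap() handler showing how a caller hands a
 * VMA to io_remap_pfn_range().  The mydrv_mmap name, the physical base
 * address and the iospace value are assumptions; a real driver derives them
 * from its probed device resources.  Kept inside #if 0 so it does not affect
 * the build.
 */
#if 0
static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;
        unsigned long phys = 0x10000000UL;      /* assumed register base */
        int iospace = 0;                        /* assumed obio space */
        unsigned long pfn = MK_IOSPACE_PFN(iospace, phys >> PAGE_SHIFT);

        /* prot is overridden with pg_iobits inside io_remap_pfn_range(). */
        return io_remap_pfn_range(vma, vma->vm_start, pfn, size,
                                  vma->vm_page_prot);
}
#endif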