generic.c

/* $Id: generic.c,v 1.14 2001/12/21 04:56:15 davem Exp $
 * generic.c: Generic Sparc mm routines that are not dependent upon
 *            MMU type but are Sparc specific.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/* Remap IO memory, the same way as remap_pfn_range(), but use
 * the obio memory space.
 *
 * These routines use a pgprot that sets PAGE_IO and do not check the
 * mem_map table, as this is independent of normal memory.
 */
static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, unsigned long address, unsigned long size,
        unsigned long offset, pgprot_t prot, int space)
{
        unsigned long end;

        /* Work only with the offset within this pmd, so the loop below
         * never walks past the pmd boundary. */
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                /* Replace whatever was mapped with an I/O pte for the next
                 * page of the physical range. */
                pte_t oldpage = *pte;
                pte_clear(mm, address, pte);
                set_pte(pte, mk_pte_io(offset, prot, space));
                address += PAGE_SIZE;
                offset += PAGE_SIZE;
                pte++;
        } while (address < end);
}

static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
        unsigned long offset, pgprot_t prot, int space)
{
        unsigned long end;

        /* Work only with the offset within this pgd, so the loop below
         * never walks past the pgd boundary. */
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        /* Bias offset by address so that each call below can simply pass
         * address + offset and land on the right physical offset. */
        offset -= address;
        do {
                pte_t * pte = pte_alloc_map(mm, pmd, address);
                if (!pte)
                        return -ENOMEM;
                io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
        return 0;
}

int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
                       unsigned long pfn, unsigned long size, pgprot_t prot)
{
        int error = 0;
        pgd_t * dir;
        unsigned long beg = from;
        unsigned long end = from + size;
        struct mm_struct *mm = vma->vm_mm;
        /* On Sparc the pfn argument encodes both the I/O space number and
         * the page frame; split them apart here. */
        int space = GET_IOSPACE(pfn);
        unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;

        /* The caller's prot is not used; I/O mappings always get pg_iobits
         * (see the comment at the top of this file). */
        prot = __pgprot(pg_iobits);
        offset -= from;
        dir = pgd_offset(mm, from);
        flush_cache_range(vma, beg, end);

        spin_lock(&mm->page_table_lock);
        while (from < end) {
                pmd_t *pmd = pmd_alloc(current->mm, dir, from);
                error = -ENOMEM;
                if (!pmd)
                        break;
                error = io_remap_pmd_range(mm, pmd, from, end - from, offset + from, prot, space);
                if (error)
                        break;
                from = (from + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }
        spin_unlock(&mm->page_table_lock);

        flush_tlb_range(vma, beg, end);
        return error;
}
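
/* Illustrative sketch, not taken from this file: how a driver's mmap handler
 * might hand a device register window to user space through
 * io_remap_pfn_range(). mydev_mmap(), MYDEV_IOSPACE and MYDEV_PHYS_BASE are
 * made-up names, and the way the I/O space number is folded into the pfn is
 * an assumption; note that whatever prot the caller passes is overridden
 * with pg_iobits inside io_remap_pfn_range() anyway.
 */
#define MYDEV_IOSPACE   1UL             /* assumed Sparc I/O space number */
#define MYDEV_PHYS_BASE 0x10000000UL    /* assumed physical base of the registers */

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;
        /* Fold the page frame and the I/O space number into the single pfn
         * argument, mirroring what GET_PFN()/GET_IOSPACE() unpack above. */
        unsigned long pfn = (MYDEV_PHYS_BASE >> PAGE_SHIFT) |
                            (MYDEV_IOSPACE << (BITS_PER_LONG - 4));

        return io_remap_pfn_range(vma, vma->vm_start, pfn, size,
                                  vma->vm_page_prot);
}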