/*
 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include "linux/stddef.h"
#include "linux/kernel.h"
#include "linux/sched.h"
#include "linux/mm.h"
#include "asm/page.h"
#include "asm/pgtable.h"
#include "asm/uaccess.h"
#include "asm/tlbflush.h"
#include "mem_user.h"
#include "os.h"
#include "tlb.h"
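
/* Walk the list of host VM operations that fix_range_common queued up
 * and apply each one to the host address space, stopping at the first
 * failure.  Returns 0 on success or the error from the operation that
 * failed.
 */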
static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
		  int finished, void **flush)
{
	struct host_vm_op *op;
	int i, ret = 0;

	for(i = 0; i <= last && !ret; i++){
		op = &ops[i];
		switch(op->type){
		case MMAP:
			ret = os_map_memory((void *) op->u.mmap.addr,
					    op->u.mmap.fd, op->u.mmap.offset,
					    op->u.mmap.len, op->u.mmap.r,
					    op->u.mmap.w, op->u.mmap.x);
			break;
		case MUNMAP:
			ret = os_unmap_memory((void *) op->u.munmap.addr,
					      op->u.munmap.len);
			break;
		case MPROTECT:
			ret = protect_memory(op->u.mprotect.addr,
					     op->u.mprotect.len,
					     op->u.mprotect.r,
					     op->u.mprotect.w,
					     op->u.mprotect.x, 1);
			break;
		default:
			printk("Unknown op type %d in do_ops\n", op->type);
			break;
		}
	}

	return ret;
}
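
/* Check that we are fixing up the address space we are actually running
 * in, then let the generic fix_range_common do the page table walk,
 * feeding the resulting operations to do_ops above.
 */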
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	if((current->thread.mode.tt.extern_pid != -1) &&
	   (current->thread.mode.tt.extern_pid != os_getpid()))
		panic("fix_range fixing wrong address space, current = 0x%p",
		      current);

	fix_range_common(mm, start_addr, end_addr, force, do_ops);
}
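
/* Incremented whenever the kernel vm area changes; flush_tlb_mm_tt
 * compares it against the per-task vm_seq to detect stale kernel
 * mappings.
 */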
atomic_t vmchange_seq = ATOMIC_INIT(1);
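
/* Flush a kernel address range and record the change in vmchange_seq. */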
void flush_tlb_kernel_range_tt(unsigned long start, unsigned long end)
{
	if(flush_tlb_kernel_range_common(start, end))
		atomic_inc(&vmchange_seq);
}
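
/* Flush the whole kernel vm area. */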
void flush_tlb_kernel_vm_tt(void)
{
	flush_tlb_kernel_range(start_vm, end_vm);
}
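
/* Flush the single page containing addr. */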
void __flush_tlb_one_tt(unsigned long addr)
{
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
}
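
/* Flush a range which lies either in kernel vm or in process memory,
 * dispatching on where the start address falls.
 */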
void flush_tlb_range_tt(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	if(vma->vm_mm != current->mm)
		return;

	/* Assumes that the range start ... end is entirely within
	 * either process memory or kernel vm
	 */
	if((start >= start_vm) && (start < end_vm)){
		if(flush_tlb_kernel_range_common(start, end))
			atomic_inc(&vmchange_seq);
	}
	else fix_range(vma->vm_mm, start, end, 0);
}
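
/* Flush the current process's entire address space, then pick up any
 * kernel vm changes made since this task last looked at vmchange_seq.
 */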
void flush_tlb_mm_tt(struct mm_struct *mm)
{
	unsigned long seq;

	if(mm != current->mm)
		return;

	fix_range(mm, 0, STACK_TOP, 0);

	seq = atomic_read(&vmchange_seq);
	if(current->thread.mode.tt.vm_seq == seq)
		return;
	current->thread.mode.tt.vm_seq = seq;
	flush_tlb_kernel_range_common(start_vm, end_vm);
}
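
/* Force every mapping to be redone - process memory with force set,
 * plus the whole kernel vm area.
 */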
void force_flush_all_tt(void)
{
	fix_range(current->mm, 0, STACK_TOP, 1);
	flush_tlb_kernel_range_common(start_vm, end_vm);
}