/* mmu_context.h */
/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */
  5. #ifndef __UM_MMU_CONTEXT_H
  6. #define __UM_MMU_CONTEXT_H
  7. #include "linux/sched.h"
  8. #include "um_mmu.h"
  9. extern void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
  10. extern void arch_exit_mmap(struct mm_struct *mm);
  11. #define get_mmu_context(task) do ; while(0)
  12. #define activate_context(tsk) do ; while(0)
  13. #define deactivate_mm(tsk,mm) do { } while (0)
  14. extern void force_flush_all(void);
  15. static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
  16. {
  17. /*
  18. * This is called by fs/exec.c and fs/aio.c. In the first case, for an
  19. * exec, we don't need to do anything as we're called from userspace
  20. * and thus going to use a new host PID. In the second, we're called
  21. * from a kernel thread, and thus need to go doing the mmap's on the
  22. * host. Since they're very expensive, we want to avoid that as far as
  23. * possible.
  24. */
  25. if (old != new && (current->flags & PF_BORROWED_MM))
  26. __switch_mm(&new->context.id);
  27. arch_dup_mmap(old, new);
  28. }
  29. static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
  30. struct task_struct *tsk)
  31. {
  32. unsigned cpu = smp_processor_id();
  33. if(prev != next){
  34. cpu_clear(cpu, prev->cpu_vm_mask);
  35. cpu_set(cpu, next->cpu_vm_mask);
  36. if(next != &init_mm)
  37. __switch_mm(&next->context.id);
  38. }
  39. }
  40. static inline void enter_lazy_tlb(struct mm_struct *mm,
  41. struct task_struct *tsk)
  42. {
  43. }
  44. extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
  45. extern void destroy_context(struct mm_struct *mm);
  46. #endif