mmu_context.h

/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#ifndef __UM_MMU_CONTEXT_H
#define __UM_MMU_CONTEXT_H

#include <asm-generic/mm_hooks.h>
#include "linux/sched.h"
#include "um_mmu.h"
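
/*
 * UML has no per-task MMU context bookkeeping to do in these generic
 * hooks; the host address space is switched explicitly via __switch_mm()
 * in the functions below.
 */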
#define get_mmu_context(task) do ; while(0)
#define activate_context(tsk) do ; while(0)

#define deactivate_mm(tsk, mm)	do { } while (0)

/* Force a flush of the entire address space on the host. */
extern void force_flush_all(void);

static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
{
	/*
	 * This is called by fs/exec.c and fs/aio.c. In the first case, for an
	 * exec, we don't need to do anything as we're called from userspace
	 * and thus going to use a new host PID. In the second, we're called
	 * from a kernel thread, and thus need to do the mmaps on the host.
	 * Since they're very expensive, we want to avoid that as far as
	 * possible.
	 */
	if (old != new && (current->flags & PF_BORROWED_MM))
		__switch_mm(&new->context.id);
}
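
/*
 * Called by the scheduler on a context switch: move this CPU from the old
 * mm's CPU mask to the new one's, and switch the host address space unless
 * the next mm is init_mm (i.e. a kernel thread with no userspace of its own).
 */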
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (prev != next) {
		cpu_clear(cpu, prev->cpu_vm_mask);
		cpu_set(cpu, next->cpu_vm_mask);
		if (next != &init_mm)
			__switch_mm(&next->context.id);
	}
}
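
/* Nothing to do when a kernel thread keeps running on the previous mm. */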
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
}

extern int init_new_context(struct task_struct *task, struct mm_struct *mm);

extern void destroy_context(struct mm_struct *mm);

#endif