mmu_context.h

#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>

#ifndef CONFIG_PARAVIRT
#include <asm-generic/mm_hooks.h>

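/*
 * Without CONFIG_PARAVIRT there is no hypervisor to notify about mm
 * activation, so the hook below compiles away to nothing.
 */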
static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
}
#endif /* !CONFIG_PARAVIRT */

/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);

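/*
 * enter_lazy_tlb() is called when the CPU is about to run a kernel
 * thread that has no mm of its own: the previous mm's page tables stay
 * loaded, but the CPU marks itself TLBSTATE_LAZY so that a TLB-flush
 * IPI for that mm can switch it to init_mm (via leave_mm()) instead of
 * flushing it over and over.
 */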
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
                this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}

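/*
 * switch_mm() is called by the scheduler, with interrupts disabled, to
 * move this CPU from prev's address space to next's.  Two cases: a real
 * switch (prev != next) reloads CR3, which flushes the TLB; on SMP,
 * prev == next can still mean this CPU dropped out of the mm's cpumask
 * while in lazy TLB mode and must re-validate its page tables.
 */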
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        unsigned cpu = smp_processor_id();

        if (likely(prev != next)) {
#ifdef CONFIG_SMP
                this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
                this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
                cpumask_set_cpu(cpu, mm_cpumask(next));

                /* Re-load page tables */
                load_cr3(next->pgd);
                trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

                /* Stop flush IPIs for the previous mm */
                cpumask_clear_cpu(cpu, mm_cpumask(prev));

                /* Load the LDT, if the LDT is different: */
                if (unlikely(prev->context.ldt != next->context.ldt))
                        load_LDT_nolock(&next->context);
        }
#ifdef CONFIG_SMP
        else {
                this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
                BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

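                /*
                 * If this CPU is still set in next's cpumask, leave_mm()
                 * never ran here: CR3 still points at next's page tables
                 * and no flush IPI was missed, so there is nothing to do.
                 */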
                if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
                        /*
                         * On established mms, the mm_cpumask is only changed
                         * from irq context, from ptep_clear_flush() while in
                         * lazy tlb mode, and here. Irqs are blocked during
                         * schedule, protecting us from simultaneous changes.
                         */
                        cpumask_set_cpu(cpu, mm_cpumask(next));
                        /*
                         * We were in lazy tlb mode and leave_mm() disabled
                         * TLB flush IPI delivery.  We must reload CR3 to
                         * make sure we don't use freed page tables.
                         */
                        load_cr3(next->pgd);
                        trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
                        load_LDT_nolock(&next->context);
                }
        }
#endif
}

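/*
 * activate_mm() is called at exec() time to switch to the freshly
 * created mm: it notifies the paravirt layer (a no-op on bare metal)
 * and then does an ordinary switch_mm() with no owning task.
 */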
#define activate_mm(prev, next)                 \
do {                                            \
        paravirt_activate_mm((prev), (next));   \
        switch_mm((prev), (next), NULL);        \
} while (0)

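/*
 * deactivate_mm() is also called during exec(), before the old mm goes
 * away.  Segment registers may still hold selectors that refer to the
 * old mm's LDT or TLS entries, so %gs (and %fs on 64-bit) are cleared
 * to keep stale descriptors from being dereferenced.
 */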
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)                  \
do {                                            \
        lazy_load_gs(0);                        \
} while (0)
#else
#define deactivate_mm(tsk, mm)                  \
do {                                            \
        load_gs_index(0);                       \
        loadsegment(fs, 0);                     \
} while (0)
#endif

#endif /* _ASM_X86_MMU_CONTEXT_H */