/*
 *  include/asm-s390/mmu_context.h
 *
 *  S390 version
 *
 *  Derived from "include/asm-i386/mmu_context.h"
 */

#ifndef __S390_MMU_CONTEXT_H
#define __S390_MMU_CONTEXT_H

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/ctl_reg.h>
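
/*
 * Set up the MMU context of a newly created mm: clear the attach count
 * and the pending-flush flag, choose the address-space-control-element
 * (ASCE) bits, inherit extended page tables (PGSTEs) from the current mm
 * if requested, and initialize the top-level region/segment table.
 */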
static inline int init_new_context(struct task_struct *tsk,
                                   struct mm_struct *mm)
{
        atomic_set(&mm->context.attach_count, 0);
        mm->context.flush_mm = 0;
        mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
#ifdef CONFIG_64BIT
        mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#endif
        if (current->mm && current->mm->context.alloc_pgste) {
                /*
                 * alloc_pgste indicates that any NEW context will be created
                 * with extended page tables. The old context is unchanged. The
                 * page table allocation and the page table operations will
                 * look at has_pgste to distinguish normal and extended page
                 * tables. The only way to create extended page tables is to
                 * set alloc_pgste and then create a new context (e.g. dup_mm).
                 * The page table allocation is called after init_new_context
                 * and if has_pgste is set, it will create extended page
                 * tables.
                 */
                mm->context.has_pgste = 1;
                mm->context.alloc_pgste = 1;
        } else {
                mm->context.has_pgste = 0;
                mm->context.alloc_pgste = 0;
        }
        mm->context.asce_limit = STACK_TOP_MAX;
        crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
        return 0;
}
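
/* The context holds no state of its own that needs to be torn down. */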
#define destroy_context(mm)             do { } while (0)
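
/*
 * Pick the control-register load instruction: lctl for 31-bit kernels,
 * lctlg (64-bit control registers) on z/Architecture.
 */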
#ifndef __s390x__
#define LCTL_OPCODE "lctl"
#else
#define LCTL_OPCODE "lctlg"
#endif
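
/*
 * Make mm the current address space: store its ASCE in the lowcore and
 * load it into control register 1 (primary space) or control register 13
 * (home space), depending on the configured user address space mode, then
 * restore the task's uaccess segment with set_fs().
 */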
static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
{
        pgd_t *pgd = mm->pgd;

        S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
        if (user_mode != HOME_SPACE_MODE) {
                /* Load primary space page table origin. */
                asm volatile(LCTL_OPCODE" 1,1,%0\n"
                             : : "m" (S390_lowcore.user_asce) );
        } else
                /* Load home space page table origin. */
                asm volatile(LCTL_OPCODE" 13,13,%0"
                             : : "m" (S390_lowcore.user_asce) );
        set_fs(current->thread.mm_segment);
}
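
/*
 * Switch from prev to next: mark this CPU as using next, install next's
 * ASCE via update_mm(), move the attach count from prev to next, and
 * perform any TLB flush that was deferred while next was not attached.
 */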
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
        update_mm(next, tsk);
        atomic_dec(&prev->context.attach_count);
        WARN_ON(atomic_read(&prev->context.attach_count) < 0);
        atomic_inc(&next->context.attach_count);
        /* Check for TLBs not flushed yet */
        if (next->context.flush_mm)
                __tlb_flush_mm(next);
}
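
/*
 * enter_lazy_tlb() and deactivate_mm() need no work on s390;
 * activate_mm() is simply a switch_mm() from the current task.
 */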
#define enter_lazy_tlb(mm,tsk)  do { } while (0)
#define deactivate_mm(tsk,mm)   do { } while (0)

static inline void activate_mm(struct mm_struct *prev,
                               struct mm_struct *next)
{
        switch_mm(prev, next, current);
}
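
/*
 * On fork, shrink the child's page table hierarchy if the parent runs
 * with a smaller asce_limit than the freshly initialized child mm.
 */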
static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
#ifdef CONFIG_64BIT
        if (oldmm->context.asce_limit < mm->context.asce_limit)
                crst_table_downgrade(mm, oldmm->context.asce_limit);
#endif
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
}

#endif /* __S390_MMU_CONTEXT_H */