tlb.c

/*
 * linux/arch/cris/arch-v10/mm/tlb.c
 *
 * Low level TLB handling
 *
 * Copyright (C) 2000-2007 Axis Communications AB
 *
 * Authors: Bjorn Wesen (bjornw@axis.com)
 *
 */

#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <arch/svinto.h>

#define D(x)
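
/* D() wraps the debug printks in this file; as defined here it discards
 * its argument, so the printks compile away unless the macro is redefined
 * to expand to x.
 */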

/* The TLB can host up to 64 different mm contexts at the same time.
 * The running context is R_MMU_CONTEXT, and each TLB entry contains a
 * page_id that has to match to give a hit. In page_id_map, we keep track
 * of which mm's we have assigned which page_id's, so that we know when
 * to invalidate TLB entries.
 *
 * The last page_id is never running - it is used as an invalid page_id
 * so we can make TLB entries that will never match.
 *
 * Notice that we need to make the flushes atomic, otherwise an interrupt
 * handler that uses vmalloced memory might cause a TLB load in the middle
 * of a flush, leaving the TLB in an inconsistent state.
 */

/* invalidate all TLB entries */

void
flush_tlb_all(void)
{
	int i;
	unsigned long flags;

	/* Use i & 0xf as the vpn so we don't write identical TLB entries
	 * into the same 4-way entry group.
	 */

	local_irq_save(flags);
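	/* For each entry: select it through R_TLB_SELECT, give its hi half
	 * the never-matching INVALID_PAGEID, and clear valid, we, kernel and
	 * global along with the pfn in the lo half.
	 */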
	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = (IO_FIELD(R_TLB_SELECT, index, i));
		*R_TLB_HI = (IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
			     IO_FIELD(R_TLB_HI, vpn, i & 0xf));
		*R_TLB_LO = (IO_STATE(R_TLB_LO, global, no) |
			     IO_STATE(R_TLB_LO, valid, no) |
			     IO_STATE(R_TLB_LO, kernel, no) |
			     IO_STATE(R_TLB_LO, we, no) |
			     IO_FIELD(R_TLB_LO, pfn, 0));
	}
	local_irq_restore(flags);
	D(printk("tlb: flushed all\n"));
}

/* invalidate the selected mm context only */

void
flush_tlb_mm(struct mm_struct *mm)
{
	int i;
	int page_id = mm->context.page_id;
	unsigned long flags;

	D(printk("tlb: flush mm context %d (%p)\n", page_id, mm));

	if (page_id == NO_CONTEXT)
		return;

	/* Mark the TLB entries that match the page_id as invalid.
	 * Here we could also check the _PAGE_GLOBAL bit and NOT flush
	 * global pages. Is it worth the extra I/O?
	 */

	local_irq_save(flags);
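	/* Walk every entry and rewrite those belonging to this context with
	 * INVALID_PAGEID so they can never hit again; entries for other
	 * contexts are left untouched.
	 */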
	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		if (IO_EXTRACT(R_TLB_HI, page_id, *R_TLB_HI) == page_id) {
			*R_TLB_HI = (IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
				     IO_FIELD(R_TLB_HI, vpn, i & 0xf));
			*R_TLB_LO = (IO_STATE(R_TLB_LO, global, no) |
				     IO_STATE(R_TLB_LO, valid, no) |
				     IO_STATE(R_TLB_LO, kernel, no) |
				     IO_STATE(R_TLB_LO, we, no) |
				     IO_FIELD(R_TLB_LO, pfn, 0));
		}
	}
	local_irq_restore(flags);
}

/* invalidate a single page */

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;
	int page_id = mm->context.page_id;
	int i;
	unsigned long flags;

	D(printk("tlb: flush page %lx in context %d (%p)\n", addr, page_id, mm));

	if (page_id == NO_CONTEXT)
		return;

	addr &= PAGE_MASK; /* perhaps not necessary */

	/* invalidate those TLB entries that match both the mm context
	 * and the virtual address requested
	 */

	local_irq_save(flags);
	for (i = 0; i < NUM_TLB_ENTRIES; i++) {
		unsigned long tlb_hi;

		*R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
		tlb_hi = *R_TLB_HI;
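		/* The vpn sits in the page-frame bits of R_TLB_HI, so masking
		 * with PAGE_MASK isolates it for comparison against the
		 * page-aligned addr, while IO_EXTRACT pulls out the page_id.
		 */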
		if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
		    (tlb_hi & PAGE_MASK) == addr) {
			*R_TLB_HI = IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
				    addr; /* same addr as before works. */
			*R_TLB_LO = (IO_STATE(R_TLB_LO, global, no) |
				     IO_STATE(R_TLB_LO, valid, no) |
				     IO_STATE(R_TLB_LO, kernel, no) |
				     IO_STATE(R_TLB_LO, we, no) |
				     IO_FIELD(R_TLB_LO, pfn, 0));
		}
	}
	local_irq_restore(flags);
}

/*
 * Initialize the context-related info for a new mm_struct
 * instance.
 */

int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.page_id = NO_CONTEXT;
	return 0;
}
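
/* NO_CONTEXT only marks the mm as having no page_id yet; a real one is
 * handed out later by get_mmu_context(), called from switch_mm() below.
 */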

/* called in schedule() just before actually doing the switch_to */

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk)
{
	if (prev != next) {
		/* make sure we have a context */
		get_mmu_context(next);

		/* remember the pgd for the fault handlers
		 * this is similar to the pgd register in some other CPUs.
		 * we need our own copy of it because current and active_mm
		 * might be invalid at points where we still need to
		 * dereference the pgd.
		 */
		per_cpu(current_pgd, smp_processor_id()) = next->pgd;

		/* switch context in the MMU */
		D(printk(KERN_DEBUG "switching mmu_context to %d (%p)\n",
			 next->context.page_id, next));
		*R_MMU_CONTEXT = IO_FIELD(R_MMU_CONTEXT,
					  page_id, next->context.page_id);
	}
}