hugetlbpage-radix.c

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/machdep.h>
#include <asm/mman.h>
#include <asm/tlb.h>
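
/*
 * Flush the TLB entry for the hugepage mapped at @vmaddr in this VMA's
 * address space. The invalidation size is taken from the hstate of the
 * backing file.
 */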
void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	int psize;
	struct hstate *hstate = hstate_file(vma->vm_file);

	psize = hstate_get_psize(hstate);
	radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
}
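
/*
 * Same as radix__flush_hugetlb_page(), but only invalidates the TLB on
 * the current CPU.
 */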
void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	int psize;
	struct hstate *hstate = hstate_file(vma->vm_file);

	psize = hstate_get_psize(hstate);
	radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
}
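
/*
 * Flush the TLB for the hugepage mappings covering [start, end) in this
 * VMA, again sizing the invalidations from the hstate's page size.
 */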
void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long start,
				    unsigned long end)
{
	int psize;
	struct hstate *hstate = hstate_file(vma->vm_file);

	psize = hstate_get_psize(hstate);
	radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
}

/*
 * A variant of hugetlb_get_unmapped_area doing a topdown search.
 * FIXME!! should we do as x86 does, or as the non-hugetlb area does?
 * i.e. use topdown or not based on the mmap_is_legacy check?
 */
unsigned long
radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				 unsigned long len, unsigned long pgoff,
				 unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;
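
	/*
	 * The requested length must be a multiple of the hugepage size
	 * and must fit within the task's address space.
	 */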
	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;
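
	/*
	 * For MAP_FIXED the caller has already chosen the address; only
	 * verify that it is a valid hugepage range.
	 */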
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}
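
	/*
	 * If the caller supplied a hint, round it up to a hugepage
	 * boundary and honour it when it still fits below TASK_SIZE and
	 * does not collide with an existing mapping (vm_start_gap()
	 * accounts for the stack guard gap).
	 */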
	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	/*
	 * We are always doing a topdown search here. Slice code
	 * does that too.
	 */
	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = current->mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;

	return vm_unmapped_area(&info);
}