/*
 * clear_page_64.S - zero a 4K page on x86-64.
 *
 * Three variants are provided; the alternatives mechanism at the bottom
 * of this file patches clear_page() at boot to the best one for the CPU.
 */
#include <linux/linkage.h>
#include <asm/dwarf2.h>
#include <asm/alternative-asm.h>
/*
 * Zero a page.
 * %rdi: page address
 */
  8. ENTRY(clear_page_c)
  9. CFI_STARTPROC
  10. movl $4096/8,%ecx
  11. xorl %eax,%eax
  12. rep stosq
  13. ret
  14. CFI_ENDPROC
  15. ENDPROC(clear_page_c)
  16. ENTRY(clear_page_c_e)
  17. CFI_STARTPROC
  18. movl $4096,%ecx
  19. xorl %eax,%eax
  20. rep stosb
  21. ret
  22. CFI_ENDPROC
  23. ENDPROC(clear_page_c_e)
  24. ENTRY(clear_page)
  25. CFI_STARTPROC
  26. xorl %eax,%eax
  27. movl $4096/64,%ecx
  28. .p2align 4
  29. .Lloop:
  30. decl %ecx
  31. #define PUT(x) movq %rax,x*8(%rdi)
  32. movq %rax,(%rdi)
  33. PUT(1)
  34. PUT(2)
  35. PUT(3)
  36. PUT(4)
  37. PUT(5)
  38. PUT(6)
  39. PUT(7)
  40. leaq 64(%rdi),%rdi
  41. jnz .Lloop
  42. nop
  43. ret
  44. CFI_ENDPROC
  45. .Lclear_page_end:
  46. ENDPROC(clear_page)
/*
 * Some CPUs support enhanced REP MOVSB/STOSB instructions.
 * It is recommended to use these when possible.
 * If enhanced REP MOVSB/STOSB is not available, try to use the fast
 * string (rep stosq) variant. Otherwise, use the original function.
 */
  54. #include <asm/cpufeature.h>
  55. .section .altinstr_replacement,"ax"
  56. 1: .byte 0xeb /* jmp <disp8> */
  57. .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
  58. 2: .byte 0xeb /* jmp <disp8> */
  59. .byte (clear_page_c_e - clear_page) - (3f - 2b) /* offset */
  60. 3:
  61. .previous
  62. .section .altinstructions,"a"
  63. altinstruction_entry clear_page,1b,X86_FEATURE_REP_GOOD,\
  64. .Lclear_page_end-clear_page, 2b-1b
  65. altinstruction_entry clear_page,2b,X86_FEATURE_ERMS, \
  66. .Lclear_page_end-clear_page,3b-2b
  67. .previous