/* bpf_jit_asm_32.S: out-of-line load helpers for the SPARC32 classic-BPF JIT. */
/* SPDX-License-Identifier: GPL-2.0 */
#include <asm/ptrace.h>
#include "bpf_jit_32.h"

/* Frame size for the register window pushed by the C-call trampolines
 * below, and the offset of the scratch slot skb_copy_bits() fills.
 * NOTE(review): 96/72 look like the minimal SPARC32 ABI frame plus one
 * scratch word -- confirm against the 32-bit stack-frame layout.
 */
#define SAVE_SZ 96
#define SCRATCH_OFF 72

/* 32-bit flavours: a pointer compare branches with a plain "be", and
 * a 32-bit offset needs no sign extension (SIGN_EXTEND expands to
 * nothing here; presumably the 64-bit variant of this file overrides
 * both -- see bpf_jit_asm_64.S).
 */
#define BE_PTR(label) be label
#define SIGN_EXTEND(reg)

/* Most negative offset accepted for the "special" (SKF_LL_OFF etc.)
 * load paths; anything below this is an immediate error.
 */
#define SKF_MAX_NEG_OFF (-0x200000) /* SKF_LL_OFF from filter.h */

	.text
	/* Load a 32-bit big-endian word from skb data at offset r_OFF
	 * into r_A.  Negative offsets take the special-pointer path.
	 * Delay-slot convention: the instruction after every branch
	 * (indented one extra space) executes whether or not the branch
	 * is taken.
	 */
	.globl	bpf_jit_load_word
bpf_jit_load_word:
	cmp	r_OFF, 0
	bl	bpf_slow_path_word_neg	/* signed: offset < 0 */
	 nop
	.globl	bpf_jit_load_word_positive_offset
bpf_jit_load_word_positive_offset:
	sub	r_HEADLEN, r_OFF, r_TMP	/* r_TMP = linear bytes past r_OFF */
	cmp	r_TMP, 3
	ble	bpf_slow_path_word	/* < 4 bytes linear -> copy via C */
	 add	r_SKB_DATA, r_OFF, r_TMP	/* delay slot: r_TMP = load address */
	andcc	r_TMP, 3, %g0		/* word-aligned address? */
	bne	load_word_unaligned
	 nop
	retl
	 ld	[r_TMP], r_A		/* delay slot: aligned BE load */
load_word_unaligned:
	/* Assemble the big-endian word byte by byte.  r_OFF is reused
	 * as the accumulator (clobbering it is apparently fine for the
	 * JIT-generated caller -- it is an input, not live-out).
	 */
	ldub	[r_TMP + 0x0], r_OFF
	ldub	[r_TMP + 0x1], r_TMP2
	sll	r_OFF, 8, r_OFF
	or	r_OFF, r_TMP2, r_OFF
	ldub	[r_TMP + 0x2], r_TMP2
	sll	r_OFF, 8, r_OFF
	or	r_OFF, r_TMP2, r_OFF
	ldub	[r_TMP + 0x3], r_TMP2
	sll	r_OFF, 8, r_OFF
	retl
	 or	r_OFF, r_TMP2, r_A	/* delay slot: merge last byte into r_A */
	/* Load a 16-bit big-endian halfword from skb data at r_OFF into
	 * r_A.  Same structure as bpf_jit_load_word, 2-byte granularity.
	 */
	.globl	bpf_jit_load_half
bpf_jit_load_half:
	cmp	r_OFF, 0
	bl	bpf_slow_path_half_neg	/* signed: offset < 0 */
	 nop
	.globl	bpf_jit_load_half_positive_offset
bpf_jit_load_half_positive_offset:
	sub	r_HEADLEN, r_OFF, r_TMP	/* r_TMP = linear bytes past r_OFF */
	cmp	r_TMP, 1
	ble	bpf_slow_path_half	/* < 2 bytes linear -> copy via C */
	 add	r_SKB_DATA, r_OFF, r_TMP	/* delay slot: r_TMP = load address */
	andcc	r_TMP, 1, %g0		/* halfword-aligned address? */
	bne	load_half_unaligned
	 nop
	retl
	 lduh	[r_TMP], r_A		/* delay slot: aligned BE load */
load_half_unaligned:
	/* Big-endian byte assembly; r_OFF doubles as the accumulator. */
	ldub	[r_TMP + 0x0], r_OFF
	ldub	[r_TMP + 0x1], r_TMP2
	sll	r_OFF, 8, r_OFF
	retl
	 or	r_OFF, r_TMP2, r_A	/* delay slot: r_A = (hi<<8)|lo */
	/* Load one byte from skb data at r_OFF into r_A. */
	.globl	bpf_jit_load_byte
bpf_jit_load_byte:
	cmp	r_OFF, 0
	bl	bpf_slow_path_byte_neg	/* signed: offset < 0 */
	 nop
	.globl	bpf_jit_load_byte_positive_offset
bpf_jit_load_byte_positive_offset:
	cmp	r_OFF, r_HEADLEN	/* within linear data? */
	bge	bpf_slow_path_byte
	 nop
	retl
	 ldub	[r_SKB_DATA + r_OFF], r_A	/* delay slot: direct load */
	/* BPF_LDX|BPF_MSH: r_X = 4 * (skb->data[r_OFF] & 0xf).
	 * (The classic-BPF idiom for extracting an IP header length.)
	 */
	.globl	bpf_jit_load_byte_msh
bpf_jit_load_byte_msh:
	cmp	r_OFF, 0
	bl	bpf_slow_path_byte_msh_neg	/* signed: offset < 0 */
	 nop
	.globl	bpf_jit_load_byte_msh_positive_offset
bpf_jit_load_byte_msh_positive_offset:
	cmp	r_OFF, r_HEADLEN	/* within linear data? */
	bge	bpf_slow_path_byte_msh
	 nop
	ldub	[r_SKB_DATA + r_OFF], r_OFF	/* r_OFF reused as scratch */
	and	r_OFF, 0xf, r_OFF	/* low nibble */
	retl
	 sll	r_OFF, 2, r_X		/* delay slot: r_X = nibble * 4 */
/* Call skb_copy_bits(skb, r_OFF, scratch, LEN) from a fresh register
 * window.  After "save", the caller's %o0 (presumably the skb pointer
 * the JIT keeps there) is visible as %i0.  The "cmp %o0, 0" is done
 * BEFORE "restore": a plain restore does not modify the integer
 * condition codes, so the expansion site can branch on the call's
 * return value after the window is popped.
 */
#define bpf_slow_path_common(LEN) \
	save	%sp, -SAVE_SZ, %sp; \
	mov	%i0, %o0; \
	mov	r_OFF, %o1; \
	add	%fp, SCRATCH_OFF, %o2; \
	call	skb_copy_bits; \
	 mov	(LEN), %o3; \
	cmp	%o0, 0; \
	restore;
bpf_slow_path_word:
	/* Copy 4 bytes through skb_copy_bits(); negative return (the cmp
	 * is inside the macro) branches to bpf_error.  After restore,
	 * %sp equals the %fp used during the call, so %sp + SCRATCH_OFF
	 * addresses the same scratch slot.  The ld sits in bl's delay
	 * slot and executes on both paths; on error its result is
	 * simply discarded.
	 */
	bpf_slow_path_common(4)
	bl	bpf_error
	 ld	[%sp + SCRATCH_OFF], r_A
	retl
	 nop
bpf_slow_path_half:
	/* As bpf_slow_path_word, but copy 2 bytes and load a halfword. */
	bpf_slow_path_common(2)
	bl	bpf_error
	 lduh	[%sp + SCRATCH_OFF], r_A	/* delay slot: runs on both paths */
	retl
	 nop
bpf_slow_path_byte:
	/* As bpf_slow_path_word, but copy and load a single byte. */
	bpf_slow_path_common(1)
	bl	bpf_error
	 ldub	[%sp + SCRATCH_OFF], r_A	/* delay slot: runs on both paths */
	retl
	 nop
  113. bpf_slow_path_byte_msh:
  114. bpf_slow_path_common(1)
  115. bl bpf_error
  116. ldub [%sp + SCRATCH_OFF], r_A
  117. and r_OFF, 0xf, r_OFF
  118. retl
  119. sll r_OFF, 2, r_X
/* Call bpf_internal_load_pointer_neg_helper(skb, r_OFF, LEN) from a
 * fresh window.  The helper returns a direct pointer to the data, or
 * NULL on failure; NULL branches to bpf_error (BE_PTR is a plain "be"
 * in this 32-bit build, with "restore" in its delay slot, executed on
 * both paths).  The pointer is copied into r_TMP before the restore --
 * NOTE(review): this relies on r_TMP naming a register that survives
 * the window switch (a %g register per bpf_jit_32.h, presumably) and
 * on the cmp's condition codes surviving restore.
 */
#define bpf_negative_common(LEN) \
	save	%sp, -SAVE_SZ, %sp; \
	mov	%i0, %o0; \
	mov	r_OFF, %o1; \
	SIGN_EXTEND(%o1); \
	call	bpf_internal_load_pointer_neg_helper; \
	 mov	(LEN), %o2; \
	mov	%o0, r_TMP; \
	cmp	%o0, 0; \
	BE_PTR(bpf_error); \
	 restore;
bpf_slow_path_word_neg:
	/* Reject offsets more negative than SKF_MAX_NEG_OFF.  sethi sets
	 * the upper 22 bits; SKF_MAX_NEG_OFF's low 10 bits are zero, so
	 * no "or %lo(...)" is needed.
	 */
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
	cmp	r_OFF, r_TMP
	bl	bpf_error		/* signed: offset below the limit */
	 nop
	.globl	bpf_jit_load_word_negative_offset
bpf_jit_load_word_negative_offset:
	/* r_TMP = pointer from the neg-helper; load a BE word from it,
	 * sharing the unaligned byte-assembly path with the fast case.
	 */
	bpf_negative_common(4)
	andcc	r_TMP, 3, %g0		/* word-aligned pointer? */
	bne	load_word_unaligned
	 nop
	retl
	 ld	[r_TMP], r_A		/* delay slot: aligned BE load */
bpf_slow_path_half_neg:
	/* Range-check as in bpf_slow_path_word_neg. */
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
	cmp	r_OFF, r_TMP
	bl	bpf_error		/* signed: offset below the limit */
	 nop
	.globl	bpf_jit_load_half_negative_offset
bpf_jit_load_half_negative_offset:
	/* r_TMP = pointer from the neg-helper; BE halfword load. */
	bpf_negative_common(2)
	andcc	r_TMP, 1, %g0		/* halfword-aligned pointer? */
	bne	load_half_unaligned
	 nop
	retl
	 lduh	[r_TMP], r_A		/* delay slot: aligned BE load */
bpf_slow_path_byte_neg:
	/* Range-check as in bpf_slow_path_word_neg. */
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
	cmp	r_OFF, r_TMP
	bl	bpf_error		/* signed: offset below the limit */
	 nop
	.globl	bpf_jit_load_byte_negative_offset
bpf_jit_load_byte_negative_offset:
	/* Single byte: no alignment concern. */
	bpf_negative_common(1)
	retl
	 ldub	[r_TMP], r_A		/* delay slot: load via helper pointer */
bpf_slow_path_byte_msh_neg:
	/* Range-check as in bpf_slow_path_word_neg. */
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
	cmp	r_OFF, r_TMP
	bl	bpf_error		/* signed: offset below the limit */
	 nop
	.globl	bpf_jit_load_byte_msh_negative_offset
bpf_jit_load_byte_msh_negative_offset:
	/* r_X = 4 * (byte & 0xf), byte fetched via the helper pointer. */
	bpf_negative_common(1)
	ldub	[r_TMP], r_OFF		/* r_OFF reused as scratch */
	and	r_OFF, 0xf, r_OFF	/* low nibble */
	retl
	 sll	r_OFF, 2, r_X		/* delay slot: r_X = nibble * 4 */
bpf_error:
	/* Make the JIT program return zero.  The JIT epilogue
	 * stores away the original %o7 into r_saved_O7.  The
	 * normal leaf function return is to use "retl" which
	 * would evaluate to "jmpl %o7 + 8, %g0" but we want to
	 * use the saved value thus the sequence you see here.
	 */
	jmpl	r_saved_O7 + 8, %g0
	 clr	%o0			/* delay slot: return value = 0 */