/* sigtramp.S */
  1. /*
  2. * Signal trampoline for 64 bits processes in a ppc64 kernel for
  3. * use in the vDSO
  4. *
 * Copyright (C) 2004 Benjamin Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
 * Copyright (C) 2004 Alan Modra (amodra@au.ibm.com), IBM Corp.
  7. *
  8. * This program is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU General Public License
  10. * as published by the Free Software Foundation; either version
  11. * 2 of the License, or (at your option) any later version.
  12. */
  13. #include <asm/processor.h>
  14. #include <asm/ppc_asm.h>
  15. #include <asm/unistd.h>
  16. #include <asm/vdso.h>
  17. #include <asm/ptrace.h> /* XXX for __SIGNAL_FRAMESIZE */
	.text

/* The nop here is a hack.  The dwarf2 unwind routines subtract 1 from
   the return address to get an address in the middle of the presumed
   call instruction.  Since we don't have a call here, we artificially
   extend the range covered by the unwind info by padding before the
   real start.  */
	nop
	.balign 8
V_FUNCTION_BEGIN(__kernel_sigtramp_rt64)
.Lsigrt_start = . - 4		/* covers the padding nop above, so that
				   (return address - 1) still lands inside
				   this FDE's PC range */
	addi	r1, r1, __SIGNAL_FRAMESIZE	/* pop the signal frame */
	li	r0,__NR_rt_sigreturn		/* syscall number */
	sc
.Lsigrt_end:
V_FUNCTION_END(__kernel_sigtramp_rt64)

/* The ".balign 8" above and the following zeros mimic the old stack
   trampoline layout.  The last magic value is the ucontext pointer,
   chosen in such a way that older libgcc unwind code returns a zero
   for a sigcontext pointer.  */
	.long 0,0,0
	.quad 0,-21*8
/* Register r1 can be found at offset 8 (RSIZE) of a pt_regs structure.
   A pointer to the pt_regs is stored in memory at the old sp plus PTREGS.
   This DWARF expression computes the CFA as: *(*(r1 + PTREGS) + RSIZE).  */
#define cfa_save \
  .byte 0x0f;			/* DW_CFA_def_cfa_expression */		\
  .uleb128 9f - 1f;		/* length of expression */		\
1:									\
  .byte 0x71; .sleb128 PTREGS;	/* DW_OP_breg1 PTREGS */		\
  .byte 0x06;			/* DW_OP_deref -> pt_regs pointer */	\
  .byte 0x23; .uleb128 RSIZE;	/* DW_OP_plus_uconst -> &regs->gpr[1] */\
  .byte 0x06;			/* DW_OP_deref -> saved r1 */		\
9:
/* Register REGNO can be found at offset OFS of a pt_regs structure.
   A pointer to the pt_regs is stored in memory at the old sp plus PTREGS.
   Emits a DW_CFA_expression rule: value lives at *(r1 + PTREGS) + OFS.  */
#define rsave(regno, ofs) \
  .byte 0x10;			/* DW_CFA_expression */			\
  .uleb128 regno;		/* DWARF register number */		\
  .uleb128 9f - 1f;		/* length of expression */		\
1:									\
  .byte 0x71; .sleb128 PTREGS;	/* DW_OP_breg1 PTREGS */		\
  .byte 0x06;			/* DW_OP_deref -> pt_regs pointer */	\
  .ifne ofs;			/* skip the add when OFS == 0 */	\
    .byte 0x23; .uleb128 ofs;	/* DW_OP_plus_uconst OFS */		\
  .endif;								\
9:
/* If msr bit 1<<25 (MSR_VEC) is set, then VMX register REGNO is at offset
   REGNO*16 of the VMX reg struct.  A pointer to the VMX reg struct is at
   VREGS in the pt_regs struct.  This macro is for REGNO == 0, and contains
   'subroutines' (labels 2: and 3:) that the expansions of vsave_msr1 and
   vsave_msr2 jump back to, so it must be emitted first.  */
#define vsave_msr0(regno) \
  .byte 0x10;			/* DW_CFA_expression */			\
  .uleb128 regno + 77;		/* DWARF regno (vr0 == 77) */		\
  .uleb128 9f - 1f;		/* length of expression */		\
1:									\
  .byte 0x30 + regno;		/* DW_OP_lit0 (push REGNO) */		\
2:				/* entry used by vsave_msr1 */		\
  .byte 0x40;			/* DW_OP_lit16 */			\
  .byte 0x1e;			/* DW_OP_mul -> REGNO*16 */		\
3:				/* entry used by vsave_msr2 */		\
  .byte 0x71; .sleb128 PTREGS;	/* DW_OP_breg1 PTREGS */		\
  .byte 0x06;			/* DW_OP_deref -> pt_regs pointer */	\
  .byte 0x12;			/* DW_OP_dup */				\
  .byte 0x23;			/* DW_OP_plus_uconst */			\
    .uleb128 33*RSIZE;		/* msr offset within pt_regs */		\
  .byte 0x06;			/* DW_OP_deref -> saved msr */		\
  .byte 0x0c; .long 1 << 25;	/* DW_OP_const4u MSR_VEC */		\
  .byte 0x1a;			/* DW_OP_and */				\
  .byte 0x12;			/* DW_OP_dup, ret 0 if bra taken */	\
  .byte 0x30;			/* DW_OP_lit0 */			\
  .byte 0x29;			/* DW_OP_eq */				\
  .byte 0x28; .short 0x7fff;	/* DW_OP_bra to end (no VMX state) */	\
  .byte 0x13;			/* DW_OP_drop, pop the 0 */		\
  .byte 0x23; .uleb128 VREGS;	/* DW_OP_plus_uconst VREGS */		\
  .byte 0x06;			/* DW_OP_deref -> VMX save area */	\
  .byte 0x22;			/* DW_OP_plus -> &vr[REGNO] */		\
  .byte 0x2f; .short 0x7fff;	/* DW_OP_skip to end */			\
9:
/* If msr bit 1<<25 is set, then VMX register REGNO is at offset REGNO*16
   of the VMX reg struct.  REGNO is 1 thru 31.  Pushes REGNO and then
   branches back into the 'subroutine' at label 2: inside the expansion
   of vsave_msr0, which must therefore precede this in the output.  */
#define vsave_msr1(regno) \
  .byte 0x10;			/* DW_CFA_expression */			\
  .uleb128 regno + 77;		/* DWARF regno (vr0 == 77) */		\
  .uleb128 9f - 1f;		/* length of expression */		\
1:									\
  .byte 0x30 + regno;		/* DW_OP_lit n (push REGNO) */		\
  .byte 0x2f; .short 2b - 9f;	/* DW_OP_skip back to 2: in vsave_msr0 */\
9:
/* If msr bit 1<<25 is set, then VMX register REGNO is at offset OFS of
   the VMX save block.  Pushes the precomputed byte offset and branches
   back into the 'subroutine' at label 3: inside the expansion of
   vsave_msr0, skipping the REGNO*16 multiply.  */
#define vsave_msr2(regno, ofs) \
  .byte 0x10;			/* DW_CFA_expression */			\
  .uleb128 regno + 77;		/* DWARF regno (vr0 == 77) */		\
  .uleb128 9f - 1f;		/* length of expression */		\
1:									\
  .byte 0x0a; .short ofs;	/* DW_OP_const2u OFS */			\
  .byte 0x2f; .short 3b - 9f;	/* DW_OP_skip back to 3: in vsave_msr0 */\
9:
/* VMX register REGNO is at offset OFS of the VMX save area,
   unconditionally (no MSR_VEC check): *(*(r1 + PTREGS) + VREGS) + OFS.  */
#define vsave(regno, ofs) \
  .byte 0x10;			/* DW_CFA_expression */			\
  .uleb128 regno + 77;		/* DWARF regno (vr0 == 77) */		\
  .uleb128 9f - 1f;		/* length of expression */		\
1:									\
  .byte 0x71; .sleb128 PTREGS;	/* DW_OP_breg1 PTREGS */		\
  .byte 0x06;			/* DW_OP_deref -> pt_regs pointer */	\
  .byte 0x23; .uleb128 VREGS;	/* DW_OP_plus_uconst VREGS */		\
  .byte 0x06;			/* DW_OP_deref -> VMX save area */	\
  .byte 0x23; .uleb128 ofs;	/* DW_OP_plus_uconst OFS */		\
9:
/* This is where the pt_regs pointer can be found on the stack.
   NOTE(review): 128+168+56 must mirror the kernel's 64-bit rt signal
   frame layout — verify against the frame built in signal_64.c if that
   layout ever changes.  */
#define PTREGS 128+168+56
/* Size of each register slot in pt_regs.  */
#define RSIZE 8
/* Size of CR reg in DWARF unwind info.  */
#define CRSIZE 4
/* Offset of CR reg within a full word (CR occupies the low-addressed
   bytes on LE, the high-addressed bytes on BE).  */
#ifdef __LITTLE_ENDIAN__
#define CROFF 0
#else
#define CROFF (RSIZE - CRSIZE)
#endif
/* This is the offset of the VMX reg pointer within pt_regs.  */
#define VREGS 48*RSIZE+33*8
/* Describe where general purpose regs are saved.  GPR columns are their
   own register numbers; the CFA rule and all offsets index into pt_regs
   via the pointer found at old-sp + PTREGS.  Note r1 is recovered by
   cfa_save, not by an rsave entry.  */
#define EH_FRAME_GEN \
  cfa_save;								\
  rsave ( 0,  0*RSIZE);							\
  rsave ( 2,  2*RSIZE);							\
  rsave ( 3,  3*RSIZE);							\
  rsave ( 4,  4*RSIZE);							\
  rsave ( 5,  5*RSIZE);							\
  rsave ( 6,  6*RSIZE);							\
  rsave ( 7,  7*RSIZE);							\
  rsave ( 8,  8*RSIZE);							\
  rsave ( 9,  9*RSIZE);							\
  rsave (10, 10*RSIZE);							\
  rsave (11, 11*RSIZE);							\
  rsave (12, 12*RSIZE);							\
  rsave (13, 13*RSIZE);							\
  rsave (14, 14*RSIZE);							\
  rsave (15, 15*RSIZE);							\
  rsave (16, 16*RSIZE);							\
  rsave (17, 17*RSIZE);							\
  rsave (18, 18*RSIZE);							\
  rsave (19, 19*RSIZE);							\
  rsave (20, 20*RSIZE);							\
  rsave (21, 21*RSIZE);							\
  rsave (22, 22*RSIZE);							\
  rsave (23, 23*RSIZE);							\
  rsave (24, 24*RSIZE);							\
  rsave (25, 25*RSIZE);							\
  rsave (26, 26*RSIZE);							\
  rsave (27, 27*RSIZE);							\
  rsave (28, 28*RSIZE);							\
  rsave (29, 29*RSIZE);							\
  rsave (30, 30*RSIZE);							\
  rsave (31, 31*RSIZE);							\
  rsave (67, 32*RSIZE);		/* ap, used as temp for nip */		\
  rsave (65, 36*RSIZE);		/* lr */				\
  rsave (68, 38*RSIZE + CROFF);	/* cr fields (all 8 share one word) */	\
  rsave (69, 38*RSIZE + CROFF);						\
  rsave (70, 38*RSIZE + CROFF);						\
  rsave (71, 38*RSIZE + CROFF);						\
  rsave (72, 38*RSIZE + CROFF);						\
  rsave (73, 38*RSIZE + CROFF);						\
  rsave (74, 38*RSIZE + CROFF);						\
  rsave (75, 38*RSIZE + CROFF)
/* Describe where the FP regs are saved.  DWARF columns 32-63 are f0-f31;
   the FP save block starts at offset 48*RSIZE in the pt_regs area, one
   8-byte double per register.  */
#define EH_FRAME_FP \
  rsave (32, 48*RSIZE +  0*8);						\
  rsave (33, 48*RSIZE +  1*8);						\
  rsave (34, 48*RSIZE +  2*8);						\
  rsave (35, 48*RSIZE +  3*8);						\
  rsave (36, 48*RSIZE +  4*8);						\
  rsave (37, 48*RSIZE +  5*8);						\
  rsave (38, 48*RSIZE +  6*8);						\
  rsave (39, 48*RSIZE +  7*8);						\
  rsave (40, 48*RSIZE +  8*8);						\
  rsave (41, 48*RSIZE +  9*8);						\
  rsave (42, 48*RSIZE + 10*8);						\
  rsave (43, 48*RSIZE + 11*8);						\
  rsave (44, 48*RSIZE + 12*8);						\
  rsave (45, 48*RSIZE + 13*8);						\
  rsave (46, 48*RSIZE + 14*8);						\
  rsave (47, 48*RSIZE + 15*8);						\
  rsave (48, 48*RSIZE + 16*8);						\
  rsave (49, 48*RSIZE + 17*8);						\
  rsave (50, 48*RSIZE + 18*8);						\
  rsave (51, 48*RSIZE + 19*8);						\
  rsave (52, 48*RSIZE + 20*8);						\
  rsave (53, 48*RSIZE + 21*8);						\
  rsave (54, 48*RSIZE + 22*8);						\
  rsave (55, 48*RSIZE + 23*8);						\
  rsave (56, 48*RSIZE + 24*8);						\
  rsave (57, 48*RSIZE + 25*8);						\
  rsave (58, 48*RSIZE + 26*8);						\
  rsave (59, 48*RSIZE + 27*8);						\
  rsave (60, 48*RSIZE + 28*8);						\
  rsave (61, 48*RSIZE + 29*8);						\
  rsave (62, 48*RSIZE + 30*8);						\
  rsave (63, 48*RSIZE + 31*8)
/* Describe where the VMX regs are saved.  vsave_msr0(0) must come first:
   it holds the shared 'subroutine' code that the vsave_msr1/vsave_msr2
   expansions branch back into.  vr32 here is VSCR (saved after the 32
   vector regs); 33 is VRSAVE, stored in the 4 bytes at 32*16+12.  */
#ifdef CONFIG_ALTIVEC
#define EH_FRAME_VMX \
  vsave_msr0 ( 0);							\
  vsave_msr1 ( 1);							\
  vsave_msr1 ( 2);							\
  vsave_msr1 ( 3);							\
  vsave_msr1 ( 4);							\
  vsave_msr1 ( 5);							\
  vsave_msr1 ( 6);							\
  vsave_msr1 ( 7);							\
  vsave_msr1 ( 8);							\
  vsave_msr1 ( 9);							\
  vsave_msr1 (10);							\
  vsave_msr1 (11);							\
  vsave_msr1 (12);							\
  vsave_msr1 (13);							\
  vsave_msr1 (14);							\
  vsave_msr1 (15);							\
  vsave_msr1 (16);							\
  vsave_msr1 (17);							\
  vsave_msr1 (18);							\
  vsave_msr1 (19);							\
  vsave_msr1 (20);							\
  vsave_msr1 (21);							\
  vsave_msr1 (22);							\
  vsave_msr1 (23);							\
  vsave_msr1 (24);							\
  vsave_msr1 (25);							\
  vsave_msr1 (26);							\
  vsave_msr1 (27);							\
  vsave_msr1 (28);							\
  vsave_msr1 (29);							\
  vsave_msr1 (30);							\
  vsave_msr1 (31);							\
  vsave_msr2 (33, 32*16+12);						\
  vsave (32, 33*16)
#else
#define EH_FRAME_VMX
#endif
	.section .eh_frame,"a",@progbits
.Lcie:
	.long .Lcie_end - .Lcie_start	/* CIE length */
.Lcie_start:
	.long 0			/* CIE ID */
	.byte 1			/* Version number */
	.string "zRS"		/* NUL-terminated augmentation string;
				   'S' marks this as a signal frame */
	.uleb128 4		/* Code alignment factor */
	.sleb128 -8		/* Data alignment factor */
	.byte 67		/* Return address register column, ap */
	.uleb128 1		/* Augmentation value length */
	.byte 0x14		/* DW_EH_PE_pcrel | DW_EH_PE_udata8. */
	.byte 0x0c,1,0		/* DW_CFA_def_cfa: r1 ofs 0 */
	.balign 8
.Lcie_end:

	.long .Lfde0_end - .Lfde0_start	/* FDE length */
.Lfde0_start:
	.long .Lfde0_start - .Lcie	/* CIE pointer. */
	.quad .Lsigrt_start - .		/* PC start, length */
	.quad .Lsigrt_end - .Lsigrt_start
	.uleb128 0			/* Augmentation */
	EH_FRAME_GEN
	EH_FRAME_FP
	EH_FRAME_VMX
# Do we really need to describe the frame at this point?  ie. will
# we ever have some call chain that returns somewhere past the addi?
# I don't think so, since gcc doesn't support async signals.
#	.byte 0x41		/* DW_CFA_advance_loc 1*4 */
#undef PTREGS
#define PTREGS 168+56
#	EH_FRAME_GEN
#	EH_FRAME_FP
#	EH_FRAME_VMX
	.balign 8
.Lfde0_end: