entry.S
/*
 * S390 low-level entry points.
 *
 * Copyright IBM Corp. 1999, 2012
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *	      Hartmut Penner (hp@de.ibm.com),
 *	      Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/vx-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/export.h>
#include <asm/nospec-insn.h>

__PT_R0  = __PT_GPRS
__PT_R1  = __PT_GPRS + 8
__PT_R2  = __PT_GPRS + 16
__PT_R3  = __PT_GPRS + 24
__PT_R4  = __PT_GPRS + 32
__PT_R5  = __PT_GPRS + 40
__PT_R6  = __PT_GPRS + 48
__PT_R7  = __PT_GPRS + 56
__PT_R8  = __PT_GPRS + 64
__PT_R9  = __PT_GPRS + 72
__PT_R10 = __PT_GPRS + 80
__PT_R11 = __PT_GPRS + 88
__PT_R12 = __PT_GPRS + 96
__PT_R13 = __PT_GPRS + 104
__PT_R14 = __PT_GPRS + 112
__PT_R15 = __PT_GPRS + 120

STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
STACK_INIT  = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE

_TIF_WORK  = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
	      _TIF_UPROBE)
_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
	      _TIF_SYSCALL_TRACEPOINT)
_CIF_WORK  = (_CIF_MCCK_PENDING | _CIF_ASCE | _CIF_FPU)
_PIF_WORK  = (_PIF_PER_TRAP)

#define BASED(name) name-cleanup_critical(%r13)

	.macro	TRACE_IRQS_ON
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_on_caller
#endif
	.endm

	.macro	TRACE_IRQS_OFF
#ifdef CONFIG_TRACE_IRQFLAGS
	basr	%r2,%r0
	brasl	%r14,trace_hardirqs_off_caller
#endif
	.endm
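# A note on the basr above: "basr %r2,%r0" with register 0 as the
# branch target does not branch at all; it only deposits the address
# of the next instruction in %r2, which the irqflags tracing callbacks
# take as their caller-IP argument.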
	.macro	LOCKDEP_SYS_EXIT
#ifdef CONFIG_LOCKDEP
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jz	.+10
	brasl	%r14,lockdep_sys_exit
#endif
	.endm

	.macro	CHECK_STACK stacksize,savearea
#ifdef CONFIG_CHECK_STACK
	tml	%r15,\stacksize - CONFIG_STACK_GUARD
	lghi	%r14,\savearea
	jz	stack_overflow
#endif
	.endm
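# A sketch of the test-under-mask trick above, assuming a 16 KB stack
# and CONFIG_STACK_GUARD = 256: the mask is 16384 - 256 = 0x3f00, so
# "tml %r15,0x3f00" yields condition code 0 (all tested bits zero)
# exactly when the stack pointer sits in the lowest 256 bytes of the
# stack, i.e. inside the guard area -> jump to stack_overflow.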
	.macro	SWITCH_ASYNC savearea,timer
	tmhh	%r8,0x0001		# interrupting from user ?
	jnz	1f
	lgr	%r14,%r9
	slg	%r14,BASED(.Lcritical_start)
	clg	%r14,BASED(.Lcritical_length)
	jhe	0f
	lghi	%r11,\savearea		# inside critical section, do cleanup
	brasl	%r14,cleanup_critical
	tmhh	%r8,0x0001		# retest problem state after cleanup
	jnz	1f
0:	lg	%r14,__LC_ASYNC_STACK	# are we already on the async stack?
	slgr	%r14,%r15
	srag	%r14,%r14,STACK_SHIFT
	jnz	2f
	CHECK_STACK 1<<STACK_SHIFT,\savearea
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	3f
1:	LAST_BREAK %r14
	UPDATE_VTIME %r14,%r15,\timer
	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
2:	lg	%r15,__LC_ASYNC_STACK	# load async stack
3:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	.endm
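# SWITCH_ASYNC picks one of three paths: an interrupt from user space
# goes through label 1 (last-break/vtime bookkeeping) and then loads
# the async stack at label 2; an interrupt from kernel code inside the
# critical section first lets cleanup_critical repair the interrupted
# sequence; and if the "slgr/srag ...,STACK_SHIFT" pair yields zero the
# CPU is already running on the async stack, so a new pt_regs frame is
# simply carved out of the current stack pointer instead.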
	.macro	UPDATE_VTIME w1,w2,enter_timer
	lg	\w1,__LC_EXIT_TIMER
	lg	\w2,__LC_LAST_UPDATE_TIMER
	slg	\w1,\enter_timer
	slg	\w2,__LC_EXIT_TIMER
	alg	\w1,__LC_USER_TIMER
	alg	\w2,__LC_SYSTEM_TIMER
	stg	\w1,__LC_USER_TIMER
	stg	\w2,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),\enter_timer
	.endm
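# The arithmetic above relies on the CPU timer counting *down*, so an
# earlier snapshot is the larger value. __LC_EXIT_TIMER was stored at
# the last kernel exit and \enter_timer just now, hence
# "EXIT_TIMER - enter_timer" is the time spent in user space since
# then, and "LAST_UPDATE_TIMER - EXIT_TIMER" the preceding stretch of
# system time; the two deltas are accumulated into __LC_USER_TIMER and
# __LC_SYSTEM_TIMER respectively.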
	.macro	LAST_BREAK scratch
	srag	\scratch,%r10,23
	jz	.+10
	stg	%r10,__TI_last_break(%r12)
	.endm

	.macro	REENABLE_IRQS
	stg	%r8,__LC_RETURN_PSW
	ni	__LC_RETURN_PSW,0xbf
	ssm	__LC_RETURN_PSW
	.endm

	.macro	STCK savearea
#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
	.insn	s,0xb27c0000,\savearea		# store clock fast
#else
	.insn	s,0xb2050000,\savearea		# store clock
#endif
	.endm
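# The two .insn encodings are STCKF (opcode 0xb27c, store clock fast)
# and STCK (opcode 0xb205); spelling them out as raw opcodes rather
# than mnemonics is presumably done so the file still assembles with
# toolchains that predate the stckf mnemonic.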
/*
 * The TSTMSK macro generates a test-under-mask instruction by
 * calculating the memory offset for the specified mask value.
 * Mask value can be any constant. The macro shifts the mask
 * value to calculate the memory offset for the test-under-mask
 * instruction.
 */
	.macro	TSTMSK addr, mask, size=8, bytepos=0
	.if (\bytepos < \size) && (\mask >> 8)
	.if (\mask & 0xff)
	.error "Mask exceeds byte boundary"
	.endif
	TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
	.exitm
	.endif
	.ifeq \mask
	.error "Mask must not be zero"
	.endif
	off = \size - \bytepos - 1
	tm	off+\addr, \mask
	.endm
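# A sketch of the expansion (assuming the mask fits in the low byte of
# the default 8-byte field): TSTMSK __LC_CPU_FLAGS,_CIF_FPU becomes
#	tm	7+__LC_CPU_FLAGS,_CIF_FPU
# i.e. the least significant byte at offset size - bytepos - 1 = 7 is
# tested. A mask of, say, 0x0100 recurses once with mask >> 8 and
# bytepos + 1 and ends up testing the byte at offset 6 instead.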
	.macro	BPOFF
	.pushsection .altinstr_replacement, "ax"
660:	.long	0xb2e8c000
	.popsection
661:	.long	0x47000000
	.pushsection .altinstructions, "a"
	.long	661b - .
	.long	660b - .
	.word	82
	.byte	4
	.byte	4
	.popsection
	.endm

	.macro	BPON
	.pushsection .altinstr_replacement, "ax"
662:	.long	0xb2e8d000
	.popsection
663:	.long	0x47000000
	.pushsection .altinstructions, "a"
	.long	663b - .
	.long	662b - .
	.word	82
	.byte	4
	.byte	4
	.popsection
	.endm

	.macro	BPENTER tif_ptr,tif_mask
	.pushsection .altinstr_replacement, "ax"
662:	.word	0xc004, 0x0000, 0x0000	# 6 byte nop
	.word	0xc004, 0x0000, 0x0000	# 6 byte nop
	.popsection
664:	TSTMSK	\tif_ptr,\tif_mask
	jz	. + 8
	.long	0xb2e8d000
	.pushsection .altinstructions, "a"
	.long	664b - .
	.long	662b - .
	.word	82
	.byte	12
	.byte	12
	.popsection
	.endm

	.macro	BPEXIT tif_ptr,tif_mask
	TSTMSK	\tif_ptr,\tif_mask
	.pushsection .altinstr_replacement, "ax"
662:	jnz	. + 8
	.long	0xb2e8d000
	.popsection
664:	jz	. + 8
	.long	0xb2e8c000
	.pushsection .altinstructions, "a"
	.long	664b - .
	.long	662b - .
	.word	82
	.byte	8
	.byte	8
	.popsection
	.endm
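# Some context on the four macros above, hedged where it goes beyond
# what the code itself says: 0xb2e8 is the PPA (perform-processor-
# assist) opcode, and the ...c000/...d000 forms appear to carry
# function codes 12 and 13, which the spectre mitigation uses to
# disable/re-enable branch prediction. 0x47000000 is "bc 0,...", a
# 4-byte nop, and 0xc004,... a 6-byte brcl nop. Each .altinstructions
# record lists the offset of the patch site, the offset of the
# replacement, the facility bit that gates the patch (.word 82) and
# the two instruction lengths, so the alternatives framework only
# rewrites the code on machines that announce facility 82.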
	GEN_BR_THUNK %r9
	GEN_BR_THUNK %r14
	GEN_BR_THUNK %r14,%r11
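# GEN_BR_THUNK comes from <asm/nospec-insn.h>; with CONFIG_EXPOLINE
# enabled it emits the indirect-branch thunks ("expolines") that the
# BR_EX and BASR_EX macros used throughout this file branch through,
# so that indirect jumps via %r9/%r14 are not executed speculatively.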
	.section .kprobes.text, "ax"
.Ldummy:
/*
 * This nop exists only in order to avoid that __switch_to starts at
 * the beginning of the kprobes text section. In that case we would
 * have several symbols at the same address. E.g. objdump would take
 * an arbitrary symbol name when disassembling this code.
 * With the added nop in between, the __switch_to symbol is unique
 * again.
 */
	nop	0

ENTRY(__bpon)
	.globl __bpon
	BPON
	BR_EX	%r14

/*
 * Scheduler resume function, called by switch_to
 * gpr2 = (task_struct *) prev
 * gpr3 = (task_struct *) next
 * Returns:
 * gpr2 = prev
 */
ENTRY(__switch_to)
	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
	lgr	%r1,%r2
	aghi	%r1,__TASK_thread		# thread_struct of prev task
	lg	%r5,__TASK_thread_info(%r3)	# get thread_info of next
	stg	%r15,__THREAD_ksp(%r1)		# store kernel stack of prev
	lgr	%r1,%r3
	aghi	%r1,__TASK_thread		# thread_struct of next task
	lgr	%r15,%r5
	aghi	%r15,STACK_INIT			# end of kernel stack of next
	stg	%r3,__LC_CURRENT		# store task struct of next
	stg	%r5,__LC_THREAD_INFO		# store thread info of next
	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
	lg	%r15,__THREAD_ksp(%r1)		# load kernel stack of next
	/* c4 is used in guest detection: arch/s390/kernel/perf_cpum_sf.c */
	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
	mvc	__LC_CURRENT_PID(4,%r0),__TASK_pid(%r3)	# store pid of next
	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
	jz	0f
	.insn	s,0xb2800000,__LC_LPP		# set program parameter
0:	BR_EX	%r14

.L__critical_start:

#if IS_ENABLED(CONFIG_KVM)
/*
 * sie64a calling convention:
 * %r2 pointer to sie control block
 * %r3 guest register save area
 */
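# A C-level sketch of the convention above, assuming the declaration
# KVM uses for this entry point:
#	int sie64a(struct kvm_s390_sie_block *sie_block, u64 *guest_gprs);
# The value handed back in %r2 is the exit reason code that sie_exit
# below loads from __SF_EMPTY+16 (0, or -EFAULT from .Lsie_fault).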
ENTRY(sie64a)
	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
	lg	%r12,__LC_CURRENT
	stg	%r2,__SF_EMPTY(%r15)		# save control block pointer
	stg	%r3,__SF_EMPTY+8(%r15)		# save guest register save area
	xc	__SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0
	mvc	__SF_EMPTY+24(8,%r15),__TI_flags(%r12) # copy thread flags
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU		# load guest fp/vx registers ?
	jno	.Lsie_load_guest_gprs
	brasl	%r14,load_fpu_regs		# load guest fp/vx regs
.Lsie_load_guest_gprs:
	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
	lg	%r14,__LC_GMAP			# get gmap pointer
	ltgr	%r14,%r14
	jz	.Lsie_gmap
	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
.Lsie_gmap:
	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
	tm	__SIE_PROG20+3(%r14),3		# last exit...
	jnz	.Lsie_skip
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsie_skip			# exit if fp/vx regs changed
	BPEXIT	__SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	sie	0(%r14)
.Lsie_exit:
	BPOFF
	BPENTER	__SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
.Lsie_done:
# some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between sie64a and .Lsie_done should not cause program
# interrupts. So let's use 3 nops as a landing pad for all possible rewinds.
# See also .Lcleanup_sie
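# Worked out: a rewind by the instruction length code backs the PSW up
# by 2, 4 or 6 bytes from sie_exit, landing on .Lrewind_pad2, -pad4 or
# -pad6 respectively. The three nopr pads below cover exactly those
# offsets, and the EX_TABLE entries further down route each pad (and
# sie_exit itself) to .Lsie_fault.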
.Lrewind_pad6:
	nopr	7
.Lrewind_pad4:
	nopr	7
.Lrewind_pad2:
	nopr	7
	.globl sie_exit
sie_exit:
	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
	xgr	%r0,%r0				# clear guest registers to
	xgr	%r1,%r1				# prevent speculative use
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
	lg	%r2,__SF_EMPTY+16(%r15)		# return exit reason code
	BR_EX	%r14
.Lsie_fault:
	lghi	%r14,-EFAULT
	stg	%r14,__SF_EMPTY+16(%r15)	# set exit reason code
	j	sie_exit

	EX_TABLE(.Lrewind_pad6,.Lsie_fault)
	EX_TABLE(.Lrewind_pad4,.Lsie_fault)
	EX_TABLE(.Lrewind_pad2,.Lsie_fault)
	EX_TABLE(sie_exit,.Lsie_fault)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
#endif

/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are executed with interrupts enabled.
 */
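# For orientation (standard s390 interruption behaviour rather than
# anything defined in this file): on an svc the hardware stores the
# old PSW at __LC_SVC_OLD_PSW, loads the new PSW from __LC_SVC_NEW_PSW,
# and stores the instruction length code and the svc interruption code
# side by side in lowcore, which is why a single 4-byte mvc from
# __LC_SVC_ILC below captures both. The stpt at the very top snapshots
# the CPU timer before any kernel time accrues, for UPDATE_VTIME.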
ENTRY(system_call)
	stpt	__LC_SYNC_ENTER_TIMER
.Lsysc_stmg:
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	BPOFF
	lg	%r10,__LC_LAST_BREAK
	lg	%r12,__LC_THREAD_INFO
	lghi	%r14,_PIF_SYSCALL
.Lsysc_per:
	lg	%r15,__LC_KERNEL_STACK
	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
	LAST_BREAK %r13
.Lsysc_vtime:
	UPDATE_VTIME %r10,%r13,__LC_SYNC_ENTER_TIMER
	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
	stmg	%r0,%r7,__PT_R0(%r11)
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
	stg	%r14,__PT_FLAGS(%r11)
.Lsysc_do_svc:
	# clear user controlled register to prevent speculative use
	xgr	%r0,%r0
	lg	%r10,__TI_sysc_table(%r12)	# address of system call table
	llgh	%r8,__PT_INT_CODE+2(%r11)
	slag	%r8,%r8,2			# shift and test for svc 0
	jnz	.Lsysc_nr_ok
	# svc 0: system call number in %r1
	llgfr	%r1,%r1				# clear high word in r1
	cghi	%r1,NR_syscalls
	jnl	.Lsysc_nr_ok
	sth	%r1,__PT_INT_CODE+2(%r11)
	slag	%r8,%r1,2
.Lsysc_nr_ok:
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	stg	%r2,__PT_ORIG_GPR2(%r11)
	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
	lgf	%r9,0(%r8,%r10)			# get system call address
	TSTMSK	__TI_flags(%r12),_TIF_TRACE
	jnz	.Lsysc_tracesys
	BASR_EX	%r14,%r9			# call sys_xxxx
	stg	%r2,__PT_R2(%r11)		# store return value
.Lsysc_return:
	LOCKDEP_SYS_EXIT
.Lsysc_tif:
	TSTMSK	__PT_FLAGS(%r11),_PIF_WORK
	jnz	.Lsysc_work
	TSTMSK	__TI_flags(%r12),_TIF_WORK
	jnz	.Lsysc_work			# check for work
	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
	jnz	.Lsysc_work
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
.Lsysc_restore:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
.Lsysc_exit_timer:
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_PSW
.Lsysc_done:

#
# One of the work bits is on. Find out which one.
#
.Lsysc_work:
	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
	jo	.Lsysc_mcck_pending
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jo	.Lsysc_reschedule
#ifdef CONFIG_UPROBES
	TSTMSK	__TI_flags(%r12),_TIF_UPROBE
	jo	.Lsysc_uprobe_notify
#endif
	TSTMSK	__PT_FLAGS(%r11),_PIF_PER_TRAP
	jo	.Lsysc_singlestep
	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
	jo	.Lsysc_sigpending
	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
	jo	.Lsysc_notify_resume
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsysc_vxrs
	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE
	jo	.Lsysc_uaccess
	j	.Lsysc_return		# beware of critical section cleanup

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lsysc_reschedule:
	larl	%r14,.Lsysc_return
	jg	schedule

#
# _CIF_MCCK_PENDING is set, call handler
#
.Lsysc_mcck_pending:
	larl	%r14,.Lsysc_return
	jg	s390_handle_mcck	# TIF bit will be cleared by handler

#
# _CIF_ASCE is set, load user space asce
#
.Lsysc_uaccess:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE
	lctlg	%c1,%c1,__LC_USER_ASCE	# load primary asce
	j	.Lsysc_return

#
# CIF_FPU is set, restore floating-point controls and floating-point registers.
#
.Lsysc_vxrs:
	larl	%r14,.Lsysc_return
	jg	load_fpu_regs

#
# _TIF_SIGPENDING is set, call do_signal
#
.Lsysc_sigpending:
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_signal
	TSTMSK	__PT_FLAGS(%r11),_PIF_SYSCALL
	jno	.Lsysc_return
	lmg	%r2,%r7,__PT_R2(%r11)	# load svc arguments
	lg	%r10,__TI_sysc_table(%r12)	# address of system call table
	lghi	%r8,0			# svc 0 returns -ENOSYS
	llgh	%r1,__PT_INT_CODE+2(%r11)	# load new svc number
	cghi	%r1,NR_syscalls
	jnl	.Lsysc_nr_ok		# invalid svc number -> do svc 0
	slag	%r8,%r1,2
	j	.Lsysc_nr_ok		# restart svc

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lsysc_notify_resume:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_notify_resume

#
# _TIF_UPROBE is set, call uprobe_notify_resume
#
#ifdef CONFIG_UPROBES
.Lsysc_uprobe_notify:
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	uprobe_notify_resume
#endif

#
# _PIF_PER_TRAP is set, call do_per_trap
#
.Lsysc_singlestep:
	ni	__PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_per_trap

#
# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
# and after the system call
#
.Lsysc_tracesys:
	lgr	%r2,%r11		# pass pointer to pt_regs
	la	%r3,0
	llgh	%r0,__PT_INT_CODE+2(%r11)
	stg	%r0,__PT_R2(%r11)
	brasl	%r14,do_syscall_trace_enter
	lghi	%r0,NR_syscalls
	clgr	%r0,%r2
	jnh	.Lsysc_tracenogo
	sllg	%r8,%r2,2
	lgf	%r9,0(%r8,%r10)
.Lsysc_tracego:
	lmg	%r3,%r7,__PT_R3(%r11)
	stg	%r7,STACK_FRAME_OVERHEAD(%r15)
	lg	%r2,__PT_ORIG_GPR2(%r11)
	BASR_EX	%r14,%r9		# call sys_xxx
	stg	%r2,__PT_R2(%r11)	# store return value
.Lsysc_tracenogo:
	TSTMSK	__TI_flags(%r12),_TIF_TRACE
	jz	.Lsysc_return
	lgr	%r2,%r11		# pass pointer to pt_regs
	larl	%r14,.Lsysc_return
	jg	do_syscall_trace_exit

#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	lg	%r12,__LC_THREAD_INFO
	brasl	%r14,schedule_tail
	TRACE_IRQS_ON
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	tm	__PT_PSW+1(%r11),0x01	# forking a kernel thread ?
	jne	.Lsysc_tracenogo
	# it's a kernel thread
	lmg	%r9,%r10,__PT_R9(%r11)	# load gprs
ENTRY(kernel_thread_starter)
	la	%r2,0(%r10)
	BASR_EX	%r14,%r9
	j	.Lsysc_tracenogo
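# A note on the kernel-thread path above (inferred from the register
# use here; the actual setup lives in copy_thread()): for a kernel
# thread, %r9 holds the thread function and %r10 its argument, so
# kernel_thread_starter moves the argument into %r2 and calls the
# function; if it ever returns, the thread leaves through the regular
# syscall-exit work path at .Lsysc_tracenogo.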
/*
 * Program check handler routine
 */
ENTRY(pgm_check_handler)
	stpt	__LC_SYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
	lg	%r10,__LC_LAST_BREAK
	lg	%r12,__LC_THREAD_INFO
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_PGM_OLD_PSW
	tmhh	%r8,0x0001		# test problem state bit
	jnz	2f			# -> fault in user space
#if IS_ENABLED(CONFIG_KVM)
	# cleanup critical section for sie64a
	lgr	%r14,%r9
	slg	%r14,BASED(.Lsie_critical_start)
	clg	%r14,BASED(.Lsie_critical_length)
	jhe	0f
	brasl	%r14,.Lcleanup_sie
#endif
0:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
	jnz	1f			# -> enabled, can't be a double fault
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jnz	.Lpgm_svcper		# -> single stepped svc
1:	CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	3f
2:	LAST_BREAK %r14
	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
	lg	%r15,__LC_KERNEL_STACK
	lg	%r14,__TI_task(%r12)
	aghi	%r14,__TASK_thread	# pointer to thread_struct
	lghi	%r13,__LC_PGM_TDB
	tm	__LC_PGM_ILC+2,0x02	# check for transaction abort
	jz	3f
	mvc	__THREAD_trap_tdb(256,%r14),0(%r13)
3:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
	mvc	__PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	stg	%r10,__PT_ARGS(%r11)
	tm	__LC_PGM_ILC+3,0x80	# check for per exception
	jz	4f
	tmhh	%r8,0x0001		# kernel per event ?
	jz	.Lpgm_kprobe
	oi	__PT_FLAGS+7(%r11),_PIF_PER_TRAP
	mvc	__THREAD_per_address(8,%r14),__LC_PER_ADDRESS
	mvc	__THREAD_per_cause(2,%r14),__LC_PER_CODE
	mvc	__THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
4:	REENABLE_IRQS
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	larl	%r1,pgm_check_table
	llgh	%r10,__PT_INT_CODE+2(%r11)
	nill	%r10,0x007f
	sll	%r10,2
	je	.Lpgm_return
	lgf	%r9,0(%r10,%r1)		# load address of handler routine
	lgr	%r2,%r11		# pass pointer to pt_regs
	BASR_EX	%r14,%r9		# branch to interrupt-handler
.Lpgm_return:
	LOCKDEP_SYS_EXIT
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lsysc_restore
	j	.Lsysc_tif

#
# PER event in supervisor state, must be kprobes
#
.Lpgm_kprobe:
	REENABLE_IRQS
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_per_trap
	j	.Lpgm_return

#
# single stepped system call
#
.Lpgm_svcper:
	mvc	__LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
	larl	%r14,.Lsysc_per
	stg	%r14,__LC_RETURN_PSW+8
	lghi	%r14,_PIF_SYSCALL | _PIF_PER_TRAP
	lpswe	__LC_RETURN_PSW		# branch to .Lsysc_per and enable irqs
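# .Lpgm_svcper handles the case where a PER single-step event fires on
# the svc instruction itself: it builds a return PSW from the mask of
# __LC_SVC_NEW_PSW and the address of .Lsysc_per, so the lpswe both
# enables interrupts and resumes in the system call handler, with %r14
# carrying _PIF_SYSCALL | _PIF_PER_TRAP so the PER trap is delivered
# once the syscall work is done.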
/*
 * IO interrupt handler routine
 */
ENTRY(io_int_handler)
	STCK	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r10,__LC_LAST_BREAK
	lg	%r12,__LC_THREAD_INFO
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_IO_OLD_PSW
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
	jo	.Lio_restore
	TRACE_IRQS_OFF
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
.Lio_loop:
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,IO_INTERRUPT
	tm	__PT_INT_CODE+8(%r11),0x80	# adapter interrupt ?
	jz	.Lio_call
	lghi	%r3,THIN_INTERRUPT
.Lio_call:
	brasl	%r14,do_IRQ
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPAR
	jz	.Lio_return
	tpi	0
	jz	.Lio_return
	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
	j	.Lio_loop
.Lio_return:
	LOCKDEP_SYS_EXIT
	TRACE_IRQS_ON
.Lio_tif:
	TSTMSK	__TI_flags(%r12),_TIF_WORK
	jnz	.Lio_work		# there is work to do (signals etc.)
	TSTMSK	__LC_CPU_FLAGS,_CIF_WORK
	jnz	.Lio_work
.Lio_restore:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r11)
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lio_exit_kernel
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
.Lio_exit_timer:
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
.Lio_exit_kernel:
	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_PSW
.Lio_done:

#
# There is work to do, find out in which context we have been interrupted:
# 1) if we return to user space we can do all _TIF_WORK work
# 2) if we return to kernel code and kvm is enabled check if we need to
#    modify the psw to leave SIE
# 3) if we return to kernel code and preemptive scheduling is enabled check
#    the preemption counter and if it is zero call preempt_schedule_irq
# Before any work can be done, a switch to the kernel stack is required.
#
.Lio_work:
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jo	.Lio_work_user		# yes -> do resched & signal
#ifdef CONFIG_PREEMPT
	# check for preemptive scheduling
	icm	%r0,15,__TI_precount(%r12)
	jnz	.Lio_restore		# preemption is disabled
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jno	.Lio_restore
	# switch to kernel stack
	lg	%r1,__PT_R15(%r11)
	aghi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	# TRACE_IRQS_ON already done at .Lio_return, call
	# TRACE_IRQS_OFF to keep things symmetrical
	TRACE_IRQS_OFF
	brasl	%r14,preempt_schedule_irq
	j	.Lio_return
#else
	j	.Lio_restore
#endif

#
# Need to do work before returning to userspace, switch to kernel stack
#
.Lio_work_user:
	lg	%r1,__LC_KERNEL_STACK
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1

#
# One of the work bits is on. Find out which one.
#
.Lio_work_tif:
	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
	jo	.Lio_mcck_pending
	TSTMSK	__TI_flags(%r12),_TIF_NEED_RESCHED
	jo	.Lio_reschedule
	TSTMSK	__TI_flags(%r12),_TIF_SIGPENDING
	jo	.Lio_sigpending
	TSTMSK	__TI_flags(%r12),_TIF_NOTIFY_RESUME
	jo	.Lio_notify_resume
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lio_vxrs
	TSTMSK	__LC_CPU_FLAGS,_CIF_ASCE
	jo	.Lio_uaccess
	j	.Lio_return		# beware of critical section cleanup

#
# _CIF_MCCK_PENDING is set, call handler
#
.Lio_mcck_pending:
	# TRACE_IRQS_ON already done at .Lio_return
	brasl	%r14,s390_handle_mcck	# TIF bit will be cleared by handler
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _CIF_ASCE is set, load user space asce
#
.Lio_uaccess:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ASCE
	lctlg	%c1,%c1,__LC_USER_ASCE	# load primary asce
	j	.Lio_return

#
# CIF_FPU is set, restore floating-point controls and floating-point registers.
#
.Lio_vxrs:
	larl	%r14,.Lio_return
	jg	load_fpu_regs

#
# _TIF_NEED_RESCHED is set, call schedule
#
.Lio_reschedule:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	brasl	%r14,schedule		# call scheduler
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_SIGPENDING is set, call do_signal
#
.Lio_sigpending:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_signal
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
#
.Lio_notify_resume:
	# TRACE_IRQS_ON already done at .Lio_return
	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,do_notify_resume
	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
	TRACE_IRQS_OFF
	j	.Lio_return

/*
 * External interrupt handler routine
 */
ENTRY(ext_int_handler)
	STCK	__LC_INT_CLOCK
	stpt	__LC_ASYNC_ENTER_TIMER
	BPOFF
	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
	lg	%r10,__LC_LAST_BREAK
	lg	%r12,__LC_THREAD_INFO
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_EXT_OLD_PSW
	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
	stmg	%r8,%r9,__PT_PSW(%r11)
	lghi	%r1,__LC_EXT_PARAMS2
	mvc	__PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
	mvc	__PT_INT_PARM_LONG(8,%r11),0(%r1)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	TSTMSK	__LC_CPU_FLAGS,_CIF_IGNORE_IRQ
	jo	.Lio_restore
	TRACE_IRQS_OFF
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	lghi	%r3,EXT_INTERRUPT
	brasl	%r14,do_IRQ
	j	.Lio_return

/*
 * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
 */
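# How the wait PSW is assembled below (read off the code, with the C
# prototype assumed to be
#	void psw_idle(struct s390_idle_data *data, unsigned long psw_mask)):
# %r3 carries the PSW mask and is stored at __SF_EMPTY(%r15); the
# address word at __SF_EMPTY+8 is set to .Lpsw_idle_lpsw+4, i.e. right
# behind the lpswe, so the interrupt that ends the wait sees exactly
# that address as the old PSW and .Lcleanup_idle can recognize an
# interrupted idle sequence.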
ENTRY(psw_idle)
	stg	%r3,__SF_EMPTY(%r15)
	larl	%r1,.Lpsw_idle_lpsw+4
	stg	%r1,__SF_EMPTY+8(%r15)
#ifdef CONFIG_SMP
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	.Lpsw_idle_stcctm
	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
.Lpsw_idle_stcctm:
#endif
	oi	__LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
	BPON
	STCK	__CLOCK_IDLE_ENTER(%r2)
	stpt	__TIMER_IDLE_ENTER(%r2)
.Lpsw_idle_lpsw:
	lpswe	__SF_EMPTY(%r15)
	BR_EX	%r14
.Lpsw_idle_end:

/*
 * Store floating-point controls and floating-point or vector registers,
 * depending on whether the vector facility is available. A critical
 * section cleanup assures that the registers are stored even if
 * interrupted for some other work. The CIF_FPU flag is set to trigger a
 * lazy restore of the register contents at return from io or a system
 * call.
 */
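# In other words (the lazy-FPU scheme as this file implements it):
# once save_fpu_regs has run, a set _CIF_FPU means "the user's fp/vx
# contents live in the thread structure, not in the hardware
# registers"; kernel code may then clobber the fp/vx registers freely,
# and load_fpu_regs puts the saved contents back on the way out to
# user space (see .Lsysc_vxrs/.Lio_vxrs above).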
ENTRY(save_fpu_regs)
	lg	%r2,__LC_CURRENT
	aghi	%r2,__TASK_thread
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jo	.Lsave_fpu_regs_exit
	stfpc	__THREAD_FPU_fpc(%r2)
.Lsave_fpu_regs_fpc_end:
	lg	%r3,__THREAD_FPU_regs(%r2)
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	jz	.Lsave_fpu_regs_fp	# no -> store FP regs
.Lsave_fpu_regs_vx_low:
	VSTM	%v0,%v15,0,%r3		# vstm 0,15,0(3)
.Lsave_fpu_regs_vx_high:
	VSTM	%v16,%v31,256,%r3	# vstm 16,31,256(3)
	j	.Lsave_fpu_regs_done	# -> set CIF_FPU flag
.Lsave_fpu_regs_fp:
	std	0,0(%r3)
	std	1,8(%r3)
	std	2,16(%r3)
	std	3,24(%r3)
	std	4,32(%r3)
	std	5,40(%r3)
	std	6,48(%r3)
	std	7,56(%r3)
	std	8,64(%r3)
	std	9,72(%r3)
	std	10,80(%r3)
	std	11,88(%r3)
	std	12,96(%r3)
	std	13,104(%r3)
	std	14,112(%r3)
	std	15,120(%r3)
.Lsave_fpu_regs_done:
	oi	__LC_CPU_FLAGS+7,_CIF_FPU
.Lsave_fpu_regs_exit:
	BR_EX	%r14
.Lsave_fpu_regs_end:
#if IS_ENABLED(CONFIG_KVM)
EXPORT_SYMBOL(save_fpu_regs)
#endif

/*
 * Load floating-point controls and floating-point or vector registers.
 * A critical section cleanup assures that the register contents are
 * loaded even if interrupted for some other work.
 *
 * There are special calling conventions to fit into sysc and io return work:
 *	%r15:	<kernel stack>
 * The function requires:
 *	%r4
 */
load_fpu_regs:
	lg	%r4,__LC_CURRENT
	aghi	%r4,__TASK_thread
	TSTMSK	__LC_CPU_FLAGS,_CIF_FPU
	jno	.Lload_fpu_regs_exit
	lfpc	__THREAD_FPU_fpc(%r4)
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_VX
	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
	jz	.Lload_fpu_regs_fp		# -> no VX, load FP regs
.Lload_fpu_regs_vx:
	VLM	%v0,%v15,0,%r4
.Lload_fpu_regs_vx_high:
	VLM	%v16,%v31,256,%r4
	j	.Lload_fpu_regs_done
.Lload_fpu_regs_fp:
	ld	0,0(%r4)
	ld	1,8(%r4)
	ld	2,16(%r4)
	ld	3,24(%r4)
	ld	4,32(%r4)
	ld	5,40(%r4)
	ld	6,48(%r4)
	ld	7,56(%r4)
	ld	8,64(%r4)
	ld	9,72(%r4)
	ld	10,80(%r4)
	ld	11,88(%r4)
	ld	12,96(%r4)
	ld	13,104(%r4)
	ld	14,112(%r4)
	ld	15,120(%r4)
.Lload_fpu_regs_done:
	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
.Lload_fpu_regs_exit:
	BR_EX	%r14
.Lload_fpu_regs_end:

.L__critical_end:

/*
 * Machine check handler routines
 */
ENTRY(mcck_int_handler)
	STCK	__LC_MCCK_CLOCK
	BPOFF
	la	%r1,4095		# revalidate r1
	spt	__LC_CPU_TIMER_SAVE_AREA-4095(%r1)	# revalidate cpu timer
	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1) # revalidate gprs
	lg	%r10,__LC_LAST_BREAK
	lg	%r12,__LC_THREAD_INFO
	larl	%r13,cleanup_critical
	lmg	%r8,%r9,__LC_MCK_OLD_PSW
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
	TSTMSK	__LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
	jo	3f
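# The compare cascade below picks the most recent of the timer
# snapshots kept in lowcore. Because the CPU timer counts down, "more
# recent" means the *smaller* value, which is what the jl branches
# keep; the winner is loaded back via spt and used as the mcck entry
# timestamp, since the hardware-saved value was flagged invalid.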
	la	%r14,__LC_SYNC_ENTER_TIMER
	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
	jl	0f
	la	%r14,__LC_ASYNC_ENTER_TIMER
0:	clc	0(8,%r14),__LC_EXIT_TIMER
	jl	1f
	la	%r14,__LC_EXIT_TIMER
1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
	jl	2f
	la	%r14,__LC_LAST_UPDATE_TIMER
2:	spt	0(%r14)
	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
3:	TSTMSK	__LC_MCCK_CODE,(MCCK_CODE_PSW_MWP_VALID|MCCK_CODE_PSW_IA_VALID)
	jno	.Lmcck_panic		# no -> skip cleanup critical
	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
.Lmcck_skip:
	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
	stmg	%r0,%r7,__PT_R0(%r11)
	# clear user controlled registers to prevent speculative use
	xgr	%r0,%r0
	xgr	%r1,%r1
	xgr	%r2,%r2
	xgr	%r3,%r3
	xgr	%r4,%r4
	xgr	%r5,%r5
	xgr	%r6,%r6
	xgr	%r7,%r7
	xgr	%r10,%r10
	mvc	__PT_R8(64,%r11),0(%r14)
	stmg	%r8,%r9,__PT_PSW(%r11)
	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	brasl	%r14,s390_do_machine_check
	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
	jno	.Lmcck_return
	lg	%r1,__LC_KERNEL_STACK	# switch to kernel stack
	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
	xc	__SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
	la	%r11,STACK_FRAME_OVERHEAD(%r1)
	lgr	%r15,%r1
	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
	TSTMSK	__LC_CPU_FLAGS,_CIF_MCCK_PENDING
	jno	.Lmcck_return
	TRACE_IRQS_OFF
	brasl	%r14,s390_handle_mcck
	TRACE_IRQS_ON
.Lmcck_return:
	lg	%r14,__LC_VDSO_PER_CPU
	lmg	%r0,%r10,__PT_R0(%r11)
	mvc	__LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11)	# move return PSW
	tm	__LC_RETURN_MCCK_PSW+1,0x01	# returning to user ?
	jno	0f
	BPEXIT	__TI_flags(%r12),_TIF_ISOLATE_BP
	stpt	__LC_EXIT_TIMER
	mvc	__VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
0:	lmg	%r11,%r15,__PT_R11(%r11)
	lpswe	__LC_RETURN_MCCK_PSW
.Lmcck_panic:
	lg	%r15,__LC_PANIC_STACK
	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
	j	.Lmcck_skip

#
# PSW restart interrupt handler
#
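# Context, hedged: a PSW restart interrupt is the kernel's generic
# "run this function on this CPU" doorbell; the C side deposits a
# function pointer, an argument and the requesting CPU's address in
# __LC_RESTART_FN/_DATA/_SOURCE before raising it (used, for example,
# by the CPU restart and dump paths). The sigp sense loop below waits
# until the requesting CPU has finished storing its status before the
# function is called.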
ENTRY(restart_int_handler)
	TSTMSK	__LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
	jz	0f
	.insn	s,0xb2800000,__LC_LPP
0:	stg	%r15,__LC_SAVE_AREA_RESTART
	lg	%r15,__LC_RESTART_STACK
	aghi	%r15,-__PT_SIZE			# create pt_regs on stack
	xc	0(__PT_SIZE,%r15),0(%r15)
	stmg	%r0,%r14,__PT_R0(%r15)
	mvc	__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
	mvc	__PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw
	aghi	%r15,-STACK_FRAME_OVERHEAD	# create stack frame on stack
	xc	0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
	lg	%r1,__LC_RESTART_FN		# load fn, parm & source cpu
	lg	%r2,__LC_RESTART_DATA
	lg	%r3,__LC_RESTART_SOURCE
	ltgr	%r3,%r3				# test source cpu address
	jm	1f				# negative -> skip source stop
0:	sigp	%r4,%r3,SIGP_SENSE		# sigp sense to source cpu
	brc	10,0b				# wait for status stored
1:	basr	%r14,%r1			# call function
	stap	__SF_EMPTY(%r15)		# store cpu address
	llgh	%r3,__SF_EMPTY(%r15)
2:	sigp	%r4,%r3,SIGP_STOP		# sigp stop to current cpu
	brc	2,2b
3:	j	3b

	.section .kprobes.text, "ax"

#ifdef CONFIG_CHECK_STACK
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Setup a pt_regs so that show_trace can provide a good call trace.
 */
stack_overflow:
	lg	%r15,__LC_PANIC_STACK	# change to panic stack
	la	%r11,STACK_FRAME_OVERHEAD(%r15)
	stmg	%r0,%r7,__PT_R0(%r11)
	stmg	%r8,%r9,__PT_PSW(%r11)
	mvc	__PT_R8(64,%r11),0(%r14)
	stg	%r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
	lgr	%r2,%r11		# pass pointer to pt_regs
	jg	kernel_stack_overflow
#endif
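# How cleanup_critical works (summarizing the code below): it is
# entered with %r9 holding the address at which the critical section
# was interrupted. The chain of clg instructions locates %r9 within
# one of the address ranges listed in .Lcleanup_table, and the
# matching .Lcleanup_* routine finishes or redoes the half-done
# sequence (timer updates, register saves and so on), then returns
# with %r9 rewritten to a safe resume address, which ends up as the
# PSW address the interrupt handler eventually returns to.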
cleanup_critical:
#if IS_ENABLED(CONFIG_KVM)
	clg	%r9,BASED(.Lcleanup_table_sie)	# .Lsie_gmap
	jl	0f
	clg	%r9,BASED(.Lcleanup_table_sie+8)# .Lsie_done
	jl	.Lcleanup_sie
#endif
	clg	%r9,BASED(.Lcleanup_table)	# system_call
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+8)	# .Lsysc_do_svc
	jl	.Lcleanup_system_call
	clg	%r9,BASED(.Lcleanup_table+16)	# .Lsysc_tif
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+24)	# .Lsysc_restore
	jl	.Lcleanup_sysc_tif
	clg	%r9,BASED(.Lcleanup_table+32)	# .Lsysc_done
	jl	.Lcleanup_sysc_restore
	clg	%r9,BASED(.Lcleanup_table+40)	# .Lio_tif
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+48)	# .Lio_restore
	jl	.Lcleanup_io_tif
	clg	%r9,BASED(.Lcleanup_table+56)	# .Lio_done
	jl	.Lcleanup_io_restore
	clg	%r9,BASED(.Lcleanup_table+64)	# psw_idle
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+72)	# .Lpsw_idle_end
	jl	.Lcleanup_idle
	clg	%r9,BASED(.Lcleanup_table+80)	# save_fpu_regs
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+88)	# .Lsave_fpu_regs_end
	jl	.Lcleanup_save_fpu_regs
	clg	%r9,BASED(.Lcleanup_table+96)	# load_fpu_regs
	jl	0f
	clg	%r9,BASED(.Lcleanup_table+104)	# .Lload_fpu_regs_end
	jl	.Lcleanup_load_fpu_regs
0:	BR_EX	%r14,%r11

	.align	8
.Lcleanup_table:
	.quad	system_call
	.quad	.Lsysc_do_svc
	.quad	.Lsysc_tif
	.quad	.Lsysc_restore
	.quad	.Lsysc_done
	.quad	.Lio_tif
	.quad	.Lio_restore
	.quad	.Lio_done
	.quad	psw_idle
	.quad	.Lpsw_idle_end
	.quad	save_fpu_regs
	.quad	.Lsave_fpu_regs_end
	.quad	load_fpu_regs
	.quad	.Lload_fpu_regs_end

#if IS_ENABLED(CONFIG_KVM)
.Lcleanup_table_sie:
	.quad	.Lsie_gmap
	.quad	.Lsie_done

.Lcleanup_sie:
	BPENTER	__SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
	lg	%r9,__SF_EMPTY(%r15)		# get control block pointer
	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
	larl	%r9,sie_exit			# skip forward to sie_exit
	BR_EX	%r14,%r11
#endif

.Lcleanup_system_call:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_system_call_insn)
	jh	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	# check if stmg has been executed
	clg	%r9,BASED(.Lcleanup_system_call_insn+8)
	jh	0f
	mvc	__LC_SAVE_AREA_SYNC(64),0(%r11)
0:	# check if base register setup + TIF bit load has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+16)
	jhe	0f
	# set up saved registers r10 and r12
	stg	%r10,16(%r11)		# r10 last break
	stg	%r12,32(%r11)		# r12 thread-info pointer
0:	# check if the user time update has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+24)
	jh	0f
	lg	%r15,__LC_EXIT_TIMER
	slg	%r15,__LC_SYNC_ENTER_TIMER
	alg	%r15,__LC_USER_TIMER
	stg	%r15,__LC_USER_TIMER
0:	# check if the system time update has been done
	clg	%r9,BASED(.Lcleanup_system_call_insn+32)
	jh	0f
	lg	%r15,__LC_LAST_UPDATE_TIMER
	slg	%r15,__LC_EXIT_TIMER
	alg	%r15,__LC_SYSTEM_TIMER
	stg	%r15,__LC_SYSTEM_TIMER
0:	# update accounting time stamp
	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
	# do LAST_BREAK
	lg	%r9,16(%r11)
	srag	%r9,%r9,23
	jz	0f
	mvc	__TI_last_break(8,%r12),16(%r11)
0:	BPENTER	__TI_flags(%r12),_TIF_ISOLATE_BP
	# set up saved register r11
	lg	%r15,__LC_KERNEL_STACK
	la	%r9,STACK_FRAME_OVERHEAD(%r15)
	stg	%r9,24(%r11)		# r11 pt_regs pointer
	# fill pt_regs
	mvc	__PT_R8(64,%r9),__LC_SAVE_AREA_SYNC
	stmg	%r0,%r7,__PT_R0(%r9)
	mvc	__PT_PSW(16,%r9),__LC_SVC_OLD_PSW
	mvc	__PT_INT_CODE(4,%r9),__LC_SVC_ILC
	xc	__PT_FLAGS(8,%r9),__PT_FLAGS(%r9)
	mvi	__PT_FLAGS+7(%r9),_PIF_SYSCALL
	# setup saved register r15
	stg	%r15,56(%r11)		# r15 stack pointer
	# set new psw address and exit
	larl	%r9,.Lsysc_do_svc
	BR_EX	%r14,%r11
.Lcleanup_system_call_insn:
	.quad	system_call
	.quad	.Lsysc_stmg
	.quad	.Lsysc_per
	.quad	.Lsysc_vtime+36
	.quad	.Lsysc_vtime+42

.Lcleanup_sysc_tif:
	larl	%r9,.Lsysc_tif
	BR_EX	%r14,%r11

.Lcleanup_sysc_restore:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_sysc_restore_insn)
	jh	0f
	mvc	__LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	clg	%r9,BASED(.Lcleanup_sysc_restore_insn+8)
	je	1f
	lg	%r9,24(%r11)		# get saved pointer to pt_regs
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
	mvc	0(64,%r11),__PT_R8(%r9)
	lmg	%r0,%r7,__PT_R0(%r9)
1:	lmg	%r8,%r9,__LC_RETURN_PSW
	BR_EX	%r14,%r11
.Lcleanup_sysc_restore_insn:
	.quad	.Lsysc_exit_timer
	.quad	.Lsysc_done - 4

.Lcleanup_io_tif:
	larl	%r9,.Lio_tif
	BR_EX	%r14,%r11

.Lcleanup_io_restore:
	# check if stpt has been executed
	clg	%r9,BASED(.Lcleanup_io_restore_insn)
	jh	0f
	mvc	__LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
0:	clg	%r9,BASED(.Lcleanup_io_restore_insn+8)
	je	1f
	lg	%r9,24(%r11)		# get saved r11 pointer to pt_regs
	mvc	__LC_RETURN_PSW(16),__PT_PSW(%r9)
	mvc	0(64,%r11),__PT_R8(%r9)
	lmg	%r0,%r7,__PT_R0(%r9)
1:	lmg	%r8,%r9,__LC_RETURN_PSW
	BR_EX	%r14,%r11
.Lcleanup_io_restore_insn:
	.quad	.Lio_exit_timer
	.quad	.Lio_done - 4

.Lcleanup_idle:
	ni	__LC_CPU_FLAGS+7,255-_CIF_ENABLED_WAIT
	# copy interrupt clock & cpu timer
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
	cghi	%r11,__LC_SAVE_AREA_ASYNC
	je	0f
	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
0:	# check if stck & stpt have been executed
	clg	%r9,BASED(.Lcleanup_idle_insn)
	jhe	1f
	mvc	__CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
	mvc	__TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
1:	# calculate idle cycles
#ifdef CONFIG_SMP
	clg	%r9,BASED(.Lcleanup_idle_insn)
	jl	3f
	larl	%r1,smp_cpu_mtid
	llgf	%r1,0(%r1)
	ltgr	%r1,%r1
	jz	3f
	.insn	rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15)
	larl	%r3,mt_cycles
	ag	%r3,__LC_PERCPU_OFFSET
	la	%r4,__SF_EMPTY+16(%r15)
2:	lg	%r0,0(%r3)
	slg	%r0,0(%r4)
	alg	%r0,64(%r4)
	stg	%r0,0(%r3)
	la	%r3,8(%r3)
	la	%r4,8(%r4)
	brct	%r1,2b
#endif
3:	# account system time going idle
	lg	%r9,__LC_STEAL_TIMER
	alg	%r9,__CLOCK_IDLE_ENTER(%r2)
	slg	%r9,__LC_LAST_UPDATE_CLOCK
	stg	%r9,__LC_STEAL_TIMER
	mvc	__LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
	lg	%r9,__LC_SYSTEM_TIMER
	alg	%r9,__LC_LAST_UPDATE_TIMER
	slg	%r9,__TIMER_IDLE_ENTER(%r2)
	stg	%r9,__LC_SYSTEM_TIMER
	mvc	__LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
	# prepare return psw
	nihh	%r8,0xfcfd		# clear irq & wait state bits
	lg	%r9,48(%r11)		# return from psw_idle
	BR_EX	%r14,%r11
.Lcleanup_idle_insn:
	.quad	.Lpsw_idle_lpsw

.Lcleanup_save_fpu_regs:
	larl	%r9,save_fpu_regs
	BR_EX	%r14,%r11

.Lcleanup_load_fpu_regs:
	larl	%r9,load_fpu_regs
	BR_EX	%r14,%r11

/*
 * Integer constants
 */
	.align	8
.Lcritical_start:
	.quad	.L__critical_start
.Lcritical_length:
	.quad	.L__critical_end - .L__critical_start
#if IS_ENABLED(CONFIG_KVM)
.Lsie_critical_start:
	.quad	.Lsie_gmap
.Lsie_critical_length:
	.quad	.Lsie_done - .Lsie_gmap
#endif

	.section .rodata, "a"
#define SYSCALL(esame,emu)	.long esame
	.globl	sys_call_table
sys_call_table:
#include "syscalls.S"
#undef SYSCALL

#ifdef CONFIG_COMPAT
#define SYSCALL(esame,emu)	.long emu
	.globl	sys_call_table_emu
sys_call_table_emu:
#include "syscalls.S"
#undef SYSCALL
#endif
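# Each line of syscalls.S expands the SYSCALL(esame,emu) macro once per
# system call, so redefining SYSCALL before each include selects either
# the 64-bit entry point or the 31-bit compat entry point column. The
# entries are 4-byte .long values, which is why the dispatch code above
# loads them with lgf (load and sign-extend). A sketch of one table
# line, assuming the two-column format this file expects:
#	SYSCALL(sys_read,compat_sys_read)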