/*
 * FPU signal frame handling routines.
 */

#include <linux/compat.h>
#include <linux/cpu.h>

#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/xstate.h>

#include <asm/sigframe.h>
#include <asm/trace/fpu.h>

static struct _fpx_sw_bytes fx_sw_reserved, fx_sw_reserved_ia32;

/*
 * Check for the presence of extended state information in the
 * user fpstate pointer in the sigcontext.
 */
static inline int check_for_xstate(struct fxregs_state __user *buf,
				   void __user *fpstate,
				   struct _fpx_sw_bytes *fx_sw)
{
	int min_xstate_size = sizeof(struct fxregs_state) +
			      sizeof(struct xstate_header);
	unsigned int magic2;

	if (__copy_from_user(fx_sw, &buf->sw_reserved[0], sizeof(*fx_sw)))
		return -1;

	/* Check for the first magic field and other error scenarios. */
	if (fx_sw->magic1 != FP_XSTATE_MAGIC1 ||
	    fx_sw->xstate_size < min_xstate_size ||
	    fx_sw->xstate_size > fpu_user_xstate_size ||
	    fx_sw->xstate_size > fx_sw->extended_size)
		return -1;

	/*
	 * Check for the presence of the second magic word at the end of the
	 * memory layout. This detects the case where the user just copied
	 * the legacy fpstate layout without copying the extended state
	 * information in the memory layout.
	 */
	if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size))
	    || magic2 != FP_XSTATE_MAGIC2)
		return -1;

	return 0;
}

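/*
 * An illustrative sketch of the extended frame that check_for_xstate()
 * validates (the exact sizes depend on which xfeatures are enabled):
 *
 *	fpstate + 0			legacy 512-byte fxsave image; its
 *					sw_reserved[] bytes carry magic1,
 *					extended_size, xstate_size, xfeatures
 *	fpstate + 512			xstate header and extended state
 *	fpstate + fx_sw->xstate_size	FP_XSTATE_MAGIC2 (4 bytes)
 *
 * A frame that stops after the legacy image fails the magic2 check above.
 */
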
/*
 * Signal frame handlers.
 */
static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
{
	if (use_fxsr()) {
		struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
		struct user_i387_ia32_struct env;
		struct _fpstate_32 __user *fp = buf;

		convert_from_fxsr(&env, tsk);

		if (__copy_to_user(buf, &env, sizeof(env)) ||
		    __put_user(xsave->i387.swd, &fp->status) ||
		    __put_user(X86_FXSR_MAGIC, &fp->magic))
			return -1;
	} else {
		struct fregs_state __user *fp = buf;
		u32 swd;

		if (__get_user(swd, &fp->swd) || __put_user(swd, &fp->status))
			return -1;
	}

	return 0;
}

static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
{
	struct xregs_state __user *x = buf;
	struct _fpx_sw_bytes *sw_bytes;
	u32 xfeatures;
	int err;

	/* Set up the bytes not touched by the [f]xsave and reserved for SW. */
	sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
	err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));

	if (!use_xsave())
		return err;

	err |= __put_user(FP_XSTATE_MAGIC2,
			  (__u32 *)(buf + fpu_user_xstate_size));

	/*
	 * Read the xfeatures which we copied (directly from the cpu or
	 * from the state in the task struct) to the user buffers.
	 */
	err |= __get_user(xfeatures, (__u32 *)&x->header.xfeatures);

	/*
	 * For legacy compatibility, we always set the FP/SSE bits in the bit
	 * vector while saving the state to the user context. This lets us
	 * capture any changes (made during sigreturn) to the FP/SSE bits by
	 * legacy applications which don't touch xfeatures in the xsave header.
	 *
	 * xsave-aware apps can change the xfeatures in the xsave header as
	 * well as change any contents in the memory layout. xrestore as part
	 * of sigreturn will capture all the changes.
	 */
	xfeatures |= XFEATURE_MASK_FPSSE;

	err |= __put_user(xfeatures, (__u32 *)&x->header.xfeatures);

	return err;
}

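/*
 * Concretely (a hypothetical scenario): a legacy signal handler that
 * rewrites only the x87/SSE portion of the frame and never looks at the
 * xsave header still gets its changes restored at sigreturn, because the
 * FP/SSE bits forced on above make the restore consume that legacy area
 * rather than re-initializing it.
 */
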
static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
{
	int err;

	if (use_xsave())
		err = copy_xregs_to_user(buf);
	else if (use_fxsr())
		err = copy_fxregs_to_user((struct fxregs_state __user *) buf);
	else
		err = copy_fregs_to_user((struct fregs_state __user *) buf);

	if (unlikely(err) && __clear_user(buf, fpu_user_xstate_size))
		err = -EFAULT;
	return err;
}

/*
 * Save the FPU and extended register state to the user signal frame.
 *
 * 'buf_fx' is the 64-byte aligned pointer at which the [f|fx|x]save
 * state is copied.
 * 'buf' points to the 'buf_fx' or to the fsave header followed by 'buf_fx'.
 *
 *	buf == buf_fx for 64-bit frames and 32-bit fsave frames.
 *	buf != buf_fx for 32-bit frames with fxstate.
 *
 * If the FPU and extended register state is live, save the state directly
 * to the user frame pointed to by the aligned pointer 'buf_fx'. Otherwise,
 * copy the thread's fpu state to the user frame starting at 'buf_fx'.
 *
 * If this is a 32-bit frame with fxstate, put an fsave header before
 * the aligned state at 'buf_fx'.
 *
 * For [f]xsave state, update the SW reserved fields in the [f]xsave frame
 * indicating the absence/presence of the extended state to the user.
 */
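/*
 * An illustrative sketch of the two cases (layout only, not to scale):
 *
 *   64-bit frame or 32-bit fsave frame:
 *	buf == buf_fx -> [f|fx|x]save image (plus the sw_reserved/MAGIC2
 *	epilog when xsave is in use)
 *
 *   32-bit frame with fxstate:
 *	buf    -> fsave header (sizeof(struct fregs_state) bytes)
 *	buf_fx -> 64-byte aligned fxsave/xsave image, epilog as above
 */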
int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
{
	struct xregs_state *xsave = &current->thread.fpu.state.xsave;
	struct task_struct *tsk = current;
	int ia32_fxstate = (buf != buf_fx);

	ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
			 IS_ENABLED(CONFIG_IA32_EMULATION));

	if (!access_ok(VERIFY_WRITE, buf, size))
		return -EACCES;

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_get(current, NULL, 0,
			sizeof(struct user_i387_ia32_struct), NULL,
			(struct _fpstate_32 __user *) buf) ? -1 : 1;

	if (fpregs_active() || using_compacted_format()) {
		/* Save the live register state to the user directly. */
		if (copy_fpregs_to_sigframe(buf_fx))
			return -1;
		/* Update the thread's fxstate to save the fsave header. */
		if (ia32_fxstate)
			copy_fxregs_to_kernel(&tsk->thread.fpu);
	} else {
		/*
		 * It is a *bug* if the kernel uses the compacted format for
		 * the xsave area and we copy it out directly to a signal
		 * frame. It should have been handled above by saving the
		 * registers directly.
		 */
		if (boot_cpu_has(X86_FEATURE_XSAVES)) {
			WARN_ONCE(1, "x86/fpu: saving compacted-format xsave area to a signal frame!\n");
			return -1;
		}

		fpstate_sanitize_xstate(&tsk->thread.fpu);
		if (__copy_to_user(buf_fx, xsave, fpu_user_xstate_size))
			return -1;
	}

	/* Save the fsave header for the 32-bit frames. */
	if ((ia32_fxstate || !use_fxsr()) && save_fsave_header(tsk, buf))
		return -1;

	if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate))
		return -1;

	return 0;
}

static inline void
sanitize_restored_xstate(struct task_struct *tsk,
			 struct user_i387_ia32_struct *ia32_env,
			 u64 xfeatures, int fx_only)
{
	struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
	struct xstate_header *header = &xsave->header;

	if (use_xsave()) {
		/* These bits must be zero. */
		memset(header->reserved, 0, 48);

		/*
		 * Init the state that is not present in the memory
		 * layout and not enabled by the OS.
		 */
		if (fx_only)
			header->xfeatures = XFEATURE_MASK_FPSSE;
		else
			header->xfeatures &= (xfeatures_mask & xfeatures);
	}

	if (use_fxsr()) {
		/*
		 * mxcsr reserved bits must be masked to zero for security
		 * reasons.
		 */
		xsave->i387.mxcsr &= mxcsr_feature_mask;

		convert_to_fxsr(tsk, ia32_env);
	}
}

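/*
 * For example (the feature bits are illustrative): if the kernel's
 * xfeatures_mask enables FP, SSE and AVX, and the user frame claims
 * xfeatures = FP | SSE | AVX | PKRU, the restored header keeps only
 * FP | SSE | AVX; a bit the OS never enabled (PKRU here) is dropped.
 * With fx_only set, header->xfeatures is forced to just FP | SSE, so
 * every other component is treated as being in its init state.
 */
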
/*
 * Restore the extended state if present. Otherwise, restore the FP/SSE state.
 */
static inline int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
{
	if (use_xsave()) {
		if ((unsigned long)buf % 64 || fx_only) {
			u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
			copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
			return copy_user_to_fxregs(buf);
		} else {
			u64 init_bv = xfeatures_mask & ~xbv;
			if (unlikely(init_bv))
				copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
			return copy_user_to_xregs(buf, xbv);
		}
	} else if (use_fxsr()) {
		return copy_user_to_fxregs(buf);
	} else
		return copy_user_to_fregs(buf);
}

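/*
 * A worked example (bit names illustrative): say xfeatures_mask enables
 * FP, SSE and AVX, but the signal frame only carries xbv = FP | SSE,
 * e.g. written by an application that never used AVX. Then
 * init_bv = xfeatures_mask & ~xbv = AVX, so the AVX registers are reset
 * from init_fpstate before the user's FP/SSE image is restored with
 * copy_user_to_xregs(buf, xbv).
 */
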
static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
{
	int ia32_fxstate = (buf != buf_fx);
	struct task_struct *tsk = current;
	struct fpu *fpu = &tsk->thread.fpu;
	int state_size = fpu_kernel_xstate_size;
	u64 xfeatures = 0;
	int fx_only = 0;

	ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
			 IS_ENABLED(CONFIG_IA32_EMULATION));

	if (!buf) {
		fpu__clear(fpu);
		return 0;
	}

	if (!access_ok(VERIFY_READ, buf, size))
		return -EACCES;

	fpu__activate_curr(fpu);

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(current, NULL,
				       0, sizeof(struct user_i387_ia32_struct),
				       NULL, buf) != 0;

	if (use_xsave()) {
		struct _fpx_sw_bytes fx_sw_user;
		if (unlikely(check_for_xstate(buf_fx, buf_fx, &fx_sw_user))) {
			/*
			 * Couldn't find the extended state information in the
			 * memory layout. Restore just the FP/SSE and init all
			 * the other extended state.
			 */
			state_size = sizeof(struct fxregs_state);
			fx_only = 1;
			trace_x86_fpu_xstate_check_failed(fpu);
		} else {
			state_size = fx_sw_user.xstate_size;
			xfeatures = fx_sw_user.xfeatures;
		}
	}

	if (ia32_fxstate) {
		/*
		 * For 32-bit frames with fxstate, copy the user state to the
		 * thread's fpu state, reconstruct fxstate from the fsave
		 * header, and sanitize the copied state.
		 */
		struct fpu *fpu = &tsk->thread.fpu;
		struct user_i387_ia32_struct env;
		int err = 0;

		/*
		 * Drop the current fpu, which clears fpu->fpstate_active.
		 * This ensures that a context switch which happens while we
		 * copy in the new state neither saves nor restores the
		 * half-written intermediate state, and therefore cannot
		 * corrupt the newly restored state. We will be ready to
		 * restore/save the state only after fpu->fpstate_active is
		 * set again.
		 */
		fpu__drop(fpu);

		if (using_compacted_format()) {
			err = copyin_to_xsaves(NULL, buf_fx,
					       &fpu->state.xsave);
		} else {
			err = __copy_from_user(&fpu->state.xsave,
					       buf_fx, state_size);

			/* xcomp_bv must be 0 when using uncompacted format */
			if (!err && state_size > offsetof(struct xregs_state, header) && fpu->state.xsave.header.xcomp_bv)
				err = -EINVAL;
		}

		if (err || __copy_from_user(&env, buf, sizeof(env))) {
			fpstate_init(&fpu->state);
			trace_x86_fpu_init_state(fpu);
			err = -1;
		} else {
			sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
		}

		fpu->fpstate_active = 1;
		if (use_eager_fpu()) {
			preempt_disable();
			fpu__restore(fpu);
			preempt_enable();
		}

		return err;
	} else {
		/*
		 * For 64-bit frames and 32-bit fsave frames, restore the user
		 * state to the registers directly (with exceptions handled).
		 */
		user_fpu_begin();
		if (copy_user_to_fpregs_zeroing(buf_fx, xfeatures, fx_only)) {
			fpu__clear(fpu);
			return -1;
		}
	}

	return 0;
}

static inline int xstate_sigframe_size(void)
{
	return use_xsave() ? fpu_user_xstate_size + FP_XSTATE_MAGIC2_SIZE :
			fpu_user_xstate_size;
}

/*
 * Restore FPU state from a sigframe:
 */
int fpu__restore_sig(void __user *buf, int ia32_frame)
{
	void __user *buf_fx = buf;
	int size = xstate_sigframe_size();

	if (ia32_frame && use_fxsr()) {
		buf_fx = buf + sizeof(struct fregs_state);
		size += sizeof(struct fregs_state);
	}

	return __fpu__restore_sig(buf, buf_fx, size);
}

unsigned long
fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
		     unsigned long *buf_fx, unsigned long *size)
{
	unsigned long frame_size = xstate_sigframe_size();

	*buf_fx = sp = round_down(sp - frame_size, 64);

	if (ia32_frame && use_fxsr()) {
		frame_size += sizeof(struct fregs_state);
		sp -= sizeof(struct fregs_state);
	}

	*size = frame_size;

	return sp;
}

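/*
 * A worked example with made-up numbers: suppose sp = 0x7fff0000 and
 * xstate_sigframe_size() = 836 (e.g. an 832-byte user xstate image plus
 * the 4-byte FP_XSTATE_MAGIC2). Then sp - 836 = 0x7ffefcbc, which
 * round_down()s to the 64-byte boundary 0x7ffefc80; that becomes *buf_fx
 * and the new sp. For a 32-bit fxstate frame, the fsave header
 * (sizeof(struct fregs_state)) is additionally reserved just below
 * buf_fx, so sp drops by that much more and *size grows accordingly.
 */
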
/*
 * Prepare the SW reserved portion of the fxsave memory layout, indicating
 * the presence of the extended state information in the memory layout
 * pointed to by the fpstate pointer in the sigcontext.
 * This will be saved whenever the FP and extended state context is
 * saved on the user stack during signal delivery to the user.
 */
void fpu__init_prepare_fx_sw_frame(void)
{
	int size = fpu_user_xstate_size + FP_XSTATE_MAGIC2_SIZE;

	fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
	fx_sw_reserved.extended_size = size;
	fx_sw_reserved.xfeatures = xfeatures_mask;
	fx_sw_reserved.xstate_size = fpu_user_xstate_size;

	if (IS_ENABLED(CONFIG_IA32_EMULATION) ||
	    IS_ENABLED(CONFIG_X86_32)) {
		int fsave_header_size = sizeof(struct fregs_state);

		fx_sw_reserved_ia32 = fx_sw_reserved;
		fx_sw_reserved_ia32.extended_size = size + fsave_header_size;
	}
}
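
/*
 * For instance (sizes depend on the CPU's enabled xfeatures): with an
 * 832-byte user xstate image, fx_sw_reserved ends up with
 * xstate_size = 832 and extended_size = 836 (MAGIC2 included), while
 * fx_sw_reserved_ia32.extended_size additionally accounts for the fsave
 * header that precedes the aligned area in 32-bit fxstate frames.
 */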