ptrace.c
/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/module.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/hw_breakpoint.h>
#include <asm/traps.h>

#include "tls.h"

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

enum x86_regset {
        REGSET_GENERAL,
        REGSET_FP,
        REGSET_XFP,
        REGSET_IOPERM64 = REGSET_XFP,
        REGSET_XSTATE,
        REGSET_TLS,
        REGSET_IOPERM32,
};
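
/*
 * Note the REGSET_IOPERM64 = REGSET_XFP aliasing above: the 64-bit regset
 * array defined later in this file has no FXSR (XFP) entry, so the
 * I/O-permission regset reuses that index.  REGSET_XFP is only meaningful
 * in the 32-bit regset view and REGSET_IOPERM64 only in the 64-bit one.
 */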
struct pt_regs_offset {
        const char *name;
        int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

static const struct pt_regs_offset regoffset_table[] = {
#ifdef CONFIG_X86_64
        REG_OFFSET_NAME(r15),
        REG_OFFSET_NAME(r14),
        REG_OFFSET_NAME(r13),
        REG_OFFSET_NAME(r12),
        REG_OFFSET_NAME(r11),
        REG_OFFSET_NAME(r10),
        REG_OFFSET_NAME(r9),
        REG_OFFSET_NAME(r8),
#endif
        REG_OFFSET_NAME(bx),
        REG_OFFSET_NAME(cx),
        REG_OFFSET_NAME(dx),
        REG_OFFSET_NAME(si),
        REG_OFFSET_NAME(di),
        REG_OFFSET_NAME(bp),
        REG_OFFSET_NAME(ax),
#ifdef CONFIG_X86_32
        REG_OFFSET_NAME(ds),
        REG_OFFSET_NAME(es),
        REG_OFFSET_NAME(fs),
        REG_OFFSET_NAME(gs),
#endif
        REG_OFFSET_NAME(orig_ax),
        REG_OFFSET_NAME(ip),
        REG_OFFSET_NAME(cs),
        REG_OFFSET_NAME(flags),
        REG_OFFSET_NAME(sp),
        REG_OFFSET_NAME(ss),
        REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:       the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
        const struct pt_regs_offset *roff;

        for (roff = regoffset_table; roff->name != NULL; roff++)
                if (!strcmp(roff->name, name))
                        return roff->offset;
        return -EINVAL;
}
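
/*
 * Illustrative only: a caller that fetches registers by name (for
 * example, kprobes-based tracing code) can do
 *
 *      int off = regs_query_register_offset("ip");
 *
 * and gets offsetof(struct pt_regs, ip) back, or -EINVAL for a name
 * that is not in regoffset_table.
 */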
/**
 * regs_query_register_name() - query register name from its offset
 * @offset:     the offset of a register in struct pt_regs.
 *
 * regs_query_register_name() returns the name of a register from its
 * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
 */
const char *regs_query_register_name(unsigned int offset)
{
        const struct pt_regs_offset *roff;

        for (roff = regoffset_table; roff->name != NULL; roff++)
                if (roff->offset == offset)
                        return roff->name;
        return NULL;
}

static const int arg_offs_table[] = {
#ifdef CONFIG_X86_32
        [0] = offsetof(struct pt_regs, ax),
        [1] = offsetof(struct pt_regs, dx),
        [2] = offsetof(struct pt_regs, cx)
#else /* CONFIG_X86_64 */
        [0] = offsetof(struct pt_regs, di),
        [1] = offsetof(struct pt_regs, si),
        [2] = offsetof(struct pt_regs, dx),
        [3] = offsetof(struct pt_regs, cx),
        [4] = offsetof(struct pt_regs, r8),
        [5] = offsetof(struct pt_regs, r9)
#endif
};
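
/*
 * arg_offs_table maps a function-argument number to its pt_regs slot.
 * On 64-bit these are the six SysV AMD64 argument registers (%rdi,
 * %rsi, %rdx, %rcx, %r8, %r9); the 32-bit entries appear to follow the
 * regparm(3) convention (%eax, %edx, %ecx) the kernel uses internally,
 * rather than the int $0x80 syscall ABI.
 */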
/*
 * Signals sent when the child dies are not yet caught here;
 * that is handled in exit.c or in signal.c.
 */

/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 */
#define FLAG_MASK_32            ((unsigned long)                       \
                                 (X86_EFLAGS_CF | X86_EFLAGS_PF |      \
                                  X86_EFLAGS_AF | X86_EFLAGS_ZF |      \
                                  X86_EFLAGS_SF | X86_EFLAGS_TF |      \
                                  X86_EFLAGS_DF | X86_EFLAGS_OF |      \
                                  X86_EFLAGS_RF | X86_EFLAGS_AC))

/*
 * Determines whether a value may be installed in a segment register.
 */
static inline bool invalid_selector(u16 value)
{
        return unlikely(value != 0 && (value & SEGMENT_RPL_MASK) != USER_RPL);
}

#ifdef CONFIG_X86_32

#define FLAG_MASK               FLAG_MASK_32

/*
 * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
 * when it traps. The previous stack will be directly underneath the saved
 * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
 *
 * Now, if the stack is empty, '&regs->sp' is out of range. In this
 * case we try to take the previous stack. To always return a non-null
 * stack pointer we fall back to regs as stack if no previous stack
 * exists.
 *
 * This is valid only for kernel mode traps.
 */
unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
        unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
        unsigned long sp = (unsigned long)&regs->sp;
        struct thread_info *tinfo;

        if (context == (sp & ~(THREAD_SIZE - 1)))
                return sp;

        tinfo = (struct thread_info *)context;
        if (tinfo->previous_esp)
                return tinfo->previous_esp;

        return (unsigned long)regs;
}
EXPORT_SYMBOL_GPL(kernel_stack_pointer);

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
{
        BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
        return &regs->bx + (regno >> 2);
}
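
/*
 * The "regno >> 2" works because regno is really a byte offset into
 * struct user_regs_struct, whose leading general registers are laid out
 * identically to struct pt_regs (the BUILD_BUG_ON anchors the layout),
 * and each register is 4 bytes on 32-bit.
 */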
static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
        /*
         * Returning the value truncates it to 16 bits.
         */
        unsigned int retval;
        if (offset != offsetof(struct user_regs_struct, gs))
                retval = *pt_regs_access(task_pt_regs(task), offset);
        else {
                if (task == current)
                        retval = get_user_gs(task_pt_regs(task));
                else
                        retval = task_user_gs(task);
        }
        return retval;
}

static int set_segment_reg(struct task_struct *task,
                           unsigned long offset, u16 value)
{
        /*
         * The value argument was already truncated to 16 bits.
         */
        if (invalid_selector(value))
                return -EIO;

        /*
         * For %cs and %ss we cannot permit a null selector.
         * We can permit a bogus selector as long as it has USER_RPL.
         * Null selectors are fine for other segment registers, but
         * we will never get back to user mode with invalid %cs or %ss
         * and will take the trap in iret instead. Much code relies
         * on user_mode() to distinguish a user trap frame (which can
         * safely use invalid selectors) from a kernel trap frame.
         */
        switch (offset) {
        case offsetof(struct user_regs_struct, cs):
        case offsetof(struct user_regs_struct, ss):
                if (unlikely(value == 0))
                        return -EIO;

        default:
                *pt_regs_access(task_pt_regs(task), offset) = value;
                break;

        case offsetof(struct user_regs_struct, gs):
                if (task == current)
                        set_user_gs(task_pt_regs(task), value);
                else
                        task_user_gs(task) = value;
        }

        return 0;
}
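
/*
 * The unusual case ordering in the switch above is deliberate: after the
 * null-selector check, %cs and %ss fall through into the default arm and
 * are stored in pt_regs like any other register, while the %gs case sits
 * after the default arm because on 32-bit %gs lives outside pt_regs and
 * needs the set_user_gs()/task_user_gs() helpers instead.
 */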
#else /* CONFIG_X86_64 */

#define FLAG_MASK               (FLAG_MASK_32 | X86_EFLAGS_NT)

static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long offset)
{
        BUILD_BUG_ON(offsetof(struct pt_regs, r15) != 0);
        return &regs->r15 + (offset / sizeof(regs->r15));
}

static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
{
        /*
         * Returning the value truncates it to 16 bits.
         */
        unsigned int seg;

        switch (offset) {
        case offsetof(struct user_regs_struct, fs):
                if (task == current) {
                        /* Older gas can't assemble movq %?s,%r?? */
                        asm("movl %%fs,%0" : "=r" (seg));
                        return seg;
                }
                return task->thread.fsindex;
        case offsetof(struct user_regs_struct, gs):
                if (task == current) {
                        asm("movl %%gs,%0" : "=r" (seg));
                        return seg;
                }
                return task->thread.gsindex;
        case offsetof(struct user_regs_struct, ds):
                if (task == current) {
                        asm("movl %%ds,%0" : "=r" (seg));
                        return seg;
                }
                return task->thread.ds;
        case offsetof(struct user_regs_struct, es):
                if (task == current) {
                        asm("movl %%es,%0" : "=r" (seg));
                        return seg;
                }
                return task->thread.es;

        case offsetof(struct user_regs_struct, cs):
        case offsetof(struct user_regs_struct, ss):
                break;
        }
        return *pt_regs_access(task_pt_regs(task), offset);
}
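
/*
 * %cs and %ss break out of the switch above because the 64-bit entry
 * path does save them in pt_regs, whereas %ds/%es/%fs/%gs are not saved
 * on kernel entry and must be read from the hardware (for current) or
 * from the values cached in task->thread at the last context switch.
 */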
static int set_segment_reg(struct task_struct *task,
                           unsigned long offset, u16 value)
{
        /*
         * The value argument was already truncated to 16 bits.
         */
        if (invalid_selector(value))
                return -EIO;

        switch (offset) {
        case offsetof(struct user_regs_struct,fs):
                /*
                 * If this is setting fs as for normal 64-bit use but
                 * setting fs_base has implicitly changed it, leave it.
                 */
                if ((value == FS_TLS_SEL && task->thread.fsindex == 0 &&
                     task->thread.fs != 0) ||
                    (value == 0 && task->thread.fsindex == FS_TLS_SEL &&
                     task->thread.fs == 0))
                        break;
                task->thread.fsindex = value;
                if (task == current)
                        loadsegment(fs, task->thread.fsindex);
                break;
        case offsetof(struct user_regs_struct,gs):
                /*
                 * If this is setting gs as for normal 64-bit use but
                 * setting gs_base has implicitly changed it, leave it.
                 */
                if ((value == GS_TLS_SEL && task->thread.gsindex == 0 &&
                     task->thread.gs != 0) ||
                    (value == 0 && task->thread.gsindex == GS_TLS_SEL &&
                     task->thread.gs == 0))
                        break;
                task->thread.gsindex = value;
                if (task == current)
                        load_gs_index(task->thread.gsindex);
                break;
        case offsetof(struct user_regs_struct,ds):
                task->thread.ds = value;
                if (task == current)
                        loadsegment(ds, task->thread.ds);
                break;
        case offsetof(struct user_regs_struct,es):
                task->thread.es = value;
                if (task == current)
                        loadsegment(es, task->thread.es);
                break;

                /*
                 * Can't actually change these in 64-bit mode.
                 */
        case offsetof(struct user_regs_struct,cs):
                if (unlikely(value == 0))
                        return -EIO;
#ifdef CONFIG_IA32_EMULATION
                if (test_tsk_thread_flag(task, TIF_IA32))
                        task_pt_regs(task)->cs = value;
#endif
                break;
        case offsetof(struct user_regs_struct,ss):
                if (unlikely(value == 0))
                        return -EIO;
#ifdef CONFIG_IA32_EMULATION
                if (test_tsk_thread_flag(task, TIF_IA32))
                        task_pt_regs(task)->ss = value;
#endif
                break;
        }

        return 0;
}

#endif  /* CONFIG_X86_32 */

static unsigned long get_flags(struct task_struct *task)
{
        unsigned long retval = task_pt_regs(task)->flags;

        /*
         * If the debugger set TF, hide it from the readout.
         */
        if (test_tsk_thread_flag(task, TIF_FORCED_TF))
                retval &= ~X86_EFLAGS_TF;

        return retval;
}

static int set_flags(struct task_struct *task, unsigned long value)
{
        struct pt_regs *regs = task_pt_regs(task);

        /*
         * If the user value contains TF, mark that
         * it was not "us" (the debugger) that set it.
         * If not, make sure it stays set if we had set it.
         */
        if (value & X86_EFLAGS_TF)
                clear_tsk_thread_flag(task, TIF_FORCED_TF);
        else if (test_tsk_thread_flag(task, TIF_FORCED_TF))
                value |= X86_EFLAGS_TF;

        regs->flags = (regs->flags & ~FLAG_MASK) | (value & FLAG_MASK);

        return 0;
}
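
/*
 * Illustrative only (userspace sketch; field names per the exported
 * <sys/user.h> layout): a debugger flipping the trap flag through the
 * USER area lands in set_flags() above, e.g.
 *
 *      long flags = ptrace(PTRACE_PEEKUSR, pid,
 *                          offsetof(struct user_regs_struct, eflags), 0);
 *      ptrace(PTRACE_POKEUSR, pid,
 *             offsetof(struct user_regs_struct, eflags),
 *             flags | X86_EFLAGS_TF);
 *
 * FLAG_MASK ensures only user-visible flag bits are writable this way,
 * and the TIF_FORCED_TF bookkeeping hides a debugger-set TF from reads.
 */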
static int putreg(struct task_struct *child,
                  unsigned long offset, unsigned long value)
{
        switch (offset) {
        case offsetof(struct user_regs_struct, cs):
        case offsetof(struct user_regs_struct, ds):
        case offsetof(struct user_regs_struct, es):
        case offsetof(struct user_regs_struct, fs):
        case offsetof(struct user_regs_struct, gs):
        case offsetof(struct user_regs_struct, ss):
                return set_segment_reg(child, offset, value);

        case offsetof(struct user_regs_struct, flags):
                return set_flags(child, value);

#ifdef CONFIG_X86_64
        case offsetof(struct user_regs_struct,fs_base):
                if (value >= TASK_SIZE_OF(child))
                        return -EIO;
                /*
                 * When changing the segment base, use do_arch_prctl
                 * to set either thread.fs or thread.fsindex and the
                 * corresponding GDT slot.
                 */
                if (child->thread.fs != value)
                        return do_arch_prctl(child, ARCH_SET_FS, value);
                return 0;
        case offsetof(struct user_regs_struct,gs_base):
                /*
                 * Exactly the same here as the %fs handling above.
                 */
                if (value >= TASK_SIZE_OF(child))
                        return -EIO;
                if (child->thread.gs != value)
                        return do_arch_prctl(child, ARCH_SET_GS, value);
                return 0;
#endif
        }

        *pt_regs_access(task_pt_regs(child), offset) = value;
        return 0;
}

static unsigned long getreg(struct task_struct *task, unsigned long offset)
{
        switch (offset) {
        case offsetof(struct user_regs_struct, cs):
        case offsetof(struct user_regs_struct, ds):
        case offsetof(struct user_regs_struct, es):
        case offsetof(struct user_regs_struct, fs):
        case offsetof(struct user_regs_struct, gs):
        case offsetof(struct user_regs_struct, ss):
                return get_segment_reg(task, offset);

        case offsetof(struct user_regs_struct, flags):
                return get_flags(task);

#ifdef CONFIG_X86_64
        case offsetof(struct user_regs_struct, fs_base): {
                /*
                 * do_arch_prctl may have used a GDT slot instead of
                 * the MSR. To userland, it appears the same either
                 * way, except the %fs segment selector might not be 0.
                 */
                unsigned int seg = task->thread.fsindex;
                if (task->thread.fs != 0)
                        return task->thread.fs;
                if (task == current)
                        asm("movl %%fs,%0" : "=r" (seg));
                if (seg != FS_TLS_SEL)
                        return 0;
                return get_desc_base(&task->thread.tls_array[FS_TLS]);
        }
        case offsetof(struct user_regs_struct, gs_base): {
                /*
                 * Exactly the same here as the %fs handling above.
                 */
                unsigned int seg = task->thread.gsindex;
                if (task->thread.gs != 0)
                        return task->thread.gs;
                if (task == current)
                        asm("movl %%gs,%0" : "=r" (seg));
                if (seg != GS_TLS_SEL)
                        return 0;
                return get_desc_base(&task->thread.tls_array[GS_TLS]);
        }
#endif
        }

        return *pt_regs_access(task_pt_regs(task), offset);
}

static int genregs_get(struct task_struct *target,
                       const struct user_regset *regset,
                       unsigned int pos, unsigned int count,
                       void *kbuf, void __user *ubuf)
{
        if (kbuf) {
                unsigned long *k = kbuf;
                while (count >= sizeof(*k)) {
                        *k++ = getreg(target, pos);
                        count -= sizeof(*k);
                        pos += sizeof(*k);
                }
        } else {
                unsigned long __user *u = ubuf;
                while (count >= sizeof(*u)) {
                        if (__put_user(getreg(target, pos), u++))
                                return -EFAULT;
                        count -= sizeof(*u);
                        pos += sizeof(*u);
                }
        }

        return 0;
}

static int genregs_set(struct task_struct *target,
                       const struct user_regset *regset,
                       unsigned int pos, unsigned int count,
                       const void *kbuf, const void __user *ubuf)
{
        int ret = 0;

        if (kbuf) {
                const unsigned long *k = kbuf;
                while (count >= sizeof(*k) && !ret) {
                        ret = putreg(target, pos, *k++);
                        count -= sizeof(*k);
                        pos += sizeof(*k);
                }
        } else {
                const unsigned long __user *u = ubuf;
                while (count >= sizeof(*u) && !ret) {
                        unsigned long word;
                        ret = __get_user(word, u++);
                        if (ret)
                                break;
                        ret = putreg(target, pos, word);
                        count -= sizeof(*u);
                        pos += sizeof(*u);
                }
        }

        return ret;
}

static void ptrace_triggered(struct perf_event *bp,
                             struct perf_sample_data *data,
                             struct pt_regs *regs)
{
        int i;
        struct thread_struct *thread = &(current->thread);

        /*
         * Store in the virtual DR6 register the fact that the breakpoint
         * was hit so the thread's debugger will see it.
         */
        for (i = 0; i < HBP_NUM; i++) {
                if (thread->ptrace_bps[i] == bp)
                        break;
        }

        thread->debugreg6 |= (DR_TRAP0 << i);
}
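
/*
 * Note: the lookup loop above assumes 'bp' is always one of this
 * thread's ptrace breakpoints.  ptrace_triggered is only ever installed
 * as the handler for events created in ptrace_set_breakpoint_addr()
 * below, so the loop cannot run off the end in practice.
 */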
/*
 * Walk through every ptrace breakpoint for this thread and
 * build the dr7 value on top of their attributes.
 */
static unsigned long ptrace_get_dr7(struct perf_event *bp[])
{
        int i;
        int dr7 = 0;
        struct arch_hw_breakpoint *info;

        for (i = 0; i < HBP_NUM; i++) {
                if (bp[i] && !bp[i]->attr.disabled) {
                        info = counter_arch_bp(bp[i]);
                        dr7 |= encode_dr7(i, info->len, info->type);
                }
        }

        return dr7;
}

static int
ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
                         struct task_struct *tsk, int disabled)
{
        int err;
        int gen_len, gen_type;
        struct perf_event_attr attr;

        /*
         * We should have at least an inactive breakpoint at this
         * slot. It means the user is writing dr7 without having
         * written the address register first.
         */
        if (!bp)
                return -EINVAL;

        err = arch_bp_generic_fields(len, type, &gen_len, &gen_type);
        if (err)
                return err;

        attr = bp->attr;
        attr.bp_len = gen_len;
        attr.bp_type = gen_type;
        attr.disabled = disabled;

        return modify_user_hw_breakpoint(bp, &attr);
}

/*
 * Handle ptrace writes to debug register 7.
 */
static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data)
{
        struct thread_struct *thread = &(tsk->thread);
        unsigned long old_dr7;
        int i, orig_ret = 0, rc = 0;
        int enabled, second_pass = 0;
        unsigned len, type;
        struct perf_event *bp;

        if (ptrace_get_breakpoints(tsk) < 0)
                return -ESRCH;

        data &= ~DR_CONTROL_RESERVED;
        old_dr7 = ptrace_get_dr7(thread->ptrace_bps);
restore:
        /*
         * Loop through all the hardware breakpoints, making the
         * appropriate changes to each.
         */
        for (i = 0; i < HBP_NUM; i++) {
                enabled = decode_dr7(data, i, &len, &type);
                bp = thread->ptrace_bps[i];

                if (!enabled) {
                        if (bp) {
                                /*
                                 * Don't unregister the breakpoints right away,
                                 * unless all register_user_hw_breakpoint()
                                 * requests have succeeded. This prevents
                                 * any window of opportunity for debug
                                 * register grabbing by other users.
                                 */
                                if (!second_pass)
                                        continue;

                                rc = ptrace_modify_breakpoint(bp, len, type,
                                                              tsk, 1);
                                if (rc)
                                        break;
                        }
                        continue;
                }

                rc = ptrace_modify_breakpoint(bp, len, type, tsk, 0);
                if (rc)
                        break;
        }
        /*
         * Make a second pass to free the remaining unused breakpoints
         * or to restore the original breakpoints if an error occurred.
         */
        if (!second_pass) {
                second_pass = 1;
                if (rc < 0) {
                        orig_ret = rc;
                        data = old_dr7;
                }
                goto restore;
        }

        ptrace_put_breakpoints(tsk);

        return ((orig_ret < 0) ? orig_ret : rc);
}

/*
 * Handle PTRACE_PEEKUSR calls for the debug register area.
 */
static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
{
        struct thread_struct *thread = &(tsk->thread);
        unsigned long val = 0;

        if (n < HBP_NUM) {
                struct perf_event *bp;

                if (ptrace_get_breakpoints(tsk) < 0)
                        return -ESRCH;

                bp = thread->ptrace_bps[n];
                if (!bp)
                        val = 0;
                else
                        val = bp->hw.info.address;

                ptrace_put_breakpoints(tsk);
        } else if (n == 6) {
                val = thread->debugreg6;
        } else if (n == 7) {
                val = thread->ptrace_dr7;
        }
        return val;
}
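
/*
 * Illustrative only (userspace sketch): a debugger reads the debug
 * registers through the USER area, e.g.
 *
 *      long dr6 = ptrace(PTRACE_PEEKUSR, pid,
 *                        offsetof(struct user, u_debugreg[6]), 0);
 *
 * which lands in ptrace_get_debugreg() above.  DR0-DR3 report the
 * address of the corresponding perf-backed breakpoint; DR6 and DR7
 * report the software-maintained shadow values.
 */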
static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
                                      unsigned long addr)
{
        struct perf_event *bp;
        struct thread_struct *t = &tsk->thread;
        struct perf_event_attr attr;
        int err = 0;

        if (ptrace_get_breakpoints(tsk) < 0)
                return -ESRCH;

        if (!t->ptrace_bps[nr]) {
                ptrace_breakpoint_init(&attr);
                /*
                 * Use stub len and type to register (reserve) an
                 * inactive but correct bp.
                 */
                attr.bp_addr = addr;
                attr.bp_len = HW_BREAKPOINT_LEN_1;
                attr.bp_type = HW_BREAKPOINT_W;
                attr.disabled = 1;

                bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
                                                 NULL, tsk);

                /*
                 * CHECKME: the previous code returned -EIO if the addr wasn't
                 * a valid task virtual addr. The new one will return -EINVAL in
                 * this case.
                 * -EINVAL may be what we want for in-kernel breakpoint users,
                 * but -EIO looks better for ptrace, since we refuse a register
                 * writing for the user. And anyway this is the previous
                 * behaviour.
                 */
                if (IS_ERR(bp)) {
                        err = PTR_ERR(bp);
                        goto put;
                }

                t->ptrace_bps[nr] = bp;
        } else {
                bp = t->ptrace_bps[nr];

                attr = bp->attr;
                attr.bp_addr = addr;
                err = modify_user_hw_breakpoint(bp, &attr);
        }

put:
        ptrace_put_breakpoints(tsk);
        return err;
}

/*
 * Handle PTRACE_POKEUSR calls for the debug register area.
 */
static int ptrace_set_debugreg(struct task_struct *tsk, int n,
                               unsigned long val)
{
        struct thread_struct *thread = &(tsk->thread);
        int rc = 0;

        /* There are no DR4 or DR5 registers */
        if (n == 4 || n == 5)
                return -EIO;

        if (n == 6) {
                thread->debugreg6 = val;
                goto ret_path;
        }
        if (n < HBP_NUM) {
                rc = ptrace_set_breakpoint_addr(tsk, n, val);
                if (rc)
                        return rc;
        }
        /* All that's left is DR7 */
        if (n == 7) {
                rc = ptrace_write_dr7(tsk, val);
                if (!rc)
                        thread->ptrace_dr7 = val;
        }

ret_path:
        return rc;
}
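
/*
 * Illustrative only (userspace sketch): to arm a hardware watchpoint a
 * debugger first writes the address, then enables it via DR7, e.g.
 *
 *      ptrace(PTRACE_POKEUSR, pid,
 *             offsetof(struct user, u_debugreg[0]), addr);
 *      ptrace(PTRACE_POKEUSR, pid,
 *             offsetof(struct user, u_debugreg[7]), dr7_bits);
 *
 * The first call reserves a disabled perf breakpoint in slot 0
 * (ptrace_set_breakpoint_addr); the second decodes the DR7 bits and
 * activates it (ptrace_write_dr7).  Writing DR7 before DR0 fails with
 * -EINVAL in ptrace_modify_breakpoint().
 */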
/*
 * These access the current or another (stopped) task's io permission
 * bitmap for debugging or core dump.
 */
static int ioperm_active(struct task_struct *target,
                         const struct user_regset *regset)
{
        return target->thread.io_bitmap_max / regset->size;
}

static int ioperm_get(struct task_struct *target,
                      const struct user_regset *regset,
                      unsigned int pos, unsigned int count,
                      void *kbuf, void __user *ubuf)
{
        if (!target->thread.io_bitmap_ptr)
                return -ENXIO;

        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                   target->thread.io_bitmap_ptr,
                                   0, IO_BITMAP_BYTES);
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
        user_disable_single_step(child);
#ifdef TIF_SYSCALL_EMU
        clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
#endif
}

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static const struct user_regset_view user_x86_32_view; /* Initialized below. */
#endif

long arch_ptrace(struct task_struct *child, long request,
                 unsigned long addr, unsigned long data)
{
        int ret;
        unsigned long __user *datap = (unsigned long __user *)data;

        switch (request) {
        /* read the word at location addr in the USER area. */
        case PTRACE_PEEKUSR: {
                unsigned long tmp;

                ret = -EIO;
                if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
                        break;

                tmp = 0;  /* Default return condition */
                if (addr < sizeof(struct user_regs_struct))
                        tmp = getreg(child, addr);
                else if (addr >= offsetof(struct user, u_debugreg[0]) &&
                         addr <= offsetof(struct user, u_debugreg[7])) {
                        addr -= offsetof(struct user, u_debugreg[0]);
                        tmp = ptrace_get_debugreg(child, addr / sizeof(data));
                }
                ret = put_user(tmp, datap);
                break;
        }

        case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
                ret = -EIO;
                if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user))
                        break;

                if (addr < sizeof(struct user_regs_struct))
                        ret = putreg(child, addr, data);
                else if (addr >= offsetof(struct user, u_debugreg[0]) &&
                         addr <= offsetof(struct user, u_debugreg[7])) {
                        addr -= offsetof(struct user, u_debugreg[0]);
                        ret = ptrace_set_debugreg(child,
                                                  addr / sizeof(data), data);
                }
                break;

        case PTRACE_GETREGS:    /* Get all gp regs from the child. */
                return copy_regset_to_user(child,
                                           task_user_regset_view(current),
                                           REGSET_GENERAL,
                                           0, sizeof(struct user_regs_struct),
                                           datap);

        case PTRACE_SETREGS:    /* Set all gp regs in the child. */
                return copy_regset_from_user(child,
                                             task_user_regset_view(current),
                                             REGSET_GENERAL,
                                             0, sizeof(struct user_regs_struct),
                                             datap);

        case PTRACE_GETFPREGS:  /* Get the child FPU state. */
                return copy_regset_to_user(child,
                                           task_user_regset_view(current),
                                           REGSET_FP,
                                           0, sizeof(struct user_i387_struct),
                                           datap);

        case PTRACE_SETFPREGS:  /* Set the child FPU state. */
                return copy_regset_from_user(child,
                                             task_user_regset_view(current),
                                             REGSET_FP,
                                             0, sizeof(struct user_i387_struct),
                                             datap);

#ifdef CONFIG_X86_32
        case PTRACE_GETFPXREGS: /* Get the child extended FPU state. */
                return copy_regset_to_user(child, &user_x86_32_view,
                                           REGSET_XFP,
                                           0, sizeof(struct user_fxsr_struct),
                                           datap) ? -EIO : 0;

        case PTRACE_SETFPXREGS: /* Set the child extended FPU state. */
                return copy_regset_from_user(child, &user_x86_32_view,
                                             REGSET_XFP,
                                             0, sizeof(struct user_fxsr_struct),
                                             datap) ? -EIO : 0;
#endif

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
        case PTRACE_GET_THREAD_AREA:
                if ((int) addr < 0)
                        return -EIO;
                ret = do_get_thread_area(child, addr,
                                         (struct user_desc __user *)data);
                break;

        case PTRACE_SET_THREAD_AREA:
                if ((int) addr < 0)
                        return -EIO;
                ret = do_set_thread_area(child, addr,
                                         (struct user_desc __user *)data, 0);
                break;
#endif

#ifdef CONFIG_X86_64
        /* normal 64bit interface to access TLS data.
           Works just like arch_prctl, except that the arguments
           are reversed. */
        case PTRACE_ARCH_PRCTL:
                ret = do_arch_prctl(child, data, addr);
                break;
#endif

        default:
                ret = ptrace_request(child, request, addr, data);
                break;
        }

        return ret;
}
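
/*
 * Illustrative only (userspace sketch; field names per the exported
 * <sys/user.h> layout): the word-at-a-time register path a debugger
 * might use, all serviced by arch_ptrace() above:
 *
 *      ptrace(PTRACE_ATTACH, pid, 0, 0);
 *      waitpid(pid, &status, 0);
 *      long ip = ptrace(PTRACE_PEEKUSR, pid,
 *                       offsetof(struct user_regs_struct, rip), 0);
 *
 * Bulk transfers (PTRACE_GETREGS and friends) go through the regset
 * machinery via copy_regset_to_user() instead.
 */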
#ifdef CONFIG_IA32_EMULATION

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <asm/ia32.h>
#include <asm/user32.h>

#define R32(l,q)                                                        \
        case offsetof(struct user32, regs.l):                           \
                regs->q = value; break

#define SEG32(rs)                                                       \
        case offsetof(struct user32, regs.rs):                          \
                return set_segment_reg(child,                           \
                                       offsetof(struct user_regs_struct, rs), \
                                       value);                          \
                break

static int putreg32(struct task_struct *child, unsigned regno, u32 value)
{
        struct pt_regs *regs = task_pt_regs(child);

        switch (regno) {

        SEG32(cs);
        SEG32(ds);
        SEG32(es);
        SEG32(fs);
        SEG32(gs);
        SEG32(ss);

        R32(ebx, bx);
        R32(ecx, cx);
        R32(edx, dx);
        R32(edi, di);
        R32(esi, si);
        R32(ebp, bp);
        R32(eax, ax);
        R32(eip, ip);
        R32(esp, sp);

        case offsetof(struct user32, regs.orig_eax):
                /*
                 * A 32-bit debugger setting orig_eax means to restore
                 * the state of the task restarting a 32-bit syscall.
                 * Make sure we interpret the -ERESTART* codes correctly
                 * in case the task is not actually still sitting at the
                 * exit from a 32-bit syscall with TS_COMPAT still set.
                 */
                regs->orig_ax = value;
                if (syscall_get_nr(child, regs) >= 0)
                        task_thread_info(child)->status |= TS_COMPAT;
                break;

        case offsetof(struct user32, regs.eflags):
                return set_flags(child, value);

        case offsetof(struct user32, u_debugreg[0]) ...
             offsetof(struct user32, u_debugreg[7]):
                regno -= offsetof(struct user32, u_debugreg[0]);
                return ptrace_set_debugreg(child, regno / 4, value);

        default:
                if (regno > sizeof(struct user32) || (regno & 3))
                        return -EIO;

                /*
                 * Other dummy fields in the virtual user structure
                 * are ignored.
                 */
                break;
        }
        return 0;
}

#undef R32
#undef SEG32

#define R32(l,q)                                                        \
        case offsetof(struct user32, regs.l):                           \
                *val = regs->q; break

#define SEG32(rs)                                                       \
        case offsetof(struct user32, regs.rs):                          \
                *val = get_segment_reg(child,                           \
                                       offsetof(struct user_regs_struct, rs)); \
                break

static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
{
        struct pt_regs *regs = task_pt_regs(child);

        switch (regno) {

        SEG32(ds);
        SEG32(es);
        SEG32(fs);
        SEG32(gs);

        R32(cs, cs);
        R32(ss, ss);
        R32(ebx, bx);
        R32(ecx, cx);
        R32(edx, dx);
        R32(edi, di);
        R32(esi, si);
        R32(ebp, bp);
        R32(eax, ax);
        R32(orig_eax, orig_ax);
        R32(eip, ip);
        R32(esp, sp);

        case offsetof(struct user32, regs.eflags):
                *val = get_flags(child);
                break;

        case offsetof(struct user32, u_debugreg[0]) ...
             offsetof(struct user32, u_debugreg[7]):
                regno -= offsetof(struct user32, u_debugreg[0]);
                *val = ptrace_get_debugreg(child, regno / 4);
                break;

        default:
                if (regno > sizeof(struct user32) || (regno & 3))
                        return -EIO;

                /*
                 * Other dummy fields in the virtual user structure
                 * are ignored.
                 */
                *val = 0;
                break;
        }
        return 0;
}

#undef R32
#undef SEG32

static int genregs32_get(struct task_struct *target,
                         const struct user_regset *regset,
                         unsigned int pos, unsigned int count,
                         void *kbuf, void __user *ubuf)
{
        if (kbuf) {
                compat_ulong_t *k = kbuf;
                while (count >= sizeof(*k)) {
                        getreg32(target, pos, k++);
                        count -= sizeof(*k);
                        pos += sizeof(*k);
                }
        } else {
                compat_ulong_t __user *u = ubuf;
                while (count >= sizeof(*u)) {
                        compat_ulong_t word;
                        getreg32(target, pos, &word);
                        if (__put_user(word, u++))
                                return -EFAULT;
                        count -= sizeof(*u);
                        pos += sizeof(*u);
                }
        }

        return 0;
}

static int genregs32_set(struct task_struct *target,
                         const struct user_regset *regset,
                         unsigned int pos, unsigned int count,
                         const void *kbuf, const void __user *ubuf)
{
        int ret = 0;

        if (kbuf) {
                const compat_ulong_t *k = kbuf;
                while (count >= sizeof(*k) && !ret) {
                        ret = putreg32(target, pos, *k++);
                        count -= sizeof(*k);
                        pos += sizeof(*k);
                }
        } else {
                const compat_ulong_t __user *u = ubuf;
                while (count >= sizeof(*u) && !ret) {
                        compat_ulong_t word;
                        ret = __get_user(word, u++);
                        if (ret)
                                break;
                        ret = putreg32(target, pos, word);
                        count -= sizeof(*u);
                        pos += sizeof(*u);
                }
        }

        return ret;
}

#ifdef CONFIG_X86_X32_ABI
static long x32_arch_ptrace(struct task_struct *child,
                            compat_long_t request, compat_ulong_t caddr,
                            compat_ulong_t cdata)
{
        unsigned long addr = caddr;
        unsigned long data = cdata;
        void __user *datap = compat_ptr(data);
        int ret;

        switch (request) {
        /* Read 32 bits at location addr in the USER area.  Only allow
           returning the lower 32 bits of segment and debug registers. */
        case PTRACE_PEEKUSR: {
                u32 tmp;

                ret = -EIO;
                if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
                    addr < offsetof(struct user_regs_struct, cs))
                        break;

                tmp = 0;  /* Default return condition */
                if (addr < sizeof(struct user_regs_struct))
                        tmp = getreg(child, addr);
                else if (addr >= offsetof(struct user, u_debugreg[0]) &&
                         addr <= offsetof(struct user, u_debugreg[7])) {
                        addr -= offsetof(struct user, u_debugreg[0]);
                        tmp = ptrace_get_debugreg(child, addr / sizeof(data));
                }
                ret = put_user(tmp, (__u32 __user *)datap);
                break;
        }

        /* Write the word at location addr in the USER area.  Only allow
           updating segment and debug registers, with the upper 32 bits
           zero-extended. */
        case PTRACE_POKEUSR:
                ret = -EIO;
                if ((addr & (sizeof(data) - 1)) || addr >= sizeof(struct user) ||
                    addr < offsetof(struct user_regs_struct, cs))
                        break;

                if (addr < sizeof(struct user_regs_struct))
                        ret = putreg(child, addr, data);
                else if (addr >= offsetof(struct user, u_debugreg[0]) &&
                         addr <= offsetof(struct user, u_debugreg[7])) {
                        addr -= offsetof(struct user, u_debugreg[0]);
                        ret = ptrace_set_debugreg(child,
                                                  addr / sizeof(data), data);
                }
                break;

        case PTRACE_GETREGS:    /* Get all gp regs from the child. */
                return copy_regset_to_user(child,
                                           task_user_regset_view(current),
                                           REGSET_GENERAL,
                                           0, sizeof(struct user_regs_struct),
                                           datap);

        case PTRACE_SETREGS:    /* Set all gp regs in the child. */
                return copy_regset_from_user(child,
                                             task_user_regset_view(current),
                                             REGSET_GENERAL,
                                             0, sizeof(struct user_regs_struct),
                                             datap);

        case PTRACE_GETFPREGS:  /* Get the child FPU state. */
                return copy_regset_to_user(child,
                                           task_user_regset_view(current),
                                           REGSET_FP,
                                           0, sizeof(struct user_i387_struct),
                                           datap);

        case PTRACE_SETFPREGS:  /* Set the child FPU state. */
                return copy_regset_from_user(child,
                                             task_user_regset_view(current),
                                             REGSET_FP,
                                             0, sizeof(struct user_i387_struct),
                                             datap);

        default:
                return compat_ptrace_request(child, request, addr, data);
        }

        return ret;
}
#endif

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
                        compat_ulong_t caddr, compat_ulong_t cdata)
{
        unsigned long addr = caddr;
        unsigned long data = cdata;
        void __user *datap = compat_ptr(data);
        int ret;
        __u32 val;

#ifdef CONFIG_X86_X32_ABI
        if (!is_ia32_task())
                return x32_arch_ptrace(child, request, caddr, cdata);
#endif

        switch (request) {
        case PTRACE_PEEKUSR:
                ret = getreg32(child, addr, &val);
                if (ret == 0)
                        ret = put_user(val, (__u32 __user *)datap);
                break;

        case PTRACE_POKEUSR:
                ret = putreg32(child, addr, data);
                break;

        case PTRACE_GETREGS:    /* Get all gp regs from the child. */
                return copy_regset_to_user(child, &user_x86_32_view,
                                           REGSET_GENERAL,
                                           0, sizeof(struct user_regs_struct32),
                                           datap);

        case PTRACE_SETREGS:    /* Set all gp regs in the child. */
                return copy_regset_from_user(child, &user_x86_32_view,
                                             REGSET_GENERAL, 0,
                                             sizeof(struct user_regs_struct32),
                                             datap);

        case PTRACE_GETFPREGS:  /* Get the child FPU state. */
                return copy_regset_to_user(child, &user_x86_32_view,
                                           REGSET_FP, 0,
                                           sizeof(struct user_i387_ia32_struct),
                                           datap);

        case PTRACE_SETFPREGS:  /* Set the child FPU state. */
                return copy_regset_from_user(
                        child, &user_x86_32_view, REGSET_FP,
                        0, sizeof(struct user_i387_ia32_struct), datap);

        case PTRACE_GETFPXREGS: /* Get the child extended FPU state. */
                return copy_regset_to_user(child, &user_x86_32_view,
                                           REGSET_XFP, 0,
                                           sizeof(struct user32_fxsr_struct),
                                           datap);

        case PTRACE_SETFPXREGS: /* Set the child extended FPU state. */
                return copy_regset_from_user(child, &user_x86_32_view,
                                             REGSET_XFP, 0,
                                             sizeof(struct user32_fxsr_struct),
                                             datap);

        case PTRACE_GET_THREAD_AREA:
        case PTRACE_SET_THREAD_AREA:
                return arch_ptrace(child, request, addr, data);

        default:
                return compat_ptrace_request(child, request, addr, data);
        }

        return ret;
}
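
/*
 * Note: is_ia32_task() above tests the *calling* (tracing) task, so a
 * tracer built for the x32 ABI (compat, but not ia32) is diverted to
 * x32_arch_ptrace(), while a plain 32-bit tracer gets the user32
 * register view handled here.
 */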
#endif  /* CONFIG_IA32_EMULATION */

#ifdef CONFIG_X86_64

static struct user_regset x86_64_regsets[] __read_mostly = {
        [REGSET_GENERAL] = {
                .core_note_type = NT_PRSTATUS,
                .n = sizeof(struct user_regs_struct) / sizeof(long),
                .size = sizeof(long), .align = sizeof(long),
                .get = genregs_get, .set = genregs_set
        },
        [REGSET_FP] = {
                .core_note_type = NT_PRFPREG,
                .n = sizeof(struct user_i387_struct) / sizeof(long),
                .size = sizeof(long), .align = sizeof(long),
                .active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
        },
        [REGSET_XSTATE] = {
                .core_note_type = NT_X86_XSTATE,
                .size = sizeof(u64), .align = sizeof(u64),
                .active = xstateregs_active, .get = xstateregs_get,
                .set = xstateregs_set
        },
        [REGSET_IOPERM64] = {
                .core_note_type = NT_386_IOPERM,
                .n = IO_BITMAP_LONGS,
                .size = sizeof(long), .align = sizeof(long),
                .active = ioperm_active, .get = ioperm_get
        },
};

static const struct user_regset_view user_x86_64_view = {
        .name = "x86_64", .e_machine = EM_X86_64,
        .regsets = x86_64_regsets, .n = ARRAY_SIZE(x86_64_regsets)
};

#else  /* CONFIG_X86_32 */

#define user_regs_struct32      user_regs_struct
#define genregs32_get           genregs_get
#define genregs32_set           genregs_set

#define user_i387_ia32_struct   user_i387_struct
#define user32_fxsr_struct      user_fxsr_struct

#endif  /* CONFIG_X86_64 */

#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
static struct user_regset x86_32_regsets[] __read_mostly = {
        [REGSET_GENERAL] = {
                .core_note_type = NT_PRSTATUS,
                .n = sizeof(struct user_regs_struct32) / sizeof(u32),
                .size = sizeof(u32), .align = sizeof(u32),
                .get = genregs32_get, .set = genregs32_set
        },
        [REGSET_FP] = {
                .core_note_type = NT_PRFPREG,
                .n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
                .size = sizeof(u32), .align = sizeof(u32),
                .active = fpregs_active, .get = fpregs_get, .set = fpregs_set
        },
        [REGSET_XFP] = {
                .core_note_type = NT_PRXFPREG,
                .n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
                .size = sizeof(u32), .align = sizeof(u32),
                .active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
        },
        [REGSET_XSTATE] = {
                .core_note_type = NT_X86_XSTATE,
                .size = sizeof(u64), .align = sizeof(u64),
                .active = xstateregs_active, .get = xstateregs_get,
                .set = xstateregs_set
        },
        [REGSET_TLS] = {
                .core_note_type = NT_386_TLS,
                .n = GDT_ENTRY_TLS_ENTRIES, .bias = GDT_ENTRY_TLS_MIN,
                .size = sizeof(struct user_desc),
                .align = sizeof(struct user_desc),
                .active = regset_tls_active,
                .get = regset_tls_get, .set = regset_tls_set
        },
        [REGSET_IOPERM32] = {
                .core_note_type = NT_386_IOPERM,
                .n = IO_BITMAP_BYTES / sizeof(u32),
                .size = sizeof(u32), .align = sizeof(u32),
                .active = ioperm_active, .get = ioperm_get
        },
};

static const struct user_regset_view user_x86_32_view = {
        .name = "i386", .e_machine = EM_386,
        .regsets = x86_32_regsets, .n = ARRAY_SIZE(x86_32_regsets)
};
#endif

/*
 * This represents bytes 464..511 in the memory layout exported through
 * the REGSET_XSTATE interface.
 */
u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];

void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
{
#ifdef CONFIG_X86_64
        x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
        x86_32_regsets[REGSET_XSTATE].n = size / sizeof(u64);
#endif
        xstate_fx_sw_bytes[USER_XSTATE_XCR0_WORD] = xstate_mask;
}
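
/*
 * REGSET_XSTATE deliberately has no compile-time .n in the arrays above:
 * the xstate image size depends on which features the CPU reports, so
 * update_regset_xstate_info() fills it in at runtime from the probed
 * size, and records the feature mask in the software-reserved bytes
 * (464..511) of the exported layout via xstate_fx_sw_bytes.
 */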
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_IA32_EMULATION
        if (test_tsk_thread_flag(task, TIF_IA32))
#endif
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
                return &user_x86_32_view;
#endif
#ifdef CONFIG_X86_64
        return &user_x86_64_view;
#endif
}

static void fill_sigtrap_info(struct task_struct *tsk,
                              struct pt_regs *regs,
                              int error_code, int si_code,
                              struct siginfo *info)
{
        tsk->thread.trap_nr = X86_TRAP_DB;
        tsk->thread.error_code = error_code;

        memset(info, 0, sizeof(*info));
        info->si_signo = SIGTRAP;
        info->si_code = si_code;
        info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
}

void user_single_step_siginfo(struct task_struct *tsk,
                              struct pt_regs *regs,
                              struct siginfo *info)
{
        fill_sigtrap_info(tsk, regs, 0, TRAP_BRKPT, info);
}

void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
                  int error_code, int si_code)
{
        struct siginfo info;

        fill_sigtrap_info(tsk, regs, error_code, si_code, &info);
        /* Send us the fake SIGTRAP */
        force_sig_info(SIGTRAP, &info, tsk);
}

#ifdef CONFIG_X86_32
# define IS_IA32        1
#elif defined CONFIG_IA32_EMULATION
# define IS_IA32        is_compat_task()
#else
# define IS_IA32        0
#endif

/*
 * We must return the syscall number to actually look up in the table.
 * This can be -1L to skip running any syscall at all.
 */
long syscall_trace_enter(struct pt_regs *regs)
{
        long ret = 0;

        /*
         * If we stepped into a sysenter/syscall insn, it trapped in
         * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
         * If user-mode had set TF itself, then it's still clear from
         * do_debug() and we need to set it again to restore the user
         * state. If we entered on the slow path, TF was already set.
         */
        if (test_thread_flag(TIF_SINGLESTEP))
                regs->flags |= X86_EFLAGS_TF;

        /* do the secure computing check first */
        if (secure_computing(regs->orig_ax)) {
                /* seccomp failures shouldn't expose any additional code. */
                ret = -1L;
                goto out;
        }

        if (unlikely(test_thread_flag(TIF_SYSCALL_EMU)))
                ret = -1L;

        if ((ret || test_thread_flag(TIF_SYSCALL_TRACE)) &&
            tracehook_report_syscall_entry(regs))
                ret = -1L;

        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->orig_ax);

        if (IS_IA32)
                audit_syscall_entry(AUDIT_ARCH_I386,
                                    regs->orig_ax,
                                    regs->bx, regs->cx,
                                    regs->dx, regs->si);
#ifdef CONFIG_X86_64
        else
                audit_syscall_entry(AUDIT_ARCH_X86_64,
                                    regs->orig_ax,
                                    regs->di, regs->si,
                                    regs->dx, regs->r10);
#endif

out:
        return ret ?: regs->orig_ax;
}

void syscall_trace_leave(struct pt_regs *regs)
{
        bool step;

        audit_syscall_exit(regs);

        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_exit(regs, regs->ax);

        /*
         * If TIF_SYSCALL_EMU is set, we only get here because of
         * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
         * We already reported this syscall instruction in
         * syscall_trace_enter().
         */
        step = unlikely(test_thread_flag(TIF_SINGLESTEP)) &&
               !test_thread_flag(TIF_SYSCALL_EMU);
        if (step || test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall_exit(regs, step);
}