/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * A code-rewriter that enables instruction single-stepping.
 * Derived from iLib's single-stepping code.
 */

#ifndef __tilegx__   /* Hardware support for single step unavailable. */

/* These functions are only used on the TILE platform */
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <linux/mman.h>
#include <linux/types.h>
#include <linux/err.h>
#include <asm/cacheflush.h>
#include <asm/opcode-tile.h>
#include <asm/opcode_constants.h>
#include <arch/abi.h>

#define signExtend17(val) sign_extend((val), 17)
#define TILE_X1_MASK (0xffffffffULL << 31)
  31. int unaligned_printk;
  32. static int __init setup_unaligned_printk(char *str)
  33. {
  34. long val;
  35. if (strict_strtol(str, 0, &val) != 0)
  36. return 0;
  37. unaligned_printk = val;
  38. pr_info("Printk for each unaligned data accesses is %s\n",
  39. unaligned_printk ? "enabled" : "disabled");
  40. return 1;
  41. }
  42. __setup("unaligned_printk=", setup_unaligned_printk);
/*
 * Running count of unaligned fixups performed; surfaced to userspace
 * via /proc/sys/tile/unaligned_fixup/count (see message text below).
 */
unsigned int unaligned_fixup_count;

/* Classification of the memory operation in the bundle being stepped. */
enum mem_op {
	MEMOP_NONE,
	MEMOP_LOAD,
	MEMOP_STORE,
	MEMOP_LOAD_POSTINCR,	/* load, then post-increment the address reg */
	MEMOP_STORE_POSTINCR	/* store, then post-increment the address reg */
};
  51. static inline tile_bundle_bits set_BrOff_X1(tile_bundle_bits n, s32 offset)
  52. {
  53. tile_bundle_bits result;
  54. /* mask out the old offset */
  55. tile_bundle_bits mask = create_BrOff_X1(-1);
  56. result = n & (~mask);
  57. /* or in the new offset */
  58. result |= create_BrOff_X1(offset);
  59. return result;
  60. }
  61. static inline tile_bundle_bits move_X1(tile_bundle_bits n, int dest, int src)
  62. {
  63. tile_bundle_bits result;
  64. tile_bundle_bits op;
  65. result = n & (~TILE_X1_MASK);
  66. op = create_Opcode_X1(SPECIAL_0_OPCODE_X1) |
  67. create_RRROpcodeExtension_X1(OR_SPECIAL_0_OPCODE_X1) |
  68. create_Dest_X1(dest) |
  69. create_SrcB_X1(TREG_ZERO) |
  70. create_SrcA_X1(src) ;
  71. result |= op;
  72. return result;
  73. }
/* Rewrite the X1 slot of @n to a no-op ("move zero, zero"). */
static inline tile_bundle_bits nop_X1(tile_bundle_bits n)
{
	return move_X1(n, TREG_ZERO, TREG_ZERO);
}
  78. static inline tile_bundle_bits addi_X1(
  79. tile_bundle_bits n, int dest, int src, int imm)
  80. {
  81. n &= ~TILE_X1_MASK;
  82. n |= (create_SrcA_X1(src) |
  83. create_Dest_X1(dest) |
  84. create_Imm8_X1(imm) |
  85. create_S_X1(0) |
  86. create_Opcode_X1(IMM_0_OPCODE_X1) |
  87. create_ImmOpcodeExtension_X1(ADDI_IMM_0_OPCODE_X1));
  88. return n;
  89. }
/*
 * Emulate an unaligned load or store on behalf of the user process,
 * then rewrite @bundle so that single-stepping it does not repeat the
 * memory access: the Y2 slot becomes a prefetch, a postincrement
 * becomes just its addi, and any other X1 access becomes a nop.
 *
 * Returns the rewritten bundle, or 0 if a signal was posted (fault
 * during emulation, or fixups disabled) and the caller should give up.
 */
static tile_bundle_bits rewrite_load_store_unaligned(
		struct single_step_state *state,
		tile_bundle_bits bundle,
		struct pt_regs *regs,
		enum mem_op mem_op,
		int size, int sign_ext)
{
	unsigned char __user *addr;
	int val_reg, addr_reg, err, val;

	/* Get address and value registers */
	if (bundle & TILE_BUNDLE_Y_ENCODING_MASK) {
		addr_reg = get_SrcA_Y2(bundle);
		val_reg = get_SrcBDest_Y2(bundle);
	} else if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
		addr_reg = get_SrcA_X1(bundle);
		val_reg = get_Dest_X1(bundle);
	} else {
		addr_reg = get_SrcA_X1(bundle);
		val_reg = get_SrcB_X1(bundle);
	}

	/*
	 * If registers are not GPRs, don't try to handle it.
	 *
	 * FIXME: we could handle non-GPR loads by getting the real value
	 * from memory, writing it to the single step buffer, using a
	 * temp_reg to hold a pointer to that memory, then executing that
	 * instruction and resetting temp_reg.  For non-GPR stores, it's a
	 * little trickier; we could use the single step buffer for that
	 * too, but we'd have to add some more state bits so that we could
	 * call back in here to copy that value to the real target.  For
	 * now, we just handle the simple case.
	 */
	if ((val_reg >= PTREGS_NR_GPRS &&
	     (val_reg != TREG_ZERO ||
	      mem_op == MEMOP_LOAD ||
	      mem_op == MEMOP_LOAD_POSTINCR)) ||
	    addr_reg >= PTREGS_NR_GPRS)
		return bundle;

	/* If it's aligned, don't handle it specially */
	addr = (void __user *)regs->regs[addr_reg];
	if (((unsigned long)addr % size) == 0)
		return bundle;

#ifndef __LITTLE_ENDIAN
# error We assume little-endian representation with copy_xx_user size 2 here
#endif
	/* Handle unaligned load/store */
	if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
		unsigned short val_16;
		switch (size) {
		case 2:
			err = copy_from_user(&val_16, addr, sizeof(val_16));
			val = sign_ext ? ((short)val_16) : val_16;
			break;
		case 4:
			err = copy_from_user(&val, addr, sizeof(val));
			break;
		default:
			BUG();
		}
		if (err == 0) {
			/* Defer the register write until we return to user. */
			state->update_reg = val_reg;
			state->update_value = val;
			state->update = 1;
		}
	} else {
		val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg];
		err = copy_to_user(addr, &val, size);
	}

	/* The user address faulted during emulation: deliver SIGSEGV. */
	if (err) {
		siginfo_t info = {
			.si_signo = SIGSEGV,
			.si_code = SEGV_MAPERR,
			.si_addr = addr
		};
		trace_unhandled_signal("segfault", regs,
				       (unsigned long)addr, SIGSEGV);
		force_sig_info(info.si_signo, &info, current);
		return (tile_bundle_bits) 0;
	}

	/* Fixups are administratively disabled: deliver SIGBUS instead. */
	if (unaligned_fixup == 0) {
		siginfo_t info = {
			.si_signo = SIGBUS,
			.si_code = BUS_ADRALN,
			.si_addr = addr
		};
		trace_unhandled_signal("unaligned trap", regs,
				       (unsigned long)addr, SIGBUS);
		force_sig_info(info.si_signo, &info, current);
		return (tile_bundle_bits) 0;
	}

	/* Warn on the first fixup ever, or on every one if requested. */
	if (unaligned_printk || unaligned_fixup_count == 0) {
		pr_info("Process %d/%s: PC %#lx: Fixup of"
			" unaligned %s at %#lx.\n",
			current->pid, current->comm, regs->pc,
			(mem_op == MEMOP_LOAD ||
			 mem_op == MEMOP_LOAD_POSTINCR) ?
			"load" : "store",
			(unsigned long)addr);
		if (!unaligned_printk) {
#define P pr_info
P("\n");
P("Unaligned fixups in the kernel will slow your application considerably.\n");
P("To find them, write a \"1\" to /proc/sys/tile/unaligned_fixup/printk,\n");
P("which requests the kernel show all unaligned fixups, or write a \"0\"\n");
P("to /proc/sys/tile/unaligned_fixup/enabled, in which case each unaligned\n");
P("access will become a SIGBUS you can debug. No further warnings will be\n");
P("shown so as to avoid additional slowdown, but you can track the number\n");
P("of fixups performed via /proc/sys/tile/unaligned_fixup/count.\n");
P("Use the tile-addr2line command (see \"info addr2line\") to decode PCs.\n");
P("\n");
#undef P
		}
	}
	++unaligned_fixup_count;

	if (bundle & TILE_BUNDLE_Y_ENCODING_MASK) {
		/* Convert the Y2 instruction to a prefetch. */
		bundle &= ~(create_SrcBDest_Y2(-1) |
			    create_Opcode_Y2(-1));
		bundle |= (create_SrcBDest_Y2(TREG_ZERO) |
			   create_Opcode_Y2(LW_OPCODE_Y2));
	/* Replace the load postincr with an addi */
	} else if (mem_op == MEMOP_LOAD_POSTINCR) {
		bundle = addi_X1(bundle, addr_reg, addr_reg,
				 get_Imm8_X1(bundle));
	/* Replace the store postincr with an addi */
	} else if (mem_op == MEMOP_STORE_POSTINCR) {
		bundle = addi_X1(bundle, addr_reg, addr_reg,
				 get_Dest_Imm8_X1(bundle));
	} else {
		/* Convert the X1 instruction to a nop. */
		bundle &= ~(create_Opcode_X1(-1) |
			    create_UnShOpcodeExtension_X1(-1) |
			    create_UnOpcodeExtension_X1(-1));
		bundle |= (create_Opcode_X1(SHUN_0_OPCODE_X1) |
			   create_UnShOpcodeExtension_X1(
				   UN_0_SHUN_0_OPCODE_X1) |
			   create_UnOpcodeExtension_X1(
				   NOP_UN_0_SHUN_0_OPCODE_X1));
	}

	return bundle;
}
/*
 * Called after execve() has started the new image.  This allows us
 * to reset the info state.  Note that the mmap'ed memory, if there
 * was any, has already been unmapped by the exec.
 */
void single_step_execve(void)
{
	struct thread_info *ti = current_thread_info();

	kfree(ti->step_state);	/* kfree(NULL) is a no-op */
	ti->step_state = NULL;
}
/**
 * single_step_once() - entry point when single stepping has been triggered.
 * @regs: The machine register state
 *
 * When we arrive at this routine via a trampoline, the single step
 * engine copies the executing bundle to the single step buffer.
 * If the instruction is a condition branch, then the target is
 * reset to one past the next instruction.  If the instruction
 * sets the lr, then that is noted.  If the instruction is a jump
 * or call, then the new target pc is preserved and the current
 * bundle instruction set to null.
 *
 * The necessary post-single-step rewriting information is stored in
 * the single_step_state.  We use data segment values because the
 * stack will be rewound when we run the rewritten single-stepped
 * instruction.
 */
void single_step_once(struct pt_regs *regs)
{
	extern tile_bundle_bits __single_step_ill_insn;
	extern tile_bundle_bits __single_step_j_insn;
	extern tile_bundle_bits __single_step_addli_insn;
	extern tile_bundle_bits __single_step_auli_insn;
	struct thread_info *info = (void *)current_thread_info();
	struct single_step_state *state = info->step_state;
	int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
	tile_bundle_bits __user *buffer, *pc;
	tile_bundle_bits bundle;
	int temp_reg;
	int target_reg = TREG_LR;
	int err;
	enum mem_op mem_op = MEMOP_NONE;
	int size = 0, sign_ext = 0;	/* happy compiler */

	/* Template bundles copied into the step buffer below. */
	asm(
" .pushsection .rodata.single_step\n"
" .align 8\n"
" .globl __single_step_ill_insn\n"
"__single_step_ill_insn:\n"
" ill\n"
" .globl __single_step_addli_insn\n"
"__single_step_addli_insn:\n"
" { nop; addli r0, zero, 0 }\n"
" .globl __single_step_auli_insn\n"
"__single_step_auli_insn:\n"
" { nop; auli r0, r0, 0 }\n"
" .globl __single_step_j_insn\n"
"__single_step_j_insn:\n"
" j .\n"
" .popsection\n"
	);

	/*
	 * Enable interrupts here to allow touching userspace and the like.
	 * The callers expect this: do_trap() already has interrupts
	 * enabled, and do_work_pending() handles functions that enable
	 * interrupts internally.
	 */
	local_irq_enable();

	if (state == NULL) {
		/* allocate a page of writable, executable memory */
		state = kmalloc(sizeof(struct single_step_state), GFP_KERNEL);
		if (state == NULL) {
			pr_err("Out of kernel memory trying to single-step\n");
			return;
		}

		/* allocate a cache line of writable, executable memory */
		down_write(&current->mm->mmap_sem);
		buffer = (void __user *) do_mmap(NULL, 0, 64,
				PROT_EXEC | PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS,
				0);
		up_write(&current->mm->mmap_sem);

		if (IS_ERR((void __force *)buffer)) {
			kfree(state);
			pr_err("Out of kernel pages trying to single-step\n");
			return;
		}

		state->buffer = buffer;
		state->is_enabled = 0;
		info->step_state = state;

		/* Validate our stored instruction patterns */
		BUG_ON(get_Opcode_X1(__single_step_addli_insn) !=
		       ADDLI_OPCODE_X1);
		BUG_ON(get_Opcode_X1(__single_step_auli_insn) !=
		       AULI_OPCODE_X1);
		BUG_ON(get_SrcA_X1(__single_step_addli_insn) != TREG_ZERO);
		BUG_ON(get_Dest_X1(__single_step_addli_insn) != 0);
		BUG_ON(get_JOffLong_X1(__single_step_j_insn) != 0);
	}

	/*
	 * If we are returning from a syscall, we still haven't hit the
	 * "ill" for the swint1 instruction.  So back the PC up to be
	 * pointing at the swint1, but we'll actually return directly
	 * back to the "ill" so we come back in via SIGILL as if we
	 * had "executed" the swint1 without ever being in kernel space.
	 */
	if (regs->faultnum == INT_SWINT_1)
		regs->pc -= 8;

	pc = (tile_bundle_bits __user *)(regs->pc);
	if (get_user(bundle, pc) != 0) {
		pr_err("Couldn't read instruction at %p trying to step\n", pc);
		return;
	}

	/* We'll follow the instruction with 2 ill op bundles */
	state->orig_pc = (unsigned long)pc;
	state->next_pc = (unsigned long)(pc + 1);
	state->branch_next_pc = 0;
	state->update = 0;

	if (!(bundle & TILE_BUNDLE_Y_ENCODING_MASK)) {
		/* two wide, check for control flow */
		int opcode = get_Opcode_X1(bundle);

		switch (opcode) {
		/* branches */
		case BRANCH_OPCODE_X1:
		{
			s32 offset = signExtend17(get_BrOff_X1(bundle));

			/*
			 * For branches, we use a rewriting trick to let the
			 * hardware evaluate whether the branch is taken or
			 * untaken.  We record the target offset and then
			 * rewrite the branch instruction to target 1 insn
			 * ahead if the branch is taken.  We then follow the
			 * rewritten branch with two bundles, each containing
			 * an "ill" instruction.  The supervisor examines the
			 * pc after the single step code is executed, and if
			 * the pc is the first ill instruction, then the
			 * branch (if any) was not taken.  If the pc is the
			 * second ill instruction, then the branch was
			 * taken.  The new pc is computed for these cases, and
			 * inserted into the registers for the thread.  If
			 * the pc is the start of the single step code, then
			 * an exception or interrupt was taken before the
			 * code started processing, and the same "original"
			 * pc is restored.  This change, different from the
			 * original implementation, has the advantage of
			 * executing a single user instruction.
			 */
			state->branch_next_pc = (unsigned long)(pc + offset);

			/* rewrite branch offset to go forward one bundle */
			bundle = set_BrOff_X1(bundle, 2);
		}
		break;

		/* jumps */
		case JALB_OPCODE_X1:
		case JALF_OPCODE_X1:
			state->update = 1;
			state->next_pc =
				(unsigned long) (pc + get_JOffLong_X1(bundle));
			break;

		case JB_OPCODE_X1:
		case JF_OPCODE_X1:
			state->next_pc =
				(unsigned long) (pc + get_JOffLong_X1(bundle));
			bundle = nop_X1(bundle);
			break;

		case SPECIAL_0_OPCODE_X1:
			switch (get_RRROpcodeExtension_X1(bundle)) {
			/* jump-register */
			case JALRP_SPECIAL_0_OPCODE_X1:
			case JALR_SPECIAL_0_OPCODE_X1:
				state->update = 1;
				state->next_pc =
					regs->regs[get_SrcA_X1(bundle)];
				break;

			case JRP_SPECIAL_0_OPCODE_X1:
			case JR_SPECIAL_0_OPCODE_X1:
				state->next_pc =
					regs->regs[get_SrcA_X1(bundle)];
				bundle = nop_X1(bundle);
				break;

			case LNK_SPECIAL_0_OPCODE_X1:
				state->update = 1;
				target_reg = get_Dest_X1(bundle);
				break;

			/* stores */
			case SH_SPECIAL_0_OPCODE_X1:
				mem_op = MEMOP_STORE;
				size = 2;
				break;

			case SW_SPECIAL_0_OPCODE_X1:
				mem_op = MEMOP_STORE;
				size = 4;
				break;
			}
			break;

		/* loads and iret */
		case SHUN_0_OPCODE_X1:
			if (get_UnShOpcodeExtension_X1(bundle) ==
			    UN_0_SHUN_0_OPCODE_X1) {
				switch (get_UnOpcodeExtension_X1(bundle)) {
				case LH_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 2;
					sign_ext = 1;
					break;

				case LH_U_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 2;
					sign_ext = 0;
					break;

				case LW_UN_0_SHUN_0_OPCODE_X1:
					mem_op = MEMOP_LOAD;
					size = 4;
					break;

				case IRET_UN_0_SHUN_0_OPCODE_X1:
				{
					unsigned long ex0_0 = __insn_mfspr(
						SPR_EX_CONTEXT_0_0);
					unsigned long ex0_1 = __insn_mfspr(
						SPR_EX_CONTEXT_0_1);
					/*
					 * Special-case it if we're iret'ing
					 * to PL0 again.  Otherwise just let
					 * it run and it will generate SIGILL.
					 */
					if (EX1_PL(ex0_1) == USER_PL) {
						state->next_pc = ex0_0;
						regs->ex1 = ex0_1;
						bundle = nop_X1(bundle);
					}
				}
				}
			}
			break;

#if CHIP_HAS_WH64()
		/* postincrement operations */
		case IMM_0_OPCODE_X1:
			switch (get_ImmOpcodeExtension_X1(bundle)) {
			case LWADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 4;
				break;

			case LHADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 2;
				sign_ext = 1;
				break;

			case LHADD_U_IMM_0_OPCODE_X1:
				mem_op = MEMOP_LOAD_POSTINCR;
				size = 2;
				sign_ext = 0;
				break;

			case SWADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_STORE_POSTINCR;
				size = 4;
				break;

			case SHADD_IMM_0_OPCODE_X1:
				mem_op = MEMOP_STORE_POSTINCR;
				size = 2;
				break;

			default:
				break;
			}
			break;
#endif /* CHIP_HAS_WH64() */
		}

		if (state->update) {
			/*
			 * Get an available register.  We start with a
			 * bitmask with 1's for available registers.
			 * We truncate to the low 32 registers since
			 * we are guaranteed to have set bits in the
			 * low 32 bits, then use ctz to pick the first.
			 */
			u32 mask = (u32) ~((1ULL << get_Dest_X0(bundle)) |
					   (1ULL << get_SrcA_X0(bundle)) |
					   (1ULL << get_SrcB_X0(bundle)) |
					   (1ULL << target_reg));
			temp_reg = __builtin_ctz(mask);
			state->update_reg = temp_reg;
			state->update_value = regs->regs[temp_reg];
			regs->regs[temp_reg] = (unsigned long) (pc+1);
			regs->flags |= PT_FLAGS_RESTORE_REGS;
			bundle = move_X1(bundle, target_reg, temp_reg);
		}
	} else {
		int opcode = get_Opcode_Y2(bundle);

		switch (opcode) {
		/* loads */
		case LH_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 2;
			sign_ext = 1;
			break;

		case LH_U_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 2;
			sign_ext = 0;
			break;

		case LW_OPCODE_Y2:
			mem_op = MEMOP_LOAD;
			size = 4;
			break;

		/* stores */
		case SH_OPCODE_Y2:
			mem_op = MEMOP_STORE;
			size = 2;
			break;

		case SW_OPCODE_Y2:
			mem_op = MEMOP_STORE;
			size = 4;
			break;
		}
	}

	/*
	 * Check if we need to rewrite an unaligned load/store.
	 * Returning zero is a special value meaning we need to SIGSEGV.
	 */
	if (mem_op != MEMOP_NONE && unaligned_fixup >= 0) {
		bundle = rewrite_load_store_unaligned(state, bundle, regs,
						      mem_op, size, sign_ext);
		if (bundle == 0)
			return;
	}

	/* write the bundle to our execution area */
	buffer = state->buffer;
	err = __put_user(bundle, buffer++);

	/*
	 * If we're really single-stepping, we take an INT_ILL after.
	 * If we're just handling an unaligned access, we can just
	 * jump directly back to where we were in user code.
	 */
	if (is_single_step) {
		err |= __put_user(__single_step_ill_insn, buffer++);
		err |= __put_user(__single_step_ill_insn, buffer++);
	} else {
		long delta;

		if (state->update) {
			/* We have some state to update; do it inline */
			int ha16;
			bundle = __single_step_addli_insn;
			bundle |= create_Dest_X1(state->update_reg);
			bundle |= create_Imm16_X1(state->update_value);
			err |= __put_user(bundle, buffer++);
			bundle = __single_step_auli_insn;
			bundle |= create_Dest_X1(state->update_reg);
			bundle |= create_SrcA_X1(state->update_reg);
			ha16 = (state->update_value + 0x8000) >> 16;
			bundle |= create_Imm16_X1(ha16);
			err |= __put_user(bundle, buffer++);
			state->update = 0;
		}

		/* End with a jump back to the next instruction */
		delta = ((regs->pc + TILE_BUNDLE_SIZE_IN_BYTES) -
			(unsigned long)buffer) >>
			TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES;
		bundle = __single_step_j_insn;
		bundle |= create_JOffLong_X1(delta);
		err |= __put_user(bundle, buffer++);
	}

	if (err) {
		pr_err("Fault when writing to single-step buffer\n");
		return;
	}

	/*
	 * Flush the buffer.
	 * We do a local flush only, since this is a thread-specific buffer.
	 */
	__flush_icache_range((unsigned long)state->buffer,
			     (unsigned long)buffer);

	/* Indicate enabled */
	state->is_enabled = is_single_step;
	regs->pc = (unsigned long)state->buffer;

	/* Fault immediately if we are coming back from a syscall. */
	if (regs->faultnum == INT_SWINT_1)
		regs->pc += 8;
}
#else

#include <linux/smp.h>
#include <linux/ptrace.h>
#include <arch/spr_def.h>

static DEFINE_PER_CPU(unsigned long, ss_saved_pc);
/*
 * Called directly on the occasion of an interrupt.
 *
 * If the process doesn't have single step set, then we use this as an
 * opportunity to turn single step off.
 *
 * It has been mentioned that we could conditionally turn off single stepping
 * on each entry into the kernel and rely on single_step_once to turn it
 * on for the processes that matter (as we already do), but this
 * implementation is somewhat more efficient in that we muck with registers
 * once on a bum interrupt rather than on every entry into the kernel.
 *
 * If SINGLE_STEP_CONTROL_K has CANCELED set, then an interrupt occurred,
 * so we have to run through this process again before we can say that an
 * instruction has executed.
 *
 * swint will set CANCELED, but it's a legitimate instruction.  Fortunately
 * it changes the PC.  If it hasn't changed, then we know that the interrupt
 * wasn't generated by swint and we'll need to run this process again before
 * we can say an instruction has executed.
 *
 * If either CANCELED == 0 or the PC's changed, we send out SIGTRAPs and get
 * on with our lives.
 */
void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
{
	unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
	struct thread_info *info = (void *)current_thread_info();
	int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
	unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);

	if (is_single_step == 0) {
		/* Stepping no longer wanted: disable the engine. */
		__insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 0);
	} else if ((*ss_pc != regs->pc) ||
		   (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) {
		/* An instruction really executed (see comment above). */
		ptrace_notify(SIGTRAP);
		control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
		control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
		__insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
	}
}
  653. /*
  654. * Called from need_singlestep. Set up the control registers and the enable
  655. * register, then return back.
  656. */
  657. void single_step_once(struct pt_regs *regs)
  658. {
  659. unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
  660. unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
  661. *ss_pc = regs->pc;
  662. control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
  663. control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
  664. __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
  665. __insn_mtspr(SPR_SINGLE_STEP_EN_K_K, 1 << USER_PL);
  666. }
/*
 * On tilegx there is no per-thread step buffer to tear down at
 * execve time, so this is deliberately empty.
 */
void single_step_execve(void)
{
	/* Nothing */
}
#endif /* !__tilegx__ */