unaligned.c
/*
 * Handle unaligned accesses by emulation.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 *
 * This file contains the exception handler for address error exceptions,
 * with the special capability to execute faulting instructions in software.
 * The handler does not try to handle the case when the program counter
 * points to an address not aligned to a word boundary.
 *
 * Putting data at unaligned addresses is bad practice even on Intel, where
 * only the performance is affected.  Much worse is that such code is
 * non-portable.  Since several programs die on MIPS due to alignment
 * problems, I decided to implement this handler anyway, though I originally
 * didn't intend to do this at all for user code.
 *
 * For now I enable fixing of address errors by default to make life easier.
 * I however intend to disable this at some point in the future, when the
 * alignment problems with user programs have been fixed.  For programmers
 * this is the right way to go.
 *
 * Fixing address errors is a per-process option.  The option is inherited
 * across fork(2) and execve(2) calls.  If you really want to use the
 * option in your user programs - I strongly discourage the use of the
 * software emulation - use the following code in your userland stuff:
 *
 * #include <sys/sysmips.h>
 *
 * ...
 * sysmips(MIPS_FIXADE, x);
 * ...
 *
 * The argument x is 0 to disable software emulation, nonzero to enable it.
 *
 * Below is a little program to play around with this feature.
 *
 * #include <stdio.h>
 * #include <stdlib.h>
 * #include <sys/sysmips.h>
 *
 * struct foo {
 *         unsigned char bar[8];
 * };
 *
 * int main(int argc, char *argv[])
 * {
 *         struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
 *         unsigned int *p = (unsigned int *) (x.bar + 3);
 *         int i;
 *
 *         if (argc > 1)
 *                 sysmips(MIPS_FIXADE, atoi(argv[1]));
 *
 *         printf("*p = %08x\n", *p);
 *
 *         *p = 0xdeadface;
 *
 *         for (i = 0; i <= 7; i++)
 *                 printf("%02x ", x.bar[i]);
 *         printf("\n");
 *
 *         return 0;
 * }
 *
 * Coprocessor loads are not supported; I think this case is unimportant
 * in practice.
 *
 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
 *       exception for the R6000.
 *       A store crossing a page boundary might be executed only partially.
 *       Undo the partial store in this case.
 */
#include <linux/mm.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/perf_event.h>

#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/byteorder.h>
#include <asm/cop2.h>
#include <asm/inst.h>
#include <asm/uaccess.h>

#define STR(x)	__STR(x)
#define __STR(x) #x

enum {
	UNALIGNED_ACTION_QUIET,
	UNALIGNED_ACTION_SIGNAL,
	UNALIGNED_ACTION_SHOW,
};

#ifdef CONFIG_DEBUG_FS
static u32 unaligned_instructions;
static u32 unaligned_action;
#else
#define unaligned_action UNALIGNED_ACTION_QUIET
#endif
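
/*
 * unaligned_action controls what do_ade() does with a fixable fault:
 * UNALIGNED_ACTION_QUIET emulates the access silently,
 * UNALIGNED_ACTION_SIGNAL skips emulation and sends SIGBUS instead, and
 * UNALIGNED_ACTION_SHOW dumps the registers before emulating.  With
 * CONFIG_DEBUG_FS both the action and the count of emulated instructions
 * are exported as debugfs files by debugfs_unaligned() below (typically
 * under /sys/kernel/debug/mips/, assuming debugfs is mounted at its usual
 * location).
 */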
extern void show_registers(struct pt_regs *regs);

static void emulate_load_store_insn(struct pt_regs *regs,
	void __user *addr, unsigned int __user *pc)
{
	union mips_instruction insn;
	unsigned long value;
	unsigned int res;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

	/*
	 * This load never faults.
	 */
	__get_user(insn.word, pc);

	switch (insn.i_format.opcode) {
	/*
	 * These are instructions that a compiler doesn't generate.  We
	 * can assume therefore that the code is MIPS-aware and
	 * really buggy.  Emulating these instructions would break the
	 * semantics anyway.
	 */
	case ll_op:
	case lld_op:
	case sc_op:
	case scd_op:

	/*
	 * For these instructions the only way to create an address
	 * error is an attempted access to kernel/supervisor address
	 * space.
	 */
	case ldl_op:
	case ldr_op:
	case lwl_op:
	case lwr_op:
	case sdl_op:
	case sdr_op:
	case swl_op:
	case swr_op:
	case lb_op:
	case lbu_op:
	case sb_op:
		goto sigbus;

	/*
	 * The remaining opcodes are the ones that are really of interest.
	 */
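	/*
	 * Each of the cases below follows the same pattern: check the
	 * address with access_ok(), perform the access from a short inline
	 * asm sequence built out of narrower or partial-word instructions,
	 * then advance the EPC past the faulting instruction with
	 * compute_return_epc() and, for loads, store the result in
	 * regs->regs[rt].  If one of the labelled asm instructions faults,
	 * its __ex_table entry redirects execution to the local fixup stub
	 * (label 4), which sets res to -EFAULT and jumps back so the C code
	 * can take the fault path.
	 */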
	case lh_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		__asm__ __volatile__ (".set\tnoat\n"
#ifdef __BIG_ENDIAN
			"1:\tlb\t%0, 0(%2)\n"
			"2:\tlbu\t$1, 1(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tlb\t%0, 1(%2)\n"
			"2:\tlbu\t$1, 0(%2)\n\t"
#endif
			"sll\t%0, 0x8\n\t"
			"or\t%0, $1\n\t"
			"li\t%1, 0\n"
			"3:\t.set\tat\n\t"
			".section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%1, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=&r" (value), "=r" (res)
			: "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
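
	/*
	 * The lh/lhu sequences (and the sh sequence further down) use $1,
	 * the assembler temporary, as a scratch register, which is why they
	 * are bracketed with ".set noat"/".set at".  The two byte loads
	 * fetch the high and low halves in the order dictated by the
	 * endianness, and the sll/or pair merges them into the destination.
	 */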
	case lw_op:
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tlwl\t%0, (%2)\n"
			"2:\tlwr\t%0, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tlwl\t%0, 3(%2)\n"
			"2:\tlwr\t%0, (%2)\n\t"
#endif
			"li\t%1, 0\n"
			"3:\t.section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%1, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=&r" (value), "=r" (res)
			: "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
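
	/*
	 * lwl/lwr (and the ldl/ldr, swl/swr, sdl/sdr pairs used below) are
	 * the MIPS partial-word instructions: together a pair transfers the
	 * two pieces of a value that straddles an alignment boundary, so
	 * the whole access completes without raising an address error.
	 * The operand offsets are mirrored between the big- and
	 * little-endian variants because the "left" and "right" parts of
	 * the value swap ends.
	 */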
	case lhu_op:
		if (!access_ok(VERIFY_READ, addr, 2))
			goto sigbus;

		__asm__ __volatile__ (
			".set\tnoat\n"
#ifdef __BIG_ENDIAN
			"1:\tlbu\t%0, 0(%2)\n"
			"2:\tlbu\t$1, 1(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tlbu\t%0, 1(%2)\n"
			"2:\tlbu\t$1, 0(%2)\n\t"
#endif
			"sll\t%0, 0x8\n\t"
			"or\t%0, $1\n\t"
			"li\t%1, 0\n"
			"3:\t.set\tat\n\t"
			".section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%1, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=&r" (value), "=r" (res)
			: "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;

	case lwu_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 4))
			goto sigbus;

		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tlwl\t%0, (%2)\n"
			"2:\tlwr\t%0, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tlwl\t%0, 3(%2)\n"
			"2:\tlwr\t%0, (%2)\n\t"
#endif
			"dsll\t%0, %0, 32\n\t"
			"dsrl\t%0, %0, 32\n\t"
			"li\t%1, 0\n"
			"3:\t.section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%1, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=&r" (value), "=r" (res)
			: "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
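		/*
		 * The dsll/dsrl by 32 above clears the upper half of the
		 * destination register, giving the zero-extension that lwu
		 * requires; lwl/lwr on their own produce a sign-extended
		 * 32-bit result.
		 */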
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case ld_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_READ, addr, 8))
			goto sigbus;

		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tldl\t%0, (%2)\n"
			"2:\tldr\t%0, 7(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tldl\t%0, 7(%2)\n"
			"2:\tldr\t%0, (%2)\n\t"
#endif
			"li\t%1, 0\n"
			"3:\t.section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%1, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=&r" (value), "=r" (res)
			: "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		regs->regs[insn.i_format.rt] = value;
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case sh_op:
		if (!access_ok(VERIFY_WRITE, addr, 2))
			goto sigbus;

		value = regs->regs[insn.i_format.rt];
		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			".set\tnoat\n"
			"1:\tsb\t%1, 1(%2)\n\t"
			"srl\t$1, %1, 0x8\n"
			"2:\tsb\t$1, 0(%2)\n\t"
			".set\tat\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			".set\tnoat\n"
			"1:\tsb\t%1, 0(%2)\n\t"
			"srl\t$1, %1, 0x8\n"
			"2:\tsb\t$1, 1(%2)\n\t"
			".set\tat\n\t"
#endif
			"li\t%0, 0\n"
			"3:\n\t"
			".section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%0, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=r" (res)
			: "r" (value), "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		break;

	case sw_op:
		if (!access_ok(VERIFY_WRITE, addr, 4))
			goto sigbus;

		value = regs->regs[insn.i_format.rt];
		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tswl\t%1,(%2)\n"
			"2:\tswr\t%1, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tswl\t%1, 3(%2)\n"
			"2:\tswr\t%1, (%2)\n\t"
#endif
			"li\t%0, 0\n"
			"3:\n\t"
			".section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%0, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=r" (res)
			: "r" (value), "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		break;

	case sd_op:
#ifdef CONFIG_64BIT
		/*
		 * A 32-bit kernel might be running on a 64-bit processor.  But
		 * if we're on a 32-bit processor and an i-cache incoherency
		 * or race makes us see a 64-bit instruction here the sdl/sdr
		 * would blow up, so for now we don't handle unaligned 64-bit
		 * instructions on 32-bit kernels.
		 */
		if (!access_ok(VERIFY_WRITE, addr, 8))
			goto sigbus;

		value = regs->regs[insn.i_format.rt];
		__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
			"1:\tsdl\t%1,(%2)\n"
			"2:\tsdr\t%1, 7(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
			"1:\tsdl\t%1, 7(%2)\n"
			"2:\tsdr\t%1, (%2)\n\t"
#endif
			"li\t%0, 0\n"
			"3:\n\t"
			".section\t.fixup,\"ax\"\n\t"
			"4:\tli\t%0, %3\n\t"
			"j\t3b\n\t"
			".previous\n\t"
			".section\t__ex_table,\"a\"\n\t"
			STR(PTR)"\t1b, 4b\n\t"
			STR(PTR)"\t2b, 4b\n\t"
			".previous"
			: "=r" (res)
			: "r" (value), "r" (addr), "i" (-EFAULT));
		if (res)
			goto fault;
		compute_return_epc(regs);
		break;
#endif /* CONFIG_64BIT */

		/* Cannot handle 64-bit instructions in 32-bit kernel */
		goto sigill;

	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
		/*
		 * I herewith declare: this does not happen.  So send SIGBUS.
		 */
		goto sigbus;
	/*
	 * COP2 is available to the implementor for application-specific
	 * use.  It's up to applications to register a notifier chain and
	 * do whatever they have to do, including possibly sending signals.
	 */
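	/*
	 * For illustration only: a minimal, hypothetical sketch of such a
	 * notifier, assuming the cu2_notifier() registration helper and the
	 * CU2_* action codes declared in <asm/cop2.h>.  The name
	 * my_cu2_call is made up; the data argument is the struct pt_regs
	 * passed to cu2_notifier_call_chain() below.
	 *
	 *	static int my_cu2_call(struct notifier_block *nb,
	 *			       unsigned long action, void *data)
	 *	{
	 *		switch (action) {
	 *		case CU2_LWC2_OP:
	 *		case CU2_LDC2_OP:
	 *		case CU2_SWC2_OP:
	 *		case CU2_SDC2_OP:
	 *			force_sig(SIGBUS, current);
	 *			return NOTIFY_OK;
	 *		}
	 *		return NOTIFY_DONE;
	 *	}
	 *
	 * Registration, e.g. from an initcall:
	 *
	 *	cu2_notifier(my_cu2_call, 0);
	 */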
	case lwc2_op:
		cu2_notifier_call_chain(CU2_LWC2_OP, regs);
		break;

	case ldc2_op:
		cu2_notifier_call_chain(CU2_LDC2_OP, regs);
		break;

	case swc2_op:
		cu2_notifier_call_chain(CU2_SWC2_OP, regs);
		break;

	case sdc2_op:
		cu2_notifier_call_chain(CU2_SDC2_OP, regs);
		break;

	default:
		/*
		 * Pheeee...  We encountered a yet unknown instruction or
		 * cache coherence problem.  Die sucker, die ...
		 */
		goto sigill;
	}

#ifdef CONFIG_DEBUG_FS
	unaligned_instructions++;
#endif

	return;

fault:
	/* Did we have an exception handler installed? */
	if (fixup_exception(regs))
		return;

	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGSEGV, current);

	return;

sigbus:
	die_if_kernel("Unhandled kernel unaligned access", regs);
	force_sig(SIGBUS, current);

	return;

sigill:
	die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs);
	force_sig(SIGILL, current);
}
asmlinkage void do_ade(struct pt_regs *regs)
{
	unsigned int __user *pc;
	mm_segment_t seg;

	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
			1, regs, regs->cp0_badvaddr);

	/*
	 * Did we catch a fault trying to load an instruction?
	 * Or are we running in MIPS16 mode?
	 */
	if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1))
		goto sigbus;

	pc = (unsigned int __user *) exception_epc(regs);
	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
		goto sigbus;
	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
		goto sigbus;
	else if (unaligned_action == UNALIGNED_ACTION_SHOW)
		show_registers(regs);

	/*
	 * Do branch emulation only if we didn't forward the exception.
	 * This is all so ugly ...
	 */
	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);
	emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
	set_fs(seg);

	return;

sigbus:
	die_if_kernel("Kernel unaligned instruction access", regs);
	force_sig(SIGBUS, current);

	/*
	 * XXX On return from the signal handler we should advance the epc
	 */
}
#ifdef CONFIG_DEBUG_FS
extern struct dentry *mips_debugfs_dir;
static int __init debugfs_unaligned(void)
{
	struct dentry *d;

	if (!mips_debugfs_dir)
		return -ENODEV;
	d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
			       mips_debugfs_dir, &unaligned_instructions);
	if (!d)
		return -ENOMEM;
	d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
			       mips_debugfs_dir, &unaligned_action);
	if (!d)
		return -ENOMEM;
	return 0;
}
__initcall(debugfs_unaligned);
#endif