/*
 * cp1emu.c: a MIPS coprocessor 1 (fpu) instruction emulator
 *
 * MIPS floating point support
 * Copyright (C) 1994-2000 Algorithmics Ltd.
 *
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * A complete emulator for MIPS coprocessor 1 instructions.  This is
 * required for #float(switch) or #float(trap), where it catches all
 * COP1 instructions via the "CoProcessor Unusable" exception.
 *
 * More surprisingly it is also required for #float(ieee), to help out
 * the hardware fpu at the boundaries of the IEEE-754 representation
 * (denormalised values, infinities, underflow, etc).  It is made
 * quite nasty because emulation of some non-COP1 instructions is
 * required, e.g. in branch delay slots.
 *
 * Note if you know that you won't have an fpu, then you'll get much
 * better performance by compiling with -msoft-float!
 */
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/perf_event.h>

#include <asm/inst.h>
#include <asm/bootinfo.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/signal.h>
#include <asm/mipsregs.h>

#include <asm/fpu_emulator.h>
#include <asm/uaccess.h>
#include <asm/branch.h>

#include "ieee754.h"

/* Strap kernel emulator for full MIPS IV emulation */

#ifdef __mips
#undef __mips
#endif
#define __mips 4

/* Function which emulates a floating point instruction. */

static int fpu_emu(struct pt_regs *, struct mips_fpu_struct *,
	mips_instruction);

#if __mips >= 4 && __mips != 32
static int fpux_emu(struct pt_regs *,
	struct mips_fpu_struct *, mips_instruction, void *__user *);
#endif

/* Further private data for which no space exists in mips_fpu_struct */

#ifdef CONFIG_DEBUG_FS
DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
#endif

/* Control registers */

#define FPCREG_RID	0	/* $0  = revision id */
#define FPCREG_CSR	31	/* $31 = csr */

/* Determine rounding mode from the RM bits of the FCSR */
#define modeindex(v) ((v) & FPU_CSR_RM)

/* Convert Mips rounding mode (0..3) to IEEE library modes. */
static const unsigned char ieee_rm[4] = {
	[FPU_CSR_RN] = IEEE754_RN,
	[FPU_CSR_RZ] = IEEE754_RZ,
	[FPU_CSR_RU] = IEEE754_RU,
	[FPU_CSR_RD] = IEEE754_RD,
};

/* Convert IEEE library modes to Mips rounding mode (0..3). */
static const unsigned char mips_rm[4] = {
	[IEEE754_RN] = FPU_CSR_RN,
	[IEEE754_RZ] = FPU_CSR_RZ,
	[IEEE754_RD] = FPU_CSR_RD,
	[IEEE754_RU] = FPU_CSR_RU,
};
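
/*
 * Usage sketch: switching between the two rounding-mode encodings is a
 * single table lookup in either direction, as done in
 * fpu_emulator_cop1Handler() below:
 *
 *	ieee754_csr.rm = ieee_rm[ieee754_csr.rm];	MIPS mode -> library mode
 *	ieee754_csr.rm = mips_rm[ieee754_csr.rm];	library mode -> MIPS mode
 *
 * The two tables are inverses of each other; both encodings describe the
 * same four IEEE-754 rounding modes (nearest, toward zero, up, down).
 */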
#if __mips >= 4
/* convert condition code register number to csr bit */
static const unsigned int fpucondbit[8] = {
	FPU_CSR_COND0,
	FPU_CSR_COND1,
	FPU_CSR_COND2,
	FPU_CSR_COND3,
	FPU_CSR_COND4,
	FPU_CSR_COND5,
	FPU_CSR_COND6,
	FPU_CSR_COND7
};
#endif

/*
 * Redundant with logic already in kernel/branch.c,
 * embedded in compute_return_epc.  At some point,
 * a single subroutine should be used across both
 * modules.
 */
static int isBranchInstr(mips_instruction * i)
{
	switch (MIPSInst_OPCODE(*i)) {
	case spec_op:
		switch (MIPSInst_FUNC(*i)) {
		case jalr_op:
		case jr_op:
			return 1;
		}
		break;

	case bcond_op:
		switch (MIPSInst_RT(*i)) {
		case bltz_op:
		case bgez_op:
		case bltzl_op:
		case bgezl_op:
		case bltzal_op:
		case bgezal_op:
		case bltzall_op:
		case bgezall_op:
			return 1;
		}
		break;

	case j_op:
	case jal_op:
	case jalx_op:
	case beq_op:
	case bne_op:
	case blez_op:
	case bgtz_op:
	case beql_op:
	case bnel_op:
	case blezl_op:
	case bgtzl_op:
		return 1;

	case cop0_op:
	case cop1_op:
	case cop2_op:
	case cop1x_op:
		if (MIPSInst_RS(*i) == bc_op)
			return 1;
		break;
	}

	return 0;
}

/*
 * In the Linux kernel, we support selection of FPR format on the
 * basis of the Status.FR bit.  If an FPU is not present, the FR bit
 * is hardwired to zero, which would imply a 32-bit FPU even for
 * 64-bit CPUs.  For 64-bit kernels with no FPU we use TIF_32BIT_REGS
 * as a proxy for the FR bit so that a 64-bit FPU is emulated.  In any
 * case, for a 32-bit kernel which uses the O32 MIPS ABI, only the
 * even FPRs are used (Status.FR = 0).
 */
static inline int cop1_64bit(struct pt_regs *xcp)
{
	if (cpu_has_fpu)
		return xcp->cp0_status & ST0_FR;
#ifdef CONFIG_64BIT
	return !test_thread_flag(TIF_32BIT_REGS);
#else
	return 0;
#endif
}

#define SIFROMREG(si, x) ((si) = cop1_64bit(xcp) || !(x & 1) ? \
			(int)ctx->fpr[x] : (int)(ctx->fpr[x & ~1] >> 32))

#define SITOREG(si, x)	(ctx->fpr[x & ~(cop1_64bit(xcp) == 0)] = \
			cop1_64bit(xcp) || !(x & 1) ? \
			ctx->fpr[x & ~1] >> 32 << 32 | (u32)(si) : \
			ctx->fpr[x & ~1] << 32 >> 32 | (u64)(si) << 32)

#define DIFROMREG(di, x) ((di) = ctx->fpr[x & ~(cop1_64bit(xcp) == 0)])
#define DITOREG(di, x)	(ctx->fpr[x & ~(cop1_64bit(xcp) == 0)] = (di))

#define SPFROMREG(sp, x) SIFROMREG((sp).bits, x)
#define SPTOREG(sp, x)	SITOREG((sp).bits, x)
#define DPFROMREG(dp, x)	DIFROMREG((dp).bits, x)
#define DPTOREG(dp, x)	DITOREG((dp).bits, x)
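
/*
 * Layout note (illustrative summary of the macros above): with a 64-bit
 * FPU (cop1_64bit() true) every FPR is an independent 64-bit element of
 * ctx->fpr[].  With a 32-bit FPU (Status.FR = 0) only the even entries
 * are used, and an odd single-precision register number selects the
 * upper 32 bits of the preceding even register.  For example, assuming
 * FR = 0:
 *
 *	SIFROMREG(val, 5);	reads bits 63..32 of ctx->fpr[4]
 *	SITOREG(val, 4);	replaces bits 31..0 of ctx->fpr[4],
 *				preserving the upper half
 */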
/*
 * Emulate the single floating point instruction pointed at by EPC.
 * Two instructions if the instruction is in a branch delay slot.
 */
static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
		void *__user *fault_addr)
{
	mips_instruction ir;
	unsigned long emulpc, contpc;
	unsigned int cond;

	if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) {
		MIPS_FPU_EMU_INC_STATS(errors);
		*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
		return SIGBUS;
	}
	if (__get_user(ir, (mips_instruction __user *) xcp->cp0_epc)) {
		MIPS_FPU_EMU_INC_STATS(errors);
		*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
		return SIGSEGV;
	}

	/* XXX NEC Vr54xx bug workaround */
	if ((xcp->cp0_cause & CAUSEF_BD) && !isBranchInstr(&ir))
		xcp->cp0_cause &= ~CAUSEF_BD;

	if (xcp->cp0_cause & CAUSEF_BD) {
		/*
		 * The instruction to be emulated is in a branch delay slot
		 * which means that we have to emulate the branch instruction
		 * BEFORE we do the cop1 instruction.
		 *
		 * This branch could be a COP1 branch, but in that case we
		 * would have had a trap for that instruction, and would not
		 * come through this route.
		 *
		 * Linux MIPS branch emulator operates on context, updating the
		 * cp0_epc.
		 */
		emulpc = xcp->cp0_epc + 4;	/* Snapshot emulation target */

		if (__compute_return_epc(xcp) < 0) {
#ifdef CP1DBG
			printk("failed to emulate branch at %p\n",
				(void *) (xcp->cp0_epc));
#endif
			return SIGILL;
		}
		if (!access_ok(VERIFY_READ, emulpc, sizeof(mips_instruction))) {
			MIPS_FPU_EMU_INC_STATS(errors);
			*fault_addr = (mips_instruction __user *)emulpc;
			return SIGBUS;
		}
		if (__get_user(ir, (mips_instruction __user *) emulpc)) {
			MIPS_FPU_EMU_INC_STATS(errors);
			*fault_addr = (mips_instruction __user *)emulpc;
			return SIGSEGV;
		}
		/* __compute_return_epc() will have updated cp0_epc */
		contpc = xcp->cp0_epc;
		/* In order not to confuse ptrace() et al, tweak context */
		xcp->cp0_epc = emulpc - 4;
	} else {
		emulpc = xcp->cp0_epc;
		contpc = xcp->cp0_epc + 4;
	}

      emul:
	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, xcp, 0);
	MIPS_FPU_EMU_INC_STATS(emulated);
	switch (MIPSInst_OPCODE(ir)) {
	case ldc1_op:{
		u64 __user *va = (u64 __user *) (xcp->regs[MIPSInst_RS(ir)] +
			MIPSInst_SIMM(ir));
		u64 val;

		MIPS_FPU_EMU_INC_STATS(loads);

		if (!access_ok(VERIFY_READ, va, sizeof(u64))) {
			MIPS_FPU_EMU_INC_STATS(errors);
			*fault_addr = va;
			return SIGBUS;
		}
		if (__get_user(val, va)) {
			MIPS_FPU_EMU_INC_STATS(errors);
			*fault_addr = va;
			return SIGSEGV;
		}
		DITOREG(val, MIPSInst_RT(ir));
		break;
	}

	case sdc1_op:{
		u64 __user *va = (u64 __user *) (xcp->regs[MIPSInst_RS(ir)] +
			MIPSInst_SIMM(ir));
		u64 val;

		MIPS_FPU_EMU_INC_STATS(stores);
		DIFROMREG(val, MIPSInst_RT(ir));
		if (!access_ok(VERIFY_WRITE, va, sizeof(u64))) {
			MIPS_FPU_EMU_INC_STATS(errors);
			*fault_addr = va;
			return SIGBUS;
		}
		if (__put_user(val, va)) {
			MIPS_FPU_EMU_INC_STATS(errors);
			*fault_addr = va;
			return SIGSEGV;
		}
		break;
	}

	case lwc1_op:{
		u32 __user *va = (u32 __user *) (xcp->regs[MIPSInst_RS(ir)] +
			MIPSInst_SIMM(ir));
		u32 val;

		MIPS_FPU_EMU_INC_STATS(loads);
		if (!access_ok(VERIFY_READ, va, sizeof(u32))) {
			MIPS_FPU_EMU_INC_STATS(errors);
			*fault_addr = va;
			return SIGBUS;
		}
		if (__get_user(val, va)) {
			MIPS_FPU_EMU_INC_STATS(errors);
			*fault_addr = va;
			return SIGSEGV;
		}
		SITOREG(val, MIPSInst_RT(ir));
		break;
	}

	case swc1_op:{
		u32 __user *va = (u32 __user *) (xcp->regs[MIPSInst_RS(ir)] +
			MIPSInst_SIMM(ir));
		u32 val;

		MIPS_FPU_EMU_INC_STATS(stores);
		SIFROMREG(val, MIPSInst_RT(ir));
		if (!access_ok(VERIFY_WRITE, va, sizeof(u32))) {
			MIPS_FPU_EMU_INC_STATS(errors);
			*fault_addr = va;
			return SIGBUS;
		}
		if (__put_user(val, va)) {
			MIPS_FPU_EMU_INC_STATS(errors);
			*fault_addr = va;
			return SIGSEGV;
		}
		break;
	}

	case cop1_op:
		switch (MIPSInst_RS(ir)) {

#if defined(__mips64)
		case dmfc_op:
			/* copregister fs -> gpr[rt] */
			if (MIPSInst_RT(ir) != 0) {
				DIFROMREG(xcp->regs[MIPSInst_RT(ir)],
					MIPSInst_RD(ir));
			}
			break;

		case dmtc_op:
			/* copregister fs <- rt */
			DITOREG(xcp->regs[MIPSInst_RT(ir)], MIPSInst_RD(ir));
			break;
#endif

		case mfc_op:
			/* copregister rd -> gpr[rt] */
			if (MIPSInst_RT(ir) != 0) {
				SIFROMREG(xcp->regs[MIPSInst_RT(ir)],
					MIPSInst_RD(ir));
			}
			break;

		case mtc_op:
			/* copregister rd <- rt */
			SITOREG(xcp->regs[MIPSInst_RT(ir)], MIPSInst_RD(ir));
			break;

		case cfc_op:{
			/* cop control register rd -> gpr[rt] */
			u32 value;

			if (MIPSInst_RD(ir) == FPCREG_CSR) {
				value = ctx->fcr31;
				value = (value & ~FPU_CSR_RM) |
					mips_rm[modeindex(value)];
#ifdef CSRTRACE
				printk("%p gpr[%d]<-csr=%08x\n",
					(void *) (xcp->cp0_epc),
					MIPSInst_RT(ir), value);
#endif
			}
			else if (MIPSInst_RD(ir) == FPCREG_RID)
				value = 0;
			else
				value = 0;
			if (MIPSInst_RT(ir))
				xcp->regs[MIPSInst_RT(ir)] = value;
			break;
		}

		case ctc_op:{
			/* copregister rd <- rt */
			u32 value;

			if (MIPSInst_RT(ir) == 0)
				value = 0;
			else
				value = xcp->regs[MIPSInst_RT(ir)];

			/* we only have one writable control reg
			 */
			if (MIPSInst_RD(ir) == FPCREG_CSR) {
#ifdef CSRTRACE
				printk("%p gpr[%d]->csr=%08x\n",
					(void *) (xcp->cp0_epc),
					MIPSInst_RT(ir), value);
#endif
				/*
				 * Don't write reserved bits,
				 * and convert to ieee library modes
				 */
				ctx->fcr31 = (value &
					~(FPU_CSR_RSVD | FPU_CSR_RM)) |
					ieee_rm[modeindex(value)];
			}
			if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
				return SIGFPE;
			}
			break;
		}

		case bc_op:{
			int likely = 0;

			if (xcp->cp0_cause & CAUSEF_BD)
				return SIGILL;

#if __mips >= 4
			cond = ctx->fcr31 & fpucondbit[MIPSInst_RT(ir) >> 2];
#else
			cond = ctx->fcr31 & FPU_CSR_COND;
#endif
			switch (MIPSInst_RT(ir) & 3) {
			case bcfl_op:
				likely = 1;
			case bcf_op:
				cond = !cond;
				break;
			case bctl_op:
				likely = 1;
			case bct_op:
				break;
			default:
				/* that's an illegal instruction */
				return SIGILL;
			}

			xcp->cp0_cause |= CAUSEF_BD;
			if (cond) {
				/* branch taken: emulate dslot
				 * instruction
				 */
				xcp->cp0_epc += 4;
				contpc = (xcp->cp0_epc +
					(MIPSInst_SIMM(ir) << 2));

				if (!access_ok(VERIFY_READ, xcp->cp0_epc,
					       sizeof(mips_instruction))) {
					MIPS_FPU_EMU_INC_STATS(errors);
					*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
					return SIGBUS;
				}
				if (__get_user(ir,
					       (mips_instruction __user *) xcp->cp0_epc)) {
					MIPS_FPU_EMU_INC_STATS(errors);
					*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
					return SIGSEGV;
				}

				switch (MIPSInst_OPCODE(ir)) {
				case lwc1_op:
				case swc1_op:
#if (__mips >= 2 || defined(__mips64))
				case ldc1_op:
				case sdc1_op:
#endif
				case cop1_op:
#if __mips >= 4 && __mips != 32
				case cop1x_op:
#endif
					/* it's one of ours */
					goto emul;
#if __mips >= 4
				case spec_op:
					if (MIPSInst_FUNC(ir) == movc_op)
						goto emul;
					break;
#endif
				}

				/*
				 * Single step the non-cp1
				 * instruction in the dslot
				 */
				return mips_dsemul(xcp, ir, contpc);
			}
			else {
				/* branch not taken */
				if (likely) {
					/*
					 * branch likely nullifies
					 * dslot if not taken
					 */
					xcp->cp0_epc += 4;
					contpc += 4;
					/*
					 * else continue & execute
					 * dslot as normal insn
					 */
				}
			}
			break;
		}

		default:
			if (!(MIPSInst_RS(ir) & 0x10))
				return SIGILL;
			{
				int sig;

				/* a real fpu computation instruction */
				if ((sig = fpu_emu(xcp, ctx, ir)))
					return sig;
			}
		}
		break;

#if __mips >= 4 && __mips != 32
	case cop1x_op:{
		int sig = fpux_emu(xcp, ctx, ir, fault_addr);
		if (sig)
			return sig;
		break;
	}
#endif

#if __mips >= 4
	case spec_op:
		if (MIPSInst_FUNC(ir) != movc_op)
			return SIGILL;
		cond = fpucondbit[MIPSInst_RT(ir) >> 2];
		if (((ctx->fcr31 & cond) != 0) == ((MIPSInst_RT(ir) & 1) != 0))
			xcp->regs[MIPSInst_RD(ir)] =
				xcp->regs[MIPSInst_RS(ir)];
		break;
#endif

	default:
		return SIGILL;
	}

	/* we did it !! */
	xcp->cp0_epc = contpc;
	xcp->cp0_cause &= ~CAUSEF_BD;

	return 0;
}
/*
 * Conversion table from MIPS compare ops 48-63
 * cond = ieee754dp_cmp(x,y,IEEE754_UN,sig);
 */
static const unsigned char cmptab[8] = {
	0,			/* cmp_0 (sig) cmp_sf */
	IEEE754_CUN,		/* cmp_un (sig) cmp_ngle */
	IEEE754_CEQ,		/* cmp_eq (sig) cmp_seq */
	IEEE754_CEQ | IEEE754_CUN,	/* cmp_ueq (sig) cmp_ngl */
	IEEE754_CLT,		/* cmp_olt (sig) cmp_lt */
	IEEE754_CLT | IEEE754_CUN,	/* cmp_ult (sig) cmp_nge */
	IEEE754_CLT | IEEE754_CEQ,	/* cmp_ole (sig) cmp_le */
	IEEE754_CLT | IEEE754_CEQ | IEEE754_CUN,	/* cmp_ule (sig) cmp_ngt */
};
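
/*
 * Example of how the table is consumed (see the compare handling in
 * fpu_emu() below): the low three bits of the compare function code index
 * cmptab[] to pick the unordered/equal/less predicate set, and bit 3
 * selects the signalling variant:
 *
 *	cmpop = MIPSInst_FUNC(ir) - fcmp_op;
 *	rv.w  = ieee754dp_cmp(fs, ft, cmptab[cmpop & 0x7], cmpop & 0x8);
 */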
#if __mips >= 4 && __mips != 32

/*
 * Additional MIPS4 instructions
 */

#define DEF3OP(name, p, f1, f2, f3) \
static ieee754##p fpemu_##p##_##name(ieee754##p r, ieee754##p s, \
	ieee754##p t) \
{ \
	struct _ieee754_csr ieee754_csr_save; \
	s = f1(s, t); \
	ieee754_csr_save = ieee754_csr; \
	s = f2(s, r); \
	ieee754_csr_save.cx |= ieee754_csr.cx; \
	ieee754_csr_save.sx |= ieee754_csr.sx; \
	s = f3(s); \
	ieee754_csr.cx |= ieee754_csr_save.cx; \
	ieee754_csr.sx |= ieee754_csr_save.sx; \
	return s; \
}
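
/*
 * Expansion sketch: DEF3OP(madd, sp, ieee754sp_mul, ieee754sp_add, )
 * generates fpemu_sp_madd(r, s, t), which computes (s * t) + r.  The
 * ieee754_csr save/merge sequence accumulates the cause (cx) and sticky
 * (sx) exception bits across the individual library calls, so flags
 * raised by the multiply are not lost when the add and the optional
 * final step (f3, negation for the nmadd/nmsub variants) update the csr.
 */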
static ieee754dp fpemu_dp_recip(ieee754dp d)
{
	return ieee754dp_div(ieee754dp_one(0), d);
}

static ieee754dp fpemu_dp_rsqrt(ieee754dp d)
{
	return ieee754dp_div(ieee754dp_one(0), ieee754dp_sqrt(d));
}

static ieee754sp fpemu_sp_recip(ieee754sp s)
{
	return ieee754sp_div(ieee754sp_one(0), s);
}

static ieee754sp fpemu_sp_rsqrt(ieee754sp s)
{
	return ieee754sp_div(ieee754sp_one(0), ieee754sp_sqrt(s));
}

DEF3OP(madd, sp, ieee754sp_mul, ieee754sp_add, );
DEF3OP(msub, sp, ieee754sp_mul, ieee754sp_sub, );
DEF3OP(nmadd, sp, ieee754sp_mul, ieee754sp_add, ieee754sp_neg);
DEF3OP(nmsub, sp, ieee754sp_mul, ieee754sp_sub, ieee754sp_neg);
DEF3OP(madd, dp, ieee754dp_mul, ieee754dp_add, );
DEF3OP(msub, dp, ieee754dp_mul, ieee754dp_sub, );
DEF3OP(nmadd, dp, ieee754dp_mul, ieee754dp_add, ieee754dp_neg);
DEF3OP(nmsub, dp, ieee754dp_mul, ieee754dp_sub, ieee754dp_neg);
static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
	mips_instruction ir, void *__user *fault_addr)
{
	unsigned rcsr = 0;	/* resulting csr */

	MIPS_FPU_EMU_INC_STATS(cp1xops);

	switch (MIPSInst_FMA_FFMT(ir)) {
	case s_fmt:{		/* 0 */

		ieee754sp(*handler) (ieee754sp, ieee754sp, ieee754sp);
		ieee754sp fd, fr, fs, ft;
		u32 __user *va;
		u32 val;

		switch (MIPSInst_FUNC(ir)) {
		case lwxc1_op:
			va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
				xcp->regs[MIPSInst_FT(ir)]);

			MIPS_FPU_EMU_INC_STATS(loads);
			if (!access_ok(VERIFY_READ, va, sizeof(u32))) {
				MIPS_FPU_EMU_INC_STATS(errors);
				*fault_addr = va;
				return SIGBUS;
			}
			if (__get_user(val, va)) {
				MIPS_FPU_EMU_INC_STATS(errors);
				*fault_addr = va;
				return SIGSEGV;
			}
			SITOREG(val, MIPSInst_FD(ir));
			break;

		case swxc1_op:
			va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
				xcp->regs[MIPSInst_FT(ir)]);

			MIPS_FPU_EMU_INC_STATS(stores);

			SIFROMREG(val, MIPSInst_FS(ir));
			if (!access_ok(VERIFY_WRITE, va, sizeof(u32))) {
				MIPS_FPU_EMU_INC_STATS(errors);
				*fault_addr = va;
				return SIGBUS;
			}
			if (put_user(val, va)) {
				MIPS_FPU_EMU_INC_STATS(errors);
				*fault_addr = va;
				return SIGSEGV;
			}
			break;

		case madd_s_op:
			handler = fpemu_sp_madd;
			goto scoptop;
		case msub_s_op:
			handler = fpemu_sp_msub;
			goto scoptop;
		case nmadd_s_op:
			handler = fpemu_sp_nmadd;
			goto scoptop;
		case nmsub_s_op:
			handler = fpemu_sp_nmsub;
			goto scoptop;

	      scoptop:
			SPFROMREG(fr, MIPSInst_FR(ir));
			SPFROMREG(fs, MIPSInst_FS(ir));
			SPFROMREG(ft, MIPSInst_FT(ir));
			fd = (*handler) (fr, fs, ft);
			SPTOREG(fd, MIPSInst_FD(ir));

	      copcsr:
			if (ieee754_cxtest(IEEE754_INEXACT))
				rcsr |= FPU_CSR_INE_X | FPU_CSR_INE_S;
			if (ieee754_cxtest(IEEE754_UNDERFLOW))
				rcsr |= FPU_CSR_UDF_X | FPU_CSR_UDF_S;
			if (ieee754_cxtest(IEEE754_OVERFLOW))
				rcsr |= FPU_CSR_OVF_X | FPU_CSR_OVF_S;
			if (ieee754_cxtest(IEEE754_INVALID_OPERATION))
				rcsr |= FPU_CSR_INV_X | FPU_CSR_INV_S;

			ctx->fcr31 = (ctx->fcr31 & ~FPU_CSR_ALL_X) | rcsr;
			if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
				/*printk ("SIGFPE: fpu csr = %08x\n",
				   ctx->fcr31); */
				return SIGFPE;
			}

			break;

		default:
			return SIGILL;
		}
		break;
	}

	case d_fmt:{		/* 1 */
		ieee754dp(*handler) (ieee754dp, ieee754dp, ieee754dp);
		ieee754dp fd, fr, fs, ft;
		u64 __user *va;
		u64 val;

		switch (MIPSInst_FUNC(ir)) {
		case ldxc1_op:
			va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
				xcp->regs[MIPSInst_FT(ir)]);

			MIPS_FPU_EMU_INC_STATS(loads);
			if (!access_ok(VERIFY_READ, va, sizeof(u64))) {
				MIPS_FPU_EMU_INC_STATS(errors);
				*fault_addr = va;
				return SIGBUS;
			}
			if (__get_user(val, va)) {
				MIPS_FPU_EMU_INC_STATS(errors);
				*fault_addr = va;
				return SIGSEGV;
			}
			DITOREG(val, MIPSInst_FD(ir));
			break;

		case sdxc1_op:
			va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
				xcp->regs[MIPSInst_FT(ir)]);

			MIPS_FPU_EMU_INC_STATS(stores);

			DIFROMREG(val, MIPSInst_FS(ir));
			if (!access_ok(VERIFY_WRITE, va, sizeof(u64))) {
				MIPS_FPU_EMU_INC_STATS(errors);
				*fault_addr = va;
				return SIGBUS;
			}
			if (__put_user(val, va)) {
				MIPS_FPU_EMU_INC_STATS(errors);
				*fault_addr = va;
				return SIGSEGV;
			}
			break;

		case madd_d_op:
			handler = fpemu_dp_madd;
			goto dcoptop;
		case msub_d_op:
			handler = fpemu_dp_msub;
			goto dcoptop;
		case nmadd_d_op:
			handler = fpemu_dp_nmadd;
			goto dcoptop;
		case nmsub_d_op:
			handler = fpemu_dp_nmsub;
			goto dcoptop;

	      dcoptop:
			DPFROMREG(fr, MIPSInst_FR(ir));
			DPFROMREG(fs, MIPSInst_FS(ir));
			DPFROMREG(ft, MIPSInst_FT(ir));
			fd = (*handler) (fr, fs, ft);
			DPTOREG(fd, MIPSInst_FD(ir));
			goto copcsr;

		default:
			return SIGILL;
		}
		break;
	}

	case 0x7:		/* 7 */
		if (MIPSInst_FUNC(ir) != pfetch_op) {
			return SIGILL;
		}
		/* ignore prefx operation */
		break;

	default:
		return SIGILL;
	}

	return 0;
}
#endif
/*
 * Emulate a single COP1 arithmetic instruction.
 */
static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
	mips_instruction ir)
{
	int rfmt;		/* resulting format */
	unsigned rcsr = 0;	/* resulting csr */
	unsigned cond;
	union {
		ieee754dp d;
		ieee754sp s;
		int w;
#ifdef __mips64
		s64 l;
#endif
	} rv;			/* resulting value */

	MIPS_FPU_EMU_INC_STATS(cp1ops);
	switch (rfmt = (MIPSInst_FFMT(ir) & 0xf)) {
	case s_fmt:{		/* 0 */
		union {
			ieee754sp(*b) (ieee754sp, ieee754sp);
			ieee754sp(*u) (ieee754sp);
		} handler;

		switch (MIPSInst_FUNC(ir)) {
			/* binary ops */
		case fadd_op:
			handler.b = ieee754sp_add;
			goto scopbop;
		case fsub_op:
			handler.b = ieee754sp_sub;
			goto scopbop;
		case fmul_op:
			handler.b = ieee754sp_mul;
			goto scopbop;
		case fdiv_op:
			handler.b = ieee754sp_div;
			goto scopbop;

			/* unary ops */
#if __mips >= 2 || defined(__mips64)
		case fsqrt_op:
			handler.u = ieee754sp_sqrt;
			goto scopuop;
#endif
#if __mips >= 4 && __mips != 32
		case frsqrt_op:
			handler.u = fpemu_sp_rsqrt;
			goto scopuop;
		case frecip_op:
			handler.u = fpemu_sp_recip;
			goto scopuop;
#endif
#if __mips >= 4
		case fmovc_op:
			cond = fpucondbit[MIPSInst_FT(ir) >> 2];
			if (((ctx->fcr31 & cond) != 0) !=
				((MIPSInst_FT(ir) & 1) != 0))
				return 0;
			SPFROMREG(rv.s, MIPSInst_FS(ir));
			break;
		case fmovz_op:
			if (xcp->regs[MIPSInst_FT(ir)] != 0)
				return 0;
			SPFROMREG(rv.s, MIPSInst_FS(ir));
			break;
		case fmovn_op:
			if (xcp->regs[MIPSInst_FT(ir)] == 0)
				return 0;
			SPFROMREG(rv.s, MIPSInst_FS(ir));
			break;
#endif
		case fabs_op:
			handler.u = ieee754sp_abs;
			goto scopuop;
		case fneg_op:
			handler.u = ieee754sp_neg;
			goto scopuop;
		case fmov_op:
			/* an easy one */
			SPFROMREG(rv.s, MIPSInst_FS(ir));
			goto copcsr;

			/* binary op on handler */
	      scopbop:
			{
				ieee754sp fs, ft;

				SPFROMREG(fs, MIPSInst_FS(ir));
				SPFROMREG(ft, MIPSInst_FT(ir));

				rv.s = (*handler.b) (fs, ft);
				goto copcsr;
			}
	      scopuop:
			{
				ieee754sp fs;

				SPFROMREG(fs, MIPSInst_FS(ir));
				rv.s = (*handler.u) (fs);
				goto copcsr;
			}
	      copcsr:
			if (ieee754_cxtest(IEEE754_INEXACT))
				rcsr |= FPU_CSR_INE_X | FPU_CSR_INE_S;
			if (ieee754_cxtest(IEEE754_UNDERFLOW))
				rcsr |= FPU_CSR_UDF_X | FPU_CSR_UDF_S;
			if (ieee754_cxtest(IEEE754_OVERFLOW))
				rcsr |= FPU_CSR_OVF_X | FPU_CSR_OVF_S;
			if (ieee754_cxtest(IEEE754_ZERO_DIVIDE))
				rcsr |= FPU_CSR_DIV_X | FPU_CSR_DIV_S;
			if (ieee754_cxtest(IEEE754_INVALID_OPERATION))
				rcsr |= FPU_CSR_INV_X | FPU_CSR_INV_S;
			break;

			/* unary conv ops */
		case fcvts_op:
			return SIGILL;	/* not defined */
		case fcvtd_op:{
			ieee754sp fs;

			SPFROMREG(fs, MIPSInst_FS(ir));
			rv.d = ieee754dp_fsp(fs);
			rfmt = d_fmt;
			goto copcsr;
		}
		case fcvtw_op:{
			ieee754sp fs;

			SPFROMREG(fs, MIPSInst_FS(ir));
			rv.w = ieee754sp_tint(fs);
			rfmt = w_fmt;
			goto copcsr;
		}

#if __mips >= 2 || defined(__mips64)
		case fround_op:
		case ftrunc_op:
		case fceil_op:
		case ffloor_op:{
			unsigned int oldrm = ieee754_csr.rm;
			ieee754sp fs;

			SPFROMREG(fs, MIPSInst_FS(ir));
			ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))];
			rv.w = ieee754sp_tint(fs);
			ieee754_csr.rm = oldrm;
			rfmt = w_fmt;
			goto copcsr;
		}
#endif /* __mips >= 2 */

#if defined(__mips64)
		case fcvtl_op:{
			ieee754sp fs;

			SPFROMREG(fs, MIPSInst_FS(ir));
			rv.l = ieee754sp_tlong(fs);
			rfmt = l_fmt;
			goto copcsr;
		}

		case froundl_op:
		case ftruncl_op:
		case fceill_op:
		case ffloorl_op:{
			unsigned int oldrm = ieee754_csr.rm;
			ieee754sp fs;

			SPFROMREG(fs, MIPSInst_FS(ir));
			ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))];
			rv.l = ieee754sp_tlong(fs);
			ieee754_csr.rm = oldrm;
			rfmt = l_fmt;
			goto copcsr;
		}
#endif /* defined(__mips64) */

		default:
			if (MIPSInst_FUNC(ir) >= fcmp_op) {
				unsigned cmpop = MIPSInst_FUNC(ir) - fcmp_op;
				ieee754sp fs, ft;

				SPFROMREG(fs, MIPSInst_FS(ir));
				SPFROMREG(ft, MIPSInst_FT(ir));
				rv.w = ieee754sp_cmp(fs, ft,
					cmptab[cmpop & 0x7], cmpop & 0x8);
				rfmt = -1;
				if ((cmpop & 0x8) && ieee754_cxtest
					(IEEE754_INVALID_OPERATION))
					rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
				else
					goto copcsr;
			}
			else {
				return SIGILL;
			}
			break;
		}
		break;
	}
	case d_fmt:{
		union {
			ieee754dp(*b) (ieee754dp, ieee754dp);
			ieee754dp(*u) (ieee754dp);
		} handler;

		switch (MIPSInst_FUNC(ir)) {
			/* binary ops */
		case fadd_op:
			handler.b = ieee754dp_add;
			goto dcopbop;
		case fsub_op:
			handler.b = ieee754dp_sub;
			goto dcopbop;
		case fmul_op:
			handler.b = ieee754dp_mul;
			goto dcopbop;
		case fdiv_op:
			handler.b = ieee754dp_div;
			goto dcopbop;

			/* unary ops */
#if __mips >= 2 || defined(__mips64)
		case fsqrt_op:
			handler.u = ieee754dp_sqrt;
			goto dcopuop;
#endif
#if __mips >= 4 && __mips != 32
		case frsqrt_op:
			handler.u = fpemu_dp_rsqrt;
			goto dcopuop;
		case frecip_op:
			handler.u = fpemu_dp_recip;
			goto dcopuop;
#endif
#if __mips >= 4
		case fmovc_op:
			cond = fpucondbit[MIPSInst_FT(ir) >> 2];
			if (((ctx->fcr31 & cond) != 0) !=
				((MIPSInst_FT(ir) & 1) != 0))
				return 0;
			DPFROMREG(rv.d, MIPSInst_FS(ir));
			break;
		case fmovz_op:
			if (xcp->regs[MIPSInst_FT(ir)] != 0)
				return 0;
			DPFROMREG(rv.d, MIPSInst_FS(ir));
			break;
		case fmovn_op:
			if (xcp->regs[MIPSInst_FT(ir)] == 0)
				return 0;
			DPFROMREG(rv.d, MIPSInst_FS(ir));
			break;
#endif
		case fabs_op:
			handler.u = ieee754dp_abs;
			goto dcopuop;
		case fneg_op:
			handler.u = ieee754dp_neg;
			goto dcopuop;
		case fmov_op:
			/* an easy one */
			DPFROMREG(rv.d, MIPSInst_FS(ir));
			goto copcsr;

			/* binary op on handler */
	      dcopbop:{
			ieee754dp fs, ft;

			DPFROMREG(fs, MIPSInst_FS(ir));
			DPFROMREG(ft, MIPSInst_FT(ir));

			rv.d = (*handler.b) (fs, ft);
			goto copcsr;
		}
	      dcopuop:{
			ieee754dp fs;

			DPFROMREG(fs, MIPSInst_FS(ir));
			rv.d = (*handler.u) (fs);
			goto copcsr;
		}

			/* unary conv ops */
		case fcvts_op:{
			ieee754dp fs;

			DPFROMREG(fs, MIPSInst_FS(ir));
			rv.s = ieee754sp_fdp(fs);
			rfmt = s_fmt;
			goto copcsr;
		}
		case fcvtd_op:
			return SIGILL;	/* not defined */

		case fcvtw_op:{
			ieee754dp fs;

			DPFROMREG(fs, MIPSInst_FS(ir));
			rv.w = ieee754dp_tint(fs);	/* wrong */
			rfmt = w_fmt;
			goto copcsr;
		}

#if __mips >= 2 || defined(__mips64)
		case fround_op:
		case ftrunc_op:
		case fceil_op:
		case ffloor_op:{
			unsigned int oldrm = ieee754_csr.rm;
			ieee754dp fs;

			DPFROMREG(fs, MIPSInst_FS(ir));
			ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))];
			rv.w = ieee754dp_tint(fs);
			ieee754_csr.rm = oldrm;
			rfmt = w_fmt;
			goto copcsr;
		}
#endif

#if defined(__mips64)
		case fcvtl_op:{
			ieee754dp fs;

			DPFROMREG(fs, MIPSInst_FS(ir));
			rv.l = ieee754dp_tlong(fs);
			rfmt = l_fmt;
			goto copcsr;
		}

		case froundl_op:
		case ftruncl_op:
		case fceill_op:
		case ffloorl_op:{
			unsigned int oldrm = ieee754_csr.rm;
			ieee754dp fs;

			DPFROMREG(fs, MIPSInst_FS(ir));
			ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))];
			rv.l = ieee754dp_tlong(fs);
			ieee754_csr.rm = oldrm;
			rfmt = l_fmt;
			goto copcsr;
		}
#endif /* defined(__mips64) */

		default:
			if (MIPSInst_FUNC(ir) >= fcmp_op) {
				unsigned cmpop = MIPSInst_FUNC(ir) - fcmp_op;
				ieee754dp fs, ft;

				DPFROMREG(fs, MIPSInst_FS(ir));
				DPFROMREG(ft, MIPSInst_FT(ir));
				rv.w = ieee754dp_cmp(fs, ft,
					cmptab[cmpop & 0x7], cmpop & 0x8);
				rfmt = -1;
				if ((cmpop & 0x8)
					&&
					ieee754_cxtest
					(IEEE754_INVALID_OPERATION))
					rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
				else
					goto copcsr;
			}
			else {
				return SIGILL;
			}
			break;
		}
		break;
	}
	case w_fmt:{
		ieee754sp fs;

		switch (MIPSInst_FUNC(ir)) {
		case fcvts_op:
			/* convert word to single precision real */
			SPFROMREG(fs, MIPSInst_FS(ir));
			rv.s = ieee754sp_fint(fs.bits);
			rfmt = s_fmt;
			goto copcsr;
		case fcvtd_op:
			/* convert word to double precision real */
			SPFROMREG(fs, MIPSInst_FS(ir));
			rv.d = ieee754dp_fint(fs.bits);
			rfmt = d_fmt;
			goto copcsr;
		default:
			return SIGILL;
		}
		break;
	}

#if defined(__mips64)
	case l_fmt:{
		switch (MIPSInst_FUNC(ir)) {
		case fcvts_op:
			/* convert long to single precision real */
			rv.s = ieee754sp_flong(ctx->fpr[MIPSInst_FS(ir)]);
			rfmt = s_fmt;
			goto copcsr;
		case fcvtd_op:
			/* convert long to double precision real */
			rv.d = ieee754dp_flong(ctx->fpr[MIPSInst_FS(ir)]);
			rfmt = d_fmt;
			goto copcsr;
		default:
			return SIGILL;
		}
		break;
	}
#endif

	default:
		return SIGILL;
	}

	/*
	 * Update the fpu CSR register for this operation.
	 * If an exception is required, generate a tidy SIGFPE exception,
	 * without updating the result register.
	 * Note: cause exception bits do not accumulate, they are rewritten
	 * for each op; only the flag/sticky bits accumulate.
	 */
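	/*
	 * Note on the check below (relies on the usual MIPS FCSR layout):
	 * the Cause bits sit five positions above the corresponding Enable
	 * bits, so shifting fcr31 right by 5 lines each newly-raised cause
	 * bit up with its enable bit; a non-zero AND, masked to
	 * FPU_CSR_ALL_E, means the program asked to trap on that exception.
	 */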
	ctx->fcr31 = (ctx->fcr31 & ~FPU_CSR_ALL_X) | rcsr;
	if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
		/*printk ("SIGFPE: fpu csr = %08x\n",ctx->fcr31); */
		return SIGFPE;
	}

	/*
	 * Now we can safely write the result back to the register file.
	 */
	switch (rfmt) {
	case -1:{
#if __mips >= 4
		cond = fpucondbit[MIPSInst_FD(ir) >> 2];
#else
		cond = FPU_CSR_COND;
#endif
		if (rv.w)
			ctx->fcr31 |= cond;
		else
			ctx->fcr31 &= ~cond;
		break;
	}
	case d_fmt:
		DPTOREG(rv.d, MIPSInst_FD(ir));
		break;
	case s_fmt:
		SPTOREG(rv.s, MIPSInst_FD(ir));
		break;
	case w_fmt:
		SITOREG(rv.w, MIPSInst_FD(ir));
		break;
#if defined(__mips64)
	case l_fmt:
		DITOREG(rv.l, MIPSInst_FD(ir));
		break;
#endif
	default:
		return SIGILL;
	}

	return 0;
}
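
/*
 * Top-level entry point: emulates FP instructions starting at cp0_epc.
 * As the loop below shows, when no hardware FPU is present (has_fpu == 0)
 * it keeps emulating consecutive instructions until one of them raises a
 * signal or cp0_epc fails to advance past the previous value, which
 * avoids bouncing through the exception path once per FP instruction;
 * with a hardware FPU it emulates a single instruction and returns.
 */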
int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
	int has_fpu, void *__user *fault_addr)
{
	unsigned long oldepc, prevepc;
	mips_instruction insn;
	int sig = 0;

	oldepc = xcp->cp0_epc;
	do {
		prevepc = xcp->cp0_epc;

		if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) {
			MIPS_FPU_EMU_INC_STATS(errors);
			*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
			return SIGBUS;
		}
		if (__get_user(insn, (mips_instruction __user *) xcp->cp0_epc)) {
			MIPS_FPU_EMU_INC_STATS(errors);
			*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
			return SIGSEGV;
		}
		if (insn == 0)
			xcp->cp0_epc += 4;	/* skip nops */
		else {
			/*
			 * The 'ieee754_csr' is an alias of
			 * ctx->fcr31.  No need to copy ctx->fcr31 to
			 * ieee754_csr.  But ieee754_csr.rm is ieee
			 * library modes. (not mips rounding mode)
			 */
			/* convert to ieee library modes */
			ieee754_csr.rm = ieee_rm[ieee754_csr.rm];
			sig = cop1Emulate(xcp, ctx, fault_addr);
			/* revert to mips rounding mode */
			ieee754_csr.rm = mips_rm[ieee754_csr.rm];
		}

		if (has_fpu)
			break;
		if (sig)
			break;

		cond_resched();
	} while (xcp->cp0_epc > prevepc);

	/* SIGILL indicates a non-fpu instruction */
	if (sig == SIGILL && xcp->cp0_epc != oldepc)
		/* but if epc has advanced, then ignore it */
		sig = 0;

	return sig;
}

#ifdef CONFIG_DEBUG_FS

static int fpuemu_stat_get(void *data, u64 *val)
{
	int cpu;
	unsigned long sum = 0;

	for_each_online_cpu(cpu) {
		struct mips_fpu_emulator_stats *ps;
		local_t *pv;

		ps = &per_cpu(fpuemustats, cpu);
		pv = (void *)ps + (unsigned long)data;
		sum += local_read(pv);
	}
	*val = sum;
	return 0;
}
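
/*
 * The debugfs glue below passes the offset of each counter within
 * struct mips_fpu_emulator_stats as the file's private data, so
 * fpuemu_stat_get() can locate the same local_t in every CPU's copy of
 * the per-cpu structure and report the sum across all online CPUs.
 */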
DEFINE_SIMPLE_ATTRIBUTE(fops_fpuemu_stat, fpuemu_stat_get, NULL, "%llu\n");

extern struct dentry *mips_debugfs_dir;
static int __init debugfs_fpuemu(void)
{
	struct dentry *d, *dir;

	if (!mips_debugfs_dir)
		return -ENODEV;
	dir = debugfs_create_dir("fpuemustats", mips_debugfs_dir);
	if (!dir)
		return -ENOMEM;

#define FPU_STAT_CREATE(M)						\
	do {								\
		d = debugfs_create_file(#M , S_IRUGO, dir,		\
			(void *)offsetof(struct mips_fpu_emulator_stats, M), \
			&fops_fpuemu_stat);				\
		if (!d)							\
			return -ENOMEM;					\
	} while (0)

	FPU_STAT_CREATE(emulated);
	FPU_STAT_CREATE(loads);
	FPU_STAT_CREATE(stores);
	FPU_STAT_CREATE(cp1ops);
	FPU_STAT_CREATE(cp1xops);
	FPU_STAT_CREATE(errors);

	return 0;
}
__initcall(debugfs_fpuemu);
#endif