  1. /*
  2. * Contains the system-call and fault low-level handling routines.
  3. * This also contains the timer-interrupt handler, as well as all
  4. * interrupts and faults that can result in a task-switch.
  5. *
  6. * Copyright 2005-2009 Analog Devices Inc.
  7. *
  8. * Licensed under the GPL-2 or later.
  9. */
  10. /* NOTE: This code handles signal-recognition, which happens every time
  11. * after a timer-interrupt and after each system call.
  12. */
  13. #include <linux/init.h>
  14. #include <linux/linkage.h>
  15. #include <linux/unistd.h>
  16. #include <asm/blackfin.h>
  17. #include <asm/errno.h>
  18. #include <asm/fixed_code.h>
  19. #include <asm/thread_info.h> /* TIF_NEED_RESCHED */
  20. #include <asm/asm-offsets.h>
  21. #include <asm/trace.h>
  22. #include <asm/traps.h>
  23. #include <asm/context.S>
  24. #if defined(CONFIG_BFIN_SCRATCH_REG_RETN)
  25. # define EX_SCRATCH_REG RETN
  26. #elif defined(CONFIG_BFIN_SCRATCH_REG_RETE)
  27. # define EX_SCRATCH_REG RETE
  28. #else
  29. # define EX_SCRATCH_REG CYCLES
  30. #endif
  31. #ifdef CONFIG_EXCPT_IRQ_SYSC_L1
  32. .section .l1.text
  33. #else
  34. .text
  35. #endif
  36. /* Slightly simplified and streamlined entry point for CPLB misses.
  37. * This one does not lower the level to IRQ5, and thus can be used to
  38. * patch up CPLB misses on the kernel stack.
  39. */
  40. #if ANOMALY_05000261
  41. #define _ex_dviol _ex_workaround_261
  42. #define _ex_dmiss _ex_workaround_261
  43. #define _ex_dmult _ex_workaround_261
  44. ENTRY(_ex_workaround_261)
  45. /*
  46. * Work around an anomaly: if we see a new DCPLB fault, return
  47. * without doing anything. Then, if we get the same fault again,
  48. * handle it.
  49. */
  50. P4 = R7; /* Store EXCAUSE */
  51. GET_PDA(p5, r7);
  52. r7 = [p5 + PDA_LFRETX];
  53. r6 = retx;
  54. [p5 + PDA_LFRETX] = r6;
  55. cc = r6 == r7;
  56. if !cc jump _bfin_return_from_exception;
  57. /* fall through */
  58. R7 = P4;
  59. R6 = VEC_CPLB_M; /* Data CPLB Miss */
  60. cc = R6 == R7;
  61. if cc jump _ex_dcplb_miss (BP);
  62. #ifdef CONFIG_MPU
  63. R6 = VEC_CPLB_VL; /* Data CPLB Violation */
  64. cc = R6 == R7;
  65. if cc jump _ex_dcplb_viol (BP);
  66. #endif
  67. /* Handle Data CPLB Protection Violation
  68. * and Data CPLB Multiple Hits - Linux Trap Zero
  69. */
  70. jump _ex_trap_c;
  71. ENDPROC(_ex_workaround_261)
  72. #else
  73. #ifdef CONFIG_MPU
  74. #define _ex_dviol _ex_dcplb_viol
  75. #else
  76. #define _ex_dviol _ex_trap_c
  77. #endif
  78. #define _ex_dmiss _ex_dcplb_miss
  79. #define _ex_dmult _ex_trap_c
  80. #endif
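/*
 * In C terms, the anomaly-05000261 filter above amounts to roughly the
 * sketch below (the struct field and function names are illustrative,
 * not the real PDA layout, which is described by the PDA_* offsets):
 *
 *	struct pda_sketch { unsigned long last_fault_retx; };
 *
 *	// Returns nonzero when the fault should be handled now, zero when
 *	// we should simply return and wait for the same fault to repeat.
 *	static int anomaly_261_filter(struct pda_sketch *pda, unsigned long retx)
 *	{
 *		unsigned long prev = pda->last_fault_retx;
 *
 *		pda->last_fault_retx = retx;
 *		return prev == retx;
 *	}
 */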
  81. ENTRY(_ex_dcplb_viol)
  82. ENTRY(_ex_dcplb_miss)
  83. ENTRY(_ex_icplb_miss)
  84. (R7:6,P5:4) = [sp++];
  85. /* We leave the previously pushed ASTAT on the stack. */
  86. SAVE_CONTEXT_CPLB
  87. /* We must load R1 here, _before_ DEBUG_HWTRACE_SAVE, since that
  88. * will change the stack pointer. */
  89. R0 = SEQSTAT;
  90. R1 = SP;
  91. DEBUG_HWTRACE_SAVE(p5, r7)
  92. sp += -12;
  93. call _cplb_hdr;
  94. sp += 12;
  95. CC = R0 == 0;
  96. IF !CC JUMP _handle_bad_cplb;
  97. #ifdef CONFIG_DEBUG_DOUBLEFAULT
  98. /* While we were processing this, did we double fault? */
  99. r7 = SEQSTAT; /* reason code is in bit 5:0 */
  100. r6.l = lo(SEQSTAT_EXCAUSE);
  101. r6.h = hi(SEQSTAT_EXCAUSE);
  102. r7 = r7 & r6;
  103. r6 = 0x25;
  104. CC = R7 == R6;
  105. if CC JUMP _double_fault;
  106. #endif
  107. DEBUG_HWTRACE_RESTORE(p5, r7)
  108. RESTORE_CONTEXT_CPLB
  109. ASTAT = [SP++];
  110. SP = EX_SCRATCH_REG;
  111. rtx;
  112. ENDPROC(_ex_icplb_miss)
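/*
 * A rough C sketch of the fast CPLB path above: SEQSTAT is passed in R0
 * and the exception stack pointer in R1, and a nonzero return from the
 * C handler sends us to _handle_bad_cplb (the prototypes shown here are
 * assumed for illustration only):
 *
 *	extern int cplb_hdr(int seqstat, struct pt_regs *regs);
 *	extern void handle_bad_cplb(void);	// stands in for _handle_bad_cplb
 *
 *	static void cplb_fastpath_sketch(int seqstat, struct pt_regs *regs)
 *	{
 *		if (cplb_hdr(seqstat, regs) != 0)
 *			handle_bad_cplb();
 *		// otherwise the minimal context is restored and the faulting
 *		// instruction is simply retried via RTX
 *	}
 */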
  113. ENTRY(_ex_syscall)
  114. raise 15; /* invoked by TRAP #0, for sys call */
  115. jump.s _bfin_return_from_exception;
  116. ENDPROC(_ex_syscall)
  117. ENTRY(_ex_single_step)
  118. /* If we just returned from an interrupt, the single step event is
  119. for the RTI instruction. */
  120. r7 = retx;
  121. r6 = reti;
  122. cc = r7 == r6;
  123. if cc jump _bfin_return_from_exception;
  124. #ifdef CONFIG_KGDB
  125. /* Don't do single step in hardware exception handler */
  126. p5.l = lo(IPEND);
  127. p5.h = hi(IPEND);
  128. r6 = [p5];
  129. cc = bittst(r6, 4);
  130. if cc jump _bfin_return_from_exception;
  131. cc = bittst(r6, 5);
  132. if cc jump _bfin_return_from_exception;
  133. /* skip the single step if the current interrupt priority is higher than
  134. * that of the first instruction from which gdb starts single stepping */
  135. r6 >>= 6;
  136. r7 = 10;
  137. .Lfind_priority_start:
  138. cc = bittst(r6, 0);
  139. if cc jump .Lfind_priority_done;
  140. r6 >>= 1;
  141. r7 += -1;
  142. cc = r7 == 0;
  143. if cc jump .Lfind_priority_done;
  144. jump.s .Lfind_priority_start;
  145. .Lfind_priority_done:
  146. p4.l = _kgdb_single_step;
  147. p4.h = _kgdb_single_step;
  148. r6 = [p4];
  149. cc = r6 == 0;
  150. if cc jump .Ldo_single_step;
  151. r6 += -1;
  152. cc = r6 < r7;
  153. if cc jump 1f;
  154. .Ldo_single_step:
  155. #else
  156. /* If we were in user mode, do the single step normally. */
  157. p5.l = lo(IPEND);
  158. p5.h = hi(IPEND);
  159. r6 = [p5];
  160. r7 = 0xffe0 (z);
  161. r7 = r7 & r6;
  162. cc = r7 == 0;
  163. if !cc jump 1f;
  164. #endif
  165. #ifdef CONFIG_EXACT_HWERR
  166. /* Read ILAT and check whether the process we are
  167. * single stepping caused a previous hardware error.
  168. * If so, do not single step, since that lowers to IRQ5 and makes
  169. * us miss the error.
  170. */
  171. p5.l = lo(ILAT);
  172. p5.h = hi(ILAT);
  173. r7 = [p5];
  174. cc = bittst(r7, EVT_IVHW_P);
  175. if cc jump 1f;
  176. #endif
  177. /* Single stepping only a single instruction, so clear the trace
  178. * bit here. */
  179. r7 = syscfg;
  180. bitclr (r7, SYSCFG_SSSTEP_P);
  181. syscfg = R7;
  182. jump _ex_trap_c;
  183. 1:
  184. /*
  185. * We were in an interrupt handler. By convention, all of them save
  186. * SYSCFG with their first instruction, so by checking whether our
  187. * RETX points at the entry point, we can determine whether to allow
  188. * a single step, or whether to clear SYSCFG.
  189. *
  190. * First, find out the interrupt level and the event vector for it.
  191. */
  192. p5.l = lo(EVT0);
  193. p5.h = hi(EVT0);
  194. p5 += -4;
  195. 2:
  196. r7 = rot r7 by -1;
  197. p5 += 4;
  198. if !cc jump 2b;
  199. /* What we actually do is test for the _second_ instruction in the
  200. * IRQ handler. That way, if there are insns following the restore
  201. * of SYSCFG after leaving the handler, we will not turn off SYSCFG
  202. * for them. */
  203. r7 = [p5];
  204. r7 += 2;
  205. r6 = RETX;
  206. cc = R7 == R6;
  207. if !cc jump _bfin_return_from_exception;
  208. r7 = syscfg;
  209. bitclr (r7, SYSCFG_SSSTEP_P); /* Turn off single step */
  210. syscfg = R7;
  211. /* Fall through to _bfin_return_from_exception. */
  212. ENDPROC(_ex_single_step)
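/*
 * The "second instruction of the handler" test above can be pictured in
 * C roughly as follows (illustrative sketch; evt[] stands for the
 * EVT0..EVT15 vector registers, and ipend is the already-masked,
 * known-nonzero IPEND value from the non-KGDB path above):
 *
 *	static int step_lands_on_handler_entry(unsigned int ipend,
 *					       const unsigned long *evt,
 *					       unsigned long retx)
 *	{
 *		int level = 0;
 *
 *		while (!(ipend & 1)) {		// find the lowest pending level
 *			ipend >>= 1;
 *			level++;
 *		}
 *		// nonzero means RETX points just past the handler's first
 *		// (SYSCFG-saving) instruction, so single stepping is turned off
 *		return retx == evt[level] + 2;
 *	}
 */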
  213. ENTRY(_bfin_return_from_exception)
  214. #if ANOMALY_05000257
  215. R7=LC0;
  216. LC0=R7;
  217. R7=LC1;
  218. LC1=R7;
  219. #endif
  220. #ifdef CONFIG_DEBUG_DOUBLEFAULT
  221. /* While we were processing the current exception,
  222. * did we cause another, and double fault?
  223. */
  224. r7 = SEQSTAT; /* reason code is in bit 5:0 */
  225. r6.l = lo(SEQSTAT_EXCAUSE);
  226. r6.h = hi(SEQSTAT_EXCAUSE);
  227. r7 = r7 & r6;
  228. r6 = VEC_UNCOV;
  229. CC = R7 == R6;
  230. if CC JUMP _double_fault;
  231. #endif
  232. (R7:6,P5:4) = [sp++];
  233. ASTAT = [sp++];
  234. sp = EX_SCRATCH_REG;
  235. rtx;
  236. ENDPROC(_bfin_return_from_exception)
  237. ENTRY(_handle_bad_cplb)
  238. DEBUG_HWTRACE_RESTORE(p5, r7)
  239. /* To get here, we just tried and failed to change a CPLB,
  240. * so handle things in trap_c (C code) by lowering to
  241. * IRQ5, just like we normally do. Since this is not a
  242. * "normal" return path, we have to do a lot of work on
  243. * the stack to get ready so we can fall through - we
  244. * need to make a CPLB exception look like a normal exception.
  245. */
  246. RESTORE_CONTEXT_CPLB
  247. /* ASTAT is still on the stack, where it is needed. */
  248. [--sp] = (R7:6,P5:4);
  249. ENTRY(_ex_replaceable)
  250. nop;
  251. ENTRY(_ex_trap_c)
  252. /* The only thing that has been saved in this context is
  253. * (R7:6,P5:4), ASTAT & SP - don't use anything else
  254. */
  255. GET_PDA(p5, r6);
  256. /* Make sure we are not in a double fault */
  257. p4.l = lo(IPEND);
  258. p4.h = hi(IPEND);
  259. r7 = [p4];
  260. CC = BITTST (r7, 5);
  261. if CC jump _double_fault;
  262. [p5 + PDA_EXIPEND] = r7;
  263. /* Call C code (trap_c) to handle the exception, which most
  264. * likely involves sending a signal to the current process.
  265. * To avoid double faults, lower our priority to IRQ5 first.
  266. */
  267. r7.h = _exception_to_level5;
  268. r7.l = _exception_to_level5;
  269. p4.l = lo(EVT5);
  270. p4.h = hi(EVT5);
  271. [p4] = r7;
  272. csync;
  273. /*
  274. * Save these registers, as they are only valid in exception context
  275. * (where we are now - as soon as we defer to IRQ5, they can change)
  276. * DCPLB_STATUS and ICPLB_STATUS are also only valid in EVT3,
  277. * but they are not very interesting, so don't save them
  278. */
  279. p4.l = lo(DCPLB_FAULT_ADDR);
  280. p4.h = hi(DCPLB_FAULT_ADDR);
  281. r7 = [p4];
  282. [p5 + PDA_DCPLB] = r7;
  283. p4.l = lo(ICPLB_FAULT_ADDR);
  284. p4.h = hi(ICPLB_FAULT_ADDR);
  285. r6 = [p4];
  286. [p5 + PDA_ICPLB] = r6;
  287. r6 = retx;
  288. [p5 + PDA_RETX] = r6;
  289. r6 = SEQSTAT;
  290. [p5 + PDA_SEQSTAT] = r6;
  291. /* Save the state of single stepping */
  292. r6 = SYSCFG;
  293. [p5 + PDA_SYSCFG] = r6;
  294. /* Clear it while we handle the exception in IRQ5 mode */
  295. BITCLR(r6, SYSCFG_SSSTEP_P);
  296. SYSCFG = r6;
  297. /* Save the current IMASK, since we change it in order to jump to level 5 */
  298. cli r6;
  299. [p5 + PDA_EXIMASK] = r6;
  300. p4.l = lo(SAFE_USER_INSTRUCTION);
  301. p4.h = hi(SAFE_USER_INSTRUCTION);
  302. retx = p4;
  303. /* Disable all interrupts, but make sure level 5 is enabled so
  304. * we can switch to that level.
  305. */
  306. r6 = 0x3f;
  307. sti r6;
  308. /* In case interrupts are disabled via IPEND[4] (the global interrupt disable
  309. * bit), clear it (re-enabling interrupts) by the special sequence of pushing
  310. * RETI onto the stack. This way we can lower ourselves to IVG5 even if the
  311. * exception was taken after the interrupt handler was called but before it
  312. * got a chance to enable global interrupts itself.
  313. */
  314. [--sp] = reti;
  315. sp += 4;
  316. raise 5;
  317. jump.s _bfin_return_from_exception;
  318. ENDPROC(_ex_trap_c)
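/*
 * The exception-only state stashed in the PDA before dropping to IRQ5
 * is, in effect, a small snapshot (sketch; the real layout is given by
 * the PDA_* offsets generated in asm-offsets):
 *
 *	struct ex_snapshot_sketch {
 *		unsigned long dcplb_fault_addr;	// DCPLB_FAULT_ADDR  -> PDA_DCPLB
 *		unsigned long icplb_fault_addr;	// ICPLB_FAULT_ADDR  -> PDA_ICPLB
 *		unsigned long retx;		// faulting PC       -> PDA_RETX
 *		unsigned long seqstat;		// EXCAUSE etc.      -> PDA_SEQSTAT
 *		unsigned long syscfg;		// single-step state -> PDA_SYSCFG
 *		unsigned long imask;		// saved by "cli"    -> PDA_EXIMASK
 *		unsigned long ipend;		// IPEND at the time -> PDA_EXIPEND
 *	};
 *
 * _exception_to_level5 reads these back once it is running at IRQ5.
 */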
  319. /* We just realized we got an exception while we were processing a different
  320. * exception. This is an unrecoverable event, so crash.
  321. * Note: this cannot be ENTRY() as we jump here with "if cc jump" ...
  322. */
  323. ENTRY(_double_fault)
  324. /* Turn caches & protection off, to ensure we don't get any more
  325. * double exceptions
  326. */
  327. P4.L = LO(IMEM_CONTROL);
  328. P4.H = HI(IMEM_CONTROL);
  329. R5 = [P4]; /* Control Register*/
  330. BITCLR(R5,ENICPLB_P);
  331. CSYNC; /* Disabling of CPLBs should be preceded by a CSYNC */
  332. [P4] = R5;
  333. SSYNC;
  334. P4.L = LO(DMEM_CONTROL);
  335. P4.H = HI(DMEM_CONTROL);
  336. R5 = [P4];
  337. BITCLR(R5,ENDCPLB_P);
  338. CSYNC; /* Disabling of CPLBs should be preceded by a CSYNC */
  339. [P4] = R5;
  340. SSYNC;
  341. /* Fix up the stack */
  342. (R7:6,P5:4) = [sp++];
  343. ASTAT = [sp++];
  344. SP = EX_SCRATCH_REG;
  345. /* We should be out of the exception stack, and back down into
  346. * kernel or user space stack
  347. */
  348. SAVE_ALL_SYS
  349. /* The dumping functions expect the return address in the RETI
  350. * slot. */
  351. r6 = retx;
  352. [sp + PT_PC] = r6;
  353. r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */
  354. SP += -12;
  355. pseudo_long_call _double_fault_c, p5;
  356. SP += 12;
  357. .L_double_fault_panic:
  358. JUMP .L_double_fault_panic
  359. ENDPROC(_double_fault)
  360. ENTRY(_exception_to_level5)
  361. SAVE_ALL_SYS
  362. GET_PDA(p5, r7); /* Fetch current PDA */
  363. r6 = [p5 + PDA_RETX];
  364. [sp + PT_PC] = r6;
  365. r6 = [p5 + PDA_SYSCFG];
  366. [sp + PT_SYSCFG] = r6;
  367. r6 = [p5 + PDA_SEQSTAT]; /* Read back seqstat */
  368. [sp + PT_SEQSTAT] = r6;
  369. /* Restore the hardware error vector. */
  370. r7.h = _evt_ivhw;
  371. r7.l = _evt_ivhw;
  372. p4.l = lo(EVT5);
  373. p4.h = hi(EVT5);
  374. [p4] = r7;
  375. csync;
  376. #ifdef CONFIG_DEBUG_DOUBLEFAULT
  377. /* Now that we have the hardware error vector programmed properly
  378. * we can re-enable interrupts (IPEND[4]), so if the _trap_c causes
  379. * another hardware error, we can catch it (self-nesting).
  380. */
  381. [--sp] = reti;
  382. sp += 4;
  383. #endif
  384. r7 = [p5 + PDA_EXIPEND]; /* Read the IPEND from the Exception state */
  385. [sp + PT_IPEND] = r7; /* Store IPEND onto the stack */
  386. r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */
  387. SP += -12;
  388. pseudo_long_call _trap_c, p4;
  389. SP += 12;
  390. /* If interrupts were off during the exception (IPEND[4] = 1), turn them off
  391. * before we return.
  392. */
  393. CC = BITTST(r7, EVT_IRPTEN_P);
  394. if !CC jump 1f;
  395. /* this will load a random value into the reti register - but that is OK,
  396. * since we do restore it to the correct value in the 'RESTORE_ALL_SYS' macro
  397. */
  398. sp += -4;
  399. reti = [sp++];
  400. 1:
  401. /* restore the interrupt mask (IMASK) */
  402. r6 = [p5 + PDA_EXIMASK];
  403. sti r6;
  404. call _ret_from_exception;
  405. RESTORE_ALL_SYS
  406. rti;
  407. ENDPROC(_exception_to_level5)
  408. ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
  409. /* Since the kernel stack can be anywhere, it's not guaranteed to be
  410. * covered by a CPLB. Switch to an exception stack; use RETN as a
  411. * scratch register (for want of a better option).
  412. */
  413. EX_SCRATCH_REG = sp;
  414. GET_PDA_SAFE(sp);
  415. sp = [sp + PDA_EXSTACK];
  416. /* Try to deal with syscalls quickly. */
  417. [--sp] = ASTAT;
  418. [--sp] = (R7:6,P5:4);
  419. ANOMALY_283_315_WORKAROUND(p5, r7)
  420. #ifdef CONFIG_EXACT_HWERR
  421. /* Make sure all pending reads/writes complete. This will ensure any
  422. * accesses which could cause hardware errors complete, and signal
  423. * the hardware before we do something silly, like crash the
  424. * kernel. We don't need to work around anomaly 05000312, since
  425. * we are already atomic.
  426. */
  427. ssync;
  428. #endif
  429. #ifdef CONFIG_DEBUG_DOUBLEFAULT
  430. /*
  431. * Save these registers, as they are only valid in exception context
  432. * (where we are now - as soon as we defer to IRQ5, they can change)
  433. * DCPLB_STATUS and ICPLB_STATUS are also only valid in EVT3,
  434. * but they are not very interesting, so don't save them
  435. */
  436. GET_PDA(p5, r7);
  437. p4.l = lo(DCPLB_FAULT_ADDR);
  438. p4.h = hi(DCPLB_FAULT_ADDR);
  439. r7 = [p4];
  440. [p5 + PDA_DF_DCPLB] = r7;
  441. p4.l = lo(ICPLB_FAULT_ADDR);
  442. p4.h = hi(ICPLB_FAULT_ADDR);
  443. r7 = [p4];
  444. [p5 + PDA_DF_ICPLB] = r7;
  445. r7 = retx;
  446. [p5 + PDA_DF_RETX] = r7;
  447. r7 = SEQSTAT; /* reason code is in bit 5:0 */
  448. [p5 + PDA_DF_SEQSTAT] = r7;
  449. #else
  450. r7 = SEQSTAT; /* reason code is in bit 5:0 */
  451. #endif
  452. r6.l = lo(SEQSTAT_EXCAUSE);
  453. r6.h = hi(SEQSTAT_EXCAUSE);
  454. r7 = r7 & r6;
  455. p5.h = _ex_table;
  456. p5.l = _ex_table;
  457. p4 = r7;
  458. p5 = p5 + (p4 << 2);
  459. p4 = [p5];
  460. jump (p4);
  461. .Lbadsys:
  462. r7 = -ENOSYS; /* sign-extending is enough */
  463. [sp + PT_R0] = r7; /* return value from system call */
  464. jump .Lsyscall_really_exit;
  465. ENDPROC(_trap)
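/*
 * The table dispatch at the end of _trap is simply an indexed call, in C
 * terms (sketch; the typedef is illustrative, the table is _ex_table):
 *
 *	typedef void (*ex_handler_t)(void);
 *	extern ex_handler_t ex_table[64];
 *
 *	static ex_handler_t lookup_ex_handler(unsigned long seqstat)
 *	{
 *		unsigned int excause = seqstat & SEQSTAT_EXCAUSE;  // bits 5:0
 *
 *		return ex_table[excause];	// p5 + (p4 << 2); jump (p4)
 *	}
 */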
  466. ENTRY(_kernel_execve)
  467. link SIZEOF_PTREGS;
  468. p0 = sp;
  469. r3 = SIZEOF_PTREGS / 4;
  470. r4 = 0(x);
  471. .Lclear_regs:
  472. [p0++] = r4;
  473. r3 += -1;
  474. cc = r3 == 0;
  475. if !cc jump .Lclear_regs (bp);
  476. p0 = sp;
  477. sp += -16;
  478. [sp + 12] = p0;
  479. pseudo_long_call _do_execve, p5;
  480. SP += 16;
  481. cc = r0 == 0;
  482. if ! cc jump .Lexecve_failed;
  483. /* Success. Copy our temporary pt_regs to the top of the kernel
  484. * stack and do a normal exception return.
  485. */
  486. r1 = sp;
  487. r0 = (-KERNEL_STACK_SIZE) (x);
  488. r1 = r1 & r0;
  489. p2 = r1;
  490. p3 = [p2];
  491. r0 = KERNEL_STACK_SIZE - 4 (z);
  492. p1 = r0;
  493. p1 = p1 + p2;
  494. p0 = fp;
  495. r4 = [p0--];
  496. r3 = SIZEOF_PTREGS / 4;
  497. .Lcopy_regs:
  498. r4 = [p0--];
  499. [p1--] = r4;
  500. r3 += -1;
  501. cc = r3 == 0;
  502. if ! cc jump .Lcopy_regs (bp);
  503. r0 = (KERNEL_STACK_SIZE - SIZEOF_PTREGS) (z);
  504. p1 = r0;
  505. p1 = p1 + p2;
  506. sp = p1;
  507. r0 = syscfg;
  508. [SP + PT_SYSCFG] = r0;
  509. [p3 + (TASK_THREAD + THREAD_KSP)] = sp;
  510. RESTORE_CONTEXT;
  511. rti;
  512. .Lexecve_failed:
  513. unlink;
  514. rts;
  515. ENDPROC(_kernel_execve)
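/*
 * On success the temporary pt_regs built in the local frame above is
 * copied to the very top of the kernel stack; the address arithmetic is,
 * in C (sketch, relying on KERNEL_STACK_SIZE being a power of two):
 *
 *	static struct pt_regs *top_of_stack_regs(unsigned long sp)
 *	{
 *		unsigned long base = sp & ~(KERNEL_STACK_SIZE - 1);
 *
 *		return (struct pt_regs *)(base + KERNEL_STACK_SIZE
 *					  - sizeof(struct pt_regs));
 *	}
 *
 * which matches the "r1 & -KERNEL_STACK_SIZE" mask followed by the
 * KERNEL_STACK_SIZE - SIZEOF_PTREGS offset used above.
 */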
  516. ENTRY(_system_call)
  517. /* Store IPEND */
  518. p2.l = lo(IPEND);
  519. p2.h = hi(IPEND);
  520. csync;
  521. r0 = [p2];
  522. [sp + PT_IPEND] = r0;
  523. /* Store RETS for now */
  524. r0 = rets;
  525. [sp + PT_RESERVED] = r0;
  526. /* Set the stack for the current process */
  527. r7 = sp;
  528. r6.l = lo(ALIGN_PAGE_MASK);
  529. r6.h = hi(ALIGN_PAGE_MASK);
  530. r7 = r7 & r6; /* thread_info */
  531. p2 = r7;
  532. p2 = [p2];
  533. [p2+(TASK_THREAD+THREAD_KSP)] = sp;
  534. #ifdef CONFIG_IPIPE
  535. r0 = sp;
  536. SP += -12;
  537. pseudo_long_call ___ipipe_syscall_root, p0;
  538. SP += 12;
  539. cc = r0 == 1;
  540. if cc jump .Lsyscall_really_exit;
  541. cc = r0 == -1;
  542. if cc jump .Lresume_userspace;
  543. r3 = [sp + PT_R3];
  544. r4 = [sp + PT_R4];
  545. p0 = [sp + PT_ORIG_P0];
  546. #endif /* CONFIG_IPIPE */
  547. /* are we tracing syscalls?*/
  548. r7 = sp;
  549. r6.l = lo(ALIGN_PAGE_MASK);
  550. r6.h = hi(ALIGN_PAGE_MASK);
  551. r7 = r7 & r6;
  552. p2 = r7;
  553. r7 = [p2+TI_FLAGS];
  554. CC = BITTST(r7,TIF_SYSCALL_TRACE);
  555. if CC JUMP _sys_trace;
  556. CC = BITTST(r7,TIF_SINGLESTEP);
  557. if CC JUMP _sys_trace;
  558. /* Make sure the system call # is valid */
  559. p4 = __NR_syscall;
  560. /* System call number is passed in P0 */
  561. cc = p4 <= p0;
  562. if cc jump .Lbadsys;
  563. /* Execute the appropriate system call */
  564. p4 = p0;
  565. p5.l = _sys_call_table;
  566. p5.h = _sys_call_table;
  567. p5 = p5 + (p4 << 2);
  568. r0 = [sp + PT_R0];
  569. r1 = [sp + PT_R1];
  570. r2 = [sp + PT_R2];
  571. p5 = [p5];
  572. [--sp] = r5;
  573. [--sp] = r4;
  574. [--sp] = r3;
  575. SP += -12;
  576. call (p5);
  577. SP += 24;
  578. [sp + PT_R0] = r0;
  579. .Lresume_userspace:
  580. r7 = sp;
  581. r4.l = lo(ALIGN_PAGE_MASK);
  582. r4.h = hi(ALIGN_PAGE_MASK);
  583. r7 = r7 & r4; /* thread_info->flags */
  584. p5 = r7;
  585. .Lresume_userspace_1:
  586. /* Disable interrupts. */
  587. [--sp] = reti;
  588. reti = [sp++];
  589. r7 = [p5 + TI_FLAGS];
  590. r4.l = lo(_TIF_WORK_MASK);
  591. r4.h = hi(_TIF_WORK_MASK);
  592. r7 = r7 & r4;
  593. .Lsyscall_resched:
  594. #ifdef CONFIG_IPIPE
  595. cc = BITTST(r7, TIF_IRQ_SYNC);
  596. if !cc jump .Lsyscall_no_irqsync;
  597. /*
  598. * Clear IPEND[4] manually to undo what resume_userspace_1 just did;
  599. * we need this so that high priority domain interrupts may still
  600. * preempt the current domain while the pipeline log is being played
  601. * back.
  602. */
  603. [--sp] = reti;
  604. SP += 4; /* don't merge with next insn to keep the pattern obvious */
  605. SP += -12;
  606. pseudo_long_call ___ipipe_sync_root, p4;
  607. SP += 12;
  608. jump .Lresume_userspace_1;
  609. .Lsyscall_no_irqsync:
  610. #endif
  611. cc = BITTST(r7, TIF_NEED_RESCHED);
  612. if !cc jump .Lsyscall_sigpending;
  613. /* Reenable interrupts. */
  614. [--sp] = reti;
  615. sp += 4;
  616. SP += -12;
  617. pseudo_long_call _schedule, p4;
  618. SP += 12;
  619. jump .Lresume_userspace_1;
  620. .Lsyscall_sigpending:
  621. cc = BITTST(r7, TIF_RESTORE_SIGMASK);
  622. if cc jump .Lsyscall_do_signals;
  623. cc = BITTST(r7, TIF_SIGPENDING);
  624. if cc jump .Lsyscall_do_signals;
  625. cc = BITTST(r7, TIF_NOTIFY_RESUME);
  626. if !cc jump .Lsyscall_really_exit;
  627. .Lsyscall_do_signals:
  628. /* Reenable interrupts. */
  629. [--sp] = reti;
  630. sp += 4;
  631. r0 = sp;
  632. SP += -12;
  633. pseudo_long_call _do_notify_resume, p5;
  634. SP += 12;
  635. .Lsyscall_really_exit:
  636. r5 = [sp + PT_RESERVED];
  637. rets = r5;
  638. rts;
  639. ENDPROC(_system_call)
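/*
 * Stripped of the tracing and signal/reschedule work, the dispatch above
 * is (sketch; the typedef and argument marshalling are illustrative,
 * r0-r2 come from the saved frame and r3-r5 are pushed on the stack just
 * before the call):
 *
 *	typedef long (*syscall_fn_t)(long, long, long, long, long, long);
 *	extern syscall_fn_t sys_call_table[];	// _sys_call_table
 *
 *	static long dispatch_syscall(unsigned int nr, struct pt_regs *regs)
 *	{
 *		if (nr >= __NR_syscall)		// number arrives in P0
 *			return -ENOSYS;		// see .Lbadsys
 *		return sys_call_table[nr](regs->r0, regs->r1, regs->r2,
 *					  regs->r3, regs->r4, regs->r5);
 *	}
 */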
  640. /* Do not mark as ENTRY() to avoid an error in the assembler ...
  641. * this symbol need not be global anyway, so ...
  642. */
  643. _sys_trace:
  644. r0 = sp;
  645. pseudo_long_call _syscall_trace_enter, p5;
  646. /* Make sure the system call # is valid */
  647. p4 = [SP + PT_P0];
  648. p3 = __NR_syscall;
  649. cc = p3 <= p4;
  650. r0 = -ENOSYS;
  651. if cc jump .Lsys_trace_badsys;
  652. /* Execute the appropriate system call */
  653. p5.l = _sys_call_table;
  654. p5.h = _sys_call_table;
  655. p5 = p5 + (p4 << 2);
  656. r0 = [sp + PT_R0];
  657. r1 = [sp + PT_R1];
  658. r2 = [sp + PT_R2];
  659. r3 = [sp + PT_R3];
  660. r4 = [sp + PT_R4];
  661. r5 = [sp + PT_R5];
  662. p5 = [p5];
  663. [--sp] = r5;
  664. [--sp] = r4;
  665. [--sp] = r3;
  666. SP += -12;
  667. call (p5);
  668. SP += 24;
  669. .Lsys_trace_badsys:
  670. [sp + PT_R0] = r0;
  671. r0 = sp;
  672. pseudo_long_call _syscall_trace_leave, p5;
  673. jump .Lresume_userspace;
  674. ENDPROC(_sys_trace)
  675. ENTRY(_resume)
  676. /*
  677. * Beware - when entering resume, prev (the current task) is
  678. * in r0, next (the new task) is in r1.
  679. */
  680. p0 = r0;
  681. p1 = r1;
  682. [--sp] = rets;
  683. [--sp] = fp;
  684. [--sp] = (r7:4, p5:3);
  685. /* save usp */
  686. p2 = usp;
  687. [p0+(TASK_THREAD+THREAD_USP)] = p2;
  688. /* save current kernel stack pointer */
  689. [p0+(TASK_THREAD+THREAD_KSP)] = sp;
  690. /* save program counter */
  691. r1.l = _new_old_task;
  692. r1.h = _new_old_task;
  693. [p0+(TASK_THREAD+THREAD_PC)] = r1;
  694. /* restore the kernel stack pointer */
  695. sp = [p1+(TASK_THREAD+THREAD_KSP)];
  696. /* restore user stack pointer */
  697. p0 = [p1+(TASK_THREAD+THREAD_USP)];
  698. usp = p0;
  699. /* restore pc */
  700. p0 = [p1+(TASK_THREAD+THREAD_PC)];
  701. jump (p0);
  702. /*
  703. * Following code actually lands up in a new (old) task.
  704. */
  705. _new_old_task:
  706. (r7:4, p5:3) = [sp++];
  707. fp = [sp++];
  708. rets = [sp++];
  709. /*
  710. * When we come out of resume, r0 carries "old" task, because we are
  711. * in "new" task.
  712. */
  713. rts;
  714. ENDPROC(_resume)
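/*
 * The per-task state that _resume actually switches is only three words
 * (sketch; the field names are illustrative, the offsets used above are
 * TASK_THREAD + THREAD_USP/THREAD_KSP/THREAD_PC):
 *
 *	struct thread_switch_state {
 *		unsigned long usp;	// user stack pointer
 *		unsigned long ksp;	// kernel stack pointer
 *		unsigned long pc;	// resume point (_new_old_task)
 *	};
 *
 * Everything else (r7:4, p5:3, fp, rets) lives on the outgoing task's
 * kernel stack and is popped again at _new_old_task.
 */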
  715. ENTRY(_ret_from_exception)
  716. #ifdef CONFIG_IPIPE
  717. p2.l = _ipipe_percpu_domain;
  718. p2.h = _ipipe_percpu_domain;
  719. r0.l = _ipipe_root;
  720. r0.h = _ipipe_root;
  721. r2 = [p2];
  722. cc = r0 == r2;
  723. if !cc jump 4f; /* not on behalf of the root domain, get out */
  724. #endif /* CONFIG_IPIPE */
  725. p2.l = lo(IPEND);
  726. p2.h = hi(IPEND);
  727. csync;
  728. r0 = [p2];
  729. [sp + PT_IPEND] = r0;
  730. 1:
  731. r2 = LO(~0x37) (Z);
  732. r0 = r2 & r0;
  733. cc = r0 == 0;
  734. if !cc jump 4f; /* if not return to user mode, get out */
  735. /* Make sure any pending system call or deferred exception
  736. * return in ILAT for this process gets executed; otherwise,
  737. * if a context switch happens, the system call of the
  738. * first process (i.e. the one in ILAT) will be carried
  739. * forward to the switched-in process.
  740. */
  741. p2.l = lo(ILAT);
  742. p2.h = hi(ILAT);
  743. r0 = [p2];
  744. r1 = (EVT_IVG14 | EVT_IVG15) (z);
  745. r0 = r0 & r1;
  746. cc = r0 == 0;
  747. if !cc jump 5f;
  748. /* Set the stack for the current process */
  749. r7 = sp;
  750. r4.l = lo(ALIGN_PAGE_MASK);
  751. r4.h = hi(ALIGN_PAGE_MASK);
  752. r7 = r7 & r4; /* thread_info->flags */
  753. p5 = r7;
  754. r7 = [p5 + TI_FLAGS];
  755. r4.l = lo(_TIF_WORK_MASK);
  756. r4.h = hi(_TIF_WORK_MASK);
  757. r7 = r7 & r4;
  758. cc = r7 == 0;
  759. if cc jump 4f;
  760. p0.l = lo(EVT15);
  761. p0.h = hi(EVT15);
  762. p1.l = _schedule_and_signal;
  763. p1.h = _schedule_and_signal;
  764. [p0] = p1;
  765. csync;
  766. raise 15; /* raise evt15 to do signal or reschedule */
  767. 4:
  768. r0 = syscfg;
  769. bitclr(r0, SYSCFG_SSSTEP_P); /* Turn off single step */
  770. syscfg = r0;
  771. 5:
  772. rts;
  773. ENDPROC(_ret_from_exception)
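/*
 * The "sp & ALIGN_PAGE_MASK" pattern used here and in the syscall path
 * is the usual thread_info trick: the kernel stack is aligned to its own
 * size, so masking any kernel SP yields the stack base, where struct
 * thread_info lives. Sketch (assuming ALIGN_PAGE_MASK == ~(THREAD_SIZE - 1)):
 *
 *	static struct thread_info *ti_from_sp(unsigned long sp)
 *	{
 *		return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
 *	}
 *
 * TI_FLAGS is then tested against _TIF_WORK_MASK to decide whether to
 * raise IRQ15 for signal or reschedule work.
 */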
  774. #if defined(CONFIG_PREEMPT)
  775. ENTRY(_up_to_irq14)
  776. #if ANOMALY_05000281 || ANOMALY_05000461
  777. r0.l = lo(SAFE_USER_INSTRUCTION);
  778. r0.h = hi(SAFE_USER_INSTRUCTION);
  779. reti = r0;
  780. #endif
  781. #ifdef CONFIG_DEBUG_HWERR
  782. /* enable irq14 & hwerr interrupt, until we transition to _evt_evt14 */
  783. r0 = (EVT_IVG14 | EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
  784. #else
  785. /* Only enable irq14 interrupt, until we transition to _evt_evt14 */
  786. r0 = (EVT_IVG14 | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
  787. #endif
  788. sti r0;
  789. p0.l = lo(EVT14);
  790. p0.h = hi(EVT14);
  791. p1.l = _evt_up_evt14;
  792. p1.h = _evt_up_evt14;
  793. [p0] = p1;
  794. csync;
  795. raise 14;
  796. 1:
  797. jump 1b;
  798. ENDPROC(_up_to_irq14)
  799. ENTRY(_evt_up_evt14)
  800. #ifdef CONFIG_DEBUG_HWERR
  801. r0 = (EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
  802. sti r0;
  803. #else
  804. cli r0;
  805. #endif
  806. #ifdef CONFIG_TRACE_IRQFLAGS
  807. [--sp] = rets;
  808. sp += -12;
  809. call _trace_hardirqs_off;
  810. sp += 12;
  811. rets = [sp++];
  812. #endif
  813. [--sp] = RETI;
  814. SP += 4;
  815. /* restore normal evt14 */
  816. p0.l = lo(EVT14);
  817. p0.h = hi(EVT14);
  818. p1.l = _evt_evt14;
  819. p1.h = _evt_evt14;
  820. [p0] = p1;
  821. csync;
  822. rts;
  823. ENDPROC(_evt_up_evt14)
  824. #endif
  825. #ifdef CONFIG_IPIPE
  826. _resume_kernel_from_int:
  827. r1 = LO(~0x8000) (Z);
  828. r1 = r0 & r1;
  829. r0 = 1;
  830. r0 = r1 - r0;
  831. r2 = r1 & r0;
  832. cc = r2 == 0;
  833. /* Sync the root stage only from the outer interrupt level. */
  834. if !cc jump .Lnosync;
  835. r0.l = ___ipipe_sync_root;
  836. r0.h = ___ipipe_sync_root;
  837. [--sp] = reti;
  838. [--sp] = rets;
  839. [--sp] = ( r7:4, p5:3 );
  840. SP += -12;
  841. call ___ipipe_call_irqtail;
  842. SP += 12;
  843. ( r7:4, p5:3 ) = [sp++];
  844. rets = [sp++];
  845. reti = [sp++];
  846. .Lnosync:
  847. rts;
  848. #elif defined(CONFIG_PREEMPT)
  849. _resume_kernel_from_int:
  850. /* check preempt_count */
  851. r7 = sp;
  852. r4.l = lo(ALIGN_PAGE_MASK);
  853. r4.h = hi(ALIGN_PAGE_MASK);
  854. r7 = r7 & r4;
  855. p5 = r7;
  856. r7 = [p5 + TI_PREEMPT];
  857. cc = r7 == 0x0;
  858. if !cc jump .Lreturn_to_kernel;
  859. .Lneed_schedule:
  860. r7 = [p5 + TI_FLAGS];
  861. r4.l = lo(_TIF_WORK_MASK);
  862. r4.h = hi(_TIF_WORK_MASK);
  863. r7 = r7 & r4;
  864. cc = BITTST(r7, TIF_NEED_RESCHED);
  865. if !cc jump .Lreturn_to_kernel;
  866. /*
  867. * let scheduling be done at level 15; otherwise the scheduled process will run
  868. * at a high level and block low-level interrupts
  869. */
  870. r6 = reti; /* save reti */
  871. r5.l = .Lkernel_schedule;
  872. r5.h = .Lkernel_schedule;
  873. reti = r5;
  874. rti;
  875. .Lkernel_schedule:
  876. [--sp] = rets;
  877. sp += -12;
  878. pseudo_long_call _preempt_schedule_irq, p4;
  879. sp += 12;
  880. rets = [sp++];
  881. [--sp] = rets;
  882. sp += -12;
  883. /* go up to irq14 so that reti after restore_all can return to irq15 (kernel) */
  884. pseudo_long_call _up_to_irq14, p4;
  885. sp += 12;
  886. rets = [sp++];
  887. reti = r6; /* restore reti so that the original process can return to the interrupted point */
  888. jump .Lneed_schedule;
  889. #else
  890. #define _resume_kernel_from_int .Lreturn_to_kernel
  891. #endif
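/*
 * With CONFIG_PREEMPT the decision above boils down to (sketch; the
 * struct is illustrative, the real fields are TI_PREEMPT and TI_FLAGS):
 *
 *	struct thread_info_sketch { unsigned long flags; int preempt_count; };
 *
 *	// Preempt only when preempt_count is zero and TIF_NEED_RESCHED is
 *	// set; the call to preempt_schedule_irq is made only after RTI
 *	// (at .Lkernel_schedule) so the scheduled process does not run at
 *	// interrupt priority.
 *	static int should_preempt(const struct thread_info_sketch *ti)
 *	{
 *		return ti->preempt_count == 0 &&
 *		       ((ti->flags & _TIF_WORK_MASK) >> TIF_NEED_RESCHED) & 1;
 *	}
 */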
  892. ENTRY(_return_from_int)
  893. /* If someone else already raised IRQ 15, do nothing. */
  894. csync;
  895. p2.l = lo(ILAT);
  896. p2.h = hi(ILAT);
  897. r0 = [p2];
  898. cc = bittst (r0, EVT_IVG15_P);
  899. if cc jump .Lreturn_to_kernel;
  900. /* if not return to user mode, get out */
  901. p2.l = lo(IPEND);
  902. p2.h = hi(IPEND);
  903. r0 = [p2];
  904. r1 = 0x17(Z);
  905. r2 = ~r1;
  906. r2.h = 0;
  907. r0 = r2 & r0;
  908. r1 = 1;
  909. r1 = r0 - r1;
  910. r2 = r0 & r1;
  911. cc = r2 == 0;
  912. if !cc jump _resume_kernel_from_int;
  913. /* Lower the interrupt level to 15. */
  914. p0.l = lo(EVT15);
  915. p0.h = hi(EVT15);
  916. p1.l = _schedule_and_signal_from_int;
  917. p1.h = _schedule_and_signal_from_int;
  918. [p0] = p1;
  919. csync;
  920. #if ANOMALY_05000281 || ANOMALY_05000461
  921. r0.l = lo(SAFE_USER_INSTRUCTION);
  922. r0.h = hi(SAFE_USER_INSTRUCTION);
  923. reti = r0;
  924. #endif
  925. r0 = 0x801f (z);
  926. STI r0;
  927. raise 15; /* raise evt15 to do signal or reschedule */
  928. rti;
  929. .Lreturn_to_kernel:
  930. rts;
  931. ENDPROC(_return_from_int)
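/*
 * The IPEND juggling above ("r1 = r0 - 1; r2 = r0 & r1") is the classic
 * single-bit test: after clearing the EMU/RST/NMI and global-disable
 * bits (0x17), exactly one bit may remain set, i.e. the current
 * interrupt must be the only outstanding one before we lower to IRQ15.
 * Sketch:
 *
 *	static int only_one_level_pending(unsigned long ipend)
 *	{
 *		unsigned long x = ipend & ~0x17UL & 0xffff;
 *
 *		// x & (x - 1) clears the lowest set bit; zero therefore
 *		// means at most one bit was set (here: exactly one, since
 *		// our own interrupt level is always pending).
 *		return (x & (x - 1)) == 0;
 *	}
 */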
  932. ENTRY(_lower_to_irq14)
  933. #if ANOMALY_05000281 || ANOMALY_05000461
  934. r0.l = lo(SAFE_USER_INSTRUCTION);
  935. r0.h = hi(SAFE_USER_INSTRUCTION);
  936. reti = r0;
  937. #endif
  938. #ifdef CONFIG_DEBUG_HWERR
  939. /* enable irq14 & hwerr interrupt, until we transition to _evt_evt14 */
  940. r0 = (EVT_IVG14 | EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
  941. #else
  942. /* Only enable irq14 interrupt, until we transition to _evt_evt14 */
  943. r0 = (EVT_IVG14 | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
  944. #endif
  945. sti r0;
  946. raise 14;
  947. rti;
  948. ENDPROC(_lower_to_irq14)
  949. ENTRY(_evt_evt14)
  950. #ifdef CONFIG_DEBUG_HWERR
  951. r0 = (EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
  952. sti r0;
  953. #else
  954. cli r0;
  955. #endif
  956. #ifdef CONFIG_TRACE_IRQFLAGS
  957. [--sp] = rets;
  958. sp += -12;
  959. call _trace_hardirqs_off;
  960. sp += 12;
  961. rets = [sp++];
  962. #endif
  963. [--sp] = RETI;
  964. SP += 4;
  965. rts;
  966. ENDPROC(_evt_evt14)
  967. ENTRY(_schedule_and_signal_from_int)
  968. /* To end up here, vector 15 was changed - so we have to change it
  969. * back.
  970. */
  971. p0.l = lo(EVT15);
  972. p0.h = hi(EVT15);
  973. p1.l = _evt_system_call;
  974. p1.h = _evt_system_call;
  975. [p0] = p1;
  976. csync;
  977. /* Set orig_p0 to -1 to indicate this isn't the end of a syscall. */
  978. r0 = -1 (x);
  979. [sp + PT_ORIG_P0] = r0;
  980. p1 = rets;
  981. [sp + PT_RESERVED] = p1;
  982. #ifdef CONFIG_TRACE_IRQFLAGS
  983. /* trace_hardirqs_on() checks if all irqs are disabled. But here IRQ 15
  984. * is turned on, so disable all irqs. */
  985. cli r0;
  986. sp += -12;
  987. call _trace_hardirqs_on;
  988. sp += 12;
  989. #endif
  990. #ifdef CONFIG_SMP
  991. GET_PDA(p0, r0); /* Fetch current PDA (can't migrate to other CPU here) */
  992. r0 = [p0 + PDA_IRQFLAGS];
  993. #else
  994. p0.l = _bfin_irq_flags;
  995. p0.h = _bfin_irq_flags;
  996. r0 = [p0];
  997. #endif
  998. sti r0;
  999. /* finish the userspace "atomic" functions for it */
  1000. r1 = FIXED_CODE_END;
  1001. r2 = [sp + PT_PC];
  1002. cc = r1 <= r2;
  1003. if cc jump .Lresume_userspace (bp);
  1004. r0 = sp;
  1005. sp += -12;
  1006. pseudo_long_call _finish_atomic_sections, p5;
  1007. sp += 12;
  1008. jump.s .Lresume_userspace;
  1009. ENDPROC(_schedule_and_signal_from_int)
  1010. ENTRY(_schedule_and_signal)
  1011. SAVE_CONTEXT_SYSCALL
  1012. /* To end up here, vector 15 was changed - so we have to change it
  1013. * back.
  1014. */
  1015. p0.l = lo(EVT15);
  1016. p0.h = hi(EVT15);
  1017. p1.l = _evt_system_call;
  1018. p1.h = _evt_system_call;
  1019. [p0] = p1;
  1020. csync;
  1021. p0.l = 1f;
  1022. p0.h = 1f;
  1023. [sp + PT_RESERVED] = P0;
  1024. call .Lresume_userspace;
  1025. 1:
  1026. RESTORE_CONTEXT
  1027. rti;
  1028. ENDPROC(_schedule_and_signal)
  1029. /* We handle this 100% in exception space to reduce overhead.
  1030. * The only potential problem is if the software buffer gets swapped out of the
  1031. * CPLB table - then we double fault - so we don't let that happen elsewhere.
  1032. */
  1033. #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
  1034. ENTRY(_ex_trace_buff_full)
  1035. [--sp] = P3;
  1036. [--sp] = P2;
  1037. [--sp] = LC0;
  1038. [--sp] = LT0;
  1039. [--sp] = LB0;
  1040. P5.L = _trace_buff_offset;
  1041. P5.H = _trace_buff_offset;
  1042. P3 = [P5]; /* trace_buff_offset */
  1043. P5.L = lo(TBUFSTAT);
  1044. P5.H = hi(TBUFSTAT);
  1045. R7 = [P5];
  1046. R7 <<= 1; /* double, since we need to read twice */
  1047. LC0 = R7;
  1048. R7 <<= 2; /* need to shift over again,
  1049. * to get the number of bytes */
  1050. P5.L = lo(TBUF);
  1051. P5.H = hi(TBUF);
  1052. R6 = ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN)*1024) - 1;
  1053. P2 = R7;
  1054. P3 = P3 + P2;
  1055. R7 = P3;
  1056. R7 = R7 & R6;
  1057. P3 = R7;
  1058. P2.L = _trace_buff_offset;
  1059. P2.H = _trace_buff_offset;
  1060. [P2] = P3;
  1061. P2.L = _software_trace_buff;
  1062. P2.H = _software_trace_buff;
  1063. LSETUP (.Lstart, .Lend) LC0;
  1064. .Lstart:
  1065. R7 = [P5]; /* read TBUF */
  1066. P4 = P3 + P2;
  1067. [P4] = R7;
  1068. P3 += -4;
  1069. R7 = P3;
  1070. R7 = R7 & R6;
  1071. .Lend:
  1072. P3 = R7;
  1073. LB0 = [sp++];
  1074. LT0 = [sp++];
  1075. LC0 = [sp++];
  1076. P2 = [sp++];
  1077. P3 = [sp++];
  1078. jump _bfin_return_from_exception;
  1079. ENDPROC(_ex_trace_buff_full)
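/*
 * In C terms the drain loop above does roughly the following (sketch;
 * read_TBUF() is a hypothetical accessor for the TBUF register, and the
 * buffer size comes from CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN exactly as
 * in the code above):
 *
 *	extern unsigned long read_TBUF(void);
 *	#define TRACE_BUF_SIZE ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 1024)
 *
 *	// tbufstat = number of entries left in TBUF; each entry is read as
 *	// two 32-bit words, and the byte offset wraps modulo the buffer size.
 *	static void drain_tbuf_sketch(unsigned char *ring, unsigned long *offset,
 *				      unsigned int tbufstat)
 *	{
 *		unsigned int i, nwords = tbufstat * 2;
 *		unsigned long off = (*offset + nwords * 4) & (TRACE_BUF_SIZE - 1);
 *
 *		*offset = off;		// the stored offset is advanced first
 *		for (i = 0; i < nwords; i++) {
 *			*(unsigned long *)(ring + off) = read_TBUF();
 *			off = (off - 4) & (TRACE_BUF_SIZE - 1);
 *		}
 *	}
 */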
  1080. #if CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN == 4
  1081. .data
  1082. #else
  1083. .section .l1.data.B
  1084. #endif /* CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN */
  1085. ENTRY(_trace_buff_offset)
  1086. .long 0;
  1087. ALIGN
  1088. ENTRY(_software_trace_buff)
  1089. .rept ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN)*256);
  1090. .long 0
  1091. .endr
  1092. #endif /* CONFIG_DEBUG_BFIN_HWTRACE_EXPAND */
  1093. #ifdef CONFIG_EARLY_PRINTK
  1094. __INIT
  1095. ENTRY(_early_trap)
  1096. SAVE_ALL_SYS
  1097. trace_buffer_stop(p0,r0);
  1098. ANOMALY_283_315_WORKAROUND(p4, r5)
  1099. /* Turn caches off, to ensure we don't get double exceptions */
  1100. P4.L = LO(IMEM_CONTROL);
  1101. P4.H = HI(IMEM_CONTROL);
  1102. R5 = [P4]; /* Control Register*/
  1103. BITCLR(R5,ENICPLB_P);
  1104. CSYNC; /* Disabling of CPLBs should be preceded by a CSYNC */
  1105. [P4] = R5;
  1106. SSYNC;
  1107. P4.L = LO(DMEM_CONTROL);
  1108. P4.H = HI(DMEM_CONTROL);
  1109. R5 = [P4];
  1110. BITCLR(R5,ENDCPLB_P);
  1111. CSYNC; /* Disabling of CPLBs should be preceded by a CSYNC */
  1112. [P4] = R5;
  1113. SSYNC;
  1114. r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */
  1115. r1 = RETX;
  1116. SP += -12;
  1117. call _early_trap_c;
  1118. SP += 12;
  1119. ENDPROC(_early_trap)
  1120. __FINIT
  1121. #endif /* CONFIG_EARLY_PRINTK */
  1122. /*
  1123. * Put these in the kernel data section - that should always be covered by
  1124. * a CPLB. This is needed to ensure we don't get double fault conditions
  1125. */
  1126. #ifdef CONFIG_SYSCALL_TAB_L1
  1127. .section .l1.data
  1128. #else
  1129. .data
  1130. #endif
  1131. ENTRY(_ex_table)
  1132. /* entry for each EXCAUSE[5:0]
  1133. * This table must be in sync with the table in ./kernel/traps.c
  1134. * The EXCPT instruction can provide 4 bits of EXCAUSE, allowing 16 entries to be user defined
  1135. */
  1136. .long _ex_syscall /* 0x00 - User Defined - Linux Syscall */
  1137. .long _ex_trap_c /* 0x01 - User Defined - Software breakpoint */
  1138. #ifdef CONFIG_KGDB
  1139. .long _ex_trap_c /* 0x02 - User Defined - KGDB initial connection
  1140. and break signal trap */
  1141. #else
  1142. .long _ex_replaceable /* 0x02 - User Defined */
  1143. #endif
  1144. .long _ex_trap_c /* 0x03 - User Defined - userspace stack overflow */
  1145. .long _ex_trap_c /* 0x04 - User Defined - dump trace buffer */
  1146. .long _ex_replaceable /* 0x05 - User Defined */
  1147. .long _ex_replaceable /* 0x06 - User Defined */
  1148. .long _ex_replaceable /* 0x07 - User Defined */
  1149. .long _ex_replaceable /* 0x08 - User Defined */
  1150. .long _ex_replaceable /* 0x09 - User Defined */
  1151. .long _ex_replaceable /* 0x0A - User Defined */
  1152. .long _ex_replaceable /* 0x0B - User Defined */
  1153. .long _ex_replaceable /* 0x0C - User Defined */
  1154. .long _ex_replaceable /* 0x0D - User Defined */
  1155. .long _ex_replaceable /* 0x0E - User Defined */
  1156. .long _ex_replaceable /* 0x0F - User Defined */
  1157. .long _ex_single_step /* 0x10 - HW Single step */
  1158. #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
  1159. .long _ex_trace_buff_full /* 0x11 - Trace Buffer Full */
  1160. #else
  1161. .long _ex_trap_c /* 0x11 - Trace Buffer Full */
  1162. #endif
  1163. .long _ex_trap_c /* 0x12 - Reserved */
  1164. .long _ex_trap_c /* 0x13 - Reserved */
  1165. .long _ex_trap_c /* 0x14 - Reserved */
  1166. .long _ex_trap_c /* 0x15 - Reserved */
  1167. .long _ex_trap_c /* 0x16 - Reserved */
  1168. .long _ex_trap_c /* 0x17 - Reserved */
  1169. .long _ex_trap_c /* 0x18 - Reserved */
  1170. .long _ex_trap_c /* 0x19 - Reserved */
  1171. .long _ex_trap_c /* 0x1A - Reserved */
  1172. .long _ex_trap_c /* 0x1B - Reserved */
  1173. .long _ex_trap_c /* 0x1C - Reserved */
  1174. .long _ex_trap_c /* 0x1D - Reserved */
  1175. .long _ex_trap_c /* 0x1E - Reserved */
  1176. .long _ex_trap_c /* 0x1F - Reserved */
  1177. .long _ex_trap_c /* 0x20 - Reserved */
  1178. .long _ex_trap_c /* 0x21 - Undefined Instruction */
  1179. .long _ex_trap_c /* 0x22 - Illegal Instruction Combination */
  1180. .long _ex_dviol /* 0x23 - Data CPLB Protection Violation */
  1181. .long _ex_trap_c /* 0x24 - Data access misaligned */
  1182. .long _ex_trap_c /* 0x25 - Unrecoverable Event */
  1183. .long _ex_dmiss /* 0x26 - Data CPLB Miss */
  1184. .long _ex_dmult /* 0x27 - Data CPLB Multiple Hits - Linux Trap Zero */
  1185. .long _ex_trap_c /* 0x28 - Emulation Watchpoint */
  1186. .long _ex_trap_c /* 0x29 - Instruction fetch access error (535 only) */
  1187. .long _ex_trap_c /* 0x2A - Instruction fetch misaligned */
  1188. .long _ex_trap_c /* 0x2B - Instruction CPLB protection Violation */
  1189. .long _ex_icplb_miss /* 0x2C - Instruction CPLB miss */
  1190. .long _ex_trap_c /* 0x2D - Instruction CPLB Multiple Hits */
  1191. .long _ex_trap_c /* 0x2E - Illegal use of Supervisor Resource */
  1192. .long _ex_trap_c /* 0x2E - Illegal use of Supervisor Resource */
  1193. .long _ex_trap_c /* 0x2F - Reserved */
  1194. .long _ex_trap_c /* 0x30 - Reserved */
  1195. .long _ex_trap_c /* 0x31 - Reserved */
  1196. .long _ex_trap_c /* 0x32 - Reserved */
  1197. .long _ex_trap_c /* 0x33 - Reserved */
  1198. .long _ex_trap_c /* 0x34 - Reserved */
  1199. .long _ex_trap_c /* 0x35 - Reserved */
  1200. .long _ex_trap_c /* 0x36 - Reserved */
  1201. .long _ex_trap_c /* 0x37 - Reserved */
  1202. .long _ex_trap_c /* 0x38 - Reserved */
  1203. .long _ex_trap_c /* 0x39 - Reserved */
  1204. .long _ex_trap_c /* 0x3A - Reserved */
  1205. .long _ex_trap_c /* 0x3B - Reserved */
  1206. .long _ex_trap_c /* 0x3C - Reserved */
  1207. .long _ex_trap_c /* 0x3D - Reserved */
  1208. .long _ex_trap_c /* 0x3E - Reserved */
  1209. .long _ex_trap_c /* 0x3F - Reserved */
  1210. END(_ex_table)
  1211. ENTRY(_sys_call_table)
  1212. .long _sys_restart_syscall /* 0 */
  1213. .long _sys_exit
  1214. .long _sys_fork
  1215. .long _sys_read
  1216. .long _sys_write
  1217. .long _sys_open /* 5 */
  1218. .long _sys_close
  1219. .long _sys_ni_syscall /* old waitpid */
  1220. .long _sys_creat
  1221. .long _sys_link
  1222. .long _sys_unlink /* 10 */
  1223. .long _sys_execve
  1224. .long _sys_chdir
  1225. .long _sys_time
  1226. .long _sys_mknod
  1227. .long _sys_chmod /* 15 */
  1228. .long _sys_chown /* chown16 */
  1229. .long _sys_ni_syscall /* old break syscall holder */
  1230. .long _sys_ni_syscall /* old stat */
  1231. .long _sys_lseek
  1232. .long _sys_getpid /* 20 */
  1233. .long _sys_mount
  1234. .long _sys_ni_syscall /* old umount */
  1235. .long _sys_setuid
  1236. .long _sys_getuid
  1237. .long _sys_stime /* 25 */
  1238. .long _sys_ptrace
  1239. .long _sys_alarm
  1240. .long _sys_ni_syscall /* old fstat */
  1241. .long _sys_pause
  1242. .long _sys_ni_syscall /* old utime */ /* 30 */
  1243. .long _sys_ni_syscall /* old stty syscall holder */
  1244. .long _sys_ni_syscall /* old gtty syscall holder */
  1245. .long _sys_access
  1246. .long _sys_nice
  1247. .long _sys_ni_syscall /* 35 */ /* old ftime syscall holder */
  1248. .long _sys_sync
  1249. .long _sys_kill
  1250. .long _sys_rename
  1251. .long _sys_mkdir
  1252. .long _sys_rmdir /* 40 */
  1253. .long _sys_dup
  1254. .long _sys_pipe
  1255. .long _sys_times
  1256. .long _sys_ni_syscall /* old prof syscall holder */
  1257. .long _sys_brk /* 45 */
  1258. .long _sys_setgid
  1259. .long _sys_getgid
  1260. .long _sys_ni_syscall /* old sys_signal */
  1261. .long _sys_geteuid /* geteuid16 */
  1262. .long _sys_getegid /* getegid16 */ /* 50 */
  1263. .long _sys_acct
  1264. .long _sys_umount /* recycled never used phys() */
  1265. .long _sys_ni_syscall /* old lock syscall holder */
  1266. .long _sys_ioctl
  1267. .long _sys_fcntl /* 55 */
  1268. .long _sys_ni_syscall /* old mpx syscall holder */
  1269. .long _sys_setpgid
  1270. .long _sys_ni_syscall /* old ulimit syscall holder */
  1271. .long _sys_ni_syscall /* old old uname */
  1272. .long _sys_umask /* 60 */
  1273. .long _sys_chroot
  1274. .long _sys_ustat
  1275. .long _sys_dup2
  1276. .long _sys_getppid
  1277. .long _sys_getpgrp /* 65 */
  1278. .long _sys_setsid
  1279. .long _sys_ni_syscall /* old sys_sigaction */
  1280. .long _sys_sgetmask
  1281. .long _sys_ssetmask
  1282. .long _sys_setreuid /* setreuid16 */ /* 70 */
  1283. .long _sys_setregid /* setregid16 */
  1284. .long _sys_ni_syscall /* old sys_sigsuspend */
  1285. .long _sys_ni_syscall /* old sys_sigpending */
  1286. .long _sys_sethostname
  1287. .long _sys_setrlimit /* 75 */
  1288. .long _sys_ni_syscall /* old getrlimit */
  1289. .long _sys_getrusage
  1290. .long _sys_gettimeofday
  1291. .long _sys_settimeofday
  1292. .long _sys_getgroups /* getgroups16 */ /* 80 */
  1293. .long _sys_setgroups /* setgroups16 */
  1294. .long _sys_ni_syscall /* old_select */
  1295. .long _sys_symlink
  1296. .long _sys_ni_syscall /* old lstat */
  1297. .long _sys_readlink /* 85 */
  1298. .long _sys_uselib
  1299. .long _sys_ni_syscall /* sys_swapon */
  1300. .long _sys_reboot
  1301. .long _sys_ni_syscall /* old_readdir */
  1302. .long _sys_ni_syscall /* sys_mmap */ /* 90 */
  1303. .long _sys_munmap
  1304. .long _sys_truncate
  1305. .long _sys_ftruncate
  1306. .long _sys_fchmod
  1307. .long _sys_fchown /* fchown16 */ /* 95 */
  1308. .long _sys_getpriority
  1309. .long _sys_setpriority
  1310. .long _sys_ni_syscall /* old profil syscall holder */
  1311. .long _sys_statfs
  1312. .long _sys_fstatfs /* 100 */
  1313. .long _sys_ni_syscall
  1314. .long _sys_ni_syscall /* old sys_socketcall */
  1315. .long _sys_syslog
  1316. .long _sys_setitimer
  1317. .long _sys_getitimer /* 105 */
  1318. .long _sys_newstat
  1319. .long _sys_newlstat
  1320. .long _sys_newfstat
  1321. .long _sys_ni_syscall /* old uname */
  1322. .long _sys_ni_syscall /* iopl for i386 */ /* 110 */
  1323. .long _sys_vhangup
  1324. .long _sys_ni_syscall /* obsolete idle() syscall */
  1325. .long _sys_ni_syscall /* vm86old for i386 */
  1326. .long _sys_wait4
  1327. .long _sys_ni_syscall /* 115 */ /* sys_swapoff */
  1328. .long _sys_sysinfo
  1329. .long _sys_ni_syscall /* old sys_ipc */
  1330. .long _sys_fsync
  1331. .long _sys_ni_syscall /* old sys_sigreturn */
  1332. .long _sys_clone /* 120 */
  1333. .long _sys_setdomainname
  1334. .long _sys_newuname
  1335. .long _sys_ni_syscall /* old sys_modify_ldt */
  1336. .long _sys_adjtimex
  1337. .long _sys_mprotect /* 125 */
  1338. .long _sys_ni_syscall /* old sys_sigprocmask */
  1339. .long _sys_ni_syscall /* old "creat_module" */
  1340. .long _sys_init_module
  1341. .long _sys_delete_module
  1342. .long _sys_ni_syscall /* 130: old "get_kernel_syms" */
  1343. .long _sys_quotactl
  1344. .long _sys_getpgid
  1345. .long _sys_fchdir
  1346. .long _sys_bdflush
  1347. .long _sys_ni_syscall /* 135 */ /* sys_sysfs */
  1348. .long _sys_personality
  1349. .long _sys_ni_syscall /* for afs_syscall */
  1350. .long _sys_setfsuid /* setfsuid16 */
  1351. .long _sys_setfsgid /* setfsgid16 */
  1352. .long _sys_llseek /* 140 */
  1353. .long _sys_getdents
  1354. .long _sys_ni_syscall /* sys_select */
  1355. .long _sys_flock
  1356. .long _sys_msync
  1357. .long _sys_readv /* 145 */
  1358. .long _sys_writev
  1359. .long _sys_getsid
  1360. .long _sys_fdatasync
  1361. .long _sys_sysctl
  1362. .long _sys_mlock /* 150 */
  1363. .long _sys_munlock
  1364. .long _sys_mlockall
  1365. .long _sys_munlockall
  1366. .long _sys_sched_setparam
  1367. .long _sys_sched_getparam /* 155 */
  1368. .long _sys_sched_setscheduler
  1369. .long _sys_sched_getscheduler
  1370. .long _sys_sched_yield
  1371. .long _sys_sched_get_priority_max
  1372. .long _sys_sched_get_priority_min /* 160 */
  1373. .long _sys_sched_rr_get_interval
  1374. .long _sys_nanosleep
  1375. .long _sys_mremap
  1376. .long _sys_setresuid /* setresuid16 */
  1377. .long _sys_getresuid /* getresuid16 */ /* 165 */
  1378. .long _sys_ni_syscall /* for vm86 */
  1379. .long _sys_ni_syscall /* old "query_module" */
  1380. .long _sys_ni_syscall /* sys_poll */
  1381. .long _sys_ni_syscall /* old nfsservctl */
  1382. .long _sys_setresgid /* setresgid16 */ /* 170 */
  1383. .long _sys_getresgid /* getresgid16 */
  1384. .long _sys_prctl
  1385. .long _sys_rt_sigreturn
  1386. .long _sys_rt_sigaction
  1387. .long _sys_rt_sigprocmask /* 175 */
  1388. .long _sys_rt_sigpending
  1389. .long _sys_rt_sigtimedwait
  1390. .long _sys_rt_sigqueueinfo
  1391. .long _sys_rt_sigsuspend
  1392. .long _sys_pread64 /* 180 */
  1393. .long _sys_pwrite64
  1394. .long _sys_lchown /* lchown16 */
  1395. .long _sys_getcwd
  1396. .long _sys_capget
  1397. .long _sys_capset /* 185 */
  1398. .long _sys_sigaltstack
  1399. .long _sys_sendfile
  1400. .long _sys_ni_syscall /* streams1 */
  1401. .long _sys_ni_syscall /* streams2 */
  1402. .long _sys_vfork /* 190 */
  1403. .long _sys_getrlimit
  1404. .long _sys_mmap_pgoff
  1405. .long _sys_truncate64
  1406. .long _sys_ftruncate64
  1407. .long _sys_stat64 /* 195 */
  1408. .long _sys_lstat64
  1409. .long _sys_fstat64
  1410. .long _sys_chown
  1411. .long _sys_getuid
  1412. .long _sys_getgid /* 200 */
  1413. .long _sys_geteuid
  1414. .long _sys_getegid
  1415. .long _sys_setreuid
  1416. .long _sys_setregid
  1417. .long _sys_getgroups /* 205 */
  1418. .long _sys_setgroups
  1419. .long _sys_fchown
  1420. .long _sys_setresuid
  1421. .long _sys_getresuid
  1422. .long _sys_setresgid /* 210 */
  1423. .long _sys_getresgid
  1424. .long _sys_lchown
  1425. .long _sys_setuid
  1426. .long _sys_setgid
  1427. .long _sys_setfsuid /* 215 */
  1428. .long _sys_setfsgid
  1429. .long _sys_pivot_root
  1430. .long _sys_mincore
  1431. .long _sys_madvise
  1432. .long _sys_getdents64 /* 220 */
  1433. .long _sys_fcntl64
  1434. .long _sys_ni_syscall /* reserved for TUX */
  1435. .long _sys_ni_syscall
  1436. .long _sys_gettid
  1437. .long _sys_readahead /* 225 */
  1438. .long _sys_setxattr
  1439. .long _sys_lsetxattr
  1440. .long _sys_fsetxattr
  1441. .long _sys_getxattr
  1442. .long _sys_lgetxattr /* 230 */
  1443. .long _sys_fgetxattr
  1444. .long _sys_listxattr
  1445. .long _sys_llistxattr
  1446. .long _sys_flistxattr
  1447. .long _sys_removexattr /* 235 */
  1448. .long _sys_lremovexattr
  1449. .long _sys_fremovexattr
  1450. .long _sys_tkill
  1451. .long _sys_sendfile64
  1452. .long _sys_futex /* 240 */
  1453. .long _sys_sched_setaffinity
  1454. .long _sys_sched_getaffinity
  1455. .long _sys_ni_syscall /* sys_set_thread_area */
  1456. .long _sys_ni_syscall /* sys_get_thread_area */
  1457. .long _sys_io_setup /* 245 */
  1458. .long _sys_io_destroy
  1459. .long _sys_io_getevents
  1460. .long _sys_io_submit
  1461. .long _sys_io_cancel
  1462. .long _sys_ni_syscall /* 250 */ /* sys_alloc_hugepages */
  1463. .long _sys_ni_syscall /* sys_free_hugepages */
  1464. .long _sys_exit_group
  1465. .long _sys_lookup_dcookie
  1466. .long _sys_bfin_spinlock
  1467. .long _sys_epoll_create /* 255 */
  1468. .long _sys_epoll_ctl
  1469. .long _sys_epoll_wait
  1470. .long _sys_ni_syscall /* remap_file_pages */
  1471. .long _sys_set_tid_address
  1472. .long _sys_timer_create /* 260 */
  1473. .long _sys_timer_settime
  1474. .long _sys_timer_gettime
  1475. .long _sys_timer_getoverrun
  1476. .long _sys_timer_delete
  1477. .long _sys_clock_settime /* 265 */
  1478. .long _sys_clock_gettime
  1479. .long _sys_clock_getres
  1480. .long _sys_clock_nanosleep
  1481. .long _sys_statfs64
  1482. .long _sys_fstatfs64 /* 270 */
  1483. .long _sys_tgkill
  1484. .long _sys_utimes
  1485. .long _sys_fadvise64_64
  1486. .long _sys_ni_syscall /* vserver */
  1487. .long _sys_mbind /* 275 */
  1488. .long _sys_ni_syscall /* get_mempolicy */
  1489. .long _sys_ni_syscall /* set_mempolicy */
  1490. .long _sys_mq_open
  1491. .long _sys_mq_unlink
  1492. .long _sys_mq_timedsend /* 280 */
  1493. .long _sys_mq_timedreceive
  1494. .long _sys_mq_notify
  1495. .long _sys_mq_getsetattr
  1496. .long _sys_ni_syscall /* kexec_load */
  1497. .long _sys_waitid /* 285 */
  1498. .long _sys_add_key
  1499. .long _sys_request_key
  1500. .long _sys_keyctl
  1501. .long _sys_ioprio_set
  1502. .long _sys_ioprio_get /* 290 */
  1503. .long _sys_inotify_init
  1504. .long _sys_inotify_add_watch
  1505. .long _sys_inotify_rm_watch
  1506. .long _sys_ni_syscall /* migrate_pages */
  1507. .long _sys_openat /* 295 */
  1508. .long _sys_mkdirat
  1509. .long _sys_mknodat
  1510. .long _sys_fchownat
  1511. .long _sys_futimesat
  1512. .long _sys_fstatat64 /* 300 */
  1513. .long _sys_unlinkat
  1514. .long _sys_renameat
  1515. .long _sys_linkat
  1516. .long _sys_symlinkat
  1517. .long _sys_readlinkat /* 305 */
  1518. .long _sys_fchmodat
  1519. .long _sys_faccessat
  1520. .long _sys_pselect6
  1521. .long _sys_ppoll
  1522. .long _sys_unshare /* 310 */
  1523. .long _sys_sram_alloc
  1524. .long _sys_sram_free
  1525. .long _sys_dma_memcpy
  1526. .long _sys_accept
  1527. .long _sys_bind /* 315 */
  1528. .long _sys_connect
  1529. .long _sys_getpeername
  1530. .long _sys_getsockname
  1531. .long _sys_getsockopt
  1532. .long _sys_listen /* 320 */
  1533. .long _sys_recv
  1534. .long _sys_recvfrom
  1535. .long _sys_recvmsg
  1536. .long _sys_send
  1537. .long _sys_sendmsg /* 325 */
  1538. .long _sys_sendto
  1539. .long _sys_setsockopt
  1540. .long _sys_shutdown
  1541. .long _sys_socket
  1542. .long _sys_socketpair /* 330 */
  1543. .long _sys_semctl
  1544. .long _sys_semget
  1545. .long _sys_semop
  1546. .long _sys_msgctl
  1547. .long _sys_msgget /* 335 */
  1548. .long _sys_msgrcv
  1549. .long _sys_msgsnd
  1550. .long _sys_shmat
  1551. .long _sys_shmctl
  1552. .long _sys_shmdt /* 340 */
  1553. .long _sys_shmget
  1554. .long _sys_splice
  1555. .long _sys_sync_file_range
  1556. .long _sys_tee
  1557. .long _sys_vmsplice /* 345 */
  1558. .long _sys_epoll_pwait
  1559. .long _sys_utimensat
  1560. .long _sys_signalfd
  1561. .long _sys_timerfd_create
  1562. .long _sys_eventfd /* 350 */
  1563. .long _sys_pread64
  1564. .long _sys_pwrite64
  1565. .long _sys_fadvise64
  1566. .long _sys_set_robust_list
  1567. .long _sys_get_robust_list /* 355 */
  1568. .long _sys_fallocate
  1569. .long _sys_semtimedop
  1570. .long _sys_timerfd_settime
  1571. .long _sys_timerfd_gettime
  1572. .long _sys_signalfd4 /* 360 */
  1573. .long _sys_eventfd2
  1574. .long _sys_epoll_create1
  1575. .long _sys_dup3
  1576. .long _sys_pipe2
  1577. .long _sys_inotify_init1 /* 365 */
  1578. .long _sys_preadv
  1579. .long _sys_pwritev
  1580. .long _sys_rt_tgsigqueueinfo
  1581. .long _sys_perf_event_open
  1582. .long _sys_recvmmsg /* 370 */
  1583. .long _sys_fanotify_init
  1584. .long _sys_fanotify_mark
  1585. .long _sys_prlimit64
  1586. .long _sys_cacheflush
  1587. .long _sys_name_to_handle_at /* 375 */
  1588. .long _sys_open_by_handle_at
  1589. .long _sys_clock_adjtime
  1590. .long _sys_syncfs
  1591. .long _sys_setns
  1592. .long _sys_sendmmsg /* 380 */
  1593. .long _sys_process_vm_readv
  1594. .long _sys_process_vm_writev
  1595. .rept NR_syscalls-(.-_sys_call_table)/4
  1596. .long _sys_ni_syscall
  1597. .endr
  1598. END(_sys_call_table)