/* kgdb.c — x86 KGDB (kernel debugger) support. Page-scrape artifacts removed. */
  1. /*
  2. * This program is free software; you can redistribute it and/or modify it
  3. * under the terms of the GNU General Public License as published by the
  4. * Free Software Foundation; either version 2, or (at your option) any
  5. * later version.
  6. *
  7. * This program is distributed in the hope that it will be useful, but
  8. * WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  10. * General Public License for more details.
  11. *
  12. */
  13. /*
  14. * Copyright (C) 2004 Amit S. Kale <amitkale@linsyssoft.com>
  15. * Copyright (C) 2000-2001 VERITAS Software Corporation.
  16. * Copyright (C) 2002 Andi Kleen, SuSE Labs
  17. * Copyright (C) 2004 LinSysSoft Technologies Pvt. Ltd.
  18. * Copyright (C) 2007 MontaVista Software, Inc.
  19. * Copyright (C) 2007-2008 Jason Wessel, Wind River Systems, Inc.
  20. */
  21. /****************************************************************************
  22. * Contributor: Lake Stevens Instrument Division$
  23. * Written by: Glenn Engel $
  24. * Updated by: Amit Kale<akale@veritas.com>
  25. * Updated by: Tom Rini <trini@kernel.crashing.org>
  26. * Updated by: Jason Wessel <jason.wessel@windriver.com>
  27. * Modified for 386 by Jim Kingdon, Cygnus Support.
  28. * Origianl kgdb, compatibility with 2.1.xx kernel by
  29. * David Grothe <dave@gcom.com>
  30. * Integrated into 2.2.5 kernel by Tigran Aivazian <tigran@sco.com>
  31. * X86_64 changes from Andi Kleen's patch merged by Jim Houston
  32. */
  33. #include <linux/spinlock.h>
  34. #include <linux/kdebug.h>
  35. #include <linux/string.h>
  36. #include <linux/kernel.h>
  37. #include <linux/ptrace.h>
  38. #include <linux/sched.h>
  39. #include <linux/delay.h>
  40. #include <linux/kgdb.h>
  41. #include <linux/init.h>
  42. #include <linux/smp.h>
  43. #include <linux/nmi.h>
  44. #include <linux/hw_breakpoint.h>
  45. #include <linux/uaccess.h>
  46. #include <linux/memory.h>
  47. #include <asm/debugreg.h>
  48. #include <asm/apicdef.h>
  49. #include <asm/apic.h>
  50. #include <asm/nmi.h>
  51. struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
  52. {
  53. #ifdef CONFIG_X86_32
  54. { "ax", 4, offsetof(struct pt_regs, ax) },
  55. { "cx", 4, offsetof(struct pt_regs, cx) },
  56. { "dx", 4, offsetof(struct pt_regs, dx) },
  57. { "bx", 4, offsetof(struct pt_regs, bx) },
  58. { "sp", 4, offsetof(struct pt_regs, sp) },
  59. { "bp", 4, offsetof(struct pt_regs, bp) },
  60. { "si", 4, offsetof(struct pt_regs, si) },
  61. { "di", 4, offsetof(struct pt_regs, di) },
  62. { "ip", 4, offsetof(struct pt_regs, ip) },
  63. { "flags", 4, offsetof(struct pt_regs, flags) },
  64. { "cs", 4, offsetof(struct pt_regs, cs) },
  65. { "ss", 4, offsetof(struct pt_regs, ss) },
  66. { "ds", 4, offsetof(struct pt_regs, ds) },
  67. { "es", 4, offsetof(struct pt_regs, es) },
  68. #else
  69. { "ax", 8, offsetof(struct pt_regs, ax) },
  70. { "bx", 8, offsetof(struct pt_regs, bx) },
  71. { "cx", 8, offsetof(struct pt_regs, cx) },
  72. { "dx", 8, offsetof(struct pt_regs, dx) },
  73. { "si", 8, offsetof(struct pt_regs, dx) },
  74. { "di", 8, offsetof(struct pt_regs, di) },
  75. { "bp", 8, offsetof(struct pt_regs, bp) },
  76. { "sp", 8, offsetof(struct pt_regs, sp) },
  77. { "r8", 8, offsetof(struct pt_regs, r8) },
  78. { "r9", 8, offsetof(struct pt_regs, r9) },
  79. { "r10", 8, offsetof(struct pt_regs, r10) },
  80. { "r11", 8, offsetof(struct pt_regs, r11) },
  81. { "r12", 8, offsetof(struct pt_regs, r12) },
  82. { "r13", 8, offsetof(struct pt_regs, r13) },
  83. { "r14", 8, offsetof(struct pt_regs, r14) },
  84. { "r15", 8, offsetof(struct pt_regs, r15) },
  85. { "ip", 8, offsetof(struct pt_regs, ip) },
  86. { "flags", 4, offsetof(struct pt_regs, flags) },
  87. { "cs", 4, offsetof(struct pt_regs, cs) },
  88. { "ss", 4, offsetof(struct pt_regs, ss) },
  89. { "ds", 4, -1 },
  90. { "es", 4, -1 },
  91. #endif
  92. { "fs", 4, -1 },
  93. { "gs", 4, -1 },
  94. };
  95. int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
  96. {
  97. if (
  98. #ifdef CONFIG_X86_32
  99. regno == GDB_SS || regno == GDB_FS || regno == GDB_GS ||
  100. #endif
  101. regno == GDB_SP || regno == GDB_ORIG_AX)
  102. return 0;
  103. if (dbg_reg_def[regno].offset != -1)
  104. memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
  105. dbg_reg_def[regno].size);
  106. return 0;
  107. }
/*
 * dbg_get_reg - copy one register out of @regs into @mem for GDB.
 *
 * Returns the GDB register name, or NULL when @regno is out of range.
 * Registers whose dbg_reg_def offset is -1 are not saved in pt_regs;
 * on 32-bit some of them are synthesized in the switch below.
 */
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
	/* orig_ax has no dbg_reg_def[] slot; handle it specially. */
	if (regno == GDB_ORIG_AX) {
		memcpy(mem, &regs->orig_ax, sizeof(regs->orig_ax));
		return "orig_ax";
	}
	if (regno >= DBG_MAX_REG_NUM || regno < 0)
		return NULL;

	if (dbg_reg_def[regno].offset != -1)
		memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
		       dbg_reg_def[regno].size);

#ifdef CONFIG_X86_32
	switch (regno) {
	case GDB_SS:
		/* In kernel mode, report the kernel data segment. */
		if (!user_mode_vm(regs))
			*(unsigned long *)mem = __KERNEL_DS;
		break;
	case GDB_SP:
		/* In kernel mode, derive sp from the trap frame itself. */
		if (!user_mode_vm(regs))
			*(unsigned long *)mem = kernel_stack_pointer(regs);
		break;
	case GDB_GS:
	case GDB_FS:
		*(unsigned long *)mem = 0xFFFF;
		break;
	}
#endif
	return dbg_reg_def[regno].name;
}
  137. /**
  138. * sleeping_thread_to_gdb_regs - Convert ptrace regs to GDB regs
  139. * @gdb_regs: A pointer to hold the registers in the order GDB wants.
  140. * @p: The &struct task_struct of the desired process.
  141. *
  142. * Convert the register values of the sleeping process in @p to
  143. * the format that GDB expects.
  144. * This function is called when kgdb does not have access to the
  145. * &struct pt_regs and therefore it should fill the gdb registers
  146. * @gdb_regs with what has been saved in &struct thread_struct
  147. * thread field during switch_to.
  148. */
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
#ifndef CONFIG_X86_32
	/* On 64-bit, segment/flag slots are stored as packed 32-bit values. */
	u32 *gdb_regs32 = (u32 *)gdb_regs;
#endif
	/* Scratch registers are not preserved across switch_to; report 0. */
	gdb_regs[GDB_AX] = 0;
	gdb_regs[GDB_BX] = 0;
	gdb_regs[GDB_CX] = 0;
	gdb_regs[GDB_DX] = 0;
	gdb_regs[GDB_SI] = 0;
	gdb_regs[GDB_DI] = 0;
	/* The word at thread.sp is the saved frame pointer (see kernel-doc). */
	gdb_regs[GDB_BP] = *(unsigned long *)p->thread.sp;
#ifdef CONFIG_X86_32
	gdb_regs[GDB_DS] = __KERNEL_DS;
	gdb_regs[GDB_ES] = __KERNEL_DS;
	gdb_regs[GDB_PS] = 0;
	gdb_regs[GDB_CS] = __KERNEL_CS;
	gdb_regs[GDB_PC] = p->thread.ip;
	gdb_regs[GDB_SS] = __KERNEL_DS;
	gdb_regs[GDB_FS] = 0xFFFF;
	gdb_regs[GDB_GS] = 0xFFFF;
#else
	/* Flags word sits 8 bytes into the saved switch frame. */
	gdb_regs32[GDB_PS] = *(unsigned long *)(p->thread.sp + 8);
	gdb_regs32[GDB_CS] = __KERNEL_CS;
	gdb_regs32[GDB_SS] = __KERNEL_DS;
	gdb_regs[GDB_PC] = 0;
	gdb_regs[GDB_R8] = 0;
	gdb_regs[GDB_R9] = 0;
	gdb_regs[GDB_R10] = 0;
	gdb_regs[GDB_R11] = 0;
	gdb_regs[GDB_R12] = 0;
	gdb_regs[GDB_R13] = 0;
	gdb_regs[GDB_R14] = 0;
	gdb_regs[GDB_R15] = 0;
#endif
	gdb_regs[GDB_SP] = p->thread.sp;
}
/*
 * Shadow state for the HBP_NUM x86 hardware breakpoint slots.  The
 * per-cpu perf events in @pev perform the actual install/uninstall;
 * addr/len/type mirror what was last requested via kgdb_set_hw_break().
 */
static struct hw_breakpoint {
	unsigned		enabled;
	unsigned long		addr;
	int			len;
	int			type;
	struct perf_event	* __percpu *pev;
} breakinfo[HBP_NUM];

/* Shadow of DR7 used while dbg_is_early (before perf is available). */
static unsigned long early_dr7;
/*
 * Re-arm every enabled breakpoint in breakinfo[] on this cpu so the
 * hardware matches kgdb's shadow state when execution resumes.
 */
static void kgdb_correct_hw_break(void)
{
	int breakno;

	for (breakno = 0; breakno < HBP_NUM; breakno++) {
		struct perf_event *bp;
		struct arch_hw_breakpoint *info;
		int val;
		int cpu = raw_smp_processor_id();
		if (!breakinfo[breakno].enabled)
			continue;
		if (dbg_is_early) {
			/* No perf layer yet: program DR registers directly. */
			set_debugreg(breakinfo[breakno].addr, breakno);
			early_dr7 |= encode_dr7(breakno,
						breakinfo[breakno].len,
						breakinfo[breakno].type);
			set_debugreg(early_dr7, 7);
			continue;
		}
		bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu);
		info = counter_arch_bp(bp);
		/* Only (re)install events kgdb previously disabled. */
		if (bp->attr.disabled != 1)
			continue;
		/* Push the shadow addr/len/type into the perf event. */
		bp->attr.bp_addr = breakinfo[breakno].addr;
		bp->attr.bp_len = breakinfo[breakno].len;
		bp->attr.bp_type = breakinfo[breakno].type;
		info->address = breakinfo[breakno].addr;
		info->len = breakinfo[breakno].len;
		info->type = breakinfo[breakno].type;
		val = arch_install_hw_breakpoint(bp);
		if (!val)
			bp->attr.disabled = 0;
	}
	if (!dbg_is_early)
		hw_breakpoint_restore();
}
/*
 * Reserve a perf breakpoint slot for @breakno on every online cpu.
 * On failure the slots reserved so far are released again.
 * Returns 0 on success, -1 on failure.
 */
static int hw_break_reserve_slot(int breakno)
{
	int cpu;
	int cnt = 0;
	struct perf_event **pevent;

	/* Early debug programs DR registers directly; nothing to reserve. */
	if (dbg_is_early)
		return 0;

	for_each_online_cpu(cpu) {
		cnt++;
		pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
		if (dbg_reserve_bp_slot(*pevent))
			goto fail;
	}

	return 0;

fail:
	/*
	 * cnt counts the cpus visited, including the one that failed —
	 * release only the cnt-1 reservations that actually succeeded.
	 */
	for_each_online_cpu(cpu) {
		cnt--;
		if (!cnt)
			break;
		pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
		dbg_release_bp_slot(*pevent);
	}
	return -1;
}
  253. static int hw_break_release_slot(int breakno)
  254. {
  255. struct perf_event **pevent;
  256. int cpu;
  257. if (dbg_is_early)
  258. return 0;
  259. for_each_online_cpu(cpu) {
  260. pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
  261. if (dbg_release_bp_slot(*pevent))
  262. /*
  263. * The debugger is responsible for handing the retry on
  264. * remove failure.
  265. */
  266. return -1;
  267. }
  268. return 0;
  269. }
  270. static int
  271. kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
  272. {
  273. int i;
  274. for (i = 0; i < HBP_NUM; i++)
  275. if (breakinfo[i].addr == addr && breakinfo[i].enabled)
  276. break;
  277. if (i == HBP_NUM)
  278. return -1;
  279. if (hw_break_release_slot(i)) {
  280. printk(KERN_ERR "Cannot remove hw breakpoint at %lx\n", addr);
  281. return -1;
  282. }
  283. breakinfo[i].enabled = 0;
  284. return 0;
  285. }
/*
 * Tear down every kgdb hardware breakpoint: uninstall events still
 * armed on this cpu, then release each slot (or clear it from the
 * early DR7 shadow) and mark it free.
 */
static void kgdb_remove_all_hw_break(void)
{
	int i;
	int cpu = raw_smp_processor_id();
	struct perf_event *bp;

	for (i = 0; i < HBP_NUM; i++) {
		if (!breakinfo[i].enabled)
			continue;
		bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
		if (!bp->attr.disabled) {
			/* Still armed: disable it but keep the slot. */
			arch_uninstall_hw_breakpoint(bp);
			bp->attr.disabled = 1;
			continue;
		}
		if (dbg_is_early)
			early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
						 breakinfo[i].type);
		else if (hw_break_release_slot(i))
			printk(KERN_ERR "KGDB: hw bpt remove failed %lx\n",
			       breakinfo[i].addr);
		breakinfo[i].enabled = 0;
	}
}
/*
 * Arm a hardware breakpoint/watchpoint at @addr.  Picks the first
 * free breakinfo[] slot, translates the generic kgdb @bptype/@len
 * into x86 encodings, and reserves a perf slot on every cpu.
 * Returns 0 on success, -1 when no slot is free, the type/len is
 * unsupported, or reservation fails.
 */
static int
kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
{
	int i;

	for (i = 0; i < HBP_NUM; i++)
		if (!breakinfo[i].enabled)
			break;
	if (i == HBP_NUM)
		return -1;

	switch (bptype) {
	case BP_HARDWARE_BREAKPOINT:
		/* Execute breakpoints always use length 1. */
		len = 1;
		breakinfo[i].type = X86_BREAKPOINT_EXECUTE;
		break;
	case BP_WRITE_WATCHPOINT:
		breakinfo[i].type = X86_BREAKPOINT_WRITE;
		break;
	case BP_ACCESS_WATCHPOINT:
		breakinfo[i].type = X86_BREAKPOINT_RW;
		break;
	default:
		return -1;
	}
	switch (len) {
	case 1:
		breakinfo[i].len = X86_BREAKPOINT_LEN_1;
		break;
	case 2:
		breakinfo[i].len = X86_BREAKPOINT_LEN_2;
		break;
	case 4:
		breakinfo[i].len = X86_BREAKPOINT_LEN_4;
		break;
#ifdef CONFIG_X86_64
	case 8:
		breakinfo[i].len = X86_BREAKPOINT_LEN_8;
		break;
#endif
	default:
		return -1;
	}
	breakinfo[i].addr = addr;
	if (hw_break_reserve_slot(i)) {
		/* Roll back so the slot stays free. */
		breakinfo[i].addr = 0;
		return -1;
	}
	breakinfo[i].enabled = 1;

	return 0;
}
  358. /**
  359. * kgdb_disable_hw_debug - Disable hardware debugging while we in kgdb.
  360. * @regs: Current &struct pt_regs.
  361. *
  362. * This function will be called if the particular architecture must
  363. * disable hardware debugging while it is processing gdb packets or
  364. * handling exception.
  365. */
  366. static void kgdb_disable_hw_debug(struct pt_regs *regs)
  367. {
  368. int i;
  369. int cpu = raw_smp_processor_id();
  370. struct perf_event *bp;
  371. /* Disable hardware debugging while we are in kgdb: */
  372. set_debugreg(0UL, 7);
  373. for (i = 0; i < HBP_NUM; i++) {
  374. if (!breakinfo[i].enabled)
  375. continue;
  376. if (dbg_is_early) {
  377. early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
  378. breakinfo[i].type);
  379. continue;
  380. }
  381. bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
  382. if (bp->attr.disabled == 1)
  383. continue;
  384. arch_uninstall_hw_breakpoint(bp);
  385. bp->attr.disabled = 1;
  386. }
  387. }
  388. #ifdef CONFIG_SMP
  389. /**
  390. * kgdb_roundup_cpus - Get other CPUs into a holding pattern
  391. * @flags: Current IRQ state
  392. *
  393. * On SMP systems, we need to get the attention of the other CPUs
  394. * and get them be in a known state. This should do what is needed
  395. * to get the other CPUs to call kgdb_wait(). Note that on some arches,
  396. * the NMI approach is not used for rounding up all the CPUs. For example,
  397. * in case of MIPS, smp_call_function() is used to roundup CPUs. In
  398. * this case, we have to make sure that interrupts are enabled before
  399. * calling smp_call_function(). The argument to this function is
  400. * the flags that will be used when restoring the interrupts. There is
  401. * local_irq_save() call before kgdb_roundup_cpus().
  402. *
  403. * On non-SMP systems, this is not called.
  404. */
void kgdb_roundup_cpus(unsigned long flags)
{
	/* NMI the other cpus so they enter kgdb_nmicallback() (see handler). */
	apic->send_IPI_allbutself(APIC_DM_NMI);
}
  409. #endif
  410. /**
  411. * kgdb_arch_handle_exception - Handle architecture specific GDB packets.
  412. * @vector: The error vector of the exception that happened.
  413. * @signo: The signal number of the exception that happened.
  414. * @err_code: The error code of the exception that happened.
  415. * @remcom_in_buffer: The buffer of the packet we have read.
  416. * @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
  417. * @regs: The &struct pt_regs of the current process.
  418. *
  419. * This function MUST handle the 'c' and 's' command packets,
  420. * as well packets to set / remove a hardware breakpoint, if used.
  421. * If there are additional packets which the hardware needs to handle,
  422. * they are handled here. The code should return -1 if it wants to
  423. * process more packets, and a %0 or %1 if it wants to exit from the
  424. * kgdb callback.
  425. */
int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
			       char *remcomInBuffer, char *remcomOutBuffer,
			       struct pt_regs *linux_regs)
{
	unsigned long addr;
	char *ptr;

	switch (remcomInBuffer[0]) {
	case 'c':
	case 's':
		/* try to read optional parameter, pc unchanged if no parm */
		ptr = &remcomInBuffer[1];
		if (kgdb_hex2long(&ptr, &addr))
			linux_regs->ip = addr;
		/* fall through — c/s share the resume path with D/k */
	case 'D':
	case 'k':
		/* clear the trace bit */
		linux_regs->flags &= ~X86_EFLAGS_TF;
		atomic_set(&kgdb_cpu_doing_single_step, -1);

		/* set the trace bit if we're stepping */
		if (remcomInBuffer[0] == 's') {
			linux_regs->flags |= X86_EFLAGS_TF;
			atomic_set(&kgdb_cpu_doing_single_step,
				   raw_smp_processor_id());
		}

		return 0;
	}

	/* this means that we do not want to exit from the handler: */
	return -1;
}
static inline int
single_step_cont(struct pt_regs *regs, struct die_args *args)
{
	/*
	 * Single step exception from kernel space to user space so
	 * eat the exception and continue the process:
	 */
	printk(KERN_ERR "KGDB: trap/step from kernel to user space, "
			"resuming...\n");
	/* Inject a 'c' (continue) packet on behalf of the debugger. */
	kgdb_arch_handle_exception(args->trapnr, args->signr,
				   args->err, "c", "", regs);
	/*
	 * Reset the BS bit in dr6 (pointed by args->err) to
	 * denote completion of processing.  args->err smuggles a
	 * pointer to dr6; ERR_PTR() converts it back to a pointer.
	 */
	(*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP;

	return NOTIFY_STOP;
}
  473. static int was_in_debug_nmi[NR_CPUS];
  474. static int kgdb_nmi_handler(unsigned int cmd, struct pt_regs *regs)
  475. {
  476. switch (cmd) {
  477. case NMI_LOCAL:
  478. if (atomic_read(&kgdb_active) != -1) {
  479. /* KGDB CPU roundup */
  480. kgdb_nmicallback(raw_smp_processor_id(), regs);
  481. was_in_debug_nmi[raw_smp_processor_id()] = 1;
  482. touch_nmi_watchdog();
  483. return NMI_HANDLED;
  484. }
  485. break;
  486. case NMI_UNKNOWN:
  487. if (was_in_debug_nmi[raw_smp_processor_id()]) {
  488. was_in_debug_nmi[raw_smp_processor_id()] = 0;
  489. return NMI_HANDLED;
  490. }
  491. break;
  492. default:
  493. /* do nothing */
  494. break;
  495. }
  496. return NMI_DONE;
  497. }
/*
 * Common die-notifier body: decide whether this event belongs to kgdb
 * and, if so, hand it to kgdb_handle_exception().
 */
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
	struct pt_regs *regs = args->regs;

	switch (cmd) {
	case DIE_DEBUG:
		if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
			/* A kgdb step that landed in user space: resume. */
			if (user_mode(regs))
				return single_step_cont(regs, args);
			break;
		} else if (test_thread_flag(TIF_SINGLESTEP))
			/* This means a user thread is single stepping
			 * a system call which should be ignored
			 */
			return NOTIFY_DONE;
		/* fall through */
	default:
		/* kgdb only handles kernel-mode exceptions. */
		if (user_mode(regs))
			return NOTIFY_DONE;
	}

	if (kgdb_handle_exception(args->trapnr, args->signr, cmd, regs))
		return NOTIFY_DONE;

	/* Must touch watchdog before return to normal operation */
	touch_nmi_watchdog();
	return NOTIFY_STOP;
}
  523. int kgdb_ll_trap(int cmd, const char *str,
  524. struct pt_regs *regs, long err, int trap, int sig)
  525. {
  526. struct die_args args = {
  527. .regs = regs,
  528. .str = str,
  529. .err = err,
  530. .trapnr = trap,
  531. .signr = sig,
  532. };
  533. if (!kgdb_io_module_registered)
  534. return NOTIFY_DONE;
  535. return __kgdb_notify(&args, cmd);
  536. }
  537. static int
  538. kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
  539. {
  540. unsigned long flags;
  541. int ret;
  542. local_irq_save(flags);
  543. ret = __kgdb_notify(ptr, cmd);
  544. local_irq_restore(flags);
  545. return ret;
  546. }
/* Hook on the die chain; registered/unregistered in arch init/exit. */
static struct notifier_block kgdb_notifier = {
	.notifier_call	= kgdb_notify,
};
  550. /**
  551. * kgdb_arch_init - Perform any architecture specific initalization.
  552. *
  553. * This function will handle the initalization of any architecture
  554. * specific callbacks.
  555. */
int kgdb_arch_init(void)
{
	int retval;

	retval = register_die_notifier(&kgdb_notifier);
	if (retval)
		goto out;
	/* NMI_LOCAL handles the cpu-roundup NMIs... */
	retval = register_nmi_handler(NMI_LOCAL, kgdb_nmi_handler,
					0, "kgdb");
	if (retval)
		goto out1;
	/* ...NMI_UNKNOWN absorbs leftover roundup NMIs (see handler). */
	retval = register_nmi_handler(NMI_UNKNOWN, kgdb_nmi_handler,
					0, "kgdb");
	if (retval)
		goto out2;

	return retval;

	/* Unwind partial registration in reverse order on failure. */
out2:
	unregister_nmi_handler(NMI_LOCAL, "kgdb");
out1:
	unregister_die_notifier(&kgdb_notifier);
out:
	return retval;
}
  578. static void kgdb_hw_overflow_handler(struct perf_event *event,
  579. struct perf_sample_data *data, struct pt_regs *regs)
  580. {
  581. struct task_struct *tsk = current;
  582. int i;
  583. for (i = 0; i < 4; i++)
  584. if (breakinfo[i].enabled)
  585. tsk->thread.debugreg6 |= (DR_TRAP0 << i);
  586. }
void kgdb_arch_late(void)
{
	int i, cpu;
	struct perf_event_attr attr;
	struct perf_event **pevent;

	/*
	 * Pre-allocate the hw breakpoint structions in the non-atomic
	 * portion of kgdb because this operation requires mutexs to
	 * complete.
	 */
	hw_breakpoint_init(&attr);
	/* Placeholder target; real addr/len/type are set when armed. */
	attr.bp_addr = (unsigned long)kgdb_arch_init;
	attr.bp_len = HW_BREAKPOINT_LEN_1;
	attr.bp_type = HW_BREAKPOINT_W;
	attr.disabled = 1;

	for (i = 0; i < HBP_NUM; i++) {
		if (breakinfo[i].pev)
			continue;	/* already allocated */
		breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL, NULL);
		if (IS_ERR((void * __force)breakinfo[i].pev)) {
			printk(KERN_ERR "kgdb: Could not allocate hw"
			       "breakpoints\nDisabling the kernel debugger\n");
			breakinfo[i].pev = NULL;
			/* Undo whatever was set up so far. */
			kgdb_arch_exit();
			return;
		}
		for_each_online_cpu(cpu) {
			pevent = per_cpu_ptr(breakinfo[i].pev, cpu);
			pevent[0]->hw.sample_period = 1;
			/* kgdb drives these events itself. */
			pevent[0]->overflow_handler = kgdb_hw_overflow_handler;
			if (pevent[0]->destroy != NULL) {
				pevent[0]->destroy = NULL;
				release_bp_slot(*pevent);
			}
		}
	}
}
  624. /**
  625. * kgdb_arch_exit - Perform any architecture specific uninitalization.
  626. *
  627. * This function will handle the uninitalization of any architecture
  628. * specific callbacks, for dynamic registration and unregistration.
  629. */
  630. void kgdb_arch_exit(void)
  631. {
  632. int i;
  633. for (i = 0; i < 4; i++) {
  634. if (breakinfo[i].pev) {
  635. unregister_wide_hw_breakpoint(breakinfo[i].pev);
  636. breakinfo[i].pev = NULL;
  637. }
  638. }
  639. unregister_nmi_handler(NMI_UNKNOWN, "kgdb");
  640. unregister_nmi_handler(NMI_LOCAL, "kgdb");
  641. unregister_die_notifier(&kgdb_notifier);
  642. }
  643. /**
  644. *
  645. * kgdb_skipexception - Bail out of KGDB when we've been triggered.
  646. * @exception: Exception vector number
  647. * @regs: Current &struct pt_regs.
  648. *
  649. * On some architectures we need to skip a breakpoint exception when
  650. * it occurs after a breakpoint has been removed.
  651. *
  652. * Skip an int3 exception when it occurs after a breakpoint has been
  653. * removed. Backtrack eip by 1 since the int3 would have caused it to
  654. * increment by 1.
  655. */
  656. int kgdb_skipexception(int exception, struct pt_regs *regs)
  657. {
  658. if (exception == 3 && kgdb_isremovedbreak(regs->ip - 1)) {
  659. regs->ip -= 1;
  660. return 1;
  661. }
  662. return 0;
  663. }
  664. unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
  665. {
  666. if (exception == 3)
  667. return instruction_pointer(regs) - 1;
  668. return instruction_pointer(regs);
  669. }
/* Redirect execution: resume at @ip when the debugger continues. */
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
	regs->ip = ip;
}
/*
 * Plant a software breakpoint at @bpt->bpt_addr: save the original
 * bytes, then write the int3 opcode — via a plain kernel write when
 * possible, falling back to text_poke() when the text is read-only.
 * Returns 0 on success or a negative error.
 */
int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
	int err;
	char opc[BREAK_INSTR_SIZE];

	bpt->type = BP_BREAKPOINT;
	/* Save the original instruction bytes for later restore. */
	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
				BREAK_INSTR_SIZE);
	if (err)
		return err;

	err = probe_kernel_write((char *)bpt->bpt_addr,
				 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
#ifdef CONFIG_DEBUG_RODATA
	if (!err)
		return err;
	/*
	 * It is safe to call text_poke() because normal kernel execution
	 * is stopped on all cores, so long as the text_mutex is not locked.
	 */
	if (mutex_is_locked(&text_mutex))
		return -EBUSY;
	text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
		  BREAK_INSTR_SIZE);
	/* Read back and verify the poke actually took effect. */
	err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
	if (err)
		return err;
	if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
		return -EINVAL;
	/* Mark it so removal knows to go through text_poke() too. */
	bpt->type = BP_POKE_BREAKPOINT;
#endif /* CONFIG_DEBUG_RODATA */
	return err;
}
/*
 * Remove a software breakpoint by restoring the saved instruction
 * bytes — via text_poke() for breakpoints that were planted that way,
 * otherwise (or when text_poke cannot be used safely) via a plain
 * kernel write.  Returns 0 on success or a negative error.
 */
int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
#ifdef CONFIG_DEBUG_RODATA
	int err;
	char opc[BREAK_INSTR_SIZE];

	if (bpt->type != BP_POKE_BREAKPOINT)
		goto knl_write;
	/*
	 * It is safe to call text_poke() because normal kernel execution
	 * is stopped on all cores, so long as the text_mutex is not locked.
	 */
	if (mutex_is_locked(&text_mutex))
		goto knl_write;
	text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
	/* Verify the original bytes made it back before reporting success. */
	err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
	if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
		goto knl_write;
	return err;

knl_write:
#endif /* CONFIG_DEBUG_RODATA */
	/* Fallback path: restore the saved bytes with a plain write. */
	return probe_kernel_write((char *)bpt->bpt_addr,
				  (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
}
/* Architecture hooks the generic kgdb core calls into. */
struct kgdb_arch arch_kgdb_ops = {
	/* Breakpoint instruction: 0xcc is the x86 int3 opcode. */
	.gdb_bpt_instr		= { 0xcc },
	.flags			= KGDB_HW_BREAKPOINT,
	.set_hw_breakpoint	= kgdb_set_hw_break,
	.remove_hw_breakpoint	= kgdb_remove_hw_break,
	.disable_hw_break	= kgdb_disable_hw_debug,
	.remove_all_hw_break	= kgdb_remove_all_hw_break,
	.correct_hw_break	= kgdb_correct_hw_break,
};