debug_core.c

/*
 * Kernel Debug Core
 *
 * Maintainer: Jason Wessel <jason.wessel@windriver.com>
 *
 * Copyright (C) 2000-2001 VERITAS Software Corporation.
 * Copyright (C) 2002-2004 Timesys Corporation
 * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
 * Copyright (C) 2004 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
 * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
 * Copyright (C) 2005-2009 Wind River Systems, Inc.
 * Copyright (C) 2007 MontaVista Software, Inc.
 * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Contributors at various stages not listed above:
 * Jason Wessel ( jason.wessel@windriver.com )
 * George Anzinger <george@mvista.com>
 * Anurekh Saxena (anurekh.saxena@timesys.com)
 * Lake Stevens Instrument Division (Glenn Engel)
 * Jim Kingdon, Cygnus Support.
 *
 * Original KGDB stub: David Grothe <dave@gcom.com>,
 * Tigran Aivazian <tigran@sco.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */
#include <linux/pid_namespace.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/console.h>
#include <linux/threads.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/kdb.h>
#include <linux/pid.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>

#include <asm/cacheflush.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/system.h>

#include "debug_core.h"

static int kgdb_break_asap;

struct debuggerinfo_struct kgdb_info[NR_CPUS];

/**
 * kgdb_connected - Is a host GDB connected to us?
 */
int kgdb_connected;
EXPORT_SYMBOL_GPL(kgdb_connected);

/* All the KGDB handlers are installed */
int kgdb_io_module_registered;

/* Guard for recursive entry */
static int exception_level;

struct kgdb_io *dbg_io_ops;
static DEFINE_SPINLOCK(kgdb_registration_lock);

/* kgdb console driver is loaded */
static int kgdb_con_registered;
/* determine if kgdb console output should be used */
static int kgdb_use_con;
/* Flag for alternate operations for early debugging */
bool dbg_is_early = true;
/* Next cpu to become the master debug core */
int dbg_switch_cpu;

/* Use kdb or gdbserver mode */
int dbg_kdb_mode = 1;

static int __init opt_kgdb_con(char *str)
{
	kgdb_use_con = 1;
	return 0;
}

early_param("kgdbcon", opt_kgdb_con);

module_param(kgdb_use_con, int, 0644);

/*
 * Holds information about breakpoints in a kernel. These breakpoints are
 * added and removed by gdb.
 */
static struct kgdb_bkpt kgdb_break[KGDB_MAX_BREAKPOINTS] = {
	[0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
};

/*
 * The CPU# of the active CPU, or -1 if none:
 */
atomic_t kgdb_active = ATOMIC_INIT(-1);
EXPORT_SYMBOL_GPL(kgdb_active);
static DEFINE_RAW_SPINLOCK(dbg_master_lock);
static DEFINE_RAW_SPINLOCK(dbg_slave_lock);

/*
 * We use NR_CPUs not PERCPU, in case kgdb is used to debug early
 * bootup code (which might not have percpu set up yet):
 */
static atomic_t masters_in_kgdb;
static atomic_t slaves_in_kgdb;
static atomic_t kgdb_break_tasklet_var;
atomic_t kgdb_setting_breakpoint;

struct task_struct *kgdb_usethread;
struct task_struct *kgdb_contthread;

int kgdb_single_step;
static pid_t kgdb_sstep_pid;

/* to keep track of the CPU which is doing the single stepping */
atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);

/*
 * If you are debugging a problem where roundup (the collection of
 * all other CPUs) is a problem [this should be extremely rare],
 * then use the nokgdbroundup option to avoid roundup. In that case
 * the other CPUs might interfere with your debugging context, so
 * use this with care:
 */
static int kgdb_do_roundup = 1;

static int __init opt_nokgdbroundup(char *str)
{
	kgdb_do_roundup = 0;

	return 0;
}

early_param("nokgdbroundup", opt_nokgdbroundup);

/*
 * Finally, some KGDB code :-)
 */

/*
 * Weak aliases for breakpoint management,
 * can be overridden by architectures when needed:
 */
int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
	int err;

	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
				BREAK_INSTR_SIZE);
	if (err)
		return err;
	err = probe_kernel_write((char *)bpt->bpt_addr,
				 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
	return err;
}

int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
	return probe_kernel_write((char *)bpt->bpt_addr,
				  (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
}

int __weak kgdb_validate_break_address(unsigned long addr)
{
	struct kgdb_bkpt tmp;
	int err;
	/* Validate setting the breakpoint and then removing it.  If the
	 * remove fails, the kernel needs to emit a bad message because we
	 * are in deep trouble, not being able to put things back the way we
	 * found them.
	 */
	tmp.bpt_addr = addr;
	err = kgdb_arch_set_breakpoint(&tmp);
	if (err)
		return err;
	err = kgdb_arch_remove_breakpoint(&tmp);
	if (err)
		printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
		       "memory destroyed at: %lx", addr);
	return err;
}

unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
{
	return instruction_pointer(regs);
}

int __weak kgdb_arch_init(void)
{
	return 0;
}

int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
{
	return 0;
}

/*
 * Some architectures need cache flushes when we set/clear a
 * breakpoint:
 */
static void kgdb_flush_swbreak_addr(unsigned long addr)
{
	if (!CACHE_FLUSH_IS_SAFE)
		return;

	if (current->mm && current->mm->mmap_cache) {
		flush_cache_range(current->mm->mmap_cache,
				  addr, addr + BREAK_INSTR_SIZE);
	}
	/* Force flush instruction cache if it was outside the mm */
	flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
}

/*
 * SW breakpoint management:
 */
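/*
 * Each slot in kgdb_break[] moves through a small state machine:
 * BP_UNDEFINED (free slot) -> BP_SET (reserved by dbg_set_sw_break()) ->
 * BP_ACTIVE (break instruction written by dbg_activate_sw_breakpoints()).
 * dbg_deactivate_sw_breakpoints() restores the saved instruction and drops
 * the slot back to BP_SET, dbg_remove_sw_break() marks it BP_REMOVED, and
 * dbg_remove_all_break() returns everything to BP_UNDEFINED.
 */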
int dbg_activate_sw_breakpoints(void)
{
	int error;
	int ret = 0;
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_SET)
			continue;

		error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
		if (error) {
			ret = error;
			printk(KERN_INFO "KGDB: BP install failed: %lx",
			       kgdb_break[i].bpt_addr);
			continue;
		}

		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
		kgdb_break[i].state = BP_ACTIVE;
	}
	return ret;
}

int dbg_set_sw_break(unsigned long addr)
{
	int err = kgdb_validate_break_address(addr);
	int breakno = -1;
	int i;

	if (err)
		return err;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_SET) &&
		    (kgdb_break[i].bpt_addr == addr))
			return -EEXIST;
	}
	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state == BP_REMOVED &&
		    kgdb_break[i].bpt_addr == addr) {
			breakno = i;
			break;
		}
	}

	if (breakno == -1) {
		for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
			if (kgdb_break[i].state == BP_UNDEFINED) {
				breakno = i;
				break;
			}
		}
	}

	if (breakno == -1)
		return -E2BIG;

	kgdb_break[breakno].state = BP_SET;
	kgdb_break[breakno].type = BP_BREAKPOINT;
	kgdb_break[breakno].bpt_addr = addr;

	return 0;
}

int dbg_deactivate_sw_breakpoints(void)
{
	int error;
	int ret = 0;
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_ACTIVE)
			continue;
		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
		if (error) {
			printk(KERN_INFO "KGDB: BP remove failed: %lx\n",
			       kgdb_break[i].bpt_addr);
			ret = error;
		}

		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
		kgdb_break[i].state = BP_SET;
	}
	return ret;
}

int dbg_remove_sw_break(unsigned long addr)
{
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_SET) &&
		    (kgdb_break[i].bpt_addr == addr)) {
			kgdb_break[i].state = BP_REMOVED;
			return 0;
		}
	}
	return -ENOENT;
}

int kgdb_isremovedbreak(unsigned long addr)
{
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_REMOVED) &&
		    (kgdb_break[i].bpt_addr == addr))
			return 1;
	}
	return 0;
}

int dbg_remove_all_break(void)
{
	int error;
	int i;

	/* Clear memory breakpoints. */
	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_ACTIVE)
			goto setundefined;
		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
		if (error)
			printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
			       kgdb_break[i].bpt_addr);
setundefined:
		kgdb_break[i].state = BP_UNDEFINED;
	}

	/* Clear hardware breakpoints. */
	if (arch_kgdb_ops.remove_all_hw_break)
		arch_kgdb_ops.remove_all_hw_break();

	return 0;
}

/*
 * Return true if there is a valid kgdb I/O module.  Also if no
 * debugger is attached a message can be printed to the console about
 * waiting for the debugger to attach.
 *
 * The print_wait argument is only to be true when called from inside
 * the core kgdb_handle_exception, because it will wait for the
 * debugger to attach.
 */
static int kgdb_io_ready(int print_wait)
{
	if (!dbg_io_ops)
		return 0;
	if (kgdb_connected)
		return 1;
	if (atomic_read(&kgdb_setting_breakpoint))
		return 1;
	if (print_wait) {
#ifdef CONFIG_KGDB_KDB
		if (!dbg_kdb_mode)
			printk(KERN_CRIT "KGDB: waiting... or $3#33 for KDB\n");
#else
		printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
#endif
	}
	return 1;
}
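
/*
 * Recursion guard: called on every exception entry to catch the case where
 * the CPU that already owns the debugger (kgdb_active) traps again, typically
 * because a breakpoint was planted on code the debugger itself needs.  It
 * tries to recover by removing the offending breakpoint (or, failing that,
 * all breakpoints) and panics if the debugger keeps re-entering.
 */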
static int kgdb_reenter_check(struct kgdb_state *ks)
{
	unsigned long addr;

	if (atomic_read(&kgdb_active) != raw_smp_processor_id())
		return 0;

	/* Panic on recursive debugger calls: */
	exception_level++;
	addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
	dbg_deactivate_sw_breakpoints();

	/*
	 * If the breakpoint was removed successfully at the place the
	 * exception occurred, try to recover and print a warning to the end
	 * user because the user planted a breakpoint in a place that
	 * KGDB needs in order to function.
	 */
	if (dbg_remove_sw_break(addr) == 0) {
		exception_level = 0;
		kgdb_skipexception(ks->ex_vector, ks->linux_regs);
		dbg_activate_sw_breakpoints();
		printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n",
			addr);
		WARN_ON_ONCE(1);

		return 1;
	}
	dbg_remove_all_break();
	kgdb_skipexception(ks->ex_vector, ks->linux_regs);

	if (exception_level > 1) {
		dump_stack();
		panic("Recursive entry to debugger");
	}

	printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n");
#ifdef CONFIG_KGDB_KDB
	/* Allow kdb to debug itself one level */
	return 0;
#endif
	dump_stack();
	panic("Recursive entry to debugger");

	return 1;
}

static void dbg_touch_watchdogs(void)
{
	touch_softlockup_watchdog_sync();
	clocksource_touch_watchdog();
	rcu_cpu_stall_reset();
}
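
/*
 * kgdb_cpu_enter() is the common entry point for every CPU that traps into
 * the debugger.  Exactly one CPU takes dbg_master_lock and becomes the
 * master (it owns kgdb_active and runs the kdb or gdb serial stub); all the
 * other CPUs spin here as slaves on dbg_slave_lock until the master
 * releases them.
 */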
static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
		int exception_state)
{
	unsigned long flags;
	int sstep_tries = 100;
	int error;
	int cpu;
	int trace_on = 0;
	int online_cpus = num_online_cpus();

	kgdb_info[ks->cpu].enter_kgdb++;
	kgdb_info[ks->cpu].exception_state |= exception_state;

	if (exception_state == DCPU_WANT_MASTER)
		atomic_inc(&masters_in_kgdb);
	else
		atomic_inc(&slaves_in_kgdb);

	if (arch_kgdb_ops.disable_hw_break)
		arch_kgdb_ops.disable_hw_break(regs);

acquirelock:
	/*
	 * Interrupts will be restored by the 'trap return' code, except when
	 * single stepping.
	 */
	local_irq_save(flags);

	cpu = ks->cpu;
	kgdb_info[cpu].debuggerinfo = regs;
	kgdb_info[cpu].task = current;
	kgdb_info[cpu].ret_state = 0;
	kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;

	/* Make sure the above info reaches the primary CPU */
	smp_mb();

	if (exception_level == 1) {
		if (raw_spin_trylock(&dbg_master_lock))
			atomic_xchg(&kgdb_active, cpu);
		goto cpu_master_loop;
	}

	/*
	 * CPU will loop if it is a slave or requests to become a kgdb
	 * master cpu and acquire the kgdb_active lock:
	 */
	while (1) {
cpu_loop:
		if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
			kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
			goto cpu_master_loop;
		} else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
			if (raw_spin_trylock(&dbg_master_lock)) {
				atomic_xchg(&kgdb_active, cpu);
				break;
			}
		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
			if (!raw_spin_is_locked(&dbg_slave_lock))
				goto return_normal;
		} else {
return_normal:
			/* Return to normal operation by executing any
			 * hw breakpoint fixup.
			 */
			if (arch_kgdb_ops.correct_hw_break)
				arch_kgdb_ops.correct_hw_break();
			if (trace_on)
				tracing_on();
			kgdb_info[cpu].exception_state &=
				~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
			kgdb_info[cpu].enter_kgdb--;
			smp_mb__before_atomic_dec();
			atomic_dec(&slaves_in_kgdb);
			dbg_touch_watchdogs();
			local_irq_restore(flags);
			return 0;
		}
		cpu_relax();
	}

	/*
	 * For single stepping, try to only enter on the processor
	 * that was single stepping.  To guard against a deadlock, the
	 * kernel will only try for the value of sstep_tries before
	 * giving up and continuing on.
	 */
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
	    (kgdb_info[cpu].task &&
	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
		atomic_set(&kgdb_active, -1);
		raw_spin_unlock(&dbg_master_lock);
		dbg_touch_watchdogs();
		local_irq_restore(flags);

		goto acquirelock;
	}

	if (!kgdb_io_ready(1)) {
		kgdb_info[cpu].ret_state = 1;
		goto kgdb_restore; /* No I/O connection, resume the system */
	}

	/*
	 * Don't enter if we have hit a removed breakpoint.
	 */
	if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
		goto kgdb_restore;

	/* Call the I/O driver's pre_exception routine */
	if (dbg_io_ops->pre_exception)
		dbg_io_ops->pre_exception();

	/*
	 * Get the passive CPU lock which will hold all the non-primary
	 * CPU in a spin state while the debugger is active
	 */
	if (!kgdb_single_step)
		raw_spin_lock(&dbg_slave_lock);

#ifdef CONFIG_SMP
	/* Signal the other CPUs to enter kgdb_wait() */
	if ((!kgdb_single_step) && kgdb_do_roundup)
		kgdb_roundup_cpus(flags);
#endif

	/*
	 * Wait for the other CPUs to be notified and be waiting for us:
	 */
	while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) +
				atomic_read(&slaves_in_kgdb)) != online_cpus)
		cpu_relax();

	/*
	 * At this point the primary processor is completely
	 * in the debugger and all secondary CPUs are quiescent
	 */
	dbg_deactivate_sw_breakpoints();
	kgdb_single_step = 0;
	kgdb_contthread = current;
	exception_level = 0;
	trace_on = tracing_is_on();
	if (trace_on)
		tracing_off();

	while (1) {
cpu_master_loop:
		if (dbg_kdb_mode) {
			kgdb_connected = 1;
			error = kdb_stub(ks);
			if (error == -1)
				continue;
			kgdb_connected = 0;
		} else {
			error = gdb_serial_stub(ks);
		}

		if (error == DBG_PASS_EVENT) {
			dbg_kdb_mode = !dbg_kdb_mode;
		} else if (error == DBG_SWITCH_CPU_EVENT) {
			kgdb_info[dbg_switch_cpu].exception_state |=
				DCPU_NEXT_MASTER;
			goto cpu_loop;
		} else {
			kgdb_info[cpu].ret_state = error;
			break;
		}
	}

	/* Call the I/O driver's post_exception routine */
	if (dbg_io_ops->post_exception)
		dbg_io_ops->post_exception();

	if (!kgdb_single_step) {
		raw_spin_unlock(&dbg_slave_lock);
		/* Wait till all the CPUs have quit from the debugger. */
		while (kgdb_do_roundup && atomic_read(&slaves_in_kgdb))
			cpu_relax();
	}

kgdb_restore:
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
		int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
		if (kgdb_info[sstep_cpu].task)
			kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
		else
			kgdb_sstep_pid = 0;
	}
	if (arch_kgdb_ops.correct_hw_break)
		arch_kgdb_ops.correct_hw_break();
	if (trace_on)
		tracing_on();

	kgdb_info[cpu].exception_state &=
		~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
	kgdb_info[cpu].enter_kgdb--;
	smp_mb__before_atomic_dec();
	atomic_dec(&masters_in_kgdb);
	/* Free kgdb_active */
	atomic_set(&kgdb_active, -1);
	raw_spin_unlock(&dbg_master_lock);
	dbg_touch_watchdogs();
	local_irq_restore(flags);

	return kgdb_info[cpu].ret_state;
}

/*
 * kgdb_handle_exception() - main entry point from a kernel exception
 *
 * Locking hierarchy:
 *	interface locks, if any (begin_session)
 *	kgdb lock (kgdb_active)
 */
int
kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
{
	struct kgdb_state kgdb_var;
	struct kgdb_state *ks = &kgdb_var;

	ks->cpu = raw_smp_processor_id();
	ks->ex_vector = evector;
	ks->signo = signo;
	ks->err_code = ecode;
	ks->kgdb_usethreadid = 0;
	ks->linux_regs = regs;

	if (kgdb_reenter_check(ks))
		return 0; /* Ouch, double exception ! */
	if (kgdb_info[ks->cpu].enter_kgdb != 0)
		return 0;

	return kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
}
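
/*
 * kgdb_nmicallback() is called from the architecture's roundup (NMI/IPI)
 * handler on the CPUs that did not take the original exception.  If a master
 * is currently active (dbg_master_lock is held) the CPU enters the debugger
 * as a slave and 0 is returned; otherwise 1 is returned and the CPU resumes
 * normally.
 */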
int kgdb_nmicallback(int cpu, void *regs)
{
#ifdef CONFIG_SMP
	struct kgdb_state kgdb_var;
	struct kgdb_state *ks = &kgdb_var;

	memset(ks, 0, sizeof(struct kgdb_state));
	ks->cpu = cpu;
	ks->linux_regs = regs;

	if (kgdb_info[ks->cpu].enter_kgdb == 0 &&
			raw_spin_is_locked(&dbg_master_lock)) {
		kgdb_cpu_enter(ks, regs, DCPU_IS_SLAVE);
		return 0;
	}
#endif
	return 1;
}
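
/*
 * Console driver used when "kgdbcon" is enabled: while a remote gdb is
 * attached (and the debugger is not currently active), kernel console
 * output is forwarded to gdb through gdbstub_msg_write().
 */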
static void kgdb_console_write(struct console *co, const char *s,
		unsigned count)
{
	unsigned long flags;

	/* If we're debugging, or KGDB has not connected, don't try
	 * and print. */
	if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
		return;

	local_irq_save(flags);
	gdbstub_msg_write(s, count);
	local_irq_restore(flags);
}

static struct console kgdbcons = {
	.name = "kgdb",
	.write = kgdb_console_write,
	.flags = CON_PRINTBUFFER | CON_ENABLED,
	.index = -1,
};

#ifdef CONFIG_MAGIC_SYSRQ
static void sysrq_handle_dbg(int key)
{
	if (!dbg_io_ops) {
		printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
		return;
	}
	if (!kgdb_connected) {
#ifdef CONFIG_KGDB_KDB
		if (!dbg_kdb_mode)
			printk(KERN_CRIT "KGDB or $3#33 for KDB\n");
#else
		printk(KERN_CRIT "Entering KGDB\n");
#endif
	}

	kgdb_breakpoint();
}

static struct sysrq_key_op sysrq_dbg_op = {
	.handler = sysrq_handle_dbg,
	.help_msg = "debug(G)",
	.action_msg = "DEBUG",
};
#endif

static int kgdb_panic_event(struct notifier_block *self,
			    unsigned long val,
			    void *data)
{
	if (dbg_kdb_mode)
		kdb_printf("PANIC: %s\n", (char *)data);
	kgdb_breakpoint();
	return NOTIFY_DONE;
}

static struct notifier_block kgdb_panic_event_nb = {
	.notifier_call = kgdb_panic_event,
	.priority = INT_MAX,
};

void __weak kgdb_arch_late(void)
{
}

void __init dbg_late_init(void)
{
	dbg_is_early = false;
	if (kgdb_io_module_registered)
		kgdb_arch_late();
	kdb_init(KDB_INIT_FULL);
}
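
/*
 * Once an I/O driver is registered these callbacks arm the debugger: hook
 * the panic notifier chain, register the sysrq-g handler, and (when
 * "kgdbcon" is in use) register the kgdb console.  kgdb_unregister_callbacks()
 * undoes all of this when the I/O driver goes away.
 */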
static void kgdb_register_callbacks(void)
{
	if (!kgdb_io_module_registered) {
		kgdb_io_module_registered = 1;
		kgdb_arch_init();
		if (!dbg_is_early)
			kgdb_arch_late();
		atomic_notifier_chain_register(&panic_notifier_list,
					       &kgdb_panic_event_nb);
#ifdef CONFIG_MAGIC_SYSRQ
		register_sysrq_key('g', &sysrq_dbg_op);
#endif
		if (kgdb_use_con && !kgdb_con_registered) {
			register_console(&kgdbcons);
			kgdb_con_registered = 1;
		}
	}
}

static void kgdb_unregister_callbacks(void)
{
	/*
	 * When this routine is called KGDB should unregister from the
	 * panic handler and clean up, making sure it is not handling any
	 * break exceptions at the time.
	 */
	if (kgdb_io_module_registered) {
		kgdb_io_module_registered = 0;
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &kgdb_panic_event_nb);
		kgdb_arch_exit();
#ifdef CONFIG_MAGIC_SYSRQ
		unregister_sysrq_key('g', &sysrq_dbg_op);
#endif
		if (kgdb_con_registered) {
			unregister_console(&kgdbcons);
			kgdb_con_registered = 0;
		}
	}
}

/*
 * There are times a tasklet needs to be used vs a compiled in
 * break point so as to cause an exception outside a kgdb I/O module,
 * such as is the case with kgdboe, where calling a breakpoint in the
 * I/O driver itself would be fatal.
 */
static void kgdb_tasklet_bpt(unsigned long ing)
{
	kgdb_breakpoint();
	atomic_set(&kgdb_break_tasklet_var, 0);
}

static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);

void kgdb_schedule_breakpoint(void)
{
	if (atomic_read(&kgdb_break_tasklet_var) ||
		atomic_read(&kgdb_active) != -1 ||
		atomic_read(&kgdb_setting_breakpoint))
		return;
	atomic_inc(&kgdb_break_tasklet_var);
	tasklet_schedule(&kgdb_tasklet_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);

static void kgdb_initial_breakpoint(void)
{
	kgdb_break_asap = 0;

	printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n");
	kgdb_breakpoint();
}

/**
 * kgdb_register_io_module - register KGDB IO module
 * @new_dbg_io_ops: the io ops vector
 *
 * Register it with the KGDB core.
 */
int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
{
	int err;

	spin_lock(&kgdb_registration_lock);

	if (dbg_io_ops) {
		spin_unlock(&kgdb_registration_lock);

		printk(KERN_ERR "kgdb: Another I/O driver is already "
				"registered with KGDB.\n");
		return -EBUSY;
	}

	if (new_dbg_io_ops->init) {
		err = new_dbg_io_ops->init();
		if (err) {
			spin_unlock(&kgdb_registration_lock);
			return err;
		}
	}

	dbg_io_ops = new_dbg_io_ops;

	spin_unlock(&kgdb_registration_lock);

	printk(KERN_INFO "kgdb: Registered I/O driver %s.\n",
	       new_dbg_io_ops->name);

	/* Arm KGDB now. */
	kgdb_register_callbacks();

	if (kgdb_break_asap)
		kgdb_initial_breakpoint();

	return 0;
}
EXPORT_SYMBOL_GPL(kgdb_register_io_module);

/**
 * kgdb_unregister_io_module - unregister KGDB IO module
 * @old_dbg_io_ops: the io ops vector
 *
 * Unregister it with the KGDB core.
 */
void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
{
	BUG_ON(kgdb_connected);

	/*
	 * KGDB is no longer able to communicate out, so
	 * unregister our callbacks and reset state.
	 */
	kgdb_unregister_callbacks();

	spin_lock(&kgdb_registration_lock);

	WARN_ON_ONCE(dbg_io_ops != old_dbg_io_ops);
	dbg_io_ops = NULL;

	spin_unlock(&kgdb_registration_lock);

	printk(KERN_INFO
		"kgdb: Unregistered I/O driver %s, debugger disabled.\n",
		old_dbg_io_ops->name);
}
EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
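
/*
 * Poll one character from the registered I/O driver.  Returns -1 when no
 * character is pending (NO_POLL_CHAR).  In kdb mode the DEL character (127)
 * is translated to backspace (8) for kdb's line editing.
 */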
int dbg_io_get_char(void)
{
	int ret = dbg_io_ops->read_char();
	if (ret == NO_POLL_CHAR)
		return -1;
	if (!dbg_kdb_mode)
		return ret;
	if (ret == 127)
		return 8;
	return ret;
}

/**
 * kgdb_breakpoint - generate breakpoint exception
 *
 * This function will generate a breakpoint exception.  It is used at the
 * beginning of a program to sync up with a debugger and can be used
 * otherwise as a quick means to stop program execution and "break" into
 * the debugger.
 */
void kgdb_breakpoint(void)
{
	atomic_inc(&kgdb_setting_breakpoint);
	wmb(); /* Sync point before breakpoint */
	arch_kgdb_breakpoint();
	wmb(); /* Sync point after breakpoint */
	atomic_dec(&kgdb_setting_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_breakpoint);
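
/*
 * "kgdbwait" boot parameter: request a breakpoint as early as possible so
 * the boot process itself can be debugged.  The breakpoint fires immediately
 * if an I/O driver is already registered; otherwise kgdb_register_io_module()
 * triggers it as soon as one shows up (via kgdb_break_asap).
 */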
static int __init opt_kgdb_wait(char *str)
{
	kgdb_break_asap = 1;

	kdb_init(KDB_INIT_EARLY);
	if (kgdb_io_module_registered)
		kgdb_initial_breakpoint();

	return 0;
}

early_param("kgdbwait", opt_kgdb_wait);