hw_breakpoint.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K. Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>

/*
 * Constraints data
 */
struct bp_cpuinfo {
        /* Number of pinned cpu breakpoints in a cpu */
        unsigned int    cpu_pinned;
        /* tsk_pinned[n] is the number of tasks having n+1 breakpoints */
        unsigned int    *tsk_pinned;
        /* Number of non-pinned cpu/task breakpoints in a cpu */
        unsigned int    flexible; /* XXX: placeholder, see fetch_this_slot() */
};

static DEFINE_PER_CPU(struct bp_cpuinfo, bp_cpuinfo[TYPE_MAX]);
static int nr_slots[TYPE_MAX];

static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type)
{
        return per_cpu_ptr(bp_cpuinfo + type, cpu);
}

/* Keep track of the breakpoints attached to tasks */
static LIST_HEAD(bp_task_head);

static int constraints_initialized;

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
        unsigned int pinned;
        unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

__weak int hw_breakpoint_weight(struct perf_event *bp)
{
        return 1;
}

static inline enum bp_type_idx find_slot_idx(struct perf_event *bp)
{
        if (bp->attr.bp_type & HW_BREAKPOINT_RW)
                return TYPE_DATA;

        return TYPE_INST;
}

/*
 * Report the maximum number of pinned breakpoints a task
 * can have on this cpu
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
        unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
        int i;

        for (i = nr_slots[type] - 1; i >= 0; i--) {
                if (tsk_pinned[i] > 0)
                        return i + 1;
        }

        return 0;
}
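
/*
 * Worked example (illustrative, not from the original source): with
 * nr_slots[TYPE_DATA] == 4, suppose on this cpu two tasks each hold one
 * data breakpoint and one task holds three.  Then tsk_pinned is
 * {2, 0, 1, 0}: tsk_pinned[0] == 2 tasks with one bp, tsk_pinned[2] == 1
 * task with three.  max_task_bp_pinned() scans downward from index 3,
 * finds tsk_pinned[2] > 0 and reports 3, the worst-case number of slots
 * any single task pins on this cpu.
 */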

/*
 * Count the number of breakpoints of the same type and same task.
 * The given event must not be on the list.
 */
static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
{
        struct task_struct *tsk = bp->hw.target;
        struct perf_event *iter;
        int count = 0;

        list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
                if (iter->hw.target == tsk &&
                    find_slot_idx(iter) == type &&
                    (iter->cpu < 0 || cpu == iter->cpu))
                        count += hw_breakpoint_weight(iter);
        }

        return count;
}

static const struct cpumask *cpumask_of_bp(struct perf_event *bp)
{
        if (bp->cpu >= 0)
                return cpumask_of(bp->cpu);
        return cpu_possible_mask;
}

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
                    enum bp_type_idx type)
{
        const struct cpumask *cpumask = cpumask_of_bp(bp);
        int cpu;

        for_each_cpu(cpu, cpumask) {
                struct bp_cpuinfo *info = get_bp_info(cpu, type);
                int nr;

                nr = info->cpu_pinned;
                if (!bp->hw.target)
                        nr += max_task_bp_pinned(cpu, type);
                else
                        nr += task_bp_pinned(cpu, bp, type);

                if (nr > slots->pinned)
                        slots->pinned = nr;

                nr = info->flexible;
                if (nr > slots->flexible)
                        slots->flexible = nr;
        }
}

/*
 * For now, continue to consider flexible as pinned, until we can
 * ensure no flexible event can ever be scheduled before a pinned event
 * on the same cpu.
 */
static void
fetch_this_slot(struct bp_busy_slots *slots, int weight)
{
        slots->pinned += weight;
}

/*
 * Add a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct perf_event *bp, int cpu,
                                enum bp_type_idx type, int weight)
{
        unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
        int old_idx, new_idx;

        old_idx = task_bp_pinned(cpu, bp, type) - 1;
        new_idx = old_idx + weight;

        if (old_idx >= 0)
                tsk_pinned[old_idx]--;
        if (new_idx >= 0)
                tsk_pinned[new_idx]++;
}
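
/*
 * Worked example (illustrative, not from the original source): the task
 * already holds two breakpoints of this type on this cpu, so
 * task_bp_pinned() returns 2 and old_idx == 1.  Adding one more
 * (weight == 1) gives new_idx == 2: the task leaves the "two
 * breakpoints" bucket (tsk_pinned[1]--) and enters the "three
 * breakpoints" bucket (tsk_pinned[2]++).  Removal runs the same code
 * with a negated weight, moving the task down a bucket.
 */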

/*
 * Add/remove the given breakpoint in our constraint table
 */
static void
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
               int weight)
{
        const struct cpumask *cpumask = cpumask_of_bp(bp);
        int cpu;

        if (!enable)
                weight = -weight;

        /* Pinned counter cpu profiling */
        if (!bp->hw.target) {
                get_bp_info(bp->cpu, type)->cpu_pinned += weight;
                return;
        }

        /* Pinned counter task profiling */
        for_each_cpu(cpu, cpumask)
                toggle_bp_task_slot(bp, cpu, type, weight);

        if (enable)
                list_add_tail(&bp->hw.bp_list, &bp_task_head);
        else
                list_del(&bp->hw.bp_list);
}

/*
 * Function to perform processor-specific cleanup during unregistration
 */
__weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
        /*
         * A weak stub function here for those archs that don't define
         * it inside arch/.../kernel/hw_breakpoint.c
         */
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(info->flexible, cpu) || (per_cpu(info->cpu_pinned, cpu)
 *           + max(per_cpu(info->tsk_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't use up all of the registers.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(info->flexible, *) || (max(per_cpu(info->cpu_pinned, *))
 *           + max(per_cpu(info->tsk_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(info->flexible, cpu) > 1) + per_cpu(info->cpu_pinned, cpu)
 *            + max(per_cpu(info->tsk_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the info->flexible, if any, must
 *          keep at least one register (or they will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(info->flexible, *) > 1) + max(per_cpu(info->cpu_pinned, *))
 *            + max(per_cpu(info->tsk_pinned, *))) < HBP_NUM
 */
static int __reserve_bp_slot(struct perf_event *bp)
{
        struct bp_busy_slots slots = {0};
        enum bp_type_idx type;
        int weight;

        /* We couldn't initialize breakpoint constraints on boot */
        if (!constraints_initialized)
                return -ENOMEM;

        /* Basic checks */
        if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
            bp->attr.bp_type == HW_BREAKPOINT_INVALID)
                return -EINVAL;

        type = find_slot_idx(bp);
        weight = hw_breakpoint_weight(bp);

        fetch_bp_busy_slots(&slots, bp, type);
        /*
         * Simulate the addition of this breakpoint to the constraints
         * and see the result.
         */
        fetch_this_slot(&slots, weight);

        /* Flexible counters need to keep at least one slot */
        if (slots.pinned + (!!slots.flexible) > nr_slots[type])
                return -ENOSPC;

        toggle_bp_slot(bp, true, type, weight);

        return 0;
}
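
/*
 * Worked example of the check above (illustrative, not from the original
 * source): with nr_slots[TYPE_DATA] == 4 (e.g. x86), the busiest cpu
 * already reports slots.pinned == 2 and slots.flexible == 1.  Reserving
 * a new weight-1 pinned breakpoint simulates slots.pinned == 3; since
 * 3 + 1 (one slot kept free for the flexible counter) == 4, the
 * reservation still fits, but the next one would push the sum to 5 > 4
 * and return -ENOSPC.
 */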

int reserve_bp_slot(struct perf_event *bp)
{
        int ret;

        mutex_lock(&nr_bp_mutex);

        ret = __reserve_bp_slot(bp);

        mutex_unlock(&nr_bp_mutex);

        return ret;
}

static void __release_bp_slot(struct perf_event *bp)
{
        enum bp_type_idx type;
        int weight;

        type = find_slot_idx(bp);
        weight = hw_breakpoint_weight(bp);
        toggle_bp_slot(bp, false, type, weight);
}

void release_bp_slot(struct perf_event *bp)
{
        mutex_lock(&nr_bp_mutex);

        arch_unregister_hw_breakpoint(bp);
        __release_bp_slot(bp);

        mutex_unlock(&nr_bp_mutex);
}

/*
 * Allow the kernel debugger to reserve breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and
 * release breakpoint slot functions.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
        if (mutex_is_locked(&nr_bp_mutex))
                return -1;

        return __reserve_bp_slot(bp);
}

int dbg_release_bp_slot(struct perf_event *bp)
{
        if (mutex_is_locked(&nr_bp_mutex))
                return -1;

        __release_bp_slot(bp);

        return 0;
}

static int validate_hw_breakpoint(struct perf_event *bp)
{
        int ret;

        ret = arch_validate_hwbkpt_settings(bp);
        if (ret)
                return ret;

        if (arch_check_bp_in_kernelspace(bp)) {
                if (bp->attr.exclude_kernel)
                        return -EINVAL;
                /*
                 * Don't let unprivileged users set a breakpoint in the trap
                 * path to avoid trap recursion attacks.
                 */
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
        }

        return 0;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
        int ret;

        ret = reserve_bp_slot(bp);
        if (ret)
                return ret;

        ret = validate_hw_breakpoint(bp);

        /* if arch_validate_hwbkpt_settings() fails then release bp slot */
        if (ret)
                release_bp_slot(bp);

        return ret;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data passed to the @triggered callback
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
                            perf_overflow_handler_t triggered,
                            void *context,
                            struct task_struct *tsk)
{
        return perf_event_create_kernel_counter(attr, -1, tsk, triggered,
                                                context);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
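
/*
 * Usage sketch (illustrative, not part of this file; the handler name and
 * watched address are hypothetical).  A caller such as ptrace would do
 * roughly:
 *
 *      struct perf_event_attr attr;
 *      struct perf_event *bp;
 *
 *      hw_breakpoint_init(&attr);
 *      attr.bp_addr = user_addr;               // address in tsk's space
 *      attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *      attr.bp_type = HW_BREAKPOINT_W;         // fire on writes
 *
 *      bp = register_user_hw_breakpoint(&attr, my_bp_handler, NULL, tsk);
 *      if (IS_ERR(bp))
 *              return PTR_ERR(bp);
 */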

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
        /*
         * modify_user_hw_breakpoint can be invoked with IRQs disabled and
         * hence it will not be possible to raise IPIs that invoke
         * __perf_event_disable.  So call the function directly after making
         * sure we are targeting the current task.
         */
        if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
                perf_event_disable_local(bp);
        else
                perf_event_disable(bp);

        bp->attr.bp_addr = attr->bp_addr;
        bp->attr.bp_type = attr->bp_type;
        bp->attr.bp_len = attr->bp_len;
        bp->attr.disabled = 1;

        if (!attr->disabled) {
                int err = validate_hw_breakpoint(bp);

                if (err)
                        return err;

                perf_event_enable(bp);
                bp->attr.disabled = 0;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
        if (!bp)
                return;
        perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @context: context data passed to the @triggered callback
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
                            perf_overflow_handler_t triggered,
                            void *context)
{
        struct perf_event * __percpu *cpu_events;
        struct perf_event *bp;
        long err = 0;
        int cpu;

        cpu_events = alloc_percpu(typeof(*cpu_events));
        if (!cpu_events)
                return (void __percpu __force *)ERR_PTR(-ENOMEM);

        get_online_cpus();
        for_each_online_cpu(cpu) {
                bp = perf_event_create_kernel_counter(attr, cpu, NULL,
                                                      triggered, context);
                if (IS_ERR(bp)) {
                        err = PTR_ERR(bp);
                        break;
                }

                per_cpu(*cpu_events, cpu) = bp;
        }
        put_online_cpus();

        if (likely(!err))
                return cpu_events;

        unregister_wide_hw_breakpoint(cpu_events);
        return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);
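
/*
 * Usage sketch (illustrative, in the spirit of
 * samples/hw_breakpoint/data_breakpoint.c; the handler name is
 * hypothetical).  To watch writes to a kernel variable on all cpus:
 *
 *      struct perf_event_attr attr;
 *      struct perf_event * __percpu *wp;
 *
 *      hw_breakpoint_init(&attr);
 *      attr.bp_addr = kallsyms_lookup_name("pid_max");
 *      attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *      attr.bp_type = HW_BREAKPOINT_W;
 *
 *      wp = register_wide_hw_breakpoint(&attr, my_wp_handler, NULL);
 *      if (IS_ERR((void __force *)wp))
 *              return PTR_ERR((void __force *)wp);
 *      ...
 *      unregister_wide_hw_breakpoint(wp);
 */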

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
        int cpu;

        for_each_possible_cpu(cpu)
                unregister_hw_breakpoint(per_cpu(*cpu_events, cpu));

        free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);

static struct notifier_block hw_breakpoint_exceptions_nb = {
        .notifier_call = hw_breakpoint_exceptions_notify,
        /* we need to be notified first */
        .priority = 0x7fffffff
};

static void bp_perf_event_destroy(struct perf_event *event)
{
        release_bp_slot(event);
}

static int hw_breakpoint_event_init(struct perf_event *bp)
{
        int err;

        if (bp->attr.type != PERF_TYPE_BREAKPOINT)
                return -ENOENT;

        /*
         * no branch sampling for breakpoint events
         */
        if (has_branch_stack(bp))
                return -EOPNOTSUPP;

        err = register_perf_hw_breakpoint(bp);
        if (err)
                return err;

        bp->destroy = bp_perf_event_destroy;

        return 0;
}

static int hw_breakpoint_add(struct perf_event *bp, int flags)
{
        if (!(flags & PERF_EF_START))
                bp->hw.state = PERF_HES_STOPPED;

        if (is_sampling_event(bp)) {
                bp->hw.last_period = bp->hw.sample_period;
                perf_swevent_set_period(bp);
        }

        return arch_install_hw_breakpoint(bp);
}

static void hw_breakpoint_del(struct perf_event *bp, int flags)
{
        arch_uninstall_hw_breakpoint(bp);
}

static void hw_breakpoint_start(struct perf_event *bp, int flags)
{
        bp->hw.state = 0;
}

static void hw_breakpoint_stop(struct perf_event *bp, int flags)
{
        bp->hw.state = PERF_HES_STOPPED;
}

static struct pmu perf_breakpoint = {
        .task_ctx_nr    = perf_sw_context, /* could eventually get its own */

        .event_init     = hw_breakpoint_event_init,
        .add            = hw_breakpoint_add,
        .del            = hw_breakpoint_del,
        .start          = hw_breakpoint_start,
        .stop           = hw_breakpoint_stop,
        .read           = hw_breakpoint_pmu_read,
};

int __init init_hw_breakpoint(void)
{
        int cpu, err_cpu;
        int i;

        for (i = 0; i < TYPE_MAX; i++)
                nr_slots[i] = hw_breakpoint_slots(i);

        for_each_possible_cpu(cpu) {
                for (i = 0; i < TYPE_MAX; i++) {
                        struct bp_cpuinfo *info = get_bp_info(cpu, i);

                        info->tsk_pinned = kcalloc(nr_slots[i], sizeof(int),
                                                   GFP_KERNEL);
                        if (!info->tsk_pinned)
                                goto err_alloc;
                }
        }

        constraints_initialized = 1;

        perf_pmu_register(&perf_breakpoint, "breakpoint", PERF_TYPE_BREAKPOINT);

        return register_die_notifier(&hw_breakpoint_exceptions_nb);

 err_alloc:
        for_each_possible_cpu(err_cpu) {
                for (i = 0; i < TYPE_MAX; i++)
                        kfree(get_bp_info(err_cpu, i)->tsk_pinned);
                if (err_cpu == cpu)
                        break;
        }

        return -ENOMEM;
}