ring_buffer.c

/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/poll.h>
#include <linux/nospec.h>

#include "internal.h"

static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->rb->poll, POLLIN);

	handle->event->pending_wakeup = 1;
	irq_work_queue(&handle->event->pending);
}

/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;

	preempt_disable();
	local_inc(&rb->nest);
	handle->wakeup = local_read(&rb->wakeup);
}

static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long head;

again:
	/*
	 * In order to avoid publishing a head value that goes backwards,
	 * we must ensure the load of @rb->head happens after we've
	 * incremented @rb->nest.
	 *
	 * Otherwise we can observe a @rb->head value before one published
	 * by an IRQ/NMI happening between the load and the increment.
	 */
	barrier();
	head = local_read(&rb->head);

	/*
	 * IRQ/NMI can happen here and advance @rb->head, causing our
	 * load above to be stale.
	 */

	/*
	 * If this isn't the outermost nesting, we don't have to update
	 * @rb->user_page->data_head.
	 */
	if (local_read(&rb->nest) > 1) {
		local_dec(&rb->nest);
		goto out;
	}

	/*
	 * Since the mmap() consumer (userspace) can run on a different CPU:
	 *
	 *   kernel				user
	 *
	 *   if (LOAD ->data_tail) {		LOAD ->data_head
	 *			(A)		smp_rmb()	(C)
	 *	STORE $data			LOAD $data
	 *	smp_wmb()	(B)		smp_mb()	(D)
	 *	STORE ->data_head		STORE ->data_tail
	 *   }
	 *
	 * Where A pairs with D, and B pairs with C.
	 *
	 * In our case (A) is a control dependency that separates the load of
	 * the ->data_tail and the stores of $data. In case ->data_tail
	 * indicates there is no room in the buffer to store $data we do not.
	 *
	 * D needs to be a full barrier since it separates the data READ
	 * from the tail WRITE.
	 *
	 * For B a WMB is sufficient since it separates two WRITEs, and for C
	 * an RMB is sufficient since it separates two READs.
	 *
	 * See perf_output_begin().
	 */
	smp_wmb(); /* B, matches C */
	WRITE_ONCE(rb->user_page->data_head, head);

	/*
	 * We must publish the head before decrementing the nest count,
	 * otherwise an IRQ/NMI can publish a more recent head value and our
	 * write will (temporarily) publish a stale value.
	 */
	barrier();
	local_set(&rb->nest, 0);

	/*
	 * Ensure we decrement @rb->nest before we validate the @rb->head.
	 * Otherwise we cannot be sure we caught the 'last' nested update.
	 */
	barrier();
	if (unlikely(head != local_read(&rb->head))) {
		local_inc(&rb->nest);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}
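
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the userspace side of the ordering diagram above, i.e. barriers (C) and
 * (D) as a consumer of the mmap()ed data area would issue them. 'pc',
 * 'base', 'mask' and consume_record() are hypothetical names; the barriers
 * are expressed with GCC __atomic builtins (acquire load / release store)
 * rather than the smp_rmb()/smp_mb() named above, and record wrap-around
 * at the end of the buffer is ignored for brevity.
 *
 *	__u64 head = __atomic_load_n(&pc->data_head, __ATOMIC_ACQUIRE); // (C)
 *	__u64 tail = pc->data_tail;
 *
 *	while (tail != head) {
 *		struct perf_event_header *hdr;
 *
 *		hdr = (struct perf_event_header *)(base + (tail & mask));
 *		consume_record(hdr);			// LOAD $data
 *		tail += hdr->size;
 *	}
 *
 *	// (D): order the data reads above before publishing the new tail
 *	__atomic_store_n(&pc->data_tail, tail, __ATOMIC_RELEASE);
 */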

static bool __always_inline
ring_buffer_has_space(unsigned long head, unsigned long tail,
		      unsigned long data_size, unsigned int size,
		      bool backward)
{
	if (!backward)
		return CIRC_SPACE(head, tail, data_size) >= size;
	else
		return CIRC_SPACE(tail, head, data_size) >= size;
}

static int __always_inline
__perf_output_begin(struct perf_output_handle *handle,
		    struct perf_event *event, unsigned int size,
		    bool backward)
{
	struct ring_buffer *rb;
	unsigned long tail, offset, head;
	int have_lost, page_shift;
	struct {
		struct perf_event_header header;
		u64			 id;
		u64			 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	rb = rcu_dereference(event->rb);
	if (unlikely(!rb))
		goto out;

	if (unlikely(rb->paused)) {
		if (rb->nr_pages)
			local_inc(&rb->lost);
		goto out;
	}

	handle->rb    = rb;
	handle->event = event;

	have_lost = local_read(&rb->lost);
	if (unlikely(have_lost)) {
		size += sizeof(lost_event);
		if (event->attr.sample_id_all)
			size += event->id_header_size;
	}

	perf_output_get_handle(handle);

	do {
		tail = READ_ONCE(rb->user_page->data_tail);
		offset = head = local_read(&rb->head);
		if (!rb->overwrite) {
			if (unlikely(!ring_buffer_has_space(head, tail,
							    perf_data_size(rb),
							    size, backward)))
				goto fail;
		}

		/*
		 * The above forms a control dependency barrier separating the
		 * @tail load above from the data stores below. Since the @tail
		 * load is required to compute the branch to fail below.
		 *
		 * A, matches D; the full memory barrier userspace SHOULD issue
		 * after reading the data and before storing the new tail
		 * position.
		 *
		 * See perf_output_put_handle().
		 */

		if (!backward)
			head += size;
		else
			head -= size;
	} while (local_cmpxchg(&rb->head, offset, head) != offset);

	if (backward) {
		offset = head;
		head = (u64)(-head);
	}

	/*
	 * We rely on the implied barrier() by local_cmpxchg() to ensure
	 * none of the data stores below can be lifted up by the compiler.
	 */

	if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
		local_add(rb->watermark, &rb->wakeup);

	page_shift = PAGE_SHIFT + page_order(rb);

	handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
	offset &= (1UL << page_shift) - 1;
	handle->addr = rb->data_pages[handle->page] + offset;
	handle->size = (1UL << page_shift) - offset;

	if (unlikely(have_lost)) {
		struct perf_sample_data sample_data;

		lost_event.header.size = sizeof(lost_event);
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id          = event->id;
		lost_event.lost        = local_xchg(&rb->lost, 0);

		perf_event_header__init_id(&lost_event.header,
					   &sample_data, event);
		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, &sample_data);
	}

	return 0;

fail:
	local_inc(&rb->lost);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}

int perf_output_begin_forward(struct perf_output_handle *handle,
			      struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, event, size, false);
}

int perf_output_begin_backward(struct perf_output_handle *handle,
			       struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, event, size, true);
}

int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, event, size,
				   unlikely(is_write_backward(event)));
}

unsigned int perf_output_copy(struct perf_output_handle *handle,
			      const void *buf, unsigned int len)
{
	return __output_copy(handle, buf, len);
}

unsigned int perf_output_skip(struct perf_output_handle *handle,
			      unsigned int len)
{
	return __output_skip(handle, NULL, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
	perf_output_put_handle(handle);
	rcu_read_unlock();
}
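
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the expected calling sequence for the output handle API above, loosely
 * modelled on the record emitters in kernel/events/core.c. The record
 * struct and its contents are made up for the example.
 *
 *	struct perf_output_handle handle;
 *	struct { struct perf_event_header header; u64 val; } rec = { ... };
 *
 *	if (perf_output_begin(&handle, event, sizeof(rec)))
 *		return;			// no space; the miss is counted in rb->lost
 *
 *	perf_output_put(&handle, rec);	// copies the record via __output_copy()
 *	perf_output_end(&handle);	// publishes data_head and drops the RCU lock
 */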

static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
	long max_size = perf_data_size(rb);

	if (watermark)
		rb->watermark = min(max_size, watermark);

	if (!rb->watermark)
		rb->watermark = max_size / 2;

	if (flags & RING_BUFFER_WRITABLE)
		rb->overwrite = 0;
	else
		rb->overwrite = 1;

	atomic_set(&rb->refcount, 1);

	INIT_LIST_HEAD(&rb->event_list);
	spin_lock_init(&rb->event_lock);

	/*
	 * perf_output_begin() only checks rb->paused, therefore
	 * rb->paused must be true if we have no pages for output.
	 */
	if (!rb->nr_pages)
		rb->paused = 1;
}

void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags)
{
	/*
	 * OVERWRITE is determined by perf_aux_output_end() and can't
	 * be passed in directly.
	 */
	if (WARN_ON_ONCE(flags & PERF_AUX_FLAG_OVERWRITE))
		return;

	handle->aux_flags |= flags;
}
EXPORT_SYMBOL_GPL(perf_aux_output_flag);

/*
 * This is called before hardware starts writing to the AUX area to
 * obtain an output handle and make sure there's room in the buffer.
 * When the capture completes, call perf_aux_output_end() to commit
 * the recorded data to the buffer.
 *
 * The ordering is similar to that of perf_output_{begin,end}, with
 * the exception of (B), which should be taken care of by the pmu
 * driver, since ordering rules will differ depending on hardware.
 *
 * Call this from pmu::start(); see the comment in perf_aux_output_end()
 * about its use in pmu callbacks. Both can also be called from the PMI
 * handler if needed.
 */
void *perf_aux_output_begin(struct perf_output_handle *handle,
			    struct perf_event *event)
{
	struct perf_event *output_event = event;
	unsigned long aux_head, aux_tail;
	struct ring_buffer *rb;

	if (output_event->parent)
		output_event = output_event->parent;

	/*
	 * Since this will typically be open across pmu::add/pmu::del, we
	 * grab ring_buffer's refcount instead of holding rcu read lock
	 * to make sure it doesn't disappear under us.
	 */
	rb = ring_buffer_get(output_event);
	if (!rb)
		return NULL;

	if (!rb_has_aux(rb))
		goto err;

	/*
	 * If aux_mmap_count is zero, the aux buffer is in perf_mmap_close(),
	 * about to get freed, so we leave immediately.
	 *
	 * Checking rb::aux_mmap_count and rb::refcount has to be done in
	 * the same order, see perf_mmap_close. Otherwise we end up freeing
	 * aux pages in this path, which is a bug, because in_atomic().
	 */
	if (!atomic_read(&rb->aux_mmap_count))
		goto err;

	if (!atomic_inc_not_zero(&rb->aux_refcount))
		goto err;

	/*
	 * Nesting is not supported for AUX area, make sure nested
	 * writers are caught early
	 */
	if (WARN_ON_ONCE(local_xchg(&rb->aux_nest, 1)))
		goto err_put;

	aux_head = rb->aux_head;

	handle->rb = rb;
	handle->event = event;
	handle->head = aux_head;
	handle->size = 0;
	handle->aux_flags = 0;

	/*
	 * In overwrite mode, AUX data stores do not depend on aux_tail,
	 * therefore (A) control dependency barrier does not exist. The
	 * (B) <-> (C) ordering is still observed by the pmu driver.
	 */
	if (!rb->aux_overwrite) {
		aux_tail = ACCESS_ONCE(rb->user_page->aux_tail);
		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
		if (aux_head - aux_tail < perf_aux_size(rb))
			handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));

		/*
		 * handle->size computation depends on aux_tail load; this forms a
		 * control dependency barrier separating aux_tail load from aux data
		 * store that will be enabled on successful return
		 */
		if (!handle->size) { /* A, matches D */
			event->pending_disable = 1;
			perf_output_wakeup(handle);
			local_set(&rb->aux_nest, 0);
			goto err_put;
		}
	}

	return handle->rb->aux_priv;

err_put:
	/* can't be last */
	rb_free_aux(rb);

err:
	ring_buffer_put(rb);
	handle->event = NULL;

	return NULL;
}

static bool __always_inline rb_need_aux_wakeup(struct ring_buffer *rb)
{
	if (rb->aux_overwrite)
		return false;

	if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {
		rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
		return true;
	}

	return false;
}

/*
 * Commit the data written by hardware into the ring buffer by adjusting
 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
 * pmu driver's responsibility to observe ordering rules of the hardware,
 * so that all the data is externally visible before this is called.
 *
 * Note: this has to be called from pmu::stop() callback, as the assumption
 * of the AUX buffer management code is that after pmu::stop(), the AUX
 * transaction must be stopped and therefore drop the AUX reference count.
 */
void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
{
	bool wakeup = !!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED);
	struct ring_buffer *rb = handle->rb;
	unsigned long aux_head;

	/* in overwrite mode, driver provides aux_head via handle */
	if (rb->aux_overwrite) {
		handle->aux_flags |= PERF_AUX_FLAG_OVERWRITE;

		aux_head = handle->head;
		rb->aux_head = aux_head;
	} else {
		handle->aux_flags &= ~PERF_AUX_FLAG_OVERWRITE;

		aux_head = rb->aux_head;
		rb->aux_head += size;
	}

	if (size || handle->aux_flags) {
		/*
		 * Only send RECORD_AUX if we have something useful to communicate
		 */
		perf_event_aux_event(handle->event, aux_head, size,
				     handle->aux_flags);
	}

	WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
	if (rb_need_aux_wakeup(rb))
		wakeup = true;

	if (wakeup) {
		if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)
			handle->event->pending_disable = 1;
		perf_output_wakeup(handle);
	}

	handle->event = NULL;

	local_set(&rb->aux_nest, 0);
	/* can't be last */
	rb_free_aux(rb);
	ring_buffer_put(rb);
}
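
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how a PMU driver is expected to pair perf_aux_output_begin() and
 * perf_aux_output_end() across pmu::start()/pmu::stop(), per the comments
 * above. All driver-side names (my_handle, hw_enable(), hw_disable(),
 * hw_bytes_written()) are hypothetical; real drivers keep the handle
 * per-CPU rather than in a single static variable.
 *
 *	static struct perf_output_handle my_handle;
 *
 *	static void my_pmu_start(struct perf_event *event, int flags)
 *	{
 *		void *buf = perf_aux_output_begin(&my_handle, event);
 *
 *		if (!buf)
 *			return;		// no AUX buffer, or no space left
 *		hw_enable(buf);		// hardware starts writing AUX data
 *	}
 *
 *	static void my_pmu_stop(struct perf_event *event, int flags)
 *	{
 *		hw_disable();		// driver-side ordering, i.e. barrier (B)
 *		perf_aux_output_end(&my_handle, hw_bytes_written());
 *	}
 */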

/*
 * Skip over a given number of bytes in the AUX buffer, due to, for example,
 * hardware's alignment constraints.
 */
int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
{
	struct ring_buffer *rb = handle->rb;

	if (size > handle->size)
		return -ENOSPC;

	rb->aux_head += size;

	WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
	if (rb_need_aux_wakeup(rb)) {
		perf_output_wakeup(handle);
		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
	}

	handle->head = rb->aux_head;
	handle->size -= size;

	return 0;
}

void *perf_get_aux(struct perf_output_handle *handle)
{
	/* this is only valid between perf_aux_output_begin and *_end */
	if (!handle->event)
		return NULL;

	return handle->rb->aux_priv;
}

#define PERF_AUX_GFP  (GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)

static struct page *rb_alloc_aux_page(int node, int order)
{
	struct page *page;

	if (order > MAX_ORDER)
		order = MAX_ORDER;

	do {
		page = alloc_pages_node(node, PERF_AUX_GFP, order);
	} while (!page && order--);

	if (page && order) {
		/*
		 * Communicate the allocation size to the driver:
		 * if we managed to secure a high-order allocation,
		 * set its first page's private to this order;
		 * !PagePrivate(page) means it's just a normal page.
		 */
		split_page(page, order);
		SetPagePrivate(page);
		set_page_private(page, order);
	}

	return page;
}
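
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how a PMU driver might read back the allocation order encoded above when
 * walking the pages[] array it receives in pmu::setup_aux(). 'page' here is
 * one entry of that array converted back with virt_to_page().
 *
 *	int order = 0;
 *
 *	if (PagePrivate(page))
 *		order = page_private(page);	// start of a 2^order contiguous chunk
 */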

static void rb_free_aux_page(struct ring_buffer *rb, int idx)
{
	struct page *page = virt_to_page(rb->aux_pages[idx]);

	ClearPagePrivate(page);
	page->mapping = NULL;
	__free_page(page);
}

static void __rb_free_aux(struct ring_buffer *rb)
{
	int pg;

	/*
	 * Should never happen, the last reference should be dropped from
	 * perf_mmap_close() path, which first stops aux transactions (which
	 * in turn are the atomic holders of aux_refcount) and then does the
	 * last rb_free_aux().
	 */
	WARN_ON_ONCE(in_atomic());

	if (rb->aux_priv) {
		rb->free_aux(rb->aux_priv);
		rb->free_aux = NULL;
		rb->aux_priv = NULL;
	}

	if (rb->aux_nr_pages) {
		for (pg = 0; pg < rb->aux_nr_pages; pg++)
			rb_free_aux_page(rb, pg);

		kfree(rb->aux_pages);
		rb->aux_nr_pages = 0;
	}
}

int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
		 pgoff_t pgoff, int nr_pages, long watermark, int flags)
{
	bool overwrite = !(flags & RING_BUFFER_WRITABLE);
	int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
	int ret = -ENOMEM, max_order = 0;

	if (!has_aux(event))
		return -EOPNOTSUPP;

	if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) {
		/*
		 * We need to start with the max_order that fits in nr_pages,
		 * not the other way around, hence ilog2() and not get_order.
		 */
		max_order = ilog2(nr_pages);

		/*
		 * PMU requests more than one contiguous chunks of memory
		 * for SW double buffering
		 */
		if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_SW_DOUBLEBUF) &&
		    !overwrite) {
			if (!max_order)
				return -EINVAL;

			max_order--;
		}
	}

	rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node);
	if (!rb->aux_pages)
		return -ENOMEM;

	rb->free_aux = event->pmu->free_aux;
	for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
		struct page *page;
		int last, order;

		order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
		page = rb_alloc_aux_page(node, order);
		if (!page)
			goto out;

		for (last = rb->aux_nr_pages + (1 << page_private(page));
		     last > rb->aux_nr_pages; rb->aux_nr_pages++)
			rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
	}

	/*
	 * In overwrite mode, PMUs that don't support SG may not handle more
	 * than one contiguous allocation, since they rely on PMI to do double
	 * buffering. In this case, the entire buffer has to be one contiguous
	 * chunk.
	 */
	if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
	    overwrite) {
		struct page *page = virt_to_page(rb->aux_pages[0]);

		if (page_private(page) != max_order)
			goto out;
	}

	rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages,
					     overwrite);
	if (!rb->aux_priv)
		goto out;

	ret = 0;

	/*
	 * aux_pages (and pmu driver's private data, aux_priv) will be
	 * referenced in both producer's and consumer's contexts, thus
	 * we keep a refcount here to make sure either of the two can
	 * reference them safely.
	 */
	atomic_set(&rb->aux_refcount, 1);

	rb->aux_overwrite = overwrite;
	rb->aux_watermark = watermark;

	if (!rb->aux_watermark && !rb->aux_overwrite)
		rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1);

out:
	if (!ret)
		rb->aux_pgoff = pgoff;
	else
		__rb_free_aux(rb);

	return ret;
}

void rb_free_aux(struct ring_buffer *rb)
{
	if (atomic_dec_and_test(&rb->aux_refcount))
		__rb_free_aux(rb);
}

#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (pgoff > rb->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(rb->user_page);

	return virt_to_page(rb->data_pages[pgoff - 1]);
}

static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	int i;

	size = sizeof(struct ring_buffer);
	size += nr_pages * sizeof(void *);

	if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
		goto fail;

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}

	rb->nr_pages = nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)rb->data_pages[i]);

	free_page((unsigned long)rb->user_page);

fail_user_page:
	kfree(rb);

fail:
	return NULL;
}

static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}

void rb_free(struct ring_buffer *rb)
{
	int i;

	perf_mmap_free_page((unsigned long)rb->user_page);
	for (i = 0; i < rb->nr_pages; i++)
		perf_mmap_free_page((unsigned long)rb->data_pages[i]);
	kfree(rb);
}

#else
static int data_page_nr(struct ring_buffer *rb)
{
	return rb->nr_pages << page_order(rb);
}

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	/* The '>' counts in the user page. */
	if (pgoff > data_page_nr(rb))
		return NULL;

	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}

static void rb_free_work(struct work_struct *work)
{
	struct ring_buffer *rb;
	void *base;
	int i, nr;

	rb = container_of(work, struct ring_buffer, work);
	nr = data_page_nr(rb);

	base = rb->user_page;
	/* The '<=' counts in the user page. */
	for (i = 0; i <= nr; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(rb);
}

void rb_free(struct ring_buffer *rb)
{
	schedule_work(&rb->work);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	void *all_buf;

	size = sizeof(struct ring_buffer);
	size += sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	INIT_WORK(&rb->work, rb_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	rb->user_page = all_buf;
	rb->data_pages[0] = all_buf + PAGE_SIZE;
	if (nr_pages) {
		rb->nr_pages = 1;
		rb->page_order = ilog2(nr_pages);
	}

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_all_buf:
	kfree(rb);

fail:
	return NULL;
}

#endif

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (rb->aux_nr_pages) {
		/* above AUX space */
		if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
			return NULL;

		/* AUX space */
		if (pgoff >= rb->aux_pgoff) {
			int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages);
			return virt_to_page(rb->aux_pages[aux_pgoff]);
		}
	}

	return __perf_mmap_to_page(rb, pgoff);
}