percpu_freelist.c

/* Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include "percpu_freelist.h"

/* Allocate one freelist head per possible CPU and start each list empty. */
int pcpu_freelist_init(struct pcpu_freelist *s)
{
	int cpu;

	s->freelist = alloc_percpu(struct pcpu_freelist_head);
	if (!s->freelist)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu);

		raw_spin_lock_init(&head->lock);
		head->first = NULL;
	}
	return 0;
}

void pcpu_freelist_destroy(struct pcpu_freelist *s)
{
	free_percpu(s->freelist);
}

/* Push @node onto @head under its lock; callers manage irq state. */
static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
					struct pcpu_freelist_node *node)
{
	raw_spin_lock(&head->lock);
	node->next = head->first;
	head->first = node;
	raw_spin_unlock(&head->lock);
}

/* Push @node onto the list of the CPU we are currently running on. */
void pcpu_freelist_push(struct pcpu_freelist *s,
			struct pcpu_freelist_node *node)
{
	struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);

	__pcpu_freelist_push(head, node);
}

/* Seed the freelist: spread @nr_elems elements of @elem_size bytes from @buf
 * roughly evenly across the per-CPU lists.
 */
void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
			    u32 nr_elems)
{
	struct pcpu_freelist_head *head;
	unsigned long flags;
	int i, cpu, pcpu_entries;

	pcpu_entries = nr_elems / num_possible_cpus() + 1;
	i = 0;

	/* disable irq to workaround lockdep false positive
	 * in bpf usage pcpu_freelist_populate() will never race
	 * with pcpu_freelist_push()
	 */
	local_irq_save(flags);
	for_each_possible_cpu(cpu) {
again:
		head = per_cpu_ptr(s->freelist, cpu);
		__pcpu_freelist_push(head, buf);
		i++;
		buf += elem_size;
		if (i == nr_elems)
			break;
		if (i % pcpu_entries)
			goto again;
	}
	local_irq_restore(flags);
}

/* Pop from the current CPU's list first; if it is empty, steal from the
 * other CPUs' lists in order. Returns NULL only when every list is empty.
 */
struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
{
	struct pcpu_freelist_head *head;
	struct pcpu_freelist_node *node;
	unsigned long flags;
	int orig_cpu, cpu;

	local_irq_save(flags);
	orig_cpu = cpu = raw_smp_processor_id();
	while (1) {
		head = per_cpu_ptr(s->freelist, cpu);
		raw_spin_lock(&head->lock);
		node = head->first;
		if (node) {
			head->first = node->next;
			raw_spin_unlock_irqrestore(&head->lock, flags);
			return node;
		}
		raw_spin_unlock(&head->lock);
		cpu = cpumask_next(cpu, cpu_possible_mask);
		if (cpu >= nr_cpu_ids)
			cpu = 0;
		if (cpu == orig_cpu) {
			local_irq_restore(flags);
			return NULL;
		}
	}
}
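
/* Illustrative usage sketch (not part of the upstream file): a caller
 * preallocates a flat array of fixed-size elements, seeds the freelist with
 * pcpu_freelist_populate(), then pops and pushes elements at runtime. The
 * struct my_elem type and my_pool_* helpers are hypothetical names invented
 * here; only the pcpu_freelist_* calls come from this file and
 * percpu_freelist.h.
 */
#include <linux/kernel.h>
#include "percpu_freelist.h"

struct my_elem {
	struct pcpu_freelist_node fnode;	/* node first: buf pointer == node pointer */
	u64 payload;
};

static int my_pool_init(struct pcpu_freelist *fl, void *elems, u32 nr_elems)
{
	int err = pcpu_freelist_init(fl);

	if (err)
		return err;
	/* spread the preallocated elements across the per-CPU lists */
	pcpu_freelist_populate(fl, elems, sizeof(struct my_elem), nr_elems);
	return 0;
}

static struct my_elem *my_pool_get(struct pcpu_freelist *fl)
{
	struct pcpu_freelist_node *node = pcpu_freelist_pop(fl);

	return node ? container_of(node, struct my_elem, fnode) : NULL;
}

static void my_pool_put(struct pcpu_freelist *fl, struct my_elem *e)
{
	pcpu_freelist_push(fl, &e->fnode);
}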