membarrier.c

/*
 * Copyright (C) 2010-2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * membarrier system call
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/syscalls.h>
#include <linux/membarrier.h>
#include <linux/tick.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>

#include "sched.h"	/* for cpu_rq(). */

/*
 * Bitmask made from an OR of all commands within enum membarrier_cmd,
 * except MEMBARRIER_CMD_QUERY.
 */
#define MEMBARRIER_CMD_BITMASK	\
	(MEMBARRIER_CMD_SHARED | MEMBARRIER_CMD_PRIVATE_EXPEDITED	\
	| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED)

static void ipi_mb(void *info)
{
	smp_mb();	/* IPIs should be serializing but paranoid. */
}

static int membarrier_private_expedited(void)
{
	int cpu;
	bool fallback = false;
	cpumask_var_t tmpmask;

	if (!(atomic_read(&current->mm->membarrier_state)
			& MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
		return -EPERM;

	if (num_online_cpus() == 1)
		return 0;

	/*
	 * Matches memory barriers around rq->curr modification in
	 * scheduler.
	 */
	smp_mb();	/* system call entry is not a mb. */

	/*
	 * Expedited membarrier commands guarantee that they won't
	 * block, hence the GFP_NOWAIT allocation flag and fallback
	 * implementation.
	 */
	if (!zalloc_cpumask_var(&tmpmask, GFP_NOWAIT)) {
		/* Fallback for OOM. */
		fallback = true;
	}

	cpus_read_lock();
	for_each_online_cpu(cpu) {
		struct task_struct *p;

		/*
		 * Skipping the current CPU is OK even though we can be
		 * migrated at any point. The current CPU, at the point
		 * where we read raw_smp_processor_id(), is ensured to
		 * be in program order with respect to the caller
		 * thread. Therefore, we can skip this CPU from the
		 * iteration.
		 */
		if (cpu == raw_smp_processor_id())
			continue;
		rcu_read_lock();
		p = task_rcu_dereference(&cpu_rq(cpu)->curr);
		if (p && p->mm == current->mm) {
			if (!fallback)
				__cpumask_set_cpu(cpu, tmpmask);
			else
				smp_call_function_single(cpu, ipi_mb, NULL, 1);
		}
		rcu_read_unlock();
	}
	if (!fallback) {
		preempt_disable();
		smp_call_function_many(tmpmask, ipi_mb, NULL, 1);
		preempt_enable();
		free_cpumask_var(tmpmask);
	}
	cpus_read_unlock();

	/*
	 * Memory barrier on the caller thread _after_ we finished
	 * waiting for the last IPI. Matches memory barriers around
	 * rq->curr modification in scheduler.
	 */
	smp_mb();	/* exit from system call is not a mb */
	return 0;
}

static void membarrier_register_private_expedited(void)
{
	struct task_struct *p = current;
	struct mm_struct *mm = p->mm;

	/*
	 * We need to consider threads belonging to different thread
	 * groups, which use the same mm (CLONE_VM but not
	 * CLONE_THREAD).
	 */
	if (atomic_read(&mm->membarrier_state)
			& MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY)
		return;
	atomic_or(MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY,
			&mm->membarrier_state);
}
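
/*
 * Illustrative userspace view of the registration step above (a sketch,
 * not part of this file; it assumes a raw syscall(2) invocation, since
 * glibc provides no membarrier() wrapper). The registered state lives
 * in the mm, so a single call covers every thread sharing the address
 * space, including CLONE_VM tasks in other thread groups:
 *
 *	if (syscall(__NR_membarrier,
 *		    MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
 *		err(1, "membarrier register");
 */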

/**
 * sys_membarrier - issue memory barriers on a set of threads
 * @cmd:   Takes command values defined in enum membarrier_cmd.
 * @flags: Currently needs to be 0. For future extensions.
 *
 * If this system call is not implemented, -ENOSYS is returned. If the
 * command specified does not exist, is not available on the running
 * kernel, or if the command argument is invalid, this system call
 * returns -EINVAL. For a given command, with the flags argument set to 0,
 * this system call is guaranteed to always return the same value until
 * reboot.
 *
 * All memory accesses performed in program order from each targeted thread
 * are guaranteed to be ordered with respect to sys_membarrier(). If we use
 * the semantic "barrier()" to represent a compiler barrier forcing memory
 * accesses to be performed in program order across the barrier, and
 * smp_mb() to represent explicit memory barriers forcing full memory
 * ordering across the barrier, we have the following ordering table for
 * each pair of barrier(), sys_membarrier() and smp_mb():
 *
 * The pair ordering is detailed as (O: ordered, X: not ordered):
 *
 *                          barrier()   smp_mb()   sys_membarrier()
 * barrier()                X           X          O
 * smp_mb()                 X           O          O
 * sys_membarrier()         O           O          O
 */
SYSCALL_DEFINE2(membarrier, int, cmd, int, flags)
{
	if (unlikely(flags))
		return -EINVAL;

	switch (cmd) {
	case MEMBARRIER_CMD_QUERY:
	{
		int cmd_mask = MEMBARRIER_CMD_BITMASK;

		if (tick_nohz_full_enabled())
			cmd_mask &= ~MEMBARRIER_CMD_SHARED;
		return cmd_mask;
	}
	case MEMBARRIER_CMD_SHARED:
		/* MEMBARRIER_CMD_SHARED is not compatible with nohz_full. */
		if (tick_nohz_full_enabled())
			return -EINVAL;
		if (num_online_cpus() > 1)
			synchronize_sched();
		return 0;
	case MEMBARRIER_CMD_PRIVATE_EXPEDITED:
		return membarrier_private_expedited();
	case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED:
		membarrier_register_private_expedited();
		return 0;
	default:
		return -EINVAL;
	}
}
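
/*
 * Illustrative userspace usage (a minimal sketch, not part of the kernel
 * file above; it assumes Linux >= 4.14 headers and no libc wrapper, hence
 * the raw syscall). Per the ordering table in the kernel-doc comment, the
 * MEMBARRIER_CMD_PRIVATE_EXPEDITED call on a slow path pairs with a plain
 * compiler barrier() on the fast paths of the other threads.
 */
#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <err.h>

static int membarrier(int cmd, int flags)
{
	return syscall(__NR_membarrier, cmd, flags);
}

int main(void)
{
	/* Register once per process before using the expedited command. */
	if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
		err(1, "membarrier register");

	/*
	 * Acts as an smp_mb() on every running thread of this process;
	 * fails with errno == EPERM without the registration above.
	 */
	if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
		err(1, "membarrier");

	return 0;
}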