atomic_arm.h

/*
 * Linux 2.6.32 and later Kernel module for VMware MVP Hypervisor Support
 *
 * Copyright (C) 2010-2013 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; see the file COPYING. If not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#line 5
/**
 * @file
 *
 * @brief bus-atomic operators, ARM implementation.
 * Do not include directly; include 'atomic.h' instead.
 * Memory where the atomics reside must be shared.
 *
 * These operations assume that the exclusive access monitor is cleared during
 * abort entry, but they do not assume that cooperative scheduling (e.g. Linux
 * schedule()) clears the monitor; hence the use of "clrex" where required.
 */
#ifndef _ATOMIC_ARM_H
#define _ATOMIC_ARM_H

#define INCLUDE_ALLOW_MVPD
#define INCLUDE_ALLOW_VMX
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_MONITOR
#define INCLUDE_ALLOW_PV
#define INCLUDE_ALLOW_GPL
#define INCLUDE_ALLOW_HOSTUSER
#define INCLUDE_ALLOW_GUESTUSER
#include "include_check.h"

#include "mvp_assert.h"
/**
 * @brief Atomic Add
 * @param atm atomic cell to operate on
 * @param modval value to apply to atomic cell
 * @return the original value of 'atm'
 */
#define ATOMIC_ADDO(atm, modval) ATOMIC_OPO_PRIVATE(atm, modval, add)

/**
 * @brief Atomic Add
 * @param atm atomic cell to operate on
 * @param modval value to apply to atomic cell
 * @return nothing
 */
#define ATOMIC_ADDV(atm, modval) ATOMIC_OPV_PRIVATE(atm, modval, add)
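
/*
 * Usage sketch (hypothetical; 'counter' stands for any cell declared with the
 * atomic types from 'atomic.h' that carry the atm_Normal/atm_Volatl members
 * these macros expect):
 *
 *    uint32 prev = ATOMIC_ADDO(counter, 1); // fetch-and-add, returns old value
 *    ATOMIC_ADDV(counter, 1);               // add-only, old value not needed
 */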
/**
 * @brief Atomic And
 * @param atm atomic cell to operate on
 * @param modval value to apply to atomic cell
 * @return the original value of 'atm'
 */
#define ATOMIC_ANDO(atm, modval) ATOMIC_OPO_PRIVATE(atm, modval, and)

/**
 * @brief Atomic And
 * @param atm atomic cell to operate on
 * @param modval value to apply to atomic cell
 * @return nothing
 */
#define ATOMIC_ANDV(atm, modval) ATOMIC_OPV_PRIVATE(atm, modval, and)

/**
 * @brief Retrieve an atomic value
 * @param atm atomic cell to operate on
 * @return the value of 'atm'
 */
#define ATOMIC_GETO(atm) ({ \
   typeof((atm).atm_Normal) _oldval; \
   \
   switch (sizeof(_oldval)) { \
   case 4: \
      asm volatile ("ldrex %0, [%1]\n" \
                    "clrex" \
                    : "=&r" (_oldval) \
                    : "r" (&((atm).atm_Volatl))); \
      break; \
   case 8: \
      asm volatile ("ldrexd %0, %H0, [%1]\n" \
                    "clrex" \
                    : "=&r" (_oldval) \
                    : "r" (&((atm).atm_Volatl))); \
      break; \
   default: \
      FATAL(); \
   } \
   _oldval; \
})
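
/*
 * The ldrex/clrex pair above takes and immediately drops a reservation; for a
 * plain read that is all that is needed. The payoff is the 8-byte case, where
 * ldrexd reads both words as a single unit. Usage sketch (hypothetical cell):
 *
 *    uint64 snapshot = ATOMIC_GETO(sharedTimestamp); // never a torn value
 */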
/**
 * @brief Atomic Or
 * @param atm atomic cell to operate on
 * @param modval value to apply to atomic cell
 * @return the original value of 'atm'
 */
#define ATOMIC_ORO(atm, modval) ATOMIC_OPO_PRIVATE(atm, modval, orr)

/**
 * @brief Atomic Or
 * @param atm atomic cell to operate on
 * @param modval value to apply to atomic cell
 * @return nothing
 */
#define ATOMIC_ORV(atm, modval) ATOMIC_OPV_PRIVATE(atm, modval, orr)
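
/*
 * Usage sketch (hypothetical flag names): the AND/OR pairs are the lock-free
 * way to manipulate bits in a shared flag word, and the O variants report
 * what was there before:
 *
 *    ATOMIC_ORV(flags, FLAG_PENDING);    // set a bit
 *    ATOMIC_ANDV(flags, ~FLAG_PENDING);  // clear it again
 *    if (ATOMIC_ORO(flags, FLAG_BUSY) & FLAG_BUSY) {
 *       // bit was already set: somebody else owns it
 *    }
 */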
/**
 * @brief Atomic Conditional Write, i.e.,
 *        set 'atm' to 'newval' iff it was 'oldval'.
 * @param atm atomic cell to operate on
 * @param newval value to possibly write to atomic cell
 * @param oldval value that atomic cell must equal
 * @return 0 if failed; 1 if successful
 */
#define ATOMIC_SETIF(atm, newval, oldval) ({ \
   int _failed; \
   typeof((atm).atm_Normal) _newval = newval; \
   typeof((atm).atm_Normal) _oldval = oldval; \
   \
   ASSERT_ON_COMPILE(sizeof(_newval) == 4); \
   asm volatile ("1: ldrex %0, [%1]\n" \
                 "   cmp %0, %2\n" \
                 "   mov %0, #2\n" \
                 "   IT eq\n" \
                 "   strexeq %0, %3, [%1]\n" \
                 "   cmp %0, #1\n" \
                 "   beq 1b\n" \
                 "   clrex" \
                 : "=&r" (_failed) \
                 : "r" (&((atm).atm_Volatl)), \
                   "r" (_oldval), \
                   "r" (_newval) \
                 : "cc", "memory"); \
   !_failed; \
})
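
/*
 * How the failure coding works: 'mov %0, #2' preloads a sentinel, so after
 * the sequence %0 is 0 (strexeq stored successfully), 1 (strexeq lost the
 * reservation, retried via 'beq 1b'), or 2 (the compare failed and strexeq
 * never executed). Only 0 makes '!_failed' true, and the trailing clrex
 * drops the reservation left dangling on the compare-failed path.
 *
 * Usage sketch (hypothetical cell and limit): the standard compare-and-swap
 * retry loop, here a saturating increment:
 *
 *    uint32 old;
 *    do {
 *       old = ATOMIC_GETO(cell);
 *       if (old == MAX_UINT32) {
 *          break;                  // already saturated, nothing to do
 *       }
 *    } while (!ATOMIC_SETIF(cell, old + 1, old));
 */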
/**
 * @brief Atomic Write (unconditional)
 * @param atm atomic cell to operate on
 * @param newval value to write to atomic cell
 * @return the original value of 'atm'
 */
#define ATOMIC_SETO(atm, newval) ({ \
   int _failed; \
   typeof((atm).atm_Normal) _newval = newval; \
   typeof((atm).atm_Normal) _oldval; \
   \
   switch (sizeof(_newval)) { \
   case 4: \
      asm volatile ("1: ldrex %0, [%2]\n" \
                    "   strex %1, %3, [%2]\n" \
                    "   teq %1, #0\n" \
                    "   bne 1b" \
                    : "=&r" (_oldval), \
                      "=&r" (_failed) \
                    : "r" (&((atm).atm_Volatl)), \
                      "r" (_newval) \
                    : "cc", "memory"); \
      break; \
   case 8: \
      asm volatile ("1: ldrexd %0, %H0, [%2]\n" \
                    "   strexd %1, %3, %H3, [%2]\n" \
                    "   teq %1, #0\n" \
                    "   bne 1b" \
                    : "=&r" (_oldval), \
                      "=&r" (_failed) \
                    : "r" (&((atm).atm_Volatl)), \
                      "r" (_newval) \
                    : "cc", "memory"); \
      break; \
   default: \
      FATAL(); \
   } \
   _oldval; \
})
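
/*
 * Because ATOMIC_SETO hands back the previous contents, it doubles as an
 * atomic exchange. Usage sketch (hypothetical names), a test-and-set claim:
 *
 *    if (ATOMIC_SETO(claimFlag, 1) == 0) {
 *       // we swapped 0 -> 1 first, so we own the resource
 *    }
 */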
/**
 * @brief Atomic Write (unconditional)
 * @param atm atomic cell to operate on
 * @param newval value to write to atomic cell
 * @return nothing
 */
#define ATOMIC_SETV(atm, newval) ATOMIC_SETO((atm), (newval))

/**
 * @brief Atomic Subtract
 * @param atm atomic cell to operate on
 * @param modval value to apply to atomic cell
 * @return the original value of 'atm'
 */
#define ATOMIC_SUBO(atm, modval) ATOMIC_OPO_PRIVATE(atm, modval, sub)

/**
 * @brief Atomic Subtract
 * @param atm atomic cell to operate on
 * @param modval value to apply to atomic cell
 * @return nothing
 */
#define ATOMIC_SUBV(atm, modval) ATOMIC_OPV_PRIVATE(atm, modval, sub)
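
/*
 * Usage sketch (hypothetical names): the old value returned by SUBO is what
 * makes it usable for reference counting, since exactly one caller observes
 * the count drop from 1 to 0:
 *
 *    if (ATOMIC_SUBO(refCount, 1) == 1) {
 *       // we released the last reference: safe to free
 *    }
 */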
/**
 * @brief Atomic Generic Binary Operation
 * @param atm atomic cell to operate on
 * @param modval value to apply to atomic cell
 * @param op ARM instruction (add, and, orr, etc.)
 * @return the original value of 'atm'
 */
#define ATOMIC_OPO_PRIVATE(atm, modval, op) ({ \
   int _failed; \
   typeof((atm).atm_Normal) _modval = modval; \
   typeof((atm).atm_Normal) _oldval; \
   typeof((atm).atm_Normal) _newval; \
   \
   ASSERT_ON_COMPILE(sizeof(_modval) == 4); \
   asm volatile ("1: ldrex %0, [%3]\n" \
                 #op " %1, %0, %4\n" \
                 "   strex %2, %1, [%3]\n" \
                 "   teq %2, #0\n" \
                 "   bne 1b" \
                 : "=&r" (_oldval), \
                   "=&r" (_newval), \
                   "=&r" (_failed) \
                 : "r" (&((atm).atm_Volatl)), \
                   "r" (_modval) \
                 : "cc", "memory"); \
   _oldval; \
})
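
/*
 * For reference, ATOMIC_ADDO stringizes 'add' into the template above, so the
 * loop the compiler emits looks roughly like:
 *
 *    1: ldrex   %old, [&atm]         @ load and take exclusive reservation
 *       add     %new, %old, %modval  @ compute updated value
 *       strex   %fail, %new, [&atm]  @ store iff reservation still held
 *       teq     %fail, #0
 *       bne     1b                   @ lost the reservation: retry
 *
 * The "memory" clobber makes each operation a compiler barrier, but note
 * there is no dmb here: these macros impose no hardware ordering on other
 * memory accesses.
 */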
/**
 * @brief Atomic Generic Binary Operation
 * @param atm atomic cell to operate on
 * @param modval value to apply to atomic cell
 * @param op ARM instruction (add, and, orr, etc.)
 * @return nothing
 */
#define ATOMIC_OPV_PRIVATE(atm, modval, op) do { \
   int _failed; \
   typeof((atm).atm_Normal) _modval = modval; \
   typeof((atm).atm_Normal) _sample; \
   \
   ASSERT_ON_COMPILE(sizeof(_modval) == 4); \
   asm volatile ("1: ldrex %0, [%2]\n" \
                 #op " %0, %3\n" \
                 "   strex %1, %0, [%2]\n" \
                 "   teq %1, #0\n" \
                 "   bne 1b" \
                 : "=&r" (_sample), \
                   "=&r" (_failed) \
                 : "r" (&((atm).atm_Volatl)), \
                   "r" (_modval) \
                 : "cc", "memory"); \
} while (0)
/**
 * @brief Single-copy atomic word write.
 *
 * ARMv7 defines word-aligned word writes to be single-copy atomic. See
 * A3-26 ARM DDI 0406A.
 *
 * @param p word-aligned location to write to
 * @param val word-sized value to write to p
 */
#define ATOMIC_SINGLE_COPY_WRITE32(p, val) do { \
   ASSERT(sizeof(val) == 4); \
   ASSERT((MVA)(p) % sizeof(val) == 0); \
   asm volatile("str %0, [%1]" \
                : \
                : "r" (val), "r" (p) \
                : "memory"); \
} while (0)
/**
 * @brief Single-copy atomic word read.
 *
 * ARMv7 defines word-aligned word reads to be single-copy atomic. See
 * A3-26 ARM DDI 0406A.
 *
 * @param p word-aligned location to read from
 *
 * @return word-sized value from p
 */
#define ATOMIC_SINGLE_COPY_READ32(p) ({ \
   uint32 _val; \
   \
   ASSERT((MVA)(p) % sizeof(uint32) == 0); \
   asm volatile("ldr %0, [%1]" \
                : "=r" (_val) \
                : "r" (p)); \
   _val; \
})
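
/*
 * Usage sketch (hypothetical names): a word updated by one side and polled by
 * the other. Single-copy atomicity only rules out torn values; ordering
 * against other stores still needs explicit barriers:
 *
 *    ATOMIC_SINGLE_COPY_WRITE32(&shared->seq, newSeq);      // producer
 *    uint32 seq = ATOMIC_SINGLE_COPY_READ32(&shared->seq);  // consumer
 */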
/**
 * @brief Single-copy atomic double word write.
 *
 * LPAE defines doubleword-aligned double word writes to be single-copy
 * atomic. See 6.7 ARM PRD03-GENC-008469 13.0.
 *
 * @param p double word-aligned location to write to
 * @param val double word-sized value to write to p
 */
#define ATOMIC_SINGLE_COPY_WRITE64(p, val) do { \
   ASSERT(sizeof(val) == 8); \
   ASSERT((MVA)(p) % sizeof(val) == 0); \
   asm volatile("mov r0, %0\n" \
                "mov r1, %1\n" \
                "strd r0, r1, [%2]" \
                : \
                : "r" ((uint32)(val)), \
                  "r" (((uint64)(val)) >> 32), \
                  "r" (p) \
                : "r0", "r1", "memory"); \
} while (0)
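
/*
 * The explicit moves through r0/r1 are needed because, in the ARM encoding,
 * strd wants an even/odd consecutive register pair, which plain "r"
 * constraints cannot guarantee. Usage sketch (hypothetical names): this is
 * the shape needed to publish a 64-bit LPAE descriptor in one shot, so a
 * concurrent observer never sees a half-written value:
 *
 *    ATOMIC_SINGLE_COPY_WRITE64(pteP, newDescriptor);
 */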
#endif