icswx.c 7.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293
  1. /*
  2. * ICSWX and ACOP Management
  3. *
  4. * Copyright (C) 2011 Anton Blanchard, IBM Corp. <anton@samba.org>
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. *
  11. */
  12. #include <linux/sched.h>
  13. #include <linux/kernel.h>
  14. #include <linux/errno.h>
  15. #include <linux/types.h>
  16. #include <linux/mm.h>
  17. #include <linux/spinlock.h>
  18. #include <linux/module.h>
  19. #include <linux/uaccess.h>
  20. #include "icswx.h"
  21. /*
  22. * The processor and its L2 cache cause the icswx instruction to
  23. * generate a COP_REQ transaction on PowerBus. The transaction has no
  24. * address, and the processor does not perform an MMU access to
  25. * authenticate the transaction. The command portion of the PowerBus
  26. * COP_REQ transaction includes the LPAR_ID (LPID) and the coprocessor
  27. * Process ID (PID), which the coprocessor compares to the authorized
  28. * LPID and PID held in the coprocessor, to determine if the process
  29. * is authorized to generate the transaction. The data of the COP_REQ
  30. * transaction is 128-byte or less in size and is placed in cacheable
  31. * memory on a 128-byte cache line boundary.
  32. *
 * A task that wants to use a coprocessor should call use_cop() to mark
 * the use of the Coprocessor Type (CT) and enable context switching of
 * the authorization state. On a server class processor, the PID
 * register is used only for coprocessor management, and so a
 * coprocessor PID is allocated before executing the icswx
 * instruction. drop_cop() is used to free the coprocessor PID.
 *
  39. *
  40. * Example:
  41. * Host Fabric Interface (HFI) is a PowerPC network coprocessor.
 * Each HFI has multiple windows. Each HFI window serves as a
 * network device, sending to and receiving from the HFI network.
  44. * HFI immediate send function uses icswx instruction. The immediate
  45. * send function allows small (single cache-line) packets be sent
  46. * without using the regular HFI send FIFO and doorbell, which are
  47. * much slower than immediate send.
  48. *
  49. * For each task intending to use HFI immediate send, the HFI driver
  50. * calls use_cop() to obtain a coprocessor PID for the task.
 * The HFI driver then allocates a free HFI window and saves the
  52. * coprocessor PID to the HFI window to allow the task to use the
  53. * HFI window.
  54. *
  55. * The HFI driver repeatedly creates immediate send packets and
  56. * issues icswx instruction to send data through the HFI window.
  57. * The HFI compares the coprocessor PID in the CPU PID register
  58. * to the PID held in the HFI window to determine if the transaction
  59. * is allowed.
  60. *
 * When the task wants to release the HFI window, the HFI driver calls
  62. * drop_cop() to release the coprocessor PID.
  63. */
/*
 * switch_cop() - Install the incoming mm's coprocessor authorization.
 * @next: the mm being switched to.
 *
 * Called on context switch: loads the next task's coprocessor PID
 * (when CONFIG_PPC_ICSWX_PID is enabled) and its ACOP (authorized
 * coprocessor types) into the SPRs, so icswx transactions issued by
 * the incoming task carry its own authorization.
 */
void switch_cop(struct mm_struct *next)
{
#ifdef CONFIG_PPC_ICSWX_PID
	/* PID SPR holds the coprocessor PID on these processors */
	mtspr(SPRN_PID, next->context.cop_pid);
#endif
	mtspr(SPRN_ACOP, next->context.acop);
}
/**
 * use_cop() - Start using a coprocessor.
 * @acop: mask of coprocessor types (CTs) to be used.
 * @mm: The mm to associate the coprocessor with. Most likely current mm.
 *
 * Return a positive PID if successful. Negative errno otherwise.
 * The returned PID will be fed to the coprocessor to determine if an
 * icswx transaction is authenticated.
 */
int use_cop(unsigned long acop, struct mm_struct *mm)
{
	int ret;

	if (!cpu_has_feature(CPU_FTR_ICSWX))
		return -ENODEV;

	if (!mm || !acop)
		return -EINVAL;

	/* The page_table_lock ensures mm_users won't change under us */
	spin_lock(&mm->page_table_lock);
	/* cop_lockp serializes updates to this mm's cop_pid/acop state */
	spin_lock(mm->context.cop_lockp);

	/* Allocate (or reuse) the coprocessor PID for this mm */
	ret = get_cop_pid(mm);
	if (ret < 0)
		goto out;

	/* update acop: accumulate the newly requested CT bits */
	mm->context.acop |= acop;

	/* Push the new PID/ACOP into this CPU's SPRs immediately */
	sync_cop(mm);

	/*
	 * If this is a threaded process then there might be other threads
	 * running. We need to send an IPI to force them to pick up any
	 * change in PID and ACOP.
	 */
	if (atomic_read(&mm->mm_users) > 1)
		smp_call_function(sync_cop, mm, 1);

out:
	spin_unlock(mm->context.cop_lockp);
	spin_unlock(&mm->page_table_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(use_cop);
/**
 * drop_cop() - Stop using a coprocessor.
 * @acop: mask of coprocessor types (CTs) to be stopped.
 * @mm: The mm the coprocessor is associated with.
 *
 * Clears the given CT bits from the mm's ACOP; when no CTs remain in
 * use, the coprocessor PID is disabled and freed.
 */
void drop_cop(unsigned long acop, struct mm_struct *mm)
{
	int free_pid;

	if (!cpu_has_feature(CPU_FTR_ICSWX))
		return;

	if (WARN_ON_ONCE(!mm))
		return;

	/* The page_table_lock ensures mm_users won't change under us */
	spin_lock(&mm->page_table_lock);
	/* cop_lockp serializes updates to this mm's cop_pid/acop state */
	spin_lock(mm->context.cop_lockp);

	mm->context.acop &= ~acop;

	/* Returns the PID to release, or COP_PID_NONE if still in use */
	free_pid = disable_cop_pid(mm);
	/* Update this CPU's SPRs to reflect the reduced authorization */
	sync_cop(mm);

	/*
	 * If this is a threaded process then there might be other threads
	 * running. We need to send an IPI to force them to pick up any
	 * change in PID and ACOP.
	 */
	if (atomic_read(&mm->mm_users) > 1)
		smp_call_function(sync_cop, mm, 1);

	/*
	 * Only return the PID to the allocator after every thread has
	 * stopped using it (i.e. after the IPI above completed).
	 */
	if (free_pid != COP_PID_NONE)
		free_cop_pid(free_pid);

	spin_unlock(mm->context.cop_lockp);
	spin_unlock(&mm->page_table_lock);
}
EXPORT_SYMBOL_GPL(drop_cop);
  140. static int acop_use_cop(int ct)
  141. {
  142. /* There is no alternate policy, yet */
  143. return -1;
  144. }
/*
 * acop_get_inst() - Get the instruction word at the NIP.
 * @regs: user registers at the time of the fault.
 *
 * Returns the 32-bit instruction word at the faulting NIP, or 0 if
 * the user address cannot be read. Callers treat 0 as failure; a
 * valid PowerPC instruction is never all-zero.
 */
static u32 acop_get_inst(struct pt_regs *regs)
{
	u32 inst;
	u32 __user *p;

	p = (u32 __user *)regs->nip;
	if (!access_ok(VERIFY_READ, p, sizeof(*p)))
		return 0;

	if (__get_user(inst, p))
		return 0;

	return inst;
}
/**
 * acop_handle_fault() - Handle a data storage fault caused by icswx.
 * @regs: registers at time of interrupt
 * @address: storage address
 * @error_code: Fault code, usually the DSISR or ESR depending on
 *              processor type
 *
 * Return 0 if we are able to resolve the data storage fault that
 * results from a CT miss in the ACOP register.
 */
int acop_handle_fault(struct pt_regs *regs, unsigned long address,
		      unsigned long error_code)
{
	int ct;
	u32 inst = 0;

	if (!cpu_has_feature(CPU_FTR_ICSWX)) {
		pr_info("No coprocessors available");
		_exception(SIGILL, regs, ILL_ILLOPN, address);
	}
	/*
	 * NOTE(review): there is no return after _exception() above, so
	 * on a !ICSWX CPU we fall through and keep processing the fault
	 * — confirm that is intended.
	 */

	if (!user_mode(regs)) {
		/* this could happen if the HV denies the
		 * kernel access, for now we just die */
		die("ICSWX from kernel failed", regs, SIGSEGV);
	}

	/* Some implementations leave us a hint for the CT */
	ct = ICSWX_GET_CT_HINT(error_code);
	if (ct < 0) {
		/* we have to peek at the instruction word to figure out CT */
		u32 ccw;
		u32 rs;

		inst = acop_get_inst(regs);
		if (inst == 0)
			return -1;

		/* RS field of the instruction names the GPR holding the CCW */
		rs = (inst >> (31 - 10)) & 0x1f;

		ccw = regs->gpr[rs];
		/* CT is the 6-bit field extracted at bit 16 of the CCW */
		ct = (ccw >> 16) & 0x3f;
	}

	/*
	 * We could be here because another thread has enabled acop
	 * but the ACOP register has yet to be updated.
	 *
	 * This should have been taken care of by the IPI to sync all
	 * the threads (see smp_call_function(sync_cop, mm, 1)), but
	 * that could take forever if there are a significant number
	 * of threads.
	 *
	 * Given the number of threads on some of these systems,
	 * perhaps this is the best way to sync ACOP rather than whack
	 * every thread with an IPI.
	 */
	if ((acop_copro_type_bit(ct) & current->active_mm->context.acop) != 0) {
		/* The mm is authorized; just refresh this CPU's SPRs */
		sync_cop(current->active_mm);
		return 0;
	}

	/* check for alternate policy */
	if (!acop_use_cop(ct))
		return 0;

	/* at this point the CT is unknown to the system */
	pr_warn("%s[%d]: Coprocessor %d is unavailable\n",
		current->comm, current->pid, ct);

	/* get inst if we don't already have it */
	if (inst == 0) {
		inst = acop_get_inst(regs);
		if (inst == 0)
			return -1;
	}

	/* Check if the instruction is the "record form" */
	if (inst & 1) {
		/*
		 * the instruction is "record" form so we can reject
		 * using CR0
		 */
		regs->ccr &= ~(0xful << 28);
		regs->ccr |= ICSWX_RC_NOT_FOUND << 28;

		/* Move on to the next instruction */
		regs->nip += 4;
	} else {
		/*
		 * There is no architected mechanism to report a bad
		 * CT so we could either SIGILL or report nothing.
		 * Since the non-record version should only be used
		 * for "hints" or "don't care" we should probably do
		 * nothing. However, I could see how some people
		 * might want a SIGILL, so it is here if you want it.
		 */
#ifdef CONFIG_PPC_ICSWX_USE_SIGILL
		_exception(SIGILL, regs, ILL_ILLOPN, address);
#else
		regs->nip += 4;
#endif
	}

	return 0;
}
EXPORT_SYMBOL_GPL(acop_handle_fault);