grukdump.c

/*
 * SN Platform GRU Driver
 *
 * Dump GRU State
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <asm/uv/uv_hub.h>

#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"
#include "grulib.h"

#define CCH_LOCK_ATTEMPTS	10
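
/*
 * Copy one GRU handle (GRU_HANDLE_BYTES) to the user buffer and advance
 * the destination pointer past it.  Returns 0 on success, -1 on fault.
 */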
static int gru_user_copy_handle(void __user **dp, void *s)
{
	if (copy_to_user(*dp, s, GRU_HANDLE_BYTES))
		return -1;
	*dp += GRU_HANDLE_BYTES;
	return 0;
}
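
/*
 * Dump the allocated CBRs of one context, each together with its matching
 * TFH and CBE handles, and optionally the context's data segment, to the
 * user buffer.
 */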
static int gru_dump_context_data(void *grubase,
			struct gru_context_configuration_handle *cch,
			void __user *ubuf, int ctxnum, int dsrcnt,
			int flush_cbrs)
{
	void *cb, *cbe, *tfh, *gseg;
	int i, scr;

	gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
	cb = gseg + GRU_CB_BASE;
	cbe = grubase + GRU_CBE_BASE;
	tfh = grubase + GRU_TFH_BASE;

	/* Dump each allocated CBR together with its TFH and CBE */
	for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
		if (flush_cbrs)
			gru_flush_cache(cb);
		if (gru_user_copy_handle(&ubuf, cb))
			goto fail;
		if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
			goto fail;
		if (gru_user_copy_handle(&ubuf, cbe + i * GRU_HANDLE_STRIDE))
			goto fail;
		cb += GRU_HANDLE_STRIDE;
	}

	/*
	 * Optionally dump the data segment.  ubuf is a user pointer, so
	 * use copy_to_user() rather than a direct memcpy().
	 */
	if (dsrcnt && copy_to_user(ubuf, gseg + GRU_DS_BASE,
				   dsrcnt * GRU_HANDLE_STRIDE))
		goto fail;
	return 0;

fail:
	return -EFAULT;
}
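
/*
 * Dump all TLB fault map (TFM) handles of the chiplet to the user buffer.
 * Returns the number of bytes copied or a negative errno.
 */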
static int gru_dump_tfm(struct gru_state *gru,
		void __user *ubuf, void __user *ubufend)
{
	struct gru_tlb_fault_map *tfm;
	int i;

	/* Fail early if the user buffer cannot hold all TFM handles */
	if (GRU_NUM_TFM * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
		return -EFBIG;

	for (i = 0; i < GRU_NUM_TFM; i++) {
		tfm = get_tfm(gru->gs_gru_base_vaddr, i);
		if (gru_user_copy_handle(&ubuf, tfm))
			goto fail;
	}
	return GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;

fail:
	return -EFAULT;
}
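
/*
 * Dump all TLB global handles (TGH) of the chiplet to the user buffer.
 * Returns the number of bytes copied or a negative errno.
 */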
static int gru_dump_tgh(struct gru_state *gru,
		void __user *ubuf, void __user *ubufend)
{
	struct gru_tlb_global_handle *tgh;
	int i;

	/* Fail early if the user buffer cannot hold all TGH handles */
	if (GRU_NUM_TGH * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
		return -EFBIG;

	for (i = 0; i < GRU_NUM_TGH; i++) {
		tgh = get_tgh(gru->gs_gru_base_vaddr, i);
		if (gru_user_copy_handle(&ubuf, tgh))
			goto fail;
	}
	return GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;

fail:
	return -EFAULT;
}
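
/*
 * Dump one GRU context: a gru_dump_context_header, the context's CCH and,
 * if the context state can be inspected, its CBR/TFH/CBE handles and
 * optionally its data segment.  Returns the number of bytes written to the
 * user buffer or a negative errno.
 */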
static int gru_dump_context(struct gru_state *gru, int ctxnum,
		void __user *ubuf, void __user *ubufend, char data_opt,
		char lock_cch, char flush_cbrs)
{
	struct gru_dump_context_header hdr;
	struct gru_dump_context_header __user *uhdr = ubuf;
	struct gru_context_configuration_handle *cch, *ubufcch;
	struct gru_thread_state *gts;
	int try, cch_locked, cbrcnt = 0, dsrcnt = 0, bytes = 0, ret = 0;
	void *grubase;

	memset(&hdr, 0, sizeof(hdr));
	grubase = gru->gs_gru_base_vaddr;
	cch = get_cch(grubase, ctxnum);

	/* Try to lock the CCH so the context cannot change while it is dumped */
	for (try = 0; try < CCH_LOCK_ATTEMPTS; try++) {
		cch_locked = trylock_cch_handle(cch);
		if (cch_locked)
			break;
		msleep(1);
	}

	ubuf += sizeof(hdr);
	ubufcch = ubuf;
	if (gru_user_copy_handle(&ubuf, cch)) {
		/* Only drop the lock if this CPU actually acquired it */
		if (cch_locked)
			unlock_cch_handle(cch);
		return -EFAULT;
	}
	if (cch_locked)
		ubufcch->delresp = 0;
	bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;

	if (cch_locked || !lock_cch) {
		gts = gru->gs_gts[ctxnum];
		if (gts && gts->ts_vma) {
			hdr.pid = gts->ts_tgid_owner;
			hdr.vaddr = gts->ts_vma->vm_start;
		}

		if (cch->state != CCHSTATE_INACTIVE) {
			cbrcnt = hweight64(cch->cbr_allocation_map) *
						GRU_CBR_AU_SIZE;
			dsrcnt = data_opt ? hweight32(cch->dsr_allocation_map) *
						GRU_DSR_AU_CL : 0;
		}
		bytes += (3 * cbrcnt + dsrcnt) * GRU_CACHE_LINE_BYTES;
		if (bytes > ubufend - ubuf)
			ret = -EFBIG;
		else
			ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
							dsrcnt, flush_cbrs);
	}
	if (cch_locked)
		unlock_cch_handle(cch);
	if (ret)
		return ret;

	hdr.magic = GRU_DUMP_MAGIC;
	hdr.gid = gru->gs_gid;
	hdr.ctxnum = ctxnum;
	hdr.cbrcnt = cbrcnt;
	hdr.dsrcnt = dsrcnt;
	hdr.cch_locked = cch_locked;
	if (copy_to_user(uhdr, &hdr, sizeof(hdr)))
		return -EFAULT;

	return bytes;
}
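
/*
 * Top-level handler for a chiplet state dump request from user space:
 * copies in the request, dumps the chiplet's TFM and TGH handles, then the
 * selected context(s).  Returns the number of contexts dumped or a
 * negative errno.
 */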
int gru_dump_chiplet_request(unsigned long arg)
{
	struct gru_state *gru;
	struct gru_dump_chiplet_state_req req;
	void __user *ubuf;
	void __user *ubufend;
	int ctxnum, ret, cnt = 0;

	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	/* Currently, only dump by gid is implemented */
	if (req.gid >= gru_max_gids || req.gid < 0)
		return -EINVAL;

	gru = GID_TO_GRU(req.gid);
	ubuf = req.buf;
	ubufend = req.buf + req.buflen;

	ret = gru_dump_tfm(gru, ubuf, ubufend);
	if (ret < 0)
		goto fail;
	ubuf += ret;

	ret = gru_dump_tgh(gru, ubuf, ubufend);
	if (ret < 0)
		goto fail;
	ubuf += ret;

	for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
		if (req.ctxnum == ctxnum || req.ctxnum < 0) {
			ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
						req.data_opt, req.lock_cch,
						req.flush_cbrs);
			if (ret < 0)
				goto fail;
			ubuf += ret;
			cnt++;
		}
	}

	if (copy_to_user((void __user *)arg, &req, sizeof(req)))
		return -EFAULT;
	return cnt;

fail:
	return ret;
}
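
/*
 * Illustrative user-space sketch (not part of this file): one way the dump
 * served by gru_dump_chiplet_request() might be driven through the driver's
 * ioctl interface.  The GRU_DUMP_CHIPLET_STATE command is taken from
 * grulib.h; the "/dev/gru" device path and the 1 MB buffer size are
 * assumptions for the example only.
 *
 *	struct gru_dump_chiplet_state_req req;
 *	char *buf = malloc(1024 * 1024);
 *	int fd = open("/dev/gru", O_RDWR);
 *
 *	memset(&req, 0, sizeof(req));
 *	req.gid = 0;		// dump chiplet 0
 *	req.ctxnum = -1;	// a negative ctxnum selects all contexts
 *	req.data_opt = 1;	// include data segments (DSRs)
 *	req.buf = buf;
 *	req.buflen = 1024 * 1024;
 *
 *	// returns the number of contexts dumped, or -1 with errno set
 *	int cnt = ioctl(fd, GRU_DUMP_CHIPLET_STATE, &req);
 */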