/* grukdump.c */
  1. /*
  2. * SN Platform GRU Driver
  3. *
  4. * Dump GRU State
  5. *
  6. * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation; either version 2 of the License, or
  11. * (at your option) any later version.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software
  20. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  21. */
  22. #include <linux/kernel.h>
  23. #include <linux/mm.h>
  24. #include <linux/spinlock.h>
  25. #include <linux/uaccess.h>
  26. #include <linux/delay.h>
  27. #include <linux/bitops.h>
  28. #include <asm/uv/uv_hub.h>
  29. #include "gru.h"
  30. #include "grutables.h"
  31. #include "gruhandles.h"
  32. #include "grulib.h"
  33. #define CCH_LOCK_ATTEMPTS 10
  34. static int gru_user_copy_handle(void __user **dp, void *s)
  35. {
  36. if (copy_to_user(*dp, s, GRU_HANDLE_BYTES))
  37. return -1;
  38. *dp += GRU_HANDLE_BYTES;
  39. return 0;
  40. }
  41. static int gru_dump_context_data(void *grubase,
  42. struct gru_context_configuration_handle *cch,
  43. void __user *ubuf, int ctxnum, int dsrcnt,
  44. int flush_cbrs)
  45. {
  46. void *cb, *cbe, *tfh, *gseg;
  47. int i, scr;
  48. gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
  49. cb = gseg + GRU_CB_BASE;
  50. cbe = grubase + GRU_CBE_BASE;
  51. tfh = grubase + GRU_TFH_BASE;
  52. for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
  53. if (flush_cbrs)
  54. gru_flush_cache(cb);
  55. if (gru_user_copy_handle(&ubuf, cb))
  56. goto fail;
  57. if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
  58. goto fail;
  59. if (gru_user_copy_handle(&ubuf, cbe + i * GRU_HANDLE_STRIDE))
  60. goto fail;
  61. cb += GRU_HANDLE_STRIDE;
  62. }
  63. if (dsrcnt)
  64. memcpy(ubuf, gseg + GRU_DS_BASE, dsrcnt * GRU_HANDLE_STRIDE);
  65. return 0;
  66. fail:
  67. return -EFAULT;
  68. }
  69. static int gru_dump_tfm(struct gru_state *gru,
  70. void __user *ubuf, void __user *ubufend)
  71. {
  72. struct gru_tlb_fault_map *tfm;
  73. int i;
  74. if (GRU_NUM_TFM * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
  75. return -EFBIG;
  76. for (i = 0; i < GRU_NUM_TFM; i++) {
  77. tfm = get_tfm(gru->gs_gru_base_vaddr, i);
  78. if (gru_user_copy_handle(&ubuf, tfm))
  79. goto fail;
  80. }
  81. return GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;
  82. fail:
  83. return -EFAULT;
  84. }
  85. static int gru_dump_tgh(struct gru_state *gru,
  86. void __user *ubuf, void __user *ubufend)
  87. {
  88. struct gru_tlb_global_handle *tgh;
  89. int i;
  90. if (GRU_NUM_TGH * GRU_CACHE_LINE_BYTES > ubufend - ubuf)
  91. return -EFBIG;
  92. for (i = 0; i < GRU_NUM_TGH; i++) {
  93. tgh = get_tgh(gru->gs_gru_base_vaddr, i);
  94. if (gru_user_copy_handle(&ubuf, tgh))
  95. goto fail;
  96. }
  97. return GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;
  98. fail:
  99. return -EFAULT;
  100. }
/*
 * Dump one GRU context to userspace: a gru_dump_context_header, the CCH
 * image, and (if the context is resident and its lock was obtained or
 * locking was not requested) the CBR/TFH/CBE and optional DSR data.
 *
 * Returns total bytes written on success, -EFAULT on a faulting user
 * copy, -EFBIG if the remaining buffer is too small, or whatever
 * gru_dump_context_data() returned.
 */
static int gru_dump_context(struct gru_state *gru, int ctxnum,
		void __user *ubuf, void __user *ubufend, char data_opt,
		char lock_cch, char flush_cbrs)
{
	struct gru_dump_context_header hdr;
	struct gru_dump_context_header __user *uhdr = ubuf;
	struct gru_context_configuration_handle *cch, *ubufcch;
	struct gru_thread_state *gts;
	int try, cch_locked, cbrcnt = 0, dsrcnt = 0, bytes = 0, ret = 0;
	void *grubase;

	/* Zero the header so no uninitialized stack bytes reach userspace */
	memset(&hdr, 0, sizeof(hdr));
	grubase = gru->gs_gru_base_vaddr;
	cch = get_cch(grubase, ctxnum);

	/* Best-effort lock: retry briefly, then proceed unlocked */
	for (try = 0; try < CCH_LOCK_ATTEMPTS; try++) {
		cch_locked = trylock_cch_handle(cch);
		if (cch_locked)
			break;
		msleep(1);
	}

	/* Reserve space for the header; it is filled in and copied last */
	ubuf += sizeof(hdr);
	ubufcch = ubuf;
	if (gru_user_copy_handle(&ubuf, cch)) {
		if (cch_locked)
			unlock_cch_handle(cch);
		return -EFAULT;
	}
	/*
	 * NOTE(review): this stores through a __user pointer directly
	 * (delresp is a bitfield, so put_user() is not a drop-in fix);
	 * a bad user buffer here is not routed through the usual
	 * copy_to_user() fault handling — verify this is acceptable.
	 */
	if (cch_locked)
		ubufcch->delresp = 0;
	bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;

	/* Only dump context data if we hold the lock or were told not to lock */
	if (cch_locked || !lock_cch) {
		gts = gru->gs_gts[ctxnum];
		if (gts && gts->ts_vma) {
			hdr.pid = gts->ts_tgid_owner;
			hdr.vaddr = gts->ts_vma->vm_start;
		}
		if (cch->state != CCHSTATE_INACTIVE) {
			/* 3 handles (CBR/TFH/CBE) are dumped per CBR below */
			cbrcnt = hweight64(cch->cbr_allocation_map) *
						GRU_CBR_AU_SIZE;
			dsrcnt = data_opt ? hweight32(cch->dsr_allocation_map) *
					GRU_DSR_AU_CL : 0;
		}
		bytes += (3 * cbrcnt + dsrcnt) * GRU_CACHE_LINE_BYTES;
		if (bytes > ubufend - ubuf)
			ret = -EFBIG;
		else
			ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
							dsrcnt, flush_cbrs);
	}
	if (cch_locked)
		unlock_cch_handle(cch);
	if (ret)
		return ret;

	/* Data copied successfully; now publish the header at the start */
	hdr.magic = GRU_DUMP_MAGIC;
	hdr.gid = gru->gs_gid;
	hdr.ctxnum = ctxnum;
	hdr.cbrcnt = cbrcnt;
	hdr.dsrcnt = dsrcnt;
	hdr.cch_locked = cch_locked;
	if (copy_to_user(uhdr, &hdr, sizeof(hdr)))
		return -EFAULT;
	return bytes;
}
  163. int gru_dump_chiplet_request(unsigned long arg)
  164. {
  165. struct gru_state *gru;
  166. struct gru_dump_chiplet_state_req req;
  167. void __user *ubuf;
  168. void __user *ubufend;
  169. int ctxnum, ret, cnt = 0;
  170. if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
  171. return -EFAULT;
  172. /* Currently, only dump by gid is implemented */
  173. if (req.gid >= gru_max_gids)
  174. return -EINVAL;
  175. gru = GID_TO_GRU(req.gid);
  176. ubuf = req.buf;
  177. ubufend = req.buf + req.buflen;
  178. ret = gru_dump_tfm(gru, ubuf, ubufend);
  179. if (ret < 0)
  180. goto fail;
  181. ubuf += ret;
  182. ret = gru_dump_tgh(gru, ubuf, ubufend);
  183. if (ret < 0)
  184. goto fail;
  185. ubuf += ret;
  186. for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
  187. if (req.ctxnum == ctxnum || req.ctxnum < 0) {
  188. ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
  189. req.data_opt, req.lock_cch,
  190. req.flush_cbrs);
  191. if (ret < 0)
  192. goto fail;
  193. ubuf += ret;
  194. cnt++;
  195. }
  196. }
  197. if (copy_to_user((void __user *)arg, &req, sizeof(req)))
  198. return -EFAULT;
  199. return cnt;
  200. fail:
  201. return ret;
  202. }