/*
 * SN Platform GRU Driver
 *
 * FAULT HANDLER FOR GRU DETECTED TLB MISSES
 *
 * This file contains code that handles TLB misses within the GRU.
 * These misses are reported either via interrupts or user polling of
 * the user CB.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/security.h>
#include <linux/prefetch.h>
#include <asm/pgtable.h>
#include "gru.h"
#include "grutables.h"
#include "grulib.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>

/* Return codes for vtop functions */
#define VTOP_SUCCESS	0
#define VTOP_INVALID	-1
#define VTOP_RETRY	-2
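
/*
 * gru_try_dropin() acts on these codes: VTOP_RETRY switches the CB to
 * user polling mode so the lookup can be retried from non-atomic
 * context; VTOP_INVALID causes the CB to take an exception.
 */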

/*
 * Test if a physical address is a valid GRU GSEG address
 */
static inline int is_gru_paddr(unsigned long paddr)
{
	return paddr >= gru_start_paddr && paddr < gru_end_paddr;
}

/*
 * Find the vma of a GRU segment. Caller must hold mmap_sem.
 */
struct vm_area_struct *gru_find_vma(unsigned long vaddr)
{
	struct vm_area_struct *vma;

	vma = find_vma(current->mm, vaddr);
	if (vma && vma->vm_start <= vaddr && vma->vm_ops == &gru_vm_ops)
		return vma;
	return NULL;
}

/*
 * Find and lock the gts that contains the specified user vaddr.
 *
 * Returns:
 *	- *gts with the mmap_sem locked for read and the GTS locked.
 *	- NULL if vaddr invalid OR is not a valid GSEG vaddr.
 */
static struct gru_thread_state *gru_find_lock_gts(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct gru_thread_state *gts = NULL;

	down_read(&mm->mmap_sem);
	vma = gru_find_vma(vaddr);
	if (vma)
		gts = gru_find_thread_state(vma, TSID(vaddr, vma));
	if (gts)
		mutex_lock(&gts->ts_ctxlock);
	else
		up_read(&mm->mmap_sem);
	return gts;
}
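
/*
 * Same as gru_find_lock_gts() except that the thread state is allocated
 * if it does not already exist. mmap_sem is taken for write while
 * allocating, then downgraded to read so gru_unlock_gts() can be used
 * to unlock.
 */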
static struct gru_thread_state *gru_alloc_locked_gts(unsigned long vaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct gru_thread_state *gts = ERR_PTR(-EINVAL);

	down_write(&mm->mmap_sem);
	vma = gru_find_vma(vaddr);
	if (!vma)
		goto err;

	gts = gru_alloc_thread_state(vma, TSID(vaddr, vma));
	if (IS_ERR(gts))
		goto err;
	mutex_lock(&gts->ts_ctxlock);
	downgrade_write(&mm->mmap_sem);
	return gts;

err:
	up_write(&mm->mmap_sem);
	return gts;
}

/*
 * Unlock a GTS that was previously locked with gru_find_lock_gts() or
 * gru_alloc_locked_gts().
 */
static void gru_unlock_gts(struct gru_thread_state *gts)
{
	mutex_unlock(&gts->ts_ctxlock);
	up_read(&current->mm->mmap_sem);
}

/*
 * Set a CB.istatus to active using a user virtual address. This must be done
 * just prior to a TFH RESTART. The new cb.istatus is an in-cache status ONLY.
 * If the line is evicted, the status may be lost. The in-cache update
 * is necessary to prevent the user from seeing a stale cb.istatus that will
 * change as soon as the TFH restart is complete. Races may cause an
 * occasional failure to clear the cb.istatus, but that is ok.
 */
static void gru_cb_set_istatus_active(struct gru_instruction_bits *cbk)
{
	if (cbk) {
		cbk->istatus = CBS_ACTIVE;
	}
}

/*
 * Read & clear a TFM
 *
 * The GRU has an array of fault maps. A map is private to a cpu.
 * Only one cpu will be accessing a cpu's fault map.
 *
 * This function scans the cpu-private fault map & clears all bits that
 * are set. The function returns a bitmap that indicates the bits that
 * were cleared. Note that since the maps may be updated asynchronously by
 * the GRU, atomic operations must be used to clear bits.
 */
static void get_clear_fault_map(struct gru_state *gru,
				struct gru_tlb_fault_map *imap,
				struct gru_tlb_fault_map *dmap)
{
	unsigned long i, k;
	struct gru_tlb_fault_map *tfm;

	tfm = get_tfm_for_cpu(gru, gru_cpu_fault_map_id());
	prefetchw(tfm);		/* Helps on hardware, required for emulator */
	for (i = 0; i < BITS_TO_LONGS(GRU_NUM_CBE); i++) {
		k = tfm->fault_bits[i];
		if (k)
			k = xchg(&tfm->fault_bits[i], 0UL);
		imap->fault_bits[i] = k;
		k = tfm->done_bits[i];
		if (k)
			k = xchg(&tfm->done_bits[i], 0UL);
		dmap->fault_bits[i] = k;
	}

	/*
	 * Not functionally required but helps performance. (Required
	 * on emulator)
	 */
	gru_flush_cache(tfm);
}

/*
 * Atomic (interrupt context) & non-atomic (user context) functions to
 * convert a vaddr into a physical address. The size of the page
 * is returned in pageshift.
 * returns:
 *	  0 - successful
 *	< 0 - error code
 *	  1 - (atomic only) try again in non-atomic context
 */
static int non_atomic_pte_lookup(struct vm_area_struct *vma,
				 unsigned long vaddr, int write,
				 unsigned long *paddr, int *pageshift)
{
	struct page *page;

#ifdef CONFIG_HUGETLB_PAGE
	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
	*pageshift = PAGE_SHIFT;
#endif
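
	/*
	 * get_user_pages() may fault the page in and can sleep, which is
	 * why this lookup is only used from user (non-atomic) context.
	 */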
	if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0)
		return -EFAULT;
	*paddr = page_to_phys(page);
	put_page(page);
	return 0;
}

/*
 * atomic_pte_lookup
 *
 * Convert a user virtual address to a physical address
 * Only supports Intel large pages (2MB only) on x86_64.
 *	ZZZ - hugepage support is incomplete
 *
 * NOTE: mmap_sem is already held on entry to this function. This
 * guarantees existence of the page tables.
 */
static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
	int write, unsigned long *paddr, int *pageshift)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pud_t *pudp;
	pte_t pte;

	pgdp = pgd_offset(vma->vm_mm, vaddr);
	if (unlikely(pgd_none(*pgdp)))
		goto err;

	pudp = pud_offset(pgdp, vaddr);
	if (unlikely(pud_none(*pudp)))
		goto err;

	pmdp = pmd_offset(pudp, vaddr);
	if (unlikely(pmd_none(*pmdp)))
		goto err;
#ifdef CONFIG_X86_64
	if (unlikely(pmd_large(*pmdp)))
		pte = *(pte_t *) pmdp;
	else
#endif
		pte = *pte_offset_kernel(pmdp, vaddr);

	if (unlikely(!pte_present(pte) ||
		     (write && (!pte_write(pte) || !pte_dirty(pte)))))
		return 1;

	*paddr = pte_pfn(pte) << PAGE_SHIFT;
#ifdef CONFIG_HUGETLB_PAGE
	*pageshift = is_vm_hugetlb_page(vma) ? HPAGE_SHIFT : PAGE_SHIFT;
#else
	*pageshift = PAGE_SHIFT;
#endif
	return 0;

err:
	return 1;
}

static int gru_vtop(struct gru_thread_state *gts, unsigned long vaddr,
		    int write, int atomic, unsigned long *gpa, int *pageshift)
{
	struct mm_struct *mm = gts->ts_mm;
	struct vm_area_struct *vma;
	unsigned long paddr;
	int ret, ps;

	vma = find_vma(mm, vaddr);
	if (!vma)
		goto inval;

	/*
	 * Atomic lookup is faster & usually works even if called in non-atomic
	 * context.
	 */
	rmb();	/* Must check ms_range_active before loading PTEs */
	ret = atomic_pte_lookup(vma, vaddr, write, &paddr, &ps);
	if (ret) {
		if (atomic)
			goto upm;
		if (non_atomic_pte_lookup(vma, vaddr, write, &paddr, &ps))
			goto inval;
	}
	if (is_gru_paddr(paddr))
		goto inval;
	paddr = paddr & ~((1UL << ps) - 1);
	*gpa = uv_soc_phys_ram_to_gpa(paddr);
	*pageshift = ps;
	return VTOP_SUCCESS;

inval:
	return VTOP_INVALID;
upm:
	return VTOP_RETRY;
}

/*
 * Flush a CBE from cache. The CBE is clean in the cache. Dirty the
 * CBE cacheline so that the line will be written back to the home agent.
 * Otherwise the line may be silently dropped. This has no impact
 * except on performance.
 */
static void gru_flush_cache_cbe(struct gru_control_block_extended *cbe)
{
	if (unlikely(cbe)) {
		cbe->cbrexecstatus = 0;		/* make CL dirty */
		gru_flush_cache(cbe);
	}
}

/*
 * Preload the TLB with entries that may be required. Currently, preloading
 * is implemented only for BCOPY. Preload <tlb_preload_count> pages OR to
 * the end of the bcopy transfer, whichever is smaller. Pages are preloaded
 * in reverse order, from the end of the transfer back toward the faulting
 * address.
 */
static void gru_preload_tlb(struct gru_state *gru,
			    struct gru_thread_state *gts, int atomic,
			    unsigned long fault_vaddr, int asid, int write,
			    unsigned char tlb_preload_count,
			    struct gru_tlb_fault_handle *tfh,
			    struct gru_control_block_extended *cbe)
{
	unsigned long vaddr = 0, gpa;
	int ret, pageshift;

	if (cbe->opccpy != OP_BCOPY)
		return;

	if (fault_vaddr == cbe->cbe_baddr0)
		vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1;
	else if (fault_vaddr == cbe->cbe_baddr1)
		vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1;

	fault_vaddr &= PAGE_MASK;
	vaddr &= PAGE_MASK;
	vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE);

	while (vaddr > fault_vaddr) {
		ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
		if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write,
					  GRU_PAGESIZE(pageshift)))
			return;
		gru_dbg(grudev,
			"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n",
			atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh,
			vaddr, asid, write, pageshift, gpa);
		vaddr -= PAGE_SIZE;
		STAT(tlb_preload_page);
	}
}

/*
 * Drop a TLB entry into the GRU. The fault is described by info in a TFH.
 * Input:
 *	cb	Address of user CBR. Null if not running in user context
 * Return:
 *	  0 = dropin, exception, or switch to UPM successful
 *	  1 = range invalidate active
 *	< 0 = error code
 */
static int gru_try_dropin(struct gru_state *gru,
			  struct gru_thread_state *gts,
			  struct gru_tlb_fault_handle *tfh,
			  struct gru_instruction_bits *cbk)
{
	struct gru_control_block_extended *cbe = NULL;
	unsigned char tlb_preload_count = gts->ts_tlb_preload_count;
	int pageshift = 0, asid, write, ret, atomic = !cbk, indexway;
	unsigned long gpa = 0, vaddr = 0;

	/*
	 * NOTE: The GRU contains magic hardware that eliminates races between
	 * TLB invalidates and TLB dropins. If an invalidate occurs
	 * in the window between reading the TFH and the subsequent TLB dropin,
	 * the dropin is ignored. This eliminates the need for additional locks.
	 */

	/*
	 * Prefetch the CBE if doing TLB preloading
	 */
	if (unlikely(tlb_preload_count)) {
		cbe = gru_tfh_to_cbe(tfh);
		prefetchw(cbe);
	}

	/*
	 * Error if TFH state is IDLE or FMM mode & the user is issuing a UPM
	 * call. Might be a hardware race OR a stupid user. Ignore FMM because
	 * FMM is a transient state.
	 */
	if (tfh->status != TFHSTATUS_EXCEPTION) {
		gru_flush_cache(tfh);
		sync_core();
		if (tfh->status != TFHSTATUS_EXCEPTION)
			goto failnoexception;
		STAT(tfh_stale_on_fault);
	}
	if (tfh->state == TFHSTATE_IDLE)
		goto failidle;
	if (tfh->state == TFHSTATE_MISS_FMM && cbk)
		goto failfmm;

	write = (tfh->cause & TFHCAUSE_TLB_MOD) != 0;
	vaddr = tfh->missvaddr;
	asid = tfh->missasid;
	indexway = tfh->indexway;
	if (asid == 0)
		goto failnoasid;

	rmb();	/* TFH must be cache resident before reading ms_range_active */

	/*
	 * TFH is cache resident - at least briefly. Fail the dropin
	 * if a range invalidate is active.
	 */
	if (atomic_read(&gts->ts_gms->ms_range_active))
		goto failactive;

	ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
	if (ret == VTOP_INVALID)
		goto failinval;
	if (ret == VTOP_RETRY)
		goto failupm;

	if (!(gts->ts_sizeavail & GRU_SIZEAVAIL(pageshift))) {
		gts->ts_sizeavail |= GRU_SIZEAVAIL(pageshift);
		if (atomic || !gru_update_cch(gts)) {
			gts->ts_force_cch_reload = 1;
			goto failupm;
		}
	}

	if (unlikely(cbe) && pageshift == PAGE_SHIFT) {
		gru_preload_tlb(gru, gts, atomic, vaddr, asid, write,
				tlb_preload_count, tfh, cbe);
		gru_flush_cache_cbe(cbe);
	}

	gru_cb_set_istatus_active(cbk);
	gts->ustats.tlbdropin++;
	tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
			  GRU_PAGESIZE(pageshift));
	gru_dbg(grudev,
		"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, indexway 0x%x,"
		" rw %d, ps %d, gpa 0x%lx\n",
		atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh, vaddr, asid,
		indexway, write, pageshift, gpa);
	STAT(tlb_dropin);
	return 0;

failnoasid:
	/* No asid (delayed unload). */
	STAT(tlb_dropin_fail_no_asid);
	gru_dbg(grudev, "FAILED no_asid tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	if (!cbk)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	return -EAGAIN;

failupm:
	/* Atomic failure switch CBR to UPM */
	tfh_user_polling_mode(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_upm);
	gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return 1;

failfmm:
	/* FMM state on UPM call */
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_fmm);
	gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;

failnoexception:
	/* TFH status did not show exception pending */
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	if (cbk)
		gru_flush_cache(cbk);
	STAT(tlb_dropin_fail_no_exception);
	gru_dbg(grudev, "FAILED non-exception tfh: 0x%p, status %d, state %d\n",
		tfh, tfh->status, tfh->state);
	return 0;

failidle:
	/* TFH state was idle - no miss pending */
	gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	if (cbk)
		gru_flush_cache(cbk);
	STAT(tlb_dropin_fail_idle);
	gru_dbg(grudev, "FAILED idle tfh: 0x%p, state %d\n", tfh, tfh->state);
	return 0;

failinval:
	/* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
	tfh_exception(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_invalid);
	gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
	return -EFAULT;

failactive:
	/* Range invalidate active. Switch to UPM iff atomic */
	if (!cbk)
		tfh_user_polling_mode(tfh);
	else
		gru_flush_cache(tfh);
	gru_flush_cache_cbe(cbe);
	STAT(tlb_dropin_fail_range_active);
	gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
		tfh, vaddr);
	return 1;
}

/*
 * Process an external interrupt from the GRU. This interrupt is
 * caused by a TLB miss.
 * Note that this is the interrupt handler that is registered with the
 * Linux interrupt subsystem.
 */
static irqreturn_t gru_intr(int chiplet, int blade)
{
	struct gru_state *gru;
	struct gru_tlb_fault_map imap, dmap;
	struct gru_thread_state *gts;
	struct gru_tlb_fault_handle *tfh = NULL;
	struct completion *cmp;
	int cbrnum, ctxnum;

	STAT(intr);

	gru = &gru_base[blade]->bs_grus[chiplet];
	if (!gru) {
		dev_err(grudev, "GRU: invalid interrupt: cpu %d, chiplet %d\n",
			raw_smp_processor_id(), chiplet);
		return IRQ_NONE;
	}
	get_clear_fault_map(gru, &imap, &dmap);
	gru_dbg(grudev,
		"cpu %d, chiplet %d, gid %d, imap %016lx %016lx, dmap %016lx %016lx\n",
		smp_processor_id(), chiplet, gru->gs_gid,
		imap.fault_bits[0], imap.fault_bits[1],
		dmap.fault_bits[0], dmap.fault_bits[1]);

	for_each_cbr_in_tfm(cbrnum, dmap.fault_bits) {
		STAT(intr_cbr);
		cmp = gru->gs_blade->bs_async_wq;
		if (cmp)
			complete(cmp);
		gru_dbg(grudev, "gid %d, cbr_done %d, done %d\n",
			gru->gs_gid, cbrnum, cmp ? cmp->done : -1);
	}

	for_each_cbr_in_tfm(cbrnum, imap.fault_bits) {
		STAT(intr_tfh);
		tfh = get_tfh_by_index(gru, cbrnum);
		prefetchw(tfh);	/* Helps on hdw, required for emulator */

		/*
		 * When hardware sets a bit in the faultmap, it implicitly
		 * locks the GRU context so that it cannot be unloaded.
		 * The gts cannot change until a TFH start/writestart command
		 * is issued.
		 */
		ctxnum = tfh->ctxnum;
		gts = gru->gs_gts[ctxnum];

		/* Spurious interrupts can cause this. Ignore. */
		if (!gts) {
			STAT(intr_spurious);
			continue;
		}

		/*
		 * This is running in interrupt context. Trylock the mmap_sem.
		 * If it fails, retry the fault in user context.
		 */
		gts->ustats.fmm_tlbmiss++;
		if (!gts->ts_force_cch_reload &&
		    down_read_trylock(&gts->ts_mm->mmap_sem)) {
			gru_try_dropin(gru, gts, tfh, NULL);
			up_read(&gts->ts_mm->mmap_sem);
		} else {
			tfh_user_polling_mode(tfh);
			STAT(intr_mm_lock_failed);
		}
	}
	return IRQ_HANDLED;
}

irqreturn_t gru0_intr(int irq, void *dev_id)
{
	return gru_intr(0, uv_numa_blade_id());
}

irqreturn_t gru1_intr(int irq, void *dev_id)
{
	return gru_intr(1, uv_numa_blade_id());
}
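
/*
 * Handle interrupts on behalf of blades that have no local CPUs; blades
 * with CPUs are skipped here because their chiplets are serviced locally
 * via gru0_intr()/gru1_intr().
 */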
irqreturn_t gru_intr_mblade(int irq, void *dev_id)
{
	int blade;

	for_each_possible_blade(blade) {
		if (uv_blade_nr_possible_cpus(blade))
			continue;
		gru_intr(0, blade);
		gru_intr(1, blade);
	}
	return IRQ_HANDLED;
}
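
/*
 * Drop in a TLB entry on behalf of a user "call OS" request. Waits for
 * any active range invalidates to finish, then retries the dropin until
 * gru_try_dropin() reports success (0) or an error (< 0).
 */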
static int gru_user_dropin(struct gru_thread_state *gts,
			   struct gru_tlb_fault_handle *tfh,
			   void *cb)
{
	struct gru_mm_struct *gms = gts->ts_gms;
	int ret;

	gts->ustats.upm_tlbmiss++;
	while (1) {
		wait_event(gms->ms_wait_queue,
			   atomic_read(&gms->ms_range_active) == 0);
		prefetchw(tfh);	/* Helps on hdw, required for emulator */
		ret = gru_try_dropin(gts->ts_gru, gts, tfh, cb);
		if (ret <= 0)
			return ret;
		STAT(call_os_wait_queue);
	}
}

/*
 * This interface is called as a result of a user detecting a "call OS" bit
 * in a user CB. Normally means that a TLB fault has occurred.
 *	cb - user virtual address of the CB
 */
int gru_handle_user_call_os(unsigned long cb)
{
	struct gru_tlb_fault_handle *tfh;
	struct gru_thread_state *gts;
	void *cbk;
	int ucbnum, cbrnum, ret = -EINVAL;

	STAT(call_os);

	/* sanity check the cb pointer */
	ucbnum = get_cb_number((void *)cb);
	if ((cb & (GRU_HANDLE_STRIDE - 1)) || ucbnum >= GRU_NUM_CB)
		return -EINVAL;

	gts = gru_find_lock_gts(cb);
	if (!gts)
		return -EINVAL;
	gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", cb,
		gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);

	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
		goto exit;

	gru_check_context_placement(gts);

	/*
	 * CCH may contain stale data if ts_force_cch_reload is set.
	 */
	if (gts->ts_gru && gts->ts_force_cch_reload) {
		gts->ts_force_cch_reload = 0;
		gru_update_cch(gts);
	}

	ret = -EAGAIN;
	cbrnum = thread_cbr_number(gts, ucbnum);
	if (gts->ts_gru) {
		tfh = get_tfh_by_index(gts->ts_gru, cbrnum);
		cbk = get_gseg_base_address_cb(gts->ts_gru->gs_gru_base_vaddr,
					       gts->ts_ctxnum, ucbnum);
		ret = gru_user_dropin(gts, tfh, cbk);
	}
exit:
	gru_unlock_gts(gts);
	return ret;
}

/*
 * Fetch the exception detail information for a CB that terminated with
 * an exception.
 */
int gru_get_exception_detail(unsigned long arg)
{
	struct control_block_extended_exc_detail excdet;
	struct gru_control_block_extended *cbe;
	struct gru_thread_state *gts;
	int ucbnum, cbrnum, ret;

	STAT(user_exception);
	if (copy_from_user(&excdet, (void __user *)arg, sizeof(excdet)))
		return -EFAULT;

	gts = gru_find_lock_gts(excdet.cb);
	if (!gts)
		return -EINVAL;

	gru_dbg(grudev, "address 0x%lx, gid %d, gts 0x%p\n", excdet.cb,
		gts->ts_gru ? gts->ts_gru->gs_gid : -1, gts);
	ucbnum = get_cb_number((void *)excdet.cb);
	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE) {
		ret = -EINVAL;
	} else if (gts->ts_gru) {
		cbrnum = thread_cbr_number(gts, ucbnum);
		cbe = get_cbe_by_index(gts->ts_gru, cbrnum);
		gru_flush_cache(cbe);	/* CBE not coherent */
		sync_core();		/* make sure we have the current data */
		excdet.opc = cbe->opccpy;
		excdet.exopc = cbe->exopccpy;
		excdet.ecause = cbe->ecause;
		excdet.exceptdet0 = cbe->idef1upd;
		excdet.exceptdet1 = cbe->idef3upd;
		excdet.cbrstate = cbe->cbrstate;
		excdet.cbrexecstatus = cbe->cbrexecstatus;
		gru_flush_cache_cbe(cbe);
		ret = 0;
	} else {
		ret = -EAGAIN;
	}
	gru_unlock_gts(gts);

	gru_dbg(grudev,
		"cb 0x%lx, op %d, exopc %d, cbrstate %d, cbrexecstatus 0x%x, ecause 0x%x, "
		"exdet0 0x%lx, exdet1 0x%x\n",
		excdet.cb, excdet.opc, excdet.exopc, excdet.cbrstate, excdet.cbrexecstatus,
		excdet.ecause, excdet.exceptdet0, excdet.exceptdet1);
	if (!ret && copy_to_user((void __user *)arg, &excdet, sizeof(excdet)))
		ret = -EFAULT;
	return ret;
}

/*
 * User request to unload a context. Content is saved for possible reload.
 */
static int gru_unload_all_contexts(void)
{
	struct gru_thread_state *gts;
	struct gru_state *gru;
	int gid, ctxnum;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	foreach_gid(gid) {
		gru = GID_TO_GRU(gid);
		spin_lock(&gru->gs_lock);
		for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
			gts = gru->gs_gts[ctxnum];
			if (gts && mutex_trylock(&gts->ts_ctxlock)) {
				spin_unlock(&gru->gs_lock);
				gru_unload_context(gts, 1);
				mutex_unlock(&gts->ts_ctxlock);
				spin_lock(&gru->gs_lock);
			}
		}
		spin_unlock(&gru->gs_lock);
	}
	return 0;
}

int gru_user_unload_context(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_unload_context_req req;

	STAT(user_unload_context);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	gru_dbg(grudev, "gseg 0x%lx\n", req.gseg);

	if (!req.gseg)
		return gru_unload_all_contexts();

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	if (gts->ts_gru)
		gru_unload_context(gts, 1);
	gru_unlock_gts(gts);

	return 0;
}

/*
 * User request to flush a range of virtual addresses from the GRU TLB
 * (Mainly for testing).
 */
int gru_user_flush_tlb(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_flush_tlb_req req;
	struct gru_mm_struct *gms;

	STAT(user_flush_tlb);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;
	gru_dbg(grudev, "gseg 0x%lx, vaddr 0x%lx, len 0x%lx\n", req.gseg,
		req.vaddr, req.len);

	gts = gru_find_lock_gts(req.gseg);
	if (!gts)
		return -EINVAL;

	gms = gts->ts_gms;
	gru_unlock_gts(gts);
	gru_flush_tlb_range(gms, req.vaddr, req.len);

	return 0;
}

/*
 * Fetch GSEG statistics
 */
long gru_get_gseg_statistics(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_get_gseg_statistics_req req;

	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	/*
	 * The library creates arrays of contexts for threaded programs.
	 * If no gts exists in the array, the context has never been used & all
	 * statistics are implicitly 0.
	 */
	gts = gru_find_lock_gts(req.gseg);
	if (gts) {
		memcpy(&req.stats, &gts->ustats, sizeof(gts->ustats));
		gru_unlock_gts(gts);
	} else {
		memset(&req.stats, 0, sizeof(gts->ustats));
	}

	if (copy_to_user((void __user *)arg, &req, sizeof(req)))
		return -EFAULT;

	return 0;
}

/*
 * Register the current task as the user of the GSEG slice.
 * Needed for TLB fault interrupt targeting.
 */
int gru_set_context_option(unsigned long arg)
{
	struct gru_thread_state *gts;
	struct gru_set_context_option_req req;
	int ret = 0;

	STAT(set_context_option);
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;
	gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1);

	gts = gru_find_lock_gts(req.gseg);
	if (!gts) {
		gts = gru_alloc_locked_gts(req.gseg);
		if (IS_ERR(gts))
			return PTR_ERR(gts);
	}

	switch (req.op) {
	case sco_blade_chiplet:
		/* Select blade/chiplet for GRU context */
		if (req.val0 < -1 || req.val0 >= GRU_CHIPLETS_PER_HUB ||
		    req.val1 < -1 || req.val1 >= GRU_MAX_BLADES ||
		    (req.val1 >= 0 && !gru_base[req.val1])) {
			ret = -EINVAL;
		} else {
			gts->ts_user_blade_id = req.val1;
			gts->ts_user_chiplet_id = req.val0;
			gru_check_context_placement(gts);
		}
		break;
	case sco_gseg_owner:
		/* Register the current task as the GSEG owner */
		gts->ts_tgid_owner = current->tgid;
		break;
	case sco_cch_req_slice:
		/* Set the CCH slice option */
		gts->ts_cch_req_slice = req.val1 & 3;
		break;
	default:
		ret = -EINVAL;
	}
	gru_unlock_gts(gts);

	return ret;
}