adreno_drawctxt.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819
  1. /* Copyright (c) 2002,2007-2014, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. *
  12. */
  13. #include <linux/slab.h>
  14. #include <linux/msm_kgsl.h>
  15. #include <linux/sched.h>
  16. #include "kgsl.h"
  17. #include "kgsl_sharedmem.h"
  18. #include "adreno.h"
  19. #include "adreno_trace.h"
  20. #define KGSL_INIT_REFTIMESTAMP 0x7FFFFFFF
/* quad for copying GMEM to context shadow */
#define QUAD_LEN 12
#define QUAD_RESTORE_LEN 14

/*
 * Vertex template for the GMEM save (copy-out) quad.  0x3f800000 is the
 * IEEE754 encoding of 1.0f.  Entries 1, 3, 4 and 9 are patched at runtime
 * with the shadow height/width in set_gmem_copy_quad().
 */
static unsigned int gmem_copy_quad[QUAD_LEN] = {
	0x00000000, 0x00000000, 0x3f800000,
	0x00000000, 0x00000000, 0x3f800000,
	0x00000000, 0x00000000, 0x3f800000,
	0x00000000, 0x00000000, 0x3f800000
};

/*
 * Vertex template for the GMEM restore quad.  Entries 5 and 7 are patched
 * at runtime with the shadow height/width in set_gmem_copy_quad().
 */
static unsigned int gmem_restore_quad[QUAD_RESTORE_LEN] = {
	0x00000000, 0x3f800000, 0x3f800000,
	0x00000000, 0x00000000, 0x00000000,
	0x3f800000, 0x00000000, 0x00000000,
	0x3f800000, 0x00000000, 0x00000000,
	0x3f800000, 0x3f800000,
};

#define TEXCOORD_LEN 8

/* Texture coordinates (u, v pairs spanning [0,1]) for the copy quad */
static unsigned int gmem_copy_texcoord[TEXCOORD_LEN] = {
	0x00000000, 0x3f800000,
	0x3f800000, 0x3f800000,
	0x00000000, 0x00000000,
	0x3f800000, 0x00000000
};
  44. /*
  45. * Helper functions
  46. * These are global helper functions used by the GPUs during context switch
  47. */
  48. /**
  49. * uint2float - convert a uint to IEEE754 single precision float
  50. * @ uintval - value to convert
  51. */
  52. unsigned int uint2float(unsigned int uintval)
  53. {
  54. unsigned int exp, frac = 0;
  55. if (uintval == 0)
  56. return 0;
  57. exp = ilog2(uintval);
  58. /* Calculate fraction */
  59. if (23 > exp)
  60. frac = (uintval & (~(1 << exp))) << (23 - exp);
  61. /* Exp is biased by 127 and shifted 23 bits */
  62. exp = (exp + 127) << 23;
  63. return exp | frac;
  64. }
  65. static void set_gmem_copy_quad(struct gmem_shadow_t *shadow)
  66. {
  67. /* set vertex buffer values */
  68. gmem_copy_quad[1] = uint2float(shadow->height);
  69. gmem_copy_quad[3] = uint2float(shadow->width);
  70. gmem_copy_quad[4] = uint2float(shadow->height);
  71. gmem_copy_quad[9] = uint2float(shadow->width);
  72. gmem_restore_quad[5] = uint2float(shadow->height);
  73. gmem_restore_quad[7] = uint2float(shadow->width);
  74. memcpy(shadow->quad_vertices.hostptr, gmem_copy_quad, QUAD_LEN << 2);
  75. memcpy(shadow->quad_vertices_restore.hostptr, gmem_restore_quad,
  76. QUAD_RESTORE_LEN << 2);
  77. memcpy(shadow->quad_texcoords.hostptr, gmem_copy_texcoord,
  78. TEXCOORD_LEN << 2);
  79. }
  80. /**
  81. * build_quad_vtxbuff - Create a quad for saving/restoring GMEM
  82. * @ context - Pointer to the context being created
  83. * @ shadow - Pointer to the GMEM shadow structure
  84. * @ incmd - Pointer to pointer to the temporary command buffer
  85. */
  86. /* quad for saving/restoring gmem */
  87. void build_quad_vtxbuff(struct adreno_context *drawctxt,
  88. struct gmem_shadow_t *shadow, unsigned int **incmd)
  89. {
  90. unsigned int *cmd = *incmd;
  91. /* quad vertex buffer location (in GPU space) */
  92. shadow->quad_vertices.hostptr = cmd;
  93. shadow->quad_vertices.gpuaddr = virt2gpu(cmd, &drawctxt->gpustate);
  94. cmd += QUAD_LEN;
  95. /* Used by A3XX, but define for both to make the code easier */
  96. shadow->quad_vertices_restore.hostptr = cmd;
  97. shadow->quad_vertices_restore.gpuaddr =
  98. virt2gpu(cmd, &drawctxt->gpustate);
  99. cmd += QUAD_RESTORE_LEN;
  100. /* tex coord buffer location (in GPU space) */
  101. shadow->quad_texcoords.hostptr = cmd;
  102. shadow->quad_texcoords.gpuaddr = virt2gpu(cmd, &drawctxt->gpustate);
  103. cmd += TEXCOORD_LEN;
  104. set_gmem_copy_quad(shadow);
  105. *incmd = cmd;
  106. }
  107. static void wait_callback(struct kgsl_device *device,
  108. struct kgsl_context *context, void *priv, int result)
  109. {
  110. struct adreno_context *drawctxt = priv;
  111. wake_up_all(&drawctxt->waiting);
  112. }
/*
 * Interruptible wait with timeout that selects between the io-accounted
 * and plain wait primitives at runtime.  The io flag controls whether the
 * sleep is charged as iowait (used for power-level accounting, see
 * adreno_drawctxt_wait()).  Evaluates to the value the underlying
 * __wait_*() macro leaves in __ret: 0 on timeout, >0 if the condition
 * became true, <0 if interrupted.
 */
#define adreno_wait_event_interruptible_timeout(wq, condition, timeout, io) \
({ \
	long __ret = timeout; \
	if (io) \
		__wait_io_event_interruptible_timeout(wq, condition, __ret); \
	else \
		__wait_event_interruptible_timeout(wq, condition, __ret); \
	__ret; \
})

/*
 * Same as above without a timeout.  __ret is assigned by the underlying
 * __wait_*() macros (0 on success, -ERESTARTSYS if interrupted).
 */
#define adreno_wait_event_interruptible(wq, condition, io) \
({ \
	long __ret; \
	if (io) \
		__wait_io_event_interruptible(wq, condition, __ret); \
	else \
		__wait_event_interruptible(wq, condition, __ret); \
	__ret; \
})
  131. static int _check_context_timestamp(struct kgsl_device *device,
  132. struct adreno_context *drawctxt, unsigned int timestamp)
  133. {
  134. int ret = 0;
  135. /* Bail if the drawctxt has been invalidated or destroyed */
  136. if (kgsl_context_detached(&drawctxt->base) ||
  137. drawctxt->state != ADRENO_CONTEXT_STATE_ACTIVE)
  138. return 1;
  139. kgsl_mutex_lock(&device->mutex, &device->mutex_owner);
  140. ret = kgsl_check_timestamp(device, &drawctxt->base, timestamp);
  141. kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);
  142. return ret;
  143. }
/**
 * adreno_drawctxt_dump() - dump information about a draw context
 * @device: KGSL device that owns the context
 * @context: KGSL context to dump information about
 *
 * Dump specific information about the context to the kernel log. Used for
 * fence timeout callbacks
 */
void adreno_drawctxt_dump(struct kgsl_device *device,
		struct kgsl_context *context)
{
	unsigned int queue, start, retire;
	struct adreno_context *drawctxt = ADRENO_CONTEXT(context);

	/* Snapshot the queued/consumed/retired timestamps for the report */
	queue = kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_QUEUED);
	start = kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_CONSUMED);
	retire = kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED);

	/* Hold the context lock while walking the cmdqueue */
	spin_lock(&drawctxt->lock);
	dev_err(device->dev,
		" context[%d]: queue=%d, submit=%d, start=%d, retire=%d\n",
		context->id, queue, drawctxt->submitted_timestamp,
		start, retire);

	if (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
		struct kgsl_cmdbatch *cmdbatch =
			drawctxt->cmdqueue[drawctxt->cmdqueue_head];

		/* This dump was triggered from the head cmdbatch's own
		 * fence log - report the likely self-deadlock rather than
		 * recursing into its sync points */
		if (test_bit(CMDBATCH_FLAG_FENCE_LOG, &cmdbatch->priv)) {
			dev_err(device->dev,
				" possible deadlock. Context %d might be blocked for itself\n",
				context->id);
			goto done;
		}

		spin_lock(&cmdbatch->lock);
		if (!list_empty(&cmdbatch->synclist)) {
			dev_err(device->dev,
				" context[%d] (ts=%d) Active sync points:\n",
				context->id, cmdbatch->timestamp);
			kgsl_dump_syncpoints(device, cmdbatch);
		}
		spin_unlock(&cmdbatch->lock);
	}

done:
	spin_unlock(&drawctxt->lock);
}
/**
 * adreno_drawctxt_wait() - sleep until a timestamp expires
 * @adreno_dev: pointer to the adreno_device struct
 * @context: KGSL context to sleep for
 * @timestamp: Timestamp to wait on
 * @timeout: Number of milliseconds to wait (0 for infinite)
 *
 * Register an event to wait for a timestamp on a context and sleep until it
 * has past. Returns < 0 on error, -ETIMEDOUT if the timeout expires or 0
 * on success
 */
int adreno_drawctxt_wait(struct adreno_device *adreno_dev,
		struct kgsl_context *context,
		uint32_t timestamp, unsigned int timeout)
{
	static unsigned int io_cnt;
	struct kgsl_device *device = &adreno_dev->dev;
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
	int ret, io;

	if (kgsl_context_detached(context))
		return -EINVAL;

	if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
		return -EDEADLK;

	/* Needs to hold the device mutex */
	BUG_ON(!mutex_is_locked(&device->mutex));

	trace_adreno_drawctxt_wait_start(context->id, timestamp);

	/* wait_callback() wakes drawctxt->waiting when the ts retires */
	ret = kgsl_add_event(device, &context->events, timestamp,
		wait_callback, (void *) drawctxt);
	if (ret)
		goto done;

	/*
	 * For proper power accounting sometimes we need to call
	 * io_wait_interruptible_timeout and sometimes we need to call
	 * plain old wait_interruptible_timeout. We call the regular
	 * timeout N times out of 100, where N is a number specified by
	 * the current power level
	 */
	io_cnt = (io_cnt + 1) % 100;
	io = (io_cnt < pwr->pwrlevels[pwr->active_pwrlevel].io_fraction)
		? 0 : 1;

	/* Drop the device mutex while sleeping; retaken below */
	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

	if (timeout) {
		ret = (int) adreno_wait_event_interruptible_timeout(
			drawctxt->waiting,
			_check_context_timestamp(device, drawctxt, timestamp),
			msecs_to_jiffies(timeout), io);

		/* Map wait-macro results: 0 -> timed out, >0 -> success */
		if (ret == 0)
			ret = -ETIMEDOUT;
		else if (ret > 0)
			ret = 0;
	} else {
		ret = (int) adreno_wait_event_interruptible(drawctxt->waiting,
			_check_context_timestamp(device, drawctxt, timestamp),
			io);
	}

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);

	/* -EDEADLK if the context was invalidated while we were waiting */
	if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
		ret = -EDEADLK;

	/* Return -EINVAL if the context was detached while we were waiting */
	if (kgsl_context_detached(context))
		ret = -EINVAL;

done:
	trace_adreno_drawctxt_wait_done(context->id, timestamp, ret);
	return ret;
}
  253. static void global_wait_callback(struct kgsl_device *device,
  254. struct kgsl_context *context, void *priv, int result)
  255. {
  256. struct adreno_context *drawctxt = priv;
  257. wake_up_all(&drawctxt->waiting);
  258. kgsl_context_put(&drawctxt->base);
  259. }
  260. static int _check_global_timestamp(struct kgsl_device *device,
  261. struct adreno_context *drawctxt, unsigned int timestamp)
  262. {
  263. /* Stop waiting if the context is invalidated */
  264. if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
  265. return 1;
  266. return kgsl_check_timestamp(device, NULL, timestamp);
  267. }
/**
 * adreno_drawctxt_wait_global() - wait for a device-global timestamp
 * @adreno_dev: pointer to the adreno_device struct
 * @context: context the wait is being done on behalf of
 * @timestamp: global (device) timestamp to wait on
 * @timeout: number of milliseconds to wait (0 for infinite)
 *
 * Returns 0 on success/invalidated context, -ETIMEDOUT on timeout or
 * -EINVAL if the context could not be referenced.
 */
int adreno_drawctxt_wait_global(struct adreno_device *adreno_dev,
		struct kgsl_context *context,
		uint32_t timestamp, unsigned int timeout)
{
	struct kgsl_device *device = &adreno_dev->dev;
	struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
	int ret = 0;

	/* Needs to hold the device mutex */
	BUG_ON(!mutex_is_locked(&device->mutex));

	/* Reference is dropped by global_wait_callback() when the event
	 * fires or is cancelled */
	if (!_kgsl_context_get(context)) {
		ret = -EINVAL;
		goto done;
	}

	/*
	 * If the context is invalid then return immediately - we may end up
	 * waiting for a timestamp that will never come
	 */
	if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID) {
		kgsl_context_put(context);
		goto done;
	}

	trace_adreno_drawctxt_wait_start(KGSL_MEMSTORE_GLOBAL, timestamp);

	ret = kgsl_add_event(device, &device->global_events, timestamp,
		global_wait_callback, (void *) drawctxt);
	if (ret) {
		kgsl_context_put(context);
		goto done;
	}

	/* Drop the device mutex for the (uninterruptible) sleep */
	kgsl_mutex_unlock(&device->mutex, &device->mutex_owner);

	if (timeout) {
		/* wait_event_timeout() returns 0 only on timeout */
		if (0 == (int) wait_event_timeout(drawctxt->waiting,
			_check_global_timestamp(device, drawctxt, timestamp),
			msecs_to_jiffies(timeout)))
			ret = -ETIMEDOUT;
	} else {
		wait_event(drawctxt->waiting,
			_check_global_timestamp(device, drawctxt, timestamp));
	}

	kgsl_mutex_lock(&device->mutex, &device->mutex_owner);

	/* On timeout, cancel the registered event (this also runs the
	 * callback, which releases the context reference) */
	if (ret)
		kgsl_cancel_events_timestamp(device, &device->global_events,
			timestamp);

done:
	trace_adreno_drawctxt_wait_done(KGSL_MEMSTORE_GLOBAL, timestamp, ret);
	return ret;
}
  314. /**
  315. * adreno_drawctxt_invalidate() - Invalidate an adreno draw context
  316. * @device: Pointer to the KGSL device structure for the GPU
  317. * @context: Pointer to the KGSL context structure
  318. *
  319. * Invalidate the context and remove all queued commands and cancel any pending
  320. * waiters
  321. */
  322. void adreno_drawctxt_invalidate(struct kgsl_device *device,
  323. struct kgsl_context *context)
  324. {
  325. struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
  326. trace_adreno_drawctxt_invalidate(drawctxt);
  327. spin_lock(&drawctxt->lock);
  328. drawctxt->state = ADRENO_CONTEXT_STATE_INVALID;
  329. /*
  330. * set the timestamp to the last value since the context is invalidated
  331. * and we want the pending events for this context to go away
  332. */
  333. kgsl_sharedmem_writel(device, &device->memstore,
  334. KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
  335. drawctxt->timestamp);
  336. kgsl_sharedmem_writel(device, &device->memstore,
  337. KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
  338. drawctxt->timestamp);
  339. while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
  340. struct kgsl_cmdbatch *cmdbatch =
  341. drawctxt->cmdqueue[drawctxt->cmdqueue_head];
  342. drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
  343. ADRENO_CONTEXT_CMDQUEUE_SIZE;
  344. kgsl_cancel_events_timestamp(device, &context->events,
  345. cmdbatch->timestamp);
  346. kgsl_cmdbatch_destroy(cmdbatch);
  347. }
  348. spin_unlock(&drawctxt->lock);
  349. /* Give the bad news to everybody waiting around */
  350. wake_up_all(&drawctxt->waiting);
  351. wake_up_all(&drawctxt->wq);
  352. }
/**
 * adreno_drawctxt_create - create a new adreno draw context
 * @dev_priv: the owner of the context
 * @flags: flags for the context (passed from user space); updated on return
 * with the set of flags actually accepted
 *
 * Create and return a new draw context for the 3D core.
 * Returns the new context or an ERR_PTR on failure.
 */
struct kgsl_context *
adreno_drawctxt_create(struct kgsl_device_private *dev_priv,
			uint32_t *flags)
{
	struct adreno_context *drawctxt;
	struct kgsl_device *device = dev_priv->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int ret;

	drawctxt = kzalloc(sizeof(struct adreno_context), GFP_KERNEL);
	if (drawctxt == NULL)
		return ERR_PTR(-ENOMEM);

	ret = kgsl_context_init(dev_priv, &drawctxt->base);
	if (ret != 0) {
		kfree(drawctxt);
		return ERR_PTR(ret);
	}

	drawctxt->bin_base_offset = 0;
	drawctxt->timestamp = 0;

	/* Keep only the user flags the driver understands */
	drawctxt->base.flags = *flags & (KGSL_CONTEXT_PREAMBLE |
		KGSL_CONTEXT_NO_GMEM_ALLOC |
		KGSL_CONTEXT_PER_CONTEXT_TS |
		KGSL_CONTEXT_USER_GENERATED_TS |
		KGSL_CONTEXT_NO_FAULT_TOLERANCE |
		KGSL_CONTEXT_CTX_SWITCH |
		KGSL_CONTEXT_TYPE_MASK |
		KGSL_CONTEXT_PWR_CONSTRAINT);

	/* Always enable per-context timestamps */
	drawctxt->base.flags |= KGSL_CONTEXT_PER_CONTEXT_TS;
	drawctxt->type = (drawctxt->base.flags & KGSL_CONTEXT_TYPE_MASK)
		>> KGSL_CONTEXT_TYPE_SHIFT;
	spin_lock_init(&drawctxt->lock);
	init_waitqueue_head(&drawctxt->wq);
	init_waitqueue_head(&drawctxt->waiting);

	/*
	 * Set up the plist node for the dispatcher. For now all contexts have
	 * the same priority, but later the priority will be set at create time
	 * by the user
	 */
	plist_node_init(&drawctxt->pending, ADRENO_CONTEXT_DEFAULT_PRIORITY);

	if (adreno_dev->gpudev->ctxt_create) {
		ret = adreno_dev->gpudev->ctxt_create(adreno_dev, drawctxt);
		if (ret)
			goto err;
	} else if ((drawctxt->base.flags & KGSL_CONTEXT_PREAMBLE) == 0 ||
		(drawctxt->base.flags & KGSL_CONTEXT_NO_GMEM_ALLOC) == 0) {
		/* Without a GPU-specific create hook, only preamble
		 * contexts that do not need GMEM save/restore can be
		 * handled by the generic ops */
		KGSL_DEV_ERR_ONCE(device,
			"legacy context switch not supported\n");
		ret = -EINVAL;
		goto err;
	} else {
		drawctxt->ops = &adreno_preamble_ctx_ops;
	}

	/* Initialize start-of-pipe and end-of-pipe timestamps to zero */
	kgsl_sharedmem_writel(device, &device->memstore,
			KGSL_MEMSTORE_OFFSET(drawctxt->base.id, soptimestamp),
			0);
	kgsl_sharedmem_writel(device, &device->memstore,
			KGSL_MEMSTORE_OFFSET(drawctxt->base.id, eoptimestamp),
			0);

	/* copy back whatever flags we decided were valid */
	*flags = drawctxt->base.flags;
	return &drawctxt->base;
err:
	kgsl_context_detach(&drawctxt->base);
	return ERR_PTR(ret);
}
  425. /**
  426. * adreno_drawctxt_sched() - Schedule a previously blocked context
  427. * @device: pointer to a KGSL device
  428. * @drawctxt: drawctxt to rechedule
  429. *
  430. * This function is called by the core when it knows that a previously blocked
  431. * context has been unblocked. The default adreno response is to reschedule the
  432. * context on the dispatcher
  433. */
  434. void adreno_drawctxt_sched(struct kgsl_device *device,
  435. struct kgsl_context *context)
  436. {
  437. adreno_dispatcher_queue_context(device, ADRENO_CONTEXT(context));
  438. }
/**
 * adreno_drawctxt_detach(): detach a context from the GPU
 * @context: Generic KGSL context container for the context
 *
 * Deactivates the context, drains its command queue and waits for its
 * last submitted commands to retire.  Returns the result of the global
 * timestamp wait.
 */
int adreno_drawctxt_detach(struct kgsl_context *context)
{
	struct kgsl_device *device;
	struct adreno_device *adreno_dev;
	struct adreno_context *drawctxt;
	int ret;

	if (context == NULL)
		return 0;

	device = context->device;
	adreno_dev = ADRENO_DEVICE(device);
	drawctxt = ADRENO_CONTEXT(context);

	/* deactivate context */
	if (adreno_dev->drawctxt_active == drawctxt)
		adreno_drawctxt_switch(adreno_dev, NULL, 0);

	/* Drain any cmdbatches still queued on this context */
	spin_lock(&drawctxt->lock);
	while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
		struct kgsl_cmdbatch *cmdbatch =
			drawctxt->cmdqueue[drawctxt->cmdqueue_head];

		drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
			ADRENO_CONTEXT_CMDQUEUE_SIZE;

		spin_unlock(&drawctxt->lock);

		/*
		 * If the context is detached while we are waiting for
		 * the next command in GFT SKIP CMD, print the context
		 * detached status here.
		 */
		adreno_fault_skipcmd_detached(device, drawctxt, cmdbatch);

		/*
		 * Don't hold the drawctxt mutex while the cmdbatch is being
		 * destroyed because the cmdbatch destroy takes the device
		 * mutex and the world falls in on itself
		 */
		kgsl_cmdbatch_destroy(cmdbatch);
		spin_lock(&drawctxt->lock);
	}
	spin_unlock(&drawctxt->lock);

	/*
	 * internal_timestamp is set in adreno_ringbuffer_addcmds,
	 * which holds the device mutex. The entire context destroy
	 * process requires the device mutex as well. But lets
	 * make sure we notice if the locking changes.
	 */
	BUG_ON(!mutex_is_locked(&device->mutex));

	/* Wait for the last global timestamp to pass before continuing.
	 * The maximum wait time is 30s, some large IB's can take longer
	 * than 10s and if hang happens then the time for the context's
	 * commands to retire will be greater than 10s. 30s should be sufficient
	 * time to wait for the commands even if a hang happens.
	 */
	ret = adreno_drawctxt_wait_global(adreno_dev, context,
		drawctxt->internal_timestamp, 30 * 1000);

	/*
	 * If the wait for global fails then nothing after this point is likely
	 * to work very well - BUG_ON() so we can take advantage of the debug
	 * tools to figure out what the h - e - double hockey sticks happened
	 */
	BUG_ON(ret);

	/* Park the memstore timestamps at the last retired value */
	kgsl_sharedmem_writel(device, &device->memstore,
			KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
			drawctxt->timestamp);
	kgsl_sharedmem_writel(device, &device->memstore,
			KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
			drawctxt->timestamp);

	adreno_profile_process_results(device);

	/* GPU-specific teardown for this context, if any */
	if (drawctxt->ops && drawctxt->ops->detach)
		drawctxt->ops->detach(drawctxt);

	/* wake threads waiting to submit commands from this context */
	wake_up_all(&drawctxt->waiting);
	wake_up_all(&drawctxt->wq);

	return ret;
}
  515. void adreno_drawctxt_destroy(struct kgsl_context *context)
  516. {
  517. struct adreno_context *drawctxt;
  518. if (context == NULL)
  519. return;
  520. drawctxt = ADRENO_CONTEXT(context);
  521. kfree(drawctxt);
  522. }
  523. /**
  524. * adreno_context_restore() - generic context restore handler
  525. * @adreno_dev: the device
  526. * @context: the context
  527. *
  528. * Basic context restore handler that writes the context identifier
  529. * to the ringbuffer and issues pagetable switch commands if necessary.
  530. * May be called directly from the adreno_context_ops.restore function
  531. * pointer or as the first action in a hardware specific restore
  532. * function.
  533. */
  534. int adreno_context_restore(struct adreno_device *adreno_dev,
  535. struct adreno_context *context)
  536. {
  537. struct kgsl_device *device;
  538. unsigned int cmds[8];
  539. if (adreno_dev == NULL || context == NULL)
  540. return -EINVAL;
  541. device = &adreno_dev->dev;
  542. /* write the context identifier to the ringbuffer */
  543. cmds[0] = cp_nop_packet(1);
  544. cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
  545. cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2);
  546. cmds[3] = device->memstore.gpuaddr +
  547. KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
  548. cmds[4] = context->base.id;
  549. /* Flush the UCHE for new context */
  550. cmds[5] = cp_type0_packet(
  551. adreno_getreg(adreno_dev, ADRENO_REG_UCHE_INVALIDATE0), 2);
  552. cmds[6] = 0;
  553. if (adreno_is_a3xx(adreno_dev))
  554. cmds[7] = 0x90000000;
  555. return adreno_ringbuffer_issuecmds(device, context,
  556. KGSL_CMD_FLAGS_NONE, cmds, 8);
  557. }
/* Default ops for preamble contexts: restore only, no legacy save */
const struct adreno_context_ops adreno_preamble_ctx_ops = {
	.restore = adreno_context_restore,
};
  561. /**
  562. * context_save() - save old context when necessary
  563. * @drawctxt - the old context
  564. *
  565. * For legacy context switching, we need to issue save
  566. * commands unless the context is being destroyed.
  567. */
  568. static inline int context_save(struct adreno_device *adreno_dev,
  569. struct adreno_context *context)
  570. {
  571. if (context->ops->save == NULL
  572. || kgsl_context_detached(&context->base)
  573. || context->state == ADRENO_CONTEXT_STATE_INVALID)
  574. return 0;
  575. return context->ops->save(adreno_dev, context);
  576. }
  577. /**
  578. * adreno_drawctxt_set_bin_base_offset - set bin base offset for the context
  579. * @device - KGSL device that owns the context
  580. * @context- Generic KGSL context container for the context
  581. * @offset - Offset to set
  582. *
  583. * Set the bin base offset for A2XX devices. Not valid for A3XX devices.
  584. */
  585. void adreno_drawctxt_set_bin_base_offset(struct kgsl_device *device,
  586. struct kgsl_context *context,
  587. unsigned int offset)
  588. {
  589. struct adreno_context *drawctxt;
  590. if (context == NULL)
  591. return;
  592. drawctxt = ADRENO_CONTEXT(context);
  593. drawctxt->bin_base_offset = offset;
  594. }
  595. /**
  596. * adreno_drawctxt_switch - switch the current draw context
  597. * @adreno_dev - The 3D device that owns the context
  598. * @drawctxt - the 3D context to switch to
  599. * @flags - Flags to accompany the switch (from user space)
  600. *
  601. * Switch the current draw context
  602. */
  603. int adreno_drawctxt_switch(struct adreno_device *adreno_dev,
  604. struct adreno_context *drawctxt,
  605. unsigned int flags)
  606. {
  607. struct kgsl_device *device = &adreno_dev->dev;
  608. int ret = 0;
  609. if (drawctxt) {
  610. /*
  611. * Handle legacy gmem / save restore flag on each IB.
  612. * Userspace sets to guard IB sequences that require
  613. * gmem to be saved and clears it at the end of the
  614. * sequence.
  615. */
  616. if (flags & KGSL_CONTEXT_SAVE_GMEM)
  617. /* Set the flag in context so that the save is done
  618. * when this context is switched out. */
  619. set_bit(ADRENO_CONTEXT_GMEM_SAVE, &drawctxt->priv);
  620. else
  621. /* Remove GMEM saving flag from the context */
  622. clear_bit(ADRENO_CONTEXT_GMEM_SAVE, &drawctxt->priv);
  623. }
  624. /* already current? */
  625. if (adreno_dev->drawctxt_active == drawctxt) {
  626. if (drawctxt && drawctxt->ops->draw_workaround)
  627. ret = drawctxt->ops->draw_workaround(adreno_dev,
  628. drawctxt);
  629. return ret;
  630. }
  631. trace_adreno_drawctxt_switch(adreno_dev->drawctxt_active,
  632. drawctxt, flags);
  633. if (adreno_dev->drawctxt_active) {
  634. ret = context_save(adreno_dev, adreno_dev->drawctxt_active);
  635. if (ret) {
  636. KGSL_DRV_ERR(device,
  637. "Error in GPU context %d save: %d\n",
  638. adreno_dev->drawctxt_active->base.id, ret);
  639. return ret;
  640. }
  641. }
  642. /* Get a refcount to the new instance */
  643. if (drawctxt) {
  644. if (!_kgsl_context_get(&drawctxt->base))
  645. return -EINVAL;
  646. ret = kgsl_mmu_setstate(&device->mmu,
  647. drawctxt->base.proc_priv->pagetable,
  648. adreno_dev->drawctxt_active ?
  649. adreno_dev->drawctxt_active->base.id :
  650. KGSL_CONTEXT_INVALID);
  651. /* Set the new context */
  652. ret = drawctxt->ops->restore(adreno_dev, drawctxt);
  653. if (ret) {
  654. KGSL_DRV_ERR(device,
  655. "Error in GPU context %d restore: %d\n",
  656. drawctxt->base.id, ret);
  657. return ret;
  658. }
  659. } else {
  660. /*
  661. * No context - set the default pagetable and thats it.
  662. * If there isn't a current context, the kgsl_mmu_setstate
  663. * will use the CPU path so we don't need to give
  664. * it a valid context id.
  665. */
  666. ret = kgsl_mmu_setstate(&device->mmu,
  667. device->mmu.defaultpagetable,
  668. adreno_dev->drawctxt_active->base.id);
  669. }
  670. /* Put the old instance of the active drawctxt */
  671. if (adreno_dev->drawctxt_active)
  672. kgsl_context_put(&adreno_dev->drawctxt_active->base);
  673. adreno_dev->drawctxt_active = drawctxt;
  674. return 0;
  675. }