kgsl_device.h 25 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837
  1. /* Copyright (c) 2002,2007-2014, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. *
  12. */
  13. #ifndef __KGSL_DEVICE_H
  14. #define __KGSL_DEVICE_H
  15. #include <linux/slab.h>
  16. #include <linux/idr.h>
  17. #include <linux/pm_qos.h>
  18. #include <linux/sched.h>
  19. #include <linux/workqueue.h>
  20. #include "kgsl.h"
  21. #include "kgsl_mmu.h"
  22. #include "kgsl_pwrctrl.h"
  23. #include "kgsl_log.h"
  24. #include "kgsl_pwrscale.h"
  25. #include <linux/sync.h>
#define KGSL_TIMEOUT_NONE 0
#define KGSL_TIMEOUT_DEFAULT 0xFFFFFFFF
#define KGSL_TIMEOUT_PART 50 /* 50 msec */

/*
 * KGSL device state is initialized to INIT when platform_probe
 * successfully initialized the device.  Once a device has been opened
 * (started) it becomes active.  NAP implies that only low latency
 * resources (for now clocks on some platforms) are off.  SLEEP implies
 * that the KGSL module believes a device is idle (has been inactive
 * past its timer) and all system resources are released.  SUSPEND is
 * requested by the kernel and will be enforced upon all open devices.
 */
#define KGSL_STATE_NONE 0x00000000
#define KGSL_STATE_INIT 0x00000001
#define KGSL_STATE_ACTIVE 0x00000002
#define KGSL_STATE_NAP 0x00000004
#define KGSL_STATE_SLEEP 0x00000008
#define KGSL_STATE_SUSPEND 0x00000010
#define KGSL_STATE_HUNG 0x00000020
#define KGSL_STATE_SLUMBER 0x00000080
/* NOTE: 0x40 is intentionally unused here - confirm before reusing it */

#define KGSL_GRAPHICS_MEMORY_LOW_WATERMARK 0x1000000

/* True when addr falls on a page boundary */
#define KGSL_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))
/**
 * enum kgsl_event_results - result codes passed to an event callback when the
 * event is retired or cancelled
 * @KGSL_EVENT_RETIRED: The timestamp associated with the event retired
 * successfully
 * @KGSL_EVENT_CANCELLED: The event was cancelled before the event was fired
 */
enum kgsl_event_results {
	KGSL_EVENT_RETIRED = 1,
	KGSL_EVENT_CANCELLED = 2,
};
  57. #define KGSL_FLAG_WAKE_ON_TOUCH BIT(0)
  58. /*
  59. * "list" of event types for ftrace symbolic magic
  60. */
  61. #define KGSL_EVENT_TYPES \
  62. { KGSL_EVENT_RETIRED, "retired" }, \
  63. { KGSL_EVENT_CANCELLED, "cancelled" }
  64. #define KGSL_CONTEXT_ID(_context) \
  65. ((_context != NULL) ? (_context)->id : KGSL_MEMSTORE_GLOBAL)
  66. struct kgsl_device;
  67. struct platform_device;
  68. struct kgsl_device_private;
  69. struct kgsl_context;
  70. struct kgsl_power_stats;
  71. struct kgsl_event;
  72. struct kgsl_cmdbatch;
/**
 * struct kgsl_functable - hardware-specific hooks for a KGSL device
 *
 * The first group of hooks is mandatory: the GPU-specific driver must
 * implement all of them, and the core calls them without NULL checks.
 * Hooks after the "Optional functions" marker are checked for NULL
 * before being invoked.
 */
struct kgsl_functable {
	/* Mandatory functions - these functions must be implemented
	   by the client device.  The driver will not check for a NULL
	   pointer before calling the hook.
	 */
	/* Read a GPU register at word offset offsetwords into *value */
	void (*regread) (struct kgsl_device *device,
		unsigned int offsetwords, unsigned int *value);
	/* Write value to the GPU register at word offset offsetwords */
	void (*regwrite) (struct kgsl_device *device,
		unsigned int offsetwords, unsigned int value);
	int (*idle) (struct kgsl_device *device);
	bool (*isidle) (struct kgsl_device *device);
	int (*suspend_context) (struct kgsl_device *device);
	int (*init) (struct kgsl_device *device);
	int (*start) (struct kgsl_device *device, int priority);
	int (*stop) (struct kgsl_device *device);
	int (*getproperty) (struct kgsl_device *device,
		enum kgsl_property_type type, void *value,
		unsigned int sizebytes);
	int (*waittimestamp) (struct kgsl_device *device,
		struct kgsl_context *context, unsigned int timestamp,
		unsigned int msecs);
	unsigned int (*readtimestamp) (struct kgsl_device *device,
		struct kgsl_context *context, enum kgsl_timestamp_type type);
	/* Submit a command batch for execution on the GPU */
	int (*issueibcmds) (struct kgsl_device_private *dev_priv,
		struct kgsl_context *context, struct kgsl_cmdbatch *cmdbatch,
		uint32_t *timestamps);
	int (*setup_pt)(struct kgsl_device *device,
		struct kgsl_pagetable *pagetable);
	void (*cleanup_pt)(struct kgsl_device *device,
		struct kgsl_pagetable *pagetable);
	void (*power_stats)(struct kgsl_device *device,
		struct kgsl_power_stats *stats);
	void (*irqctrl)(struct kgsl_device *device, int state);
	unsigned int (*gpuid)(struct kgsl_device *device, unsigned int *chipid);
	void * (*snapshot)(struct kgsl_device *device, void *snapshot,
		int *remain, int hang);
	irqreturn_t (*irq_handler)(struct kgsl_device *device);
	int (*drain)(struct kgsl_device *device);
	/* Optional functions - these functions are not mandatory.  The
	   driver will check that the function pointer is not NULL before
	   calling the hook */
	int (*setstate) (struct kgsl_device *device, unsigned int context_id,
			uint32_t flags);
	struct kgsl_context *(*drawctxt_create) (struct kgsl_device_private *,
						uint32_t *flags);
	int (*drawctxt_detach) (struct kgsl_context *context);
	void (*drawctxt_destroy) (struct kgsl_context *context);
	void (*drawctxt_dump) (struct kgsl_device *device,
		struct kgsl_context *context);
	long (*ioctl) (struct kgsl_device_private *dev_priv,
		unsigned int cmd, void *data);
	int (*setproperty) (struct kgsl_device_private *dev_priv,
		enum kgsl_property_type type, void *value,
		unsigned int sizebytes);
	int (*postmortem_dump) (struct kgsl_device *device, int manual);
	int (*next_event)(struct kgsl_device *device,
		struct kgsl_event *event);
	void (*drawctxt_sched)(struct kgsl_device *device,
		struct kgsl_context *context);
	void (*resume)(struct kgsl_device *device);
};
  134. /* MH register values */
/*
 * MH register values.  Cached configuration written to the memory
 * hierarchy / MPU registers at start-up (presumably - the actual register
 * writes happen in the GPU-specific code; confirm there).
 */
struct kgsl_mh {
	unsigned int mharb;		/* arbiter configuration */
	unsigned int mh_intf_cfg1;	/* interface config 1 */
	unsigned int mh_intf_cfg2;	/* interface config 2 */
	uint32_t mpu_base;		/* MPU region base */
	int mpu_range;			/* MPU region size */
};
/*
 * Event callback signature: (device, context, priv, result) where result
 * is an enum kgsl_event_results value.
 */
typedef void (*kgsl_event_func)(struct kgsl_device *, struct kgsl_context *,
		void *, int);

/**
 * struct kgsl_event - KGSL GPU timestamp event
 * @device: Pointer to the KGSL device that owns the event
 * @context: Pointer to the context that owns the event
 * @timestamp: Timestamp for the event to expire
 * @func: Callback function for the event when it expires
 * @priv: Private data passed to the callback function
 * @node: List node for the kgsl_event_group list
 * @created: Jiffies when the event was created
 * @work: Work struct for dispatching the callback
 * @result: KGSL event result type to pass to the callback
 */
struct kgsl_event {
	struct kgsl_device *device;
	struct kgsl_context *context;
	unsigned int timestamp;
	kgsl_event_func func;
	void *priv;
	struct list_head node;
	unsigned int created;
	struct work_struct work;
	int result;
};
/**
 * struct kgsl_event_group - A list of GPU events
 * @context: Pointer to the active context for the events
 * @lock: Spinlock for protecting the list
 * @events: List of active GPU events
 * @group: Node for the master group list
 * @processed: Last processed timestamp
 */
struct kgsl_event_group {
	struct kgsl_context *context;
	spinlock_t lock;
	struct list_head events;
	struct list_head group;
	unsigned int processed;
};
/* Flag to mark the memobj_node as a preamble */
#define MEMOBJ_PREAMBLE BIT(0)
/* Flag to mark that the memobj_node should not go to the hardware */
#define MEMOBJ_SKIP BIT(1)

/**
 * struct kgsl_memobj_node - Memory object descriptor
 * @node: Local list node for the cmdbatch
 * @gpuaddr: GPU start address of the memory object
 * @sizedwords: size of the memory object at @gpuaddr
 * @priv: internal flags for the object (MEMOBJ_PREAMBLE / MEMOBJ_SKIP bits)
 */
struct kgsl_memobj_node {
	struct list_head node;
	unsigned long gpuaddr;
	size_t sizedwords;
	unsigned long priv;
};
/**
 * struct kgsl_cmdbatch - KGSL command descriptor
 * @device: KGSL GPU device that the command was created for
 * @context: KGSL context that created the command
 * @lock: Spinlock guarding the cmdbatch internals (NOTE(review):
 * presumably @synclist and @timer - confirm against the users)
 * @timestamp: Timestamp assigned to the command
 * @flags: flags
 * @priv: Internal flags
 * @fault_policy: Internal policy describing how to handle this command in case
 * of a fault
 * @fault_recovery: recovery actions actually tried for this batch
 * @expires: Point in time when the cmdbatch is considered to be hung
 * @refcount: kref structure to maintain the reference count
 * @cmdlist: List of IBs to issue
 * @memlist: List of all memory used in this command batch
 * @synclist: List of context/timestamp tuples to wait for before issuing
 * @timer: a timer used to track possible sync timeouts for this cmdbatch
 * @marker_timestamp: For markers, the timestamp of the last "real" command that
 * was queued
 *
 * This structure defines an atomic batch of command buffers issued from
 * userspace.
 */
struct kgsl_cmdbatch {
	struct kgsl_device *device;
	struct kgsl_context *context;
	spinlock_t lock;
	uint32_t timestamp;
	uint32_t flags;
	unsigned long priv;
	unsigned long fault_policy;
	unsigned long fault_recovery;
	unsigned long expires;
	struct kref refcount;
	struct list_head cmdlist;
	struct list_head memlist;
	struct list_head synclist;
	struct timer_list timer;
	unsigned int marker_timestamp;
};
/**
 * enum kgsl_cmdbatch_priv - Internal cmdbatch flags (bit numbers used with
 * the @priv field of struct kgsl_cmdbatch)
 * @CMDBATCH_FLAG_SKIP: skip the entire command batch
 * @CMDBATCH_FLAG_FORCE_PREAMBLE: Force the preamble on for the cmdbatch
 * @CMDBATCH_FLAG_WFI: Force wait-for-idle for the submission
 * @CMDBATCH_FLAG_FENCE_LOG: Set if the cmdbatch is dumping fence logs via the
 * cmdbatch timer - this is used to avoid recursion
 */
enum kgsl_cmdbatch_priv {
	CMDBATCH_FLAG_SKIP = 0,
	CMDBATCH_FLAG_FORCE_PREAMBLE,
	CMDBATCH_FLAG_WFI,
	CMDBATCH_FLAG_FENCE_LOG,
};
/*
 * struct kgsl_device - master per-GPU structure: register mappings, power
 * management, MMU, snapshot bookkeeping, logging controls and event
 * machinery.  Serialized by @mutex; the owning thread is cached in
 * @mutex_owner for the recursive kgsl_mutex_lock()/kgsl_mutex_unlock()
 * helpers below.
 */
struct kgsl_device {
	struct device *dev;
	const char *name;
	unsigned int ver_major;
	unsigned int ver_minor;
	uint32_t flags;
	enum kgsl_deviceid id;

	/* Starting physical address for GPU registers */
	unsigned long reg_phys;

	/* Starting Kernel virtual address for GPU registers */
	void *reg_virt;

	/* Total memory size for all GPU registers */
	unsigned int reg_len;

	/* Kernel virtual address for GPU shader memory */
	void *shader_mem_virt;

	/* Starting physical address for GPU shader memory */
	unsigned long shader_mem_phys;

	/* GPU shader memory size */
	unsigned int shader_mem_len;
	struct kgsl_memdesc memstore;
	const char *iomemname;
	const char *shadermemname;

	struct kgsl_mh mh;
	struct kgsl_mmu mmu;
	struct completion hwaccess_gate;
	struct completion cmdbatch_gate;
	const struct kgsl_functable *ftbl;
	struct work_struct idle_check_ws;
	struct timer_list idle_timer;
	struct kgsl_pwrctrl pwrctrl;
	int open_count;

	/* Device mutex; owner cached in mutex_owner so kgsl_mutex_lock()
	 * can detect re-entry by the thread that already holds it */
	struct mutex mutex;
	atomic64_t mutex_owner;

	uint32_t state;			/* current KGSL_STATE_* value */
	uint32_t requested_state;	/* pending KGSL_STATE_* transition */

	atomic_t active_cnt;

	wait_queue_head_t wait_queue;
	wait_queue_head_t active_cnt_wq;
	struct workqueue_struct *work_queue;
	struct device *parentdev;
	struct dentry *d_debugfs;
	struct idr context_idr;
	rwlock_t context_lock;		/* protects context_idr */

	void *snapshot; /* Pointer to the snapshot memory region */
	int snapshot_maxsize; /* Max size of the snapshot region */
	int snapshot_size; /* Current size of the snapshot region */
	u32 snapshot_timestamp; /* Timestamp of the last valid snapshot */
	u32 snapshot_faultcount; /* Total number of faults since boot */
	int snapshot_frozen; /* 1 if the snapshot output is frozen until
				it gets read by the user.  This avoids
				losing the output on multiple hangs */
	struct kobject snapshot_kobj;

	/*
	 * List of GPU buffers that have been frozen in memory until they can be
	 * dumped
	 */
	struct list_head snapshot_obj_list;

	/* Logging levels */
	int cmd_log;
	int ctxt_log;
	int drv_log;
	int mem_log;
	int pwr_log;
	int pm_dump_enable;
	struct kgsl_pwrscale pwrscale;
	struct kobject pwrscale_kobj;
	struct work_struct event_work;

	/* Postmortem Control switches */
	int pm_regs_enabled;
	int pm_ib_enabled;

	int reset_counter; /* Track how many GPU core resets have occurred */
	int cff_dump_enable;
	struct workqueue_struct *events_wq;

	struct kgsl_event_group global_events;
	struct kgsl_event_group iommu_events;
};
/*
 * KGSL_DEVICE_COMMON_INIT - designated-initializer fragment for the
 * fields of struct kgsl_device that are common to every GPU.  Used by
 * GPU-specific code that defines its kgsl_device statically.
 */
#define KGSL_DEVICE_COMMON_INIT(_dev) \
	.hwaccess_gate = COMPLETION_INITIALIZER((_dev).hwaccess_gate),\
	.cmdbatch_gate = COMPLETION_INITIALIZER((_dev).cmdbatch_gate),\
	.idle_check_ws = __WORK_INITIALIZER((_dev).idle_check_ws,\
			kgsl_idle_check),\
	.event_work  = __WORK_INITIALIZER((_dev).event_work,\
			kgsl_process_events),\
	.context_idr = IDR_INIT((_dev).context_idr),\
	.wait_queue = __WAIT_QUEUE_HEAD_INITIALIZER((_dev).wait_queue),\
	.active_cnt_wq = __WAIT_QUEUE_HEAD_INITIALIZER((_dev).active_cnt_wq),\
	.mutex = __MUTEX_INITIALIZER((_dev).mutex),\
	.state = KGSL_STATE_INIT,\
	.ver_major = DRIVER_VERSION_MAJOR,\
	.ver_minor = DRIVER_VERSION_MINOR
/* bits for struct kgsl_context.priv */
/* the context has been destroyed by userspace and is no longer using the gpu */
#define KGSL_CONTEXT_DETACHED 0
/* the context has caused a pagefault */
#define KGSL_CONTEXT_PAGEFAULT 1

struct kgsl_process_private;
/**
 * struct kgsl_context - Master structure for a KGSL context object
 * @refcount: kref object for reference counting the context
 * @id: integer identifier for the context
 * @tid: task that created this context.
 * @dev_priv: pointer to the owning device instance
 * @proc_priv: pointer to the process private data that owns this context
 * @priv: in-kernel context flags, use KGSL_CONTEXT_* values
 * @device: pointer to the KGSL device that owns this context
 * @reset_status: status indication whether a gpu reset occurred and whether
 * this context was responsible for causing it
 * @wait_on_invalid_ts: flag indicating if this context has tried to wait on a
 * bad timestamp
 * @timeline: sync timeline used to create fences that can be signaled when a
 * sync_pt timestamp expires
 * @events: A kgsl_event_group for this context - contains the list of GPU
 * events
 * @pagefault_ts: global timestamp of the pagefault, if KGSL_CONTEXT_PAGEFAULT
 * is set.
 * @flags: flags from userspace controlling the behavior of this context
 * @pwr_constraint: power constraint from userspace for this context
 * @fault_count: number of times gpu hanged in last _context_throttle_time ms
 * @fault_time: time of the first gpu hang in last _context_throttle_time ms
 */
struct kgsl_context {
	struct kref refcount;
	uint32_t id;
	pid_t tid;
	struct kgsl_device_private *dev_priv;
	struct kgsl_process_private *proc_priv;
	unsigned long priv;
	struct kgsl_device *device;
	unsigned int reset_status;
	bool wait_on_invalid_ts;
	struct sync_timeline *timeline;
	struct kgsl_event_group events;
	unsigned int pagefault_ts;
	unsigned int flags;
	struct kgsl_pwr_constraint pwr_constraint;
	unsigned int fault_count;
	unsigned long fault_time;
};
/**
 * struct kgsl_process_private - Private structure for a KGSL process (across
 * all devices)
 * @priv: Internal flags, use KGSL_PROCESS_* values
 * @pid: Identification structure for the task owner of the process
 * @comm: task name of the process
 * @mem_lock: Spinlock to protect the process memory lists
 * @refcount: kref object for reference counting the process
 * @process_private_mutex: Mutex to synchronize access to the process struct
 * @mem_rb: RB tree node for the memory owned by this process
 * @mem_idr: Iterator for assigning IDs to memory allocations
 * @pagetable: Pointer to the pagetable owned by this process
 * @list: Node in the master list of process private structs
 * @kobj: Pointer to a kobj for the sysfs directory for this process
 * @debug_root: Pointer to the debugfs root for this process
 * @stats: Memory allocation statistics for this process
 */
struct kgsl_process_private {
	unsigned long priv;
	struct pid *pid;
	char comm[TASK_COMM_LEN];
	spinlock_t mem_lock;

	/* General refcount for process private struct obj */
	struct kref refcount;
	/* Mutex to synchronize access to each process_private struct obj */
	struct mutex process_private_mutex;

	struct rb_root mem_rb;
	struct idr mem_idr;
	struct kgsl_pagetable *pagetable;
	struct list_head list;
	struct kobject kobj;
	struct dentry *debug_root;
	struct {
		unsigned int cur;	/* current allocated bytes */
		unsigned int max;	/* high watermark */
	} stats[KGSL_MEM_ENTRY_MAX];
};
/**
 * enum kgsl_process_priv_flags - Private flags for kgsl_process_private
 * @KGSL_PROCESS_INIT: Set if the process structure has been set up
 */
enum kgsl_process_priv_flags {
	KGSL_PROCESS_INIT = 0,
};

/* Per-open-fd private data: binds a device to the opening process */
struct kgsl_device_private {
	struct kgsl_device *device;
	struct kgsl_process_private *process_priv;
};
  436. struct kgsl_device *kgsl_get_device(int dev_idx);
  437. static inline void kgsl_process_add_stats(struct kgsl_process_private *priv,
  438. unsigned int type, size_t size)
  439. {
  440. priv->stats[type].cur += size;
  441. if (priv->stats[type].max < priv->stats[type].cur)
  442. priv->stats[type].max = priv->stats[type].cur;
  443. }
  444. static inline void kgsl_regread(struct kgsl_device *device,
  445. unsigned int offsetwords,
  446. unsigned int *value)
  447. {
  448. device->ftbl->regread(device, offsetwords, value);
  449. }
  450. static inline void kgsl_regwrite(struct kgsl_device *device,
  451. unsigned int offsetwords,
  452. unsigned int value)
  453. {
  454. device->ftbl->regwrite(device, offsetwords, value);
  455. }
  456. static inline int kgsl_idle(struct kgsl_device *device)
  457. {
  458. return device->ftbl->idle(device);
  459. }
  460. static inline unsigned int kgsl_gpuid(struct kgsl_device *device,
  461. unsigned int *chipid)
  462. {
  463. return device->ftbl->gpuid(device, chipid);
  464. }
  465. static inline unsigned int kgsl_readtimestamp(struct kgsl_device *device,
  466. struct kgsl_context *context,
  467. enum kgsl_timestamp_type type)
  468. {
  469. return device->ftbl->readtimestamp(device, context, type);
  470. }
  471. static inline int kgsl_create_device_sysfs_files(struct device *root,
  472. const struct device_attribute **list)
  473. {
  474. int ret = 0, i;
  475. for (i = 0; list[i] != NULL; i++)
  476. ret |= device_create_file(root, list[i]);
  477. return ret;
  478. }
  479. static inline void kgsl_remove_device_sysfs_files(struct device *root,
  480. const struct device_attribute **list)
  481. {
  482. int i;
  483. for (i = 0; list[i] != NULL; i++)
  484. device_remove_file(root, list[i]);
  485. }
  486. static inline struct kgsl_mmu *
  487. kgsl_get_mmu(struct kgsl_device *device)
  488. {
  489. return (struct kgsl_mmu *) (device ? &device->mmu : NULL);
  490. }
  491. static inline struct kgsl_device *kgsl_device_from_dev(struct device *dev)
  492. {
  493. int i;
  494. for (i = 0; i < KGSL_DEVICE_MAX; i++) {
  495. if (kgsl_driver.devp[i] && kgsl_driver.devp[i]->dev == dev)
  496. return kgsl_driver.devp[i];
  497. }
  498. return NULL;
  499. }
  500. static inline int kgsl_create_device_workqueue(struct kgsl_device *device)
  501. {
  502. device->work_queue = create_singlethread_workqueue(device->name);
  503. if (!device->work_queue) {
  504. KGSL_DRV_ERR(device,
  505. "create_singlethread_workqueue(%s) failed\n",
  506. device->name);
  507. return -EINVAL;
  508. }
  509. return 0;
  510. }
  511. int kgsl_check_timestamp(struct kgsl_device *device,
  512. struct kgsl_context *context, unsigned int timestamp);
  513. int kgsl_device_platform_probe(struct kgsl_device *device);
  514. void kgsl_device_platform_remove(struct kgsl_device *device);
  515. const char *kgsl_pwrstate_to_str(unsigned int state);
  516. int kgsl_device_snapshot_init(struct kgsl_device *device);
  517. int kgsl_device_snapshot(struct kgsl_device *device, int hang);
  518. void kgsl_device_snapshot_close(struct kgsl_device *device);
  519. void kgsl_events_init(void);
  520. void kgsl_events_exit(void);
  521. void kgsl_del_event_group(struct kgsl_event_group *group);
  522. void kgsl_add_event_group(struct kgsl_event_group *group,
  523. struct kgsl_context *context);
  524. void kgsl_cancel_events_timestamp(struct kgsl_device *device,
  525. struct kgsl_event_group *group, unsigned int timestamp);
  526. void kgsl_cancel_events(struct kgsl_device *device,
  527. struct kgsl_event_group *group);
  528. void kgsl_cancel_event(struct kgsl_device *device,
  529. struct kgsl_event_group *group, unsigned int timestamp,
  530. kgsl_event_func func, void *priv);
  531. int kgsl_add_event(struct kgsl_device *device, struct kgsl_event_group *group,
  532. unsigned int timestamp, kgsl_event_func func, void *priv);
  533. void kgsl_process_event_group(struct kgsl_device *device,
  534. struct kgsl_event_group *group);
  535. void kgsl_process_events(struct work_struct *work);
/*
 * kgsl_device_get_drvdata() - return the platform data attached to the
 * underlying platform device.
 *
 * NOTE(review): assumes dev->parentdev is the struct device embedded in
 * a struct platform_device - confirm against the probe path that sets
 * parentdev before relying on this elsewhere.
 */
static inline struct kgsl_device_platform_data *
kgsl_device_get_drvdata(struct kgsl_device *dev)
{
	struct platform_device *pdev =
		container_of(dev->parentdev, struct platform_device, dev);

	return pdev->dev.platform_data;
}
  543. void kgsl_context_destroy(struct kref *kref);
  544. int kgsl_context_init(struct kgsl_device_private *, struct kgsl_context
  545. *context);
  546. int kgsl_context_detach(struct kgsl_context *context);
  547. void kgsl_context_dump(struct kgsl_context *context);
  548. int kgsl_memfree_find_entry(pid_t pid, unsigned long *gpuaddr,
  549. unsigned long *size, unsigned int *flags);
  550. /**
  551. * kgsl_context_put() - Release context reference count
  552. * @context: Pointer to the KGSL context to be released
  553. *
  554. * Reduce the reference count on a KGSL context and destroy it if it is no
  555. * longer needed
  556. */
  557. static inline void
  558. kgsl_context_put(struct kgsl_context *context)
  559. {
  560. if (context)
  561. kref_put(&context->refcount, kgsl_context_destroy);
  562. }
  563. /**
  564. * kgsl_context_detached() - check if a context is detached
  565. * @context: the context
  566. *
  567. * Check if a context has been destroyed by userspace and is only waiting
  568. * for reference counts to go away. This check is used to weed out
  569. * contexts that shouldn't use the gpu so NULL is considered detached.
  570. */
  571. static inline bool kgsl_context_detached(struct kgsl_context *context)
  572. {
  573. return (context == NULL || test_bit(KGSL_CONTEXT_DETACHED,
  574. &context->priv));
  575. }
/**
 * kgsl_context_get() - get a pointer to a KGSL context
 * @device: Pointer to the KGSL device that owns the context
 * @id: Context ID
 *
 * Find the context associated with the given ID number, increase the reference
 * count on it and return it.  The caller must make sure that this call is
 * paired with a kgsl_context_put.  This function is for internal use because it
 * doesn't validate the ownership of the context with the calling process - use
 * kgsl_context_get_owner for that
 */
static inline struct kgsl_context *kgsl_context_get(struct kgsl_device *device,
		uint32_t id)
{
	int result = 0;
	struct kgsl_context *context = NULL;

	/* context_lock keeps the idr entry stable while we take a ref */
	read_lock(&device->context_lock);

	context = idr_find(&device->context_idr, id);

	/* Don't return a context that has been detached */
	if (kgsl_context_detached(context))
		context = NULL;
	else
		/* Fails (stays 0) if the refcount already hit zero, i.e.
		 * the context is mid-teardown */
		result = kref_get_unless_zero(&context->refcount);

	read_unlock(&device->context_lock);

	if (!result)
		return NULL;
	return context;
}
  604. /**
  605. * _kgsl_context_get() - lightweight function to just increment the ref count
  606. * @context: Pointer to the KGSL context
  607. *
  608. * Get a reference to the specified KGSL context structure. This is a
  609. * lightweight way to just increase the refcount on a known context rather than
  610. * walking through kgsl_context_get and searching the iterator
  611. */
  612. static inline int _kgsl_context_get(struct kgsl_context *context)
  613. {
  614. int ret = 0;
  615. if (context) {
  616. ret = kref_get_unless_zero(&context->refcount);
  617. /*
  618. * We shouldn't realistically fail kref_get_unless_zero unless
  619. * we did something really dumb so make the failure both public
  620. * and painful
  621. */
  622. WARN_ON(!ret);
  623. }
  624. return ret;
  625. }
  626. /**
  627. * kgsl_context_get_owner() - get a pointer to a KGSL context in a specific
  628. * process
  629. * @dev_priv: Pointer to the process struct
  630. * @id: Context ID to return
  631. *
  632. * Find the context associated with the given ID number, increase the reference
  633. * count on it and return it. The caller must make sure that this call is
  634. * paired with a kgsl_context_put. This function validates that the context id
  635. * given is owned by the dev_priv instancet that is passed in. See
  636. * kgsl_context_get for the internal version that doesn't do the check
  637. */
  638. static inline struct kgsl_context *kgsl_context_get_owner(
  639. struct kgsl_device_private *dev_priv, uint32_t id)
  640. {
  641. struct kgsl_context *context;
  642. context = kgsl_context_get(dev_priv->device, id);
  643. /* Verify that the context belongs to current calling fd. */
  644. if (context != NULL && context->dev_priv != dev_priv) {
  645. kgsl_context_put(context);
  646. return NULL;
  647. }
  648. return context;
  649. }
  650. void kgsl_dump_syncpoints(struct kgsl_device *device,
  651. struct kgsl_cmdbatch *cmdbatch);
  652. void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch);
  653. void kgsl_cmdbatch_destroy_object(struct kref *kref);
  654. /**
  655. * kgsl_process_private_get() - increment the refcount on a kgsl_process_private
  656. * struct
  657. * @process: Pointer to the KGSL process_private
  658. *
  659. * Returns 0 if the structure is invalid and a reference count could not be
  660. * obtained, nonzero otherwise.
  661. */
  662. static inline int kgsl_process_private_get(struct kgsl_process_private *process)
  663. {
  664. int ret = 0;
  665. if (process != NULL)
  666. ret = kref_get_unless_zero(&process->refcount);
  667. return ret;
  668. }
  669. void kgsl_process_private_put(struct kgsl_process_private *private);
  670. struct kgsl_process_private *kgsl_process_private_find(pid_t pid);
  671. /**
  672. * kgsl_cmdbatch_put() - Decrement the refcount for a command batch object
  673. * @cmdbatch: Pointer to the command batch object
  674. */
  675. static inline void kgsl_cmdbatch_put(struct kgsl_cmdbatch *cmdbatch)
  676. {
  677. if (cmdbatch)
  678. kref_put(&cmdbatch->refcount, kgsl_cmdbatch_destroy_object);
  679. }
  680. /**
  681. * kgsl_sysfs_store() - parse a string from a sysfs store function
  682. * @buf: Incoming string to parse
  683. * @ptr: Pointer to an unsigned int to store the value
  684. */
  685. static inline int kgsl_sysfs_store(const char *buf, unsigned int *ptr)
  686. {
  687. unsigned int val;
  688. int rc;
  689. rc = kstrtou32(buf, 0, &val);
  690. if (rc)
  691. return rc;
  692. if (ptr)
  693. *ptr = val;
  694. return 0;
  695. }
/**
 * kgsl_mutex_lock() -- try to acquire the mutex if current thread does not
 * already own it
 * @mutex: mutex to lock
 * @owner: current mutex owner
 *
 * Returns 0 when the mutex was actually taken here, 1 when the calling
 * thread already held it (recursive entry - the caller must then skip
 * the matching unlock).  NOTE(review): the lockless read of @owner is
 * safe only because a value equal to "current" can only have been
 * stored by this same thread; any other (possibly stale) value simply
 * sends us down the regular mutex_lock() path.
 */
static inline int kgsl_mutex_lock(struct mutex *mutex, atomic64_t *owner)
{
	if (atomic64_read(owner) != (long)current) {
		mutex_lock(mutex);
		atomic64_set(owner, (long)current);
		/* Barrier to make sure owner is updated */
		smp_wmb();
		return 0;
	}
	return 1;
}
/**
 * kgsl_mutex_unlock() -- Clear the owner and unlock the mutex
 * @mutex: mutex to unlock
 * @owner: current mutex owner
 *
 * The owner is cleared before dropping the mutex so there is no window
 * in which another thread can hold the lock while @owner still names the
 * previous holder (which would confuse kgsl_mutex_lock()'s re-entry
 * check).
 */
static inline void kgsl_mutex_unlock(struct mutex *mutex, atomic64_t *owner)
{
	atomic64_set(owner, 0);
	mutex_unlock(mutex);
}
  723. #endif /* __KGSL_DEVICE_H */