mtk_vcu.c

/*
 * Copyright (c) 2016 MediaTek Inc.
 * Copyright (C) 2021 XiaoMi, Inc.
 * Author: Andrew-CT Chen <andrew-ct.chen@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <asm/cacheflush.h>
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/file.h>
#include <linux/firmware.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/semaphore.h>
#include <linux/suspend.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <linux/freezer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <linux/mailbox/mtk-cmdq-mailbox.h>
#include <linux/mailbox_controller.h>
#include <linux/signal.h>
#include <trace/events/signal.h>
#include <linux/string.h>
#ifdef CONFIG_MTK_IOMMU_V2
#include <linux/iommu.h>
#endif
#include "mtk_vcodec_mem.h"
#include <uapi/linux/mtk_vcu_controls.h>
#include "mtk_vcu.h"
#include "smi_public.h"

/*
#undef pr_debug
#define pr_debug pr_info
#undef dev_dbg
#define dev_dbg dev_info
*/

/**
 * VCU (Video Communication/Controller Unit) is a tiny processor
 * controlling video hardware related to video codec, scaling and color
 * format converting.
 * VCU interfaces with other blocks by shared memory and interrupts.
 **/
#define VCU_PATH		"/dev/vpud"
#define MDP_PATH		"/dev/mdpd"
#define CAM_PATH		"/dev/camd"
#define VCU_DEVNAME		"vcu"

#ifdef CONFIG_MTK_ENG_BUILD
#define IPI_TIMEOUT_MS		16000U
#else
#define IPI_TIMEOUT_MS		5000U
#endif
#define VCU_FW_VER_LEN		16
#define VCODEC_INST_MAX		64
#define GCE_EVENT_MAX		64
#define GCE_THNUM_MAX		2
#define GCE_PENDING_CNT		10
/* maximum number of mtk vcu devices supported */
#define MTK_VCU_NR_MAX		3

/* vcu extended mapping length */
#define VCU_PMEM0_LEN(vcu_data)	(vcu_data->extmem.p_len)
#define VCU_DMEM0_LEN(vcu_data)	(vcu_data->extmem.d_len)
/* vcu extended user virtual address */
#define VCU_PMEM0_VMA(vcu_data)	(vcu_data->extmem.p_vma)
#define VCU_DMEM0_VMA(vcu_data)	(vcu_data->extmem.d_vma)
/* vcu extended kernel virtual address */
#define VCU_PMEM0_VIRT(vcu_data)	(vcu_data->extmem.p_va)
#define VCU_DMEM0_VIRT(vcu_data)	(vcu_data->extmem.d_va)
/* vcu extended physical address */
#define VCU_PMEM0_PHY(vcu_data)	(vcu_data->extmem.p_pa)
#define VCU_DMEM0_PHY(vcu_data)	(vcu_data->extmem.d_pa)
/* vcu extended iova address */
#define VCU_PMEM0_IOVA(vcu_data)	(vcu_data->extmem.p_iova)
#define VCU_DMEM0_IOVA(vcu_data)	(vcu_data->extmem.d_iova)

#define VCU_SHMEM_SIZE		0x80000
#define MAP_SHMEM_ALLOC_BASE	0x80000000UL
#define MAP_SHMEM_ALLOC_RANGE	VCU_SHMEM_SIZE
#define MAP_SHMEM_ALLOC_END	(MAP_SHMEM_ALLOC_BASE + MAP_SHMEM_ALLOC_RANGE)
#define MAP_SHMEM_COMMIT_BASE	0x88000000UL
#define MAP_SHMEM_COMMIT_RANGE	VCU_SHMEM_SIZE
#define MAP_SHMEM_COMMIT_END	(MAP_SHMEM_COMMIT_BASE + MAP_SHMEM_COMMIT_RANGE)
#define MAP_SHMEM_MM_BASE	0x200000000UL
#define MAP_SHMEM_MM_CACHEABLE_BASE	0x400000000UL
#define MAP_SHMEM_PA_BASE	0x800000000UL
#define MAP_SHMEM_MM_RANGE	0x1FFFFFFFFUL
#define MAP_SHMEM_MM_END	(MAP_SHMEM_MM_BASE + MAP_SHMEM_MM_RANGE)
#define MAP_SHMEM_MM_CACHEABLE_END	(MAP_SHMEM_MM_CACHEABLE_BASE \
					+ MAP_SHMEM_MM_RANGE)

struct mtk_vcu *vcu_ptr;
static char *vcodec_param_string = "";

inline unsigned int ipi_id_to_inst_id(int id)
{
	/* Assume VENC uses instance 1 and others use 0. */
	if (id < IPI_VENC_COMMON && id >= IPI_VCU_INIT)
		return VCU_VDEC;
	else
		return VCU_VENC;
}

#define vcu_dbg_log(fmt, arg...) do { \
		if (vcu_ptr->enable_vcu_dbg_log) \
			pr_info(fmt, ##arg); \
	} while (0)

#define MAP_PA_BASE_1GB	0x40000000 /* < 1GB registers */
#define VCU_MAP_HW_REG_NUM 4
/* VDEC VDEC_LAT VENC_CORE0 VENC_CORE1 */
/* Default vcu_mtkdev[0] handles vdec, vcu_mtkdev[1] handles mdp */
static struct mtk_vcu *vcu_mtkdev[MTK_VCU_NR_MAX];
static struct task_struct *vcud_task;
static struct files_struct *files;
/* for protecting vpud file struct */
struct mutex vpud_file_mutex;

static __attribute__((used)) unsigned int time_ms_s, time_ms_e;
#define time_check_start() { \
		time_ms_s = jiffies_to_msecs(jiffies); \
	}
#define time_check_end(timeout_ms, debug) do { \
		time_ms_e = jiffies_to_msecs(jiffies); \
		if ((time_ms_e - time_ms_s) > timeout_ms || \
			debug) \
			pr_info("[VCU][Info] %s L:%d take %u timeout %u ms", \
				__func__, __LINE__, \
				time_ms_e - time_ms_s, \
				timeout_ms); \
	} while (0)
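/*
 * Usage sketch (illustrative only, not part of the driver): the two
 * macros above bracket a potentially slow region and log only when the
 * elapsed time exceeds the given budget, or when debug output is forced
 * via the second argument. A hypothetical caller looks like:
 *
 *	time_check_start();
 *	do_something_slow();
 *	time_check_end(100, 0);	// log if it took more than 100 ms
 *
 * Note that time_ms_s/time_ms_e are shared globals, so nested or
 * concurrent sections would overwrite each other's timestamps.
 */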
/**
 * struct vcu_mem - VCU memory information
 *
 * @p_vma:	the user virtual memory address of
 *		VCU extended program memory
 * @d_vma:	the user virtual memory address of VCU extended data memory
 * @p_va:	the kernel virtual memory address of
 *		VCU extended program memory
 * @d_va:	the kernel virtual memory address of VCU extended data memory
 * @p_pa:	the physical memory address of VCU extended program memory
 * @d_pa:	the physical memory address of VCU extended data memory
 * @p_iova:	the iova memory address of VCU extended program memory
 * @d_iova:	the iova memory address of VCU extended data memory
 * @p_len:	the length of VCU extended program memory
 * @d_len:	the length of VCU extended data memory
 */
struct vcu_mem {
	unsigned long p_vma;
	unsigned long d_vma;
	void *p_va;
	void *d_va;
	dma_addr_t p_pa;
	dma_addr_t d_pa;
	dma_addr_t p_iova;
	dma_addr_t d_iova;
	unsigned long p_len;
	unsigned long d_len;
};

/**
 * struct vcu_run - VCU initialization status
 *
 * @signaled:		set when vcu initialization has completed
 * @fw_ver:		VCU firmware version
 * @dec_capability:	decoder capability which is not used for now and
 *			the value is reserved for future use
 * @enc_capability:	encoder capability which is not used for now and
 *			the value is reserved for future use
 * @wq:			wait queue for VCU initialization status
 */
struct vcu_run {
	u32 signaled;
	char fw_ver[VCU_FW_VER_LEN];
	unsigned int dec_capability;
	unsigned int enc_capability;
	wait_queue_head_t wq;
};

/**
 * struct vcu_ipi_desc - VCU IPI descriptor
 *
 * @handler:	IPI handler
 * @name:	the name of IPI handler
 * @priv:	the private data of IPI handler
 */
struct vcu_ipi_desc {
	ipi_handler_t handler;
	const char *name;
	void *priv;
};

struct map_hw_reg {
	unsigned long base;
	unsigned long len;
};

struct gce_callback_data {
	struct gce_cmdq_obj cmdq_buff;
	struct mtk_vcu *vcu_ptr;
	struct cmdq_pkt *pkt_ptr;
	struct mtk_vcu_queue *vcu_queue;
};

struct gce_ctx_info {
	void *v4l2_ctx;
	u64 user_hdl;
	atomic_t flush_done;
	/* count of gce callbacks completed but not yet waited on by user */
	struct gce_callback_data buff[GCE_PENDING_CNT];
	atomic_t flush_pending;
	/* count of gce flushes not yet called back */
};

/**
 * struct mtk_vcu - vcu driver data
 * @extmem:		VCU extended memory information
 * @run:		VCU initialization status
 * @ipi_desc:		VCU IPI descriptor
 * @dev:		VCU struct device
 * @vcu_mutex:		protects mtk_vcu (except recv_buf) and ensures only
 *			one client uses the VCU service at a time. For
 *			example, suppose a client is using VCU to decode VP8.
 *			If another client wants to encode VP8,
 *			it has to wait until the VP8 decode completes.
 * @vcu_gce_mutex:	protects the mtk_vcu gce flush & callback power
 *			sequence
 * @file:		VCU daemon file pointer
 * @is_open:		flag indicating whether the VCUD device is open
 * @ack_wq:		wait queue for each codec and mdp. When sleeping
 *			processes wake up, they check the condition
 *			"ipi_id_ack" to run the corresponding action or
 *			go back to sleep.
 * @ipi_id_ack:		ACKs for registered IPI functions sending
 *			interrupts to VCU
 * @get_wq:		when a sleeping process wakes up, it checks the
 *			condition "ipi_got" to run the corresponding action
 *			or go back to sleep.
 * @ipi_got:		flags for IPI messages polled from user space
 * @ipi_done:		flags for polling the next IPI message, set once the
 *			previous message has been dispatched by the daemon
 * @user_obj:		temporary share_obj used for ipi_msg_get
 * @vcu_devno:		dev_t used to init the vcu character device
 * @vcu_cdev:		pointer to the vcu character device
 * @vcu_class:		class created for the vcu device
 * @vcu_device:		VCU struct device
 * @vcuname:		VCU struct device name in dtsi
 * @path:		the path to keep the mdpd path or vcud path
 * @vcuid:		VCU device id
 *
 */
struct mtk_vcu {
	struct vcu_mem extmem;
	struct vcu_run run;
	struct vcu_ipi_desc ipi_desc[IPI_MAX];
	struct device *dev;
	struct mutex vcu_mutex[VCU_CODEC_MAX];
	struct mutex vcu_gce_mutex[VCU_CODEC_MAX];
	struct mutex ctx_ipi_binding[VCU_CODEC_MAX];
	/* for protecting vcu data structure */
	struct mutex vcu_share;
	struct file *file;
	struct iommu_domain *io_domain;
	bool iommu_padding;
	/* temp for 33bits larb adding bits "1" iommu */
	struct map_hw_reg map_base[VCU_MAP_HW_REG_NUM];
	bool is_open;
	wait_queue_head_t ack_wq[VCU_CODEC_MAX];
	bool ipi_id_ack[IPI_MAX];
	wait_queue_head_t get_wq[VCU_CODEC_MAX];
	atomic_t ipi_got[VCU_CODEC_MAX];
	atomic_t ipi_done[VCU_CODEC_MAX];
	struct share_obj user_obj[VCU_CODEC_MAX];
	dev_t vcu_devno;
	struct cdev *vcu_cdev;
	struct class *vcu_class;
	struct device *vcu_device;
	const char *vcuname;
	const char *path;
	int vcuid;
	struct log_test_nofuse *vdec_log_info;
	wait_queue_head_t vdec_log_get_wq;
	atomic_t vdec_log_got;
	struct cmdq_base *clt_base;
	struct cmdq_client *clt_vdec[GCE_THNUM_MAX];
	struct cmdq_client *clt_venc[GCE_THNUM_MAX];
	struct cmdq_client *clt_venc_sec[GCE_THNUM_MAX];
	int gce_th_num[VCU_CODEC_MAX];
	int gce_codec_eid[GCE_EVENT_MAX];
	struct gce_cmds *gce_cmds[VCU_CODEC_MAX];
	void *curr_ctx[VCU_CODEC_MAX];
	struct vb2_buffer *curr_src_vb[VCU_CODEC_MAX];
	struct vb2_buffer *curr_dst_vb[VCU_CODEC_MAX];
	wait_queue_head_t gce_wq[VCU_CODEC_MAX];
	struct gce_ctx_info gce_info[VCODEC_INST_MAX];
	atomic_t gce_job_cnt[VCU_CODEC_MAX][GCE_THNUM_MAX];
	unsigned long flags[VCU_CODEC_MAX];
	int open_cnt;
	bool abort;
	struct semaphore vpud_killed;
	bool is_entering_suspend;
	u32 gce_gpr[GCE_THNUM_MAX];
	/* for gce poll timer, multi-thread sync */
	/* for vpud sig check */
	spinlock_t vpud_sig_lock;
	int vpud_is_going_down;
	/* for vcu dbg log */
	int enable_vcu_dbg_log;
};

static inline bool vcu_running(struct mtk_vcu *vcu)
{
	return (bool)vcu->run.signaled;
}

int vcu_ipi_register(struct platform_device *pdev,
		     enum ipi_id id, ipi_handler_t handler,
		     const char *name, void *priv)
{
	struct mtk_vcu *vcu = platform_get_drvdata(pdev);
	struct vcu_ipi_desc *ipi_desc;
	unsigned int i = 0;

	if (vcu == NULL) {
		dev_err(&pdev->dev, "vcu device is not ready\n");
		return -EPROBE_DEFER;
	}

	if (id < IPI_VCU_INIT || id >= IPI_MAX) {
		dev_info(&pdev->dev, "[VCU] failed to register ipi message (Invalid arg.)\n");
		return -EINVAL;
	}

	i = ipi_id_to_inst_id(id);
	mutex_lock(&vcu->vcu_mutex[i]);
	if (id >= IPI_VCU_INIT && id < IPI_MAX && handler != NULL) {
		ipi_desc = vcu->ipi_desc;
		ipi_desc[id].name = name;
		ipi_desc[id].handler = handler;
		ipi_desc[id].priv = priv;
		mutex_unlock(&vcu->vcu_mutex[i]);
		return 0;
	}
	mutex_unlock(&vcu->vcu_mutex[i]);

	dev_err(&pdev->dev, "register vcu ipi id %d with invalid arguments\n",
		id);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(vcu_ipi_register);
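/*
 * Usage sketch (hypothetical, for illustration only): a codec driver
 * registers a handler for its IPI id once the VCU platform device is
 * known. The handler name, id and context below are made-up examples,
 * not code from this driver.
 *
 *	static int vdec_ipi_handler(void *data, unsigned int len, void *priv)
 *	{
 *		// consume the message in data[0..len) ...
 *		return 0;	// 0 acks the sender, non-zero suppresses the ack
 *	}
 *
 *	ret = vcu_ipi_register(vcu_pdev, IPI_VDEC_H264,
 *			       vdec_ipi_handler, "vdec", ctx);
 */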
int vcu_ipi_send(struct platform_device *pdev,
		 enum ipi_id id, void *buf,
		 unsigned int len, void *priv)
{
	unsigned int i = 0;
	struct mtk_vcu *vcu = platform_get_drvdata(pdev);
	struct vcu_ipi_desc *ipi_desc;
	struct share_obj send_obj;
	unsigned long timeout;
	int ret;

	if (id <= IPI_VCU_INIT || id >= IPI_MAX ||
	    len > sizeof(send_obj.share_buf) || buf == NULL) {
		dev_err(&pdev->dev, "[VCU] failed to send ipi message (Invalid arg.)\n");
		return -EINVAL;
	}

	if (vcu_running(vcu) == false) {
		dev_err(&pdev->dev, "[VCU] %s: VCU is not running\n", __func__);
		return -EPERM;
	}

	i = ipi_id_to_inst_id(id);
	mutex_lock(&vcu->vcu_mutex[i]);
	if (vcu_ptr->abort) {
		if (vcu_ptr->open_cnt > 0) {
			dev_info(vcu->dev, "wait for vpud killed %d\n",
				 vcu_ptr->vpud_killed.count);
			ret = down_interruptible(&vcu_ptr->vpud_killed);
		}
		dev_info(&pdev->dev, "[VCU] vpud killed\n");
		mutex_unlock(&vcu->vcu_mutex[i]);
		return -EIO;
	}
	vcu->ipi_id_ack[id] = false;

	if (id >= IPI_VCU_INIT && id < IPI_MAX) {
		ipi_desc = vcu->ipi_desc;
		ipi_desc[id].priv = priv;
	}

	/* send the command to VCU */
	memcpy((void *)vcu->user_obj[i].share_buf, buf, len);
	vcu->user_obj[i].len = len;
	vcu->user_obj[i].id = (int)id;
	atomic_set(&vcu->ipi_got[i], 1);
	atomic_set(&vcu->ipi_done[i], 0);
	wake_up(&vcu->get_wq[i]);

	/* wait for VCU's ACK */
	timeout = msecs_to_jiffies(IPI_TIMEOUT_MS);
	ret = wait_event_timeout(vcu->ack_wq[i], vcu->ipi_id_ack[id], timeout);
	vcu->ipi_id_ack[id] = false;

	if (vcu_ptr->abort || ret == 0) {
		dev_info(&pdev->dev, "vcu ipi %d ack timed out! %d", id, ret);
		if (!vcu_ptr->abort) {
			task_lock(vcud_task);
			send_sig(SIGTERM, vcud_task, 0);
			task_unlock(vcud_task);
		}
		if (vcu_ptr->open_cnt > 0) {
			dev_info(vcu->dev, "wait for vpud killed %d\n",
				 vcu_ptr->vpud_killed.count);
			ret = down_interruptible(&vcu_ptr->vpud_killed);
		}
		dev_info(&pdev->dev, "[VCU] vpud killed\n");
		ret = -EIO;
		mutex_unlock(&vcu->vcu_mutex[i]);
		goto end;
	} else if (-ERESTARTSYS == ret) {
		dev_err(&pdev->dev, "vcu ipi %d ack wait interrupted by a signal",
			id);
		ret = -ERESTARTSYS;
		mutex_unlock(&vcu->vcu_mutex[i]);
		goto end;
	} else {
		ret = 0;
		mutex_unlock(&vcu->vcu_mutex[i]);
	}

	/* Wait for ipi_done: success means the daemon receiver thread has
	 * dispatched the ipi msg and returned to the kernel to get the
	 * next one.
	 * The dispatched ipi msg is then processed by the app service.
	 * Usually, that takes dozens of microseconds on average.
	 */
	while (atomic_read(&vcu->ipi_done[i]) == 0)
		cond_resched();

end:
	return ret;
}
EXPORT_SYMBOL_GPL(vcu_ipi_send);
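/*
 * Usage sketch (hypothetical): a sender marshals a small command struct
 * into the shared buffer; vcu_ipi_send() then blocks until the daemon
 * acks or IPI_TIMEOUT_MS expires. The message struct and ids below are
 * made-up examples, not definitions from this driver.
 *
 *	struct my_ipi_cmd msg = { .msg_id = CMD_DEC_START, .inst = inst };
 *	err = vcu_ipi_send(vcu_pdev, IPI_VDEC_H264, &msg, sizeof(msg), NULL);
 *	if (err)
 *		// daemon died or timed out; abort this job
 */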
static int vcu_ipi_get(struct mtk_vcu *vcu, unsigned long arg)
{
	unsigned int i = 0;
	int ret;
	unsigned char *user_data_addr = NULL;
	struct share_obj share_buff_data;

	user_data_addr = (unsigned char *)arg;
	ret = (long)copy_from_user(&share_buff_data, user_data_addr,
				   (unsigned long)sizeof(struct share_obj));
	if (ret != 0) {
		pr_info("[VCU] %s(%d) Copy data from user failed!\n",
			__func__, __LINE__);
		return -EINVAL;
	}
	i = ipi_id_to_inst_id(share_buff_data.id);

	/* mutex protection here is unnecessary, since different app service
	 * threads of the daemon correspond to different vcu_ipi_get threads.
	 * Different threads use different variables, e.g. ipi_done.
	 */
	atomic_set(&vcu->ipi_done[i], 1);

	ret = wait_event_freezable(vcu->get_wq[i],
				   atomic_read(&vcu->ipi_got[i]));
	if (ret != 0) {
		pr_info("[VCU][%d][%d] wait event return %d @%s\n",
			vcu->vcuid, i, ret, __func__);
		return ret;
	}
	ret = copy_to_user(user_data_addr, &vcu->user_obj[i],
			   (unsigned long)sizeof(struct share_obj));
	if (ret != 0) {
		pr_info("[VCU] %s(%d) Copy data to user failed!\n",
			__func__, __LINE__);
		ret = -EINVAL;
	}
	atomic_set(&vcu->ipi_got[i], 0);

	return ret;
}

static int vcu_log_get(struct mtk_vcu *vcu, unsigned long arg)
{
	int ret;
	unsigned char *user_data_addr = NULL;

	user_data_addr = (unsigned char *)arg;
	ret = wait_event_freezable(vcu->vdec_log_get_wq,
				   atomic_read(&vcu->vdec_log_got));
	if (ret != 0) {
		pr_info("[VCU][%d] wait event return %d @%s\n",
			vcu->vcuid, ret, __func__);
		return ret;
	}
	ret = copy_to_user(user_data_addr, vcu->vdec_log_info,
			   (unsigned long)sizeof(struct log_test_nofuse));
	if (ret != 0) {
		pr_info("[VCU] %s(%d) Copy data to user failed!\n",
			__func__, __LINE__);
		ret = -EINVAL;
	}
	atomic_set(&vcu->vdec_log_got, 0);

	return ret;
}

static int vcu_gce_set_inst_id(void *ctx, u64 gce_handle)
{
	int i;
	char data;

	mutex_lock(&vcu_ptr->vcu_share);
	for (i = 0; i < VCODEC_INST_MAX; i++) {
		if (vcu_ptr->gce_info[i].v4l2_ctx == NULL &&
		    !probe_kernel_address(ctx, data)) {
			vcu_ptr->gce_info[i].v4l2_ctx = ctx;
			vcu_ptr->gce_info[i].user_hdl = gce_handle;
			mutex_unlock(&vcu_ptr->vcu_share);
			vcu_dbg_log("[VCU] %s ctx %p hndl %llu create id %d\n",
				    __func__, ctx, gce_handle, i);
			return i;
		}
	}
	mutex_unlock(&vcu_ptr->vcu_share);
	pr_info("[VCU] %s fail ctx %p hndl %llu\n",
		__func__, ctx, gce_handle);

	return -1;
}

static int vcu_gce_get_inst_id(u64 gce_handle)
{
	int i, temp;

	mutex_lock(&vcu_ptr->vcu_share);
	for (i = 0; i < VCODEC_INST_MAX; i++) {
		if (vcu_ptr->gce_info[i].user_hdl == gce_handle) {
			temp = atomic_read(&vcu_ptr->gce_info[i].flush_done);
			mutex_unlock(&vcu_ptr->vcu_share);
			vcu_dbg_log("[VCU] %s hndl %llu get id %d cnt %d\n",
				    __func__, gce_handle, i, temp);
			return i;
		}
	}
	mutex_unlock(&vcu_ptr->vcu_share);

	return -1;
}

static void vcu_gce_clear_inst_id(void *ctx)
{
	int i, temp, temp2;
	u64 gce_handle;

	mutex_lock(&vcu_ptr->vcu_share);
	for (i = 0; i < VCODEC_INST_MAX; i++) {
		if (vcu_ptr->gce_info[i].v4l2_ctx == ctx) {
			gce_handle = vcu_ptr->gce_info[i].user_hdl;
			vcu_ptr->gce_info[i].v4l2_ctx = NULL;
			vcu_ptr->gce_info[i].user_hdl = 0;
			temp = atomic_read(&vcu_ptr->gce_info[i].flush_pending);
			/* flush_pending > 0: ctx hw not unprepared */
			temp2 = atomic_read(&vcu_ptr->gce_info[i].flush_done);
			/* flush_done > 0: user event not waited */
			atomic_set(&vcu_ptr->gce_info[i].flush_done, 0);
			atomic_set(&vcu_ptr->gce_info[i].flush_pending, 0);
			mutex_unlock(&vcu_ptr->vcu_share);
			if (temp > 0)
				vcu_aee_print(
					"%s %p hndl %llu free id %d cnt %d %d\n",
					__func__, ctx, gce_handle,
					i, temp, temp2);
			else if (temp2 > 0)
				pr_info("%s %p hndl %llu free id %d cnt %d %d\n",
					__func__, ctx, gce_handle,
					i, temp, temp2);
			else
				vcu_dbg_log(
					"%s %p hndl %llu free id %d cnt %d %d\n",
					__func__, ctx, gce_handle,
					i, temp, temp2);
			return;
		}
	}
	mutex_unlock(&vcu_ptr->vcu_share);

	pr_info("%s ctx %p not found!\n", __func__, ctx);
}

static void *vcu_check_gce_pa_base(struct mtk_vcu_queue *vcu_queue, u64 addr, u64 length)
{
	struct vcu_pa_pages *tmp;
	struct list_head *p, *q;

	list_for_each_safe(p, q, &vcu_queue->pa_pages.list) {
		tmp = list_entry(p, struct vcu_pa_pages, list);
		if (addr >= (u64)tmp->pa &&
		    addr + length <= (u64)tmp->pa + PAGE_SIZE)
			return tmp;
	}
	pr_info("%s addr %llx length %llx not found!\n", __func__, addr, length);

	return NULL;
}

static int vcu_check_reg_base(struct mtk_vcu *vcu, u64 addr, u64 length)
{
	int i;

	if (vcu->vcuid != 0 || addr >= MAP_PA_BASE_1GB)
		return -EINVAL;
	for (i = 0; i < (int)VCU_MAP_HW_REG_NUM; i++)
		if (addr >= (u64)vcu->map_base[i].base &&
		    addr + length <= (u64)vcu->map_base[i].base + vcu->map_base[i].len)
			return 0;
	pr_info("%s addr %llx length %llx not found!\n", __func__, addr, length);

	return -EINVAL;
}

static void vcu_set_gce_cmd(struct cmdq_pkt *pkt,
	struct mtk_vcu *vcu, struct mtk_vcu_queue *q, unsigned char cmd,
	u64 addr, u64 data, u32 mask, u32 gpr, u32 dma_offset, u32 dma_size)
{
	switch (cmd) {
	case CMD_READ:
		if (vcu_check_reg_base(vcu, addr, 4) == 0)
			cmdq_pkt_read_addr(pkt, addr, CMDQ_THR_SPR_IDX1);
		else
			pr_info("[VCU] CMD_READ wrong addr: 0x%llx\n", addr);
		break;
	case CMD_WRITE:
		if (vcu_check_reg_base(vcu, addr, 4) == 0)
			cmdq_pkt_write(pkt, vcu->clt_base, addr, data, mask);
		else
			pr_info("[VCU] CMD_WRITE wrong addr: 0x%llx 0x%llx 0x%x\n",
				addr, data, mask);
		break;
#if defined(CONFIG_MTK_SEC_VIDEO_PATH_SUPPORT)
	case CMD_SEC_WRITE:
#if defined(CONFIG_MTK_CMDQ_MBOX_EXT)
		if (vcu_check_reg_base(vcu, addr, 4) == 0)
			cmdq_sec_pkt_write_reg(pkt,
				addr,
				data,
				CMDQ_IWC_H_2_MVA,
				dma_offset,
				dma_size,
				0);
		else
			pr_info("[VCU] CMD_SEC_WRITE wrong addr: 0x%llx 0x%llx 0x%x 0x%x\n",
				addr, data, dma_offset, dma_size);
#endif
		pr_debug("[VCU] %s addr: 0x%llx, data: 0x%llx, offset: 0x%x, size: 0x%x\n",
			 __func__, addr, data, dma_offset, dma_size);
		break;
#endif
	case CMD_POLL_REG:
		if (vcu_check_reg_base(vcu, addr, 4) == 0)
			cmdq_pkt_poll_addr(pkt, data, addr, mask, gpr);
		else
			pr_info("[VCU] CMD_POLL_REG wrong addr: 0x%llx 0x%llx 0x%x\n",
				addr, data, mask);
		break;
	case CMD_WAIT_EVENT:
		if (data < GCE_EVENT_MAX)
			cmdq_pkt_wfe(pkt, vcu->gce_codec_eid[data]);
		else
			pr_info("[VCU] %s got wrong eid %llu\n",
				__func__, data);
		break;
	case CMD_MEM_MV:
		if ((vcu_check_reg_base(vcu, addr, 4) == 0 ||
		     vcu_check_gce_pa_base(q, addr, 4) != NULL) &&
		    vcu_check_gce_pa_base(q, data, 4) != NULL)
			cmdq_pkt_mem_move(pkt, vcu->clt_base, addr,
					  data, CMDQ_THR_SPR_IDX1);
		else
			pr_info("[VCU] CMD_MEM_MV wrong addr/data: 0x%llx 0x%llx\n",
				addr, data);
		break;
	case CMD_POLL_ADDR:
		if (vcu_check_reg_base(vcu, addr, 4) == 0 ||
		    vcu_check_gce_pa_base(q, addr, 4) != NULL)
			cmdq_pkt_poll_timeout(pkt, data, SUBSYS_NO_SUPPORT,
					      addr, mask, ~0, gpr);
		else
			pr_info("[VCU] CMD_POLL_ADDR wrong addr: 0x%llx 0x%llx 0x%x\n",
				addr, data, mask);
		break;
	default:
		vcu_dbg_log("[VCU] unknown GCE cmd %d\n", cmd);
		break;
	}
}
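/*
 * Illustration (made-up values): the daemon hands down a table of
 * abstract commands which the dispatcher above replays onto a cmdq
 * packet, e.g.
 *
 *	cmd[0] = CMD_WRITE,      addr[0] = venc_base + 0x100, data[0] = 1
 *	cmd[1] = CMD_WAIT_EVENT, data[1] = enc_done_event_index
 *	cmd[2] = CMD_READ,       addr[2] = venc_base + 0x1d0
 *
 * becomes cmdq_pkt_write(), cmdq_pkt_wfe() and cmdq_pkt_read_addr()
 * calls. Register and memory commands are gated by vcu_check_reg_base()
 * or vcu_check_gce_pa_base(), so the daemon can only touch whitelisted
 * hardware registers and its own allocated pages.
 */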
static void vcu_gce_flush_callback(struct cmdq_cb_data data)
{
	int i, j;
	struct gce_callback_data *buff;
	struct mtk_vcu *vcu;
	unsigned int core_id;

	buff = (struct gce_callback_data *)data.data;
	i = (buff->cmdq_buff.codec_type == VCU_VDEC) ? VCU_VDEC : VCU_VENC;
	core_id = buff->cmdq_buff.core_id;

	vcu = buff->vcu_ptr;
	j = vcu_gce_get_inst_id(buff->cmdq_buff.gce_handle);
	if (j < 0) {
		pr_info("[VCU] flush_callback get_inst_id fail!!%d\n", j);
		return;
	}

	atomic_inc(&vcu->gce_info[j].flush_done);
	atomic_dec(&vcu->gce_info[j].flush_pending);

	mutex_lock(&vcu->vcu_gce_mutex[i]);
	venc_encode_pmqos_gce_end(vcu->gce_info[j].v4l2_ctx, core_id,
				  vcu->gce_job_cnt[i][core_id].counter);
	if (atomic_dec_and_test(&vcu->gce_job_cnt[i][core_id]) &&
	    vcu->gce_info[j].v4l2_ctx != NULL) {
		if (i == VCU_VENC)
			venc_encode_unprepare(vcu->gce_info[j].v4l2_ctx,
					      buff->cmdq_buff.core_id, &vcu->flags[i]);
	}
	mutex_unlock(&vcu->vcu_gce_mutex[i]);

	wake_up(&vcu->gce_wq[i]);

	vcu_dbg_log("[VCU][%d] %s: buff %p type %d order %d handle %llx\n",
		    core_id, __func__, buff, buff->cmdq_buff.codec_type,
		    buff->cmdq_buff.flush_order, buff->cmdq_buff.gce_handle);

	cmdq_pkt_destroy(buff->pkt_ptr);
}

static void vcu_gce_timeout_callback(struct cmdq_cb_data data)
{
	struct gce_callback_data *buff;
	struct mtk_vcu *vcu;
	struct list_head *p, *q;
	struct mtk_vcu_queue *vcu_queue;
	struct vcu_pa_pages *tmp;

	buff = (struct gce_callback_data *)data.data;
	vcu = buff->vcu_ptr;
	vcu_queue = buff->vcu_queue;
	vcu_dbg_log("%s: buff %p vcu: %p, codec_type: %d\n",
		    __func__, buff, vcu, buff->cmdq_buff.codec_type);

	if (buff->cmdq_buff.codec_type == VCU_VENC)
		mtk_vcodec_gce_timeout_dump(vcu->curr_ctx[VCU_VENC]);
	else if (buff->cmdq_buff.codec_type == VCU_VDEC)
		mtk_vcodec_gce_timeout_dump(vcu->curr_ctx[VCU_VDEC]);

	list_for_each_safe(p, q, &vcu_queue->pa_pages.list) {
		tmp = list_entry(p, struct vcu_pa_pages, list);
		pr_info("%s: vcu_pa_pages %lx kva %lx data %lx\n",
			__func__, tmp->pa, tmp->kva,
			*(unsigned long *)tmp->kva);
	}
}

static int vcu_gce_cmd_flush(struct mtk_vcu *vcu,
			     struct mtk_vcu_queue *q, unsigned long arg)
{
	int i, j, ret;
	unsigned char *user_data_addr = NULL;
	struct gce_callback_data buff;
	struct cmdq_pkt *pkt_ptr;
	struct cmdq_client *cl;
	struct gce_cmds *cmds;
	unsigned int suspend_block_cnt = 0;
	unsigned int core_id;

	vcu_dbg_log("[VCU] %s +\n", __func__);

	time_check_start();
	user_data_addr = (unsigned char *)arg;
	ret = (long)copy_from_user(&buff.cmdq_buff, user_data_addr,
				   (unsigned long)sizeof(struct gce_cmdq_obj));
	if (ret != 0L) {
		pr_info("[VCU] %s(%d) gce_cmdq_obj copy_from_user failed!%d\n",
			__func__, __LINE__, ret);
		return -EINVAL;
	}

	i = (buff.cmdq_buff.codec_type == VCU_VDEC) ? VCU_VDEC : VCU_VENC;
	cmds = vcu->gce_cmds[i];

	user_data_addr = (unsigned char *)
		(unsigned long)buff.cmdq_buff.cmds_user_ptr;
	ret = (long)copy_from_user(cmds, user_data_addr,
				   (unsigned long)sizeof(struct gce_cmds));
	if (ret != 0L) {
		pr_info("[VCU] %s(%d) gce_cmds copy_from_user failed!%d\n",
			__func__, __LINE__, ret);
		return -EINVAL;
	}

	buff.cmdq_buff.cmds_user_ptr = (u64)(unsigned long)cmds;
	core_id = buff.cmdq_buff.core_id;

	if (buff.cmdq_buff.codec_type >= VCU_CODEC_MAX ||
	    core_id >=
	    vcu->gce_th_num[buff.cmdq_buff.codec_type]) {
		pr_info("[VCU] %s invalid core(th) id %d\n",
			__func__, core_id);
		return -EINVAL;
	}

	cl = (buff.cmdq_buff.codec_type == VCU_VDEC) ?
		vcu->clt_vdec[core_id] :
		vcu->clt_venc[core_id];

#if defined(CONFIG_MTK_SEC_VIDEO_PATH_SUPPORT)
	if (buff.cmdq_buff.codec_type == VCU_VENC) {
		if (buff.cmdq_buff.secure != 0)
			cl = vcu->clt_venc_sec[0];
	}
#endif

	if (cl == NULL) {
		pr_info("[VCU] %s gce thread is null id %d type %d\n",
			__func__, core_id,
			buff.cmdq_buff.codec_type);
		return -EINVAL;
	}

	buff.vcu_ptr = vcu;
	buff.vcu_queue = q;

	while (vcu_ptr->is_entering_suspend == 1) {
		suspend_block_cnt++;
		if (suspend_block_cnt > 500) {
			pr_info("[VCU] gce_flush blocked by suspend\n");
			suspend_block_cnt = 0;
		}
		usleep_range(10000, 20000);
	}

	j = vcu_gce_get_inst_id(buff.cmdq_buff.gce_handle);
	if (j < 0)
		j = vcu_gce_set_inst_id(vcu->curr_ctx[i],
					buff.cmdq_buff.gce_handle);
	if (j < 0)
		return -EINVAL;

	time_check_end(100, strlen(vcodec_param_string));
	time_check_start();
	mutex_lock(&vcu->vcu_gce_mutex[i]);
	if (atomic_read(&vcu->gce_job_cnt[i][core_id]) == 0 &&
	    vcu->gce_info[j].v4l2_ctx != NULL) {
		if (i == VCU_VENC) {
			venc_encode_prepare(vcu->gce_info[j].v4l2_ctx,
					    core_id, &vcu->flags[i]);
		}
	}
	vcu_dbg_log("vcu gce_info[%d].v4l2_ctx %p\n",
		    j, vcu->gce_info[j].v4l2_ctx);

	if (i == VCU_VENC) {
		venc_encode_pmqos_gce_begin(vcu->gce_info[j].v4l2_ctx, core_id,
					    vcu->gce_job_cnt[i][core_id].counter);
	}
	atomic_inc(&vcu->gce_job_cnt[i][core_id]);
	mutex_unlock(&vcu->vcu_gce_mutex[i]);
	time_check_end(100, strlen(vcodec_param_string));

	time_check_start();
	pkt_ptr = cmdq_pkt_create(cl);
	if (IS_ERR_OR_NULL(pkt_ptr)) {
		pr_info("[VCU] cmdq_pkt_create fail\n");
		pkt_ptr = NULL;
	}
	buff.pkt_ptr = pkt_ptr;

	if (cmds->cmd_cnt >= VCODEC_CMDQ_CMD_MAX) {
		pr_info("[VCU] cmd_cnt (%d) overflow!!\n", cmds->cmd_cnt);
		cmds->cmd_cnt = VCODEC_CMDQ_CMD_MAX;
		ret = -EINVAL;
	}

#if defined(CONFIG_MTK_SEC_VIDEO_PATH_SUPPORT)
	if (buff.cmdq_buff.codec_type == VCU_VENC) {
		if (buff.cmdq_buff.secure != 0) {
			const u64 dapc_engine =
				(1LL << CMDQ_SEC_VENC_BSDMA) |
				(1LL << CMDQ_SEC_VENC_CUR_LUMA) |
				(1LL << CMDQ_SEC_VENC_CUR_CHROMA) |
				(1LL << CMDQ_SEC_VENC_REF_LUMA) |
				(1LL << CMDQ_SEC_VENC_REF_CHROMA) |
				(1LL << CMDQ_SEC_VENC_REC) |
				(1LL << CMDQ_SEC_VENC_SV_COMV) |
				(1LL << CMDQ_SEC_VENC_RD_COMV);
			const u64 port_sec_engine =
				(1LL << CMDQ_SEC_VENC_BSDMA) |
				(1LL << CMDQ_SEC_VENC_CUR_LUMA) |
				(1LL << CMDQ_SEC_VENC_CUR_CHROMA) |
				(1LL << CMDQ_SEC_VENC_REF_LUMA) |
				(1LL << CMDQ_SEC_VENC_REF_CHROMA) |
				(1LL << CMDQ_SEC_VENC_REC) |
				(1LL << CMDQ_SEC_VENC_SV_COMV) |
				(1LL << CMDQ_SEC_VENC_RD_COMV);

			pr_debug("[VCU] dapc_engine: 0x%llx, port_sec_engine: 0x%llx\n",
				 dapc_engine, port_sec_engine);

#if defined(CONFIG_MTK_CMDQ_MBOX_EXT)
			cmdq_sec_pkt_set_data(pkt_ptr, dapc_engine,
				port_sec_engine, CMDQ_SEC_KERNEL_CONFIG_GENERAL,
				CMDQ_METAEX_VENC);
#endif
		}
	}
#endif

	for (i = 0; i < cmds->cmd_cnt; i++) {
		vcu_set_gce_cmd(pkt_ptr, vcu, q, cmds->cmd[i],
				cmds->addr[i], cmds->data[i],
				cmds->mask[i], vcu->gce_gpr[core_id],
				cmds->dma_offset[i], cmds->dma_size[i]);
	}

	i = buff.cmdq_buff.flush_order % GCE_PENDING_CNT;
	memcpy(&vcu_ptr->gce_info[j].buff[i], &buff, sizeof(buff));

	pkt_ptr->err_cb.cb =
		(buff.cmdq_buff.secure == 0) ? vcu_gce_timeout_callback : NULL;
	pkt_ptr->err_cb.data = (void *)&vcu_ptr->gce_info[j].buff[i];

	pr_info("[VCU][%d] %s: buff %p type %d cnt %d order %d hndl %llx %d %d\n",
		core_id, __func__, &vcu_ptr->gce_info[j].buff[i],
		buff.cmdq_buff.codec_type,
		cmds->cmd_cnt, buff.cmdq_buff.flush_order,
		buff.cmdq_buff.gce_handle, ret, j);

	/* flush cmd async */
	cmdq_pkt_flush_threaded(pkt_ptr,
		vcu_gce_flush_callback, (void *)&vcu_ptr->gce_info[j].buff[i]);
	atomic_inc(&vcu_ptr->gce_info[j].flush_pending);
	time_check_end(100, strlen(vcodec_param_string));

	return ret;
}

static int vcu_wait_gce_callback(struct mtk_vcu *vcu, unsigned long arg)
{
	int ret, i, j;
	unsigned char *user_data_addr = NULL;
	struct gce_obj obj;

	user_data_addr = (unsigned char *)arg;
	ret = (long)copy_from_user(&obj, user_data_addr,
				   (unsigned long)sizeof(struct gce_obj));
	if (ret != 0L) {
		pr_info("[VCU] %s(%d) copy_from_user failed!%d\n",
			__func__, __LINE__, ret);
		return -EINVAL;
	}

	i = (obj.codec_type == VCU_VDEC) ? VCU_VDEC : VCU_VENC;
	vcu_dbg_log("[VCU] %s: type %d handle %llx\n",
		    __func__, obj.codec_type, obj.gce_handle);

	/* use wait_event_interruptible, not the freezable variant, because
	 * in the slow-motion GCE case vcu_gce_cmd_flush holds a mutex in
	 * the user process, which cannot be frozen
	 */
	j = vcu_gce_get_inst_id(obj.gce_handle);
	if (j < 0)
		return -EINVAL;

	ret = wait_event_interruptible(vcu->gce_wq[i],
		atomic_read(&vcu->gce_info[j].flush_done) > 0);
	if (ret != 0) {
		pr_info("[VCU][%d][%d] wait event return %d @%s\n",
			vcu->vcuid, i, ret, __func__);
		return ret;
	}
	atomic_dec(&vcu->gce_info[j].flush_done);

	return ret;
}
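/*
 * Pairing sketch (illustrative; the ioctl command names are assumptions
 * based on the companion uapi header, which is not shown in this file):
 * user space drives the GCE path with a flush/wait pair, conceptually
 *
 *	ioctl(fd, VCU_GCE_SET_CMD_FLUSH, &gce_cmdq_obj);  // vcu_gce_cmd_flush()
 *	ioctl(fd, VCU_GCE_WAIT_CALLBACK, &gce_obj);       // vcu_wait_gce_callback()
 *
 * Each flush bumps flush_pending; the cmdq flush callback moves that
 * count to flush_done and wakes gce_wq; each wait consumes one
 * flush_done.
 */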
int vcu_get_ctx_ipi_binding_lock(struct platform_device *pdev,
				 struct mutex **mutex, unsigned long type)
{
	struct mtk_vcu *vcu = platform_get_drvdata(pdev);

	*mutex = &vcu->ctx_ipi_binding[type];

	return 0;
}

int vcu_set_codec_ctx(struct platform_device *pdev,
		      void *codec_ctx, struct vb2_buffer *src_vb,
		      struct vb2_buffer *dst_vb, unsigned long type)
{
	struct mtk_vcu *vcu = platform_get_drvdata(pdev);

	pr_debug("[VCU] type %lu vcu_set_codec_ctx %p src_vb %p dst_vb %p\n",
		 type, codec_ctx, src_vb, dst_vb);
	vcu_dbg_log("[VCU] %s %p type %lu src_vb %p dst_vb %p\n",
		    __func__, codec_ctx, type, src_vb, dst_vb);
	vcu->curr_ctx[type] = codec_ctx;
	vcu->curr_src_vb[type] = src_vb;
	vcu->curr_dst_vb[type] = dst_vb;

	return 0;
}

int vcu_clear_codec_ctx(struct platform_device *pdev,
			void *codec_ctx, unsigned long type)
{
	struct mtk_vcu *vcu = platform_get_drvdata(pdev);

	vcu_dbg_log("[VCU] %s %p type %lu\n", __func__, codec_ctx, type);
	mutex_lock(&vcu->vcu_gce_mutex[type]);
	vcu_gce_clear_inst_id(codec_ctx);
	vcu->curr_ctx[type] = NULL;
	vcu->curr_src_vb[type] = NULL;
	vcu->curr_dst_vb[type] = NULL;
	mutex_unlock(&vcu->vcu_gce_mutex[type]);

	return 0;
}

unsigned int vcu_get_vdec_hw_capa(struct platform_device *pdev)
{
	struct mtk_vcu *vcu = platform_get_drvdata(pdev);

	return vcu->run.dec_capability;
}
EXPORT_SYMBOL_GPL(vcu_get_vdec_hw_capa);

unsigned int vcu_get_venc_hw_capa(struct platform_device *pdev)
{
	struct mtk_vcu *vcu = platform_get_drvdata(pdev);

	return vcu->run.enc_capability;
}
EXPORT_SYMBOL_GPL(vcu_get_venc_hw_capa);

void *vcu_mapping_dm_addr(struct platform_device *pdev,
			  uintptr_t dtcm_dmem_addr)
{
	struct mtk_vcu *vcu;
	uintptr_t d_vma, d_va_start;
	uintptr_t d_off, d_va;

	if (!IS_ERR_OR_NULL(pdev)) {
		vcu = platform_get_drvdata(pdev);
	} else {
		/* pdev is invalid here, so don't dereference it for logging */
		pr_info("[VCU] %s: Invalid pdev\n", __func__);
		return NULL;
	}

	d_vma = (uintptr_t)(dtcm_dmem_addr);
	d_va_start = (uintptr_t)VCU_DMEM0_VIRT(vcu);
	d_off = d_vma - VCU_DMEM0_VMA(vcu);

	if (dtcm_dmem_addr == 0UL || d_off > VCU_DMEM0_LEN(vcu)) {
		dev_info(&pdev->dev, "[VCU] %s: Invalid vma 0x%lx len %lx\n",
			 __func__, dtcm_dmem_addr, VCU_DMEM0_LEN(vcu));
		return NULL;
	}

	d_va = d_va_start + d_off;
	dev_dbg(&pdev->dev, "[VCU] %s: 0x%lx -> 0x%lx\n",
		__func__, d_vma, d_va);

	return (void *)d_va;
}
EXPORT_SYMBOL_GPL(vcu_mapping_dm_addr);
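/*
 * Example with illustrative numbers: if the daemon mapped the data
 * extend region at d_vma = 0x7f000000 and the kernel allocation lives
 * at d_va = 0xffffffc012340000, then a daemon-side pointer 0x7f000010
 * translates to the kernel pointer 0xffffffc012340010, i.e.
 * d_va + (dtcm_dmem_addr - d_vma), bounds-checked against d_len.
 */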
struct platform_device *vcu_get_plat_device(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *vcu_node = NULL;
	struct platform_device *vcu_pdev = NULL;

	dev_dbg(&pdev->dev, "[VCU] %s\n", __func__);

	vcu_node = of_parse_phandle(dev->of_node, "mediatek,vcu", 0);
	if (vcu_node == NULL) {
		dev_err(dev, "[VCU] can't get vcu node\n");
		return NULL;
	}

	vcu_pdev = of_find_device_by_node(vcu_node);
	if (WARN_ON(vcu_pdev == NULL) == true) {
		dev_err(dev, "[VCU] vcu pdev failed\n");
		of_node_put(vcu_node);
		return NULL;
	}

	return vcu_pdev;
}
EXPORT_SYMBOL_GPL(vcu_get_plat_device);

int vcu_load_firmware(struct platform_device *pdev)
{
	if (pdev == NULL) {
		/* pdev is NULL here, so don't dereference it for logging */
		pr_err("[VCU] VCU platform device is invalid\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(vcu_load_firmware);

int vcu_compare_version(struct platform_device *pdev,
			const char *expected_version)
{
	struct mtk_vcu *vcu = platform_get_drvdata(pdev);
	int cur_major, cur_minor, cur_build, cur_rel, cur_ver_num;
	int major, minor, build, rel, ver_num;
	char *cur_version = vcu->run.fw_ver;

	cur_ver_num = sscanf(cur_version, "%d.%d.%d-rc%d",
			     &cur_major, &cur_minor, &cur_build, &cur_rel);
	if (cur_ver_num < 3)
		return -1;
	ver_num = sscanf(expected_version, "%d.%d.%d-rc%d",
			 &major, &minor, &build, &rel);
	if (ver_num < 3)
		return -1;

	if (cur_major < major)
		return -1;
	if (cur_major > major)
		return 1;

	if (cur_minor < minor)
		return -1;
	if (cur_minor > minor)
		return 1;

	if (cur_build < build)
		return -1;
	if (cur_build > build)
		return 1;

	if (cur_ver_num < ver_num)
		return -1;
	if (cur_ver_num > ver_num)
		return 1;

	if (ver_num > 3) {
		if (cur_rel < rel)
			return -1;
		if (cur_rel > rel)
			return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(vcu_compare_version);
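/*
 * Example (values are illustrative): with a running firmware version of
 * "1.2.3-rc4",
 *
 *	vcu_compare_version(pdev, "1.2.3")     ->  1  (4 fields beat 3)
 *	vcu_compare_version(pdev, "1.2.3-rc4") ->  0  (equal)
 *	vcu_compare_version(pdev, "1.2.4-rc1") -> -1  (current is older)
 *
 * i.e. the sign of (current - expected), comparing major, minor, build,
 * then the rc number when both versions carry one.
 */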
  1060. void vcu_get_file_lock(void)
  1061. {
  1062. mutex_lock(&vpud_file_mutex);
  1063. }
  1064. EXPORT_SYMBOL_GPL(vcu_get_file_lock);
  1065. void vcu_put_file_lock(void)
  1066. {
  1067. mutex_unlock(&vpud_file_mutex);
  1068. }
  1069. EXPORT_SYMBOL_GPL(vcu_put_file_lock);
  1070. int vcu_get_sig_lock(unsigned long *flags)
  1071. {
  1072. return spin_trylock_irqsave(&vcu_ptr->vpud_sig_lock, *flags);
  1073. }
  1074. EXPORT_SYMBOL_GPL(vcu_get_sig_lock);
  1075. void vcu_put_sig_lock(unsigned long flags)
  1076. {
  1077. spin_unlock_irqrestore(&vcu_ptr->vpud_sig_lock, flags);
  1078. }
  1079. EXPORT_SYMBOL_GPL(vcu_put_sig_lock);
  1080. int vcu_check_vpud_alive(void)
  1081. {
  1082. return (vcu_ptr->vpud_is_going_down > 0) ? 0:1;
  1083. }
  1084. EXPORT_SYMBOL_GPL(vcu_check_vpud_alive);
  1085. void vcu_get_task(struct task_struct **task, struct files_struct **f,
  1086. int reset)
  1087. {
  1088. vcu_dbg_log("mtk_vcu_get_task %p\n", vcud_task);
  1089. if (reset == 1) {
  1090. vcud_task = NULL;
  1091. files = NULL;
  1092. }
  1093. *task = vcud_task;
  1094. *f = files;
  1095. }
  1096. EXPORT_SYMBOL_GPL(vcu_get_task);
  1097. static int vcu_ipi_handler(struct mtk_vcu *vcu, struct share_obj *rcv_obj)
  1098. {
  1099. struct vcu_ipi_desc *ipi_desc = vcu->ipi_desc;
  1100. int non_ack = 0;
  1101. int ret = -1;
  1102. unsigned int i = 0;
  1103. i = ipi_id_to_inst_id(rcv_obj->id);
  1104. if (rcv_obj->id < (int)IPI_MAX &&
  1105. ipi_desc[rcv_obj->id].handler != NULL) {
  1106. non_ack = ipi_desc[rcv_obj->id].handler(rcv_obj->share_buf,
  1107. rcv_obj->len,
  1108. ipi_desc[rcv_obj->id].priv);
  1109. if (rcv_obj->id > (int)IPI_VCU_INIT && non_ack == 0) {
  1110. vcu->ipi_id_ack[rcv_obj->id] = true;
  1111. wake_up(&vcu->ack_wq[i]);
  1112. }
  1113. ret = 0;
  1114. } else
  1115. dev_err(vcu->dev, "[VCU] No such ipi id = %d\n", rcv_obj->id);
  1116. return ret;
  1117. }
  1118. static int vcu_ipi_init(struct mtk_vcu *vcu)
  1119. {
  1120. vcu->is_open = false;
  1121. mutex_init(&vcu->vcu_mutex[VCU_VDEC]);
  1122. mutex_init(&vcu->vcu_mutex[VCU_VENC]);
  1123. mutex_init(&vcu->vcu_gce_mutex[VCU_VDEC]);
  1124. mutex_init(&vcu->vcu_gce_mutex[VCU_VENC]);
  1125. mutex_init(&vcu->ctx_ipi_binding[VCU_VDEC]);
  1126. mutex_init(&vcu->ctx_ipi_binding[VCU_VENC]);
  1127. mutex_init(&vcu->vcu_share);
  1128. mutex_init(&vpud_file_mutex);
  1129. return 0;
  1130. }
  1131. static int vcu_init_ipi_handler(void *data, unsigned int len, void *priv)
  1132. {
  1133. struct mtk_vcu *vcu = (struct mtk_vcu *)priv;
  1134. struct vcu_run *run = (struct vcu_run *)data;
  1135. int wait_cnt = 0;
  1136. /* handle uninitialize message */
  1137. if (vcu->run.signaled == 1u && run->signaled == 0u) {
  1138. /* smi debug dump before wake up ack to worker
  1139. * which will send error event to omx
  1140. * to avoid omx release and disable larb
  1141. * which may cause smi dump devapc
  1142. */
  1143. //smi_debug_bus_hang_detect(0, "VDEC");
  1144. int i;
  1145. /* wake up the threads in daemon
  1146. * clear all pending ipi_msg
  1147. * release worker waiting timeout
  1148. */
  1149. vcu->abort = true;
  1150. for (i = 0; i < IPI_MAX; i++)
  1151. vcu->ipi_id_ack[i] = true;
  1152. /* wait for GCE done & let IPI ack power off */
  1153. while (
  1154. atomic_read(&vcu_ptr->gce_job_cnt[VCU_VDEC][0]) > 0 ||
  1155. atomic_read(&vcu_ptr->gce_job_cnt[VCU_VDEC][1]) > 0 ||
  1156. atomic_read(&vcu_ptr->gce_job_cnt[VCU_VENC][0]) > 0 ||
  1157. atomic_read(&vcu_ptr->gce_job_cnt[VCU_VENC][1]) > 0) {
  1158. wait_cnt++;
  1159. if (wait_cnt > 5) {
  1160. pr_info("[VCU] Vpud killed gce status %d %d\n",
  1161. atomic_read(
  1162. &vcu_ptr->gce_job_cnt[VCU_VDEC][0]),
  1163. atomic_read(
  1164. &vcu_ptr->gce_job_cnt[VCU_VENC][0]));
  1165. break;
  1166. }
  1167. usleep_range(10000, 20000);
  1168. }
  1169. for (i = 0; i < 2; i++) {
  1170. atomic_set(&vcu->ipi_got[i], 1);
  1171. atomic_set(&vcu->ipi_done[i], 0);
  1172. memset(&vcu->user_obj[i], 0,
  1173. sizeof(struct share_obj));
  1174. wake_up(&vcu->get_wq[i]);
  1175. wake_up(&vcu->ack_wq[i]);
  1176. }
  1177. atomic_set(&vcu->vdec_log_got, 1);
  1178. wake_up(&vcu->vdec_log_get_wq);
  1179. vcud_task = NULL;
  1180. files = NULL;
  1181. dev_info(vcu->dev, "[VCU] vpud killing\n");
  1182. return 0;
  1183. }
  1184. vcu->run.signaled = run->signaled;
  1185. strncpy(vcu->run.fw_ver, run->fw_ver, VCU_FW_VER_LEN);
  1186. vcu->run.dec_capability = run->dec_capability;
  1187. vcu->run.enc_capability = run->enc_capability;
  1188. dev_dbg(vcu->dev, "[VCU] fw ver: %s\n", vcu->run.fw_ver);
  1189. dev_dbg(vcu->dev, "[VCU] dec cap: %x\n", vcu->run.dec_capability);
  1190. dev_dbg(vcu->dev, "[VCU] enc cap: %x\n", vcu->run.enc_capability);
  1191. return 0;
  1192. }
  1193. static int mtk_vcu_open(struct inode *inode, struct file *file)
  1194. {
  1195. int vcuid = 0;
  1196. struct mtk_vcu_queue *vcu_queue;
  1197. if (strcmp(current->comm, "camd") == 0)
  1198. vcuid = 2;
  1199. else if (strcmp(current->comm, "mdpd") == 0)
  1200. vcuid = 1;
  1201. else if (strcmp(current->comm, "vpud") == 0) {
  1202. vcud_task = current;
  1203. files = vcud_task->files;
  1204. vcuid = 0;
  1205. } else if (strcmp(current->comm, "vdec_srv") == 0 ||
  1206. strcmp(current->comm, "venc_srv") == 0) {
  1207. vcuid = 0;
  1208. } else {
  1209. pr_info("[VCU] thread name: %s\n", current->comm);
  1210. }
  1211. vcu_mtkdev[vcuid]->vcuid = vcuid;
  1212. if (IS_ERR_OR_NULL(vcu_mtkdev[vcuid]->clt_vdec[0]))
  1213. return -EINVAL;
  1214. vcu_queue = mtk_vcu_mem_init(vcu_mtkdev[vcuid]->dev,
  1215. vcu_mtkdev[vcuid]->clt_vdec[0]->chan->mbox->dev);
  1216. if (vcu_queue == NULL)
  1217. return -ENOMEM;
  1218. vcu_queue->vcu = vcu_mtkdev[vcuid];
  1219. file->private_data = vcu_queue;
  1220. vcu_ptr->vpud_killed.count = 0;
  1221. vcu_ptr->open_cnt++;
  1222. vcu_ptr->abort = false;
  1223. vcu_ptr->vpud_is_going_down = 0;
  1224. pr_info("[VCU] %s name: %s pid %d open_cnt %d\n", __func__,
  1225. current->comm, current->tgid, vcu_ptr->open_cnt);
  1226. return 0;
  1227. }
  1228. static int mtk_vcu_release(struct inode *inode, struct file *file)
  1229. {
  1230. struct task_struct *task = NULL;
  1231. struct files_struct *f = NULL;
  1232. unsigned long flags;
  1233. if (file->private_data)
  1234. mtk_vcu_mem_release((struct mtk_vcu_queue *)file->private_data);
  1235. pr_info("[VCU] %s name: %s pid %d open_cnt %d\n", __func__,
  1236. current->comm, current->tgid, vcu_ptr->open_cnt);
  1237. vcu_ptr->open_cnt--;
  1238. if (vcu_ptr->open_cnt == 0) {
  1239. /* reset vpud due to abnormal situations. */
  1240. vcu_ptr->abort = true;
  1241. vcu_get_file_lock();
  1242. vcu_get_task(&task, &f, 1);
  1243. vcu_put_file_lock();
  1244. up(&vcu_ptr->vpud_killed); /* vdec worker */
  1245. up(&vcu_ptr->vpud_killed); /* venc worker */
  1246. /* reset vpud_is_going_down only on abnormal situations */
  1247. spin_lock_irqsave(&vcu_ptr->vpud_sig_lock, flags);
  1248. vcu_ptr->vpud_is_going_down = 0;
  1249. spin_unlock_irqrestore(&vcu_ptr->vpud_sig_lock, flags);
  1250. }
  1251. return 0;
  1252. }
  1253. static void vcu_free_d_ext_mem(struct mtk_vcu *vcu)
  1254. {
  1255. mutex_lock(&vcu->vcu_share);
  1256. mutex_lock(&vcu->vcu_mutex[VCU_VDEC]);
  1257. mutex_lock(&vcu->vcu_mutex[VCU_VENC]);
  1258. kfree(VCU_DMEM0_VIRT(vcu));
  1259. VCU_DMEM0_VIRT(vcu) = NULL;
  1260. mutex_unlock(&vcu->vcu_mutex[VCU_VENC]);
  1261. mutex_unlock(&vcu->vcu_mutex[VCU_VDEC]);
  1262. mutex_unlock(&vcu->vcu_share);
  1263. }
static int vcu_alloc_d_ext_mem(struct mtk_vcu *vcu, unsigned long len)
{
	mutex_lock(&vcu->vcu_share);
	mutex_lock(&vcu->vcu_mutex[VCU_VDEC]);
	mutex_lock(&vcu->vcu_mutex[VCU_VENC]);
	VCU_DMEM0_VIRT(vcu) = kmalloc(len, GFP_KERNEL);
	/* do not feed a failed allocation to virt_to_phys() */
	if (VCU_DMEM0_VIRT(vcu) != NULL) {
		VCU_DMEM0_PHY(vcu) = virt_to_phys(VCU_DMEM0_VIRT(vcu));
		VCU_DMEM0_LEN(vcu) = len;
	}
	mutex_unlock(&vcu->vcu_mutex[VCU_VENC]);
	mutex_unlock(&vcu->vcu_mutex[VCU_VDEC]);
	mutex_unlock(&vcu->vcu_share);

	if (!VCU_DMEM0_VIRT(vcu))
		return -ENOMEM;

	dev_dbg(vcu->dev,
		"[VCU] Data extend memory (len:%lu) phy=0x%llx virt=0x%p iova=0x%llx\n",
		VCU_DMEM0_LEN(vcu),
		(unsigned long long)VCU_DMEM0_PHY(vcu),
		VCU_DMEM0_VIRT(vcu),
		(unsigned long long)VCU_DMEM0_IOVA(vcu));

	return 0;
}

static void mtk_vcu_page_vm_close(struct vm_area_struct *vma)
{
	struct vcu_pa_pages *vcu_page =
		(struct vcu_pa_pages *)vma->vm_private_data;

	if (atomic_read(&vcu_page->ref_cnt) > 0)
		atomic_dec(&vcu_page->ref_cnt);
	else
		pr_info("[VCU][Error] %s unmap fail\n", __func__);

	vcu_dbg_log("[VCU] %s vma->start 0x%lx, end 0x%lx, pgoff 0x%lx\n",
		__func__, vma->vm_start, vma->vm_end, vma->vm_pgoff);
}

const struct vm_operations_struct mtk_vcu_page_vm_ops = {
	.close = mtk_vcu_page_vm_close,
};

static void mtk_vcu_buf_vm_close(struct vm_area_struct *vma)
{
	void *mem_priv = (void *)vma->vm_private_data;
	struct file *file = vma->vm_file;
	struct mtk_vcu_queue *vcu_queue =
		(struct mtk_vcu_queue *)file->private_data;

	mtk_vcu_buffer_ref_dec(vcu_queue, mem_priv);
	vcu_dbg_log("[VCU] %s vma->start 0x%lx, end 0x%lx, pgoff 0x%lx\n",
		__func__, vma->vm_start, vma->vm_end, vma->vm_pgoff);
}

const struct vm_operations_struct mtk_vcu_buf_vm_ops = {
	.close = mtk_vcu_buf_vm_close,
};

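/*
 * mmap offset map: vma->vm_pgoff << PAGE_SHIFT selects the backing
 * store for the mapping:
 *   - map_buf_pa >= MAP_SHMEM_PA_BASE: GCE PA pages from
 *     VCU_PA_ALLOCATION (write-combined, refcounted via vm_ops)
 *   - below MAP_PA_BASE_1GB:           HW register ranges, uncached
 *                                      (only used by vcud)
 *   - MAP_SHMEM_ALLOC/COMMIT ranges:   the kmalloc'd DMEM0 share memory
 *   - MAP_SHMEM_MM(_CACHEABLE) range:  iova-backed codec buffers,
 *     remapped page by page through the IOMMU when present
 */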
static int mtk_vcu_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long length = vma->vm_end - vma->vm_start;
	unsigned long pa_start = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long pa_start_base = pa_start;
	unsigned long pa_end = pa_start + length;
#ifdef CONFIG_MTK_IOMMU_V2
	unsigned long start = vma->vm_start;
	unsigned long pos = 0;
#endif
	struct mtk_vcu *vcu_dev;
	struct mtk_vcu_queue *vcu_queue =
		(struct mtk_vcu_queue *)file->private_data;
	struct mem_obj mem_buff_data;
	struct vb2_buffer *src_vb, *dst_vb;
	void *ret = NULL;

	vcu_dev = (struct mtk_vcu *)vcu_queue->vcu;
	vcu_dbg_log("[VCU] %s vma->start 0x%lx, end 0x%lx, pgoff 0x%lx\n",
		__func__, vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/*
	 * Handle the map-PA case first, since on 32-bit projects the PA
	 * may be smaller than MAP_PA_BASE_1GB.
	 */
	if (vcu_queue->map_buf_pa >= MAP_SHMEM_PA_BASE) {
		vcu_queue->map_buf_pa = 0;
		ret = vcu_check_gce_pa_base(vcu_queue, pa_start, length);
		if (ret != NULL) {
			atomic_inc(&((struct vcu_pa_pages *)ret)->ref_cnt);
			vma->vm_ops = &mtk_vcu_page_vm_ops;
			vma->vm_private_data = ret;
			vma->vm_pgoff = pa_start >> PAGE_SHIFT;
			vma->vm_page_prot =
				pgprot_writecombine(vma->vm_page_prot);
			goto valid_map;
		}
		pr_info("[VCU] map pa fail with pa_start=0x%lx\n",
			pa_start);
		return -EINVAL;
	}

	/* only vcud needs this case */
	if (pa_start < MAP_PA_BASE_1GB) {
		if (vcu_check_reg_base(vcu_dev, pa_start, length) == 0) {
			vma->vm_pgoff = pa_start >> PAGE_SHIFT;
			goto reg_valid_map;
		}
	}

	if (pa_start >= MAP_SHMEM_ALLOC_BASE && pa_end <= MAP_SHMEM_ALLOC_END) {
		vma->vm_pgoff =
			(unsigned long)(VCU_DMEM0_PHY(vcu_dev) >> PAGE_SHIFT);
		goto valid_map;
	}

	if (pa_start >= MAP_SHMEM_COMMIT_BASE &&
	    pa_end <= MAP_SHMEM_COMMIT_END) {
		VCU_DMEM0_VMA(vcu_dev) = vma->vm_start;
		vma->vm_pgoff =
			(unsigned long)(VCU_DMEM0_PHY(vcu_dev) >> PAGE_SHIFT);
		goto valid_map;
	}

	if (pa_start_base >= MAP_SHMEM_MM_BASE &&
	    pa_start_base < MAP_SHMEM_PA_BASE) {
		if (pa_start_base >= MAP_SHMEM_MM_CACHEABLE_BASE)
			pa_start -= MAP_SHMEM_MM_CACHEABLE_BASE;
		else
			pa_start -= MAP_SHMEM_MM_BASE;

		mem_buff_data.iova = (vcu_ptr->iommu_padding) ?
			(pa_start | 0x100000000UL) : pa_start;
		mem_buff_data.len = length;
		src_vb = NULL;
		dst_vb = NULL;
		if (strcmp(current->comm, "vdec_srv") == 0) {
			src_vb = vcu_dev->curr_src_vb[VCU_VDEC];
			dst_vb = vcu_dev->curr_dst_vb[VCU_VDEC];
		} else if (strcmp(current->comm, "venc_srv") == 0) {
			src_vb = vcu_dev->curr_src_vb[VCU_VENC];
			dst_vb = vcu_dev->curr_dst_vb[VCU_VENC];
		}

		ret = mtk_vcu_set_buffer(vcu_queue, &mem_buff_data,
			src_vb, dst_vb);
		if (!IS_ERR_OR_NULL(ret)) {
			vma->vm_ops = &mtk_vcu_buf_vm_ops;
			vma->vm_private_data = ret;
			vma->vm_file = file;
		}
#ifdef CONFIG_MTK_IOMMU_V2
		while (length > 0) {
			vma->vm_pgoff = iommu_iova_to_phys(vcu_dev->io_domain,
				(vcu_ptr->iommu_padding) ?
				((pa_start + pos) | 0x100000000UL) :
				(pa_start + pos));
			if (vma->vm_pgoff == 0) {
				dev_info(vcu_dev->dev, "[VCU] iommu_iova_to_phys fail\n");
				return -EINVAL;
			}
			vma->vm_pgoff >>= PAGE_SHIFT;
			if (pa_start_base < MAP_SHMEM_MM_CACHEABLE_BASE) {
				vma->vm_page_prot =
					pgprot_writecombine(vma->vm_page_prot);
			}
			/* remap_pfn_range() returns 0 on success */
			if (remap_pfn_range(vma, start, vma->vm_pgoff,
					PAGE_SIZE, vma->vm_page_prot) != 0)
				return -EAGAIN;
			start += PAGE_SIZE;
			pos += PAGE_SIZE;
			if (length > PAGE_SIZE)
				length -= PAGE_SIZE;
			else
				length = 0;
		}
		return 0;
#endif
	}

	dev_info(vcu_dev->dev, "[VCU] Invalid argument\n");
	return -EINVAL;

reg_valid_map:
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
valid_map:
	dev_dbg(vcu_dev->dev, "[VCU] Mapping pgoff 0x%lx\n", vma->vm_pgoff);
	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			vma->vm_end - vma->vm_start,
			vma->vm_page_prot) != 0)
		return -EAGAIN;

	return 0;
}

static long mtk_vcu_unlocked_ioctl(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	long ret = -1;
	void *mem_priv;
	unsigned char *user_data_addr = NULL;
	struct mtk_vcu *vcu_dev;
	struct device *dev;
	struct share_obj share_buff_data;
	struct mem_obj mem_buff_data;
	struct mtk_vcu_queue *vcu_queue =
		(struct mtk_vcu_queue *)file->private_data;

	vcu_dev = (struct mtk_vcu *)vcu_queue->vcu;
	dev = vcu_dev->dev;

	switch (cmd) {
	case VCU_SET_OBJECT:
		user_data_addr = (unsigned char *)arg;
		ret = (long)copy_from_user(&share_buff_data, user_data_addr,
			(unsigned long)sizeof(struct share_obj));
		if (ret != 0L || share_buff_data.id >= (int)IPI_MAX ||
		    share_buff_data.id < (int)IPI_VCU_INIT) {
			pr_info("[VCU] %s(%d) Copy data from user failed!\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		ret = vcu_ipi_handler(vcu_dev, &share_buff_data);
		ret = (long)copy_to_user(user_data_addr, &share_buff_data,
			(unsigned long)sizeof(struct share_obj));
		if (ret != 0L) {
			pr_info("[VCU] %s(%d) Copy data to user failed!\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		break;
	case VCU_GET_OBJECT:
		ret = vcu_ipi_get(vcu_dev, arg);
		break;
	case VCU_GET_LOG_OBJECT:
		ret = vcu_log_get(vcu_dev, arg);
		break;
	case VCU_MVA_ALLOCATION:
	case VCU_PA_ALLOCATION:
		user_data_addr = (unsigned char *)arg;
		ret = (long)copy_from_user(&mem_buff_data, user_data_addr,
			(unsigned long)sizeof(struct mem_obj));
		if (ret != 0L) {
			pr_info("[VCU] %s(%d) Copy data from user failed!\n",
				__func__, __LINE__);
			return -EINVAL;
		}

		if (cmd == VCU_MVA_ALLOCATION)
			mem_priv = mtk_vcu_get_buffer(vcu_queue, &mem_buff_data);
		else
			mem_priv = mtk_vcu_get_page(vcu_queue, &mem_buff_data);

		if (IS_ERR_OR_NULL(mem_priv) == true) {
			mem_buff_data.va = (unsigned long)-1;
			mem_buff_data.pa = (unsigned long)-1;
			mem_buff_data.iova = (unsigned long)-1;
			ret = (long)copy_to_user(user_data_addr,
				&mem_buff_data,
				(unsigned long)sizeof(struct mem_obj));
			pr_info("[VCU] ALLOCATION %d failed!\n",
				cmd == VCU_MVA_ALLOCATION);
			return PTR_ERR(mem_priv);
		}

		vcu_dbg_log("[VCU] ALLOCATION %d va %llx, pa %llx, iova %llx\n",
			cmd == VCU_MVA_ALLOCATION, mem_buff_data.va,
			mem_buff_data.pa, mem_buff_data.iova);

		ret = (long)copy_to_user(user_data_addr, &mem_buff_data,
			(unsigned long)sizeof(struct mem_obj));
		if (ret != 0L) {
			pr_info("[VCU] %s(%d) Copy data to user failed!\n",
				__func__, __LINE__);
			return -EINVAL;
		}

		/* store the map-PA buffer type flag, used later in mmap */
		if (cmd == VCU_PA_ALLOCATION)
			vcu_queue->map_buf_pa = mem_buff_data.pa + MAP_SHMEM_PA_BASE;

		ret = 0;
		break;
	case VCU_MVA_FREE:
	case VCU_PA_FREE:
		user_data_addr = (unsigned char *)arg;
		ret = (long)copy_from_user(&mem_buff_data, user_data_addr,
			(unsigned long)sizeof(struct mem_obj));
		if ((ret != 0L) ||
		    (mem_buff_data.iova == 0UL &&
		     mem_buff_data.va == 0UL)) {
			pr_info("[VCU] %s(%d) Free buf failed!\n",
				__func__, __LINE__);
			return -EINVAL;
		}

		if (cmd == VCU_MVA_FREE) {
			if (vcu_ptr->iommu_padding)
				mem_buff_data.iova |= 0x100000000UL;
			ret = mtk_vcu_free_buffer(vcu_queue, &mem_buff_data);
		} else {
			ret = mtk_vcu_free_page(vcu_queue, &mem_buff_data);
		}

		if (ret != 0L) {
			pr_info("[VCU] VCU_FREE failed %d va %llx, pa %llx, iova %llx\n",
				cmd == VCU_MVA_FREE, mem_buff_data.va,
				mem_buff_data.pa, mem_buff_data.iova);
			return -EINVAL;
		}

		vcu_dbg_log("[VCU] FREE %d va %llx, pa %llx, iova %llx\n",
			cmd == VCU_MVA_FREE, mem_buff_data.va,
			mem_buff_data.pa, mem_buff_data.iova);

		mem_buff_data.va = 0;
		mem_buff_data.iova = 0;
		mem_buff_data.pa = 0;

		ret = (long)copy_to_user(user_data_addr, &mem_buff_data,
			(unsigned long)sizeof(struct mem_obj));
		if (ret != 0L) {
			pr_info("[VCU] %s(%d) Copy data to user failed!\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		ret = 0;
		break;
	case VCU_CACHE_FLUSH_ALL:
		dev_dbg(dev, "[VCU] Flush cache in kernel\n");
		vcu_buffer_flush_all(dev, vcu_queue);
		ret = 0;
		break;
	case VCU_CACHE_FLUSH_BUFF:
	case VCU_CACHE_INVALIDATE_BUFF:
		user_data_addr = (unsigned char *)arg;
		ret = (long)copy_from_user(&mem_buff_data, user_data_addr,
			(unsigned long)sizeof(struct mem_obj));
		if (ret != 0L) {
			pr_info("[VCU] %s(%d) Copy data from user failed!\n",
				__func__, __LINE__);
			return -EINVAL;
		}

		if (vcu_ptr->iommu_padding)
			mem_buff_data.iova |= 0x100000000UL;
		ret = vcu_buffer_cache_sync(dev, vcu_queue,
			(dma_addr_t)mem_buff_data.iova,
			(size_t)mem_buff_data.len,
			(cmd == VCU_CACHE_FLUSH_BUFF) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE);
		if (ret < 0)
			return -EINVAL;

		dev_dbg(dev, "[VCU] Cache flush buffer iova = %llx, size = %d\n",
			mem_buff_data.iova, (unsigned int)mem_buff_data.len);
		ret = (long)copy_to_user(user_data_addr, &mem_buff_data,
			(unsigned long)sizeof(struct mem_obj));
		if (ret != 0L) {
			pr_info("[VCU] %s(%d) Copy data to user failed!\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		ret = 0;
		break;
	case VCU_GCE_SET_CMD_FLUSH:
		ret = vcu_gce_cmd_flush(vcu_dev, vcu_queue, arg);
		break;
	case VCU_GCE_WAIT_CALLBACK:
		ret = vcu_wait_gce_callback(vcu_dev, arg);
		break;
	default:
		dev_err(dev, "[VCU] Unknown cmd\n");
		break;
	}

	return ret;
}

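/*
 * Illustrative user-space call sequence for the allocation ioctls.
 * This is a minimal sketch only: the uapi header name, the device
 * node path and the error handling are assumptions, not part of this
 * file (the node name comes from the DT "mediatek,vcuname" property).
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include "mtk_vcu_ioctl.h"          // hypothetical uapi header
 *
 *	int fd = open("/dev/vpud", O_RDWR); // assumed node name
 *	struct mem_obj mem = { .len = 4096 };
 *	if (fd >= 0 && ioctl(fd, VCU_MVA_ALLOCATION, &mem) == 0) {
 *		// mem.iova/mem.va/mem.pa are filled in by the driver;
 *		// mmap() through the MAP_SHMEM_MM window uses the iova.
 *		ioctl(fd, VCU_MVA_FREE, &mem);  // release when done
 *	}
 */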
#if IS_ENABLED(CONFIG_COMPAT)
static int compat_get_vpud_allocation_data(
	struct compat_mem_obj __user *data32,
	struct mem_obj __user *data)
{
	compat_ulong_t l;
	compat_u64 u;
	unsigned int err = 0;

	err = get_user(l, &data32->iova);
	err |= put_user(l, &data->iova);
	err |= get_user(l, &data32->len);
	err |= put_user(l, &data->len);
	err |= get_user(u, &data32->pa);
	err |= put_user(u, &data->pa);
	err |= get_user(u, &data32->va);
	err |= put_user(u, &data->va);

	return (int)err;
}

static int compat_put_vpud_allocation_data(
	struct compat_mem_obj __user *data32,
	struct mem_obj __user *data)
{
	compat_ulong_t l;
	compat_u64 u;
	unsigned int err = 0;

	err = get_user(l, &data->iova);
	err |= put_user(l, &data32->iova);
	err |= get_user(l, &data->len);
	err |= put_user(l, &data32->len);
	err |= get_user(u, &data->pa);
	err |= put_user(u, &data32->pa);
	err |= get_user(u, &data->va);
	err |= put_user(u, &data32->va);

	return (int)err;
}

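/*
 * 32-bit compat path: each mem_obj is marshalled field by field
 * between the compat and native layouts in a compat_alloc_user_space()
 * area; the get_user()/put_user() results are OR-ed together, so any
 * single fault yields a nonzero value and aborts the ioctl.
 */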
static long mtk_vcu_unlocked_compat_ioctl(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	int err = 0;
	long ret = -1;
	struct share_obj __user *share_data32;
	struct compat_mem_obj __user *data32;
	struct mem_obj __user *data;

	switch (cmd) {
	case COMPAT_VCU_SET_OBJECT:
	case VCU_GET_OBJECT:
	case VCU_GET_LOG_OBJECT:
	case VCU_GCE_SET_CMD_FLUSH:
	case VCU_GCE_WAIT_CALLBACK:
		share_data32 = compat_ptr((uint32_t)arg);
		ret = file->f_op->unlocked_ioctl(file,
			cmd, (unsigned long)share_data32);
		break;
	case COMPAT_VCU_MVA_ALLOCATION:
	case COMPAT_VCU_PA_ALLOCATION:
		data32 = compat_ptr((uint32_t)arg);
		data = compat_alloc_user_space(sizeof(struct mem_obj));
		if (data == NULL)
			return -EFAULT;

		err = compat_get_vpud_allocation_data(data32, data);
		if (err != 0)
			return err;

		if (cmd == COMPAT_VCU_MVA_ALLOCATION)
			ret = file->f_op->unlocked_ioctl(file,
				(uint32_t)VCU_MVA_ALLOCATION,
				(unsigned long)data);
		else
			ret = file->f_op->unlocked_ioctl(file,
				(uint32_t)VCU_PA_ALLOCATION,
				(unsigned long)data);

		err = compat_put_vpud_allocation_data(data32, data);
		if (err != 0)
			return err;
		break;
	case COMPAT_VCU_MVA_FREE:
	case COMPAT_VCU_PA_FREE:
		data32 = compat_ptr((uint32_t)arg);
		data = compat_alloc_user_space(sizeof(struct mem_obj));
		if (data == NULL)
			return -EFAULT;

		err = compat_get_vpud_allocation_data(data32, data);
		if (err != 0)
			return err;

		if (cmd == COMPAT_VCU_MVA_FREE)
			ret = file->f_op->unlocked_ioctl(file,
				(uint32_t)VCU_MVA_FREE, (unsigned long)data);
		else
			ret = file->f_op->unlocked_ioctl(file,
				(uint32_t)VCU_PA_FREE, (unsigned long)data);

		err = compat_put_vpud_allocation_data(data32, data);
		if (err != 0)
			return err;
		break;
	case COMPAT_VCU_CACHE_FLUSH_ALL:
		ret = file->f_op->unlocked_ioctl(file,
			(uint32_t)VCU_CACHE_FLUSH_ALL, 0);
		break;
	case COMPAT_VCU_CACHE_FLUSH_BUFF:
	case COMPAT_VCU_CACHE_INVALIDATE_BUFF:
		data32 = compat_ptr((uint32_t)arg);
		data = compat_alloc_user_space(sizeof(struct mem_obj));
		if (data == NULL)
			return -EFAULT;

		err = compat_get_vpud_allocation_data(data32, data);
		if (err != 0)
			return err;

		if (cmd == COMPAT_VCU_CACHE_FLUSH_BUFF)
			ret = file->f_op->unlocked_ioctl(file,
				(uint32_t)VCU_CACHE_FLUSH_BUFF,
				(unsigned long)data);
		else
			ret = file->f_op->unlocked_ioctl(file,
				(uint32_t)VCU_CACHE_INVALIDATE_BUFF,
				(unsigned long)data);

		err = compat_put_vpud_allocation_data(data32, data);
		if (err != 0)
			return err;
		break;
	default:
		pr_err("[VCU] Invalid cmd_number 0x%x.\n", cmd);
		break;
	}

	return ret;
}
#endif

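/*
 * "test_info" module parameter: a write is copied into the VPUD log
 * buffer and wakes the log reader (see VCU_GET_LOG_OBJECT). The magic
 * strings "vcu_log 1" / "vcu_log 0" only toggle the kernel-side debug
 * log and are not forwarded. Example (the exact sysfs path depends on
 * the module name):
 *	echo "vcu_log 1" > /sys/module/mtk_vcu/parameters/test_info
 */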
static int mtk_vcu_write(const char *val, const struct kernel_param *kp)
{
	long ret = -1;

	if (vcu_ptr != NULL &&
	    vcu_ptr->vdec_log_info != NULL &&
	    val != NULL) {
		ret = param_set_charp(val, kp);
		if (ret != 0)
			return -EINVAL;
		memcpy(vcu_ptr->vdec_log_info->log_info,
			val, strnlen(val, LOG_INFO_SIZE - 1) + 1);
	} else {
		return -EFAULT;
	}
	vcu_ptr->vdec_log_info->log_info[LOG_INFO_SIZE - 1] = '\0';

	/* check whether the VCU debug log should be toggled */
	if (strstr(vcu_ptr->vdec_log_info->log_info, "vcu_log 1")) {
		vcu_ptr->enable_vcu_dbg_log = 1;
		return 0;
	} else if (strstr(vcu_ptr->vdec_log_info->log_info, "vcu_log 0")) {
		vcu_ptr->enable_vcu_dbg_log = 0;
		return 0;
	}

	pr_info("[log wakeup VPUD] log_info %p vcu_ptr %p val %p: %s %lu\n",
		(char *)vcu_ptr->vdec_log_info->log_info,
		vcu_ptr, val, val,
		(unsigned long)strnlen(val, LOG_INFO_SIZE - 1) + 1);

	atomic_set(&vcu_ptr->vdec_log_got, 1);
	wake_up(&vcu_ptr->vdec_log_get_wq);

	return 0;
}

static const struct kernel_param_ops log_param_ops = {
	.set = mtk_vcu_write,
	.get = param_get_charp,
};
module_param_cb(test_info, &log_param_ops, &vcodec_param_string, 0644);

static const struct file_operations vcu_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = mtk_vcu_unlocked_ioctl,
	.open = mtk_vcu_open,
	.release = mtk_vcu_release,
	.mmap = mtk_vcu_mmap,
#if IS_ENABLED(CONFIG_COMPAT)
	.compat_ioctl = mtk_vcu_unlocked_compat_ioctl,
#endif
};

/*
 * Suspend callback, invoked after user space processes are frozen.
 * Since user space is frozen, we need not (and cannot) hold the same
 * mutex that protects the lock owner while checking status.
 * If the video codec hardware is still active at this point, suspend
 * must be refused.
 */
static int mtk_vcu_suspend(struct device *pDev)
{
	if (atomic_read(&vcu_ptr->ipi_done[VCU_VDEC]) == 0 ||
	    atomic_read(&vcu_ptr->ipi_done[VCU_VENC]) == 0 ||
	    atomic_read(&vcu_ptr->gce_job_cnt[VCU_VDEC][0]) > 0 ||
	    atomic_read(&vcu_ptr->gce_job_cnt[VCU_VDEC][1]) > 0 ||
	    atomic_read(&vcu_ptr->gce_job_cnt[VCU_VENC][0]) > 0 ||
	    atomic_read(&vcu_ptr->gce_job_cnt[VCU_VENC][1]) > 0) {
		pr_info("[VCU] %s fail due to videocodec activity\n", __func__);
		return -EBUSY;
	}

	pr_info("[VCU] %s done\n", __func__);
	return 0;
}

static int mtk_vcu_resume(struct device *pDev)
{
	pr_info("[VCU] %s done\n", __func__);
	return 0;
}

/*
 * Suspend notifier, invoked before user space processes are frozen.
 * The user space driver can still finish decoding/encoding the current
 * frame. Set is_entering_suspend to stop sending new ipi_msg, while
 * allowing waits on in-flight ipi_msg to complete.
 * Since there is no critical section protection here, a new task may
 * still start after the state changes to is_entering_suspend. That
 * case is handled by the suspend callback mtk_vcu_suspend.
 */
static int mtk_vcu_suspend_notifier(struct notifier_block *nb,
	unsigned long action, void *data)
{
	int wait_cnt = 0;

	pr_info("[VCU] %s ok action = %ld\n", __func__, action);
	switch (action) {
	case PM_SUSPEND_PREPARE:
		vcu_ptr->is_entering_suspend = 1;
		while (atomic_read(&vcu_ptr->ipi_done[VCU_VDEC]) == 0 ||
		       atomic_read(&vcu_ptr->ipi_done[VCU_VENC]) == 0 ||
		       atomic_read(&vcu_ptr->gce_job_cnt[VCU_VDEC][0]) > 0 ||
		       atomic_read(&vcu_ptr->gce_job_cnt[VCU_VDEC][1]) > 0 ||
		       atomic_read(&vcu_ptr->gce_job_cnt[VCU_VENC][0]) > 0 ||
		       atomic_read(&vcu_ptr->gce_job_cnt[VCU_VENC][1]) > 0) {
			wait_cnt++;
			if (wait_cnt > 5) {
				pr_info("vcodec_pm_suspend waiting %d %d %d %d %d %d\n",
					atomic_read(&vcu_ptr->ipi_done[VCU_VDEC]),
					atomic_read(&vcu_ptr->ipi_done[VCU_VENC]),
					atomic_read(&vcu_ptr->gce_job_cnt[VCU_VDEC][0]),
					atomic_read(&vcu_ptr->gce_job_cnt[VCU_VDEC][1]),
					atomic_read(&vcu_ptr->gce_job_cnt[VCU_VENC][0]),
					atomic_read(&vcu_ptr->gce_job_cnt[VCU_VENC][1]));
				/*
				 * The current task is still not finished;
				 * don't block here, it will be checked
				 * again in the real suspend callback.
				 */
				return NOTIFY_DONE;
			}
			usleep_range(10000, 20000);
		}
		return NOTIFY_OK;
	case PM_POST_SUSPEND:
		vcu_ptr->is_entering_suspend = 0;
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_DONE;
}

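/*
 * Hooked to the signal_generate tracepoint (registered in probe):
 * when any task SIGKILLs vpud, flag vpud_is_going_down so in-flight
 * waiters can bail out instead of waiting on a dying daemon.
 */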
static const char stat_nam[] = "OOXX";
static void probe_death_signal(void *ignore, int sig, struct siginfo *info,
	struct task_struct *task, int _group, int result)
{
	unsigned long flags;

	if (strstr(task->comm, "vpud") && sig == SIGKILL) {
		pr_info("[VPUD_PROBE_DEATH][signal][%d:%s]send death sig %d to[%d:%s]\n",
			current->pid, current->comm,
			sig, task->pid, task->comm);
		spin_lock_irqsave(&vcu_ptr->vpud_sig_lock, flags);
		vcu_ptr->vpud_is_going_down = 1;
		spin_unlock_irqrestore(&vcu_ptr->vpud_sig_lock, flags);
	}
}

static int mtk_vcu_probe(struct platform_device *pdev)
{
	struct mtk_vcu *vcu;
	struct device *dev;
	struct resource *res;
	int i, ret = 0;
	unsigned int vcuid;

	dev_dbg(&pdev->dev, "[VCU] initialization\n");

	dev = &pdev->dev;
	vcu = devm_kzalloc(dev, sizeof(*vcu), GFP_KERNEL);
	if (vcu == NULL)
		return -ENOMEM;
	vcu_ptr = vcu;

	ret = of_property_read_u32(dev->of_node, "mediatek,vcuid", &vcuid);
	if (ret != 0) {
		dev_err(dev, "[VCU] failed to find mediatek,vcuid\n");
		return ret;
	}
	vcu_mtkdev[vcuid] = vcu;

#ifdef CONFIG_MTK_IOMMU_V2
	vcu_mtkdev[vcuid]->io_domain = iommu_get_domain_for_dev(dev);
	if (vcu_mtkdev[vcuid]->io_domain == NULL) {
		dev_err(dev,
			"[VCU] vcuid: %d get io_domain fail !!\n", vcuid);
		return -EPROBE_DEFER;
	}
	dev_dbg(dev, "vcu io_domain: %p,vcuid:%d\n",
		vcu_mtkdev[vcuid]->io_domain,
		vcuid);

	/* prefer a 64-bit coherent DMA mask; fall back to 32-bit */
	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_info(&pdev->dev, "DMA coherent mask set failed\n");
			return ret;
		}
		vcu->iommu_padding = 0;
	} else {
		vcu->iommu_padding = 1;
	}
#endif

	if (vcuid == 2)
		vcu_mtkdev[vcuid]->path = CAM_PATH;
	else if (vcuid == 1)
		vcu_mtkdev[vcuid]->path = MDP_PATH;
	else if (vcuid == 0) {
		vcu_mtkdev[vcuid]->vdec_log_info = devm_kzalloc(dev,
			sizeof(struct log_test_nofuse), GFP_KERNEL);
		pr_info("[VCU] vdec_log_info %p %d vcuid %d vcu_ptr %p\n",
			vcu_mtkdev[vcuid]->vdec_log_info,
			(int)sizeof(struct log_test_nofuse),
			(int)vcuid, vcu_ptr);
		vcu_mtkdev[vcuid]->path = VCU_PATH;
	} else {
		return -ENXIO;
	}

	ret = of_property_read_string(dev->of_node, "mediatek,vcuname",
		&vcu_mtkdev[vcuid]->vcuname);
	if (ret != 0) {
		dev_err(dev, "[VCU] failed to find mediatek,vcuname\n");
		return ret;
	}

	vcu->dev = &pdev->dev;
	platform_set_drvdata(pdev, vcu_mtkdev[vcuid]);

	if (vcuid == 0) {
		for (i = 0; i < (int)VCU_MAP_HW_REG_NUM; i++) {
			res = platform_get_resource(pdev, IORESOURCE_MEM, i);
			if (res == NULL)
				break;
			vcu->map_base[i].base = res->start;
			vcu->map_base[i].len = resource_size(res);
			dev_dbg(dev, "[VCU] base[%d]: 0x%lx 0x%lx",
				i, vcu->map_base[i].base,
				vcu->map_base[i].len);
		}
	}

	dev_dbg(dev, "[VCU] vcu ipi init\n");
	ret = vcu_ipi_init(vcu);
	if (ret != 0) {
		dev_err(dev, "[VCU] Failed to init ipi\n");
		goto err_ipi_init;
	}

	/* register vcu initialization IPI */
	ret = vcu_ipi_register(pdev, IPI_VCU_INIT, vcu_init_ipi_handler,
		"vcu_init", vcu);
	if (ret != 0) {
		dev_err(dev, "Failed to register IPI_VCU_INIT\n");
		goto vcu_mutex_destroy;
	}

	init_waitqueue_head(&vcu->ack_wq[VCU_VDEC]);
	init_waitqueue_head(&vcu->ack_wq[VCU_VENC]);
	init_waitqueue_head(&vcu->get_wq[VCU_VDEC]);
	init_waitqueue_head(&vcu->get_wq[VCU_VENC]);
	init_waitqueue_head(&vcu->gce_wq[VCU_VDEC]);
	init_waitqueue_head(&vcu->gce_wq[VCU_VENC]);
	init_waitqueue_head(&vcu->vdec_log_get_wq);
	atomic_set(&vcu->ipi_got[VCU_VDEC], 0);
	atomic_set(&vcu->ipi_got[VCU_VENC], 0);
	atomic_set(&vcu->ipi_done[VCU_VDEC], 1);
	atomic_set(&vcu->ipi_done[VCU_VENC], 1);
	atomic_set(&vcu->vdec_log_got, 0);
	for (i = 0; i < (int)VCODEC_INST_MAX; i++) {
		atomic_set(&vcu->gce_info[i].flush_done, 0);
		atomic_set(&vcu->gce_info[i].flush_pending, 0);
		vcu->gce_info[i].user_hdl = 0;
		vcu->gce_info[i].v4l2_ctx = NULL;
	}
	atomic_set(&vcu->gce_job_cnt[VCU_VDEC][0], 0);
	atomic_set(&vcu->gce_job_cnt[VCU_VDEC][1], 0);
	atomic_set(&vcu->gce_job_cnt[VCU_VENC][0], 0);
	atomic_set(&vcu->gce_job_cnt[VCU_VENC][1], 0);

	/* init character device */
	ret = alloc_chrdev_region(&vcu_mtkdev[vcuid]->vcu_devno, 0, 1,
		vcu_mtkdev[vcuid]->vcuname);
	if (ret < 0) {
		dev_err(dev,
			"[VCU] alloc_chrdev_region failed (%d)\n", ret);
		goto err_alloc;
	}

	vcu_mtkdev[vcuid]->vcu_cdev = cdev_alloc();
	if (vcu_mtkdev[vcuid]->vcu_cdev == NULL) {
		ret = -ENOMEM;
		dev_err(dev, "[VCU] cdev_alloc failed\n");
		goto err_alloc;
	}
	vcu_mtkdev[vcuid]->vcu_cdev->owner = THIS_MODULE;
	vcu_mtkdev[vcuid]->vcu_cdev->ops = &vcu_fops;

	ret = cdev_add(vcu_mtkdev[vcuid]->vcu_cdev,
		vcu_mtkdev[vcuid]->vcu_devno, 1);
	if (ret < 0) {
		dev_err(dev, "[VCU] cdev_add fail (ret=%d)", ret);
		goto err_add;
	}

	vcu_mtkdev[vcuid]->vcu_class = class_create(THIS_MODULE,
		vcu_mtkdev[vcuid]->vcuname);
	if (IS_ERR_OR_NULL(vcu_mtkdev[vcuid]->vcu_class) == true) {
		ret = (int)PTR_ERR(vcu_mtkdev[vcuid]->vcu_class);
		dev_err(dev, "[VCU] class create fail (ret=%d)", ret);
		goto err_add;
	}

	vcu_mtkdev[vcuid]->vcu_device =
		device_create(vcu_mtkdev[vcuid]->vcu_class,
			NULL,
			vcu_mtkdev[vcuid]->vcu_devno,
			NULL, vcu_mtkdev[vcuid]->vcuname);
	if (IS_ERR_OR_NULL(vcu_mtkdev[vcuid]->vcu_device) == true) {
		ret = (int)PTR_ERR(vcu_mtkdev[vcuid]->vcu_device);
		dev_err(dev, "[VCU] device_create fail (ret=%d)", ret);
		goto err_device;
	}

	ret = of_property_read_u32(dev->of_node, "mediatek,dec_gce_th_num",
		&vcu->gce_th_num[VCU_VDEC]);
	if (ret != 0 || vcu->gce_th_num[VCU_VDEC] > GCE_THNUM_MAX)
		vcu->gce_th_num[VCU_VDEC] = 1;

	ret = of_property_read_u32(dev->of_node, "mediatek,enc_gce_th_num",
		&vcu->gce_th_num[VCU_VENC]);
	if (ret != 0 || vcu->gce_th_num[VCU_VENC] > GCE_THNUM_MAX)
		vcu->gce_th_num[VCU_VENC] = 1;

	for (i = 0; i < ARRAY_SIZE(vcu->gce_gpr); i++) {
		ret = of_property_read_u32_index(dev->of_node, "gce-gpr",
			i, &vcu->gce_gpr[i]);
		if (ret < 0)
			break;
	}

	vcu->clt_base = cmdq_register_device(dev);
	for (i = 0; i < vcu->gce_th_num[VCU_VDEC]; i++)
		vcu->clt_vdec[i] = cmdq_mbox_create(dev, i);
	for (i = 0; i < vcu->gce_th_num[VCU_VENC]; i++)
		vcu->clt_venc[i] =
			cmdq_mbox_create(dev, i + vcu->gce_th_num[VCU_VDEC]);
	vcu->clt_venc_sec[0] =
		cmdq_mbox_create(dev,
			vcu->gce_th_num[VCU_VDEC] + vcu->gce_th_num[VCU_VENC]);
	if (IS_ERR_OR_NULL(vcu->clt_vdec[0])) {
		/* make sure we do not return 0 after cleaning up */
		ret = -EINVAL;
		goto err_device;
	}

	dev_dbg(dev, "[VCU] GCE clt_base %p clt_vdec %d %p %p clt_venc %d %p %p %p dev %p",
		vcu->clt_base, vcu->gce_th_num[VCU_VDEC],
		vcu->clt_vdec[0], vcu->clt_vdec[1],
		vcu->gce_th_num[VCU_VENC], vcu->clt_venc[0],
		vcu->clt_venc[1], vcu->clt_venc_sec[0], dev);

	for (i = 0; i < GCE_EVENT_MAX; i++)
		vcu->gce_codec_eid[i] = -1;

	vcu->gce_codec_eid[VDEC_EVENT_0] =
		cmdq_dev_get_event(dev, "vdec_pic_start");
	vcu->gce_codec_eid[VDEC_EVENT_1] =
		cmdq_dev_get_event(dev, "vdec_decode_done");
	vcu->gce_codec_eid[VDEC_EVENT_2] =
		cmdq_dev_get_event(dev, "vdec_pause");
	vcu->gce_codec_eid[VDEC_EVENT_3] =
		cmdq_dev_get_event(dev, "vdec_dec_error");
	vcu->gce_codec_eid[VDEC_EVENT_4] =
		cmdq_dev_get_event(dev, "vdec_mc_busy_overflow_timeout");
	vcu->gce_codec_eid[VDEC_EVENT_5] =
		cmdq_dev_get_event(dev, "vdec_all_dram_req_done");
	vcu->gce_codec_eid[VDEC_EVENT_6] =
		cmdq_dev_get_event(dev, "vdec_ini_fetch_rdy");
	vcu->gce_codec_eid[VDEC_EVENT_7] =
		cmdq_dev_get_event(dev, "vdec_process_flag");
	vcu->gce_codec_eid[VDEC_EVENT_8] =
		cmdq_dev_get_event(dev, "vdec_search_start_code_done");
	vcu->gce_codec_eid[VDEC_EVENT_9] =
		cmdq_dev_get_event(dev, "vdec_ref_reorder_done");
	vcu->gce_codec_eid[VDEC_EVENT_10] =
		cmdq_dev_get_event(dev, "vdec_wp_tble_done");
	vcu->gce_codec_eid[VDEC_EVENT_11] =
		cmdq_dev_get_event(dev, "vdec_count_sram_clr_done");
	vcu->gce_codec_eid[VDEC_EVENT_15] =
		cmdq_dev_get_event(dev, "vdec_gce_cnt_op_threshold");
	vcu->gce_codec_eid[VDEC_LAT_EVENT_0] =
		cmdq_dev_get_event(dev, "vdec_lat_pic_start");
	vcu->gce_codec_eid[VDEC_LAT_EVENT_1] =
		cmdq_dev_get_event(dev, "vdec_lat_decode_done");
	vcu->gce_codec_eid[VDEC_LAT_EVENT_2] =
		cmdq_dev_get_event(dev, "vdec_lat_pause");
	vcu->gce_codec_eid[VDEC_LAT_EVENT_3] =
		cmdq_dev_get_event(dev, "vdec_lat_dec_error");
	vcu->gce_codec_eid[VDEC_LAT_EVENT_4] =
		cmdq_dev_get_event(dev, "vdec_lat_mc_busy_overflow_timeout");
	vcu->gce_codec_eid[VDEC_LAT_EVENT_5] =
		cmdq_dev_get_event(dev, "vdec_lat_all_dram_req_done");
	vcu->gce_codec_eid[VDEC_LAT_EVENT_6] =
		cmdq_dev_get_event(dev, "vdec_lat_ini_fetch_rdy");
	vcu->gce_codec_eid[VDEC_LAT_EVENT_7] =
		cmdq_dev_get_event(dev, "vdec_lat_process_flag");
	vcu->gce_codec_eid[VDEC_LAT_EVENT_8] =
		cmdq_dev_get_event(dev, "vdec_lat_search_start_code_done");
	vcu->gce_codec_eid[VDEC_LAT_EVENT_9] =
		cmdq_dev_get_event(dev, "vdec_lat_ref_reorder_done");
	vcu->gce_codec_eid[VDEC_LAT_EVENT_10] =
		cmdq_dev_get_event(dev, "vdec_lat_wp_tble_done");
	vcu->gce_codec_eid[VDEC_LAT_EVENT_11] =
		cmdq_dev_get_event(dev, "vdec_lat_count_sram_clr_done");
	vcu->gce_codec_eid[VDEC_LAT_EVENT_15] =
		cmdq_dev_get_event(dev, "vdec_lat_gce_cnt_op_threshold");
	vcu->gce_codec_eid[VENC_EOF] =
		cmdq_dev_get_event(dev, "venc_eof");
	vcu->gce_codec_eid[VENC_CMDQ_PAUSE_DONE] =
		cmdq_dev_get_event(dev, "venc_cmdq_pause_done");
	vcu->gce_codec_eid[VENC_MB_DONE] =
		cmdq_dev_get_event(dev, "venc_mb_done");
	vcu->gce_codec_eid[VENC_128BYTE_CNT_DONE] =
		cmdq_dev_get_event(dev, "venc_128B_cnt_done");
	vcu->gce_codec_eid[VENC_EOF_C1] =
		cmdq_dev_get_event(dev, "venc_eof_c1");
	vcu->gce_codec_eid[VENC_WP_2ND_DONE] =
		cmdq_dev_get_event(dev, "venc_wp_2nd_done");
	vcu->gce_codec_eid[VENC_WP_3ND_DONE] =
		cmdq_dev_get_event(dev, "venc_wp_3nd_done");
	vcu->gce_codec_eid[VENC_SPS_DONE] =
		cmdq_dev_get_event(dev, "venc_sps_done");
	vcu->gce_codec_eid[VENC_PPS_DONE] =
		cmdq_dev_get_event(dev, "venc_pps_done");

	for (i = 0; i < (int)VCU_CODEC_MAX; i++) {
		vcu->gce_cmds[i] = devm_kzalloc(dev,
			sizeof(struct gce_cmds), GFP_KERNEL);
		if (vcu->gce_cmds[i] == NULL) {
			ret = -ENOMEM;
			goto err_device;
		}
	}

	sema_init(&vcu->vpud_killed, 1);
	for (i = 0; i < (int)VCU_CODEC_MAX; i++) {
		vcu->curr_ctx[i] = NULL;
		vcu->curr_src_vb[i] = NULL;
		vcu->curr_dst_vb[i] = NULL;
	}
	vcu->is_entering_suspend = 0;
	pm_notifier(mtk_vcu_suspend_notifier, 0);

	ret = vcu_alloc_d_ext_mem(vcu, VCU_SHMEM_SIZE);
	if (ret != 0) {
		dev_err(dev, "[VCU] allocate SHMEM failed\n");
		goto err_device;
	}

	/* init the lock and flags before the tracepoint can fire */
	spin_lock_init(&vcu_ptr->vpud_sig_lock);
	vcu_ptr->vpud_is_going_down = 0;
	vcu_ptr->enable_vcu_dbg_log = 0;
	register_trace_signal_generate(probe_death_signal, NULL);

	dev_dbg(dev, "[VCU] initialization completed\n");
	return 0;

err_device:
	class_destroy(vcu_mtkdev[vcuid]->vcu_class);
err_add:
	cdev_del(vcu_mtkdev[vcuid]->vcu_cdev);
err_alloc:
	unregister_chrdev_region(vcu_mtkdev[vcuid]->vcu_devno, 1);
vcu_mutex_destroy:
	mutex_destroy(&vcu->vcu_mutex[VCU_VDEC]);
	mutex_destroy(&vcu->vcu_mutex[VCU_VENC]);
	mutex_destroy(&vcu->vcu_gce_mutex[VCU_VDEC]);
	mutex_destroy(&vcu->vcu_gce_mutex[VCU_VENC]);
	mutex_destroy(&vcu->ctx_ipi_binding[VCU_VDEC]);
	mutex_destroy(&vcu->ctx_ipi_binding[VCU_VENC]);
	mutex_destroy(&vcu->vcu_share);
	mutex_destroy(&vpud_file_mutex);
err_ipi_init:
	devm_kfree(dev, vcu);

	return ret;
}

static const struct of_device_id mtk_vcu_match[] = {
	{.compatible = "mediatek,mt8173-vcu",},
	{.compatible = "mediatek,mt2701-vpu",},
	{.compatible = "mediatek,mt2712-vcu",},
	{.compatible = "mediatek,mt6771-vcu",},
	{.compatible = "mediatek-vcu",},
	{},
};
MODULE_DEVICE_TABLE(of, mtk_vcu_match);

static int mtk_vcu_remove(struct platform_device *pdev)
{
	struct mtk_vcu *vcu = platform_get_drvdata(pdev);

	vcu_free_d_ext_mem(vcu);
	if (vcu->is_open == true) {
		filp_close(vcu->file, NULL);
		vcu->is_open = false;
	}
	device_destroy(vcu->vcu_class, vcu->vcu_devno);
	class_destroy(vcu->vcu_class);
	cdev_del(vcu->vcu_cdev);
	unregister_chrdev_region(vcu->vcu_devno, 1);
	/* free the device data last; it is still referenced above */
	devm_kfree(&pdev->dev, vcu);

	return 0;
}

static const struct dev_pm_ops mtk_vcu_pm_ops = {
	.suspend = mtk_vcu_suspend,
	.resume = mtk_vcu_resume,
};

static struct platform_driver mtk_vcu_driver = {
	.probe = mtk_vcu_probe,
	.remove = mtk_vcu_remove,
	.driver = {
		.name = "mtk_vcu",
		.owner = THIS_MODULE,
		.pm = &mtk_vcu_pm_ops,
		.of_match_table = mtk_vcu_match,
	},
};

module_platform_driver(mtk_vcu_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Mediatek Video Communication And Controller Unit driver");