mtk_disp_rdma.c 47 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485
  1. /*
  2. * Copyright (c) 2015 MediaTek Inc.
  3. * Copyright (C) 2021 XiaoMi, Inc.
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License version 2 as
  7. * published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. */
  14. #include <drm/drmP.h>
  15. #include <linux/clk.h>
  16. #include <linux/sched.h>
  17. #include <linux/sched/clock.h>
  18. #include <linux/component.h>
  19. #include <linux/iommu.h>
  20. #include <linux/of_device.h>
  21. #include <linux/of_irq.h>
  22. #include <linux/platform_device.h>
  23. #include <linux/pm_runtime.h>
  24. #include <linux/soc/mediatek/mtk-cmdq.h>
  25. #include "mtk_drm_crtc.h"
  26. #include "mtk_drm_ddp_comp.h"
  27. #include "mtk_dump.h"
  28. #include "mtk_drm_mmp.h"
  29. #include "mtk_drm_gem.h"
  30. #include "mtk_drm_helper.h"
  31. #include "mtk_drm_drv.h"
  32. #include "mtk_drm_fb.h"
  33. #include "mtk_layering_rule.h"
  34. #include "mtk_drm_trace.h"
  35. #include "swpm_me.h"
  36. #define DISP_REG_RDMA_INT_ENABLE 0x0000
  37. #define DISP_REG_RDMA_INT_STATUS 0x0004
  38. #define RDMA_TARGET_LINE_INT BIT(5)
  39. #define RDMA_FIFO_UNDERFLOW_INT BIT(4)
  40. #define RDMA_EOF_ABNORMAL_INT BIT(3)
  41. #define RDMA_FRAME_END_INT BIT(2)
  42. #define RDMA_FRAME_START_INT BIT(1)
  43. #define RDMA_REG_UPDATE_INT BIT(0)
  44. #define DISP_REG_RDMA_GLOBAL_CON 0x0010
  45. #define RDMA_ENGINE_EN BIT(0)
  46. #define RDMA_SOFT_RESET BIT(4)
  47. #define RDMA_MODE_MEMORY BIT(1)
  48. #define DISP_REG_RDMA_SIZE_CON_0 0x0014
  49. #define RDMA_MATRIX_ENABLE BIT(17)
  50. #define RDMA_MATRIX_INT_MTX_SEL (7UL << 20)
  51. #define DISP_REG_RDMA_SIZE_CON_1 0x0018
  52. #define DISP_REG_RDMA_TARGET_LINE 0x001c
  53. #define DISP_REG_RDMA_MEM_CON 0x0024
  54. #define DISP_REG_RDMA_MEM_START_ADDR 0x0f00
  55. #define DISP_REG_RDMA_MEM_SRC_PITCH 0x002c
  56. #define DISP_REG_RDMA_MEM_GMC_S0 0x0030
  57. #define MEM_GMC_S0_FLD_PRE_ULTRA_THRESHOLD_LOW \
  58. REG_FLD_MSB_LSB(13, 0)
  59. #define MEM_GMC_S0_FLD_PRE_ULTRA_THRESHOLD_HIGH \
  60. REG_FLD_MSB_LSB(29, 16)
  61. #define MEM_GMC_S0_FLD_RG_VALID_THRESHOLD_FORCE_PREULTRA \
  62. REG_FLD_MSB_LSB(30, 30)
  63. #define MEM_GMC_S0_FLD_RG_VDE_FORCE_PREULTRA \
  64. REG_FLD_MSB_LSB(31, 31)
  65. #define DISP_REG_RDMA_MEM_GMC_S1 0x0034
  66. #define MEM_GMC_S1_FLD_ULTRA_THRESHOLD_LOW \
  67. REG_FLD_MSB_LSB(13, 0)
  68. #define MEM_GMC_S1_FLD_ULTRA_THRESHOLD_HIGH \
  69. REG_FLD_MSB_LSB(29, 16)
  70. #define MEM_GMC_S1_FLD_RG_VALID_THRESHOLD_BLOCK_ULTRA \
  71. REG_FLD_MSB_LSB(30, 30)
  72. #define MEM_GMC_S1_FLD_RG_VDE_BLOCK_ULTRA \
  73. REG_FLD_MSB_LSB(31, 31)
  74. #define DISP_REG_RDMA_MEM_SLOW_CON 0x0038
  75. #define DISP_REG_RDMA_MEM_GMC_S2 0x003c
  76. #define MEM_GMC_S2_FLD_ISSUE_REQ_THRESHOLD REG_FLD_MSB_LSB(13, 0)
  77. #define DISP_REG_RDMA_FIFO_LOG 0x0044
  78. #define DISP_REG_RDMA_PRE_ADD_0 0x0078
  79. #define DISP_REG_RDMA_PRE_ADD_1 0x007c
  80. #define DISP_REG_RDMA_PRE_ADD_2 0x0080
  81. #define DISP_REG_RDMA_POST_ADD_0 0x0084
  82. #define DISP_REG_RDMA_POST_ADD_1 0x0088
  83. #define DISP_REG_RDMA_POST_ADD_2 0x008c
  84. #define DISP_REG_RDMA_DUMMY 0x0090
  85. #define DISP_REG_RDMA_DEBUG_OUT_SEL 0x0094
  86. #define DISP_REG_RDMA_BG_CON_0 0x00a0
  87. #define DISP_REG_RDMA_BG_CON_1 0x00a4
  88. #define DISP_REG_RDMA_THRESHOLD_FOR_SODI 0x00a8
  89. #define RDMA_THRESHOLD_FOR_SODI_FLD_LOW REG_FLD_MSB_LSB(13, 0)
  90. #define RDMA_THRESHOLD_FOR_SODI_FLD_HIGH REG_FLD_MSB_LSB(29, 16)
  91. #define DISP_REG_RDMA_THRESHOLD_FOR_DVFS 0x00ac
  92. #define RDMA_THRESHOLD_FOR_DVFS_FLD_LOW REG_FLD_MSB_LSB(13, 0)
  93. #define RDMA_THRESHOLD_FOR_DVFS_FLD_HIGH REG_FLD_MSB_LSB(29, 16)
  94. #define DISP_REG_RDMA_SRAM_SEL 0x00b0
  95. #define DISP_REG_RDMA_STALL_CG_CON 0x00b4
  96. #if defined(CONFIG_MACH_MT6885) || defined(CONFIG_MACH_MT6893)
  97. #define DISP_REG_RDMA_SHADOW_UPDATE 0x00b8
  98. #endif
  99. #if defined(CONFIG_MACH_MT6873) || defined(CONFIG_MACH_MT6853)
  100. #define DISP_REG_RDMA_SHADOW_UPDATE 0x00bc
  101. #define RDMA_BYPASS_SHADOW BIT(1)
  102. #define RDMA_READ_WORK_REG BIT(2)
  103. #endif
  104. #define DISP_RDMA_SRAM_CASCADE 0x00c8
  105. #define RG_DISP_RDMA_FIFO_SIZE REG_FLD_MSB_LSB(13, 0)
  106. #define RG_DISP_RDMA_RSZ_FIFO_SIZE REG_FLD_MSB_LSB(29, 16)
  107. #define DISP_REG_RDMA_DVFS_SETTING_PRE 0x00d0
  108. #define RG_DVFS_PRE_ULTRA_THRESHOLD_LOW REG_FLD_MSB_LSB(13, 0)
  109. #define RG_DVFS_PRE_ULTRA_THRESHOLD_HIGH REG_FLD_MSB_LSB(29, 16)
  110. #define DISP_REG_RDMA_DVFS_SETTING_ULTRA 0x00d4
  111. #define RG_DVFS_ULTRA_THRESHOLD_LOW REG_FLD_MSB_LSB(13, 0)
  112. #define RG_DVFS_ULTRA_THRESHOLD_HIGH REG_FLD_MSB_LSB(29, 16)
  113. #define DISP_REG_RDMA_LEAVE_DRS_SETTING 0x00d8
  114. #define RG_IS_DRS_STATUS_THRESHOLD_LOW REG_FLD_MSB_LSB(13, 0)
  115. #define RG_IS_DRS_STATUS_THRESHOLD_HIGH REG_FLD_MSB_LSB(29, 16)
  116. #define DISP_REG_RDMA_ENTER_DRS_SETTING 0x00dc
  117. #define RG_NOT_DRS_STATUS_THRESHOLD_LOW REG_FLD_MSB_LSB(13, 0)
  118. #define RG_NOT_DRS_STATUS_THRESHOLD_HIGH REG_FLD_MSB_LSB(29, 16)
  119. #define DISP_REG_RDMA_CROP_CON_0 0x00e0
  120. #define CROP_CON_0_FLD_CROP_LEFT REG_FLD_MSB_LSB(12, 0)
  121. #define CROP_CON_0_FLD_CROP_RIGHT REG_FLD_MSB_LSB(28, 16)
  122. #define DISP_REG_RDMA_CROP_CON_1 0x00e4
  123. #define CROP_CON_0_FLD_CROP_TOP REG_FLD_MSB_LSB(12, 0)
  124. #define CROP_CON_0_FLD_CROP_BOTTOM REG_FLD_MSB_LSB(28, 16)
  125. #define DISP_REG_RDMA_MEM_GMC_S3 0x00e8
  126. #define FLD_LOW_FOR_URGENT REG_FLD_MSB_LSB(13, 0)
  127. #define FLD_HIGH_FOR_URGENT REG_FLD_MSB_LSB(29, 16)
  128. #define FLD_RG_VALID_THRESHOLD_BLOCK_URGENT REG_FLD_MSB_LSB(30, 30)
  129. #define FLD_RG_VDE_BLOCK_URGENT REG_FLD_MSB_LSB(31, 31)
  130. /* TODO: handle pixel/line cnt for other platform */
  131. #define DISP_REG_RDMA_IN_P_CNT 0x0120
  132. #define DISP_REG_RDMA_IN_LINE_CNT 0x0124
  133. #define DISP_REG_RDMA_OUT_P_CNT 0x0128
  134. #define DISP_REG_RDMA_OUT_LINE_CNT 0x012C
  135. #define DISP_REG_RDMA_DBG_OUT 0x0100
  136. #define DISP_REG_RDMA_DBG_OUT1 0x010c
  137. #define DISP_REG_RDMA_DBG_OUT2 0x0110
  138. #define DISP_REG_RDMA_DBG_OUT3 0x0114
  139. #define DISP_REG_RDMA_DBG_OUT4 0x0118
  140. #define DISP_REG_RDMA_DBG_OUT5 0x011c
  141. #define DISP_REG_RDMA_GREQ_URG_NUM_SEL 0x01a8
  142. #define FLD_RG_LAYER_SMI_ID_EN REG_FLD_MSB_LSB(29, 29)
  143. #define DISP_RDMA_MEM_CON 0x0024
  144. #define MEM_MODE_INPUT_SWAP BIT(8)
  145. #define DISP_RDMA_MEM_SRC_PITCH 0x002c
  146. #define DISP_REG_RDMA_FIFO_CON 0x0040
  147. #define FIFO_CON_FLD_OUTPUT_VALID_FIFO_THRESHOLD REG_FLD_MSB_LSB(13, 0)
  148. #define FIFO_CON_FLD_FIFO_PSEUDO_SIZE REG_FLD_MSB_LSB(29, 16)
  149. #define FIFO_CON_FLD_FIFO_UNDERFLOW_EN REG_FLD_MSB_LSB(31, 31)
  150. #define RDMA_FIFO_UNDERFLOW_EN BIT(31)
  151. #define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16UL) << 16)
  152. #define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16)
  153. #define RDMA_FIFO_SIZE(module) ((module)->data->fifo_size)
  154. #define DISP_RDMA_MEM_START_ADDR 0x0f00
  155. #define MATRIX_INT_MTX_SEL_DEFAULT 0x000000
  156. #define MEM_MODE_INPUT_FORMAT_RGB565 0x0U
  157. #define MEM_MODE_INPUT_FORMAT_RGB888 (0x001U << 4)
  158. #define MEM_MODE_INPUT_FORMAT_RGBA8888 (0x002U << 4)
  159. #define MEM_MODE_INPUT_FORMAT_ARGB8888 (0x003U << 4)
  160. #define MEM_MODE_INPUT_FORMAT_UYVY (0x004U << 4)
  161. #define MEM_MODE_INPUT_FORMAT_YUYV (0x005U << 4)
  162. #define RDMA_DUMMY_BUFFER_SIZE(h, v) ((h) * (v)*4)
  163. #define RDMA_DUMMY_BUFFER_PITCH(h) ((h)*4)
  164. #define GLOBAL_CON_FLD_ENGINE_EN REG_FLD_MSB_LSB(0, 0)
  165. #define GLOBAL_CON_FLD_MODE_SEL REG_FLD_MSB_LSB(1, 1)
  166. #define GLOBAL_CON_FLD_SMI_BUSY REG_FLD_MSB_LSB(12, 12)
  167. #define RDMA_BG_CON_0_LEFT REG_FLD_MSB_LSB(12, 0)
  168. #define RDMA_BG_CON_0_RIGHT REG_FLD_MSB_LSB(28, 16)
  169. #define RDMA_BG_CON_1_TOP REG_FLD_MSB_LSB(12, 0)
  170. #define RDMA_BG_CON_1_BOTTOM REG_FLD_MSB_LSB(28, 16)
  171. /* golden setting */
/* golden setting */
/*
 * Index into the gs[] array filled by mtk_rdma_cal_golden_setting() and
 * consumed by mtk_rdma_set_ultra_l().  Each entry corresponds to one field
 * of an RDMA golden-setting register; the groups below mirror the register
 * layout.
 */
enum GS_RDMA_FLD {
	/* DISP_REG_RDMA_MEM_GMC_S0 */
	GS_RDMA_PRE_ULTRA_TH_LOW = 0,
	GS_RDMA_PRE_ULTRA_TH_HIGH,
	GS_RDMA_VALID_TH_FORCE_PRE_ULTRA,
	GS_RDMA_VDE_FORCE_PRE_ULTRA,
	/* DISP_REG_RDMA_MEM_GMC_S1 */
	GS_RDMA_ULTRA_TH_LOW,
	GS_RDMA_ULTRA_TH_HIGH,
	GS_RDMA_VALID_TH_BLOCK_ULTRA,
	GS_RDMA_VDE_BLOCK_ULTRA,
	/* DISP_REG_RDMA_MEM_GMC_S2 */
	GS_RDMA_ISSUE_REQ_TH,
	/* DISP_REG_RDMA_FIFO_CON */
	GS_RDMA_OUTPUT_VALID_FIFO_TH,
	GS_RDMA_FIFO_SIZE,
	GS_RDMA_FIFO_UNDERFLOW_EN,
	/* DISP_REG_RDMA_THRESHOLD_FOR_SODI */
	GS_RDMA_TH_LOW_FOR_SODI,
	GS_RDMA_TH_HIGH_FOR_SODI,
	/* DISP_REG_RDMA_THRESHOLD_FOR_DVFS */
	GS_RDMA_TH_LOW_FOR_DVFS,
	GS_RDMA_TH_HIGH_FOR_DVFS,
	/* DISP_REG_RDMA_SRAM_SEL */
	GS_RDMA_SRAM_SEL,
	/* DISP_REG_RDMA_DVFS_SETTING_PRE / _ULTRA */
	GS_RDMA_DVFS_PRE_ULTRA_TH_LOW,
	GS_RDMA_DVFS_PRE_ULTRA_TH_HIGH,
	GS_RDMA_DVFS_ULTRA_TH_LOW,
	GS_RDMA_DVFS_ULTRA_TH_HIGH,
	/* DISP_REG_RDMA_LEAVE_DRS_SETTING / ENTER_DRS_SETTING */
	GS_RDMA_IS_DRS_STATUS_TH_LOW,
	GS_RDMA_IS_DRS_STATUS_TH_HIGH,
	GS_RDMA_NOT_DRS_STATUS_TH_LOW,
	GS_RDMA_NOT_DRS_STATUS_TH_HIGH,
	/* DISP_REG_RDMA_MEM_GMC_S3 */
	GS_RDMA_URGENT_TH_LOW,
	GS_RDMA_URGENT_TH_HIGH,
	/* DISP_RDMA_SRAM_CASCADE (currently programmed under #if 0) */
	GS_RDMA_SELF_FIFO_SIZE,
	GS_RDMA_RSZ_FIFO_SIZE,
	/* DISP_REG_RDMA_GREQ_URG_NUM_SEL */
	GS_RDMA_LAYER_SMI_ID_EN,
	GS_RDMA_FLD_NUM,	/* number of fields; size of the gs[] array */
};
/*
 * Per-SoC RDMA configuration supplied via of_device match data.
 * @fifo_size:      RDMA FIFO depth for this SoC (used by RDMA_FIFO_SIZE()).
 * @sodi_config:    optional hook to (de)configure SODI for this component;
 *                  called from mtk_rdma_start()/mtk_rdma_stop() with a bool
 *                  enable flag passed through @data.
 * @support_shadow: whether the HW supports shadow registers.
 */
struct mtk_disp_rdma_data {
	unsigned int fifo_size;
	void (*sodi_config)(struct drm_device *drm, enum mtk_ddp_comp_id id,
			    struct cmdq_pkt *handle, void *data);
	bool support_shadow;
};
/* Saved RDMA state (frame-buffer start address) for restore after suspend. */
struct mtk_rdma_backup_info {
	unsigned long addr;
};
/* Snapshot of the most recent memory-mode layer configuration. */
struct mtk_rdma_cfg_info {
	unsigned int addr;	/* frame-buffer start address */
	unsigned int width;
	unsigned int height;
	unsigned int fmt;	/* MEM_MODE_INPUT_FORMAT_* value */
};
/**
 * struct mtk_disp_rdma - DISP_RDMA driver structure
 * @ddp_comp: structure containing type enum and hardware resources
 * @crtc: associated crtc to report irq events to
 * @data: per-SoC configuration from of_device match data
 * @drm_dev: owning DRM device
 * @rdma_memory_mode: true when RDMA reads from memory rather than
 *                    the direct-link pipeline
 * @underflow_cnt: number of FIFO-underflow interrupts seen
 * @abnormal_cnt: number of abnormal-EOF interrupts seen
 * @dummy_w: width programmed in memory mode (used for dummy buffer sizing)
 * @dummy_h: height programmed in memory mode
 * @backup_info: saved register state for suspend/resume
 * @cfg_info: snapshot of the last memory-mode layer configuration
 */
struct mtk_disp_rdma {
	struct mtk_ddp_comp ddp_comp;
	struct drm_crtc *crtc;
	const struct mtk_disp_rdma_data *data;
	struct drm_device *drm_dev;
	bool rdma_memory_mode;
	unsigned int underflow_cnt;
	unsigned int abnormal_cnt;
	unsigned int dummy_w;
	unsigned int dummy_h;
	struct mtk_rdma_backup_info backup_info;
	struct mtk_rdma_cfg_info cfg_info;
};
/* Resolve the wrapping mtk_disp_rdma from its embedded ddp_comp member. */
static inline struct mtk_disp_rdma *comp_to_rdma(struct mtk_ddp_comp *comp)
{
	return container_of(comp, struct mtk_disp_rdma, ddp_comp);
}
/*
 * RDMA interrupt handler.
 *
 * Reads and acknowledges DISP_REG_RDMA_INT_STATUS, then dispatches on each
 * status bit:
 *   bit 0 - register update done (log only)
 *   bit 1 - frame start: refresh-tag begin + MMPath trace
 *   bit 2 - frame end: swpm fps accounting, ESD target-time clear,
 *           lcm fps update (RDMA0 only), refresh-tag end
 *   bit 3 - abnormal EOF: error log + counter
 *   bit 4 - FIFO underflow: dump analysis, optional AEE, counter
 *   bit 5 - target line: wake ESD check task (unless frame end also fired)
 *
 * Returns IRQ_NONE when the top clock is off or no status bit is set,
 * IRQ_HANDLED otherwise.
 */
static irqreturn_t mtk_disp_rdma_irq_handler(int irq, void *dev_id)
{
	struct mtk_disp_rdma *priv = dev_id;
	struct mtk_ddp_comp *rdma = &priv->ddp_comp;
	unsigned int val = 0;
	unsigned int ret = 0;

	/* Refuse to touch registers while the top clock is gated. */
	if (mtk_drm_top_clk_isr_get("rdma_irq") == false) {
		DDPIRQ("%s, top clk off\n", __func__);
		return IRQ_NONE;
	}
	val = readl(rdma->regs + DISP_REG_RDMA_INT_STATUS);
	if (!val) {
		ret = IRQ_NONE;
		goto out;
	}
	DRM_MMP_MARK(IRQ, irq, val);
	if (rdma->id == DDP_COMPONENT_RDMA0)
		DRM_MMP_MARK(rdma0, val, 0);
	if (rdma->id == DDP_COMPONENT_RDMA4)
		DRM_MMP_MARK(rdma4, val, 0);
	if (rdma->id == DDP_COMPONENT_RDMA5)
		DRM_MMP_MARK(rdma5, val, 0);
	/* 0x18 = abnormal EOF (bit 3) | FIFO underflow (bit 4) */
	if (val & 0x18)
		DRM_MMP_MARK(abnormal_irq,
			     (priv->underflow_cnt << 24) |
			     (priv->abnormal_cnt << 16) | val,
			     rdma->id);
	DDPIRQ("%s irq, val:0x%x\n", mtk_dump_comp_str(rdma), val);
	/*
	 * Acknowledge by writing the complement of the pending bits;
	 * NOTE(review): presumably the status register latches 0-written
	 * bits as cleared -- confirm against the RDMA datasheet.
	 */
	writel(~val, rdma->regs + DISP_REG_RDMA_INT_STATUS);
	if (val & (1 << 0))	/* register update done */
		DDPIRQ("[IRQ] %s: reg update done!\n", mtk_dump_comp_str(rdma));
	if (val & (1 << 2)) {	/* frame end */
		set_swpm_disp_work(); /* counting fps for swpm */
		DDPIRQ("[IRQ] %s: frame done!\n", mtk_dump_comp_str(rdma));
		if (rdma->mtk_crtc && rdma->mtk_crtc->esd_ctx)
			atomic_set(&rdma->mtk_crtc->esd_ctx->target_time, 0);
		if (rdma->id == DDP_COMPONENT_RDMA0) {
			unsigned long long rdma_end_time = sched_clock();

			lcm_fps_ctx_update(rdma_end_time,
					   priv->ddp_comp.mtk_crtc->base.index,
					   1);
		}
		mtk_drm_refresh_tag_end(&priv->ddp_comp);
	}
	if (val & (1 << 1)) {	/* frame start */
		DDPIRQ("[IRQ] %s: frame start!\n", mtk_dump_comp_str(rdma));
		mtk_drm_refresh_tag_start(&priv->ddp_comp);
		MMPathTraceDRM(rdma);
	}
	if (val & (1 << 3)) {	/* abnormal EOF */
		DDPPR_ERR("[IRQ] %s: abnormal! cnt=%d\n",
			  mtk_dump_comp_str(rdma), priv->abnormal_cnt);
		priv->abnormal_cnt++;
	}
	if (val & (1 << 4)) {	/* FIFO underflow */
		DDPPR_ERR("[IRQ] %s: underflow! cnt=%d\n",
			  mtk_dump_comp_str(rdma), priv->underflow_cnt);
		/* Dump input/output pixel and line counters for triage. */
		DDPMSG("%s: pix(%d,%d,%d,%d)\n", mtk_dump_comp_str(rdma),
		       readl(DISP_REG_RDMA_IN_P_CNT + rdma->regs),
		       readl(DISP_REG_RDMA_IN_LINE_CNT + rdma->regs),
		       readl(DISP_REG_RDMA_OUT_P_CNT + rdma->regs),
		       readl(DISP_REG_RDMA_OUT_LINE_CNT + rdma->regs),
		       readl(DISP_REG_RDMA_OUT_LINE_CNT + rdma->regs));
		mtk_rdma_analysis(rdma);
		mtk_rdma_dump(rdma);
		if (rdma->mtk_crtc) {
			mtk_drm_crtc_analysis(&(rdma->mtk_crtc->base));
			mtk_drm_crtc_dump(&(rdma->mtk_crtc->base));
		}
		/* Raise an AEE exception if the helper option enables it. */
		if (rdma->mtk_crtc) {
			struct mtk_drm_private *drm_priv = NULL;

			if (rdma->mtk_crtc->base.dev)
				drm_priv =
					rdma->mtk_crtc->base.dev->dev_private;
			if (drm_priv && mtk_drm_helper_get_opt(
				    drm_priv->helper_opt,
				    MTK_DRM_OPT_RDMA_UNDERFLOW_AEE)) {
				disp_met_set(NULL, 1);
				DDPAEE("%s: underflow! cnt=%d\n",
				       mtk_dump_comp_str(rdma),
				       priv->underflow_cnt);
			}
		}
		priv->underflow_cnt++;
	}
	if (val & (1 << 5)) {	/* target line */
		DDPIRQ("[IRQ] %s: target line!\n", mtk_dump_comp_str(rdma));
		/* Only wake the ESD checker when frame end did not also fire. */
		if (rdma->mtk_crtc && rdma->mtk_crtc->esd_ctx &&
		    (!(val & (1 << 2)))) {
			atomic_set(&rdma->mtk_crtc->esd_ctx->target_time, 1);
			wake_up_interruptible(
				&rdma->mtk_crtc->esd_ctx->check_task_wq);
		}
	}
	/* TODO: check if this is not necessary */
	/* mtk_crtc_ddp_irq(priv->crtc, rdma); */
	ret = IRQ_HANDLED;
out:
	mtk_drm_top_clk_isr_put("rdma_irq");
	return ret;
}
/*
 * NOTE(review): dead code retained under #if 0 -- vblank enable/disable
 * implemented by toggling the RDMA frame-end interrupt via CMDQ.  Consider
 * deleting if it is never re-enabled.
 */
#if 0
static void mtk_rdma_enable_vblank(struct mtk_ddp_comp *comp,
				   struct drm_crtc *crtc,
				   struct cmdq_pkt *handle)
{
	struct mtk_disp_rdma *rdma = comp_to_rdma(comp);

	rdma->crtc = crtc;
	cmdq_pkt_write(handle, comp->cmdq_base,
		       comp->regs_pa + DISP_REG_RDMA_INT_ENABLE,
		       RDMA_FRAME_END_INT, RDMA_FRAME_END_INT);
}

static void mtk_rdma_disable_vblank(struct mtk_ddp_comp *comp,
				    struct cmdq_pkt *handle)
{
	struct mtk_disp_rdma *rdma = comp_to_rdma(comp);

	rdma->crtc = NULL;
	cmdq_pkt_write(handle, comp->cmdq_base,
		       comp->regs_pa + DISP_REG_RDMA_INT_ENABLE,
		       RDMA_FRAME_END_INT, 0);
}
#endif
  363. static int mtk_rdma_io_cmd(struct mtk_ddp_comp *comp, struct cmdq_pkt *handle,
  364. enum mtk_ddp_io_cmd io_cmd, void *params);
  365. static void mtk_rdma_start(struct mtk_ddp_comp *comp, struct cmdq_pkt *handle)
  366. {
  367. int ret;
  368. struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
  369. const struct mtk_disp_rdma_data *data = rdma->data;
  370. bool en = 1;
  371. ret = pm_runtime_get_sync(comp->dev);
  372. if (ret < 0)
  373. DRM_ERROR("Failed to enable power domain: %d\n", ret);
  374. mtk_ddp_write_mask(comp, MATRIX_INT_MTX_SEL_DEFAULT,
  375. DISP_REG_RDMA_SIZE_CON_0, 0xff0000, handle);
  376. mtk_rdma_io_cmd(comp, handle, IRQ_LEVEL_ALL, NULL);
  377. mtk_ddp_write_mask(comp, RDMA_ENGINE_EN, DISP_REG_RDMA_GLOBAL_CON,
  378. RDMA_ENGINE_EN, handle);
  379. if (data && data->sodi_config)
  380. data->sodi_config(comp->mtk_crtc->base.dev, comp->id, handle,
  381. &en);
  382. }
  383. static void mtk_rdma_stop(struct mtk_ddp_comp *comp, struct cmdq_pkt *handle)
  384. {
  385. int ret;
  386. bool en = 0;
  387. struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
  388. const struct mtk_disp_rdma_data *data = rdma->data;
  389. mtk_ddp_write(comp, 0x0, DISP_REG_RDMA_INT_ENABLE, handle);
  390. mtk_ddp_write(comp, RDMA_SOFT_RESET, DISP_REG_RDMA_GLOBAL_CON, handle);
  391. mtk_ddp_write(comp, 0x0, DISP_REG_RDMA_GLOBAL_CON, handle);
  392. mtk_ddp_write(comp, 0x0, DISP_REG_RDMA_INT_STATUS, handle);
  393. if (data && data->sodi_config)
  394. data->sodi_config(comp->mtk_crtc->base.dev, comp->id, handle,
  395. &en);
  396. ret = pm_runtime_put(comp->dev);
  397. if (ret < 0)
  398. DRM_ERROR("Failed to disable power domain: %d\n", ret);
  399. }
/* TODO RDMA1, wrot sram */
/*
 * Compute the RDMA golden settings into gs[] (indexed by enum GS_RDMA_FLD).
 *
 * The thresholds are derived from the FIFO consume rate
 * (width * height * refresh * bytes-per-pixel, scaled by 125/16 per 1000)
 * multiplied by per-SoC microsecond windows, divided by FP (=1000).
 * Values are written to hardware later by mtk_rdma_set_ultra_l().
 *
 * @comp: RDMA component (used for dual-pipe detection).
 * @cfg:  display configuration; cfg->p_golden_setting_context must be valid
 *        (checked by the caller, mtk_rdma_set_ultra_l()).
 * @gs:   output array of at least GS_RDMA_FLD_NUM entries.
 */
void mtk_rdma_cal_golden_setting(struct mtk_ddp_comp *comp,
				 struct mtk_ddp_config *cfg, unsigned int *gs)
{
	/* fixed variable */
	unsigned int mmsys_clk = 208;	/* MHz, assumed fixed clock */
	unsigned int FP = 1000;		/* fixed-point divisor for us math */
	unsigned int fifo_size = 2240;
	/* Per-SoC ultra/pre-ultra/urgent windows in microseconds. */
#if defined(CONFIG_MACH_MT6885) || defined(CONFIG_MACH_MT6893)
	unsigned int pre_ultra_low_us = 245, pre_ultra_high_us = 255;
	unsigned int ultra_low_us = 230, ultra_high_us = 245;
	unsigned int urgent_low_us = 113, urgent_high_us = 117;
#endif
#if defined(CONFIG_MACH_MT6873) || defined(CONFIG_MACH_MT6853) \
	|| defined(CONFIG_MACH_MT6833)
	unsigned int pre_ultra_low_us = 250, pre_ultra_high_us = 260;
	unsigned int ultra_low_us = 230, ultra_high_us = 250;
	unsigned int urgent_low_us = 110, urgent_high_us = 120;
#endif
	/* input variable */
	struct golden_setting_context *gsc = cfg->p_golden_setting_context;
	unsigned long long width = gsc->dst_width, height = gsc->dst_height;
	unsigned int Bpp;	/* bytes per pixel, from cfg->bpc */
	bool is_dc = gsc->is_dc;
	unsigned int if_fps = gsc->vrefresh;
	unsigned int fill_rate = 0; /* 100 times */
	unsigned long long consume_rate = 0; /* 100 times */

	/* Guard against a zero refresh rate; fall back to 60 Hz. */
	if (if_fps == 0) {
		DDPPR_ERR("%s invalid vrefresh %u\n",
			  __func__, if_fps);
		if_fps = 60;
	}
	/* Each pipe only handles half the width in dual-pipe mode. */
	if (comp->mtk_crtc->is_dual_pipe)
		width /= 2;
	switch (cfg->bpc) {
	case 8:
		/* 888 */
		Bpp = 3;
		break;
	case 5:
		/* 565 */
		Bpp = 2;
		break;
	case 6:
		/* 666 */
		Bpp = 3;
		break;
	default:
		/* 888 */
		Bpp = 3;
		break;
	}
	/* critical variable calc */
	if (is_dc)
		fill_rate = 96 * mmsys_clk; /* FIFO depth / us */
	else
		fill_rate = 96 * mmsys_clk * 3 / 16; /* FIFO depth / us */
	DDPINFO("%s,w:%llu,h:%llu,vrefresh:%d,bpc:%d,is_vdo:%d,is_dc:%d\n",
		__func__, width, height, if_fps, cfg->bpc,
		gsc->is_vdo_mode, gsc->is_dc);
	consume_rate = width * height * if_fps * Bpp;
	do_div(consume_rate, 1000);
	consume_rate *= 125;
	do_div(consume_rate, 16 * 1000);
	/* RDMA golden setting calculation */
	/* DISP_RDMA_MEM_GMC_SETTING_0 */
	gs[GS_RDMA_PRE_ULTRA_TH_LOW] =
		DIV_ROUND_UP(consume_rate * (pre_ultra_low_us), FP);
	gs[GS_RDMA_PRE_ULTRA_TH_HIGH] =
		DIV_ROUND_UP(consume_rate * (pre_ultra_high_us), FP);
	/* Video mode forces pre-ultra on VDE; command mode on valid-threshold. */
	if (gsc->is_vdo_mode) {
		gs[GS_RDMA_VALID_TH_FORCE_PRE_ULTRA] = 0;
		gs[GS_RDMA_VDE_FORCE_PRE_ULTRA] = 1;
	} else {
		gs[GS_RDMA_VALID_TH_FORCE_PRE_ULTRA] = 1;
		gs[GS_RDMA_VDE_FORCE_PRE_ULTRA] = 0;
	}
	/* DISP_RDMA_MEM_GMC_SETTING_1 */
	gs[GS_RDMA_ULTRA_TH_LOW] =
		DIV_ROUND_UP(consume_rate * (ultra_low_us), FP);
	gs[GS_RDMA_ULTRA_TH_HIGH] = gs[GS_RDMA_PRE_ULTRA_TH_LOW];
	if (gsc->is_vdo_mode) {
		gs[GS_RDMA_VALID_TH_BLOCK_ULTRA] = 0;
		gs[GS_RDMA_VDE_BLOCK_ULTRA] = 1;
	} else {
		gs[GS_RDMA_VALID_TH_BLOCK_ULTRA] = 1;
		gs[GS_RDMA_VDE_BLOCK_ULTRA] = 0;
	}
	/* DISP_RDMA_FIFO_CON */
	if (gsc->is_vdo_mode)
		gs[GS_RDMA_OUTPUT_VALID_FIFO_TH] = 0;
	else
		gs[GS_RDMA_OUTPUT_VALID_FIFO_TH] = gs[GS_RDMA_PRE_ULTRA_TH_LOW];
	gs[GS_RDMA_FIFO_SIZE] = fifo_size;
	gs[GS_RDMA_FIFO_UNDERFLOW_EN] = 1;
	/* DISP_RDMA_MEM_GMC_SETTING_2 */
	/* do not min this value with 256 to avoid hrt fail in
	 * dc mode under SODI CG mode
	 */
	gs[GS_RDMA_ISSUE_REQ_TH] =
		((gs[GS_RDMA_FIFO_SIZE] -
		  gs[GS_RDMA_PRE_ULTRA_TH_LOW]) >= 256) ? 256 :
		(gs[GS_RDMA_FIFO_SIZE] - gs[GS_RDMA_PRE_ULTRA_TH_LOW]);
	/* DISP_RDMA_THRESHOLD_FOR_SODI */
	gs[GS_RDMA_TH_LOW_FOR_SODI] =
		DIV_ROUND_UP(consume_rate * (ultra_low_us + 50), FP);
	gs[GS_RDMA_TH_HIGH_FOR_SODI] = DIV_ROUND_UP(
		gs[GS_RDMA_FIFO_SIZE] * FP - (fill_rate - consume_rate) * 12,
		FP);
	/* Clamp SODI high threshold into [pre-ultra high, fifo_size - 1]. */
	if (gs[GS_RDMA_TH_HIGH_FOR_SODI] < gs[GS_RDMA_PRE_ULTRA_TH_HIGH])
		gs[GS_RDMA_TH_HIGH_FOR_SODI] = gs[GS_RDMA_PRE_ULTRA_TH_HIGH];
	if (gs[GS_RDMA_TH_HIGH_FOR_SODI] >= gs[GS_RDMA_FIFO_SIZE])
		gs[GS_RDMA_TH_HIGH_FOR_SODI] = gs[GS_RDMA_FIFO_SIZE] - 1;
	/* DISP_RDMA_THRESHOLD_FOR_DVFS */
	gs[GS_RDMA_TH_LOW_FOR_DVFS] = gs[GS_RDMA_PRE_ULTRA_TH_LOW];
	gs[GS_RDMA_TH_HIGH_FOR_DVFS] = gs[GS_RDMA_PRE_ULTRA_TH_LOW] + 1;
	/* DISP_RDMA_SRAM_SEL */
	gs[GS_RDMA_SRAM_SEL] = 0;
	/* DISP_RDMA_DVFS_SETTING_PREULTRA */
	gs[GS_RDMA_DVFS_PRE_ULTRA_TH_LOW] =
		DIV_ROUND_UP(consume_rate * (pre_ultra_low_us + 40), FP);
	gs[GS_RDMA_DVFS_PRE_ULTRA_TH_HIGH] =
		DIV_ROUND_UP(consume_rate * (pre_ultra_high_us + 40), FP);
	/* DISP_RDMA_DVFS_SETTING_ULTRA */
	gs[GS_RDMA_DVFS_ULTRA_TH_LOW] =
		DIV_ROUND_UP(consume_rate * (ultra_low_us + 40), FP);
	gs[GS_RDMA_DVFS_ULTRA_TH_HIGH] = gs[GS_RDMA_DVFS_PRE_ULTRA_TH_LOW];
	/* DISP_RDMA_LEAVE_DRS_SETTING */
	gs[GS_RDMA_IS_DRS_STATUS_TH_LOW] =
		DIV_ROUND_UP(consume_rate * (pre_ultra_low_us + 20), FP);
	gs[GS_RDMA_IS_DRS_STATUS_TH_HIGH] =
		DIV_ROUND_UP(consume_rate * (pre_ultra_low_us + 20), FP);
	/* DISP_RDMA_ENTER_DRS_SETTING */
	gs[GS_RDMA_NOT_DRS_STATUS_TH_LOW] =
		DIV_ROUND_UP(consume_rate * (ultra_high_us + 40), FP);
	gs[GS_RDMA_NOT_DRS_STATUS_TH_HIGH] =
		DIV_ROUND_UP(consume_rate * (ultra_high_us + 40), FP);
	/* DISP_RDMA_MEM_GMC_SETTING_3 */
	gs[GS_RDMA_URGENT_TH_LOW] = DIV_ROUND_UP(consume_rate *
						 urgent_low_us, FP);
	gs[GS_RDMA_URGENT_TH_HIGH] = DIV_ROUND_UP(consume_rate *
						  urgent_high_us, FP);
	/* DISP_RDMA_GREQ_URG_NUM_SEL */
	gs[GS_RDMA_LAYER_SMI_ID_EN] = 1;
#if 0
	/* DISP_RDMA_SRAM_CASCADE */
	gs[GS_RDMA_SELF_FIFO_SIZE] = 1536;
	gs[GS_RDMA_RSZ_FIFO_SIZE] = 1536;
#endif
}
/* Set register with value from mtk_rdma_cal_golden_setting.
 * Do not do any math here!
 */
/*
 * Program the golden settings computed by mtk_rdma_cal_golden_setting()
 * into the RDMA registers via CMDQ.  Only supported for RDMA0/4/5.
 * Each cmdq_pkt_write below packs the low field into bits [15:0]-ish and
 * the high/flag fields into the upper bits, matching the register layout
 * documented by the REG_FLD_MSB_LSB macros at the top of this file.
 */
static void mtk_rdma_set_ultra_l(struct mtk_ddp_comp *comp,
				 struct mtk_ddp_config *cfg,
				 struct cmdq_pkt *handle)
{
	unsigned int gs[GS_RDMA_FLD_NUM] = {0};
	unsigned int val = 0;

	/* Golden settings only exist for the primary RDMA engines. */
	if ((comp->id != DDP_COMPONENT_RDMA0)
	    && (comp->id != DDP_COMPONENT_RDMA4)
	    && (comp->id != DDP_COMPONENT_RDMA5)) {
		DDPPR_ERR("unsupport golden setting, id:%d\n", comp->id);
		return;
	}
	if (!cfg->p_golden_setting_context) {
		DDPPR_ERR("golden setting is null, %s,%d\n", __FILE__,
			  __LINE__);
		return;
	}
	/* calculate golden setting */
	mtk_rdma_cal_golden_setting(comp, cfg, gs);
	/* set golden setting */
	/* MEM_GMC_S0: pre-ultra low/high + force-pre-ultra flags */
	val = gs[GS_RDMA_PRE_ULTRA_TH_LOW] +
	      (gs[GS_RDMA_PRE_ULTRA_TH_HIGH] << 16) +
	      (gs[GS_RDMA_VALID_TH_FORCE_PRE_ULTRA] << 30) +
	      (gs[GS_RDMA_VDE_FORCE_PRE_ULTRA] << 31);
	cmdq_pkt_write(handle, comp->cmdq_base,
		       comp->regs_pa + DISP_REG_RDMA_MEM_GMC_S0, val, ~0);
	/* MEM_GMC_S1: ultra low/high + block-ultra flags */
	val = gs[GS_RDMA_ULTRA_TH_LOW] + (gs[GS_RDMA_ULTRA_TH_HIGH] << 16) +
	      (gs[GS_RDMA_VALID_TH_BLOCK_ULTRA] << 30) +
	      (gs[GS_RDMA_VDE_BLOCK_ULTRA] << 31);
	cmdq_pkt_write(handle, comp->cmdq_base,
		       comp->regs_pa + DISP_REG_RDMA_MEM_GMC_S1, val, ~0);
	/* MEM_GMC_S2: issue-request threshold */
	val = gs[GS_RDMA_ISSUE_REQ_TH];
	cmdq_pkt_write(handle, comp->cmdq_base,
		       comp->regs_pa + DISP_REG_RDMA_MEM_GMC_S2, val,
		       ~0);
	/* FIFO_CON: output-valid threshold, pseudo size, underflow enable */
	val = gs[GS_RDMA_OUTPUT_VALID_FIFO_TH] + (gs[GS_RDMA_FIFO_SIZE] << 16) +
	      (gs[GS_RDMA_FIFO_UNDERFLOW_EN] << 31);
	cmdq_pkt_write(handle, comp->cmdq_base,
		       comp->regs_pa + DISP_REG_RDMA_FIFO_CON, val, ~0);
	/* SODI thresholds */
	val = gs[GS_RDMA_TH_LOW_FOR_SODI] +
	      (gs[GS_RDMA_TH_HIGH_FOR_SODI] << 16);
	cmdq_pkt_write(handle, comp->cmdq_base,
		       comp->regs_pa + DISP_REG_RDMA_THRESHOLD_FOR_SODI, val,
		       ~0);
	/* DVFS thresholds */
	val = gs[GS_RDMA_TH_LOW_FOR_DVFS] +
	      (gs[GS_RDMA_TH_HIGH_FOR_DVFS] << 16);
	cmdq_pkt_write(handle, comp->cmdq_base,
		       comp->regs_pa + DISP_REG_RDMA_THRESHOLD_FOR_DVFS, val,
		       ~0);
	/* SRAM selection */
	cmdq_pkt_write(handle, comp->cmdq_base,
		       comp->regs_pa + DISP_REG_RDMA_SRAM_SEL,
		       gs[GS_RDMA_SRAM_SEL], ~0);
	/* DVFS pre-ultra setting */
	val = gs[GS_RDMA_DVFS_PRE_ULTRA_TH_LOW] +
	      (gs[GS_RDMA_DVFS_PRE_ULTRA_TH_HIGH] << 16);
	cmdq_pkt_write(handle, comp->cmdq_base,
		       comp->regs_pa + DISP_REG_RDMA_DVFS_SETTING_PRE, val, ~0);
	/* DVFS ultra setting */
	val = gs[GS_RDMA_DVFS_ULTRA_TH_LOW] +
	      (gs[GS_RDMA_DVFS_ULTRA_TH_HIGH] << 16);
	cmdq_pkt_write(handle, comp->cmdq_base,
		       comp->regs_pa + DISP_REG_RDMA_DVFS_SETTING_ULTRA, val,
		       ~0);
	/* Leave-DRS thresholds */
	val = gs[GS_RDMA_IS_DRS_STATUS_TH_LOW] +
	      (gs[GS_RDMA_IS_DRS_STATUS_TH_HIGH] << 16);
	cmdq_pkt_write(handle, comp->cmdq_base,
		       comp->regs_pa + DISP_REG_RDMA_LEAVE_DRS_SETTING, val,
		       ~0);
	/* Enter-DRS thresholds */
	val = gs[GS_RDMA_NOT_DRS_STATUS_TH_LOW] +
	      (gs[GS_RDMA_NOT_DRS_STATUS_TH_HIGH] << 16);
	cmdq_pkt_write(handle, comp->cmdq_base,
		       comp->regs_pa + DISP_REG_RDMA_ENTER_DRS_SETTING, val,
		       ~0);
	/* MEM_GMC_S3: urgent thresholds */
	val = gs[GS_RDMA_URGENT_TH_LOW] + (gs[GS_RDMA_URGENT_TH_HIGH] << 16);
	cmdq_pkt_write(handle, comp->cmdq_base,
		       comp->regs_pa + DISP_REG_RDMA_MEM_GMC_S3, val,
		       ~0);
#if !defined(CONFIG_MACH_MT6833)
	/* GREQ_URG_NUM_SEL: layer SMI id enable (bit 29) */
	val = gs[GS_RDMA_LAYER_SMI_ID_EN] << 29;
	cmdq_pkt_write(handle, comp->cmdq_base,
		       comp->regs_pa + DISP_REG_RDMA_GREQ_URG_NUM_SEL, val,
		       REG_FLD_MASK(FLD_RG_LAYER_SMI_ID_EN));
#endif
	/*esd will wait this target line irq*/
	/* Target line at 80% of the frame height ((h << 3) / 10 == h * 8/10). */
	mtk_ddp_write(comp, (cfg->h << 3)/10,
		      DISP_REG_RDMA_TARGET_LINE, handle);
#if 0
	val = gs[GS_RDMA_SELF_FIFO_SIZE] + (gs[GS_RDMA_RSZ_FIFO_SIZE] << 16);
	cmdq_pkt_write(handle, comp->cmdq_base,
		       comp->regs_pa + DISP_RDMA_SRAM_CASCADE, val, ~0);
#endif
}
/*
 * Configure RDMA frame size and input mode.
 *
 * Programs the output width/height (halved width in dual-pipe mode), then
 * switches between memory mode (RDMA fetches a dummy buffer from DRAM) and
 * direct-link mode depending on *comp->comp_mode, and finally applies the
 * golden settings.
 */
static void mtk_rdma_config(struct mtk_ddp_comp *comp,
			    struct mtk_ddp_config *cfg, struct cmdq_pkt *handle)
{
#if 0
	unsigned long long threshold;
	unsigned int reg;
#endif
	unsigned int w;
	struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
	/*
	 * NOTE(review): comp->comp_mode is dereferenced below without a NULL
	 * check -- confirm all callers guarantee it is set.
	 */
	bool *rdma_memory_mode = comp->comp_mode;

	//for dual pipe one layer
	if (comp->mtk_crtc->is_dual_pipe) {
		w = cfg->w / 2;
		DDPFUNC();
	} else
		w = cfg->w;
	cmdq_pkt_write(handle, comp->cmdq_base,
		       comp->regs_pa + DISP_REG_RDMA_SIZE_CON_0, w,
		       0x1fff);
	cmdq_pkt_write(handle, comp->cmdq_base,
		       comp->regs_pa + DISP_REG_RDMA_SIZE_CON_1, cfg->h,
		       0xfffff);
	if (*rdma_memory_mode == true) {
		/* Memory mode: remember dummy-buffer geometry and point RDMA
		 * at DRAM (pitch + mode bit); the address is set elsewhere.
		 */
		rdma->dummy_w = w;
		rdma->dummy_h = cfg->h;
		mtk_ddp_write_mask(comp, MATRIX_INT_MTX_SEL_DEFAULT,
				   DISP_REG_RDMA_SIZE_CON_0, 0xff0000, handle);
		mtk_ddp_write_relaxed(comp, RDMA_DUMMY_BUFFER_PITCH(w),
				      DISP_RDMA_MEM_SRC_PITCH, handle);
		mtk_ddp_write_mask(comp, RDMA_MODE_MEMORY,
				   DISP_REG_RDMA_GLOBAL_CON, RDMA_MODE_MEMORY,
				   handle);
	} else {
		/* Direct-link mode: clear all memory-mode state. */
		mtk_ddp_write_mask(comp, 0, DISP_REG_RDMA_SIZE_CON_0, 0xff0000,
				   handle);
		mtk_ddp_write_relaxed(comp, 0, DISP_RDMA_MEM_SRC_PITCH, handle);
		mtk_ddp_write_relaxed(comp, 0, DISP_REG_RDMA_MEM_CON, handle);
		mtk_ddp_write_mask(comp, 0, DISP_REG_RDMA_GLOBAL_CON,
				   RDMA_MODE_MEMORY, handle);
		mtk_ddp_write_relaxed(comp, 0, DISP_RDMA_MEM_START_ADDR,
				      handle);
	}
#if 0
	/*
	 * Enable FIFO underflow since DSI and DPI can't be blocked.
	 * Keep the FIFO pseudo size reset default of 8 KiB. Set the
	 * output threshold to 6 microseconds with 7/6 overhead to
	 * account for blanking, and with a pixel depth of 4 bytes:
	 */
	threshold = div_u64((unsigned long long)cfg->w * cfg->h *
				    cfg->vrefresh * 4 * 7,
			    1000000);
	reg = RDMA_FIFO_UNDERFLOW_EN |
	      RDMA_FIFO_PSEUDO_SIZE(RDMA_FIFO_SIZE(rdma)) |
	      RDMA_OUTPUT_VALID_FIFO_THRESHOLD(threshold);
	cmdq_pkt_write(handle, comp->cmdq_base,
		       comp->regs_pa + DISP_REG_RDMA_FIFO_CON, reg, ~0);
#endif
	mtk_rdma_set_ultra_l(comp, cfg, handle);
}
  703. static void mtk_rdma_backup_info_cmp(struct mtk_ddp_comp *comp, bool *compare)
  704. {
  705. struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
  706. void __iomem *baddr = comp->regs;
  707. unsigned long addr;
  708. addr = readl(DISP_REG_RDMA_MEM_START_ADDR + baddr);
  709. if (addr == 0 || (addr != 0 && rdma->backup_info.addr != addr))
  710. *compare = 1;
  711. else
  712. *compare = 0;
  713. rdma->backup_info.addr = addr;
  714. }
/*
 * mtk_rdma_io_cmd - handle ddp I/O commands addressed to the RDMA engine
 * @comp:   RDMA component the command applies to
 * @handle: cmdq packet for commands that write registers
 * @io_cmd: which operation to perform
 * @params: command-specific payload (see each case)
 *
 * Return: 0 for most commands; RDMA_REQ_HRT from PMQOS_SET_HRT_BW when the
 * component runs in memory mode.  Unknown commands are ignored.
 */
static int mtk_rdma_io_cmd(struct mtk_ddp_comp *comp, struct cmdq_pkt *handle,
	enum mtk_ddp_io_cmd io_cmd, void *params)
{
	int ret = 0;

	switch (io_cmd) {
	case MTK_IO_CMD_RDMA_GOLDEN_SETTING: {
		/* params is a struct mtk_ddp_config *: re-apply golden settings. */
		struct mtk_ddp_config *cfg;

		cfg = (struct mtk_ddp_config *)params;
		mtk_rdma_set_ultra_l(comp, cfg, handle);
		break;
	}
	case IRQ_LEVEL_ALL: {
		/* Enable the full set of RDMA interrupts. */
		unsigned int inten;

		inten = RDMA_FRAME_START_INT | RDMA_FRAME_END_INT |
			RDMA_EOF_ABNORMAL_INT | RDMA_FIFO_UNDERFLOW_INT |
			RDMA_TARGET_LINE_INT;
		cmdq_pkt_write(handle, comp->cmdq_base,
			comp->regs_pa + DISP_REG_RDMA_INT_ENABLE, inten,
			inten);
		break;
	}
	case IRQ_LEVEL_IDLE: {
		/* Mask the listed interrupts while the display path is idle. */
		unsigned int inten;

		inten = RDMA_REG_UPDATE_INT | RDMA_FRAME_START_INT |
			RDMA_FRAME_END_INT | RDMA_TARGET_LINE_INT;
		cmdq_pkt_write(handle, comp->cmdq_base,
			comp->regs_pa + DISP_REG_RDMA_INT_ENABLE, 0,
			inten);
		break;
	}
#ifdef MTK_FB_MMDVFS_SUPPORT
	case PMQOS_SET_HRT_BW: {
		/*
		 * params is a u32 bandwidth value.  In memory mode the output
		 * component recomputes it from its data rate, and RDMA_REQ_HRT
		 * is returned to signal that this component requested HRT BW.
		 */
		bool *rdma_memory_mode = comp->comp_mode;
		u32 bw_val = *(unsigned int *)params;
		struct mtk_ddp_comp *output_comp;

		output_comp = mtk_ddp_comp_request_output(comp->mtk_crtc);
		if (*rdma_memory_mode == true) {
			if (output_comp)
				mtk_ddp_comp_io_cmd(output_comp, NULL,
					GET_FRAME_HRT_BW_BY_DATARATE, &bw_val);
			ret = RDMA_REQ_HRT;
		}
		__mtk_disp_set_module_hrt(&comp->hrt_qos_req, bw_val);
		break;
	}
#endif
	case BACKUP_INFO_CMP: {
		/* params is a bool *: set when the fetch address changed. */
		mtk_rdma_backup_info_cmp(comp, params);
		break;
	}
	default:
		break;
	}
	return ret;
}
/*
 * mtk_rdma_dump_golden_setting - print RDMA golden-setting registers
 * @comp: RDMA component whose registers are read
 *
 * Dumps the raw register words first, then decodes the individual bit
 * fields of each golden-setting register for readability.  Output strings
 * and read order are debug ABI of a sort (parsed from logs) and should not
 * be changed casually.
 */
void mtk_rdma_dump_golden_setting(struct mtk_ddp_comp *comp)
{
	void __iomem *baddr = comp->regs;
	unsigned int value;

	DDPDUMP("-- %s Golden Setting --\n", mtk_dump_comp_str(comp));
	/* Raw dump: offset/value pairs for the golden-setting registers. */
	DDPDUMP("0x%03x:0x%08x 0x%03x:0x%08x 0x%03x:0x%08x 0x%03x:0x%08x\n",
		0x30, readl(DISP_REG_RDMA_MEM_GMC_S0 + baddr),
		0x34, readl(DISP_REG_RDMA_MEM_GMC_S1 + baddr),
		0x3c, readl(DISP_REG_RDMA_MEM_GMC_S2 + baddr),
		0x40, readl(DISP_REG_RDMA_FIFO_CON + baddr));
	DDPDUMP("0x%03x:0x%08x 0x%03x:0x%08x 0x%03x:0x%08x 0x%03x:0x%08x\n",
		0xa8, readl(DISP_REG_RDMA_THRESHOLD_FOR_SODI + baddr),
		0xac, readl(DISP_REG_RDMA_THRESHOLD_FOR_DVFS + baddr),
		0xb0, readl(DISP_REG_RDMA_SRAM_SEL + baddr),
		0xc8, readl(DISP_RDMA_SRAM_CASCADE + baddr));
	DDPDUMP("0x%03x:0x%08x 0x%08x 0x%08x 0x%08x\n",
		0xd0, readl(DISP_REG_RDMA_DVFS_SETTING_PRE + baddr),
		readl(DISP_REG_RDMA_DVFS_SETTING_ULTRA + baddr),
		readl(DISP_REG_RDMA_LEAVE_DRS_SETTING + baddr),
		readl(DISP_REG_RDMA_ENTER_DRS_SETTING + baddr));
#if !defined(CONFIG_MACH_MT6833)
	/* GREQ_URG_NUM_SEL does not exist on MT6833. */
	DDPDUMP("0x%03x:0x%08x 0x%03x:0x%08x\n",
		0xe8, readl(DISP_REG_RDMA_MEM_GMC_S3 + baddr),
		0x1a8, readl(DISP_REG_RDMA_GREQ_URG_NUM_SEL + baddr));
#else
	DDPDUMP("0x%03x:0x%08x\n",
		0xe8, readl(DISP_REG_RDMA_MEM_GMC_S3 + baddr));
#endif
	/* Field-by-field decode of each register follows. */
	value = readl(DISP_REG_RDMA_MEM_GMC_S0 + baddr);
	DDPDUMP("GMC_SETTING_0 [11:0]:%u [27:16]:%u [30]:%u [31]:%u\n",
		REG_FLD_VAL_GET(
			MEM_GMC_S0_FLD_PRE_ULTRA_THRESHOLD_LOW, value),
		REG_FLD_VAL_GET(
			MEM_GMC_S0_FLD_PRE_ULTRA_THRESHOLD_HIGH, value),
		REG_FLD_VAL_GET(
			MEM_GMC_S0_FLD_RG_VALID_THRESHOLD_FORCE_PREULTRA,
			value),
		REG_FLD_VAL_GET(
			MEM_GMC_S0_FLD_RG_VDE_FORCE_PREULTRA, value));
	value = readl(DISP_REG_RDMA_MEM_GMC_S1 + baddr);
	DDPDUMP("GMC_SETTING_1 [11:0]:%u [27:16]:%u [30]:%u [31]:%u\n",
		REG_FLD_VAL_GET(MEM_GMC_S1_FLD_ULTRA_THRESHOLD_LOW, value),
		REG_FLD_VAL_GET(MEM_GMC_S1_FLD_ULTRA_THRESHOLD_HIGH, value),
		REG_FLD_VAL_GET(
			MEM_GMC_S1_FLD_RG_VALID_THRESHOLD_BLOCK_ULTRA, value),
		REG_FLD_VAL_GET(
			MEM_GMC_S1_FLD_RG_VDE_BLOCK_ULTRA, value));
	value = readl(DISP_REG_RDMA_MEM_GMC_S2 + baddr);
	DDPDUMP("GMC_SETTING_2 [11:0]:%u\n",
		REG_FLD_VAL_GET(MEM_GMC_S2_FLD_ISSUE_REQ_THRESHOLD, value));
	value = readl(DISP_REG_RDMA_FIFO_CON + baddr);
	DDPDUMP("FIFO_CON [11:0]:%u [27:16]:%d [31]:%u\n",
		REG_FLD_VAL_GET(
			FIFO_CON_FLD_OUTPUT_VALID_FIFO_THRESHOLD, value),
		REG_FLD_VAL_GET(FIFO_CON_FLD_FIFO_PSEUDO_SIZE, value),
		REG_FLD_VAL_GET(FIFO_CON_FLD_FIFO_UNDERFLOW_EN, value));
	value = readl(DISP_REG_RDMA_THRESHOLD_FOR_SODI + baddr);
	DDPDUMP("THRSHOLD_SODI [11:0]:%u [27:16]:%u\n",
		REG_FLD_VAL_GET(RDMA_THRESHOLD_FOR_SODI_FLD_LOW, value),
		REG_FLD_VAL_GET(RDMA_THRESHOLD_FOR_SODI_FLD_HIGH, value));
	value = readl(DISP_REG_RDMA_THRESHOLD_FOR_DVFS + baddr);
	DDPDUMP("THRSHOLD_DVFS [11:0]:%u [27:16]:%u\n",
		REG_FLD_VAL_GET(RDMA_THRESHOLD_FOR_DVFS_FLD_LOW, value),
		REG_FLD_VAL_GET(RDMA_THRESHOLD_FOR_DVFS_FLD_HIGH, value));
	/* NOTE(review): prints the whole SRAM_SEL word, not just bit 0. */
	DDPDUMP("SRAM_SEL [0]:%u\n", readl(DISP_REG_RDMA_SRAM_SEL + baddr));
#if 0
	value = readl(DISP_RDMA_SRAM_CASCADE + baddr);
	DDPDUMP("SRAM_CASCADE [13:0]:%u [27:16]:%u\n",
		REG_FLD_VAL_GET(RG_DISP_RDMA_FIFO_SIZE, value),
		REG_FLD_VAL_GET(RG_DISP_RDMA_RSZ_FIFO_SIZE, value));
#endif
	value = readl(DISP_REG_RDMA_DVFS_SETTING_PRE + baddr);
	DDPDUMP("DVFS_SETTING_PREULTRA [11:0]:%u [27:16]:%u\n",
		REG_FLD_VAL_GET(RG_DVFS_PRE_ULTRA_THRESHOLD_LOW, value),
		REG_FLD_VAL_GET(RG_DVFS_PRE_ULTRA_THRESHOLD_HIGH, value));
	value = readl(DISP_REG_RDMA_DVFS_SETTING_ULTRA + baddr);
	DDPDUMP("DVFS_SETTING_ULTRA [11:0]:%u [27:16]:%u\n",
		REG_FLD_VAL_GET(RG_DVFS_ULTRA_THRESHOLD_LOW, value),
		REG_FLD_VAL_GET(RG_DVFS_ULTRA_THRESHOLD_HIGH, value));
	value = readl(DISP_REG_RDMA_LEAVE_DRS_SETTING + baddr);
	DDPDUMP("LEAVE_DRS_SETTING [11:0]:%u [27:16]:%u\n",
		REG_FLD_VAL_GET(RG_IS_DRS_STATUS_THRESHOLD_LOW, value),
		REG_FLD_VAL_GET(RG_IS_DRS_STATUS_THRESHOLD_HIGH, value));
	value = readl(DISP_REG_RDMA_ENTER_DRS_SETTING + baddr);
	DDPDUMP("ENTER_DRS_SETTING [11:0]:%u [27:16]:%u\n",
		REG_FLD_VAL_GET(RG_NOT_DRS_STATUS_THRESHOLD_LOW, value),
		REG_FLD_VAL_GET(RG_NOT_DRS_STATUS_THRESHOLD_HIGH, value));
	value = readl(DISP_REG_RDMA_MEM_GMC_S3 + baddr);
	DDPDUMP("GMC_SETTING_3 [11:0]:%u [27:16]:%u\n",
		REG_FLD_VAL_GET(FLD_LOW_FOR_URGENT, value),
		REG_FLD_VAL_GET(FLD_HIGH_FOR_URGENT, value));
#if !defined(CONFIG_MACH_MT6833)
	value = readl(DISP_REG_RDMA_GREQ_URG_NUM_SEL + baddr);
	DDPDUMP("GREQ URG NUM SEL [29:29]: %u\n",
		REG_FLD_VAL_GET(FLD_RG_LAYER_SMI_ID_EN, value));
#endif
}
  867. int mtk_rdma_dump(struct mtk_ddp_comp *comp)
  868. {
  869. void __iomem *baddr = comp->regs;
  870. DDPDUMP("== %s REGS ==\n", mtk_dump_comp_str(comp));
  871. if (mtk_ddp_comp_helper_get_opt(comp,
  872. MTK_DRM_OPT_REG_PARSER_RAW_DUMP)) {
  873. unsigned int i = 0;
  874. for (i = 0; i < 0x200; i += 0x10)
  875. mtk_serial_dump_reg(baddr, i, 4);
  876. } else {
  877. DDPDUMP("(0x000)R_INTEN=0x%x\n",
  878. readl(DISP_REG_RDMA_INT_ENABLE + baddr));
  879. DDPDUMP("(0x004)R_INTS=0x%x\n",
  880. readl(DISP_REG_RDMA_INT_STATUS + baddr));
  881. DDPDUMP("(0x010)R_CON=0x%x\n",
  882. readl(DISP_REG_RDMA_GLOBAL_CON + baddr));
  883. DDPDUMP("(0x014)R_SIZE0=0x%x\n",
  884. readl(DISP_REG_RDMA_SIZE_CON_0 + baddr));
  885. DDPDUMP("(0x018)R_SIZE1=0x%x\n",
  886. readl(DISP_REG_RDMA_SIZE_CON_1 + baddr));
  887. DDPDUMP("(0x01c)R_TAR_LINE=0x%x\n",
  888. readl(DISP_REG_RDMA_TARGET_LINE + baddr));
  889. DDPDUMP("(0x024)R_M_CON=0x%x\n",
  890. readl(DISP_REG_RDMA_MEM_CON + baddr));
  891. DDPDUMP("(0xf00)R_M_S_ADDR=0x%x\n",
  892. readl(DISP_REG_RDMA_MEM_START_ADDR + baddr));
  893. DDPDUMP("(0x02c)R_M_SRC_PITCH=0x%x\n",
  894. readl(DISP_REG_RDMA_MEM_SRC_PITCH + baddr));
  895. DDPDUMP("(0x030)R_M_GMC_SET0=0x%x\n",
  896. readl(DISP_REG_RDMA_MEM_GMC_S0 + baddr));
  897. DDPDUMP("(0x034)R_M_GMC_SET1=0x%x\n",
  898. readl(DISP_REG_RDMA_MEM_GMC_S1 + baddr));
  899. DDPDUMP("(0x038)R_M_SLOW_CON=0x%x\n",
  900. readl(DISP_REG_RDMA_MEM_SLOW_CON + baddr));
  901. DDPDUMP("(0x03c)R_M_GMC_SET2=0x%x\n",
  902. readl(DISP_REG_RDMA_MEM_GMC_S2 + baddr));
  903. DDPDUMP("(0x040)R_FIFO_CON=0x%x\n",
  904. readl(DISP_REG_RDMA_FIFO_CON + baddr));
  905. DDPDUMP("(0x044)R_FIFO_LOG=0x%x\n",
  906. readl(DISP_REG_RDMA_FIFO_LOG + baddr));
  907. DDPDUMP("(0x078)R_PRE_ADD0=0x%x\n",
  908. readl(DISP_REG_RDMA_PRE_ADD_0 + baddr));
  909. DDPDUMP("(0x07c)R_PRE_ADD1=0x%x\n",
  910. readl(DISP_REG_RDMA_PRE_ADD_1 + baddr));
  911. DDPDUMP("(0x080)R_PRE_ADD2=0x%x\n",
  912. readl(DISP_REG_RDMA_PRE_ADD_2 + baddr));
  913. DDPDUMP("(0x084)R_POST_ADD0=0x%x\n",
  914. readl(DISP_REG_RDMA_POST_ADD_0 + baddr));
  915. DDPDUMP("(0x088)R_POST_ADD1=0x%x\n",
  916. readl(DISP_REG_RDMA_POST_ADD_1 + baddr));
  917. DDPDUMP("(0x08c)R_POST_ADD2=0x%x\n",
  918. readl(DISP_REG_RDMA_POST_ADD_2 + baddr));
  919. DDPDUMP("(0x090)R_DUMMY=0x%x\n",
  920. readl(DISP_REG_RDMA_DUMMY + baddr));
  921. DDPDUMP("(0x094)R_OUT_SEL=0x%x\n",
  922. readl(DISP_REG_RDMA_DEBUG_OUT_SEL + baddr));
  923. DDPDUMP("(0xf00)R_M_START=0x%x\n",
  924. readl(DISP_REG_RDMA_MEM_START_ADDR + baddr));
  925. DDPDUMP("(0x0a0)R_BG_CON_0=0x%x\n",
  926. readl(DISP_REG_RDMA_BG_CON_0 + baddr));
  927. DDPDUMP("(0x0a4)R_BG_CON_1=0x%x\n",
  928. readl(DISP_REG_RDMA_BG_CON_1 + baddr));
  929. DDPDUMP("(0x0a8)R_FOR_SODI=0x%x\n",
  930. readl(DISP_REG_RDMA_THRESHOLD_FOR_SODI + baddr));
  931. DDPDUMP("(0x0ac)R_FOR_DVFS=0x%x\n",
  932. readl(DISP_REG_RDMA_THRESHOLD_FOR_DVFS + baddr));
  933. DDPDUMP("(0x0b0)R_FOR_SRAM=0x%x\n",
  934. readl(DISP_REG_RDMA_SRAM_SEL + baddr));
  935. DDPDUMP("(0x0b4)DISP_REG_RDMA_STALL_CG_CON=0x%x\n",
  936. readl(DISP_REG_RDMA_STALL_CG_CON + baddr));
  937. #if defined(CONFIG_MACH_MT6885) || defined(CONFIG_MACH_MT6893)
  938. DDPDUMP("(0x0b8)DISP_REG_RDMA_SHADOW_UPDATE=0x%x\n",
  939. readl(DISP_REG_RDMA_SHADOW_UPDATE + baddr));
  940. #elif defined(CONFIG_MACH_MT6873) || defined(CONFIG_MACH_MT6853)
  941. DDPDUMP("(0x0bc)DISP_REG_RDMA_SHADOW_UPDATE=0x%x\n",
  942. readl(DISP_REG_RDMA_SHADOW_UPDATE + baddr));
  943. #endif
  944. DDPDUMP("(0x0c8)DISP_RDMA_SRAM_CASCADE=0x%x\n",
  945. readl(DISP_RDMA_SRAM_CASCADE + baddr));
  946. DDPDUMP("(0x0d0)DISP_REG_RDMA_DVFS_SETTING_PRE=0x%x\n",
  947. readl(DISP_REG_RDMA_DVFS_SETTING_PRE + baddr));
  948. DDPDUMP("(0x0d4)DISP_REG_RDMA_DVFS_SETTING_ULTRA=0x%x\n",
  949. readl(DISP_REG_RDMA_DVFS_SETTING_ULTRA + baddr));
  950. DDPDUMP("(0x0d8)DISP_REG_RDMA_LEAVE_DRS_SETTING=0x%x\n",
  951. readl(DISP_REG_RDMA_LEAVE_DRS_SETTING + baddr));
  952. DDPDUMP("(0x0dc)DISP_REG_RDMA_ENTER_DRS_SETTING=0x%x\n",
  953. readl(DISP_REG_RDMA_ENTER_DRS_SETTING + baddr));
  954. DDPDUMP("(0x0e0)DISP_REG_RDMA_CROP_CON_0=0x%x\n",
  955. readl(DISP_REG_RDMA_CROP_CON_1 + baddr));
  956. DDPDUMP("(0x0e4)DISP_REG_RDMA_CROP_CON_1=0x%x\n",
  957. readl(DISP_REG_RDMA_CROP_CON_0 + baddr));
  958. DDPDUMP("(0x0e8)DISP_REG_RDMA_MEM_GMC_SETTING_3=0x%x\n",
  959. readl(DISP_REG_RDMA_MEM_GMC_S3 + baddr));
  960. DDPDUMP("(0x0f0)R_IN_PXL_CNT=0x%x\n",
  961. readl(DISP_REG_RDMA_IN_P_CNT + baddr));
  962. DDPDUMP("(0x0f4)R_IN_LINE_CNT=0x%x\n",
  963. readl(DISP_REG_RDMA_IN_LINE_CNT + baddr));
  964. DDPDUMP("(0x0f8)R_OUT_PXL_CNT=0x%x\n",
  965. readl(DISP_REG_RDMA_OUT_P_CNT + baddr));
  966. DDPDUMP("(0x0fc)R_OUT_LINE_CNT=0x%x\n",
  967. readl(DISP_REG_RDMA_OUT_LINE_CNT + baddr));
  968. DDPDUMP("(0x100)0x%x\n", readl(DISP_REG_RDMA_DBG_OUT + baddr));
  969. DDPDUMP("(0x10c)0x%x\n", readl(DISP_REG_RDMA_DBG_OUT1 + baddr));
  970. DDPDUMP("(0x110)0x%x\n", readl(DISP_REG_RDMA_DBG_OUT2 + baddr));
  971. DDPDUMP("(0x114)0x%x\n", readl(DISP_REG_RDMA_DBG_OUT3 + baddr));
  972. DDPDUMP("(0x118)0x%x\n", readl(DISP_REG_RDMA_DBG_OUT4 + baddr));
  973. DDPDUMP("(0x11c)0x%x\n", readl(DISP_REG_RDMA_DBG_OUT5 + baddr));
  974. }
  975. mtk_rdma_dump_golden_setting(comp);
  976. return 0;
  977. }
/*
 * mtk_rdma_analysis - print a condensed, human-oriented RDMA status summary
 * @comp: RDMA component whose registers are read
 *
 * Unlike mtk_rdma_dump(), this decodes a few key registers (mode, size,
 * FIFO, in/out pixel positions, background padding) into one short report.
 *
 * Return: always 0.
 */
int mtk_rdma_analysis(struct mtk_ddp_comp *comp)
{
	void __iomem *baddr = comp->regs;
	unsigned int global_ctrl;
	unsigned int bg0 = readl(baddr + DISP_REG_RDMA_BG_CON_0);
	unsigned int bg1 = readl(baddr + DISP_REG_RDMA_BG_CON_1);
	unsigned int fifo = readl(baddr + DISP_REG_RDMA_FIFO_CON);

	global_ctrl = readl(DISP_REG_RDMA_GLOBAL_CON + baddr);
	DDPDUMP("== %s ANALYSIS ==\n", mtk_dump_comp_str(comp));
	DDPDUMP("en=%d,mode:%s,smi_busy:%d\n",
		REG_FLD_VAL_GET(GLOBAL_CON_FLD_ENGINE_EN, global_ctrl),
		REG_FLD_VAL_GET(GLOBAL_CON_FLD_MODE_SEL, global_ctrl)
		? "mem" : "DL",
		REG_FLD_VAL_GET(GLOBAL_CON_FLD_SMI_BUSY, global_ctrl));
	DDPDUMP("wh(%dx%d),pitch=%d,addr=0x%08x\n",
		readl(DISP_REG_RDMA_SIZE_CON_0 + baddr) & 0xfff,
		readl(DISP_REG_RDMA_SIZE_CON_1 + baddr) & 0xfffff,
		readl(DISP_REG_RDMA_MEM_SRC_PITCH + baddr),
		readl(DISP_REG_RDMA_MEM_START_ADDR + baddr));
	DDPDUMP("fifo_sz=%u,output_valid_threshold=%u,fifo_min=%d\n",
#if 0 /* TODO */
	/*
	 * NOTE(review): if this disabled argument is ever re-enabled, the
	 * format string above must gain a matching %s specifier.
	 */
		unified_color_fmt_name(display_fmt_reg_to_unified_fmt(
			(readl(DISP_REG_RDMA_MEM_CON +
			baddr) >> 4) & 0xf,
			(readl(DISP_REG_RDMA_MEM_CON +
			baddr) >> 8) & 0x1, 0)),
#endif
		REG_FLD_VAL_GET(FIFO_CON_FLD_FIFO_PSEUDO_SIZE, fifo),
		REG_FLD_VAL_GET(FIFO_CON_FLD_OUTPUT_VALID_FIFO_THRESHOLD, fifo),
		readl(DISP_REG_RDMA_FIFO_LOG + baddr));
	DDPDUMP("pos:in(%d,%d)out(%d,%d),bg(t%d,b%d,l%d,r%d)\n",
		readl(DISP_REG_RDMA_IN_P_CNT + baddr),
		readl(DISP_REG_RDMA_IN_LINE_CNT + baddr),
		readl(DISP_REG_RDMA_OUT_P_CNT + baddr),
		readl(DISP_REG_RDMA_OUT_LINE_CNT + baddr),
		REG_FLD_VAL_GET(RDMA_BG_CON_1_TOP, bg1),
		REG_FLD_VAL_GET(RDMA_BG_CON_1_BOTTOM, bg1),
		REG_FLD_VAL_GET(RDMA_BG_CON_0_LEFT, bg0),
		REG_FLD_VAL_GET(RDMA_BG_CON_0_RIGHT, bg0));
#if 0 /* TODO */
	DDPDUMP("irq cnt:start=%d,end=%d,underflow=%d,targetline=%d\n",
		rdma_start_irq_cnt[idx], rdma_done_irq_cnt[idx],
		rdma_underflow_irq_cnt[idx], rdma_targetline_irq_cnt[idx]);
#endif
	return 0;
}
/*
 * mtk_rdma_prepare - power/clock on the RDMA engine and set shadow mode
 * @comp: RDMA component being prepared
 *
 * Enables the component clock, then configures the shadow-register mode:
 * with CONFIG_DRM_MTK_SHADOW_REGISTER_SUPPORT the per-chip capability flag
 * (rdma->data->support_shadow) decides between enabling and bypassing the
 * shadow registers; without it, MT6873/MT6853 unconditionally bypass them.
 */
static void mtk_rdma_prepare(struct mtk_ddp_comp *comp)
{
#if defined(CONFIG_DRM_MTK_SHADOW_REGISTER_SUPPORT)
	struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
#endif

	mtk_ddp_comp_clk_prepare(comp);
#if defined(CONFIG_DRM_MTK_SHADOW_REGISTER_SUPPORT)
	if (rdma->data->support_shadow) {
		/* Enable shadow register and read shadow register */
		mtk_ddp_write_mask_cpu(comp, 0x0,
			DISP_REG_RDMA_SHADOW_UPDATE, RDMA_BYPASS_SHADOW);
	} else {
		/* Bypass shadow register and read shadow register */
		mtk_ddp_write_mask_cpu(comp, RDMA_BYPASS_SHADOW,
			DISP_REG_RDMA_SHADOW_UPDATE, RDMA_BYPASS_SHADOW);
	}
#else
#if defined(CONFIG_MACH_MT6873) || defined(CONFIG_MACH_MT6853)
	/* Bypass shadow register and read shadow register */
	mtk_ddp_write_mask_cpu(comp, RDMA_BYPASS_SHADOW,
		DISP_REG_RDMA_SHADOW_UPDATE, RDMA_BYPASS_SHADOW);
#endif
#endif
}
/*
 * mtk_rdma_unprepare - release the clock taken in mtk_rdma_prepare()
 * @comp: RDMA component being shut down
 */
static void mtk_rdma_unprepare(struct mtk_ddp_comp *comp)
{
	mtk_ddp_comp_clk_unprepare(comp);
}
  1052. static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma,
  1053. unsigned int fmt)
  1054. {
  1055. switch (fmt) {
  1056. default:
  1057. case DRM_FORMAT_RGB565:
  1058. return MEM_MODE_INPUT_FORMAT_RGB565;
  1059. case DRM_FORMAT_BGR565:
  1060. return MEM_MODE_INPUT_FORMAT_RGB565 | MEM_MODE_INPUT_SWAP;
  1061. case DRM_FORMAT_RGB888:
  1062. return MEM_MODE_INPUT_FORMAT_RGB888;
  1063. case DRM_FORMAT_BGR888:
  1064. return MEM_MODE_INPUT_FORMAT_RGB888 | MEM_MODE_INPUT_SWAP;
  1065. case DRM_FORMAT_RGBX8888:
  1066. case DRM_FORMAT_RGBA8888:
  1067. return MEM_MODE_INPUT_FORMAT_ARGB8888;
  1068. case DRM_FORMAT_BGRX8888:
  1069. case DRM_FORMAT_BGRA8888:
  1070. return MEM_MODE_INPUT_FORMAT_ARGB8888 | MEM_MODE_INPUT_SWAP;
  1071. case DRM_FORMAT_XRGB8888:
  1072. case DRM_FORMAT_ARGB8888:
  1073. return MEM_MODE_INPUT_FORMAT_RGBA8888;
  1074. case DRM_FORMAT_XBGR8888:
  1075. case DRM_FORMAT_ABGR8888:
  1076. return MEM_MODE_INPUT_FORMAT_RGBA8888 | MEM_MODE_INPUT_SWAP;
  1077. case DRM_FORMAT_UYVY:
  1078. return MEM_MODE_INPUT_FORMAT_UYVY;
  1079. case DRM_FORMAT_YUYV:
  1080. return MEM_MODE_INPUT_FORMAT_YUYV;
  1081. }
  1082. }
/*
 * mtk_rdma_layer_config - program one plane's buffer into the RDMA engine
 * @comp:   RDMA component to program
 * @idx:    plane index (unused here)
 * @state:  plane state carrying the pending buffer address/pitch/format
 * @handle: cmdq packet receiving the register writes
 *
 * Converts the DRM format to the MEM_CON encoding, enables the internal
 * YUV->RGB matrix for packed-YUV formats, then programs address and pitch.
 * The applied values are cached in rdma->cfg_info for MMPathTraceRDMA().
 */
static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
	struct mtk_plane_state *state,
	struct cmdq_pkt *handle)
{
	struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
	struct mtk_rdma_cfg_info *cfg_info = &rdma->cfg_info;
	struct mtk_plane_pending_state *pending = &state->pending;
	/*
	 * NOTE(review): addr is narrowed to 32 bits here — confirm
	 * pending->addr can never exceed the 32-bit register range.
	 */
	unsigned int addr = pending->addr;
	unsigned int pitch = pending->pitch & 0xffff;
	unsigned int fmt = pending->format;
	unsigned int con;

	/* Nothing to fetch for a zero-sized plane. */
	if (pending->height == 0u || pending->width == 0u)
		return;

	DDPINFO("%s addr: 0x%x\n", __func__, addr);
	con = rdma_fmt_convert(rdma, fmt);
	mtk_ddp_write_relaxed(comp, con, DISP_RDMA_MEM_CON, handle);
	/* Packed YUV inputs need the internal color matrix enabled. */
	if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV)
		mtk_ddp_write_mask(comp,
			RDMA_MATRIX_ENABLE | RDMA_MATRIX_INT_MTX_SEL,
			DISP_REG_RDMA_SIZE_CON_0, 0xff0000, handle);
	else
		mtk_ddp_write_mask(comp, MATRIX_INT_MTX_SEL_DEFAULT,
			DISP_REG_RDMA_SIZE_CON_0, 0xff0000, handle);
	mtk_ddp_write_relaxed(comp, addr, DISP_RDMA_MEM_START_ADDR, handle);
	mtk_ddp_write_relaxed(comp, pitch, DISP_RDMA_MEM_SRC_PITCH, handle);

	/* Cache what was programmed, for the MM path trace dump. */
	cfg_info->addr = addr;
	cfg_info->width = pending->width;
	cfg_info->height = pending->height;
	cfg_info->fmt = fmt;
}
  1113. int MMPathTraceRDMA(struct mtk_ddp_comp *ddp_comp, char *str,
  1114. unsigned int strlen, unsigned int n)
  1115. {
  1116. struct mtk_disp_rdma *rdma = comp_to_rdma(ddp_comp);
  1117. struct mtk_rdma_cfg_info *cfg_info = &rdma->cfg_info;
  1118. n += scnprintf(str + n, strlen - n,
  1119. "in=0x%x, in_width=%d, in_height=%d, in_fmt=%s, in_bpp=%d, ",
  1120. cfg_info->addr,
  1121. cfg_info->width,
  1122. cfg_info->height,
  1123. mtk_get_format_name(cfg_info->fmt),
  1124. mtk_get_format_bpp(cfg_info->fmt));
  1125. return n;
  1126. }
/* ddp component callbacks for the RDMA engine (vblank hooks are disabled). */
static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = {
	.config = mtk_rdma_config,
	.start = mtk_rdma_start,
	.stop = mtk_rdma_stop,
#if 0
	.enable_vblank = mtk_rdma_enable_vblank,
	.disable_vblank = mtk_rdma_disable_vblank,
#endif
	.io_cmd = mtk_rdma_io_cmd,
	.prepare = mtk_rdma_prepare,
	.unprepare = mtk_rdma_unprepare,
	.layer_config = mtk_rdma_layer_config,
};
  1140. static int mtk_disp_rdma_bind(struct device *dev, struct device *master,
  1141. void *data)
  1142. {
  1143. int ret;
  1144. struct mtk_disp_rdma *priv = dev_get_drvdata(dev);
  1145. struct mtk_ddp_comp *comp = &priv->ddp_comp;
  1146. struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
  1147. struct drm_device *drm_dev = data;
  1148. #ifdef MTK_FB_MMDVFS_SUPPORT
  1149. struct mtk_drm_private *drm_priv = drm_dev->dev_private;
  1150. int qos_req_port;
  1151. #endif
  1152. DDPINFO("%s\n", __func__);
  1153. ret = mtk_ddp_comp_register(drm_dev, &priv->ddp_comp);
  1154. if (ret < 0) {
  1155. dev_err(dev, "Failed to register component %s: %d\n",
  1156. dev->of_node->full_name, ret);
  1157. return ret;
  1158. }
  1159. rdma->drm_dev = drm_dev;
  1160. comp->comp_mode = &priv->rdma_memory_mode;
  1161. #ifdef MTK_FB_MMDVFS_SUPPORT
  1162. qos_req_port = __mtk_disp_pmqos_port_look_up(priv->ddp_comp.id);
  1163. if (qos_req_port < 0) {
  1164. DDPPR_ERR("Failed to request QOS port\n");
  1165. } else {
  1166. mm_qos_add_request(&drm_priv->bw_request_list,
  1167. &priv->ddp_comp.qos_req, qos_req_port);
  1168. mm_qos_add_request(&drm_priv->hrt_request_list,
  1169. &priv->ddp_comp.hrt_qos_req, qos_req_port);
  1170. }
  1171. #endif
  1172. return 0;
  1173. }
  1174. static void mtk_disp_rdma_unbind(struct device *dev, struct device *master,
  1175. void *data)
  1176. {
  1177. struct mtk_disp_rdma *priv = dev_get_drvdata(dev);
  1178. struct drm_device *drm_dev = data;
  1179. mtk_ddp_comp_unregister(drm_dev, &priv->ddp_comp);
  1180. }
  1181. static const struct component_ops mtk_disp_rdma_component_ops = {
  1182. .bind = mtk_disp_rdma_bind, .unbind = mtk_disp_rdma_unbind,
  1183. };
/*
 * mtk_disp_rdma_probe - platform driver probe for one RDMA instance
 * @pdev: the RDMA platform device
 *
 * Resolves the component id from the device tree alias, initializes the
 * ddp component (which maps the register space), masks and clears any
 * pending interrupts *before* requesting the IRQ, then enables runtime PM
 * and registers with the component framework.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int mtk_disp_rdma_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mtk_disp_rdma *priv;
	enum mtk_ddp_comp_id comp_id;
	int irq;
	int ret;

	DDPINFO("%s+\n", __func__);
	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_RDMA);
	if ((int)comp_id < 0) {
		dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
		return comp_id;
	}

	ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
		&mtk_disp_rdma_funcs);
	if (ret) {
		dev_err(dev, "Failed to initialize component: %d\n", ret);
		return ret;
	}

	/* Disable and clear pending interrupts */
	writel(0x0, priv->ddp_comp.regs + DISP_REG_RDMA_INT_ENABLE);
	writel(0x0, priv->ddp_comp.regs + DISP_REG_RDMA_INT_STATUS);

	/* IRQ is shared because several RDMAs may sit on the same line. */
	ret = devm_request_irq(dev, irq, mtk_disp_rdma_irq_handler,
		IRQF_TRIGGER_NONE | IRQF_SHARED, dev_name(dev),
		priv);
	if (ret < 0) {
		DDPAEE("%s:%d, failed to request irq:%d ret:%d comp_id:%d\n",
			__func__, __LINE__,
			irq, ret, comp_id);
		return ret;
	}

	priv->data = of_device_get_match_data(dev);
	platform_set_drvdata(pdev, priv);
	pm_runtime_enable(dev);

	ret = component_add(dev, &mtk_disp_rdma_component_ops);
	if (ret != 0) {
		dev_err(dev, "Failed to add component: %d\n", ret);
		pm_runtime_disable(dev);
	}
	DDPINFO("%s-\n", __func__);
	return ret;
}
  1232. static int mtk_disp_rdma_remove(struct platform_device *pdev)
  1233. {
  1234. component_del(&pdev->dev, &mtk_disp_rdma_component_ops);
  1235. pm_runtime_disable(&pdev->dev);
  1236. return 0;
  1237. }
/*
 * Per-SoC RDMA parameters: FIFO depth, optional SODI (screen-on display
 * idle) configuration hook, and whether hardware shadow registers are used.
 */
static const struct mtk_disp_rdma_data mt2701_rdma_driver_data = {
	.fifo_size = SZ_4K,
	.support_shadow = false,
};

static const struct mtk_disp_rdma_data mt6779_rdma_driver_data = {
	.fifo_size = SZ_8K + SZ_16K,
	.sodi_config = mt6779_mtk_sodi_config,
	.support_shadow = false,
};

static const struct mtk_disp_rdma_data mt8173_rdma_driver_data = {
	.fifo_size = SZ_8K,
	.support_shadow = false,
};

static const struct mtk_disp_rdma_data mt6885_rdma_driver_data = {
	.fifo_size = SZ_1K * 3 + SZ_32K,
	.sodi_config = mt6885_mtk_sodi_config,
	.support_shadow = false,
};

static const struct mtk_disp_rdma_data mt6873_rdma_driver_data = {
	.fifo_size = SZ_1K * 3 + SZ_32K,
	.sodi_config = mt6873_mtk_sodi_config,
	.support_shadow = false,
};

static const struct mtk_disp_rdma_data mt6853_rdma_driver_data = {
	.fifo_size = SZ_1K * 3 + SZ_32K,
	.sodi_config = mt6853_mtk_sodi_config,
	.support_shadow = false,
};

static const struct mtk_disp_rdma_data mt6833_rdma_driver_data = {
	.fifo_size = SZ_1K * 3 + SZ_32K,
	.sodi_config = mt6833_mtk_sodi_config,
	.support_shadow = false,
};
/* Device-tree match table binding each compatible to its per-SoC data. */
static const struct of_device_id mtk_disp_rdma_driver_dt_match[] = {
	{.compatible = "mediatek,mt2701-disp-rdma",
	 .data = &mt2701_rdma_driver_data},
	{.compatible = "mediatek,mt6779-disp-rdma",
	 .data = &mt6779_rdma_driver_data},
	{.compatible = "mediatek,mt8173-disp-rdma",
	 .data = &mt8173_rdma_driver_data},
	{.compatible = "mediatek,mt6885-disp-rdma",
	 .data = &mt6885_rdma_driver_data},
	{.compatible = "mediatek,mt6873-disp-rdma",
	 .data = &mt6873_rdma_driver_data},
	{.compatible = "mediatek,mt6853-disp-rdma",
	 .data = &mt6853_rdma_driver_data},
	{.compatible = "mediatek,mt6833-disp-rdma",
	 .data = &mt6833_rdma_driver_data},
	{},
};

MODULE_DEVICE_TABLE(of, mtk_disp_rdma_driver_dt_match);
/* Platform driver object; registered by the mediatek-drm core driver. */
struct platform_driver mtk_disp_rdma_driver = {
	.probe = mtk_disp_rdma_probe,
	.remove = mtk_disp_rdma_remove,
	.driver = {
		.name = "mediatek-disp-rdma",
		.owner = THIS_MODULE,
		.of_match_table = mtk_disp_rdma_driver_dt_match,
	},
};