/* drivers/video/msm/mdp_ppp.c
 *
 * Copyright (C) 2007 QUALCOMM Incorporated
 * Copyright (C) 2007 Google Incorporated
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/fb.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/msm_mdp.h>
#include <mach/msm_fb.h>

#include "mdp_hw.h"
#include "mdp_scale_tables.h"

#define DLOG(x...) do {} while (0)

#define MDP_DOWNSCALE_BLUR (MDP_DOWNSCALE_MAX + 1)
static int downscale_y_table = MDP_DOWNSCALE_MAX;
static int downscale_x_table = MDP_DOWNSCALE_MAX;
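
/* Shadow copy of the PPP register values for a single blit.  Assumed
 * reading of the code: mdp_ppp_blit() and its helpers fill this in, and
 * send_blit() writes the fields to the hardware. */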
struct mdp_regs {
	uint32_t src0;
	uint32_t src1;
	uint32_t dst0;
	uint32_t dst1;
	uint32_t src_cfg;
	uint32_t dst_cfg;
	uint32_t src_pack;
	uint32_t dst_pack;
	uint32_t src_rect;
	uint32_t dst_rect;
	uint32_t src_ystride;
	uint32_t dst_ystride;
	uint32_t op;
	uint32_t src_bpp;
	uint32_t dst_bpp;
	uint32_t edge;
	uint32_t phasex_init;
	uint32_t phasey_init;
	uint32_t phasex_step;
	uint32_t phasey_step;
};
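
/* Per-format lookup tables, indexed by the MDP_* pixel format enum:
 * pack patterns, source/destination configuration words, bytes per
 * pixel, and chroma sampling configuration bits. */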
static uint32_t pack_pattern[] = {
	PPP_ARRAY0(PACK_PATTERN)
};

static uint32_t src_img_cfg[] = {
	PPP_ARRAY1(CFG, SRC)
};

static uint32_t dst_img_cfg[] = {
	PPP_ARRAY1(CFG, DST)
};

static uint32_t bytes_per_pixel[] = {
	[MDP_RGB_565] = 2,
	[MDP_RGB_888] = 3,
	[MDP_XRGB_8888] = 4,
	[MDP_ARGB_8888] = 4,
	[MDP_RGBA_8888] = 4,
	[MDP_BGRA_8888] = 4,
	[MDP_RGBX_8888] = 4,
	[MDP_Y_CBCR_H2V1] = 1,
	[MDP_Y_CBCR_H2V2] = 1,
	[MDP_Y_CRCB_H2V1] = 1,
	[MDP_Y_CRCB_H2V2] = 1,
	[MDP_YCRYCB_H2V1] = 2
};

static uint32_t dst_op_chroma[] = {
	PPP_ARRAY1(CHROMA_SAMP, DST)
};

static uint32_t src_op_chroma[] = {
	PPP_ARRAY1(CHROMA_SAMP, SRC)
};

static uint32_t bg_op_chroma[] = {
	PPP_ARRAY1(CHROMA_SAMP, BG)
};
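
/* When the output is flipped or rotated, the first block written lands at
 * the far edge of the destination rectangle, so shift dst0/dst1 toward
 * that edge.  The 16 here appears to match the PPP's maximum output block
 * width/height. */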
static void rotate_dst_addr_x(struct mdp_blit_req *req, struct mdp_regs *regs)
{
	regs->dst0 += (req->dst_rect.w -
		       min((uint32_t)16, req->dst_rect.w)) * regs->dst_bpp;
	regs->dst1 += (req->dst_rect.w -
		       min((uint32_t)16, req->dst_rect.w)) * regs->dst_bpp;
}

static void rotate_dst_addr_y(struct mdp_blit_req *req, struct mdp_regs *regs)
{
	regs->dst0 += (req->dst_rect.h -
		       min((uint32_t)16, req->dst_rect.h)) *
		       regs->dst_ystride;
	regs->dst1 += (req->dst_rect.h -
		       min((uint32_t)16, req->dst_rect.h)) *
		       regs->dst_ystride;
}

static void blit_rotate(struct mdp_blit_req *req,
			struct mdp_regs *regs)
{
	if (req->flags == MDP_ROT_NOP)
		return;

	regs->op |= PPP_OP_ROT_ON;
	if ((req->flags & MDP_ROT_90 || req->flags & MDP_FLIP_LR) &&
	    !(req->flags & MDP_ROT_90 && req->flags & MDP_FLIP_LR))
		rotate_dst_addr_x(req, regs);

	if (req->flags & MDP_ROT_90)
		regs->op |= PPP_OP_ROT_90;
	if (req->flags & MDP_FLIP_UD) {
		regs->op |= PPP_OP_FLIP_UD;
		rotate_dst_addr_y(req, regs);
	}
	if (req->flags & MDP_FLIP_LR)
		regs->op |= PPP_OP_FLIP_LR;
}
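
/* Enable color space conversion when the source and destination formats
 * belong to different families (RGB vs. YCbCr); RGB565 output selects the
 * secondary conversion matrix. */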
static void blit_convert(struct mdp_blit_req *req, struct mdp_regs *regs)
{
	if (req->src.format == req->dst.format)
		return;
	if (IS_RGB(req->src.format) && IS_YCRCB(req->dst.format)) {
		regs->op |= PPP_OP_CONVERT_RGB2YCBCR | PPP_OP_CONVERT_ON;
	} else if (IS_YCRCB(req->src.format) && IS_RGB(req->dst.format)) {
		regs->op |= PPP_OP_CONVERT_YCBCR2RGB | PPP_OP_CONVERT_ON;
		if (req->dst.format == MDP_RGB_565)
			regs->op |= PPP_OP_CONVERT_MATRIX_SECONDARY;
	}
}
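
/* GET_BIT_RANGE() extracts bits [high:low] of value.  transp_convert()
 * expands the user-supplied transparency color into the per-component
 * layout that send_blit() programs into PPP_ADDR_ALPHA_TRANSP. */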
#define GET_BIT_RANGE(value, high, low) \
	(((1 << (high - low + 1)) - 1) & (value >> low))
static uint32_t transp_convert(struct mdp_blit_req *req)
{
	uint32_t transp = 0;
	if (req->src.format == MDP_RGB_565) {
		/* pad each value to 8 bits by copying the high bits into the
		 * low end, convert RGB to RBG by switching low 2 components */
		transp |= ((GET_BIT_RANGE(req->transp_mask, 15, 11) << 3) |
			   (GET_BIT_RANGE(req->transp_mask, 15, 13))) << 16;

		transp |= ((GET_BIT_RANGE(req->transp_mask, 4, 0) << 3) |
			   (GET_BIT_RANGE(req->transp_mask, 4, 2))) << 8;

		transp |= (GET_BIT_RANGE(req->transp_mask, 10, 5) << 2) |
			  (GET_BIT_RANGE(req->transp_mask, 10, 9));
	} else {
		/* convert RGB to RBG */
		transp |= (GET_BIT_RANGE(req->transp_mask, 15, 8)) |
			  (GET_BIT_RANGE(req->transp_mask, 23, 16) << 16) |
			  (GET_BIT_RANGE(req->transp_mask, 7, 0) << 8);
	}
	return transp;
}
#undef GET_BIT_RANGE
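
/* Choose the blend configuration: transparent-color blits, constant-alpha
 * blends, and per-pixel alpha for formats that carry an alpha channel.
 * The background chroma sampling bits for the destination format are
 * always set. */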
static void blit_blend(struct mdp_blit_req *req, struct mdp_regs *regs)
{
	/* TRANSP BLEND */
	if (req->transp_mask != MDP_TRANSP_NOP) {
		req->transp_mask = transp_convert(req);
		if (req->alpha != MDP_ALPHA_NOP) {
			/* use blended transparency mode
			 * pixel = (src == transp) ? dst : blend
			 * blend is combo of blend_eq_sel and
			 * blend_alpha_sel */
			regs->op |= PPP_OP_ROT_ON | PPP_OP_BLEND_ON |
				PPP_OP_BLEND_ALPHA_BLEND_NORMAL |
				PPP_OP_BLEND_CONSTANT_ALPHA |
				PPP_BLEND_ALPHA_TRANSP;
		} else {
			/* simple transparency mode
			 * pixel = (src == transp) ? dst : src */
			regs->op |= PPP_OP_ROT_ON | PPP_OP_BLEND_ON |
				PPP_OP_BLEND_SRCPIXEL_TRANSP;
		}
	}

	req->alpha &= 0xff;
	/* ALPHA BLEND */
	if (HAS_ALPHA(req->src.format)) {
		regs->op |= PPP_OP_ROT_ON | PPP_OP_BLEND_ON |
			PPP_OP_BLEND_SRCPIXEL_ALPHA;
	} else if (req->alpha < MDP_ALPHA_NOP) {
		/* just blend by alpha */
		regs->op |= PPP_OP_ROT_ON | PPP_OP_BLEND_ON |
			PPP_OP_BLEND_ALPHA_BLEND_NORMAL |
			PPP_OP_BLEND_CONSTANT_ALPHA;
	}

	regs->op |= bg_op_chroma[req->dst.format];
}

#define ONE_HALF	(1LL << 32)
#define ONE		(1LL << 33)
#define TWO		(2LL << 33)
#define THREE		(3LL << 33)
#define FRAC_MASK	(ONE - 1)
#define INT_MASK	(~FRAC_MASK)
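
/* Compute the scaler's initial phase and phase step for one axis.
 * dim_in/dim_out are the source and destination extents and origin is the
 * source ROI offset on that axis.  Returns -1 when either dimension is
 * zero or the ratio falls outside what the hardware supports (roughly
 * 1/4x to 4x, judging by the range check on k3 below). */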
static int scale_params(uint32_t dim_in, uint32_t dim_out, uint32_t origin,
			uint32_t *phase_init, uint32_t *phase_step)
{
	/* to improve precision, calculations are done in U31.33 and converted
	 * to U3.29 at the end */
	int64_t k1, k2, k3, k4, tmp;
	uint64_t n, d, os, os_p, od, od_p, oreq;
	unsigned rpa = 0;
	int64_t ip64, delta;

	if (dim_out % 3 == 0)
		rpa = !(dim_in % (dim_out / 3));

	n = ((uint64_t)dim_out) << 34;
	d = dim_in;
	if (!d)
		return -1;
	do_div(n, d);
	k3 = (n + 1) >> 1;
	if ((k3 >> 4) < (1LL << 27) || (k3 >> 4) > (1LL << 31)) {
		DLOG("crap bad scale\n");
		return -1;
	}
	n = ((uint64_t)dim_in) << 34;
	d = (uint64_t)dim_out;
	if (!d)
		return -1;
	do_div(n, d);
	k1 = (n + 1) >> 1;
	k2 = (k1 - ONE) >> 1;

	*phase_init = (int)(k2 >> 4);
	k4 = (k3 - ONE) >> 1;

	if (rpa) {
		os = ((uint64_t)origin << 33) - ONE_HALF;
		tmp = (dim_out * os) + ONE_HALF;
		if (!dim_in)
			return -1;
		do_div(tmp, dim_in);
		od = tmp - ONE_HALF;
	} else {
		os = ((uint64_t)origin << 1) - 1;
		od = (((k3 * os) >> 1) + k4);
	}

	od_p = od & INT_MASK;
	if (od_p != od)
		od_p += ONE;

	if (rpa) {
		tmp = (dim_in * od_p) + ONE_HALF;
		if (!dim_in)
			return -1;
		do_div(tmp, dim_in);
		os_p = tmp - ONE_HALF;
	} else {
		os_p = ((k1 * (od_p >> 33)) + k2);
	}

	oreq = (os_p & INT_MASK) - ONE;

	ip64 = os_p - oreq;
	delta = ((int64_t)(origin) << 33) - oreq;
	ip64 -= delta;

	/* limit to valid range before the left shift */
	delta = (ip64 & (1LL << 63)) ? 4 : -4;
	delta <<= 33;
	while (abs((int)(ip64 >> 33)) > 4)
		ip64 += delta;

	*phase_init = (int)(ip64 >> 4);
	*phase_step = (uint32_t)(k1 >> 4);
	return 0;
}

static void load_scale_table(const struct mdp_info *mdp,
			     struct mdp_table_entry *table, int len)
{
	int i;
	for (i = 0; i < len; i++)
		mdp_writel(mdp, table[i].val, table[i].reg);
}
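
/* For each edge of the source ROI the filter taps reach one or two pixels
 * beyond the ROI, depending on the scale ratio.  get_edge_info() picks,
 * per axis, the first/last source coordinates to interpolate from
 * (interp1/interp2) and how many of those pixels have to be produced by
 * repeating the boundary pixel (repeat1/repeat2). */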
enum {
	IMG_LEFT,
	IMG_RIGHT,
	IMG_TOP,
	IMG_BOTTOM,
};

static void get_edge_info(uint32_t src, uint32_t src_coord, uint32_t dst,
			  uint32_t *interp1, uint32_t *interp2,
			  uint32_t *repeat1, uint32_t *repeat2) {
	if (src > 3 * dst) {
		*interp1 = 0;
		*interp2 = src - 1;
		*repeat1 = 0;
		*repeat2 = 0;
	} else if (src == 3 * dst) {
		*interp1 = 0;
		*interp2 = src;
		*repeat1 = 0;
		*repeat2 = 1;
	} else if (src > dst && src < 3 * dst) {
		*interp1 = -1;
		*interp2 = src;
		*repeat1 = 1;
		*repeat2 = 1;
	} else if (src == dst) {
		*interp1 = -1;
		*interp2 = src + 1;
		*repeat1 = 1;
		*repeat2 = 2;
	} else {
		*interp1 = -2;
		*interp2 = src + 1;
		*repeat1 = 2;
		*repeat2 = 2;
	}
	*interp1 += src_coord;
	*interp2 += src_coord;
}
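
/* Build the EDGE register: a 2-bit repeat count for every luma and chroma
 * edge of the source ROI, with the chroma values adjusted for the
 * format's subsampling.  Returns -1 if any repeat count falls outside the
 * 0..3 range the register fields can encode. */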
static int get_edge_cond(struct mdp_blit_req *req, struct mdp_regs *regs)
{
	int32_t luma_interp[4];
	int32_t luma_repeat[4];
	int32_t chroma_interp[4];
	int32_t chroma_bound[4];
	int32_t chroma_repeat[4];
	uint32_t dst_w, dst_h;

	memset(&luma_interp, 0, sizeof(int32_t) * 4);
	memset(&luma_repeat, 0, sizeof(int32_t) * 4);
	memset(&chroma_interp, 0, sizeof(int32_t) * 4);
	memset(&chroma_bound, 0, sizeof(int32_t) * 4);
	memset(&chroma_repeat, 0, sizeof(int32_t) * 4);
	regs->edge = 0;

	if (req->flags & MDP_ROT_90) {
		dst_w = req->dst_rect.h;
		dst_h = req->dst_rect.w;
	} else {
		dst_w = req->dst_rect.w;
		dst_h = req->dst_rect.h;
	}

	if (regs->op & (PPP_OP_SCALE_Y_ON | PPP_OP_SCALE_X_ON)) {
		get_edge_info(req->src_rect.h, req->src_rect.y, dst_h,
			      &luma_interp[IMG_TOP], &luma_interp[IMG_BOTTOM],
			      &luma_repeat[IMG_TOP], &luma_repeat[IMG_BOTTOM]);
		get_edge_info(req->src_rect.w, req->src_rect.x, dst_w,
			      &luma_interp[IMG_LEFT], &luma_interp[IMG_RIGHT],
			      &luma_repeat[IMG_LEFT], &luma_repeat[IMG_RIGHT]);
	} else {
		luma_interp[IMG_LEFT] = req->src_rect.x;
		luma_interp[IMG_RIGHT] = req->src_rect.x + req->src_rect.w - 1;
		luma_interp[IMG_TOP] = req->src_rect.y;
		luma_interp[IMG_BOTTOM] = req->src_rect.y + req->src_rect.h - 1;
		luma_repeat[IMG_LEFT] = 0;
		luma_repeat[IMG_TOP] = 0;
		luma_repeat[IMG_RIGHT] = 0;
		luma_repeat[IMG_BOTTOM] = 0;
	}

	chroma_interp[IMG_LEFT] = luma_interp[IMG_LEFT];
	chroma_interp[IMG_RIGHT] = luma_interp[IMG_RIGHT];
	chroma_interp[IMG_TOP] = luma_interp[IMG_TOP];
	chroma_interp[IMG_BOTTOM] = luma_interp[IMG_BOTTOM];

	chroma_bound[IMG_LEFT] = req->src_rect.x;
	chroma_bound[IMG_RIGHT] = req->src_rect.x + req->src_rect.w - 1;
	chroma_bound[IMG_TOP] = req->src_rect.y;
	chroma_bound[IMG_BOTTOM] = req->src_rect.y + req->src_rect.h - 1;

	if (IS_YCRCB(req->src.format)) {
		chroma_interp[IMG_LEFT] = chroma_interp[IMG_LEFT] >> 1;
		chroma_interp[IMG_RIGHT] = (chroma_interp[IMG_RIGHT] + 1) >> 1;

		chroma_bound[IMG_LEFT] = chroma_bound[IMG_LEFT] >> 1;
		chroma_bound[IMG_RIGHT] = chroma_bound[IMG_RIGHT] >> 1;
	}

	if (req->src.format == MDP_Y_CBCR_H2V2 ||
	    req->src.format == MDP_Y_CRCB_H2V2) {
		chroma_interp[IMG_TOP] = (chroma_interp[IMG_TOP] - 1) >> 1;
		chroma_interp[IMG_BOTTOM] = (chroma_interp[IMG_BOTTOM] + 1)
					    >> 1;
		chroma_bound[IMG_TOP] = (chroma_bound[IMG_TOP] + 1) >> 1;
		chroma_bound[IMG_BOTTOM] = chroma_bound[IMG_BOTTOM] >> 1;
	}

	chroma_repeat[IMG_LEFT] = chroma_bound[IMG_LEFT] -
				  chroma_interp[IMG_LEFT];
	chroma_repeat[IMG_RIGHT] = chroma_interp[IMG_RIGHT] -
				   chroma_bound[IMG_RIGHT];
	chroma_repeat[IMG_TOP] = chroma_bound[IMG_TOP] -
				 chroma_interp[IMG_TOP];
	chroma_repeat[IMG_BOTTOM] = chroma_interp[IMG_BOTTOM] -
				    chroma_bound[IMG_BOTTOM];

	if (chroma_repeat[IMG_LEFT] < 0 || chroma_repeat[IMG_LEFT] > 3 ||
	    chroma_repeat[IMG_RIGHT] < 0 || chroma_repeat[IMG_RIGHT] > 3 ||
	    chroma_repeat[IMG_TOP] < 0 || chroma_repeat[IMG_TOP] > 3 ||
	    chroma_repeat[IMG_BOTTOM] < 0 || chroma_repeat[IMG_BOTTOM] > 3 ||
	    luma_repeat[IMG_LEFT] < 0 || luma_repeat[IMG_LEFT] > 3 ||
	    luma_repeat[IMG_RIGHT] < 0 || luma_repeat[IMG_RIGHT] > 3 ||
	    luma_repeat[IMG_TOP] < 0 || luma_repeat[IMG_TOP] > 3 ||
	    luma_repeat[IMG_BOTTOM] < 0 || luma_repeat[IMG_BOTTOM] > 3)
		return -1;

	regs->edge |= (chroma_repeat[IMG_LEFT] & 3) << MDP_LEFT_CHROMA;
	regs->edge |= (chroma_repeat[IMG_RIGHT] & 3) << MDP_RIGHT_CHROMA;
	regs->edge |= (chroma_repeat[IMG_TOP] & 3) << MDP_TOP_CHROMA;
	regs->edge |= (chroma_repeat[IMG_BOTTOM] & 3) << MDP_BOTTOM_CHROMA;
	regs->edge |= (luma_repeat[IMG_LEFT] & 3) << MDP_LEFT_LUMA;
	regs->edge |= (luma_repeat[IMG_RIGHT] & 3) << MDP_RIGHT_LUMA;
	regs->edge |= (luma_repeat[IMG_TOP] & 3) << MDP_TOP_LUMA;
	regs->edge |= (luma_repeat[IMG_BOTTOM] & 3) << MDP_BOTTOM_LUMA;
	return 0;
}
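
/* Program the scaler: compute the phase registers via scale_params() and
 * load the x/y coefficient tables for the matching downscale bucket
 * (0.2-0.4, 0.4-0.6, 0.6-0.8 or 0.8 and above).  The currently loaded
 * tables are tracked in downscale_x_table/downscale_y_table so they are
 * only reloaded when the bucket changes. */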
static int blit_scale(const struct mdp_info *mdp, struct mdp_blit_req *req,
		      struct mdp_regs *regs)
{
	uint32_t phase_init_x, phase_init_y, phase_step_x, phase_step_y;
	uint32_t scale_factor_x, scale_factor_y;
	uint32_t downscale;
	uint32_t dst_w, dst_h;

	if (req->flags & MDP_ROT_90) {
		dst_w = req->dst_rect.h;
		dst_h = req->dst_rect.w;
	} else {
		dst_w = req->dst_rect.w;
		dst_h = req->dst_rect.h;
	}
	if ((req->src_rect.w == dst_w) && (req->src_rect.h == dst_h) &&
	    !(req->flags & MDP_BLUR)) {
		regs->phasex_init = 0;
		regs->phasey_init = 0;
		regs->phasex_step = 0;
		regs->phasey_step = 0;
		return 0;
	}

	if (scale_params(req->src_rect.w, dst_w, 1, &phase_init_x,
			 &phase_step_x) ||
	    scale_params(req->src_rect.h, dst_h, 1, &phase_init_y,
			 &phase_step_y))
		return -1;

	scale_factor_x = (dst_w * 10) / req->src_rect.w;
	scale_factor_y = (dst_h * 10) / req->src_rect.h;

	if (scale_factor_x > 8)
		downscale = MDP_DOWNSCALE_PT8TO1;
	else if (scale_factor_x > 6)
		downscale = MDP_DOWNSCALE_PT6TOPT8;
	else if (scale_factor_x > 4)
		downscale = MDP_DOWNSCALE_PT4TOPT6;
	else
		downscale = MDP_DOWNSCALE_PT2TOPT4;

	if (downscale != downscale_x_table) {
		load_scale_table(mdp, mdp_downscale_x_table[downscale], 64);
		downscale_x_table = downscale;
	}

	if (scale_factor_y > 8)
		downscale = MDP_DOWNSCALE_PT8TO1;
	else if (scale_factor_y > 6)
		downscale = MDP_DOWNSCALE_PT6TOPT8;
	else if (scale_factor_y > 4)
		downscale = MDP_DOWNSCALE_PT4TOPT6;
	else
		downscale = MDP_DOWNSCALE_PT2TOPT4;

	if (downscale != downscale_y_table) {
		load_scale_table(mdp, mdp_downscale_y_table[downscale], 64);
		downscale_y_table = downscale;
	}

	regs->phasex_init = phase_init_x;
	regs->phasey_init = phase_init_y;
	regs->phasex_step = phase_step_x;
	regs->phasey_step = phase_step_y;
	regs->op |= (PPP_OP_SCALE_Y_ON | PPP_OP_SCALE_X_ON);
	return 0;
}
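
/* MDP_BLUR is implemented by replacing the scaler coefficients with a
 * Gaussian table and forcing scaling on in both directions. */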
static void blit_blur(const struct mdp_info *mdp, struct mdp_blit_req *req,
		      struct mdp_regs *regs)
{
	if (!(req->flags & MDP_BLUR))
		return;

	if (!(downscale_x_table == MDP_DOWNSCALE_BLUR &&
	      downscale_y_table == MDP_DOWNSCALE_BLUR)) {
		load_scale_table(mdp, mdp_gaussian_blur_table, 128);
		downscale_x_table = MDP_DOWNSCALE_BLUR;
		downscale_y_table = MDP_DOWNSCALE_BLUR;
	}

	regs->op |= (PPP_OP_SCALE_Y_ON | PPP_OP_SCALE_X_ON);
}
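
/* Byte length of the ROI's first plane (rect height times the full image
 * stride) and, for pseudo-planar formats, of the interleaved chroma
 * plane: half the size for H2V2 formats, the same size for H2V1. */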
#define IMG_LEN(rect_h, w, rect_w, bpp) (((rect_h) * w) * bpp)

#define Y_TO_CRCB_RATIO(format) \
	((format == MDP_Y_CBCR_H2V2 || format == MDP_Y_CRCB_H2V2) ?  2 : \
	 (format == MDP_Y_CBCR_H2V1 || format == MDP_Y_CRCB_H2V1) ?  1 : 1)

static void get_len(struct mdp_img *img, struct mdp_rect *rect, uint32_t bpp,
		    uint32_t *len0, uint32_t *len1)
{
	*len0 = IMG_LEN(rect->h, img->width, rect->w, bpp);
	if (IS_PSEUDOPLNR(img->format))
		*len1 = *len0/Y_TO_CRCB_RATIO(img->format);
	else
		*len1 = 0;
}
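
/* Sanity-check the physical addresses programmed for each plane against
 * the [start, start + len) window of the backing memory region supplied
 * by the caller.  Returns 1 if everything fits, 0 otherwise. */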
static int valid_src_dst(unsigned long src_start, unsigned long src_len,
			 unsigned long dst_start, unsigned long dst_len,
			 struct mdp_blit_req *req, struct mdp_regs *regs)
{
	unsigned long src_min_ok = src_start;
	unsigned long src_max_ok = src_start + src_len;
	unsigned long dst_min_ok = dst_start;
	unsigned long dst_max_ok = dst_start + dst_len;
	uint32_t src0_len, src1_len, dst0_len, dst1_len;
	get_len(&req->src, &req->src_rect, regs->src_bpp, &src0_len,
		&src1_len);
	get_len(&req->dst, &req->dst_rect, regs->dst_bpp, &dst0_len,
		&dst1_len);

	if (regs->src0 < src_min_ok || regs->src0 > src_max_ok ||
	    regs->src0 + src0_len > src_max_ok) {
		DLOG("invalid_src %x %x %lx %lx\n", regs->src0,
		     src0_len, src_min_ok, src_max_ok);
		return 0;
	}
	if (regs->src_cfg & PPP_SRC_PLANE_PSEUDOPLNR) {
		if (regs->src1 < src_min_ok || regs->src1 > src_max_ok ||
		    regs->src1 + src1_len > src_max_ok) {
			DLOG("invalid_src1");
			return 0;
		}
	}
	if (regs->dst0 < dst_min_ok || regs->dst0 > dst_max_ok ||
	    regs->dst0 + dst0_len > dst_max_ok) {
		DLOG("invalid_dst");
		return 0;
	}
	if (regs->dst_cfg & PPP_SRC_PLANE_PSEUDOPLNR) {
		if (regs->dst1 < dst_min_ok || regs->dst1 > dst_max_ok ||
		    regs->dst1 + dst1_len > dst_max_ok) {
			DLOG("invalid_dst1");
			return 0;
		}
	}
	return 1;
}
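
/* Cache maintenance hook for the source/destination buffers; intentionally
 * a no-op in this version of the driver. */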
static void flush_imgs(struct mdp_blit_req *req, struct mdp_regs *regs,
		       struct file *src_file, struct file *dst_file)
{
}
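
/* For pseudo-planar formats the interleaved CbCr (or CrCb) plane follows
 * the luma plane at base + width * height * bpp.  Compute the chroma
 * address of the ROI origin, accounting for 2x horizontal and (for H2V2)
 * 2x vertical subsampling, and duplicate the ystride into the upper 16
 * bits, which apparently serve as the chroma-plane stride field. */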
static void get_chroma_addr(struct mdp_img *img, struct mdp_rect *rect,
			    uint32_t base, uint32_t bpp, uint32_t cfg,
			    uint32_t *addr, uint32_t *ystride)
{
	uint32_t compress_v = Y_TO_CRCB_RATIO(img->format);
	uint32_t compress_h = 2;
	uint32_t offset;

	if (IS_PSEUDOPLNR(img->format)) {
		offset = (rect->x / compress_h) * compress_h;
		offset += rect->y == 0 ? 0 :
			  ((rect->y + 1) / compress_v) * img->width;
		*addr = base + (img->width * img->height * bpp);
		*addr += offset * bpp;
		*ystride |= *ystride << 16;
	} else {
		*addr = 0;
	}
}
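
/* Program every PPP register from the assembled mdp_regs and kick off the
 * operation by writing to MDP_DISPLAY0_START.  When blending is enabled,
 * the background plane is pointed at the destination, so the source is
 * composited over whatever is already there. */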
static int send_blit(const struct mdp_info *mdp, struct mdp_blit_req *req,
		     struct mdp_regs *regs, struct file *src_file,
		     struct file *dst_file)
{
	mdp_writel(mdp, 1, 0x060);
	mdp_writel(mdp, regs->src_rect, PPP_ADDR_SRC_ROI);
	mdp_writel(mdp, regs->src0, PPP_ADDR_SRC0);
	mdp_writel(mdp, regs->src1, PPP_ADDR_SRC1);
	mdp_writel(mdp, regs->src_ystride, PPP_ADDR_SRC_YSTRIDE);
	mdp_writel(mdp, regs->src_cfg, PPP_ADDR_SRC_CFG);
	mdp_writel(mdp, regs->src_pack, PPP_ADDR_SRC_PACK_PATTERN);

	mdp_writel(mdp, regs->op, PPP_ADDR_OPERATION);
	mdp_writel(mdp, regs->phasex_init, PPP_ADDR_PHASEX_INIT);
	mdp_writel(mdp, regs->phasey_init, PPP_ADDR_PHASEY_INIT);
	mdp_writel(mdp, regs->phasex_step, PPP_ADDR_PHASEX_STEP);
	mdp_writel(mdp, regs->phasey_step, PPP_ADDR_PHASEY_STEP);

	mdp_writel(mdp, (req->alpha << 24) | (req->transp_mask & 0xffffff),
		   PPP_ADDR_ALPHA_TRANSP);

	mdp_writel(mdp, regs->dst_cfg, PPP_ADDR_DST_CFG);
	mdp_writel(mdp, regs->dst_pack, PPP_ADDR_DST_PACK_PATTERN);
	mdp_writel(mdp, regs->dst_rect, PPP_ADDR_DST_ROI);
	mdp_writel(mdp, regs->dst0, PPP_ADDR_DST0);
	mdp_writel(mdp, regs->dst1, PPP_ADDR_DST1);
	mdp_writel(mdp, regs->dst_ystride, PPP_ADDR_DST_YSTRIDE);

	mdp_writel(mdp, regs->edge, PPP_ADDR_EDGE);
	if (regs->op & PPP_OP_BLEND_ON) {
		mdp_writel(mdp, regs->dst0, PPP_ADDR_BG0);
		mdp_writel(mdp, regs->dst1, PPP_ADDR_BG1);
		mdp_writel(mdp, regs->dst_ystride, PPP_ADDR_BG_YSTRIDE);
		mdp_writel(mdp, src_img_cfg[req->dst.format], PPP_ADDR_BG_CFG);
		mdp_writel(mdp, pack_pattern[req->dst.format],
			   PPP_ADDR_BG_PACK_PATTERN);
	}
	flush_imgs(req, regs, src_file, dst_file);
	mdp_writel(mdp, 0x1000, MDP_DISPLAY0_START);
	return 0;
}
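
/* Top-level blit entry point.  Validates the request, assembles a struct
 * mdp_regs describing the source, destination, scaling, blending and edge
 * configuration, and hands it to send_blit().  src_start/src_len and
 * dst_start/dst_len describe the memory regions that the image offsets
 * are relative to. */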
int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req,
		 struct file *src_file, unsigned long src_start, unsigned long src_len,
		 struct file *dst_file, unsigned long dst_start, unsigned long dst_len)
{
	struct mdp_regs regs = {0};

	if (unlikely(req->src.format >= MDP_IMGTYPE_LIMIT ||
		     req->dst.format >= MDP_IMGTYPE_LIMIT)) {
		printk(KERN_ERR "mdp_ppp: img is of wrong format\n");
		return -EINVAL;
	}

	if (unlikely(req->src_rect.x > req->src.width ||
		     req->src_rect.y > req->src.height ||
		     req->dst_rect.x > req->dst.width ||
		     req->dst_rect.y > req->dst.height)) {
		printk(KERN_ERR "mdp_ppp: img rect is outside of img!\n");
		return -EINVAL;
	}

	/* set the src image configuration */
	regs.src_cfg = src_img_cfg[req->src.format];
	regs.src_cfg |= (req->src_rect.x & 0x1) ? PPP_SRC_BPP_ROI_ODD_X : 0;
	regs.src_cfg |= (req->src_rect.y & 0x1) ? PPP_SRC_BPP_ROI_ODD_Y : 0;
	regs.src_rect = (req->src_rect.h << 16) | req->src_rect.w;
	regs.src_pack = pack_pattern[req->src.format];

	/* set the dest image configuration */
	regs.dst_cfg = dst_img_cfg[req->dst.format] | PPP_DST_OUT_SEL_AXI;
	regs.dst_rect = (req->dst_rect.h << 16) | req->dst_rect.w;
	regs.dst_pack = pack_pattern[req->dst.format];

	/* set src, bpp, start pixel and ystride */
	regs.src_bpp = bytes_per_pixel[req->src.format];
	regs.src0 = src_start + req->src.offset;
	regs.src_ystride = req->src.width * regs.src_bpp;
	get_chroma_addr(&req->src, &req->src_rect, regs.src0, regs.src_bpp,
			regs.src_cfg, &regs.src1, &regs.src_ystride);
	regs.src0 += (req->src_rect.x + (req->src_rect.y * req->src.width)) *
		     regs.src_bpp;

	/* set dst, bpp, start pixel and ystride */
	regs.dst_bpp = bytes_per_pixel[req->dst.format];
	regs.dst0 = dst_start + req->dst.offset;
	regs.dst_ystride = req->dst.width * regs.dst_bpp;
	get_chroma_addr(&req->dst, &req->dst_rect, regs.dst0, regs.dst_bpp,
			regs.dst_cfg, &regs.dst1, &regs.dst_ystride);
	regs.dst0 += (req->dst_rect.x + (req->dst_rect.y * req->dst.width)) *
		     regs.dst_bpp;

	if (!valid_src_dst(src_start, src_len, dst_start, dst_len, req,
			   &regs)) {
		printk(KERN_ERR "mdp_ppp: final src or dst location is "
			"invalid, are you trying to make an image too large "
			"or to place it outside the screen?\n");
		return -EINVAL;
	}

	/* set up operation register */
	regs.op = 0;
	blit_rotate(req, &regs);
	blit_convert(req, &regs);
	if (req->flags & MDP_DITHER)
		regs.op |= PPP_OP_DITHER_EN;
	blit_blend(req, &regs);
	if (blit_scale(mdp, req, &regs)) {
		printk(KERN_ERR "mdp_ppp: error computing scale for img.\n");
		return -EINVAL;
	}
	blit_blur(mdp, req, &regs);
	regs.op |= dst_op_chroma[req->dst.format] |
		   src_op_chroma[req->src.format];

	/* if the image is YCRYCB, the x and w must be even */
	if (unlikely(req->src.format == MDP_YCRYCB_H2V1)) {
		req->src_rect.x = req->src_rect.x & (~0x1);
		req->src_rect.w = req->src_rect.w & (~0x1);
		req->dst_rect.x = req->dst_rect.x & (~0x1);
		req->dst_rect.w = req->dst_rect.w & (~0x1);
	}
	if (get_edge_cond(req, &regs))
		return -EINVAL;

	send_blit(mdp, req, &regs, src_file, dst_file);
	return 0;
}