mdp3_ppp_hwio.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223
  1. /* Copyright (c) 2007, 2012-2013 The Linux Foundation. All rights reserved.
  2. * Copyright (C) 2007 Google Incorporated
  3. *
  4. * This software is licensed under the terms of the GNU General Public
  5. * License version 2, as published by the Free Software Foundation, and
  6. * may be copied, distributed, and modified under those terms.
  7. *
  8. * This program is distributed in the hope that it will be useful,
  9. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. * GNU General Public License for more details.
  12. */
  13. #include <linux/file.h>
  14. #include <linux/io.h>
  15. #include <linux/kernel.h>
  16. #include <linux/major.h>
  17. #include <linux/slab.h>
  18. #include <linux/types.h>
  19. #include <linux/uaccess.h>
  20. #include "linux/proc_fs.h"
  21. #include "mdss_fb.h"
  22. #include "mdp3_ppp.h"
  23. #include "mdp3_hwio.h"
  24. /* SHIM Q Factor */
  25. #define PHI_Q_FACTOR 29
  26. #define PQF_PLUS_5 (PHI_Q_FACTOR + 5) /* due to 32 phases */
  27. #define PQF_PLUS_4 (PHI_Q_FACTOR + 4)
  28. #define PQF_PLUS_2 (PHI_Q_FACTOR + 2) /* to get 4.0 */
  29. #define PQF_MINUS_2 (PHI_Q_FACTOR - 2) /* to get 0.25 */
  30. #define PQF_PLUS_5_PLUS_2 (PQF_PLUS_5 + 2)
  31. #define PQF_PLUS_5_MINUS_2 (PQF_PLUS_5 - 2)
/*
 * mdp_do_div() - 64-bit division helper; returns num / den.
 *
 * The kernel's do_div() macro divides @num in place (it becomes the
 * quotient) and evaluates to the remainder, which is discarded here.
 * Only the quotient is returned.
 */
static long long mdp_do_div(long long num, long long den)
{
	do_div(num, den);
	return num;
}
/*
 * mdp_calc_scale_params() - derive PPP scaler phase parameters for one axis.
 * @org:            ROI origin along this axis (x for width, y for height).
 * @dim_in:         source dimension in pixels.
 * @dim_out:        destination dimension in pixels.
 * @is_W:           true when computing the width axis
 *                  (not referenced in this body).
 * @phase_init_ptr: out: initial phase for the polyphase filter.
 * @phase_step_ptr: out: per-output-pixel phase step (1/SF, fixed point).
 *
 * Uses either a fixed-point path or a rational-phase-accumulator (RPA)
 * path depending on whether 1/SF can be represented exactly in fixed
 * point (see the comment below on multiples of 1/3).
 *
 * Returns 0 on success, -EINVAL when the scale factor is outside the
 * supported [0.25, 4.0] range.
 */
static int mdp_calc_scale_params(uint32_t org, uint32_t dim_in,
	uint32_t dim_out, bool is_W, int32_t *phase_init_ptr,
	uint32_t *phase_step_ptr)
{
	bool rpa_on = false;
	int init_phase = 0;
	uint64_t numer = 0;
	uint64_t denom = 0;
	int64_t point5 = 1;
	int64_t one = 1;
	int64_t k1, k2, k3, k4; /* linear equation coefficients */
	uint64_t int_mask;
	uint64_t fract_mask;
	uint64_t Os;
	int64_t Osprime;
	int64_t Od;
	int64_t Odprime;
	int64_t Oreq;
	int64_t init_phase_temp;
	int64_t delta;
	uint32_t mult;

	/*
	 * The phase accumulator should really be rational for all cases in a
	 * general purpose polyphase scaler for a tiled architecture with
	 * non-zero * origin capability because there is no way to represent
	 * certain scale factors in fixed point regardless of precision.
	 * The error incurred in attempting to use fixed point is most
	 * eggregious for SF where 1/SF is an integral multiple of 1/3.
	 *
	 * Set the RPA flag for this dimension.
	 *
	 * In order for 1/SF (dim_in/dim_out) to be an integral multiple of
	 * 1/3, dim_out must be an integral multiple of 3.
	 */
	if (!(dim_out % 3)) {
		mult = dim_out / 3;
		rpa_on = (!(dim_in % mult));
	}
	numer = dim_out;
	denom = dim_in;
	/*
	 * convert to U30.34 before division
	 *
	 * The K vectors carry 4 extra bits of precision
	 * and are rounded.
	 *
	 * We initially go 5 bits over then round by adding
	 * 1 and right shifting by 1
	 * so final result is U31.33
	 */
	numer <<= PQF_PLUS_5;
	/* now calculate the scale factor (aka k3) */
	k3 = ((mdp_do_div(numer, denom) + 1) >> 1);
	/* check scale factor for legal range [0.25 - 4.0] */
	if (((k3 >> 4) < (1LL << PQF_MINUS_2)) ||
	    ((k3 >> 4) > (1LL << PQF_PLUS_2))) {
		return -EINVAL;
	}
	/* calculate inverse scale factor (aka k1) for phase init */
	numer = dim_in;
	denom = dim_out;
	numer <<= PQF_PLUS_5;
	k1 = ((mdp_do_div(numer, denom) + 1) >> 1);
	/*
	 * calculate initial phase and ROI overfetch
	 */
	/* convert point5 & one to S39.24 (will always be positive) */
	point5 <<= (PQF_PLUS_4 - 1);
	one <<= PQF_PLUS_4;
	k2 = ((k1 - one) >> 1);
	init_phase = (int)(k2 >> 4);
	k4 = ((k3 - one) >> 1);
	if (k3 != one) {
		/* calculate the masks */
		fract_mask = one - 1;
		int_mask = ~fract_mask;
		if (!rpa_on) {
			/*
			 * FIXED POINT IMPLEMENTATION
			 */
			if (org) {
				/*
				 * The complicated case; ROI origin != 0
				 * init_phase needs to be adjusted
				 * OF is also position dependent
				 */
				/* map (org - .5) into destination space */
				Os = ((uint64_t) org << 1) - 1;
				Od = ((k3 * Os) >> 1) + k4;
				/* take the ceiling */
				Odprime = (Od & int_mask);
				if (Odprime != Od)
					Odprime += one;
				/* now map that back to source space */
				Osprime = (k1 * (Odprime >> PQF_PLUS_4)) + k2;
				/* then floor & decrement to calc the required
				   starting coordinate */
				Oreq = (Osprime & int_mask) - one;
				/* calculate initial phase */
				init_phase_temp = Osprime - Oreq;
				delta = ((int64_t) (org) << PQF_PLUS_4) - Oreq;
				init_phase_temp -= delta;
				/* limit to valid range before the left shift */
				/* sign test via bit 63: negative -> step up
				   by 4, positive -> step down by 4 */
				delta = (init_phase_temp & (1LL << 63)) ?
					4 : -4;
				delta <<= PQF_PLUS_4;
				while (abs((int)(init_phase_temp >>
					PQF_PLUS_4)) > 4)
					init_phase_temp += delta;
				/*
				 * right shift to account for extra bits of
				 * precision
				 */
				init_phase = (int)(init_phase_temp >> 4);
			}
		} else {
			/*
			 * RPA IMPLEMENTATION
			 *
			 * init_phase needs to be calculated in all RPA_on cases
			 * because it's a numerator, not a fixed point value.
			 */
			/* map (org - .5) into destination space */
			Os = ((uint64_t) org << PQF_PLUS_4) - point5;
			Od = mdp_do_div((dim_out * (Os + point5)),
				dim_in);
			Od -= point5;
			/* take the ceiling */
			Odprime = (Od & int_mask);
			if (Odprime != Od)
				Odprime += one;
			/* now map that back to source space */
			Osprime =
				mdp_do_div((dim_in * (Odprime + point5)),
					dim_out);
			Osprime -= point5;
			/* then floor & decrement to calculate the required
			   starting coordinate */
			Oreq = (Osprime & int_mask) - one;
			/* calculate initial phase */
			init_phase_temp = Osprime - Oreq;
			delta = ((int64_t) (org) << PQF_PLUS_4) - Oreq;
			init_phase_temp -= delta;
			/* limit to valid range before the left shift */
			delta = (init_phase_temp & (1LL << 63)) ? 4 : -4;
			delta <<= PQF_PLUS_4;
			while (abs((int)(init_phase_temp >> PQF_PLUS_4)) > 4)
				init_phase_temp += delta;
			/* right shift to account for extra bits of precision */
			init_phase = (int)(init_phase_temp >> 4);
		}
	}
	/* return the scale parameters */
	*phase_init_ptr = init_phase;
	*phase_step_ptr = (uint32_t) (k1 >> 4);
	return 0;
}
  194. static int scale_idx(int factor)
  195. {
  196. int idx;
  197. if (factor > 80)
  198. idx = PPP_DOWNSCALE_PT8TOPT1;
  199. else if (factor > 60)
  200. idx = PPP_DOWNSCALE_PT6TOPT8;
  201. else if (factor > 40)
  202. idx = PPP_DOWNSCALE_PT4TOPT6;
  203. else
  204. idx = PPP_DOWNSCALE_PT2TOPT4;
  205. return idx;
  206. }
  207. inline int32_t comp_conv_rgb2yuv(int32_t comp, int32_t y_high,
  208. int32_t y_low, int32_t c_high, int32_t c_low)
  209. {
  210. if (comp < 0)
  211. comp = 0;
  212. if (comp > 255)
  213. comp = 255;
  214. /* clamp */
  215. if (comp < y_low)
  216. comp = y_low;
  217. if (comp > y_high)
  218. comp = y_high;
  219. return comp;
  220. }
/*
 * conv_rgb2yuv() - software RGB->YUV conversion of one packed pixel.
 * @input_pixel:   packed pixel; component 2 in bits 23:16, component 1
 *                 in bits 15:8, component 0 in bits 7:0.
 * @matrix_vector: MDP_CSC_SIZE matrix coefficients; each entry is
 *                 sign-extended here from its low 12 bits
 *                 (the <<20 then >>20 on an int32).
 * @bv:            bias vector; only the low 8 bits of each entry used.
 * @clamp_vector:  {Y low, Y high, C low, C high} clamp limits.
 *
 * Mirrors the PPP CSC pipeline in software: reorder components,
 * 3x3 matrix multiply, round, take the integer part, add the bias,
 * clamp each component to 8 bits and the programmed limits.
 * Returns the converted pixel packed the same way as the input.
 *
 * NOTE(review): matrix[] is sized 9 while the fill loop runs to
 * MDP_CSC_SIZE — assumes MDP_CSC_SIZE == 9; confirm in mdp3_ppp.h.
 */
static uint32_t conv_rgb2yuv(uint32_t input_pixel,
		uint16_t *matrix_vector,
		uint16_t *bv,
		uint16_t *clamp_vector)
{
	uint8_t input_C2, input_C0, input_C1;
	uint32_t output;
	int32_t comp_C2, comp_C1, comp_C0, temp;
	int32_t temp1, temp2, temp3;
	int32_t matrix[9];
	int32_t bias_vector[3];
	int32_t Y_low_limit, Y_high_limit, C_low_limit, C_high_limit;
	int32_t i;

	/* unpack the three 8-bit components */
	input_C2 = (input_pixel >> 16) & 0xFF;
	input_C1 = (input_pixel >> 8) & 0xFF;
	input_C0 = (input_pixel >> 0) & 0xFF;
	comp_C0 = input_C0;
	comp_C1 = input_C1;
	comp_C2 = input_C2;
	/* sign-extend each 12-bit matrix coefficient */
	for (i = 0; i < MDP_CSC_SIZE; i++)
		matrix[i] =
			((int32_t) (((int32_t) matrix_vector[i]) << 20)) >> 20;
	bias_vector[0] = (int32_t) (bv[0] & 0xFF);
	bias_vector[1] = (int32_t) (bv[1] & 0xFF);
	bias_vector[2] = (int32_t) (bv[2] & 0xFF);
	Y_low_limit = (int32_t) clamp_vector[0];
	Y_high_limit = (int32_t) clamp_vector[1];
	C_low_limit = (int32_t) clamp_vector[2];
	C_high_limit = (int32_t) clamp_vector[3];
	/*
	 * Color Conversion
	 * reorder input colors
	 */
	temp = comp_C2;
	comp_C2 = comp_C1;
	comp_C1 = comp_C0;
	comp_C0 = temp;
	/* matrix multiplication */
	temp1 = comp_C0 * matrix[0] + comp_C1 * matrix[1] + comp_C2 * matrix[2];
	temp2 = comp_C0 * matrix[3] + comp_C1 * matrix[4] + comp_C2 * matrix[5];
	temp3 = comp_C0 * matrix[6] + comp_C1 * matrix[7] + comp_C2 * matrix[8];
	/* round: add half of the 9-bit fractional weight before shifting */
	comp_C0 = temp1 + 0x100;
	comp_C1 = temp2 + 0x100;
	comp_C2 = temp3 + 0x100;
	/* take integer part */
	comp_C0 >>= 9;
	comp_C1 >>= 9;
	comp_C2 >>= 9;
	/* post bias (+) */
	comp_C0 += bias_vector[0];
	comp_C1 += bias_vector[1];
	comp_C2 += bias_vector[2];
	/* limit pixel to 8-bit */
	comp_C0 = comp_conv_rgb2yuv(comp_C0, Y_high_limit,
		Y_low_limit, C_high_limit, C_low_limit);
	comp_C1 = comp_conv_rgb2yuv(comp_C1, Y_high_limit,
		Y_low_limit, C_high_limit, C_low_limit);
	comp_C2 = comp_conv_rgb2yuv(comp_C2, Y_high_limit,
		Y_low_limit, C_high_limit, C_low_limit);
	/* repack in the same component order as the input */
	output = (comp_C2 << 16) | (comp_C1 << 8) | comp_C0;
	return output;
}
  283. inline void y_h_even_num(struct ppp_img_desc *img)
  284. {
  285. img->roi.y = (img->roi.y / 2) * 2;
  286. img->roi.height = (img->roi.height / 2) * 2;
  287. }
  288. inline void x_w_even_num(struct ppp_img_desc *img)
  289. {
  290. img->roi.x = (img->roi.x / 2) * 2;
  291. img->roi.width = (img->roi.width / 2) * 2;
  292. }
  293. bool check_if_rgb(int color)
  294. {
  295. bool rgb = false;
  296. switch (color) {
  297. case MDP_RGB_565:
  298. case MDP_BGR_565:
  299. case MDP_RGB_888:
  300. case MDP_BGR_888:
  301. case MDP_BGRA_8888:
  302. case MDP_RGBA_8888:
  303. case MDP_ARGB_8888:
  304. case MDP_XRGB_8888:
  305. case MDP_RGBX_8888:
  306. case MDP_BGRX_8888:
  307. rgb = true;
  308. default:
  309. break;
  310. }
  311. return rgb;
  312. }
/*
 * mdp_dst_adjust_rot_addr() - shift a destination plane base address so
 * writes land correctly when the blit is rotated or flipped.
 * @iBuf: blit descriptor (destination geometry and mdp_op flags).
 * @addr: plane base address, already offset to the ROI origin.
 * @bpp:  bytes per pixel of this plane.
 * @uv:   non-zero when @addr is a chroma plane.
 *
 * Returns the adjusted address.
 * NOTE(review): the MIN(16, ...) clamps suggest a 16-pixel PPP tile
 * unit — confirm against the MDP3 PPP documentation.
 */
uint8_t *mdp_dst_adjust_rot_addr(struct ppp_blit_op *iBuf,
	uint8_t *addr, uint32_t bpp, uint32_t uv)
{
	uint32_t dest_ystride = iBuf->dst.prop.width * bpp;
	uint32_t h_slice = 1;

	/* H2V2 chroma planes carry half as many rows as luma */
	if (uv && ((iBuf->dst.color_fmt == MDP_Y_CBCR_H2V2) ||
		(iBuf->dst.color_fmt == MDP_Y_CRCB_H2V2)))
		h_slice = 2;
	/*
	 * Exactly one of ROT90 / LR set (XOR): output is mirrored
	 * horizontally, so start near the right edge of the ROI.
	 */
	if (((iBuf->mdp_op & MDPOP_ROT90) == MDPOP_ROT90) ^
		((iBuf->mdp_op & MDPOP_LR) == MDPOP_LR)) {
		addr +=
			(iBuf->dst.roi.width -
			MIN(16, iBuf->dst.roi.width)) * bpp;
	}
	/* upside-down flip: start near the bottom of the ROI */
	if ((iBuf->mdp_op & MDPOP_UD) == MDPOP_UD) {
		addr +=
			((iBuf->dst.roi.height -
			MIN(16, iBuf->dst.roi.height))/h_slice) *
			dest_ystride;
	}
	return addr;
}
  335. void mdp_adjust_start_addr(struct ppp_blit_op *blit_op,
  336. struct ppp_img_desc *img, int v_slice,
  337. int h_slice, int layer)
  338. {
  339. uint32_t bpp = ppp_bpp(img->color_fmt);
  340. int x = img->roi.x;
  341. int y = img->roi.y;
  342. uint32_t width = img->prop.width;
  343. if (img->color_fmt == MDP_Y_CBCR_H2V2_ADRENO && layer == 0)
  344. img->p0 += (x + y * ALIGN(width, 32)) * bpp;
  345. else if (img->color_fmt == MDP_Y_CBCR_H2V2_VENUS && layer == 0)
  346. img->p0 += (x + y * ALIGN(width, 128)) * bpp;
  347. else
  348. img->p0 += (x + y * width) * bpp;
  349. if (layer != 0)
  350. img->p0 = mdp_dst_adjust_rot_addr(blit_op, img->p0, bpp, 0);
  351. if (img->p1) {
  352. /*
  353. * MDP_Y_CBCR_H2V2/MDP_Y_CRCB_H2V2 cosite for now
  354. * we need to shift x direction same as y dir for offsite
  355. */
  356. if ((img->color_fmt == MDP_Y_CBCR_H2V2_ADRENO ||
  357. img->color_fmt == MDP_Y_CBCR_H2V2_VENUS)
  358. && layer == 0)
  359. img->p1 += ((x / h_slice) * h_slice + ((y == 0) ? 0 :
  360. (((y + 1) / v_slice - 1) * (ALIGN(width/2, 32) * 2))))
  361. * bpp;
  362. else
  363. img->p1 += ((x / h_slice) * h_slice +
  364. ((y == 0) ? 0 : ((y + 1) / v_slice - 1) * width)) * bpp;
  365. if (layer != 0)
  366. img->p0 = mdp_dst_adjust_rot_addr(blit_op,
  367. img->p0, bpp, 0);
  368. }
  369. }
  370. int load_ppp_lut(int tableType, uint32_t *lut)
  371. {
  372. int i;
  373. uint32_t base_addr;
  374. base_addr = tableType ? MDP3_PPP_POST_LUT : MDP3_PPP_PRE_LUT;
  375. for (i = 0; i < PPP_LUT_MAX; i++)
  376. PPP_WRITEL(lut[i], base_addr + MDP3_PPP_LUTn(i));
  377. return 0;
  378. }
  379. /* Configure Primary CSC Matrix */
  380. int load_primary_matrix(struct ppp_csc_table *csc)
  381. {
  382. int i;
  383. for (i = 0; i < MDP_CSC_SIZE; i++)
  384. PPP_WRITEL(csc->fwd_matrix[i], MDP3_PPP_CSC_PFMVn(i));
  385. for (i = 0; i < MDP_CSC_SIZE; i++)
  386. PPP_WRITEL(csc->rev_matrix[i], MDP3_PPP_CSC_PRMVn(i));
  387. for (i = 0; i < MDP_BV_SIZE; i++)
  388. PPP_WRITEL(csc->bv[i], MDP3_PPP_CSC_PBVn(i));
  389. for (i = 0; i < MDP_LV_SIZE; i++)
  390. PPP_WRITEL(csc->lv[i], MDP3_PPP_CSC_PLVn(i));
  391. return 0;
  392. }
  393. /* Load Secondary CSC Matrix */
  394. int load_secondary_matrix(struct ppp_csc_table *csc)
  395. {
  396. int i;
  397. for (i = 0; i < MDP_CSC_SIZE; i++)
  398. PPP_WRITEL(csc->fwd_matrix[i], MDP3_PPP_CSC_SFMVn(i));
  399. for (i = 0; i < MDP_CSC_SIZE; i++)
  400. PPP_WRITEL(csc->rev_matrix[i], MDP3_PPP_CSC_SRMVn(i));
  401. for (i = 0; i < MDP_BV_SIZE; i++)
  402. PPP_WRITEL(csc->bv[i], MDP3_PPP_CSC_SBVn(i));
  403. for (i = 0; i < MDP_LV_SIZE; i++)
  404. PPP_WRITEL(csc->lv[i], MDP3_PPP_CSC_SLVn(i));
  405. return 0;
  406. }
  407. int load_csc_matrix(int matrix_type, struct ppp_csc_table *csc)
  408. {
  409. if (matrix_type == CSC_PRIMARY_MATRIX)
  410. return load_primary_matrix(csc);
  411. return load_secondary_matrix(csc);
  412. }
/*
 * config_ppp_src() - program the PPP source-plane registers.
 * @src:     source image descriptor (plane addresses, strides, ROI,
 *           colour format).
 * @yuv2rgb: forwarded to ppp_pack_pattern() to pick the unpack order
 *           for a YUV->RGB conversion.
 *
 * Always returns 0.
 */
int config_ppp_src(struct ppp_img_desc *src, uint32_t yuv2rgb)
{
	uint32_t val;

	/* ROI size: height in the upper half-word, width in the lower */
	val = ((src->roi.height & MDP3_PPP_XY_MASK) << MDP3_PPP_XY_OFFSET) |
		(src->roi.width & MDP3_PPP_XY_MASK);
	PPP_WRITEL(val, MDP3_PPP_SRC_SIZE);
	/* plane base addresses */
	PPP_WRITEL(src->p0, MDP3_PPP_SRCP0_ADDR);
	PPP_WRITEL(src->p1, MDP3_PPP_SRCP1_ADDR);
	PPP_WRITEL(src->p3, MDP3_PPP_SRCP3_ADDR);
	/* strides for planes 0/1 share one register; plane 2 has its own */
	val = (src->stride0 & MDP3_PPP_STRIDE_MASK) |
		((src->stride1 & MDP3_PPP_STRIDE_MASK) <<
		MDP3_PPP_STRIDE1_OFFSET);
	PPP_WRITEL(val, MDP3_PPP_SRC_YSTRIDE1_ADDR);
	val = ((src->stride2 & MDP3_PPP_STRIDE_MASK) <<
		MDP3_PPP_STRIDE1_OFFSET);
	PPP_WRITEL(val, MDP3_PPP_SRC_YSTRIDE2_ADDR);
	/* format word, flagging odd ROI origin coordinates */
	val = ppp_src_config(src->color_fmt);
	val |= (src->roi.x % 2) ? PPP_SRC_BPP_ROI_ODD_X : 0;
	val |= (src->roi.y % 2) ? PPP_SRC_BPP_ROI_ODD_Y : 0;
	PPP_WRITEL(val, MDP3_PPP_SRC_FORMAT);
	PPP_WRITEL(ppp_pack_pattern(src->color_fmt, yuv2rgb),
		MDP3_PPP_SRC_UNPACK_PATTERN1);
	return 0;
}
  437. int config_ppp_out(struct ppp_img_desc *dst, uint32_t yuv2rgb)
  438. {
  439. uint32_t val;
  440. bool pseudoplanr_output = false;
  441. switch (dst->color_fmt) {
  442. case MDP_Y_CBCR_H2V2:
  443. case MDP_Y_CRCB_H2V2:
  444. case MDP_Y_CBCR_H2V1:
  445. case MDP_Y_CRCB_H2V1:
  446. pseudoplanr_output = true;
  447. break;
  448. default:
  449. break;
  450. }
  451. val = ppp_out_config(dst->color_fmt);
  452. if (pseudoplanr_output)
  453. val |= PPP_DST_PLANE_PSEUDOPLN;
  454. PPP_WRITEL(val, MDP3_PPP_OUT_FORMAT);
  455. PPP_WRITEL(ppp_pack_pattern(dst->color_fmt, yuv2rgb),
  456. MDP3_PPP_OUT_PACK_PATTERN1);
  457. val = ((dst->roi.height & MDP3_PPP_XY_MASK) << MDP3_PPP_XY_OFFSET) |
  458. (dst->roi.width & MDP3_PPP_XY_MASK);
  459. PPP_WRITEL(val, MDP3_PPP_OUT_SIZE);
  460. PPP_WRITEL(dst->p0, MDP3_PPP_OUTP0_ADDR);
  461. PPP_WRITEL(dst->p1, MDP3_PPP_OUTP1_ADDR);
  462. PPP_WRITEL(dst->p3, MDP3_PPP_OUTP3_ADDR);
  463. val = (dst->stride0 & MDP3_PPP_STRIDE_MASK) |
  464. ((dst->stride1 & MDP3_PPP_STRIDE_MASK) <<
  465. MDP3_PPP_STRIDE1_OFFSET);
  466. PPP_WRITEL(val, MDP3_PPP_OUT_YSTRIDE1_ADDR);
  467. val = ((dst->stride2 & MDP3_PPP_STRIDE_MASK) <<
  468. MDP3_PPP_STRIDE1_OFFSET);
  469. PPP_WRITEL(val, MDP3_PPP_OUT_YSTRIDE2_ADDR);
  470. return 0;
  471. }
  472. int config_ppp_background(struct ppp_img_desc *bg)
  473. {
  474. uint32_t val;
  475. PPP_WRITEL(bg->p0, MDP3_PPP_BGP0_ADDR);
  476. PPP_WRITEL(bg->p1, MDP3_PPP_BGP1_ADDR);
  477. PPP_WRITEL(bg->p3, MDP3_PPP_BGP3_ADDR);
  478. val = (bg->stride0 & MDP3_PPP_STRIDE_MASK) |
  479. ((bg->stride1 & MDP3_PPP_STRIDE_MASK) <<
  480. MDP3_PPP_STRIDE1_OFFSET);
  481. PPP_WRITEL(val, MDP3_PPP_BG_YSTRIDE1_ADDR);
  482. val = ((bg->stride2 & MDP3_PPP_STRIDE_MASK) <<
  483. MDP3_PPP_STRIDE1_OFFSET);
  484. PPP_WRITEL(val, MDP3_PPP_BG_YSTRIDE2_ADDR);
  485. PPP_WRITEL(ppp_src_config(bg->color_fmt),
  486. MDP3_PPP_BG_FORMAT);
  487. PPP_WRITEL(ppp_pack_pattern(bg->color_fmt, 0),
  488. MDP3_PPP_BG_UNPACK_PATTERN1);
  489. return 0;
  490. }
/*
 * ppp_edge_rep_luma_pixel() - work out which luma pixels outside the
 * source ROI the scaler's interpolation taps need, and how many edge
 * pixels Tile Fetch must replicate to supply them.
 * @blit_op: blit descriptor (source/destination ROIs and mdp_op flags).
 * @er:      results, filled in here.  NOTE(review): the no-scale path
 *           and some scale cases leave left/top fields untouched — the
 *           caller is expected to have zero-initialised *er.
 *
 * On return the interp-point fields are absolute image coordinates
 * (ROI offsets are added at the end).
 */
void ppp_edge_rep_luma_pixel(struct ppp_blit_op *blit_op,
	struct ppp_edge_rep *er)
{
	if (blit_op->mdp_op & MDPOP_ASCALE) {
		er->is_scale_enabled = 1;
		/* ROT90 swaps the axes the scaler sees */
		if (blit_op->mdp_op & MDPOP_ROT90) {
			er->dst_roi_width = blit_op->dst.roi.height;
			er->dst_roi_height = blit_op->dst.roi.width;
		} else {
			er->dst_roi_width = blit_op->dst.roi.width;
			er->dst_roi_height = blit_op->dst.roi.height;
		}
		/*
		 * Find out the luma pixels needed for scaling in the
		 * x direction (LEFT and RIGHT). Locations of pixels are
		 * relative to the ROI. Upper-left corner of ROI corresponds
		 * to coordinates (0,0). Also set the number of luma pixel
		 * to repeat.
		 */
		if (blit_op->src.roi.width > 3 * er->dst_roi_width) {
			/* scale factor < 1/3 */
			er->luma_interp_point_right =
				(blit_op->src.roi.width - 1);
		} else if (blit_op->src.roi.width == 3 * er->dst_roi_width) {
			/* scale factor == 1/3 */
			er->luma_interp_point_right =
				(blit_op->src.roi.width - 1) + 1;
			er->luma_repeat_right = 1;
		} else if ((blit_op->src.roi.width > er->dst_roi_width) &&
			   (blit_op->src.roi.width < 3 * er->dst_roi_width)) {
			/* 1/3 < scale factor < 1 */
			er->luma_interp_point_left = -1;
			er->luma_interp_point_right =
				(blit_op->src.roi.width - 1) + 1;
			er->luma_repeat_left = 1;
			er->luma_repeat_right = 1;
		} else if (blit_op->src.roi.width == er->dst_roi_width) {
			/* scale factor == 1 */
			er->luma_interp_point_left = -1;
			er->luma_interp_point_right =
				(blit_op->src.roi.width - 1) + 2;
			er->luma_repeat_left = 1;
			er->luma_repeat_right = 2;
		} else {
			/* scale factor > 1 */
			er->luma_interp_point_left = -2;
			er->luma_interp_point_right =
				(blit_op->src.roi.width - 1) + 2;
			er->luma_repeat_left = 2;
			er->luma_repeat_right = 2;
		}
		/*
		 * Find out the number of pixels needed for scaling in the
		 * y direction (TOP and BOTTOM). Locations of pixels are
		 * relative to the ROI. Upper-left corner of ROI corresponds
		 * to coordinates (0,0). Also set the number of luma pixel
		 * to repeat.
		 */
		if (blit_op->src.roi.height > 3 * er->dst_roi_height) {
			/* scale factor < 1/3 */
			er->luma_interp_point_bottom =
				(blit_op->src.roi.height - 1);
		} else if (blit_op->src.roi.height == 3 * er->dst_roi_height) {
			/* scale factor == 1/3 */
			er->luma_interp_point_bottom =
				(blit_op->src.roi.height - 1) + 1;
			er->luma_repeat_bottom = 1;
		} else if ((blit_op->src.roi.height > er->dst_roi_height) &&
			   (blit_op->src.roi.height < 3 * er->dst_roi_height)) {
			/* 1/3 < scale factor < 1 */
			er->luma_interp_point_top = -1;
			er->luma_interp_point_bottom =
				(blit_op->src.roi.height - 1) + 1;
			er->luma_repeat_top = 1;
			er->luma_repeat_bottom = 1;
		} else if (blit_op->src.roi.height == er->dst_roi_height) {
			/* scale factor == 1 */
			er->luma_interp_point_top = -1;
			er->luma_interp_point_bottom =
				(blit_op->src.roi.height - 1) + 2;
			er->luma_repeat_top = 1;
			er->luma_repeat_bottom = 2;
		} else {
			/* scale factor > 1 */
			er->luma_interp_point_top = -2;
			er->luma_interp_point_bottom =
				(blit_op->src.roi.height - 1) + 2;
			er->luma_repeat_top = 2;
			er->luma_repeat_bottom = 2;
		}
	} else {
		/*
		 * Since no scaling needed, Tile Fetch does not require any
		 * more luma pixel than what the ROI contains.
		 */
		er->luma_interp_point_right =
			(int32_t) (blit_op->src.roi.width - 1);
		er->luma_interp_point_bottom =
			(int32_t) (blit_op->src.roi.height - 1);
	}
	/* After adding the ROI offsets, we have locations of
	 * luma_interp_points relative to the image.
	 */
	er->luma_interp_point_left += (int32_t) (blit_op->src.roi.x);
	er->luma_interp_point_right += (int32_t) (blit_op->src.roi.x);
	er->luma_interp_point_top += (int32_t) (blit_op->src.roi.y);
	er->luma_interp_point_bottom += (int32_t) (blit_op->src.roi.y);
}
/*
 * ppp_edge_rep_chroma_pixel() - derive the chroma interpolation points
 * and replication counts from the luma results, based on the source
 * format's chroma siting (H2V1 cosite, H2V2 cosite-H/offsite-V, or
 * fully sampled 4:4:4).
 * @blit_op: blit descriptor (source colour format and ROI).
 * @er:      edge-repeat state; the luma fields must already be in
 *           absolute image coordinates (ppp_edge_rep_luma_pixel plus
 *           the caller's ROI offsetting).
 */
void ppp_edge_rep_chroma_pixel(struct ppp_blit_op *blit_op,
	struct ppp_edge_rep *er)
{
	bool chroma_edge_enable = true;
	uint32_t is_yuv_offsite_vertical = 0;

	/* find out which chroma pixels are needed for chroma upsampling. */
	switch (blit_op->src.color_fmt) {
	case MDP_Y_CBCR_H2V1:
	case MDP_Y_CRCB_H2V1:
	case MDP_YCRYCB_H2V1:
		/* half horizontal resolution, full vertical */
		er->chroma_interp_point_left = er->luma_interp_point_left >> 1;
		er->chroma_interp_point_right =
			(er->luma_interp_point_right + 1) >> 1;
		er->chroma_interp_point_top = er->luma_interp_point_top;
		er->chroma_interp_point_bottom = er->luma_interp_point_bottom;
		break;
	case MDP_Y_CBCR_H2V2:
	case MDP_Y_CBCR_H2V2_ADRENO:
	case MDP_Y_CBCR_H2V2_VENUS:
	case MDP_Y_CRCB_H2V2:
		/* half resolution both ways; vertically offsite */
		er->chroma_interp_point_left = er->luma_interp_point_left >> 1;
		er->chroma_interp_point_right =
			(er->luma_interp_point_right + 1) >> 1;
		er->chroma_interp_point_top =
			(er->luma_interp_point_top - 1) >> 1;
		er->chroma_interp_point_bottom =
			(er->luma_interp_point_bottom + 1) >> 1;
		is_yuv_offsite_vertical = 1;
		break;
	default:
		/* fully sampled: chroma tracks luma exactly */
		chroma_edge_enable = false;
		er->chroma_interp_point_left = er->luma_interp_point_left;
		er->chroma_interp_point_right = er->luma_interp_point_right;
		er->chroma_interp_point_top = er->luma_interp_point_top;
		er->chroma_interp_point_bottom = er->luma_interp_point_bottom;
		break;
	}
	if (chroma_edge_enable) {
		/* Defines which chroma pixels belongs to the roi */
		switch (blit_op->src.color_fmt) {
		case MDP_Y_CBCR_H2V1:
		case MDP_Y_CRCB_H2V1:
		case MDP_YCRYCB_H2V1:
			er->chroma_bound_left = blit_op->src.roi.x / 2;
			/* there are half as many chroma pixel as luma pixels */
			er->chroma_bound_right =
				(blit_op->src.roi.width +
				blit_op->src.roi.x - 1) / 2;
			er->chroma_bound_top = blit_op->src.roi.y;
			er->chroma_bound_bottom =
				(blit_op->src.roi.height + blit_op->src.roi.y - 1);
			break;
		case MDP_Y_CBCR_H2V2:
		case MDP_Y_CBCR_H2V2_ADRENO:
		case MDP_Y_CBCR_H2V2_VENUS:
		case MDP_Y_CRCB_H2V2:
			/*
			 * cosite in horizontal dir, and offsite in vertical dir
			 * width of chroma ROI is 1/2 of size of luma ROI
			 * height of chroma ROI is 1/2 of size of luma ROI
			 */
			er->chroma_bound_left = blit_op->src.roi.x / 2;
			er->chroma_bound_right =
				(blit_op->src.roi.width +
				blit_op->src.roi.x - 1) / 2;
			er->chroma_bound_top = blit_op->src.roi.y / 2;
			er->chroma_bound_bottom =
				(blit_op->src.roi.height +
				blit_op->src.roi.y - 1) / 2;
			break;
		default:
			/*
			 * If no valid chroma sub-sampling format specified,
			 * assume 4:4:4 ( i.e. fully sampled).
			 */
			er->chroma_bound_left = blit_op->src.roi.x;
			er->chroma_bound_right = blit_op->src.roi.width +
				blit_op->src.roi.x - 1;
			er->chroma_bound_top = blit_op->src.roi.y;
			er->chroma_bound_bottom =
				(blit_op->src.roi.height + blit_op->src.roi.y - 1);
			break;
		}
		/*
		 * Knowing which chroma pixels are needed, and which chroma
		 * pixels belong to the ROI (i.e. available for fetching ),
		 * calculate how many chroma pixels Tile Fetch needs to
		 * duplicate. If any required chroma pixels falls outside
		 * of the ROI, Tile Fetch must obtain them by replicating
		 * pixels.
		 */
		if (er->chroma_bound_left > er->chroma_interp_point_left)
			er->chroma_repeat_left =
				er->chroma_bound_left -
				er->chroma_interp_point_left;
		else
			er->chroma_repeat_left = 0;
		if (er->chroma_interp_point_right > er->chroma_bound_right)
			er->chroma_repeat_right =
				er->chroma_interp_point_right -
				er->chroma_bound_right;
		else
			er->chroma_repeat_right = 0;
		if (er->chroma_bound_top > er->chroma_interp_point_top)
			er->chroma_repeat_top =
				er->chroma_bound_top -
				er->chroma_interp_point_top;
		else
			er->chroma_repeat_top = 0;
		if (er->chroma_interp_point_bottom > er->chroma_bound_bottom)
			er->chroma_repeat_bottom =
				er->chroma_interp_point_bottom -
				er->chroma_bound_bottom;
		else
			er->chroma_repeat_bottom = 0;
		/* single-row offsite source: replicate downward only */
		if (er->is_scale_enabled && (blit_op->src.roi.height == 1)
			&& is_yuv_offsite_vertical) {
			er->chroma_repeat_bottom = 3;
			er->chroma_repeat_top = 0;
		}
	}
}
/*
 * config_ppp_edge_rep() - compute the luma/chroma edge-replication
 * counts for the current blit and program the source edge-repeat
 * register.
 * @blit_op: blit descriptor.
 *
 * Returns 0 on success, -EINVAL when any replication count falls
 * outside the 2-bit field the register provides (0..3).
 *
 * NOTE(review): the "< 0" range checks are only meaningful if the
 * repeat fields of struct ppp_edge_rep are signed — confirm against
 * the struct declaration in mdp3_ppp.h.
 */
int config_ppp_edge_rep(struct ppp_blit_op *blit_op)
{
	uint32_t reg = 0;
	struct ppp_edge_rep er;

	/* zero-init: the luma helper leaves untouched fields at 0 */
	memset(&er, 0, sizeof(er));
	ppp_edge_rep_luma_pixel(blit_op, &er);
	/*
	 * After adding the ROI offsets, we have locations of
	 * chroma_interp_points relative to the image.
	 */
	er.chroma_interp_point_left = er.luma_interp_point_left;
	er.chroma_interp_point_right = er.luma_interp_point_right;
	er.chroma_interp_point_top = er.luma_interp_point_top;
	er.chroma_interp_point_bottom = er.luma_interp_point_bottom;
	ppp_edge_rep_chroma_pixel(blit_op, &er);
	/* ensure repeats are >=0 and no larger than 3 pixels */
	if ((er.chroma_repeat_left < 0) || (er.chroma_repeat_right < 0) ||
	    (er.chroma_repeat_top < 0) || (er.chroma_repeat_bottom < 0))
		return -EINVAL;
	if ((er.chroma_repeat_left > 3) || (er.chroma_repeat_right > 3) ||
	    (er.chroma_repeat_top > 3) || (er.chroma_repeat_bottom > 3))
		return -EINVAL;
	if ((er.luma_repeat_left < 0) || (er.luma_repeat_right < 0) ||
	    (er.luma_repeat_top < 0) || (er.luma_repeat_bottom < 0))
		return -EINVAL;
	if ((er.luma_repeat_left > 3) || (er.luma_repeat_right > 3) ||
	    (er.luma_repeat_top > 3) || (er.luma_repeat_bottom > 3))
		return -EINVAL;
	/* pack each 2-bit repeat count into its register field */
	reg |= (er.chroma_repeat_left & 3) << MDP_LEFT_CHROMA;
	reg |= (er.chroma_repeat_right & 3) << MDP_RIGHT_CHROMA;
	reg |= (er.chroma_repeat_top & 3) << MDP_TOP_CHROMA;
	reg |= (er.chroma_repeat_bottom & 3) << MDP_BOTTOM_CHROMA;
	reg |= (er.luma_repeat_left & 3) << MDP_LEFT_LUMA;
	reg |= (er.luma_repeat_right & 3) << MDP_RIGHT_LUMA;
	reg |= (er.luma_repeat_top & 3) << MDP_TOP_LUMA;
	reg |= (er.luma_repeat_bottom & 3) << MDP_BOTTOM_LUMA;
	PPP_WRITEL(reg, MDP3_PPP_SRC_EDGE_REP);
	return 0;
}
  755. int config_ppp_bg_edge_rep(struct ppp_blit_op *blit_op)
  756. {
  757. uint32_t reg = 0;
  758. switch (blit_op->dst.color_fmt) {
  759. case MDP_Y_CBCR_H2V2:
  760. case MDP_Y_CRCB_H2V2:
  761. if (blit_op->dst.roi.y == 0)
  762. reg |= BIT(MDP_TOP_CHROMA);
  763. if ((blit_op->dst.roi.y + blit_op->dst.roi.height) ==
  764. blit_op->dst.prop.height) {
  765. reg |= BIT(MDP_BOTTOM_CHROMA);
  766. }
  767. if (((blit_op->dst.roi.x + blit_op->dst.roi.width) ==
  768. blit_op->dst.prop.width) &&
  769. ((blit_op->dst.roi.width % 2) == 0))
  770. reg |= BIT(MDP_RIGHT_CHROMA);
  771. break;
  772. case MDP_Y_CBCR_H2V1:
  773. case MDP_Y_CRCB_H2V1:
  774. case MDP_YCRYCB_H2V1:
  775. if (((blit_op->dst.roi.x + blit_op->dst.roi.width) ==
  776. blit_op->dst.prop.width) &&
  777. ((blit_op->dst.roi.width % 2) == 0))
  778. reg |= BIT(MDP_RIGHT_CHROMA);
  779. break;
  780. default:
  781. break;
  782. }
  783. PPP_WRITEL(reg, MDP3_PPP_BG_EDGE_REP);
  784. return 0;
  785. }
  786. int config_ppp_lut(uint32_t *pppop_reg_ptr, int lut_c0_en,
  787. int lut_c1_en, int lut_c2_en)
  788. {
  789. if (lut_c0_en)
  790. *pppop_reg_ptr |= MDP_LUT_C0_EN;
  791. if (lut_c1_en)
  792. *pppop_reg_ptr |= MDP_LUT_C1_EN;
  793. if (lut_c2_en)
  794. *pppop_reg_ptr |= MDP_LUT_C2_EN;
  795. return 0;
  796. }
  797. int config_ppp_scale(struct ppp_blit_op *blit_op, uint32_t *pppop_reg_ptr)
  798. {
  799. struct ppp_img_desc *src = &blit_op->src;
  800. struct ppp_img_desc *dst = &blit_op->dst;
  801. uint32_t dstW, dstH;
  802. uint32_t x_fac, y_fac;
  803. uint32_t mdp_blur = 0;
  804. uint32_t phase_init_x, phase_init_y, phase_step_x, phase_step_y;
  805. int x_idx, y_idx;
  806. if (blit_op->mdp_op & MDPOP_ASCALE) {
  807. if (blit_op->mdp_op & MDPOP_ROT90) {
  808. dstW = dst->roi.height;
  809. dstH = dst->roi.width;
  810. } else {
  811. dstW = dst->roi.width;
  812. dstH = dst->roi.height;
  813. }
  814. *pppop_reg_ptr |=
  815. (PPP_OP_SCALE_Y_ON | PPP_OP_SCALE_X_ON);
  816. mdp_blur = blit_op->mdp_op & MDPOP_BLUR;
  817. if ((dstW != src->roi.width) ||
  818. (dstH != src->roi.height) || mdp_blur) {
  819. mdp_calc_scale_params(blit_op->src.roi.x,
  820. blit_op->src.roi.width,
  821. dstW, 1, &phase_init_x,
  822. &phase_step_x);
  823. mdp_calc_scale_params(blit_op->src.roi.y,
  824. blit_op->src.roi.height,
  825. dstH, 0, &phase_init_y,
  826. &phase_step_y);
  827. PPP_WRITEL(phase_init_x, MDP3_PPP_SCALE_PHASEX_INIT);
  828. PPP_WRITEL(phase_init_y, MDP3_PPP_SCALE_PHASEY_INIT);
  829. PPP_WRITEL(phase_step_x, MDP3_PPP_SCALE_PHASEX_STEP);
  830. PPP_WRITEL(phase_step_y, MDP3_PPP_SCALE_PHASEY_STEP);
  831. if (dstW > src->roi.width || dstH > src->roi.height)
  832. ppp_load_up_lut();
  833. if (mdp_blur)
  834. ppp_load_gaussian_lut();
  835. if (dstW <= src->roi.width) {
  836. x_fac = (dstW * 100) / src->roi.width;
  837. x_idx = scale_idx(x_fac);
  838. ppp_load_x_scale_table(x_idx);
  839. }
  840. if (dstH <= src->roi.height) {
  841. y_fac = (dstH * 100) / src->roi.height;
  842. y_idx = scale_idx(y_fac);
  843. ppp_load_y_scale_table(y_idx);
  844. }
  845. } else {
  846. blit_op->mdp_op &= ~(MDPOP_ASCALE);
  847. }
  848. }
  849. config_ppp_edge_rep(blit_op);
  850. config_ppp_bg_edge_rep(blit_op);
  851. return 0;
  852. }
  853. int config_ppp_csc(int src_color, int dst_color, uint32_t *pppop_reg_ptr)
  854. {
  855. bool inputRGB, outputRGB;
  856. inputRGB = check_if_rgb(src_color);
  857. outputRGB = check_if_rgb(dst_color);
  858. if ((!inputRGB) && (outputRGB))
  859. *pppop_reg_ptr |= PPP_OP_CONVERT_YCBCR2RGB |
  860. PPP_OP_CONVERT_ON;
  861. if ((inputRGB) && (!outputRGB))
  862. *pppop_reg_ptr |= PPP_OP_CONVERT_ON;
  863. return 0;
  864. }
/*
 * config_ppp_blend() - configure alpha blending and transparency.
 *
 * Selects the blend mode bits in the caller's operation register
 * shadow and programs the background alpha select and blend parameter
 * registers, based on the requested MDP operations (constant alpha,
 * per-pixel alpha, premultiplied foreground, transparent color) and
 * on whether the source format carries per-pixel alpha.
 * Always returns 0.
 *
 * NOTE(review): the exact meaning of each PPP_OP_BLEND_* /
 * PPP_BLEND_BG_* bit comes from the MDP3 register map, which is not
 * visible here; comments below describe only what this code does.
 */
int config_ppp_blend(struct ppp_blit_op *blit_op,
	uint32_t *pppop_reg_ptr)
{
	struct ppp_csc_table *csc;
	uint32_t alpha, trans_color;
	uint32_t val = 0;
	int c_fmt = blit_op->src.color_fmt;
	int bg_alpha;
	/* RGB->YUV matrix, needed if trans_color must be converted below */
	csc = ppp_csc_rgb2yuv();
	alpha = blit_op->blend.const_alpha;
	trans_color = blit_op->blend.trans_color;
	if (blit_op->mdp_op & MDPOP_FG_PM_ALPHA) {
		/* foreground is premultiplied by its alpha */
		if (ppp_per_p_alpha(c_fmt)) {
			*pppop_reg_ptr |= PPP_OP_ROT_ON |
				PPP_OP_BLEND_ON |
				PPP_OP_BLEND_CONSTANT_ALPHA;
		} else {
			/* const alpha of 0xff is a no-op: drop the request */
			if ((blit_op->mdp_op & MDPOP_ALPHAB)
				&& (blit_op->blend.const_alpha == 0xff)) {
				blit_op->mdp_op &= ~(MDPOP_ALPHAB);
			}
			if ((blit_op->mdp_op & MDPOP_ALPHAB)
				|| (blit_op->mdp_op & MDPOP_TRANSP)) {
				*pppop_reg_ptr |= PPP_OP_ROT_ON |
					PPP_OP_BLEND_ON |
					PPP_OP_BLEND_CONSTANT_ALPHA |
					PPP_OP_BLEND_ALPHA_BLEND_NORMAL;
			}
		}
		/* background contribution uses (1 - fg alpha) */
		bg_alpha = PPP_BLEND_BG_USE_ALPHA_SEL |
			PPP_BLEND_BG_ALPHA_REVERSE;
		if ((ppp_per_p_alpha(c_fmt)) && !(blit_op->mdp_op &
			MDPOP_LAYER_IS_FG)) {
			bg_alpha |= PPP_BLEND_BG_SRCPIXEL_ALPHA;
		} else {
			/* constant alpha lives in bits 31:24 of this reg */
			bg_alpha |= PPP_BLEND_BG_CONSTANT_ALPHA;
			bg_alpha |= blit_op->blend.const_alpha << 24;
		}
		PPP_WRITEL(bg_alpha, MDP3_PPP_BLEND_BG_ALPHA_SEL);
		if (blit_op->mdp_op & MDPOP_TRANSP)
			*pppop_reg_ptr |= PPP_BLEND_CALPHA_TRNASP;
	} else if (ppp_per_p_alpha(c_fmt)) {
		/* non-premultiplied source with per-pixel alpha */
		if (blit_op->mdp_op & MDPOP_LAYER_IS_FG)
			*pppop_reg_ptr |= PPP_OP_ROT_ON |
				PPP_OP_BLEND_ON |
				PPP_OP_BLEND_CONSTANT_ALPHA;
		else
			*pppop_reg_ptr |= PPP_OP_ROT_ON |
				PPP_OP_BLEND_ON |
				PPP_OP_BLEND_SRCPIXEL_ALPHA;
		PPP_WRITEL(0, MDP3_PPP_BLEND_BG_ALPHA_SEL);
	} else {
		/* no per-pixel alpha: constant alpha / transparency only */
		if ((blit_op->mdp_op & MDPOP_ALPHAB)
			&& (blit_op->blend.const_alpha == 0xff)) {
			/* const alpha of 0xff is a no-op: drop the request */
			blit_op->mdp_op &=
				~(MDPOP_ALPHAB);
		}
		if ((blit_op->mdp_op & MDPOP_ALPHAB)
			|| (blit_op->mdp_op & MDPOP_TRANSP)) {
			*pppop_reg_ptr |= PPP_OP_ROT_ON |
				PPP_OP_BLEND_ON |
				PPP_OP_BLEND_CONSTANT_ALPHA |
				PPP_OP_BLEND_ALPHA_BLEND_NORMAL;
		}
		if (blit_op->mdp_op & MDPOP_TRANSP)
			*pppop_reg_ptr |=
				PPP_BLEND_CALPHA_TRNASP;
		PPP_WRITEL(0, MDP3_PPP_BLEND_BG_ALPHA_SEL);
	}
	if (*pppop_reg_ptr & PPP_OP_BLEND_ON) {
		/* blending needs the background plane configured too */
		config_ppp_background(&blit_op->bg);
		if (blit_op->dst.color_fmt == MDP_YCRYCB_H2V1) {
			*pppop_reg_ptr |= PPP_OP_BG_CHROMA_H2V1;
			if (blit_op->mdp_op & MDPOP_TRANSP) {
				/*
				 * YUV destination: the RGB transparent
				 * color must be matched in YUV space.
				 */
				trans_color = conv_rgb2yuv(trans_color,
					&csc->fwd_matrix[0],
					&csc->bv[0],
					&csc->lv[0]);
			}
		}
	}
	/* constant alpha + transparent color in one parameter register */
	val = (alpha << MDP_BLEND_CONST_ALPHA);
	val |= (trans_color & MDP_BLEND_TRASP_COL_MASK);
	PPP_WRITEL(val, MDP3_PPP_BLEND_PARAM);
	return 0;
}
  951. int config_ppp_rotation(uint32_t mdp_op, uint32_t *pppop_reg_ptr)
  952. {
  953. *pppop_reg_ptr |= PPP_OP_ROT_ON;
  954. if (mdp_op & MDPOP_ROT90)
  955. *pppop_reg_ptr |= PPP_OP_ROT_90;
  956. if (mdp_op & MDPOP_LR)
  957. *pppop_reg_ptr |= PPP_OP_FLIP_LR;
  958. if (mdp_op & MDPOP_UD)
  959. *pppop_reg_ptr |= PPP_OP_FLIP_UD;
  960. return 0;
  961. }
/*
 * config_ppp_op_mode() - build and program the full PPP operation.
 *
 * Top-level per-blit setup: derives format-dependent slice factors and
 * ROI alignment, configures color-space conversion, dithering,
 * rotation, strides, plane addresses, scaling, blending, and source/
 * destination registers, then writes the assembled operation word to
 * MDP3_PPP_OP_MODE.  Always returns 0.
 */
int config_ppp_op_mode(struct ppp_blit_op *blit_op)
{
	uint32_t yuv2rgb;
	uint32_t ppp_operation_reg = 0;
	int sv_slice, sh_slice;
	int dv_slice, dh_slice;
	/* slice factors default to 1 (no chroma subsampling) */
	sv_slice = sh_slice = dv_slice = dh_slice = 1;
	ppp_operation_reg |= ppp_dst_op_reg(blit_op->dst.color_fmt);
	switch (blit_op->dst.color_fmt) {
	case MDP_Y_CBCR_H2V2:
	case MDP_Y_CRCB_H2V2:
		/* 4:2:0 destination: even height required as well */
		y_h_even_num(&blit_op->dst);
		y_h_even_num(&blit_op->src);
		dv_slice = 2;
		/* fallthrough - H2V2 also needs the H2V1 alignment */
	case MDP_Y_CBCR_H2V1:
	case MDP_Y_CRCB_H2V1:
	case MDP_YCRYCB_H2V1:
		/* horizontally subsampled: force even x/width */
		x_w_even_num(&blit_op->dst);
		x_w_even_num(&blit_op->src);
		dh_slice = 2;
		break;
	default:
		break;
	}
	ppp_operation_reg |= ppp_src_op_reg(blit_op->src.color_fmt);
	switch (blit_op->src.color_fmt) {
	case MDP_Y_CBCR_H2V2:
	case MDP_Y_CBCR_H2V2_ADRENO:
	case MDP_Y_CBCR_H2V2_VENUS:
	case MDP_Y_CRCB_H2V2:
		sh_slice = sv_slice = 2;
		break;
	case MDP_YCRYCB_H2V1:
		/* interleaved YUV source also constrains the dst ROI */
		x_w_even_num(&blit_op->dst);
		x_w_even_num(&blit_op->src);
		/* fallthrough - shares the H2V1 horizontal slice */
	case MDP_Y_CBCR_H2V1:
	case MDP_Y_CRCB_H2V1:
		sh_slice = 2;
		break;
	default:
		break;
	}
	config_ppp_csc(blit_op->src.color_fmt,
		blit_op->dst.color_fmt, &ppp_operation_reg);
	/* remember conversion direction for src/out register setup below */
	yuv2rgb = ppp_operation_reg & PPP_OP_CONVERT_YCBCR2RGB;
	if (blit_op->mdp_op & MDPOP_DITHER)
		ppp_operation_reg |= PPP_OP_DITHER_EN;
	if (blit_op->mdp_op & MDPOP_ROTATION)
		config_ppp_rotation(blit_op->mdp_op, &ppp_operation_reg);
	/* stride: Adreno/Venus sources carry tiling-specific alignment */
	if (blit_op->src.color_fmt == MDP_Y_CBCR_H2V2_ADRENO) {
		blit_op->src.stride0 = ALIGN(blit_op->src.prop.width, 32) *
			ppp_bpp(blit_op->src.color_fmt);
		blit_op->src.stride1 = 2 * ALIGN(blit_op->src.prop.width/2, 32);
	} else if (blit_op->src.color_fmt == MDP_Y_CBCR_H2V2_VENUS) {
		blit_op->src.stride0 = ALIGN(blit_op->src.prop.width, 128) *
			ppp_bpp(blit_op->src.color_fmt);
		blit_op->src.stride1 = blit_op->src.stride0;
	} else {
		blit_op->src.stride0 = blit_op->src.prop.width *
			ppp_bpp(blit_op->src.color_fmt);
		blit_op->src.stride1 = blit_op->src.stride0;
	}
	blit_op->dst.stride0 = blit_op->dst.prop.width *
		ppp_bpp(blit_op->dst.color_fmt);
	if (ppp_multi_plane(blit_op->dst.color_fmt)) {
		/* chroma plane follows the luma plane in memory */
		blit_op->dst.p1 = blit_op->dst.p0;
		blit_op->dst.p1 += blit_op->dst.prop.width *
			blit_op->dst.prop.height *
			ppp_bpp(blit_op->dst.color_fmt);
	} else {
		blit_op->dst.p1 = NULL;
	}
	/* background reads from the same surface the blit writes to */
	blit_op->bg = blit_op->dst;
	/* Jumping from Y-Plane to Chroma Plane */
	/* first pixel addr calculation */
	mdp_adjust_start_addr(blit_op, &blit_op->src, sv_slice, sh_slice, 0);
	mdp_adjust_start_addr(blit_op, &blit_op->bg, dv_slice, dh_slice, 1);
	mdp_adjust_start_addr(blit_op, &blit_op->dst, dv_slice, dh_slice, 2);
	config_ppp_scale(blit_op, &ppp_operation_reg);
	config_ppp_blend(blit_op, &ppp_operation_reg);
	config_ppp_src(&blit_op->src, yuv2rgb);
	config_ppp_out(&blit_op->dst, yuv2rgb);
	PPP_WRITEL(ppp_operation_reg, MDP3_PPP_OP_MODE);
	/* ensure all register writes land before the engine is kicked */
	mb();
	return 0;
}
  1048. void ppp_enable(void)
  1049. {
  1050. PPP_WRITEL(0x1000, 0x30);
  1051. mb();
  1052. }
/*
 * mdp3_ppp_init() - one-time PPP hardware table initialization.
 *
 * Loads the default pre- and post-LUTs and the primary (RGB->YUV)
 * and secondary CSC matrices into the block.  Always returns 0.
 * NOTE(review): presumably called once at probe/power-up with the
 * block clocked - confirm against the caller.
 */
int mdp3_ppp_init(void)
{
	load_ppp_lut(LUT_PRE_TABLE, ppp_default_pre_lut());
	load_ppp_lut(LUT_POST_TABLE, ppp_default_post_lut());
	load_csc_matrix(CSC_PRIMARY_MATRIX, ppp_csc_rgb2yuv());
	load_csc_matrix(CSC_SECONDARY_MATRIX, ppp_csc_table2());
	return 0;
}