mhl_msc.c 17 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713
  1. /* Copyright (c) 2013, The Linux Foundation. All rights reserved.
  2. *
  3. * This program is free software; you can redistribute it and/or modify
  4. * it under the terms of the GNU General Public License version 2 and
  5. * only version 2 as published by the Free Software Foundation.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. *
  12. */
  13. #include <linux/types.h>
  14. #include <linux/mhl_8334.h>
  15. #include <linux/vmalloc.h>
  16. #include <linux/input.h>
  17. #include "mhl_msc.h"
  18. #include <linux/mdss_hdmi_mhl.h>
  19. static struct mhl_tx_ctrl *mhl_ctrl;
  20. static DEFINE_MUTEX(msc_send_workqueue_mutex);
  21. const char *devcap_reg_name[] = {
  22. "DEV_STATE ",
  23. "MHL_VERSION ",
  24. "DEV_CAT ",
  25. "ADOPTER_ID_H ",
  26. "ADOPTER_ID_L ",
  27. "VID_LINK_MODE ",
  28. "AUD_LINK_MODE ",
  29. "VIDEO_TYPE ",
  30. "LOG_DEV_MAP ",
  31. "BANDWIDTH ",
  32. "FEATURE_FLAG ",
  33. "DEVICE_ID_H ",
  34. "DEVICE_ID_L ",
  35. "SCRATCHPAD_SIZE ",
  36. "INT_STAT_SIZE ",
  37. "Reserved ",
  38. };
  39. static bool mhl_check_tmds_enabled(struct mhl_tx_ctrl *mhl_ctrl)
  40. {
  41. if (mhl_ctrl && mhl_ctrl->hdmi_mhl_ops) {
  42. struct msm_hdmi_mhl_ops *ops = mhl_ctrl->hdmi_mhl_ops;
  43. struct platform_device *pdev = mhl_ctrl->pdata->hdmi_pdev;
  44. return (ops->tmds_enabled(pdev) == true);
  45. } else {
  46. pr_err("%s: invalid input\n", __func__);
  47. return false;
  48. }
  49. }
  50. static void mhl_print_devcap(u8 offset, u8 devcap)
  51. {
  52. switch (offset) {
  53. case DEVCAP_OFFSET_DEV_CAT:
  54. pr_debug("DCAP: %02X %s: %02X DEV_TYPE=%X POW=%s\n",
  55. offset, devcap_reg_name[offset], devcap,
  56. devcap & 0x0F, (devcap & 0x10) ? "y" : "n");
  57. break;
  58. case DEVCAP_OFFSET_FEATURE_FLAG:
  59. pr_debug("DCAP: %02X %s: %02X RCP=%s RAP=%s SP=%s\n",
  60. offset, devcap_reg_name[offset], devcap,
  61. (devcap & 0x01) ? "y" : "n",
  62. (devcap & 0x02) ? "y" : "n",
  63. (devcap & 0x04) ? "y" : "n");
  64. break;
  65. default:
  66. pr_debug("DCAP: %02X %s: %02X\n",
  67. offset, devcap_reg_name[offset], devcap);
  68. break;
  69. }
  70. }
  71. static bool mhl_qualify_path_enable(struct mhl_tx_ctrl *mhl_ctrl)
  72. {
  73. int rc = false;
  74. if (!mhl_ctrl)
  75. return rc;
  76. if (mhl_ctrl->tmds_en_state ||
  77. /* Identify sink with non-standard INT STAT SIZE */
  78. (mhl_ctrl->devcap[DEVCAP_OFFSET_MHL_VERSION] == 0x10 &&
  79. mhl_ctrl->devcap[DEVCAP_OFFSET_INT_STAT_SIZE] == 0x44))
  80. rc = true;
  81. return rc;
  82. }
  83. void mhl_register_msc(struct mhl_tx_ctrl *ctrl)
  84. {
  85. if (ctrl)
  86. mhl_ctrl = ctrl;
  87. }
  88. static int mhl_flag_scrpd_burst_req(struct mhl_tx_ctrl *mhl_ctrl,
  89. struct msc_command_struct *req)
  90. {
  91. int postpone_send = 0;
  92. if ((req->command == MHL_SET_INT) &&
  93. (req->offset == MHL_RCHANGE_INT)) {
  94. if (mhl_ctrl->scrpd_busy) {
  95. /* reduce priority */
  96. if (req->payload.data[0] == MHL_INT_REQ_WRT)
  97. postpone_send = 1;
  98. } else {
  99. if (req->payload.data[0] == MHL_INT_REQ_WRT) {
  100. mhl_ctrl->scrpd_busy = true;
  101. mhl_ctrl->wr_burst_pending = true;
  102. } else if (req->payload.data[0] == MHL_INT_GRT_WRT) {
  103. mhl_ctrl->scrpd_busy = true;
  104. }
  105. }
  106. }
  107. return postpone_send;
  108. }
  109. void mhl_msc_send_work(struct work_struct *work)
  110. {
  111. struct mhl_tx_ctrl *mhl_ctrl =
  112. container_of(work, struct mhl_tx_ctrl, mhl_msc_send_work);
  113. struct msc_cmd_envelope *cmd_env;
  114. int ret, postpone_send;
  115. /*
  116. * Remove item from the queue
  117. * and schedule it
  118. */
  119. mutex_lock(&msc_send_workqueue_mutex);
  120. while (!list_empty(&mhl_ctrl->list_cmd)) {
  121. cmd_env = list_first_entry(&mhl_ctrl->list_cmd,
  122. struct msc_cmd_envelope,
  123. msc_queue_envelope);
  124. list_del(&cmd_env->msc_queue_envelope);
  125. mutex_unlock(&msc_send_workqueue_mutex);
  126. postpone_send = mhl_flag_scrpd_burst_req(
  127. mhl_ctrl,
  128. &cmd_env->msc_cmd_msg);
  129. if (postpone_send) {
  130. if (cmd_env->msc_cmd_msg.retry-- > 0) {
  131. mutex_lock(&msc_send_workqueue_mutex);
  132. list_add_tail(
  133. &cmd_env->msc_queue_envelope,
  134. &mhl_ctrl->list_cmd);
  135. mutex_unlock(&msc_send_workqueue_mutex);
  136. } else {
  137. pr_err("%s: max scrpd retry out\n",
  138. __func__);
  139. }
  140. } else {
  141. ret = mhl_send_msc_command(mhl_ctrl,
  142. &cmd_env->msc_cmd_msg);
  143. if (ret == -EAGAIN) {
  144. int retry = 2;
  145. while (retry--) {
  146. ret = mhl_send_msc_command(
  147. mhl_ctrl,
  148. &cmd_env->msc_cmd_msg);
  149. if (ret != -EAGAIN)
  150. break;
  151. }
  152. }
  153. if (ret == -EAGAIN)
  154. pr_err("%s: send_msc_command retry out!\n",
  155. __func__);
  156. vfree(cmd_env);
  157. }
  158. mutex_lock(&msc_send_workqueue_mutex);
  159. }
  160. mutex_unlock(&msc_send_workqueue_mutex);
  161. }
  162. int mhl_queue_msc_command(struct mhl_tx_ctrl *mhl_ctrl,
  163. struct msc_command_struct *req,
  164. int priority_send)
  165. {
  166. struct msc_cmd_envelope *cmd_env;
  167. mutex_lock(&msc_send_workqueue_mutex);
  168. cmd_env = vmalloc(sizeof(struct msc_cmd_envelope));
  169. if (!cmd_env) {
  170. pr_err("%s: out of memory!\n", __func__);
  171. mutex_unlock(&msc_send_workqueue_mutex);
  172. return -ENOMEM;
  173. }
  174. memcpy(&cmd_env->msc_cmd_msg, req,
  175. sizeof(struct msc_command_struct));
  176. if (priority_send)
  177. list_add(&cmd_env->msc_queue_envelope,
  178. &mhl_ctrl->list_cmd);
  179. else
  180. list_add_tail(&cmd_env->msc_queue_envelope,
  181. &mhl_ctrl->list_cmd);
  182. mutex_unlock(&msc_send_workqueue_mutex);
  183. queue_work(mhl_ctrl->msc_send_workqueue, &mhl_ctrl->mhl_msc_send_work);
  184. return 0;
  185. }
  186. static int mhl_update_devcap(struct mhl_tx_ctrl *mhl_ctrl,
  187. int offset, u8 devcap)
  188. {
  189. if (!mhl_ctrl)
  190. return -EFAULT;
  191. if (offset < 0 || offset > 15)
  192. return -EFAULT;
  193. mhl_ctrl->devcap[offset] = devcap;
  194. mhl_print_devcap(offset, mhl_ctrl->devcap[offset]);
  195. return 0;
  196. }
  197. int mhl_msc_clear(struct mhl_tx_ctrl *mhl_ctrl)
  198. {
  199. if (!mhl_ctrl)
  200. return -EFAULT;
  201. memset(mhl_ctrl->devcap, 0, 16);
  202. mhl_ctrl->devcap_state = 0;
  203. mhl_ctrl->path_en_state = 0;
  204. mhl_ctrl->status[0] = 0;
  205. mhl_ctrl->status[1] = 0;
  206. mhl_ctrl->scrpd_busy = 0;
  207. mhl_ctrl->wr_burst_pending = 0;
  208. return 0;
  209. }
  210. int mhl_msc_command_done(struct mhl_tx_ctrl *mhl_ctrl,
  211. struct msc_command_struct *req)
  212. {
  213. switch (req->command) {
  214. case MHL_WRITE_STAT:
  215. if (req->offset == MHL_STATUS_REG_LINK_MODE) {
  216. if (req->payload.data[0]
  217. & MHL_STATUS_PATH_ENABLED) {
  218. /* Enable TMDS output */
  219. mhl_tmds_ctrl(mhl_ctrl, TMDS_ENABLE);
  220. if (mhl_ctrl->devcap_state == MHL_DEVCAP_ALL)
  221. mhl_drive_hpd(mhl_ctrl, HPD_UP);
  222. } else {
  223. /* Disable TMDS output */
  224. mhl_tmds_ctrl(mhl_ctrl, TMDS_DISABLE);
  225. mhl_drive_hpd(mhl_ctrl, HPD_DOWN);
  226. }
  227. }
  228. break;
  229. case MHL_READ_DEVCAP:
  230. mhl_update_devcap(mhl_ctrl,
  231. req->offset, req->retval);
  232. mhl_ctrl->devcap_state |= BIT(req->offset);
  233. switch (req->offset) {
  234. case MHL_DEV_CATEGORY_OFFSET:
  235. if (req->retval & MHL_DEV_CATEGORY_POW_BIT)
  236. pr_debug("%s: devcap pow bit set\n",
  237. __func__);
  238. else
  239. pr_debug("%s: devcap pow bit unset\n",
  240. __func__);
  241. break;
  242. case DEVCAP_OFFSET_RESERVED:
  243. mhl_tmds_ctrl(mhl_ctrl, TMDS_ENABLE);
  244. mhl_drive_hpd(mhl_ctrl, HPD_UP);
  245. break;
  246. case DEVCAP_OFFSET_MHL_VERSION:
  247. case DEVCAP_OFFSET_INT_STAT_SIZE:
  248. if (mhl_qualify_path_enable(mhl_ctrl))
  249. mhl_tmds_ctrl(mhl_ctrl, TMDS_ENABLE);
  250. break;
  251. }
  252. break;
  253. case MHL_WRITE_BURST:
  254. mhl_msc_send_set_int(
  255. mhl_ctrl,
  256. MHL_RCHANGE_INT,
  257. MHL_INT_DSCR_CHG,
  258. MSC_PRIORITY_SEND);
  259. break;
  260. }
  261. return 0;
  262. }
  263. int mhl_msc_send_set_int(struct mhl_tx_ctrl *mhl_ctrl,
  264. u8 offset, u8 mask, u8 prior)
  265. {
  266. struct msc_command_struct req;
  267. req.command = MHL_SET_INT;
  268. req.offset = offset;
  269. req.payload.data[0] = mask;
  270. return mhl_queue_msc_command(mhl_ctrl, &req, prior);
  271. }
  272. int mhl_msc_send_write_stat(struct mhl_tx_ctrl *mhl_ctrl,
  273. u8 offset, u8 value)
  274. {
  275. struct msc_command_struct req;
  276. req.command = MHL_WRITE_STAT;
  277. req.offset = offset;
  278. req.payload.data[0] = value;
  279. return mhl_queue_msc_command(mhl_ctrl, &req, MSC_NORMAL_SEND);
  280. }
  281. static int mhl_msc_write_burst(struct mhl_tx_ctrl *mhl_ctrl,
  282. u8 offset, u8 *data, u8 length)
  283. {
  284. struct msc_command_struct req;
  285. if (!mhl_ctrl)
  286. return -EFAULT;
  287. if (!mhl_ctrl->wr_burst_pending)
  288. return -EFAULT;
  289. req.command = MHL_WRITE_BURST;
  290. req.offset = offset;
  291. req.length = length;
  292. req.payload.burst_data = data;
  293. mhl_queue_msc_command(mhl_ctrl, &req, MSC_PRIORITY_SEND);
  294. mhl_ctrl->wr_burst_pending = false;
  295. return 0;
  296. }
  297. int mhl_msc_send_msc_msg(struct mhl_tx_ctrl *mhl_ctrl,
  298. u8 sub_cmd, u8 cmd_data)
  299. {
  300. struct msc_command_struct req;
  301. req.command = MHL_MSC_MSG;
  302. req.payload.data[0] = sub_cmd;
  303. req.payload.data[1] = cmd_data;
  304. return mhl_queue_msc_command(mhl_ctrl, &req, MSC_NORMAL_SEND);
  305. }
  306. /*
  307. * Certain MSC msgs such as RCPK, RCPE and RAPK
  308. * should be transmitted as a high priority
  309. * because these msgs should be sent within
  310. * 1000ms of a receipt of RCP/RAP. So such msgs can
  311. * be added to the head of msc cmd queue.
  312. */
  313. static int mhl_msc_send_prior_msc_msg(struct mhl_tx_ctrl *mhl_ctrl,
  314. u8 sub_cmd, u8 cmd_data)
  315. {
  316. struct msc_command_struct req;
  317. req.command = MHL_MSC_MSG;
  318. req.payload.data[0] = sub_cmd;
  319. req.payload.data[1] = cmd_data;
  320. return mhl_queue_msc_command(mhl_ctrl, &req, MSC_PRIORITY_SEND);
  321. }
  322. int mhl_msc_read_devcap(struct mhl_tx_ctrl *mhl_ctrl, u8 offset)
  323. {
  324. struct msc_command_struct req;
  325. if (offset < 0 || offset > 15)
  326. return -EFAULT;
  327. req.command = MHL_READ_DEVCAP;
  328. req.offset = offset;
  329. req.payload.data[0] = 0;
  330. return mhl_queue_msc_command(mhl_ctrl, &req, MSC_NORMAL_SEND);
  331. }
  332. int mhl_msc_read_devcap_all(struct mhl_tx_ctrl *mhl_ctrl)
  333. {
  334. int offset;
  335. int ret;
  336. for (offset = 0; offset < DEVCAP_SIZE; offset++) {
  337. ret = mhl_msc_read_devcap(mhl_ctrl, offset);
  338. if (ret == -EBUSY)
  339. pr_err("%s: queue busy!\n", __func__);
  340. }
  341. return ret;
  342. }
  343. static void mhl_handle_input(struct mhl_tx_ctrl *mhl_ctrl,
  344. u8 key_code, u16 input_key_code)
  345. {
  346. int key_press = (key_code & 0x80) == 0;
  347. pr_debug("%s: send key events[%x][%x][%d]\n",
  348. __func__, key_code, input_key_code, key_press);
  349. input_report_key(mhl_ctrl->input, input_key_code, key_press);
  350. input_sync(mhl_ctrl->input);
  351. }
  352. int mhl_rcp_recv(struct mhl_tx_ctrl *mhl_ctrl, u8 key_code)
  353. {
  354. u8 index = key_code & 0x7f;
  355. u16 input_key_code;
  356. if (!mhl_ctrl->rcp_key_code_tbl) {
  357. pr_err("%s: RCP Key Code Table not initialized\n", __func__);
  358. return -EINVAL;
  359. }
  360. input_key_code = mhl_ctrl->rcp_key_code_tbl[index];
  361. if ((index < mhl_ctrl->rcp_key_code_tbl_len) &&
  362. (input_key_code > 0)) {
  363. /* prior send rcpk */
  364. mhl_msc_send_prior_msc_msg(
  365. mhl_ctrl,
  366. MHL_MSC_MSG_RCPK,
  367. key_code);
  368. if (mhl_ctrl->input)
  369. mhl_handle_input(mhl_ctrl, key_code, input_key_code);
  370. } else {
  371. /* prior send rcpe */
  372. mhl_msc_send_prior_msc_msg(
  373. mhl_ctrl,
  374. MHL_MSC_MSG_RCPE,
  375. MHL_RCPE_INEFFECTIVE_KEY_CODE);
  376. /* send rcpk after rcpe send */
  377. mhl_msc_send_prior_msc_msg(
  378. mhl_ctrl,
  379. MHL_MSC_MSG_RCPK,
  380. key_code);
  381. }
  382. return 0;
  383. }
  384. static int mhl_rap_action(struct mhl_tx_ctrl *mhl_ctrl, u8 action_code)
  385. {
  386. switch (action_code) {
  387. case MHL_RAP_CONTENT_ON:
  388. mhl_tmds_ctrl(mhl_ctrl, TMDS_ENABLE);
  389. break;
  390. case MHL_RAP_CONTENT_OFF:
  391. /*
  392. * instead of only disabling tmds
  393. * send power button press - CONTENT_OFF
  394. */
  395. input_report_key(mhl_ctrl->input, KEY_VENDOR, 1);
  396. input_sync(mhl_ctrl->input);
  397. input_report_key(mhl_ctrl->input, KEY_VENDOR, 0);
  398. input_sync(mhl_ctrl->input);
  399. break;
  400. default:
  401. break;
  402. }
  403. return 0;
  404. }
  405. static int mhl_rap_recv(struct mhl_tx_ctrl *mhl_ctrl, u8 action_code)
  406. {
  407. u8 error_code;
  408. bool tmds_en;
  409. tmds_en = mhl_check_tmds_enabled(mhl_ctrl);
  410. switch (action_code) {
  411. case MHL_RAP_POLL:
  412. if (tmds_en)
  413. error_code = MHL_RAPK_NO_ERROR;
  414. else
  415. error_code = MHL_RAPK_UNSUPPORTED_ACTION_CODE;
  416. break;
  417. case MHL_RAP_CONTENT_ON:
  418. case MHL_RAP_CONTENT_OFF:
  419. if (tmds_en) {
  420. mhl_rap_action(mhl_ctrl, action_code);
  421. error_code = MHL_RAPK_NO_ERROR;
  422. } else {
  423. error_code = MHL_RAPK_UNSUPPORTED_ACTION_CODE;
  424. }
  425. break;
  426. default:
  427. error_code = MHL_RAPK_UNRECOGNIZED_ACTION_CODE;
  428. break;
  429. }
  430. /* prior send rapk */
  431. return mhl_msc_send_prior_msc_msg(
  432. mhl_ctrl,
  433. MHL_MSC_MSG_RAPK,
  434. error_code);
  435. }
  436. int mhl_msc_recv_msc_msg(struct mhl_tx_ctrl *mhl_ctrl,
  437. u8 sub_cmd, u8 cmd_data)
  438. {
  439. int rc = 0;
  440. switch (sub_cmd) {
  441. case MHL_MSC_MSG_RCP:
  442. pr_debug("MHL: receive RCP(0x%02x)\n", cmd_data);
  443. rc = mhl_rcp_recv(mhl_ctrl, cmd_data);
  444. break;
  445. case MHL_MSC_MSG_RCPK:
  446. pr_debug("MHL: receive RCPK(0x%02x)\n", cmd_data);
  447. break;
  448. case MHL_MSC_MSG_RCPE:
  449. pr_debug("MHL: receive RCPE(0x%02x)\n", cmd_data);
  450. break;
  451. case MHL_MSC_MSG_RAP:
  452. pr_debug("MHL: receive RAP(0x%02x)\n", cmd_data);
  453. rc = mhl_rap_recv(mhl_ctrl, cmd_data);
  454. break;
  455. case MHL_MSC_MSG_RAPK:
  456. pr_debug("MHL: receive RAPK(0x%02x)\n", cmd_data);
  457. break;
  458. default:
  459. break;
  460. }
  461. return rc;
  462. }
/*
 * Handle a SET_INT written by the peer to interrupt register @offset
 * (only registers 0 and 1 exist; anything else is -EFAULT).
 *
 * Register 0 carries the scratchpad/DEVCAP handshake bits; register 1
 * carries EDID_CHG.  Multiple bits may be set in one write, so each is
 * tested independently.  Always returns 0 for valid offsets.
 */
int mhl_msc_recv_set_int(struct mhl_tx_ctrl *mhl_ctrl,
	u8 offset, u8 set_int)
{
	int prior;
	if (offset >= 2)
		return -EFAULT;
	switch (offset) {
	case 0:
		if (set_int & MHL_INT_DCAP_CHG) {
			/* peer dcap has changed: invalidate the cache
			 * and re-read all DEVCAP registers
			 */
			mhl_ctrl->devcap_state = 0;
			mhl_msc_read_devcap_all(mhl_ctrl);
		}
		if (set_int & MHL_INT_DSCR_CHG) {
			/* peer's scratchpad reg changed; fetch the new
			 * contents and release our busy flag
			 */
			pr_debug("%s: dscr chg\n", __func__);
			mhl_read_scratchpad(mhl_ctrl);
			mhl_ctrl->scrpd_busy = false;
		}
		if (set_int & MHL_INT_REQ_WRT) {
			/* SET_INT: REQ_WRT — peer asks permission to
			 * write our scratchpad.  Grant at high priority
			 * when idle; if we are mid-transaction, queue
			 * the grant at normal priority so it follows
			 * our own pending scratchpad traffic.
			 */
			if (mhl_ctrl->scrpd_busy) {
				prior = MSC_NORMAL_SEND;
			} else {
				prior = MSC_PRIORITY_SEND;
				mhl_ctrl->scrpd_busy = true;
			}
			mhl_msc_send_set_int(
				mhl_ctrl,
				MHL_RCHANGE_INT,
				MHL_INT_GRT_WRT,
				prior);
		}
		if (set_int & MHL_INT_GRT_WRT) {
			/* SET_INT: GRT_WRT — peer granted our earlier
			 * REQ_WRT: wake the waiter in
			 * mhl_request_write_burst() and send the staged
			 * scratchpad data
			 */
			pr_debug("%s: recvd req to permit/grant write",
				__func__);
			complete_all(&mhl_ctrl->req_write_done);
			mhl_msc_write_burst(
				mhl_ctrl,
				MHL_SCRATCHPAD_OFFSET,
				mhl_ctrl->scrpd.data,
				mhl_ctrl->scrpd.length);
		}
		break;
	case 1:
		if (set_int & MHL_INT_EDID_CHG) {
			/* peer EDID has changed
			 * toggle HPD to read EDID
			 * (110ms low keeps the sink's HPD detector happy)
			 */
			pr_debug("%s: EDID CHG\n", __func__);
			mhl_drive_hpd(mhl_ctrl, HPD_DOWN);
			msleep(110);
			mhl_drive_hpd(mhl_ctrl, HPD_UP);
		}
	}
	return 0;
}
  521. int mhl_msc_recv_write_stat(struct mhl_tx_ctrl *mhl_ctrl,
  522. u8 offset, u8 value)
  523. {
  524. bool tmds_en;
  525. if (offset >= 2)
  526. return -EFAULT;
  527. switch (offset) {
  528. case 0:
  529. /*
  530. * connected device bits
  531. * changed and DEVCAP READY
  532. */
  533. if (((value ^ mhl_ctrl->status[offset]) &
  534. MHL_STATUS_DCAP_RDY)) {
  535. if (value & MHL_STATUS_DCAP_RDY) {
  536. mhl_ctrl->devcap_state = 0;
  537. mhl_msc_read_devcap_all(mhl_ctrl);
  538. } else {
  539. /*
  540. * peer dcap turned not ready
  541. * use old devap state
  542. */
  543. pr_debug("%s: DCAP RDY bit cleared\n",
  544. __func__);
  545. }
  546. }
  547. break;
  548. case 1:
  549. /*
  550. * connected device bits
  551. * changed and PATH ENABLED
  552. * bit set
  553. */
  554. tmds_en = mhl_check_tmds_enabled(mhl_ctrl);
  555. if ((value ^ mhl_ctrl->status[offset])
  556. & MHL_STATUS_PATH_ENABLED) {
  557. if (value & MHL_STATUS_PATH_ENABLED) {
  558. if (tmds_en &&
  559. (mhl_ctrl->devcap[offset] &
  560. MHL_FEATURE_RAP_SUPPORT)) {
  561. mhl_msc_send_msc_msg(
  562. mhl_ctrl,
  563. MHL_MSC_MSG_RAP,
  564. MHL_RAP_CONTENT_ON);
  565. }
  566. mhl_ctrl->path_en_state
  567. |= (MHL_STATUS_PATH_ENABLED |
  568. MHL_STATUS_CLK_MODE_NORMAL);
  569. mhl_msc_send_write_stat(
  570. mhl_ctrl,
  571. MHL_STATUS_REG_LINK_MODE,
  572. mhl_ctrl->path_en_state);
  573. } else {
  574. mhl_ctrl->path_en_state
  575. &= ~(MHL_STATUS_PATH_ENABLED |
  576. MHL_STATUS_CLK_MODE_NORMAL);
  577. mhl_msc_send_write_stat(
  578. mhl_ctrl,
  579. MHL_STATUS_REG_LINK_MODE,
  580. mhl_ctrl->path_en_state);
  581. }
  582. }
  583. break;
  584. }
  585. mhl_ctrl->status[offset] = value;
  586. return 0;
  587. }
  588. static int mhl_request_write_burst(struct mhl_tx_ctrl *mhl_ctrl,
  589. u8 start_reg,
  590. u8 length, u8 *data)
  591. {
  592. int i, reg;
  593. int timeout, retry = 20;
  594. if (!(mhl_ctrl->devcap[DEVCAP_OFFSET_FEATURE_FLAG] &
  595. MHL_FEATURE_SP_SUPPORT)) {
  596. pr_debug("MHL: SCRATCHPAD_NOT_SUPPORTED\n");
  597. return -EFAULT;
  598. }
  599. /*
  600. * scratchpad remains busy as long as a peer's permission or
  601. * write bursts are pending; experimentally it was found that
  602. * 50ms is optimal
  603. */
  604. while (mhl_ctrl->scrpd_busy && retry--)
  605. msleep(50);
  606. if (!retry) {
  607. pr_debug("MHL: scratchpad_busy\n");
  608. return -EBUSY;
  609. }
  610. for (i = 0, reg = start_reg; (i < length) &&
  611. (reg < MHL_SCRATCHPAD_SIZE); i++, reg++)
  612. mhl_ctrl->scrpd.data[reg] = data[i];
  613. mhl_ctrl->scrpd.length = length;
  614. mhl_ctrl->scrpd.offset = start_reg;
  615. retry = 5;
  616. do {
  617. init_completion(&mhl_ctrl->req_write_done);
  618. mhl_msc_send_set_int(
  619. mhl_ctrl,
  620. MHL_RCHANGE_INT,
  621. MHL_INT_REQ_WRT,
  622. MSC_PRIORITY_SEND);
  623. timeout = wait_for_completion_interruptible_timeout(
  624. &mhl_ctrl->req_write_done,
  625. msecs_to_jiffies(MHL_BURST_WAIT));
  626. if (!timeout)
  627. mhl_ctrl->scrpd_busy = false;
  628. } while (retry-- && timeout == 0);
  629. if (!timeout) {
  630. pr_err("%s: timed out!\n", __func__);
  631. return -EAGAIN;
  632. }
  633. return 0;
  634. }
  635. /* write scratchpad entry */
  636. int mhl_write_scratchpad(struct mhl_tx_ctrl *mhl_ctrl,
  637. u8 offset, u8 length, u8 *data)
  638. {
  639. int rc;
  640. if ((length < ADOPTER_ID_SIZE) ||
  641. (length > MAX_SCRATCHPAD_TRANSFER_SIZE) ||
  642. (offset > (MAX_SCRATCHPAD_TRANSFER_SIZE - ADOPTER_ID_SIZE)) ||
  643. ((offset + length) > MAX_SCRATCHPAD_TRANSFER_SIZE)) {
  644. pr_debug("MHL: write_burst (0x%02x)\n", -EINVAL);
  645. return -EINVAL;
  646. }
  647. rc = mhl_request_write_burst(mhl_ctrl, offset, length, data);
  648. return rc;
  649. }