  1. /*
  2. * SD/MMC Greybus driver.
  3. *
  4. * Copyright 2014-2015 Google Inc.
  5. * Copyright 2014-2015 Linaro Ltd.
  6. *
  7. * Released under the GPLv2 only.
  8. */
  9. #include <linux/kernel.h>
  10. #include <linux/mmc/core.h>
  11. #include <linux/mmc/host.h>
  12. #include <linux/mmc/mmc.h>
  13. #include <linux/scatterlist.h>
  14. #include <linux/workqueue.h>
  15. #include "greybus.h"
  16. #include "gbphy.h"
/*
 * Per-host driver state, stored in the mmc_host private area
 * (see mmc_priv() users below).
 */
struct gb_sdio_host {
	struct gb_connection *connection;	/* Greybus cport connection */
	struct gbphy_device *gbphy_dev;		/* bridged-phy device (runtime PM) */
	struct mmc_host *mmc;			/* the registered mmc host */
	struct mmc_request *mrq;		/* request currently being processed */
	struct mutex lock; /* lock for this host */
	size_t data_max;			/* max data payload per transfer op */
	spinlock_t xfer; /* lock to cancel ongoing transfer */
	bool xfer_stop;				/* MMC_STOP_TRANSMISSION was requested */
	struct workqueue_struct *mrq_workqueue;	/* single-threaded request queue */
	struct work_struct mrqwork;		/* runs gb_sdio_mrq_work() */
	u8 queued_events;			/* events deferred while removed */
	bool removed;				/* host not (yet/anymore) usable */
	bool card_present;
	bool read_only;
};
/* Greybus response-flag combinations for the standard MMC response types. */
#define GB_SDIO_RSP_R1_R5_R6_R7	(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_OPCODE)
#define GB_SDIO_RSP_R3_R4	(GB_SDIO_RSP_PRESENT)
#define GB_SDIO_RSP_R2		(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_136)
#define GB_SDIO_RSP_R1B		(GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
				 GB_SDIO_RSP_OPCODE | GB_SDIO_RSP_BUSY)

/* kernel vdd starts at 0x80 and we need to translate to greybus ones 0x01 */
#define GB_SDIO_VDD_SHIFT	8

/* older kernels may lack this cap2 flag; make it a no-op there */
#ifndef MMC_CAP2_CORE_RUNTIME_PM
#define MMC_CAP2_CORE_RUNTIME_PM	0
#endif
  45. static inline bool single_op(struct mmc_command *cmd)
  46. {
  47. uint32_t opcode = cmd->opcode;
  48. return opcode == MMC_WRITE_BLOCK ||
  49. opcode == MMC_READ_SINGLE_BLOCK;
  50. }
  51. static void _gb_sdio_set_host_caps(struct gb_sdio_host *host, u32 r)
  52. {
  53. u32 caps = 0;
  54. u32 caps2 = 0;
  55. caps = ((r & GB_SDIO_CAP_NONREMOVABLE) ? MMC_CAP_NONREMOVABLE : 0) |
  56. ((r & GB_SDIO_CAP_4_BIT_DATA) ? MMC_CAP_4_BIT_DATA : 0) |
  57. ((r & GB_SDIO_CAP_8_BIT_DATA) ? MMC_CAP_8_BIT_DATA : 0) |
  58. ((r & GB_SDIO_CAP_MMC_HS) ? MMC_CAP_MMC_HIGHSPEED : 0) |
  59. ((r & GB_SDIO_CAP_SD_HS) ? MMC_CAP_SD_HIGHSPEED : 0) |
  60. ((r & GB_SDIO_CAP_ERASE) ? MMC_CAP_ERASE : 0) |
  61. ((r & GB_SDIO_CAP_1_2V_DDR) ? MMC_CAP_1_2V_DDR : 0) |
  62. ((r & GB_SDIO_CAP_1_8V_DDR) ? MMC_CAP_1_8V_DDR : 0) |
  63. ((r & GB_SDIO_CAP_POWER_OFF_CARD) ? MMC_CAP_POWER_OFF_CARD : 0) |
  64. ((r & GB_SDIO_CAP_UHS_SDR12) ? MMC_CAP_UHS_SDR12 : 0) |
  65. ((r & GB_SDIO_CAP_UHS_SDR25) ? MMC_CAP_UHS_SDR25 : 0) |
  66. ((r & GB_SDIO_CAP_UHS_SDR50) ? MMC_CAP_UHS_SDR50 : 0) |
  67. ((r & GB_SDIO_CAP_UHS_SDR104) ? MMC_CAP_UHS_SDR104 : 0) |
  68. ((r & GB_SDIO_CAP_UHS_DDR50) ? MMC_CAP_UHS_DDR50 : 0) |
  69. ((r & GB_SDIO_CAP_DRIVER_TYPE_A) ? MMC_CAP_DRIVER_TYPE_A : 0) |
  70. ((r & GB_SDIO_CAP_DRIVER_TYPE_C) ? MMC_CAP_DRIVER_TYPE_C : 0) |
  71. ((r & GB_SDIO_CAP_DRIVER_TYPE_D) ? MMC_CAP_DRIVER_TYPE_D : 0);
  72. caps2 = ((r & GB_SDIO_CAP_HS200_1_2V) ? MMC_CAP2_HS200_1_2V_SDR : 0) |
  73. ((r & GB_SDIO_CAP_HS400_1_2V) ? MMC_CAP2_HS400_1_2V : 0) |
  74. ((r & GB_SDIO_CAP_HS400_1_8V) ? MMC_CAP2_HS400_1_8V : 0) |
  75. ((r & GB_SDIO_CAP_HS200_1_8V) ? MMC_CAP2_HS200_1_8V_SDR : 0);
  76. host->mmc->caps = caps;
  77. host->mmc->caps2 = caps2 | MMC_CAP2_CORE_RUNTIME_PM;
  78. if (caps & MMC_CAP_NONREMOVABLE)
  79. host->card_present = true;
  80. }
  81. static u32 _gb_sdio_get_host_ocr(u32 ocr)
  82. {
  83. return (((ocr & GB_SDIO_VDD_165_195) ? MMC_VDD_165_195 : 0) |
  84. ((ocr & GB_SDIO_VDD_20_21) ? MMC_VDD_20_21 : 0) |
  85. ((ocr & GB_SDIO_VDD_21_22) ? MMC_VDD_21_22 : 0) |
  86. ((ocr & GB_SDIO_VDD_22_23) ? MMC_VDD_22_23 : 0) |
  87. ((ocr & GB_SDIO_VDD_23_24) ? MMC_VDD_23_24 : 0) |
  88. ((ocr & GB_SDIO_VDD_24_25) ? MMC_VDD_24_25 : 0) |
  89. ((ocr & GB_SDIO_VDD_25_26) ? MMC_VDD_25_26 : 0) |
  90. ((ocr & GB_SDIO_VDD_26_27) ? MMC_VDD_26_27 : 0) |
  91. ((ocr & GB_SDIO_VDD_27_28) ? MMC_VDD_27_28 : 0) |
  92. ((ocr & GB_SDIO_VDD_28_29) ? MMC_VDD_28_29 : 0) |
  93. ((ocr & GB_SDIO_VDD_29_30) ? MMC_VDD_29_30 : 0) |
  94. ((ocr & GB_SDIO_VDD_30_31) ? MMC_VDD_30_31 : 0) |
  95. ((ocr & GB_SDIO_VDD_31_32) ? MMC_VDD_31_32 : 0) |
  96. ((ocr & GB_SDIO_VDD_32_33) ? MMC_VDD_32_33 : 0) |
  97. ((ocr & GB_SDIO_VDD_33_34) ? MMC_VDD_33_34 : 0) |
  98. ((ocr & GB_SDIO_VDD_34_35) ? MMC_VDD_34_35 : 0) |
  99. ((ocr & GB_SDIO_VDD_35_36) ? MMC_VDD_35_36 : 0)
  100. );
  101. }
/*
 * Query the module's SDIO capabilities and use them to configure the
 * mmc_host: caps/caps2, block-size limits, OCR mask and clock range.
 *
 * Returns 0 on success or a negative errno from the Greybus operation.
 */
static int gb_sdio_get_caps(struct gb_sdio_host *host)
{
	struct gb_sdio_get_caps_response response;
	struct mmc_host *mmc = host->mmc;
	u16 data_max;
	u32 blksz;
	u32 ocr;
	u32 r;
	int ret;

	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_GET_CAPABILITIES,
				NULL, 0, &response, sizeof(response));
	if (ret < 0)
		return ret;
	r = le32_to_cpu(response.caps);

	_gb_sdio_set_host_caps(host, r);

	/* get the max block size that could fit our payload */
	data_max = gb_operation_get_payload_size_max(host->connection);
	/*
	 * Reserve headroom for the larger of the two transfer headers so
	 * a full chunk fits in both the request and the response message.
	 */
	data_max = min(data_max - sizeof(struct gb_sdio_transfer_request),
		       data_max - sizeof(struct gb_sdio_transfer_response));

	blksz = min_t(u16, le16_to_cpu(response.max_blk_size), data_max);
	/* NOTE(review): assumes modules handle at least 512-byte blocks */
	blksz = max_t(u32, 512, blksz);

	mmc->max_blk_size = rounddown_pow_of_two(blksz);
	mmc->max_blk_count = le16_to_cpu(response.max_blk_count);
	host->data_max = data_max;

	/* get ocr supported values */
	ocr = _gb_sdio_get_host_ocr(le32_to_cpu(response.ocr));
	mmc->ocr_avail = ocr;
	mmc->ocr_avail_sdio = mmc->ocr_avail;
	mmc->ocr_avail_sd = mmc->ocr_avail;
	mmc->ocr_avail_mmc = mmc->ocr_avail;

	/* get frequency range values */
	mmc->f_min = le32_to_cpu(response.f_min);
	mmc->f_max = le32_to_cpu(response.f_max);

	return 0;
}
  137. static void _gb_queue_event(struct gb_sdio_host *host, u8 event)
  138. {
  139. if (event & GB_SDIO_CARD_INSERTED)
  140. host->queued_events &= ~GB_SDIO_CARD_REMOVED;
  141. else if (event & GB_SDIO_CARD_REMOVED)
  142. host->queued_events &= ~GB_SDIO_CARD_INSERTED;
  143. host->queued_events |= event;
  144. }
  145. static int _gb_sdio_process_events(struct gb_sdio_host *host, u8 event)
  146. {
  147. u8 state_changed = 0;
  148. if (event & GB_SDIO_CARD_INSERTED) {
  149. if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
  150. return 0;
  151. if (host->card_present)
  152. return 0;
  153. host->card_present = true;
  154. state_changed = 1;
  155. }
  156. if (event & GB_SDIO_CARD_REMOVED) {
  157. if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
  158. return 0;
  159. if (!(host->card_present))
  160. return 0;
  161. host->card_present = false;
  162. state_changed = 1;
  163. }
  164. if (event & GB_SDIO_WP) {
  165. host->read_only = true;
  166. }
  167. if (state_changed) {
  168. dev_info(mmc_dev(host->mmc), "card %s now event\n",
  169. (host->card_present ? "inserted" : "removed"));
  170. mmc_detect_change(host->mmc, 0);
  171. }
  172. return 0;
  173. }
  174. static int gb_sdio_request_handler(struct gb_operation *op)
  175. {
  176. struct gb_sdio_host *host = gb_connection_get_data(op->connection);
  177. struct gb_message *request;
  178. struct gb_sdio_event_request *payload;
  179. u8 type = op->type;
  180. int ret = 0;
  181. u8 event;
  182. if (type != GB_SDIO_TYPE_EVENT) {
  183. dev_err(mmc_dev(host->mmc),
  184. "unsupported unsolicited event: %u\n", type);
  185. return -EINVAL;
  186. }
  187. request = op->request;
  188. if (request->payload_size < sizeof(*payload)) {
  189. dev_err(mmc_dev(host->mmc), "wrong event size received (%zu < %zu)\n",
  190. request->payload_size, sizeof(*payload));
  191. return -EINVAL;
  192. }
  193. payload = request->payload;
  194. event = payload->event;
  195. if (host->removed)
  196. _gb_queue_event(host, event);
  197. else
  198. ret = _gb_sdio_process_events(host, event);
  199. return ret;
  200. }
  201. static int gb_sdio_set_ios(struct gb_sdio_host *host,
  202. struct gb_sdio_set_ios_request *request)
  203. {
  204. int ret;
  205. ret = gbphy_runtime_get_sync(host->gbphy_dev);
  206. if (ret)
  207. return ret;
  208. ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_SET_IOS, request,
  209. sizeof(*request), NULL, 0);
  210. gbphy_runtime_put_autosuspend(host->gbphy_dev);
  211. return ret;
  212. }
  213. static int _gb_sdio_send(struct gb_sdio_host *host, struct mmc_data *data,
  214. size_t len, u16 nblocks, off_t skip)
  215. {
  216. struct gb_sdio_transfer_request *request;
  217. struct gb_sdio_transfer_response *response;
  218. struct gb_operation *operation;
  219. struct scatterlist *sg = data->sg;
  220. unsigned int sg_len = data->sg_len;
  221. size_t copied;
  222. u16 send_blksz;
  223. u16 send_blocks;
  224. int ret;
  225. WARN_ON(len > host->data_max);
  226. operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
  227. len + sizeof(*request),
  228. sizeof(*response), GFP_KERNEL);
  229. if (!operation)
  230. return -ENOMEM;
  231. request = operation->request->payload;
  232. request->data_flags = (data->flags >> 8);
  233. request->data_blocks = cpu_to_le16(nblocks);
  234. request->data_blksz = cpu_to_le16(data->blksz);
  235. copied = sg_pcopy_to_buffer(sg, sg_len, &request->data[0], len, skip);
  236. if (copied != len) {
  237. ret = -EINVAL;
  238. goto err_put_operation;
  239. }
  240. ret = gb_operation_request_send_sync(operation);
  241. if (ret < 0)
  242. goto err_put_operation;
  243. response = operation->response->payload;
  244. send_blocks = le16_to_cpu(response->data_blocks);
  245. send_blksz = le16_to_cpu(response->data_blksz);
  246. if (len != send_blksz * send_blocks) {
  247. dev_err(mmc_dev(host->mmc), "send: size received: %zu != %d\n",
  248. len, send_blksz * send_blocks);
  249. ret = -EINVAL;
  250. }
  251. err_put_operation:
  252. gb_operation_put(operation);
  253. return ret;
  254. }
  255. static int _gb_sdio_recv(struct gb_sdio_host *host, struct mmc_data *data,
  256. size_t len, u16 nblocks, off_t skip)
  257. {
  258. struct gb_sdio_transfer_request *request;
  259. struct gb_sdio_transfer_response *response;
  260. struct gb_operation *operation;
  261. struct scatterlist *sg = data->sg;
  262. unsigned int sg_len = data->sg_len;
  263. size_t copied;
  264. u16 recv_blksz;
  265. u16 recv_blocks;
  266. int ret;
  267. WARN_ON(len > host->data_max);
  268. operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
  269. sizeof(*request),
  270. len + sizeof(*response), GFP_KERNEL);
  271. if (!operation)
  272. return -ENOMEM;
  273. request = operation->request->payload;
  274. request->data_flags = (data->flags >> 8);
  275. request->data_blocks = cpu_to_le16(nblocks);
  276. request->data_blksz = cpu_to_le16(data->blksz);
  277. ret = gb_operation_request_send_sync(operation);
  278. if (ret < 0)
  279. goto err_put_operation;
  280. response = operation->response->payload;
  281. recv_blocks = le16_to_cpu(response->data_blocks);
  282. recv_blksz = le16_to_cpu(response->data_blksz);
  283. if (len != recv_blksz * recv_blocks) {
  284. dev_err(mmc_dev(host->mmc), "recv: size received: %d != %zu\n",
  285. recv_blksz * recv_blocks, len);
  286. ret = -EINVAL;
  287. goto err_put_operation;
  288. }
  289. copied = sg_pcopy_from_buffer(sg, sg_len, &response->data[0], len,
  290. skip);
  291. if (copied != len)
  292. ret = -EINVAL;
  293. err_put_operation:
  294. gb_operation_put(operation);
  295. return ret;
  296. }
/*
 * Run a complete MMC data transfer, splitting it into chunks that fit
 * the Greybus payload limit (host->data_max). Each chunk is a whole
 * number of blocks; progress is recorded in data->bytes_xfered and the
 * final result is also stored in data->error.
 */
static int gb_sdio_transfer(struct gb_sdio_host *host, struct mmc_data *data)
{
	size_t left, len;
	off_t skip = 0;
	int ret = 0;
	u16 nblocks;

	/* single-block commands cannot carry a multi-block transfer */
	if (single_op(data->mrq->cmd) && data->blocks > 1) {
		ret = -ETIMEDOUT;
		goto out;
	}

	left = data->blksz * data->blocks;

	while (left) {
		/* check if a stop transmission is pending */
		spin_lock(&host->xfer);
		if (host->xfer_stop) {
			host->xfer_stop = false;
			spin_unlock(&host->xfer);
			ret = -EINTR;
			goto out;
		}
		spin_unlock(&host->xfer);
		/* trim the chunk down to a whole number of blocks */
		len = min(left, host->data_max);
		nblocks = len / data->blksz;
		len = nblocks * data->blksz;

		if (data->flags & MMC_DATA_READ) {
			ret = _gb_sdio_recv(host, data, len, nblocks, skip);
			if (ret < 0)
				goto out;
		} else {
			ret = _gb_sdio_send(host, data, len, nblocks, skip);
			if (ret < 0)
				goto out;
		}
		data->bytes_xfered += len;
		left -= len;
		skip += len;
	}

out:
	data->error = ret;
	return ret;
}
  338. static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd)
  339. {
  340. struct gb_sdio_command_request request = {0};
  341. struct gb_sdio_command_response response;
  342. struct mmc_data *data = host->mrq->data;
  343. u8 cmd_flags;
  344. u8 cmd_type;
  345. int i;
  346. int ret;
  347. switch (mmc_resp_type(cmd)) {
  348. case MMC_RSP_NONE:
  349. cmd_flags = GB_SDIO_RSP_NONE;
  350. break;
  351. case MMC_RSP_R1:
  352. cmd_flags = GB_SDIO_RSP_R1_R5_R6_R7;
  353. break;
  354. case MMC_RSP_R1B:
  355. cmd_flags = GB_SDIO_RSP_R1B;
  356. break;
  357. case MMC_RSP_R2:
  358. cmd_flags = GB_SDIO_RSP_R2;
  359. break;
  360. case MMC_RSP_R3:
  361. cmd_flags = GB_SDIO_RSP_R3_R4;
  362. break;
  363. default:
  364. dev_err(mmc_dev(host->mmc), "cmd flag invalid 0x%04x\n",
  365. mmc_resp_type(cmd));
  366. ret = -EINVAL;
  367. goto out;
  368. }
  369. switch (mmc_cmd_type(cmd)) {
  370. case MMC_CMD_BC:
  371. cmd_type = GB_SDIO_CMD_BC;
  372. break;
  373. case MMC_CMD_BCR:
  374. cmd_type = GB_SDIO_CMD_BCR;
  375. break;
  376. case MMC_CMD_AC:
  377. cmd_type = GB_SDIO_CMD_AC;
  378. break;
  379. case MMC_CMD_ADTC:
  380. cmd_type = GB_SDIO_CMD_ADTC;
  381. break;
  382. default:
  383. dev_err(mmc_dev(host->mmc), "cmd type invalid 0x%04x\n",
  384. mmc_cmd_type(cmd));
  385. ret = -EINVAL;
  386. goto out;
  387. }
  388. request.cmd = cmd->opcode;
  389. request.cmd_flags = cmd_flags;
  390. request.cmd_type = cmd_type;
  391. request.cmd_arg = cpu_to_le32(cmd->arg);
  392. /* some controllers need to know at command time data details */
  393. if (data) {
  394. request.data_blocks = cpu_to_le16(data->blocks);
  395. request.data_blksz = cpu_to_le16(data->blksz);
  396. }
  397. ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_COMMAND,
  398. &request, sizeof(request), &response,
  399. sizeof(response));
  400. if (ret < 0)
  401. goto out;
  402. /* no response expected */
  403. if (cmd_flags == GB_SDIO_RSP_NONE)
  404. goto out;
  405. /* long response expected */
  406. if (cmd_flags & GB_SDIO_RSP_R2)
  407. for (i = 0; i < 4; i++)
  408. cmd->resp[i] = le32_to_cpu(response.resp[i]);
  409. else
  410. cmd->resp[0] = le32_to_cpu(response.resp[0]);
  411. out:
  412. cmd->error = ret;
  413. return ret;
  414. }
/*
 * Workqueue handler that executes one complete mmc_request under
 * host->lock: optional sbc command, main command, optional data
 * transfer and optional stop command. The request is completed via
 * mmc_request_done() after host->mrq has been cleared.
 */
static void gb_sdio_mrq_work(struct work_struct *work)
{
	struct gb_sdio_host *host;
	struct mmc_request *mrq;
	int ret;

	host = container_of(work, struct gb_sdio_host, mrqwork);

	ret = gbphy_runtime_get_sync(host->gbphy_dev);
	if (ret)
		return;

	mutex_lock(&host->lock);
	mrq = host->mrq;
	if (!mrq) {
		/* should not happen: gb_mmc_request() queues us with an mrq set */
		mutex_unlock(&host->lock);
		gbphy_runtime_put_autosuspend(host->gbphy_dev);
		dev_err(mmc_dev(host->mmc), "mmc request is NULL");
		return;
	}

	if (host->removed) {
		/* host was torn down after the work was queued */
		mrq->cmd->error = -ESHUTDOWN;
		goto done;
	}

	if (mrq->sbc) {
		ret = gb_sdio_command(host, mrq->sbc);
		if (ret < 0)
			goto done;
	}

	ret = gb_sdio_command(host, mrq->cmd);
	if (ret < 0)
		goto done;

	if (mrq->data) {
		ret = gb_sdio_transfer(host, mrq->data);
		if (ret < 0)
			goto done;
	}

	if (mrq->stop) {
		ret = gb_sdio_command(host, mrq->stop);
		if (ret < 0)
			goto done;
	}

done:
	host->mrq = NULL;
	mutex_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	gbphy_runtime_put_autosuspend(host->gbphy_dev);
}
/*
 * mmc_host_ops.request: accept a request from the MMC core and hand it
 * off to the single-threaded workqueue. Requests arriving while the
 * host is removed or no card is present are failed immediately.
 */
static void gb_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct gb_sdio_host *host = mmc_priv(mmc);
	struct mmc_command *cmd = mrq->cmd;

	/* Check if it is a cancel to ongoing transfer */
	if (cmd->opcode == MMC_STOP_TRANSMISSION) {
		spin_lock(&host->xfer);
		host->xfer_stop = true;
		spin_unlock(&host->xfer);
	}

	mutex_lock(&host->lock);
	WARN_ON(host->mrq);	/* the core issues one request at a time */
	host->mrq = mrq;

	if (host->removed) {
		mrq->cmd->error = -ESHUTDOWN;
		goto out;
	}
	if (!host->card_present) {
		mrq->cmd->error = -ENOMEDIUM;
		goto out;
	}

	queue_work(host->mrq_workqueue, &host->mrqwork);
	mutex_unlock(&host->lock);
	return;

out:
	host->mrq = NULL;
	mutex_unlock(&host->lock);
	mmc_request_done(mmc, mrq);
}
  489. static void gb_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
  490. {
  491. struct gb_sdio_host *host = mmc_priv(mmc);
  492. struct gb_sdio_set_ios_request request;
  493. int ret;
  494. u8 power_mode;
  495. u8 bus_width;
  496. u8 timing;
  497. u8 signal_voltage;
  498. u8 drv_type;
  499. u32 vdd = 0;
  500. mutex_lock(&host->lock);
  501. request.clock = cpu_to_le32(ios->clock);
  502. if (ios->vdd)
  503. vdd = 1 << (ios->vdd - GB_SDIO_VDD_SHIFT);
  504. request.vdd = cpu_to_le32(vdd);
  505. request.bus_mode = (ios->bus_mode == MMC_BUSMODE_OPENDRAIN ?
  506. GB_SDIO_BUSMODE_OPENDRAIN :
  507. GB_SDIO_BUSMODE_PUSHPULL);
  508. switch (ios->power_mode) {
  509. case MMC_POWER_OFF:
  510. default:
  511. power_mode = GB_SDIO_POWER_OFF;
  512. break;
  513. case MMC_POWER_UP:
  514. power_mode = GB_SDIO_POWER_UP;
  515. break;
  516. case MMC_POWER_ON:
  517. power_mode = GB_SDIO_POWER_ON;
  518. break;
  519. case MMC_POWER_UNDEFINED:
  520. power_mode = GB_SDIO_POWER_UNDEFINED;
  521. break;
  522. }
  523. request.power_mode = power_mode;
  524. switch (ios->bus_width) {
  525. case MMC_BUS_WIDTH_1:
  526. bus_width = GB_SDIO_BUS_WIDTH_1;
  527. break;
  528. case MMC_BUS_WIDTH_4:
  529. default:
  530. bus_width = GB_SDIO_BUS_WIDTH_4;
  531. break;
  532. case MMC_BUS_WIDTH_8:
  533. bus_width = GB_SDIO_BUS_WIDTH_8;
  534. break;
  535. }
  536. request.bus_width = bus_width;
  537. switch (ios->timing) {
  538. case MMC_TIMING_LEGACY:
  539. default:
  540. timing = GB_SDIO_TIMING_LEGACY;
  541. break;
  542. case MMC_TIMING_MMC_HS:
  543. timing = GB_SDIO_TIMING_MMC_HS;
  544. break;
  545. case MMC_TIMING_SD_HS:
  546. timing = GB_SDIO_TIMING_SD_HS;
  547. break;
  548. case MMC_TIMING_UHS_SDR12:
  549. timing = GB_SDIO_TIMING_UHS_SDR12;
  550. break;
  551. case MMC_TIMING_UHS_SDR25:
  552. timing = GB_SDIO_TIMING_UHS_SDR25;
  553. break;
  554. case MMC_TIMING_UHS_SDR50:
  555. timing = GB_SDIO_TIMING_UHS_SDR50;
  556. break;
  557. case MMC_TIMING_UHS_SDR104:
  558. timing = GB_SDIO_TIMING_UHS_SDR104;
  559. break;
  560. case MMC_TIMING_UHS_DDR50:
  561. timing = GB_SDIO_TIMING_UHS_DDR50;
  562. break;
  563. case MMC_TIMING_MMC_DDR52:
  564. timing = GB_SDIO_TIMING_MMC_DDR52;
  565. break;
  566. case MMC_TIMING_MMC_HS200:
  567. timing = GB_SDIO_TIMING_MMC_HS200;
  568. break;
  569. case MMC_TIMING_MMC_HS400:
  570. timing = GB_SDIO_TIMING_MMC_HS400;
  571. break;
  572. }
  573. request.timing = timing;
  574. switch (ios->signal_voltage) {
  575. case MMC_SIGNAL_VOLTAGE_330:
  576. signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_330;
  577. break;
  578. case MMC_SIGNAL_VOLTAGE_180:
  579. default:
  580. signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_180;
  581. break;
  582. case MMC_SIGNAL_VOLTAGE_120:
  583. signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_120;
  584. break;
  585. }
  586. request.signal_voltage = signal_voltage;
  587. switch (ios->drv_type) {
  588. case MMC_SET_DRIVER_TYPE_A:
  589. drv_type = GB_SDIO_SET_DRIVER_TYPE_A;
  590. break;
  591. case MMC_SET_DRIVER_TYPE_C:
  592. drv_type = GB_SDIO_SET_DRIVER_TYPE_C;
  593. break;
  594. case MMC_SET_DRIVER_TYPE_D:
  595. drv_type = GB_SDIO_SET_DRIVER_TYPE_D;
  596. break;
  597. case MMC_SET_DRIVER_TYPE_B:
  598. default:
  599. drv_type = GB_SDIO_SET_DRIVER_TYPE_B;
  600. break;
  601. }
  602. request.drv_type = drv_type;
  603. ret = gb_sdio_set_ios(host, &request);
  604. if (ret < 0)
  605. goto out;
  606. memcpy(&mmc->ios, ios, sizeof(mmc->ios));
  607. out:
  608. mutex_unlock(&host->lock);
  609. }
  610. static int gb_mmc_get_ro(struct mmc_host *mmc)
  611. {
  612. struct gb_sdio_host *host = mmc_priv(mmc);
  613. mutex_lock(&host->lock);
  614. if (host->removed) {
  615. mutex_unlock(&host->lock);
  616. return -ESHUTDOWN;
  617. }
  618. mutex_unlock(&host->lock);
  619. return host->read_only;
  620. }
  621. static int gb_mmc_get_cd(struct mmc_host *mmc)
  622. {
  623. struct gb_sdio_host *host = mmc_priv(mmc);
  624. mutex_lock(&host->lock);
  625. if (host->removed) {
  626. mutex_unlock(&host->lock);
  627. return -ESHUTDOWN;
  628. }
  629. mutex_unlock(&host->lock);
  630. return host->card_present;
  631. }
/*
 * mmc_host_ops.start_signal_voltage_switch: nothing to do host-side;
 * the requested voltage reaches the module through set_ios instead.
 */
static int gb_mmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	return 0;
}
/* Host operations exposed to the MMC core. */
static const struct mmc_host_ops gb_sdio_ops = {
	.request = gb_mmc_request,
	.set_ios = gb_mmc_set_ios,
	.get_ro = gb_mmc_get_ro,
	.get_cd = gb_mmc_get_cd,
	.start_signal_voltage_switch = gb_mmc_switch_voltage,
};
/*
 * Bind a Greybus SDIO cport: create the connection, query the module's
 * capabilities, set up the request workqueue and register the mmc host.
 * Events that arrive during setup are queued and replayed at the end.
 */
static int gb_sdio_probe(struct gbphy_device *gbphy_dev,
			 const struct gbphy_device_id *id)
{
	struct gb_connection *connection;
	struct mmc_host *mmc;
	struct gb_sdio_host *host;
	int ret = 0;

	mmc = mmc_alloc_host(sizeof(*host), &gbphy_dev->dev);
	if (!mmc)
		return -ENOMEM;

	connection = gb_connection_create(gbphy_dev->bundle,
					  le16_to_cpu(gbphy_dev->cport_desc->id),
					  gb_sdio_request_handler);
	if (IS_ERR(connection)) {
		ret = PTR_ERR(connection);
		goto exit_mmc_free;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;
	/* queue incoming events until the host is fully registered */
	host->removed = true;

	host->connection = connection;
	gb_connection_set_data(connection, host);
	host->gbphy_dev = gbphy_dev;
	gb_gbphy_set_data(gbphy_dev, host);

	/* tx-only first: query capabilities before accepting events */
	ret = gb_connection_enable_tx(connection);
	if (ret)
		goto exit_connection_destroy;

	ret = gb_sdio_get_caps(host);
	if (ret < 0)
		goto exit_connection_disable;

	mmc->ops = &gb_sdio_ops;

	/* host->mmc == mmc here; one segment per supported block */
	mmc->max_segs = host->mmc->max_blk_count;

	/* for now we make a map 1:1 between max request and segment size */
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	mutex_init(&host->lock);
	spin_lock_init(&host->xfer);
	/* single-threaded so requests are processed strictly in order */
	host->mrq_workqueue = alloc_workqueue("mmc-%s", 0, 1,
					      dev_name(&gbphy_dev->dev));
	if (!host->mrq_workqueue) {
		ret = -ENOMEM;
		goto exit_connection_disable;
	}
	INIT_WORK(&host->mrqwork, gb_sdio_mrq_work);

	ret = gb_connection_enable(connection);
	if (ret)
		goto exit_wq_destroy;

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto exit_wq_destroy;
	host->removed = false;
	/* replay events that arrived while we were setting up */
	ret = _gb_sdio_process_events(host, host->queued_events);
	host->queued_events = 0;

	gbphy_runtime_put_autosuspend(gbphy_dev);

	return ret;

exit_wq_destroy:
	destroy_workqueue(host->mrq_workqueue);
exit_connection_disable:
	gb_connection_disable(connection);
exit_connection_destroy:
	gb_connection_destroy(connection);
exit_mmc_free:
	mmc_free_host(mmc);

	return ret;
}
/*
 * Unbind: mark the host removed so new requests fail fast, drain the
 * request workqueue, then tear down the mmc host and the connection.
 */
static void gb_sdio_remove(struct gbphy_device *gbphy_dev)
{
	struct gb_sdio_host *host = gb_gbphy_get_data(gbphy_dev);
	struct gb_connection *connection = host->connection;
	struct mmc_host *mmc;
	int ret;

	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		gbphy_runtime_get_noresume(gbphy_dev);

	mutex_lock(&host->lock);
	host->removed = true;
	mmc = host->mmc;
	/* detach so the event handler no longer dereferences us */
	gb_connection_set_data(connection, NULL);
	mutex_unlock(&host->lock);

	flush_workqueue(host->mrq_workqueue);
	destroy_workqueue(host->mrq_workqueue);
	gb_connection_disable_rx(connection);
	mmc_remove_host(mmc);
	gb_connection_disable(connection);
	gb_connection_destroy(connection);
	mmc_free_host(mmc);
}
/* Match any Greybus bridged-phy device advertising the SDIO protocol. */
static const struct gbphy_device_id gb_sdio_id_table[] = {
	{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_SDIO) },
	{ },
};
MODULE_DEVICE_TABLE(gbphy, gb_sdio_id_table);

static struct gbphy_driver sdio_driver = {
	.name = "sdio",
	.probe = gb_sdio_probe,
	.remove = gb_sdio_remove,
	.id_table = gb_sdio_id_table,
};

module_gbphy_driver(sdio_driver);
MODULE_LICENSE("GPL v2");