mmc_ops.c

/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
static const u8 tuning_blk_pattern_4bit[] = {
        0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
        0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
        0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
        0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
        0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
        0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
        0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
        0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
        0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
        0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
        0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
        0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
        0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
        0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
        0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
        0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
        0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
        0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
        0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
        0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
        0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
        0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
        0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
        0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
static inline int __mmc_send_status(struct mmc_card *card, u32 *status,
                                    bool ignore_crc)
{
        int err;
        struct mmc_command cmd = {0};

        BUG_ON(!card);
        BUG_ON(!card->host);

        cmd.opcode = MMC_SEND_STATUS;
        if (!mmc_host_is_spi(card->host))
                cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
        if (ignore_crc)
                cmd.flags &= ~MMC_RSP_CRC;

        err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        /* NOTE: callers are required to understand the difference
         * between "native" and SPI format status words!
         */
        if (status)
                *status = cmd.resp[0];

        return 0;
}

int mmc_send_status(struct mmc_card *card, u32 *status)
{
        return __mmc_send_status(card, status, false);
}
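
/*
 * Example (illustrative sketch only): a caller can poll CMD13 via
 * mmc_send_status() until the card leaves the programming state.
 * example_poll_until_ready() is a hypothetical helper, and the retry
 * count and delay are arbitrary example values, not a core API.
 */
static int __maybe_unused example_poll_until_ready(struct mmc_card *card)
{
        u32 status;
        int err, retries = 100;

        do {
                err = mmc_send_status(card, &status);
                if (err)
                        return err;
                if (R1_CURRENT_STATE(status) != R1_STATE_PRG)
                        return 0;       /* card is no longer busy programming */
                mmc_delay(10);
        } while (--retries);

        return -ETIMEDOUT;
}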
static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
        struct mmc_command cmd = {0};

        BUG_ON(!host);

        cmd.opcode = MMC_SELECT_CARD;

        if (card) {
                cmd.arg = card->rca << 16;
                cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
        } else {
                cmd.arg = 0;
                cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
        }

        return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_select_card(struct mmc_card *card)
{
        BUG_ON(!card);

        return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
        return _mmc_select_card(host, NULL);
}
/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
        struct mmc_command cmd = {0};

        cmd.opcode = MMC_SET_DSR;

        cmd.arg = (host->dsr << 16) | 0xffff;
        cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

        return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}
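
/*
 * Worked example (illustrative, the DSR value is an arbitrary assumption):
 * with host->dsr == 0x0404, the CMD4 argument built above becomes
 * (0x0404 << 16) | 0xffff == 0x0404ffff; the DSR value occupies bits
 * [31:16] of the argument and the low 16 bits are stuff bits.
 */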
int mmc_go_idle(struct mmc_host *host)
{
        int err;
        struct mmc_command cmd = {0};

        /*
         * Non-SPI hosts need to prevent chipselect going active during
         * GO_IDLE; that would put chips into SPI mode. Remind them of
         * that in case of hardware that won't pull up DAT3/nCS otherwise.
         *
         * SPI hosts ignore ios.chip_select; it's managed according to
         * rules that must accommodate non-MMC slaves which this layer
         * won't even know about.
         */
        if (!mmc_host_is_spi(host)) {
                mmc_set_chip_select(host, MMC_CS_HIGH);
                mmc_delay(1);
        }

        cmd.opcode = MMC_GO_IDLE_STATE;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

        err = mmc_wait_for_cmd(host, &cmd, 0);

        mmc_delay(1);

        if (!mmc_host_is_spi(host)) {
                mmc_set_chip_select(host, MMC_CS_DONTCARE);
                mmc_delay(1);
        }

        host->use_spi_crc = 0;

        return err;
}
int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
        struct mmc_command cmd = {0};
        int i, err = 0;

        BUG_ON(!host);

        cmd.opcode = MMC_SEND_OP_COND;
        cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

        for (i = 100; i; i--) {
                err = mmc_wait_for_cmd(host, &cmd, 0);
                if (err)
                        break;

                /* if we're just probing, do a single pass */
                if (ocr == 0)
                        break;

                /* otherwise wait until reset completes */
                if (mmc_host_is_spi(host)) {
                        if (!(cmd.resp[0] & R1_SPI_IDLE))
                                break;
                } else {
                        if (cmd.resp[0] & MMC_CARD_BUSY)
                                break;
                }

                err = -ETIMEDOUT;

                mmc_delay(10);
        }

        if (rocr && !mmc_host_is_spi(host))
                *rocr = cmd.resp[0];

        return err;
}
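
/*
 * Example (illustrative sketch only): the attach path typically issues CMD1
 * twice, first with a zero argument to probe the OCR (single pass), then with
 * the negotiated voltage window until the card reports ready.
 * example_probe_op_cond() is a hypothetical helper; it assumes the returned
 * OCR has already been matched against the host's supported voltages, and
 * bit 30 is set to request sector (high-capacity) addressing.
 */
static int __maybe_unused example_probe_op_cond(struct mmc_host *host)
{
        u32 ocr, rocr;
        int err;

        err = mmc_send_op_cond(host, 0, &ocr); /* probe: just read the OCR */
        if (err)
                return err;

        return mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
}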
int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
{
        int err;
        struct mmc_command cmd = {0};

        BUG_ON(!host);
        BUG_ON(!cid);

        cmd.opcode = MMC_ALL_SEND_CID;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR;

        err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        memcpy(cid, cmd.resp, sizeof(u32) * 4);

        return 0;
}

int mmc_set_relative_addr(struct mmc_card *card)
{
        struct mmc_command cmd = {0};

        BUG_ON(!card);
        BUG_ON(!card->host);

        cmd.opcode = MMC_SET_RELATIVE_ADDR;
        cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

        return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
        int err;
        struct mmc_command cmd = {0};

        BUG_ON(!host);
        BUG_ON(!cxd);

        cmd.opcode = opcode;
        cmd.arg = arg;
        cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
        if (err)
                return err;

        memcpy(cxd, cmd.resp, sizeof(u32) * 4);

        return 0;
}
/*
 * NOTE: the caller is required to pass a DMA-capable buffer in @buf, or an
 * on-stack buffer (at the cost of some overhead in the callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
                  u32 opcode, void *buf, unsigned len)
{
        struct mmc_request mrq = {NULL};
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};
        struct scatterlist sg;

        mrq.cmd = &cmd;
        mrq.data = &data;

        cmd.opcode = opcode;
        cmd.arg = 0;

        /* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
         * rely on callers to never use this with "native" calls for reading
         * CSD or CID. Native versions of those commands use the R2 type,
         * not R1 plus a data block.
         */
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        data.blksz = len;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;
        data.sg = &sg;
        data.sg_len = 1;

        sg_init_one(&sg, buf, len);
        if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
                /*
                 * The spec states that CSD and CID accesses have a timeout
                 * of 64 clock cycles.
                 */
                data.timeout_ns = 0;
                data.timeout_clks = 64;
        } else
                mmc_set_data_timeout(&data, card);

        mmc_wait_for_req(host, &mrq);

        if (cmd.error)
                return cmd.error;
        if (data.error)
                return data.error;

        return 0;
}
int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
        int ret, i;
        u32 *csd_tmp;

        if (!mmc_host_is_spi(card->host))
                return mmc_send_cxd_native(card->host, card->rca << 16,
                                csd, MMC_SEND_CSD);

        csd_tmp = kzalloc(16, GFP_KERNEL);
        if (!csd_tmp)
                return -ENOMEM;

        ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
        if (ret)
                goto err;

        for (i = 0; i < 4; i++)
                csd[i] = be32_to_cpu(csd_tmp[i]);

err:
        kfree(csd_tmp);
        return ret;
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
        int ret, i;
        u32 *cid_tmp;

        if (!mmc_host_is_spi(host)) {
                if (!host->card)
                        return -EINVAL;
                return mmc_send_cxd_native(host, host->card->rca << 16,
                                cid, MMC_SEND_CID);
        }

        cid_tmp = kzalloc(16, GFP_KERNEL);
        if (!cid_tmp)
                return -ENOMEM;

        ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
        if (ret)
                goto err;

        for (i = 0; i < 4; i++)
                cid[i] = be32_to_cpu(cid_tmp[i]);

err:
        kfree(cid_tmp);
        return ret;
}
int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
        int err;
        u8 *ext_csd;

        if (!card || !new_ext_csd)
                return -EINVAL;

        if (!mmc_can_ext_csd(card))
                return -EOPNOTSUPP;

        /*
         * As the ext_csd is so large and mostly unused, we don't store the
         * raw block in mmc_card.
         */
        ext_csd = kzalloc(512, GFP_KERNEL);
        if (!ext_csd)
                return -ENOMEM;

        err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
                                512);
        if (err)
                kfree(ext_csd);
        else
                *new_ext_csd = ext_csd;

        return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
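
/*
 * Example (illustrative sketch only): a typical caller reads the whole
 * 512-byte EXT_CSD block, picks out the byte(s) it needs and frees the
 * buffer. example_read_ext_csd_rev() is a hypothetical helper; EXT_CSD_REV
 * is the standard revision byte at index 192.
 */
static int __maybe_unused example_read_ext_csd_rev(struct mmc_card *card, u8 *rev)
{
        u8 *ext_csd;
        int err;

        err = mmc_get_ext_csd(card, &ext_csd);
        if (err)
                return err;

        *rev = ext_csd[EXT_CSD_REV];    /* byte 192 of the EXT_CSD block */
        kfree(ext_csd);
        return 0;
}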
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
        struct mmc_command cmd = {0};
        int err;

        cmd.opcode = MMC_SPI_READ_OCR;
        cmd.arg = highcap ? (1 << 30) : 0;
        cmd.flags = MMC_RSP_SPI_R3;

        err = mmc_wait_for_cmd(host, &cmd, 0);

        *ocrp = cmd.resp[1];
        return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
        struct mmc_command cmd = {0};
        int err;

        cmd.opcode = MMC_SPI_CRC_ON_OFF;
        cmd.flags = MMC_RSP_SPI_R1;
        cmd.arg = use_crc;

        err = mmc_wait_for_cmd(host, &cmd, 0);
        if (!err)
                host->use_spi_crc = use_crc;
        return err;
}
int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
        if (mmc_host_is_spi(host)) {
                if (status & R1_SPI_ILLEGAL_COMMAND)
                        return -EBADMSG;
        } else {
                if (status & 0xFDFFA000)
                        pr_warn("%s: unexpected status %#x after switch\n",
                                mmc_hostname(host), status);
                if (status & R1_SWITCH_ERROR)
                        return -EBADMSG;
        }
        return 0;
}
/**
 * __mmc_switch - modify EXT_CSD register
 * @card: the MMC card associated with the data transfer
 * @set: cmd set values
 * @index: EXT_CSD register index
 * @value: value to program into EXT_CSD register
 * @timeout_ms: timeout (ms) for operation performed by register write,
 *	timeout of zero implies maximum possible timeout
 * @use_busy_signal: use the busy signal as response type
 * @send_status: send status cmd to poll for busy
 * @ignore_crc: ignore CRC errors when sending status cmd to poll for busy
 *
 * Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
                unsigned int timeout_ms, bool use_busy_signal, bool send_status,
                bool ignore_crc)
{
        struct mmc_host *host = card->host;
        int err;
        struct mmc_command cmd = {0};
        unsigned long timeout;
        u32 status = 0;
        bool use_r1b_resp = use_busy_signal;
        bool expired = false;
        bool busy = false;

        mmc_retune_hold(host);

        /*
         * If the cmd timeout and the max_busy_timeout of the host are both
         * specified, let's validate them. A failure means we need to prevent
         * the host from doing hw busy detection, which is done by converting
         * to a R1 response instead of a R1B.
         */
        if (timeout_ms && host->max_busy_timeout &&
                (timeout_ms > host->max_busy_timeout))
                use_r1b_resp = false;

        cmd.opcode = MMC_SWITCH;
        cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
                  (index << 16) |
                  (value << 8) |
                  set;
        cmd.flags = MMC_CMD_AC;
        if (use_r1b_resp) {
                cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
                /*
                 * A busy_timeout of zero means the host can decide to use
                 * whatever value it finds suitable.
                 */
                cmd.busy_timeout = timeout_ms;
        } else {
                cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
        }

        if (index == EXT_CSD_SANITIZE_START)
                cmd.sanitize_busy = true;

        err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
        if (err)
                goto out;

        /* No need to check card status in case of unblocking command */
        if (!use_busy_signal)
                goto out;
        /*
         * CRC errors shall only be ignored in cases where CMD13 is used to
         * poll to detect busy completion.
         */
        if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
                ignore_crc = false;

        /* We have an unspecified cmd timeout, use the fallback value. */
        if (!timeout_ms)
                timeout_ms = MMC_OPS_TIMEOUT_MS;

        /* Must check status to be sure of no errors. */
        timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
        do {
                /*
                 * Due to the possibility of being preempted after
                 * sending the status command, check the expiration
                 * time first.
                 */
                expired = time_after(jiffies, timeout);
                if (send_status) {
                        err = __mmc_send_status(card, &status, ignore_crc);
                        if (err)
                                goto out;
                }
                if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
                        break;
                if (host->ops->card_busy) {
                        if (!host->ops->card_busy(host))
                                break;
                        busy = true;
                }
                if (mmc_host_is_spi(host))
                        break;

                /*
                 * If we are not allowed to issue a status command and the
                 * host doesn't support MMC_CAP_WAIT_WHILE_BUSY, we can only
                 * rely on waiting for the stated timeout to be sufficient.
                 */
                if (!send_status && !host->ops->card_busy) {
                        mmc_delay(timeout_ms);
                        goto out;
                }

                /* Timeout if the device never leaves the program state. */
                if (expired &&
                    (R1_CURRENT_STATE(status) == R1_STATE_PRG || busy)) {
                        pr_err("%s: Card stuck in programming state! %s\n",
                                mmc_hostname(host), __func__);
                        err = -ETIMEDOUT;
                        goto out;
                }
        } while (R1_CURRENT_STATE(status) == R1_STATE_PRG || busy);

        err = mmc_switch_status_error(host, status);
out:
        mmc_retune_release(host);

        return err;
}
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
                unsigned int timeout_ms)
{
        return __mmc_switch(card, set, index, value, timeout_ms, true, true,
                                false);
}
EXPORT_SYMBOL_GPL(mmc_switch);
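
/*
 * Example (illustrative sketch only): a typical caller uses mmc_switch() to
 * write a single EXT_CSD byte, here selecting 4-bit bus width with the card's
 * generic CMD6 timeout. example_set_bus_width() is a hypothetical helper, not
 * a core API.
 */
static int __maybe_unused example_set_bus_width(struct mmc_card *card)
{
        return mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
                          EXT_CSD_BUS_WIDTH_4,
                          card->ext_csd.generic_cmd6_time);
}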
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
        struct mmc_request mrq = {NULL};
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};
        struct scatterlist sg;
        struct mmc_ios *ios = &host->ios;
        const u8 *tuning_block_pattern;
        int size, err = 0;
        u8 *data_buf;

        if (ios->bus_width == MMC_BUS_WIDTH_8) {
                tuning_block_pattern = tuning_blk_pattern_8bit;
                size = sizeof(tuning_blk_pattern_8bit);
        } else if (ios->bus_width == MMC_BUS_WIDTH_4) {
                tuning_block_pattern = tuning_blk_pattern_4bit;
                size = sizeof(tuning_blk_pattern_4bit);
        } else
                return -EINVAL;

        data_buf = kzalloc(size, GFP_KERNEL);
        if (!data_buf)
                return -ENOMEM;

        mrq.cmd = &cmd;
        mrq.data = &data;

        cmd.opcode = opcode;
        cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

        data.blksz = size;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;

        /*
         * According to the tuning specs, the tuning process normally
         * completes in fewer than 40 executions of CMD19, and the timeout
         * value should be shorter than 150 ms.
         */
        data.timeout_ns = 150 * NSEC_PER_MSEC;

        data.sg = &sg;
        data.sg_len = 1;
        sg_init_one(&sg, data_buf, size);

        mmc_wait_for_req(host, &mrq);

        if (cmd_error)
                *cmd_error = cmd.error;

        if (cmd.error) {
                err = cmd.error;
                goto out;
        }

        if (data.error) {
                err = data.error;
                goto out;
        }

        if (memcmp(data_buf, tuning_block_pattern, size))
                err = -EIO;

out:
        kfree(data_buf);
        return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
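
/*
 * Example (illustrative sketch only): a host driver's ->execute_tuning()
 * callback typically calls mmc_send_tuning() repeatedly, stepping its sample
 * clock phase between attempts. example_execute_tuning() is a hypothetical
 * helper; the phase-stepping itself is left out here, and the opcode is
 * CMD19 or CMD21 depending on the bus speed mode.
 */
static int __maybe_unused example_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
        int i, err = -EIO;

        /* retry a few times; a real driver would also adjust its sample phase */
        for (i = 0; i < 8 && err; i++)
                err = mmc_send_tuning(mmc, opcode, NULL);

        return err;
}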
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
                  u8 len)
{
        struct mmc_request mrq = {NULL};
        struct mmc_command cmd = {0};
        struct mmc_data data = {0};
        struct scatterlist sg;
        u8 *data_buf;
        u8 *test_buf;
        int i, err;
        static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
        static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

        /* dma onto stack is unsafe/nonportable, but callers to this
         * routine normally provide temporary on-stack buffers ...
         */
        data_buf = kmalloc(len, GFP_KERNEL);
        if (!data_buf)
                return -ENOMEM;

        if (len == 8)
                test_buf = testdata_8bit;
        else if (len == 4)
                test_buf = testdata_4bit;
        else {
                pr_err("%s: Invalid bus_width %d\n",
                       mmc_hostname(host), len);
                kfree(data_buf);
                return -EINVAL;
        }

        if (opcode == MMC_BUS_TEST_W)
                memcpy(data_buf, test_buf, len);

        mrq.cmd = &cmd;
        mrq.data = &data;
        cmd.opcode = opcode;
        cmd.arg = 0;

        /* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
         * rely on callers to never use this with "native" calls for reading
         * CSD or CID. Native versions of those commands use the R2 type,
         * not R1 plus a data block.
         */
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        data.blksz = len;
        data.blocks = 1;
        if (opcode == MMC_BUS_TEST_R)
                data.flags = MMC_DATA_READ;
        else
                data.flags = MMC_DATA_WRITE;

        data.sg = &sg;
        data.sg_len = 1;
        mmc_set_data_timeout(&data, card);
        sg_init_one(&sg, data_buf, len);
        mmc_wait_for_req(host, &mrq);
        err = 0;
        if (opcode == MMC_BUS_TEST_R) {
                for (i = 0; i < len / 4; i++)
                        if ((test_buf[i] ^ data_buf[i]) != 0xff) {
                                err = -EIO;
                                break;
                        }
        }
        kfree(data_buf);

        if (cmd.error)
                return cmd.error;
        if (data.error)
                return data.error;

        return err;
}
int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
        int width;

        if (bus_width == MMC_BUS_WIDTH_8)
                width = 8;
        else if (bus_width == MMC_BUS_WIDTH_4)
                width = 4;
        else if (bus_width == MMC_BUS_WIDTH_1)
                return 0; /* no need for test */
        else
                return -EINVAL;

        /*
         * Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
         * is a problem. This improves chances that the test will work.
         */
        mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
        return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}
int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
        struct mmc_command cmd = {0};
        unsigned int opcode;
        int err;

        if (!card->ext_csd.hpi) {
                pr_warn("%s: Card didn't support HPI command\n",
                        mmc_hostname(card->host));
                return -EINVAL;
        }

        opcode = card->ext_csd.hpi_cmd;
        if (opcode == MMC_STOP_TRANSMISSION)
                cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
        else if (opcode == MMC_SEND_STATUS)
                cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

        cmd.opcode = opcode;
        cmd.arg = card->rca << 16 | 1;

        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err) {
                pr_warn("%s: error %d interrupting operation. "
                        "HPI command response %#x\n", mmc_hostname(card->host),
                        err, cmd.resp[0]);
                return err;
        }
        if (status)
                *status = cmd.resp[0];

        return 0;
}
int mmc_can_ext_csd(struct mmc_card *card)
{
        return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}