/*
 * Copyright (c) 2011-2016 Synaptics Incorporated
 * Copyright (c) 2011 Unixphere
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rmi.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/of.h>
#include "rmi_driver.h"

#define RMI_SPI_DEFAULT_XFER_BUF_SIZE	64

#define RMI_PAGE_SELECT_REGISTER	0x00FF
#define RMI_SPI_PAGE(addr)		(((addr) >> 8) & 0x80)
#define RMI_SPI_XFER_SIZE_LIMIT		255

#define BUFFER_SIZE_INCREMENT		32

enum rmi_spi_op {
	RMI_SPI_WRITE = 0,
	RMI_SPI_READ,
	RMI_SPI_V2_READ_UNIFIED,
	RMI_SPI_V2_READ_SPLIT,
	RMI_SPI_V2_WRITE,
};

struct rmi_spi_cmd {
	enum rmi_spi_op op;
	u16 addr;
};

struct rmi_spi_xport {
	struct rmi_transport_dev xport;
	struct spi_device *spi;

	struct mutex page_mutex;
	int page;

	u8 *rx_buf;
	u8 *tx_buf;
	int xfer_buf_size;

	struct spi_transfer *rx_xfers;
	struct spi_transfer *tx_xfers;
	int rx_xfer_count;
	int tx_xfer_count;
};

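/*
 * (Re)allocate the DMA-safe transfer buffers and the spi_transfer pools.
 * The rx and tx buffers share a single allocation, split in the middle.
 * When per-byte read or write delays are configured, one spi_transfer per
 * byte is reserved so that every byte can carry its own inter-byte delay.
 */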
static int rmi_spi_manage_pools(struct rmi_spi_xport *rmi_spi, int len)
{
	struct spi_device *spi = rmi_spi->spi;
	int buf_size = rmi_spi->xfer_buf_size
		? rmi_spi->xfer_buf_size : RMI_SPI_DEFAULT_XFER_BUF_SIZE;
	struct spi_transfer *xfer_buf;
	void *buf;
	void *tmp;

	while (buf_size < len)
		buf_size *= 2;

	if (buf_size > RMI_SPI_XFER_SIZE_LIMIT)
		buf_size = RMI_SPI_XFER_SIZE_LIMIT;

	tmp = rmi_spi->rx_buf;
	buf = devm_kzalloc(&spi->dev, buf_size * 2,
				GFP_KERNEL | GFP_DMA);
	if (!buf)
		return -ENOMEM;

	rmi_spi->rx_buf = buf;
	rmi_spi->tx_buf = &rmi_spi->rx_buf[buf_size];
	rmi_spi->xfer_buf_size = buf_size;

	if (tmp)
		devm_kfree(&spi->dev, tmp);

	if (rmi_spi->xport.pdata.spi_data.read_delay_us)
		rmi_spi->rx_xfer_count = buf_size;
	else
		rmi_spi->rx_xfer_count = 1;

	if (rmi_spi->xport.pdata.spi_data.write_delay_us)
		rmi_spi->tx_xfer_count = buf_size;
	else
		rmi_spi->tx_xfer_count = 1;

	/*
	 * Allocate a pool of spi_transfer buffers for devices which need
	 * per byte delays.
	 */
	tmp = rmi_spi->rx_xfers;
	xfer_buf = devm_kzalloc(&spi->dev,
		(rmi_spi->rx_xfer_count + rmi_spi->tx_xfer_count)
		* sizeof(struct spi_transfer), GFP_KERNEL);
	if (!xfer_buf)
		return -ENOMEM;

	rmi_spi->rx_xfers = xfer_buf;
	rmi_spi->tx_xfers = &xfer_buf[rmi_spi->rx_xfer_count];

	if (tmp)
		devm_kfree(&spi->dev, tmp);

	return 0;
}

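/*
 * Assemble and run a single SPI message: a 2-byte (RMI) or 4-byte (RMI V2)
 * command header, an optional write payload, and an optional read phase.
 * When per-byte delays are configured, each byte gets its own spi_transfer
 * so the delay can be inserted between bytes.
 */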
static int rmi_spi_xfer(struct rmi_spi_xport *rmi_spi,
			const struct rmi_spi_cmd *cmd, const u8 *tx_buf,
			int tx_len, u8 *rx_buf, int rx_len)
{
	struct spi_device *spi = rmi_spi->spi;
	struct rmi_device_platform_data_spi *spi_data =
					&rmi_spi->xport.pdata.spi_data;
	struct spi_message msg;
	struct spi_transfer *xfer;
	int ret = 0;
	int len;
	int cmd_len = 0;
	int total_tx_len;
	int i;
	u16 addr = cmd->addr;

	spi_message_init(&msg);

	switch (cmd->op) {
	case RMI_SPI_WRITE:
	case RMI_SPI_READ:
		cmd_len += 2;
		break;
	case RMI_SPI_V2_READ_UNIFIED:
	case RMI_SPI_V2_READ_SPLIT:
	case RMI_SPI_V2_WRITE:
		cmd_len += 4;
		break;
	}

	total_tx_len = cmd_len + tx_len;
	len = max(total_tx_len, rx_len);

	if (len > RMI_SPI_XFER_SIZE_LIMIT)
		return -EINVAL;

	if (rmi_spi->xfer_buf_size < len) {
		ret = rmi_spi_manage_pools(rmi_spi, len);
		if (ret < 0)
			return ret;
	}

	if (addr == 0)
		/*
		 * SPI needs an address. Use 0x7FF if we want to keep
		 * reading from the last position of the register pointer.
		 */
		addr = 0x7FF;

	switch (cmd->op) {
	case RMI_SPI_WRITE:
		rmi_spi->tx_buf[0] = (addr >> 8);
		rmi_spi->tx_buf[1] = addr & 0xFF;
		break;
	case RMI_SPI_READ:
		rmi_spi->tx_buf[0] = (addr >> 8) | 0x80;
		rmi_spi->tx_buf[1] = addr & 0xFF;
		break;
	case RMI_SPI_V2_READ_UNIFIED:
		break;
	case RMI_SPI_V2_READ_SPLIT:
		break;
	case RMI_SPI_V2_WRITE:
		rmi_spi->tx_buf[0] = 0x40;
		rmi_spi->tx_buf[1] = (addr >> 8) & 0xFF;
		rmi_spi->tx_buf[2] = addr & 0xFF;
		rmi_spi->tx_buf[3] = tx_len;
		break;
	}

	if (tx_buf)
		memcpy(&rmi_spi->tx_buf[cmd_len], tx_buf, tx_len);

	if (rmi_spi->tx_xfer_count > 1) {
		for (i = 0; i < total_tx_len; i++) {
			xfer = &rmi_spi->tx_xfers[i];
			memset(xfer, 0, sizeof(struct spi_transfer));
			xfer->tx_buf = &rmi_spi->tx_buf[i];
			xfer->len = 1;
			xfer->delay_usecs = spi_data->write_delay_us;
			spi_message_add_tail(xfer, &msg);
		}
	} else {
		xfer = rmi_spi->tx_xfers;
		memset(xfer, 0, sizeof(struct spi_transfer));
		xfer->tx_buf = rmi_spi->tx_buf;
		xfer->len = total_tx_len;
		spi_message_add_tail(xfer, &msg);
	}

	rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: cmd: %s tx_buf len: %d tx_buf: %*ph\n",
		__func__, cmd->op == RMI_SPI_WRITE ? "WRITE" : "READ",
		total_tx_len, total_tx_len, rmi_spi->tx_buf);

	if (rx_buf) {
		if (rmi_spi->rx_xfer_count > 1) {
			for (i = 0; i < rx_len; i++) {
				xfer = &rmi_spi->rx_xfers[i];
				memset(xfer, 0, sizeof(struct spi_transfer));
				xfer->rx_buf = &rmi_spi->rx_buf[i];
				xfer->len = 1;
				xfer->delay_usecs = spi_data->read_delay_us;
				spi_message_add_tail(xfer, &msg);
			}
		} else {
			xfer = rmi_spi->rx_xfers;
			memset(xfer, 0, sizeof(struct spi_transfer));
			xfer->rx_buf = rmi_spi->rx_buf;
			xfer->len = rx_len;
			spi_message_add_tail(xfer, &msg);
		}
	}

	ret = spi_sync(spi, &msg);
	if (ret < 0) {
		dev_err(&spi->dev, "spi xfer failed: %d\n", ret);
		return ret;
	}

	if (rx_buf) {
		memcpy(rx_buf, rmi_spi->rx_buf, rx_len);
		rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: (%d) %*ph\n",
			__func__, rx_len, rx_len, rx_buf);
	}

	return 0;
}

/*
 * rmi_set_page - Set RMI page
 * @rmi_spi: The pointer to the rmi_spi_xport struct
 * @page: The new page address.
 *
 * RMI devices have 16-bit addressing, but some of the transport
 * implementations (like SMBus) only have 8-bit addressing. So RMI implements
 * a page select register at 0xff of every page so we can reliably address
 * the device in blocks of 256 registers.
 *
 * The page_mutex lock must be held when this function is entered.
 *
 * Returns zero on success, non-zero on failure.
 */
static int rmi_set_page(struct rmi_spi_xport *rmi_spi, u8 page)
{
	struct rmi_spi_cmd cmd;
	int ret;

	cmd.op = RMI_SPI_WRITE;
	cmd.addr = RMI_PAGE_SELECT_REGISTER;

	ret = rmi_spi_xfer(rmi_spi, &cmd, &page, 1, NULL, 0);

	/* Only cache the new page if the write actually succeeded. */
	if (!ret)
		rmi_spi->page = page;

	return ret;
}

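/*
 * Transport .write_block implementation: switch the page select register if
 * the target address lies on a different page, then issue the write. The
 * page mutex serializes the page change with the transfer itself.
 */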
static int rmi_spi_write_block(struct rmi_transport_dev *xport, u16 addr,
			       const void *buf, size_t len)
{
	struct rmi_spi_xport *rmi_spi =
		container_of(xport, struct rmi_spi_xport, xport);
	struct rmi_spi_cmd cmd;
	int ret;

	mutex_lock(&rmi_spi->page_mutex);

	if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
		ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
		if (ret)
			goto exit;
	}

	cmd.op = RMI_SPI_WRITE;
	cmd.addr = addr;

	ret = rmi_spi_xfer(rmi_spi, &cmd, buf, len, NULL, 0);

exit:
	mutex_unlock(&rmi_spi->page_mutex);
	return ret;
}

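/*
 * Transport .read_block implementation: mirror of the write path, except the
 * payload travels in the read phase of the SPI message.
 */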
static int rmi_spi_read_block(struct rmi_transport_dev *xport, u16 addr,
			      void *buf, size_t len)
{
	struct rmi_spi_xport *rmi_spi =
		container_of(xport, struct rmi_spi_xport, xport);
	struct rmi_spi_cmd cmd;
	int ret;

	mutex_lock(&rmi_spi->page_mutex);

	if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
		ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
		if (ret)
			goto exit;
	}

	cmd.op = RMI_SPI_READ;
	cmd.addr = addr;

	ret = rmi_spi_xfer(rmi_spi, &cmd, NULL, 0, buf, len);

exit:
	mutex_unlock(&rmi_spi->page_mutex);
	return ret;
}

static const struct rmi_transport_ops rmi_spi_ops = {
	.write_block	= rmi_spi_write_block,
	.read_block	= rmi_spi_read_block,
};

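/*
 * Illustrative only (not taken from this file): a device tree node using the
 * compatible string and the delay properties parsed below might look like
 * this; the reg, frequency, interrupt and delay values are arbitrary
 * placeholders, not recommendations:
 *
 *	touchscreen@0 {
 *		compatible = "syna,rmi4-spi";
 *		reg = <0>;
 *		spi-max-frequency = <4000000>;
 *		interrupt-parent = <&gpio>;
 *		interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
 *		spi-rx-delay-us = <30>;
 *		spi-tx-delay-us = <30>;
 *	};
 */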
#ifdef CONFIG_OF
static int rmi_spi_of_probe(struct spi_device *spi,
			struct rmi_device_platform_data *pdata)
{
	struct device *dev = &spi->dev;
	int retval;

	retval = rmi_of_property_read_u32(dev,
			&pdata->spi_data.read_delay_us,
			"spi-rx-delay-us", 1);
	if (retval)
		return retval;

	retval = rmi_of_property_read_u32(dev,
			&pdata->spi_data.write_delay_us,
			"spi-tx-delay-us", 1);
	if (retval)
		return retval;

	return 0;
}

static const struct of_device_id rmi_spi_of_match[] = {
	{ .compatible = "syna,rmi4-spi" },
	{},
};
MODULE_DEVICE_TABLE(of, rmi_spi_of_match);
#else
static inline int rmi_spi_of_probe(struct spi_device *spi,
				struct rmi_device_platform_data *pdata)
{
	return -ENODEV;
}
#endif

static void rmi_spi_unregister_transport(void *data)
{
	struct rmi_spi_xport *rmi_spi = data;

	rmi_unregister_transport_device(&rmi_spi->xport);
}

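/*
 * Probe: the RMI SPI protocol requires a full-duplex controller, so
 * half-duplex masters are rejected. Configuration is then taken from DT or
 * platform data, the SPI device is set up, transfer buffers are allocated,
 * the device is verified by writing page select 0, and the RMI transport
 * is registered (with a devm action to unregister it on teardown).
 */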
static int rmi_spi_probe(struct spi_device *spi)
{
	struct rmi_spi_xport *rmi_spi;
	struct rmi_device_platform_data *pdata;
	struct rmi_device_platform_data *spi_pdata = spi->dev.platform_data;
	int retval;

	if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
		return -EINVAL;

	rmi_spi = devm_kzalloc(&spi->dev, sizeof(struct rmi_spi_xport),
			GFP_KERNEL);
	if (!rmi_spi)
		return -ENOMEM;

	pdata = &rmi_spi->xport.pdata;

	if (spi->dev.of_node) {
		retval = rmi_spi_of_probe(spi, pdata);
		if (retval)
			return retval;
	} else if (spi_pdata) {
		*pdata = *spi_pdata;
	}

	if (pdata->spi_data.bits_per_word)
		spi->bits_per_word = pdata->spi_data.bits_per_word;

	if (pdata->spi_data.mode)
		spi->mode = pdata->spi_data.mode;

	retval = spi_setup(spi);
	if (retval < 0) {
		dev_err(&spi->dev, "spi_setup failed!\n");
		return retval;
	}

	pdata->irq = spi->irq;

	rmi_spi->spi = spi;
	mutex_init(&rmi_spi->page_mutex);

	rmi_spi->xport.dev = &spi->dev;
	rmi_spi->xport.proto_name = "spi";
	rmi_spi->xport.ops = &rmi_spi_ops;

	spi_set_drvdata(spi, rmi_spi);

	retval = rmi_spi_manage_pools(rmi_spi, RMI_SPI_DEFAULT_XFER_BUF_SIZE);
	if (retval)
		return retval;

	/*
	 * Setting the page to zero will (a) make sure the PSR is in a
	 * known state, and (b) make sure we can talk to the device.
	 */
	retval = rmi_set_page(rmi_spi, 0);
	if (retval) {
		dev_err(&spi->dev, "Failed to set page select to 0.\n");
		return retval;
	}

	retval = rmi_register_transport_device(&rmi_spi->xport);
	if (retval) {
		dev_err(&spi->dev, "failed to register transport.\n");
		return retval;
	}

	retval = devm_add_action_or_reset(&spi->dev,
					  rmi_spi_unregister_transport,
					  rmi_spi);
	if (retval)
		return retval;

	dev_info(&spi->dev, "registered RMI SPI driver\n");
	return 0;
}

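/*
 * Power management: both system sleep and runtime PM delegate to the RMI
 * core suspend/resume helpers; only the flag passed as the second argument
 * differs (true for system sleep, false for runtime PM).
 */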
#ifdef CONFIG_PM_SLEEP
static int rmi_spi_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev, true);
	if (ret)
		dev_warn(dev, "Failed to suspend device: %d\n", ret);

	return ret;
}

static int rmi_spi_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_resume(rmi_spi->xport.rmi_dev, true);
	if (ret)
		dev_warn(dev, "Failed to resume device: %d\n", ret);

	return ret;
}
#endif

#ifdef CONFIG_PM
static int rmi_spi_runtime_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev, false);
	if (ret)
		dev_warn(dev, "Failed to suspend device: %d\n", ret);

	return 0;
}

static int rmi_spi_runtime_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_resume(rmi_spi->xport.rmi_dev, false);
	if (ret)
		dev_warn(dev, "Failed to resume device: %d\n", ret);

	return 0;
}
#endif

static const struct dev_pm_ops rmi_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(rmi_spi_suspend, rmi_spi_resume)
	SET_RUNTIME_PM_OPS(rmi_spi_runtime_suspend, rmi_spi_runtime_resume,
			   NULL)
};

static const struct spi_device_id rmi_id[] = {
	{ "rmi4_spi", 0 },
	{ }
};
MODULE_DEVICE_TABLE(spi, rmi_id);

static struct spi_driver rmi_spi_driver = {
	.driver = {
		.name	= "rmi4_spi",
		.pm	= &rmi_spi_pm,
		.of_match_table = of_match_ptr(rmi_spi_of_match),
	},
	.id_table	= rmi_id,
	.probe		= rmi_spi_probe,
};

module_spi_driver(rmi_spi_driver);

MODULE_AUTHOR("Christopher Heiny <cheiny@synaptics.com>");
MODULE_AUTHOR("Andrew Duggan <aduggan@synaptics.com>");
MODULE_DESCRIPTION("RMI SPI driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(RMI_DRIVER_VERSION);