spi-mt65xx.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103
  1. /*
  2. * Copyright (c) 2015 MediaTek Inc.
  3. * Copyright (C) 2021 XiaoMi, Inc.
  4. * Author: Leilk Liu <leilk.liu@mediatek.com>
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 as
  8. * published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. */
  15. #include <linux/clk.h>
  16. #include <linux/device.h>
  17. #include <linux/err.h>
  18. #include <linux/interrupt.h>
  19. #include <linux/io.h>
  20. #include <linux/ioport.h>
  21. #include <linux/module.h>
  22. #include <linux/of.h>
  23. #include <linux/of_gpio.h>
  24. #include <linux/platform_device.h>
  25. #include <linux/platform_data/spi-mt65xx.h>
  26. #include <linux/pm_runtime.h>
  27. #include <linux/spi/spi.h>
  28. #include <linux/dma-mapping.h>
  29. #include <linux/pm_qos.h>
  30. #define SPI_CFG0_REG 0x0000
  31. #define SPI_CFG1_REG 0x0004
  32. #define SPI_TX_SRC_REG 0x0008
  33. #define SPI_RX_DST_REG 0x000c
  34. #define SPI_TX_DATA_REG 0x0010
  35. #define SPI_RX_DATA_REG 0x0014
  36. #define SPI_CMD_REG 0x0018
  37. #define SPI_STATUS0_REG 0x001c
  38. #define SPI_STATUS1_REG 0x0020
  39. #define SPI_PAD_SEL_REG 0x0024
  40. #define SPI_CFG2_REG 0x0028
  41. #define SPI_TX_SRC_REG_64 0x002c
  42. #define SPI_RX_DST_REG_64 0x0030
  43. #define SPI_CFG0_SCK_HIGH_OFFSET 0
  44. #define SPI_CFG0_SCK_LOW_OFFSET 8
  45. #define SPI_CFG0_CS_HOLD_OFFSET 16
  46. #define SPI_CFG0_CS_SETUP_OFFSET 24
  47. #define SPI_ADJUST_CFG0_CS_HOLD_OFFSET 0
  48. #define SPI_ADJUST_CFG0_CS_SETUP_OFFSET 16
  49. #define SPI_CFG1_CS_IDLE_OFFSET 0
  50. #define SPI_CFG1_PACKET_LOOP_OFFSET 8
  51. #define SPI_CFG1_PACKET_LENGTH_OFFSET 16
  52. #define SPI_CFG1_GET_TICK_DLY_OFFSET 29
  53. #define SPI_CFG1_CS_IDLE_MASK 0xff
  54. #define SPI_CFG1_PACKET_LOOP_MASK 0xff00
  55. #define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000
  56. #define SPI_CFG2_SCK_HIGH_OFFSET 0
  57. #define SPI_CFG2_SCK_LOW_OFFSET 16
  58. #define SPI_CMD_ACT BIT(0)
  59. #define SPI_CMD_RESUME BIT(1)
  60. #define SPI_CMD_RST BIT(2)
  61. #define SPI_CMD_PAUSE_EN BIT(4)
  62. #define SPI_CMD_DEASSERT BIT(5)
  63. #define SPI_CMD_SAMPLE_SEL BIT(6)
  64. #define SPI_CMD_CS_POL BIT(7)
  65. #define SPI_CMD_CPHA BIT(8)
  66. #define SPI_CMD_CPOL BIT(9)
  67. #define SPI_CMD_RX_DMA BIT(10)
  68. #define SPI_CMD_TX_DMA BIT(11)
  69. #define SPI_CMD_TXMSBF BIT(12)
  70. #define SPI_CMD_RXMSBF BIT(13)
  71. #define SPI_CMD_RX_ENDIAN BIT(14)
  72. #define SPI_CMD_TX_ENDIAN BIT(15)
  73. #define SPI_CMD_FINISH_IE BIT(16)
  74. #define SPI_CMD_PAUSE_IE BIT(17)
  75. #define MT8173_SPI_MAX_PAD_SEL 3
  76. #define MTK_SPI_PAUSE_INT_STATUS 0x2
  77. #define MTK_SPI_IDLE 0
  78. #define MTK_SPI_PAUSED 1
  79. #define MTK_SPI_MAX_FIFO_SIZE 32U
  80. #define MTK_SPI_PACKET_SIZE 1024
  81. #define MTK_SPI_32BITS_MASK (0xffffffff)
  82. #define DMA_ADDR_EXT_BITS (36)
  83. #define DMA_ADDR_DEF_BITS (32)
/* Per-SoC capability flags, selected through the of_match table. */
struct mtk_spi_compatible {
	/* pad (pin group) selection register must be programmed per CS */
	bool need_pad_sel;
	/* Must explicitly send dummy Tx bytes to do Rx only transfer */
	bool must_tx;
	/* some IC design adjust cfg register to enhance time accuracy */
	bool enhance_timing;
	/* some IC support DMA addr extension */
	bool dma_ext;
};
/* Runtime state for one SPI controller instance. */
struct mtk_spi {
	void __iomem *base;		/* mapped controller registers */
	u32 state;			/* MTK_SPI_IDLE or MTK_SPI_PAUSED */
	int pad_num;			/* number of entries in pad_sel */
	u32 *pad_sel;			/* pad group index per chip select */
	struct clk *parent_clk, *sel_clk, *spi_clk;
	struct spi_transfer *cur_transfer;	/* transfer being serviced */
	u32 xfer_len;			/* bytes programmed for current burst */
	u32 num_xfered;			/* bytes completed so far (FIFO path) */
	struct scatterlist *tx_sgl, *rx_sgl;	/* current DMA sg entries */
	u32 tx_sgl_len, rx_sgl_len;	/* bytes left in current sg entries */
	const struct mtk_spi_compatible *dev_comp;	/* SoC quirks */
	struct pm_qos_request spi_qos_request;	/* cpu-dma-latency request */
};
/* SoCs with no special quirks. */
static const struct mtk_spi_compatible mtk_common_compat;

static const struct mtk_spi_compatible mt2712_compat = {
	.must_tx = true,
};

static const struct mtk_spi_compatible mt6739_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
	.dma_ext = true,
};

static const struct mtk_spi_compatible mt6765_compat = {
	.need_pad_sel = true,
	.must_tx = true,
	.enhance_timing = true,
	.dma_ext = true,
};

static const struct mtk_spi_compatible mt7622_compat = {
	.must_tx = true,
	.enhance_timing = true,
};

static const struct mtk_spi_compatible mt8173_compat = {
	.need_pad_sel = true,
	.must_tx = true,
};
/*
 * A piece of default chip info unless the platform
 * supplies it.
 */
static const struct mtk_chip_config mtk_default_chip_info = {
	.rx_mlsb = 1,		/* MSB-first on both directions by default */
	.tx_mlsb = 1,
	.sample_sel = 0,
	.cs_setuptime = 0,	/* 0 => derive CS timing from SCK period */
	.cs_holdtime = 0,
	.cs_idletime = 0,
	.deassert_mode = false,
	.tick_delay = 0,
};
/* Device-tree match table binding each compatible string to its quirks. */
static const struct of_device_id mtk_spi_of_match[] = {
	{ .compatible = "mediatek,mt2701-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt2712-spi",
		.data = (void *)&mt2712_compat,
	},
	{ .compatible = "mediatek,mt6589-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt6739-spi",
		.data = (void *)&mt6739_compat,
	},
	{ .compatible = "mediatek,mt6765-spi",
		.data = (void *)&mt6765_compat,
	},
	{ .compatible = "mediatek,mt7622-spi",
		.data = (void *)&mt7622_compat,
	},
	{ .compatible = "mediatek,mt8135-spi",
		.data = (void *)&mtk_common_compat,
	},
	{ .compatible = "mediatek,mt8173-spi",
		.data = (void *)&mt8173_compat,
	},
	{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_of_match);
/* Runtime switch for verbose transfer logging (toggled via sysfs "spi_log"). */
#define LOG_CLOSE 0
#define LOG_OPEN 1
u8 spi_log_status = LOG_CLOSE;

/*
 * Conditional log helper, active only while spi_log_status == LOG_OPEN.
 * NOTE: the expansion references a variable named `master` — every caller
 * must have a `struct spi_master *master` in scope.
 */
#define spi_debug(fmt, args...) do { \
	if (spi_log_status == LOG_OPEN) {\
		pr_info("[%s]%s() " fmt, dev_name(&master->dev),\
			__func__, ##args);\
	} \
} while (0)
  182. static ssize_t spi_log_show(struct device *dev, struct device_attribute *attr,
  183. char *buf)
  184. {
  185. char buf_temp[50] = { 0 };
  186. if (buf == NULL) {
  187. pr_notice("%s() *buf is NULL\n", __func__);
  188. return -EINVAL;
  189. }
  190. snprintf(buf_temp, sizeof(buf_temp), "Now spi log %s.\n",
  191. (spi_log_status == LOG_CLOSE)?"disabled":"enabled");
  192. strncat(buf, buf_temp, strlen(buf_temp));
  193. return strlen(buf);
  194. }
  195. static ssize_t spi_log_store(struct device *dev, struct device_attribute *attr,
  196. const char *buf, size_t count)
  197. {
  198. if (strlen(buf) < 1) {
  199. pr_notice("%s() Invalid input!\n", __func__);
  200. return -EINVAL;
  201. }
  202. pr_info("[spi]%s buflen:%zu buf:%s\n", __func__, strlen(buf), buf);
  203. if (!strncmp(buf, "1", 1)) {
  204. pr_info("[spi]%s Now enable spi log\n", __func__);
  205. spi_log_status = LOG_OPEN;
  206. } else if (!strncmp(buf, "0", 1)) {
  207. pr_info("[spi]%s Now disable spi log\n", __func__);
  208. spi_log_status = LOG_CLOSE;
  209. } else
  210. pr_info("[spi]%s invalid parameter.Plz Input 1 or 0\n",
  211. __func__);
  212. return count;
  213. }
  214. static DEVICE_ATTR_RW(spi_log);
/*
 * Dump the main controller registers through spi_debug (no-op unless
 * verbose logging is enabled).  Purely diagnostic; no side effects other
 * than the register reads.
 */
static void spi_dump_reg(struct mtk_spi *mdata, struct spi_master *master)
{
	spi_debug("||**************%s**************||\n", __func__);
	spi_debug("cfg0:0x%.8x\n", readl(mdata->base + SPI_CFG0_REG));
	spi_debug("cfg1:0x%.8x\n", readl(mdata->base + SPI_CFG1_REG));
	spi_debug("cfg2:0x%.8x\n", readl(mdata->base + SPI_CFG2_REG));
	spi_debug("cmd :0x%.8x\n", readl(mdata->base + SPI_CMD_REG));
	spi_debug("tx_s:0x%.8x\n", readl(mdata->base + SPI_TX_SRC_REG));
	spi_debug("rx_d:0x%.8x\n", readl(mdata->base + SPI_RX_DST_REG));
	spi_debug("status1:0x%.8x\n", readl(mdata->base + SPI_STATUS1_REG));
	spi_debug("pad_sel:0x%.8x\n", readl(mdata->base + SPI_PAD_SEL_REG));
	spi_debug("||**************%s end**************||\n", __func__);
}
/*
 * Dump the per-device chip configuration for a message through spi_debug.
 * Diagnostic only.  Note: unconditionally indexes mdata->pad_sel, which
 * is only allocated on need_pad_sel SoCs — callers on other SoCs rely on
 * logging being disabled (spi_debug compiles the access but gates it).
 */
static void spi_dump_config(struct spi_master *master, struct spi_message *msg)
{
	struct spi_device *spi = msg->spi;
	struct mtk_chip_config *chip_config = spi->controller_data;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	spi_debug("||**************%s**************||\n", __func__);
	spi_debug("chip_config->spi_mode:0x%.4x\n", spi->mode);
	spi_debug("chip_config->tx_mlsb:%d.\n", chip_config->tx_mlsb);
	spi_debug("chip_config->rx_mlsb:%d.\n", chip_config->rx_mlsb);
	spi_debug("chip_config->sample_sel:%d\n", chip_config->sample_sel);
	spi_debug("chip_config->cs_setuptime=%d\n",
		  chip_config->cs_setuptime);
	spi_debug("chip_config->cs_holdtime=%d\n",
		  chip_config->cs_holdtime);
	spi_debug("chip_config->cs_idletime=%d\n",
		  chip_config->cs_idletime);
	spi_debug("chip_config->deassert_mode=%d\n",
		  chip_config->deassert_mode);
	spi_debug("chip_config->chip_select:%d,chip_config->pad_sel:%d\n",
		  spi->chip_select, mdata->pad_sel[spi->chip_select]);
	spi_debug("||**************%s end**************||\n", __func__);
}
  250. static void mtk_spi_reset(struct mtk_spi *mdata)
  251. {
  252. u32 reg_val;
  253. /* set the software reset bit in SPI_CMD_REG. */
  254. reg_val = readl(mdata->base + SPI_CMD_REG);
  255. reg_val |= SPI_CMD_RST;
  256. writel(reg_val, mdata->base + SPI_CMD_REG);
  257. reg_val = readl(mdata->base + SPI_CMD_REG);
  258. reg_val &= ~SPI_CMD_RST;
  259. writel(reg_val, mdata->base + SPI_CMD_REG);
  260. }
/*
 * Per-message controller setup: program SPI_CMD_REG with the clock mode
 * (CPHA/CPOL), bit order, FIFO endianness, CS polarity / sample edge
 * (enhance_timing SoCs only), deassert mode and interrupt enables, then
 * select the pad group for this chip select when the SoC needs it.
 * DMA mode is cleared here; the transfer path re-enables it as needed.
 * Always returns 0.
 */
static int mtk_spi_prepare_message(struct spi_master *master,
				   struct spi_message *msg)
{
	u16 cpha, cpol;
	u32 reg_val;
	struct spi_device *spi = msg->spi;
	struct mtk_chip_config *chip_config = spi->controller_data;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	cpha = spi->mode & SPI_CPHA ? 1 : 0;
	cpol = spi->mode & SPI_CPOL ? 1 : 0;
	spi_debug("cpha:%d cpol:%d. chip_config as below\n", cpha, cpol);
	spi_dump_config(master, msg);

	reg_val = readl(mdata->base + SPI_CMD_REG);
	if (cpha)
		reg_val |= SPI_CMD_CPHA;
	else
		reg_val &= ~SPI_CMD_CPHA;
	if (cpol)
		reg_val |= SPI_CMD_CPOL;
	else
		reg_val &= ~SPI_CMD_CPOL;
	/* set the mlsbx and mlsbtx */
	if (chip_config->tx_mlsb)
		reg_val |= SPI_CMD_TXMSBF;
	else
		reg_val &= ~SPI_CMD_TXMSBF;
	if (chip_config->rx_mlsb)
		reg_val |= SPI_CMD_RXMSBF;
	else
		reg_val &= ~SPI_CMD_RXMSBF;
	/* set the tx/rx endian to match the CPU's byte order */
#ifdef __LITTLE_ENDIAN
	reg_val &= ~SPI_CMD_TX_ENDIAN;
	reg_val &= ~SPI_CMD_RX_ENDIAN;
#else
	reg_val |= SPI_CMD_TX_ENDIAN;
	reg_val |= SPI_CMD_RX_ENDIAN;
#endif
	if (mdata->dev_comp->enhance_timing) {
		/* set CS polarity */
		if (spi->mode & SPI_CS_HIGH)
			reg_val |= SPI_CMD_CS_POL;
		else
			reg_val &= ~SPI_CMD_CS_POL;
		if (chip_config->sample_sel)
			reg_val |= SPI_CMD_SAMPLE_SEL;
		else
			reg_val &= ~SPI_CMD_SAMPLE_SEL;
	}
	/* set finish and pause interrupt always enable */
	reg_val |= SPI_CMD_FINISH_IE | SPI_CMD_PAUSE_IE;
	/* disable dma mode */
	reg_val &= ~(SPI_CMD_TX_DMA | SPI_CMD_RX_DMA);
	/* deassert mode */
	if (chip_config->deassert_mode == true)
		reg_val |= SPI_CMD_DEASSERT;
	else
		reg_val &= ~SPI_CMD_DEASSERT;
	writel(reg_val, mdata->base + SPI_CMD_REG);
	/* pad select */
	if (mdata->dev_comp->need_pad_sel)
		writel(mdata->pad_sel[spi->chip_select],
		       mdata->base + SPI_PAD_SEL_REG);
	return 0;
}
  326. static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
  327. {
  328. u32 reg_val;
  329. struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
  330. if (spi->mode & SPI_CS_HIGH)
  331. enable = !enable;
  332. reg_val = readl(mdata->base + SPI_CMD_REG);
  333. if (!enable) {
  334. reg_val |= SPI_CMD_PAUSE_EN;
  335. writel(reg_val, mdata->base + SPI_CMD_REG);
  336. } else {
  337. reg_val &= ~SPI_CMD_PAUSE_EN;
  338. writel(reg_val, mdata->base + SPI_CMD_REG);
  339. mdata->state = MTK_SPI_IDLE;
  340. mtk_spi_reset(mdata);
  341. }
  342. }
  343. static void mtk_spi_prepare_transfer(struct spi_master *master,
  344. struct spi_transfer *xfer, struct spi_device *spi)
  345. {
  346. u32 spi_clk_hz, div, sck_time, cs_time, reg_val;
  347. struct mtk_spi *mdata = spi_master_get_devdata(master);
  348. u32 cs_setuptime, cs_holdtime, cs_idletime = 0;
  349. struct mtk_chip_config *chip_config = spi->controller_data;
  350. spi_clk_hz = clk_get_rate(mdata->spi_clk);
  351. if (xfer->speed_hz < spi_clk_hz / 2)
  352. div = DIV_ROUND_UP(spi_clk_hz, xfer->speed_hz);
  353. else
  354. div = 1;
  355. sck_time = (div + 1) / 2;
  356. cs_time = sck_time * 2;
  357. if (chip_config->cs_setuptime)
  358. cs_setuptime = chip_config->cs_setuptime;
  359. else
  360. cs_setuptime = cs_time;
  361. if (chip_config->cs_holdtime)
  362. cs_holdtime = chip_config->cs_holdtime;
  363. else
  364. cs_holdtime = cs_time;
  365. if (chip_config->cs_idletime)
  366. cs_idletime = chip_config->cs_idletime;
  367. else
  368. cs_idletime = cs_time;
  369. if (mdata->dev_comp->enhance_timing) {
  370. reg_val = (((sck_time - 1) & 0xffff)
  371. << SPI_CFG2_SCK_HIGH_OFFSET);
  372. reg_val |= (((sck_time - 1) & 0xffff)
  373. << SPI_CFG2_SCK_LOW_OFFSET);
  374. writel(reg_val, mdata->base + SPI_CFG2_REG);
  375. reg_val = (((cs_time - 1) & 0xffff)
  376. << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
  377. reg_val |= (((cs_setuptime - 1) & 0xffff)
  378. << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
  379. writel(reg_val, mdata->base + SPI_CFG0_REG);
  380. } else {
  381. reg_val = (((sck_time - 1) & 0xff)
  382. << SPI_CFG0_SCK_HIGH_OFFSET);
  383. reg_val |= (((sck_time - 1) & 0xff) <<
  384. SPI_CFG0_SCK_LOW_OFFSET);
  385. reg_val |= (((cs_holdtime - 1) & 0xff) <<
  386. SPI_CFG0_CS_HOLD_OFFSET);
  387. reg_val |= (((cs_setuptime - 1) & 0xff) <<
  388. SPI_CFG0_CS_SETUP_OFFSET);
  389. writel(reg_val, mdata->base + SPI_CFG0_REG);
  390. }
  391. reg_val = readl(mdata->base + SPI_CFG1_REG);
  392. reg_val &= ~SPI_CFG1_CS_IDLE_MASK;
  393. reg_val |= (((cs_idletime - 1) & 0xff) << SPI_CFG1_CS_IDLE_OFFSET);
  394. writel(reg_val, mdata->base + SPI_CFG1_REG);
  395. }
  396. static void mtk_spi_setup_packet(struct spi_master *master)
  397. {
  398. u32 packet_size, packet_loop, reg_val;
  399. struct mtk_spi *mdata = spi_master_get_devdata(master);
  400. packet_size = min_t(u32, mdata->xfer_len, MTK_SPI_PACKET_SIZE);
  401. packet_loop = mdata->xfer_len / packet_size;
  402. reg_val = readl(mdata->base + SPI_CFG1_REG);
  403. reg_val &= ~(SPI_CFG1_PACKET_LENGTH_MASK | SPI_CFG1_PACKET_LOOP_MASK);
  404. reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
  405. reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
  406. writel(reg_val, mdata->base + SPI_CFG1_REG);
  407. }
  408. static void mtk_spi_enable_transfer(struct spi_master *master)
  409. {
  410. u32 cmd;
  411. struct mtk_spi *mdata = spi_master_get_devdata(master);
  412. cmd = readl(mdata->base + SPI_CMD_REG);
  413. if (mdata->state == MTK_SPI_IDLE)
  414. cmd |= SPI_CMD_ACT;
  415. else
  416. cmd |= SPI_CMD_RESUME;
  417. writel(cmd, mdata->base + SPI_CMD_REG);
  418. }
  419. static int mtk_spi_get_mult_delta(u32 xfer_len)
  420. {
  421. u32 mult_delta;
  422. if (xfer_len > MTK_SPI_PACKET_SIZE)
  423. mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
  424. else
  425. mult_delta = 0;
  426. return mult_delta;
  427. }
/*
 * Decide how many bytes the next DMA burst moves (mdata->xfer_len) from
 * the bytes remaining in the current tx/rx scatterlist entries.  The
 * burst is trimmed so it stays a whole multiple of MTK_SPI_PACKET_SIZE
 * when more than one packet remains; the leftover (mult_delta) is kept
 * in {tx,rx}_sgl_len for the following round.  In full-duplex the burst
 * is bounded by the shorter side so both directions advance together.
 */
static void mtk_spi_update_mdata_len(struct spi_master *master)
{
	int mult_delta;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->tx_sgl_len && mdata->rx_sgl_len) {
		/* full duplex: bound the burst by the shorter direction */
		if (mdata->tx_sgl_len > mdata->rx_sgl_len) {
			mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
			mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
			mdata->rx_sgl_len = mult_delta;
			mdata->tx_sgl_len -= mdata->xfer_len;
		} else {
			mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
			mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
			mdata->tx_sgl_len = mult_delta;
			mdata->rx_sgl_len -= mdata->xfer_len;
		}
	} else if (mdata->tx_sgl_len) {
		/* tx only */
		mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
		mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
		mdata->tx_sgl_len = mult_delta;
	} else if (mdata->rx_sgl_len) {
		/* rx only */
		mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
		mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
		mdata->rx_sgl_len = mult_delta;
	}
}
/*
 * Write the current DMA source/destination addresses to the controller.
 * On SoCs with dma_ext, bits above 32 of the dma_addr_t go into the
 * *_REG_64 registers (up to DMA_ADDR_EXT_BITS = 36-bit addresses);
 * otherwise only the low 32 bits are programmed.
 */
static void mtk_spi_setup_dma_addr(struct spi_master *master,
				   struct spi_transfer *xfer)
{
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	if (mdata->tx_sgl) {
		writel((u32)(xfer->tx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_TX_SRC_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(xfer->tx_dma >> 32),
			       mdata->base + SPI_TX_SRC_REG_64);
#endif
	}
	if (mdata->rx_sgl) {
		writel((u32)(xfer->rx_dma & MTK_SPI_32BITS_MASK),
		       mdata->base + SPI_RX_DST_REG);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (mdata->dev_comp->dma_ext)
			writel((u32)(xfer->rx_dma >> 32),
			       mdata->base + SPI_RX_DST_REG_64);
#endif
	}
}
/*
 * Start a PIO (FIFO) transfer: push up to MTK_SPI_MAX_FIFO_SIZE bytes
 * into the TX FIFO and kick the controller; any remainder of xfer->len
 * is fed chunk by chunk from the interrupt handler.  Returns 1 so the
 * SPI core waits for spi_finalize_current_transfer().
 *
 * NOTE(review): tx_buf is dereferenced unconditionally — this appears to
 * rely on SPI_MASTER_MUST_TX (set for must_tx SoCs) guaranteeing a TX
 * buffer even for RX-only transfers; confirm for non-must_tx parts.
 */
static int mtk_spi_fifo_transfer(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *xfer)
{
	int cnt, remainder;
	u32 reg_val;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	mdata->cur_transfer = xfer;
	mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len);
	mdata->num_xfered = 0;
	mtk_spi_prepare_transfer(master, xfer, spi);
	mtk_spi_setup_packet(master);

	/* whole words first, then the 1-3 tail bytes packed into one word */
	cnt = xfer->len / 4;
	iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
	remainder = xfer->len % 4;
	if (remainder > 0) {
		reg_val = 0;
		memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
		writel(reg_val, mdata->base + SPI_TX_DATA_REG);
	}

	spi_debug("spi setting Done.Dump reg before Transfer start:\n");
	spi_dump_reg(mdata, master);
	mtk_spi_enable_transfer(master);
	return 1;
}
/*
 * Start a DMA transfer: enable the TX/RX DMA paths in SPI_CMD_REG, latch
 * the first scatterlist entry of each direction, compute the first burst
 * length, program packet geometry and DMA addresses, then kick the
 * controller.  Subsequent sg entries are chained from the interrupt
 * handler.  Returns 1 so the SPI core waits for completion.
 */
static int mtk_spi_dma_transfer(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	int cmd;
	struct mtk_spi *mdata = spi_master_get_devdata(master);

	mdata->tx_sgl = NULL;
	mdata->rx_sgl = NULL;
	mdata->tx_sgl_len = 0;
	mdata->rx_sgl_len = 0;
	mdata->cur_transfer = xfer;
	mdata->num_xfered = 0;
	mtk_spi_prepare_transfer(master, xfer, spi);

	cmd = readl(mdata->base + SPI_CMD_REG);
	if (xfer->tx_buf)
		cmd |= SPI_CMD_TX_DMA;
	if (xfer->rx_buf)
		cmd |= SPI_CMD_RX_DMA;
	writel(cmd, mdata->base + SPI_CMD_REG);

	/* latch the first sg entry for each active direction */
	if (xfer->tx_buf)
		mdata->tx_sgl = xfer->tx_sg.sgl;
	if (xfer->rx_buf)
		mdata->rx_sgl = xfer->rx_sg.sgl;
	if (mdata->tx_sgl) {
		xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
		mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
	}
	if (mdata->rx_sgl) {
		xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
		mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
	}

	mtk_spi_update_mdata_len(master);
	mtk_spi_setup_packet(master);
	mtk_spi_setup_dma_addr(master, xfer);
	spi_debug("spi setting Done.Dump reg before Transfer start:\n");
	spi_dump_reg(mdata, master);
	mtk_spi_enable_transfer(master);
	return 1;
}
  541. static int mtk_spi_transfer_one(struct spi_master *master,
  542. struct spi_device *spi,
  543. struct spi_transfer *xfer)
  544. {
  545. unsigned long us;
  546. struct mtk_spi *mdata = spi_master_get_devdata(master);
  547. spi_debug("xfer->len:%d\n", xfer->len);
  548. us = xfer->len * 8 * 1000 * 1000 / xfer->speed_hz;
  549. us = us + 20*1000;
  550. pm_qos_update_request_timeout(&mdata->spi_qos_request, 500, us);
  551. if (master->can_dma(master, spi, xfer))
  552. return mtk_spi_dma_transfer(master, spi, xfer);
  553. else
  554. return mtk_spi_fifo_transfer(master, spi, xfer);
  555. }
  556. static bool mtk_spi_can_dma(struct spi_master *master,
  557. struct spi_device *spi,
  558. struct spi_transfer *xfer)
  559. {
  560. /* Buffers for DMA transactions must be 4-byte aligned */
  561. return (xfer->len > MTK_SPI_MAX_FIFO_SIZE &&
  562. (unsigned long)xfer->tx_buf % 4 == 0 &&
  563. (unsigned long)xfer->rx_buf % 4 == 0);
  564. }
  565. static int mtk_spi_setup(struct spi_device *spi)
  566. {
  567. struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
  568. if (!spi->controller_data)
  569. spi->controller_data = (void *)&mtk_default_chip_info;
  570. if (mdata->dev_comp->need_pad_sel && gpio_is_valid(spi->cs_gpio))
  571. gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
  572. return 0;
  573. }
/*
 * Transfer-complete / pause interrupt handler.
 *
 * FIFO path: drain received words from the RX FIFO, and either finalize
 * the transfer (all bytes moved) or refill the TX FIFO with the next
 * chunk and restart.
 *
 * DMA path: advance the tx/rx DMA addresses past the completed burst,
 * walk to the next scatterlist entry when the current one is exhausted,
 * and either finalize (both lists done, DMA bits cleared) or program the
 * next burst and restart.
 */
static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
{
	u32 cmd, reg_val, cnt, remainder, len;
	struct spi_master *master = dev_id;
	struct mtk_spi *mdata = spi_master_get_devdata(master);
	struct spi_transfer *trans = mdata->cur_transfer;

	/* pause status => CS is being held between chunks */
	reg_val = readl(mdata->base + SPI_STATUS0_REG);
	if (reg_val & MTK_SPI_PAUSE_INT_STATUS)
		mdata->state = MTK_SPI_PAUSED;
	else
		mdata->state = MTK_SPI_IDLE;

	if (!master->can_dma(master, master->cur_msg->spi, trans)) {
		/* FIFO path: read back what the controller clocked in */
		if (trans->rx_buf) {
			cnt = mdata->xfer_len / 4;
			ioread32_rep(mdata->base + SPI_RX_DATA_REG,
				     trans->rx_buf + mdata->num_xfered, cnt);
			remainder = mdata->xfer_len % 4;
			if (remainder > 0) {
				reg_val = readl(mdata->base + SPI_RX_DATA_REG);
				memcpy(trans->rx_buf +
				       mdata->num_xfered +
				       (cnt * 4),
				       &reg_val,
				       remainder);
			}
		}
		mdata->num_xfered += mdata->xfer_len;
		if (mdata->num_xfered == trans->len) {
			spi_finalize_current_transfer(master);
			return IRQ_HANDLED;
		}
		/* more to do: push the next chunk into the TX FIFO */
		len = trans->len - mdata->num_xfered;
		mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
		mtk_spi_setup_packet(master);
		cnt = mdata->xfer_len / 4;
		iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
			      trans->tx_buf + mdata->num_xfered, cnt);
		remainder = mdata->xfer_len % 4;
		if (remainder > 0) {
			reg_val = 0;
			memcpy(&reg_val,
			       trans->tx_buf + (cnt * 4) + mdata->num_xfered,
			       remainder);
			writel(reg_val, mdata->base + SPI_TX_DATA_REG);
		}
		mtk_spi_enable_transfer(master);
		spi_debug("The last fifo transfer Done.\n");
		return IRQ_HANDLED;
	}

	/* DMA path: advance past the burst that just completed */
	if (mdata->tx_sgl)
		trans->tx_dma += mdata->xfer_len;
	if (mdata->rx_sgl)
		trans->rx_dma += mdata->xfer_len;
	/* current sg entry exhausted? move to the next one */
	if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) {
		mdata->tx_sgl = sg_next(mdata->tx_sgl);
		if (mdata->tx_sgl) {
			trans->tx_dma = sg_dma_address(mdata->tx_sgl);
			mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
		}
	}
	if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) {
		mdata->rx_sgl = sg_next(mdata->rx_sgl);
		if (mdata->rx_sgl) {
			trans->rx_dma = sg_dma_address(mdata->rx_sgl);
			mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
		}
	}
	if (!mdata->tx_sgl && !mdata->rx_sgl) {
		/* spi disable dma */
		cmd = readl(mdata->base + SPI_CMD_REG);
		cmd &= ~SPI_CMD_TX_DMA;
		cmd &= ~SPI_CMD_RX_DMA;
		writel(cmd, mdata->base + SPI_CMD_REG);
		spi_finalize_current_transfer(master);
		spi_debug("The last DMA transfer Done.\n");
		return IRQ_HANDLED;
	}
	spi_debug("One DMA transfer Done.Start Next\n");
	mtk_spi_update_mdata_len(master);
	mtk_spi_setup_packet(master);
	mtk_spi_setup_dma_addr(master, trans);
	mtk_spi_enable_transfer(master);
	return IRQ_HANDLED;
}
  658. static int mtk_spi_probe(struct platform_device *pdev)
  659. {
  660. struct spi_master *master;
  661. struct mtk_spi *mdata;
  662. const struct of_device_id *of_id;
  663. struct resource *res;
  664. int i, irq, ret, addr_bits, value;
  665. master = spi_alloc_master(&pdev->dev, sizeof(*mdata));
  666. if (!master) {
  667. dev_err(&pdev->dev, "failed to alloc spi master\n");
  668. return -ENOMEM;
  669. }
  670. master->auto_runtime_pm = true;
  671. master->dev.of_node = pdev->dev.of_node;
  672. master->mode_bits = SPI_CPOL | SPI_CPHA;
  673. master->set_cs = mtk_spi_set_cs;
  674. master->prepare_message = mtk_spi_prepare_message;
  675. master->transfer_one = mtk_spi_transfer_one;
  676. master->can_dma = mtk_spi_can_dma;
  677. master->setup = mtk_spi_setup;
  678. of_id = of_match_node(mtk_spi_of_match, pdev->dev.of_node);
  679. if (!of_id) {
  680. dev_err(&pdev->dev, "failed to probe of_node\n");
  681. ret = -EINVAL;
  682. goto err_put_master;
  683. }
  684. mdata = spi_master_get_devdata(master);
  685. mdata->dev_comp = of_id->data;
  686. if (mdata->dev_comp->enhance_timing)
  687. master->mode_bits |= SPI_CS_HIGH;
  688. if (mdata->dev_comp->must_tx)
  689. master->flags = SPI_MASTER_MUST_TX;
  690. ret = of_property_read_u32(pdev->dev.of_node,
  691. "mediatek,kthread-rt", &value);
  692. if (ret < 0)
  693. dev_notice(&pdev->dev,
  694. "No 'mediatek,kthread-rt' property\n");
  695. else {
  696. if (value == 1)
  697. master->rt = true;
  698. else
  699. master->rt = false;
  700. }
  701. if (mdata->dev_comp->need_pad_sel) {
  702. mdata->pad_num = of_property_count_u32_elems(
  703. pdev->dev.of_node,
  704. "mediatek,pad-select");
  705. if (mdata->pad_num < 0) {
  706. dev_err(&pdev->dev,
  707. "No 'mediatek,pad-select' property\n");
  708. ret = -EINVAL;
  709. goto err_put_master;
  710. }
  711. mdata->pad_sel = devm_kmalloc_array(&pdev->dev, mdata->pad_num,
  712. sizeof(u32), GFP_KERNEL);
  713. if (!mdata->pad_sel) {
  714. ret = -ENOMEM;
  715. goto err_put_master;
  716. }
  717. for (i = 0; i < mdata->pad_num; i++) {
  718. of_property_read_u32_index(pdev->dev.of_node,
  719. "mediatek,pad-select",
  720. i, &mdata->pad_sel[i]);
  721. if (mdata->pad_sel[i] > MT8173_SPI_MAX_PAD_SEL) {
  722. dev_err(&pdev->dev, "wrong pad-sel[%d]: %u\n",
  723. i, mdata->pad_sel[i]);
  724. ret = -EINVAL;
  725. goto err_put_master;
  726. }
  727. }
  728. }
  729. pm_qos_add_request(&mdata->spi_qos_request, PM_QOS_CPU_DMA_LATENCY,
  730. PM_QOS_DEFAULT_VALUE);
  731. platform_set_drvdata(pdev, master);
  732. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  733. if (!res) {
  734. ret = -ENODEV;
  735. dev_err(&pdev->dev, "failed to determine base address\n");
  736. goto err_put_master;
  737. }
  738. mdata->base = devm_ioremap_resource(&pdev->dev, res);
  739. if (IS_ERR(mdata->base)) {
  740. ret = PTR_ERR(mdata->base);
  741. goto err_put_master;
  742. }
  743. irq = platform_get_irq(pdev, 0);
  744. if (irq < 0) {
  745. dev_err(&pdev->dev, "failed to get irq (%d)\n", irq);
  746. ret = irq;
  747. goto err_put_master;
  748. }
  749. if (!pdev->dev.dma_mask)
  750. pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
  751. ret = devm_request_irq(&pdev->dev, irq, mtk_spi_interrupt,
  752. IRQF_TRIGGER_NONE, dev_name(&pdev->dev), master);
  753. if (ret) {
  754. dev_err(&pdev->dev, "failed to register irq (%d)\n", ret);
  755. goto err_put_master;
  756. }
  757. mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk");
  758. if (IS_ERR(mdata->parent_clk)) {
  759. ret = PTR_ERR(mdata->parent_clk);
  760. dev_err(&pdev->dev, "failed to get parent-clk: %d\n", ret);
  761. goto err_put_master;
  762. }
  763. mdata->sel_clk = devm_clk_get(&pdev->dev, "sel-clk");
  764. if (IS_ERR(mdata->sel_clk)) {
  765. ret = PTR_ERR(mdata->sel_clk);
  766. dev_err(&pdev->dev, "failed to get sel-clk: %d\n", ret);
  767. goto err_put_master;
  768. }
  769. mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk");
  770. if (IS_ERR(mdata->spi_clk)) {
  771. ret = PTR_ERR(mdata->spi_clk);
  772. dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
  773. goto err_put_master;
  774. }
  775. ret = clk_prepare_enable(mdata->spi_clk);
  776. if (ret < 0) {
  777. dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
  778. goto err_put_master;
  779. }
  780. ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
  781. if (ret < 0) {
  782. dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret);
  783. clk_disable_unprepare(mdata->spi_clk);
  784. goto err_put_master;
  785. }
  786. clk_disable_unprepare(mdata->spi_clk);
  787. pm_runtime_enable(&pdev->dev);
  788. ret = devm_spi_register_master(&pdev->dev, master);
  789. if (ret) {
  790. dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
  791. goto err_disable_runtime_pm;
  792. }
  793. printk("[%s-%d] run to here\n", __func__, __LINE__);
  794. if (mdata->dev_comp->need_pad_sel) {
  795. if (mdata->pad_num != master->num_chipselect) {
  796. dev_err(&pdev->dev,
  797. "pad_num does not match num_chipselect(%d != %d)\n",
  798. mdata->pad_num, master->num_chipselect);
  799. ret = -EINVAL;
  800. goto err_disable_runtime_pm;
  801. }
  802. if (!master->cs_gpios && master->num_chipselect > 1) {
  803. dev_err(&pdev->dev,
  804. "cs_gpios not specified and num_chipselect > 1\n");
  805. ret = -EINVAL;
  806. goto err_disable_runtime_pm;
  807. }
  808. if (master->cs_gpios) {
  809. for (i = 0; i < master->num_chipselect; i++) {
  810. ret = devm_gpio_request(&pdev->dev,
  811. master->cs_gpios[i],
  812. dev_name(&pdev->dev));
  813. if (ret) {
  814. dev_err(&pdev->dev,
  815. "can't get CS GPIO %i\n", i);
  816. goto err_disable_runtime_pm;
  817. }
  818. }
  819. }
  820. }
  821. if (mdata->dev_comp->dma_ext)
  822. addr_bits = DMA_ADDR_EXT_BITS;
  823. else
  824. addr_bits = DMA_ADDR_DEF_BITS;
  825. ret = device_create_file(&pdev->dev, &dev_attr_spi_log);
  826. if (ret)
  827. dev_notice(&pdev->dev, "SPI sysfs_create_file fail, ret:%d\n",
  828. ret);
  829. ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(addr_bits));
  830. if (ret)
  831. dev_notice(&pdev->dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
  832. addr_bits, ret);
  833. return 0;
  834. err_disable_runtime_pm:
  835. pm_runtime_disable(&pdev->dev);
  836. err_put_master:
  837. spi_master_put(master);
  838. return ret;
  839. }
  840. static int mtk_spi_remove(struct platform_device *pdev)
  841. {
  842. struct spi_master *master = platform_get_drvdata(pdev);
  843. struct mtk_spi *mdata = spi_master_get_devdata(master);
  844. pm_qos_remove_request(&mdata->spi_qos_request);
  845. pm_runtime_disable(&pdev->dev);
  846. mtk_spi_reset(mdata);
  847. return 0;
  848. }
  849. #ifdef CONFIG_PM_SLEEP
  850. static int mtk_spi_suspend(struct device *dev)
  851. {
  852. int ret;
  853. struct spi_master *master = dev_get_drvdata(dev);
  854. struct mtk_spi *mdata = spi_master_get_devdata(master);
  855. ret = spi_master_suspend(master);
  856. if (ret)
  857. return ret;
  858. if (!pm_runtime_suspended(dev))
  859. clk_disable_unprepare(mdata->spi_clk);
  860. ret = pinctrl_pm_select_sleep_state(dev);
  861. if (ret < 0)
  862. dev_notice(dev, "failed to set pin sleep_state (%d)\n", ret);
  863. return ret;
  864. }
  865. static int mtk_spi_resume(struct device *dev)
  866. {
  867. int ret;
  868. struct spi_master *master = dev_get_drvdata(dev);
  869. struct mtk_spi *mdata = spi_master_get_devdata(master);
  870. ret = pinctrl_pm_select_default_state(dev);
  871. if (ret < 0)
  872. dev_notice(dev, "failed to set pin default_state (%d)\n", ret);
  873. if (!pm_runtime_suspended(dev)) {
  874. ret = clk_prepare_enable(mdata->spi_clk);
  875. if (ret < 0) {
  876. dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
  877. return ret;
  878. }
  879. }
  880. ret = spi_master_resume(master);
  881. if (ret < 0)
  882. clk_disable_unprepare(mdata->spi_clk);
  883. return ret;
  884. }
  885. #endif /* CONFIG_PM_SLEEP */
  886. #ifdef CONFIG_PM
  887. static int mtk_spi_runtime_suspend(struct device *dev)
  888. {
  889. struct spi_master *master = dev_get_drvdata(dev);
  890. struct mtk_spi *mdata = spi_master_get_devdata(master);
  891. clk_disable_unprepare(mdata->spi_clk);
  892. return 0;
  893. }
  894. static int mtk_spi_runtime_resume(struct device *dev)
  895. {
  896. struct spi_master *master = dev_get_drvdata(dev);
  897. struct mtk_spi *mdata = spi_master_get_devdata(master);
  898. int ret;
  899. ret = clk_prepare_enable(mdata->spi_clk);
  900. if (ret < 0) {
  901. dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
  902. return ret;
  903. }
  904. return 0;
  905. }
  906. #endif /* CONFIG_PM */
/*
 * PM operations: system-sleep hooks (mtk_spi_suspend/mtk_spi_resume,
 * only compiled under CONFIG_PM_SLEEP) plus runtime-PM hooks that gate
 * the SPI functional clock (only compiled under CONFIG_PM). The
 * SET_*_PM_OPS macros expand to nothing when the matching config is off.
 */
static const struct dev_pm_ops mtk_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_suspend, mtk_spi_resume)
	SET_RUNTIME_PM_OPS(mtk_spi_runtime_suspend,
			   mtk_spi_runtime_resume, NULL)
};
/*
 * Platform driver glue: devices are matched via the device tree
 * (mtk_spi_of_match); probe/remove and the PM ops above are wired here.
 */
static struct platform_driver mtk_spi_driver = {
	.driver = {
		.name = "mtk-spi",
		.pm = &mtk_spi_pm,
		.of_match_table = mtk_spi_of_match,
	},
	.probe = mtk_spi_probe,
	.remove = mtk_spi_remove,
};
/* Register the driver; expands to module init/exit boilerplate. */
module_platform_driver(mtk_spi_driver);

/* Module metadata. */
MODULE_DESCRIPTION("MTK SPI Controller driver");
MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mtk-spi");