cadence-quadspi.c 33 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302
  1. /*
  2. * Driver for Cadence QSPI Controller
  3. *
  4. * Copyright Altera Corporation (C) 2012-2014. All rights reserved.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms and conditions of the GNU General Public License,
  8. * version 2, as published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope it will be useful, but WITHOUT
  11. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  13. * more details.
  14. *
  15. * You should have received a copy of the GNU General Public License along with
  16. * this program. If not, see <http://www.gnu.org/licenses/>.
  17. */
  18. #include <linux/clk.h>
  19. #include <linux/completion.h>
  20. #include <linux/delay.h>
  21. #include <linux/err.h>
  22. #include <linux/errno.h>
  23. #include <linux/interrupt.h>
  24. #include <linux/io.h>
  25. #include <linux/jiffies.h>
  26. #include <linux/kernel.h>
  27. #include <linux/module.h>
  28. #include <linux/mtd/mtd.h>
  29. #include <linux/mtd/partitions.h>
  30. #include <linux/mtd/spi-nor.h>
  31. #include <linux/of_device.h>
  32. #include <linux/of.h>
  33. #include <linux/platform_device.h>
  34. #include <linux/sched.h>
  35. #include <linux/spi/spi.h>
  36. #include <linux/timer.h>
#define CQSPI_NAME "cadence-qspi"
#define CQSPI_MAX_CHIPSELECT 16

struct cqspi_st;

/*
 * Per-flash (per chip-select) configuration. One instance exists for each
 * possible chip select in cqspi_st::f_pdata; nor->priv points back here.
 */
struct cqspi_flash_pdata {
	struct spi_nor nor;	/* spi-nor framework handle for this flash */
	struct cqspi_st *cqspi;	/* back-pointer to the owning controller */
	u32 clk_rate;		/* requested SCLK rate for this flash (Hz) */
	u32 read_delay;		/* read-capture delay (READCAPTURE field) */
	u32 tshsl_ns;		/* CS de-assert to re-assert delay (ns) */
	u32 tsd2d_ns;		/* delay between two accesses (ns) */
	u32 tchsh_ns;		/* last clock to CS de-assert delay (ns) */
	u32 tslch_ns;		/* CS assert to first clock delay (ns) */
	u8 inst_width;		/* instruction lanes: CQSPI_INST_TYPE_* */
	u8 addr_width;		/* address lanes: CQSPI_INST_TYPE_* */
	u8 data_width;		/* data lanes: CQSPI_INST_TYPE_* */
	u8 cs;			/* chip-select index of this flash */
	bool registered;	/* true once registered with mtd core */
};
/*
 * Controller state. The current_* fields cache the configuration last
 * programmed into the hardware so cqspi_configure() can skip redundant
 * (and disruptive) reprogramming when the same flash is accessed again.
 */
struct cqspi_st {
	struct platform_device *pdev;
	struct clk *clk;		/* reference clock */
	unsigned int sclk;		/* currently programmed SCLK (Hz) */
	void __iomem *iobase;		/* controller register space */
	void __iomem *ahb_base;		/* AHB data window for indirect I/O */
	struct completion transfer_complete; /* signalled from IRQ handler */
	struct mutex bus_mutex;		/* serialises all bus operations */
	int current_cs;			/* chip select currently routed */
	int current_page_size;		/* page size programmed in SIZE reg */
	int current_erase_size;		/* erase size programmed in SIZE reg */
	int current_addr_width;		/* address width programmed (bytes) */
	unsigned long master_ref_clk_hz; /* reference clock rate (Hz) */
	bool is_decoded_cs;		/* external CS decoder present */
	u32 fifo_depth;			/* SRAM FIFO depth (entries) */
	u32 fifo_width;			/* SRAM FIFO entry width (bytes) */
	u32 trigger_address;		/* indirect trigger address */
	struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
};
/* Operation timeout value */
#define CQSPI_TIMEOUT_MS 500
#define CQSPI_READ_TIMEOUT_MS 10

/* Instruction type (number of lanes used for a transfer phase) */
#define CQSPI_INST_TYPE_SINGLE 0
#define CQSPI_INST_TYPE_DUAL 1
#define CQSPI_INST_TYPE_QUAD 2

/* Dummy-cycle limits of the controller */
#define CQSPI_DUMMY_CLKS_PER_BYTE 8
#define CQSPI_DUMMY_BYTES_MAX 4
#define CQSPI_DUMMY_CLKS_MAX 31

/* Max payload of a software-triggered (STIG) command, in bytes */
#define CQSPI_STIG_DATA_LEN_MAX 8

/* Register map */
#define CQSPI_REG_CONFIG 0x00
#define CQSPI_REG_CONFIG_ENABLE_MASK BIT(0)
#define CQSPI_REG_CONFIG_DECODE_MASK BIT(9)
#define CQSPI_REG_CONFIG_CHIPSELECT_LSB 10
#define CQSPI_REG_CONFIG_DMA_MASK BIT(15)
#define CQSPI_REG_CONFIG_BAUD_LSB 19
#define CQSPI_REG_CONFIG_IDLE_LSB 31
#define CQSPI_REG_CONFIG_CHIPSELECT_MASK 0xF
#define CQSPI_REG_CONFIG_BAUD_MASK 0xF

#define CQSPI_REG_RD_INSTR 0x04
#define CQSPI_REG_RD_INSTR_OPCODE_LSB 0
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB 8
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB 12
#define CQSPI_REG_RD_INSTR_TYPE_DATA_LSB 16
#define CQSPI_REG_RD_INSTR_MODE_EN_LSB 20
#define CQSPI_REG_RD_INSTR_DUMMY_LSB 24
#define CQSPI_REG_RD_INSTR_TYPE_INSTR_MASK 0x3
#define CQSPI_REG_RD_INSTR_TYPE_ADDR_MASK 0x3
#define CQSPI_REG_RD_INSTR_TYPE_DATA_MASK 0x3
#define CQSPI_REG_RD_INSTR_DUMMY_MASK 0x1F

#define CQSPI_REG_WR_INSTR 0x08
#define CQSPI_REG_WR_INSTR_OPCODE_LSB 0
#define CQSPI_REG_WR_INSTR_TYPE_ADDR_LSB 12
#define CQSPI_REG_WR_INSTR_TYPE_DATA_LSB 16

#define CQSPI_REG_DELAY 0x0C
#define CQSPI_REG_DELAY_TSLCH_LSB 0
#define CQSPI_REG_DELAY_TCHSH_LSB 8
#define CQSPI_REG_DELAY_TSD2D_LSB 16
#define CQSPI_REG_DELAY_TSHSL_LSB 24
#define CQSPI_REG_DELAY_TSLCH_MASK 0xFF
#define CQSPI_REG_DELAY_TCHSH_MASK 0xFF
#define CQSPI_REG_DELAY_TSD2D_MASK 0xFF
#define CQSPI_REG_DELAY_TSHSL_MASK 0xFF

#define CQSPI_REG_READCAPTURE 0x10
#define CQSPI_REG_READCAPTURE_BYPASS_LSB 0
#define CQSPI_REG_READCAPTURE_DELAY_LSB 1
#define CQSPI_REG_READCAPTURE_DELAY_MASK 0xF

#define CQSPI_REG_SIZE 0x14
#define CQSPI_REG_SIZE_ADDRESS_LSB 0
#define CQSPI_REG_SIZE_PAGE_LSB 4
#define CQSPI_REG_SIZE_BLOCK_LSB 16
#define CQSPI_REG_SIZE_ADDRESS_MASK 0xF
#define CQSPI_REG_SIZE_PAGE_MASK 0xFFF
#define CQSPI_REG_SIZE_BLOCK_MASK 0x3F

#define CQSPI_REG_SRAMPARTITION 0x18
#define CQSPI_REG_INDIRECTTRIGGER 0x1C

#define CQSPI_REG_DMA 0x20
#define CQSPI_REG_DMA_SINGLE_LSB 0
#define CQSPI_REG_DMA_BURST_LSB 8
#define CQSPI_REG_DMA_SINGLE_MASK 0xFF
#define CQSPI_REG_DMA_BURST_MASK 0xFF

#define CQSPI_REG_REMAP 0x24
#define CQSPI_REG_MODE_BIT 0x28

#define CQSPI_REG_SDRAMLEVEL 0x2C
#define CQSPI_REG_SDRAMLEVEL_RD_LSB 0
#define CQSPI_REG_SDRAMLEVEL_WR_LSB 16
#define CQSPI_REG_SDRAMLEVEL_RD_MASK 0xFFFF
#define CQSPI_REG_SDRAMLEVEL_WR_MASK 0xFFFF

#define CQSPI_REG_IRQSTATUS 0x40
#define CQSPI_REG_IRQMASK 0x44

#define CQSPI_REG_INDIRECTRD 0x60
#define CQSPI_REG_INDIRECTRD_START_MASK BIT(0)
#define CQSPI_REG_INDIRECTRD_CANCEL_MASK BIT(1)
#define CQSPI_REG_INDIRECTRD_DONE_MASK BIT(5)

#define CQSPI_REG_INDIRECTRDWATERMARK 0x64
#define CQSPI_REG_INDIRECTRDSTARTADDR 0x68
#define CQSPI_REG_INDIRECTRDBYTES 0x6C

#define CQSPI_REG_CMDCTRL 0x90
#define CQSPI_REG_CMDCTRL_EXECUTE_MASK BIT(0)
#define CQSPI_REG_CMDCTRL_INPROGRESS_MASK BIT(1)
#define CQSPI_REG_CMDCTRL_WR_BYTES_LSB 12
#define CQSPI_REG_CMDCTRL_WR_EN_LSB 15
#define CQSPI_REG_CMDCTRL_ADD_BYTES_LSB 16
#define CQSPI_REG_CMDCTRL_ADDR_EN_LSB 19
#define CQSPI_REG_CMDCTRL_RD_BYTES_LSB 20
#define CQSPI_REG_CMDCTRL_RD_EN_LSB 23
#define CQSPI_REG_CMDCTRL_OPCODE_LSB 24
#define CQSPI_REG_CMDCTRL_WR_BYTES_MASK 0x7
#define CQSPI_REG_CMDCTRL_ADD_BYTES_MASK 0x3
#define CQSPI_REG_CMDCTRL_RD_BYTES_MASK 0x7

#define CQSPI_REG_INDIRECTWR 0x70
#define CQSPI_REG_INDIRECTWR_START_MASK BIT(0)
#define CQSPI_REG_INDIRECTWR_CANCEL_MASK BIT(1)
#define CQSPI_REG_INDIRECTWR_DONE_MASK BIT(5)

#define CQSPI_REG_INDIRECTWRWATERMARK 0x74
#define CQSPI_REG_INDIRECTWRSTARTADDR 0x78
#define CQSPI_REG_INDIRECTWRBYTES 0x7C

#define CQSPI_REG_CMDADDRESS 0x94
#define CQSPI_REG_CMDREADDATALOWER 0xA0
#define CQSPI_REG_CMDREADDATAUPPER 0xA4
#define CQSPI_REG_CMDWRITEDATALOWER 0xA8
#define CQSPI_REG_CMDWRITEDATAUPPER 0xAC

/* Interrupt status bits */
#define CQSPI_REG_IRQ_MODE_ERR BIT(0)
#define CQSPI_REG_IRQ_UNDERFLOW BIT(1)
#define CQSPI_REG_IRQ_IND_COMP BIT(2)
#define CQSPI_REG_IRQ_IND_RD_REJECT BIT(3)
#define CQSPI_REG_IRQ_WR_PROTECTED_ERR BIT(4)
#define CQSPI_REG_IRQ_ILLEGAL_AHB_ERR BIT(5)
#define CQSPI_REG_IRQ_WATERMARK BIT(6)
#define CQSPI_REG_IRQ_IND_SRAM_FULL BIT(12)

/* Interrupt sources that drive an indirect read to completion */
#define CQSPI_IRQ_MASK_RD (CQSPI_REG_IRQ_WATERMARK | \
			   CQSPI_REG_IRQ_IND_SRAM_FULL | \
			   CQSPI_REG_IRQ_IND_COMP)

/* Interrupt sources that drive an indirect write to completion */
#define CQSPI_IRQ_MASK_WR (CQSPI_REG_IRQ_IND_COMP | \
			   CQSPI_REG_IRQ_WATERMARK | \
			   CQSPI_REG_IRQ_UNDERFLOW)

#define CQSPI_IRQ_STATUS_MASK 0x1FFFF
  194. static int cqspi_wait_for_bit(void __iomem *reg, const u32 mask, bool clear)
  195. {
  196. unsigned long end = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
  197. u32 val;
  198. while (1) {
  199. val = readl(reg);
  200. if (clear)
  201. val = ~val;
  202. val &= mask;
  203. if (val == mask)
  204. return 0;
  205. if (time_after(jiffies, end))
  206. return -ETIMEDOUT;
  207. }
  208. }
  209. static bool cqspi_is_idle(struct cqspi_st *cqspi)
  210. {
  211. u32 reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
  212. return reg & (1 << CQSPI_REG_CONFIG_IDLE_LSB);
  213. }
  214. static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi)
  215. {
  216. u32 reg = readl(cqspi->iobase + CQSPI_REG_SDRAMLEVEL);
  217. reg >>= CQSPI_REG_SDRAMLEVEL_RD_LSB;
  218. return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
  219. }
  220. static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
  221. {
  222. struct cqspi_st *cqspi = dev;
  223. unsigned int irq_status;
  224. /* Read interrupt status */
  225. irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS);
  226. /* Clear interrupt */
  227. writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);
  228. irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
  229. if (irq_status)
  230. complete(&cqspi->transfer_complete);
  231. return IRQ_HANDLED;
  232. }
  233. static unsigned int cqspi_calc_rdreg(struct spi_nor *nor, const u8 opcode)
  234. {
  235. struct cqspi_flash_pdata *f_pdata = nor->priv;
  236. u32 rdreg = 0;
  237. rdreg |= f_pdata->inst_width << CQSPI_REG_RD_INSTR_TYPE_INSTR_LSB;
  238. rdreg |= f_pdata->addr_width << CQSPI_REG_RD_INSTR_TYPE_ADDR_LSB;
  239. rdreg |= f_pdata->data_width << CQSPI_REG_RD_INSTR_TYPE_DATA_LSB;
  240. return rdreg;
  241. }
  242. static int cqspi_wait_idle(struct cqspi_st *cqspi)
  243. {
  244. const unsigned int poll_idle_retry = 3;
  245. unsigned int count = 0;
  246. unsigned long timeout;
  247. timeout = jiffies + msecs_to_jiffies(CQSPI_TIMEOUT_MS);
  248. while (1) {
  249. /*
  250. * Read few times in succession to ensure the controller
  251. * is indeed idle, that is, the bit does not transition
  252. * low again.
  253. */
  254. if (cqspi_is_idle(cqspi))
  255. count++;
  256. else
  257. count = 0;
  258. if (count >= poll_idle_retry)
  259. return 0;
  260. if (time_after(jiffies, timeout)) {
  261. /* Timeout, in busy mode. */
  262. dev_err(&cqspi->pdev->dev,
  263. "QSPI is still busy after %dms timeout.\n",
  264. CQSPI_TIMEOUT_MS);
  265. return -ETIMEDOUT;
  266. }
  267. cpu_relax();
  268. }
  269. }
  270. static int cqspi_exec_flash_cmd(struct cqspi_st *cqspi, unsigned int reg)
  271. {
  272. void __iomem *reg_base = cqspi->iobase;
  273. int ret;
  274. /* Write the CMDCTRL without start execution. */
  275. writel(reg, reg_base + CQSPI_REG_CMDCTRL);
  276. /* Start execute */
  277. reg |= CQSPI_REG_CMDCTRL_EXECUTE_MASK;
  278. writel(reg, reg_base + CQSPI_REG_CMDCTRL);
  279. /* Polling for completion. */
  280. ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_CMDCTRL,
  281. CQSPI_REG_CMDCTRL_INPROGRESS_MASK, 1);
  282. if (ret) {
  283. dev_err(&cqspi->pdev->dev,
  284. "Flash command execution timed out.\n");
  285. return ret;
  286. }
  287. /* Polling QSPI idle status. */
  288. return cqspi_wait_idle(cqspi);
  289. }
  290. static int cqspi_command_read(struct spi_nor *nor,
  291. const u8 *txbuf, const unsigned n_tx,
  292. u8 *rxbuf, const unsigned n_rx)
  293. {
  294. struct cqspi_flash_pdata *f_pdata = nor->priv;
  295. struct cqspi_st *cqspi = f_pdata->cqspi;
  296. void __iomem *reg_base = cqspi->iobase;
  297. unsigned int rdreg;
  298. unsigned int reg;
  299. unsigned int read_len;
  300. int status;
  301. if (!n_rx || n_rx > CQSPI_STIG_DATA_LEN_MAX || !rxbuf) {
  302. dev_err(nor->dev, "Invalid input argument, len %d rxbuf 0x%p\n",
  303. n_rx, rxbuf);
  304. return -EINVAL;
  305. }
  306. reg = txbuf[0] << CQSPI_REG_CMDCTRL_OPCODE_LSB;
  307. rdreg = cqspi_calc_rdreg(nor, txbuf[0]);
  308. writel(rdreg, reg_base + CQSPI_REG_RD_INSTR);
  309. reg |= (0x1 << CQSPI_REG_CMDCTRL_RD_EN_LSB);
  310. /* 0 means 1 byte. */
  311. reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
  312. << CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
  313. status = cqspi_exec_flash_cmd(cqspi, reg);
  314. if (status)
  315. return status;
  316. reg = readl(reg_base + CQSPI_REG_CMDREADDATALOWER);
  317. /* Put the read value into rx_buf */
  318. read_len = (n_rx > 4) ? 4 : n_rx;
  319. memcpy(rxbuf, &reg, read_len);
  320. rxbuf += read_len;
  321. if (n_rx > 4) {
  322. reg = readl(reg_base + CQSPI_REG_CMDREADDATAUPPER);
  323. read_len = n_rx - read_len;
  324. memcpy(rxbuf, &reg, read_len);
  325. }
  326. return 0;
  327. }
  328. static int cqspi_command_write(struct spi_nor *nor, const u8 opcode,
  329. const u8 *txbuf, const unsigned n_tx)
  330. {
  331. struct cqspi_flash_pdata *f_pdata = nor->priv;
  332. struct cqspi_st *cqspi = f_pdata->cqspi;
  333. void __iomem *reg_base = cqspi->iobase;
  334. unsigned int reg;
  335. unsigned int data;
  336. int ret;
  337. if (n_tx > 4 || (n_tx && !txbuf)) {
  338. dev_err(nor->dev,
  339. "Invalid input argument, cmdlen %d txbuf 0x%p\n",
  340. n_tx, txbuf);
  341. return -EINVAL;
  342. }
  343. reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
  344. if (n_tx) {
  345. reg |= (0x1 << CQSPI_REG_CMDCTRL_WR_EN_LSB);
  346. reg |= ((n_tx - 1) & CQSPI_REG_CMDCTRL_WR_BYTES_MASK)
  347. << CQSPI_REG_CMDCTRL_WR_BYTES_LSB;
  348. data = 0;
  349. memcpy(&data, txbuf, n_tx);
  350. writel(data, reg_base + CQSPI_REG_CMDWRITEDATALOWER);
  351. }
  352. ret = cqspi_exec_flash_cmd(cqspi, reg);
  353. return ret;
  354. }
  355. static int cqspi_command_write_addr(struct spi_nor *nor,
  356. const u8 opcode, const unsigned int addr)
  357. {
  358. struct cqspi_flash_pdata *f_pdata = nor->priv;
  359. struct cqspi_st *cqspi = f_pdata->cqspi;
  360. void __iomem *reg_base = cqspi->iobase;
  361. unsigned int reg;
  362. reg = opcode << CQSPI_REG_CMDCTRL_OPCODE_LSB;
  363. reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
  364. reg |= ((nor->addr_width - 1) & CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
  365. << CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
  366. writel(addr, reg_base + CQSPI_REG_CMDADDRESS);
  367. return cqspi_exec_flash_cmd(cqspi, reg);
  368. }
/*
 * Configure the controller for an indirect read starting at @from_addr:
 * program the start address, read opcode, lane widths, dummy cycles,
 * mode bits and address width. Always returns 0.
 */
static int cqspi_indirect_read_setup(struct spi_nor *nor,
				     const unsigned int from_addr)
{
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int dummy_clk = 0;
	unsigned int reg;

	writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);

	reg = nor->read_opcode << CQSPI_REG_RD_INSTR_OPCODE_LSB;
	reg |= cqspi_calc_rdreg(nor, nor->read_opcode);

	/* Setup dummy clock cycles */
	dummy_clk = nor->read_dummy;
	if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
		dummy_clk = CQSPI_DUMMY_CLKS_MAX;

	/* A full byte (8+ clocks) of dummies is sent as a mode byte. */
	if (dummy_clk / 8) {
		reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
		/* Set mode bits high to ensure chip doesn't enter XIP */
		writel(0xFF, reg_base + CQSPI_REG_MODE_BIT);

		/* Need to subtract the mode byte (8 clocks). */
		if (f_pdata->inst_width != CQSPI_INST_TYPE_QUAD)
			dummy_clk -= 8;

		/* Remaining dummy cycles go in the DUMMY field. */
		if (dummy_clk)
			reg |= (dummy_clk & CQSPI_REG_RD_INSTR_DUMMY_MASK)
			       << CQSPI_REG_RD_INSTR_DUMMY_LSB;
	}

	writel(reg, reg_base + CQSPI_REG_RD_INSTR);

	/* Set address width */
	reg = readl(reg_base + CQSPI_REG_SIZE);
	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
	reg |= (nor->addr_width - 1);
	writel(reg, reg_base + CQSPI_REG_SIZE);
	return 0;
}
  403. static int cqspi_indirect_read_execute(struct spi_nor *nor,
  404. u8 *rxbuf, const unsigned n_rx)
  405. {
  406. struct cqspi_flash_pdata *f_pdata = nor->priv;
  407. struct cqspi_st *cqspi = f_pdata->cqspi;
  408. void __iomem *reg_base = cqspi->iobase;
  409. void __iomem *ahb_base = cqspi->ahb_base;
  410. unsigned int remaining = n_rx;
  411. unsigned int bytes_to_read = 0;
  412. int ret = 0;
  413. writel(remaining, reg_base + CQSPI_REG_INDIRECTRDBYTES);
  414. /* Clear all interrupts. */
  415. writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
  416. writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
  417. reinit_completion(&cqspi->transfer_complete);
  418. writel(CQSPI_REG_INDIRECTRD_START_MASK,
  419. reg_base + CQSPI_REG_INDIRECTRD);
  420. while (remaining > 0) {
  421. ret = wait_for_completion_timeout(&cqspi->transfer_complete,
  422. msecs_to_jiffies
  423. (CQSPI_READ_TIMEOUT_MS));
  424. bytes_to_read = cqspi_get_rd_sram_level(cqspi);
  425. if (!ret && bytes_to_read == 0) {
  426. dev_err(nor->dev, "Indirect read timeout, no bytes\n");
  427. ret = -ETIMEDOUT;
  428. goto failrd;
  429. }
  430. while (bytes_to_read != 0) {
  431. bytes_to_read *= cqspi->fifo_width;
  432. bytes_to_read = bytes_to_read > remaining ?
  433. remaining : bytes_to_read;
  434. readsl(ahb_base, rxbuf, DIV_ROUND_UP(bytes_to_read, 4));
  435. rxbuf += bytes_to_read;
  436. remaining -= bytes_to_read;
  437. bytes_to_read = cqspi_get_rd_sram_level(cqspi);
  438. }
  439. if (remaining > 0)
  440. reinit_completion(&cqspi->transfer_complete);
  441. }
  442. /* Check indirect done status */
  443. ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTRD,
  444. CQSPI_REG_INDIRECTRD_DONE_MASK, 0);
  445. if (ret) {
  446. dev_err(nor->dev,
  447. "Indirect read completion error (%i)\n", ret);
  448. goto failrd;
  449. }
  450. /* Disable interrupt */
  451. writel(0, reg_base + CQSPI_REG_IRQMASK);
  452. /* Clear indirect completion status */
  453. writel(CQSPI_REG_INDIRECTRD_DONE_MASK, reg_base + CQSPI_REG_INDIRECTRD);
  454. return 0;
  455. failrd:
  456. /* Disable interrupt */
  457. writel(0, reg_base + CQSPI_REG_IRQMASK);
  458. /* Cancel the indirect read */
  459. writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
  460. reg_base + CQSPI_REG_INDIRECTRD);
  461. return ret;
  462. }
  463. static int cqspi_indirect_write_setup(struct spi_nor *nor,
  464. const unsigned int to_addr)
  465. {
  466. unsigned int reg;
  467. struct cqspi_flash_pdata *f_pdata = nor->priv;
  468. struct cqspi_st *cqspi = f_pdata->cqspi;
  469. void __iomem *reg_base = cqspi->iobase;
  470. /* Set opcode. */
  471. reg = nor->program_opcode << CQSPI_REG_WR_INSTR_OPCODE_LSB;
  472. writel(reg, reg_base + CQSPI_REG_WR_INSTR);
  473. reg = cqspi_calc_rdreg(nor, nor->program_opcode);
  474. writel(reg, reg_base + CQSPI_REG_RD_INSTR);
  475. writel(to_addr, reg_base + CQSPI_REG_INDIRECTWRSTARTADDR);
  476. reg = readl(reg_base + CQSPI_REG_SIZE);
  477. reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
  478. reg |= (nor->addr_width - 1);
  479. writel(reg, reg_base + CQSPI_REG_SIZE);
  480. return 0;
  481. }
/*
 * Run an indirect write of @n_tx bytes from @txbuf. Data is pushed into
 * the controller through the AHB window one page at a time; completion of
 * each chunk is signalled via the write IRQ sources. Returns 0 on success
 * or a negative error code.
 */
static int cqspi_indirect_write_execute(struct spi_nor *nor,
					const u8 *txbuf, const unsigned n_tx)
{
	const unsigned int page_size = nor->page_size;
	struct cqspi_flash_pdata *f_pdata = nor->priv;
	struct cqspi_st *cqspi = f_pdata->cqspi;
	void __iomem *reg_base = cqspi->iobase;
	unsigned int remaining = n_tx;
	unsigned int write_bytes;
	int ret;

	writel(remaining, reg_base + CQSPI_REG_INDIRECTWRBYTES);

	/* Clear all interrupts. */
	writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);

	writel(CQSPI_IRQ_MASK_WR, reg_base + CQSPI_REG_IRQMASK);

	reinit_completion(&cqspi->transfer_complete);
	writel(CQSPI_REG_INDIRECTWR_START_MASK,
	       reg_base + CQSPI_REG_INDIRECTWR);

	while (remaining > 0) {
		/* Push at most one flash page per iteration. */
		write_bytes = remaining > page_size ? page_size : remaining;
		writesl(cqspi->ahb_base, txbuf, DIV_ROUND_UP(write_bytes, 4));

		/* Wait for the IRQ handler to signal chunk completion. */
		ret = wait_for_completion_timeout(&cqspi->transfer_complete,
						  msecs_to_jiffies
						  (CQSPI_TIMEOUT_MS));
		if (!ret) {
			dev_err(nor->dev, "Indirect write timeout\n");
			ret = -ETIMEDOUT;
			goto failwr;
		}

		txbuf += write_bytes;
		remaining -= write_bytes;

		if (remaining > 0)
			reinit_completion(&cqspi->transfer_complete);
	}

	/* Check indirect done status */
	ret = cqspi_wait_for_bit(reg_base + CQSPI_REG_INDIRECTWR,
				 CQSPI_REG_INDIRECTWR_DONE_MASK, 0);
	if (ret) {
		dev_err(nor->dev,
			"Indirect write completion error (%i)\n", ret);
		goto failwr;
	}

	/* Disable interrupt. */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Clear indirect completion status */
	writel(CQSPI_REG_INDIRECTWR_DONE_MASK, reg_base + CQSPI_REG_INDIRECTWR);

	cqspi_wait_idle(cqspi);

	return 0;

failwr:
	/* Disable interrupt. */
	writel(0, reg_base + CQSPI_REG_IRQMASK);

	/* Cancel the indirect write */
	writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
	       reg_base + CQSPI_REG_INDIRECTWR);
	return ret;
}
  537. static void cqspi_chipselect(struct spi_nor *nor)
  538. {
  539. struct cqspi_flash_pdata *f_pdata = nor->priv;
  540. struct cqspi_st *cqspi = f_pdata->cqspi;
  541. void __iomem *reg_base = cqspi->iobase;
  542. unsigned int chip_select = f_pdata->cs;
  543. unsigned int reg;
  544. reg = readl(reg_base + CQSPI_REG_CONFIG);
  545. if (cqspi->is_decoded_cs) {
  546. reg |= CQSPI_REG_CONFIG_DECODE_MASK;
  547. } else {
  548. reg &= ~CQSPI_REG_CONFIG_DECODE_MASK;
  549. /* Convert CS if without decoder.
  550. * CS0 to 4b'1110
  551. * CS1 to 4b'1101
  552. * CS2 to 4b'1011
  553. * CS3 to 4b'0111
  554. */
  555. chip_select = 0xF & ~(1 << chip_select);
  556. }
  557. reg &= ~(CQSPI_REG_CONFIG_CHIPSELECT_MASK
  558. << CQSPI_REG_CONFIG_CHIPSELECT_LSB);
  559. reg |= (chip_select & CQSPI_REG_CONFIG_CHIPSELECT_MASK)
  560. << CQSPI_REG_CONFIG_CHIPSELECT_LSB;
  561. writel(reg, reg_base + CQSPI_REG_CONFIG);
  562. }
  563. static void cqspi_configure_cs_and_sizes(struct spi_nor *nor)
  564. {
  565. struct cqspi_flash_pdata *f_pdata = nor->priv;
  566. struct cqspi_st *cqspi = f_pdata->cqspi;
  567. void __iomem *iobase = cqspi->iobase;
  568. unsigned int reg;
  569. /* configure page size and block size. */
  570. reg = readl(iobase + CQSPI_REG_SIZE);
  571. reg &= ~(CQSPI_REG_SIZE_PAGE_MASK << CQSPI_REG_SIZE_PAGE_LSB);
  572. reg &= ~(CQSPI_REG_SIZE_BLOCK_MASK << CQSPI_REG_SIZE_BLOCK_LSB);
  573. reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
  574. reg |= (nor->page_size << CQSPI_REG_SIZE_PAGE_LSB);
  575. reg |= (ilog2(nor->mtd.erasesize) << CQSPI_REG_SIZE_BLOCK_LSB);
  576. reg |= (nor->addr_width - 1);
  577. writel(reg, iobase + CQSPI_REG_SIZE);
  578. /* configure the chip select */
  579. cqspi_chipselect(nor);
  580. /* Store the new configuration of the controller */
  581. cqspi->current_page_size = nor->page_size;
  582. cqspi->current_erase_size = nor->mtd.erasesize;
  583. cqspi->current_addr_width = nor->addr_width;
  584. }
  585. static unsigned int calculate_ticks_for_ns(const unsigned int ref_clk_hz,
  586. const unsigned int ns_val)
  587. {
  588. unsigned int ticks;
  589. ticks = ref_clk_hz / 1000; /* kHz */
  590. ticks = DIV_ROUND_UP(ticks * ns_val, 1000000);
  591. return ticks;
  592. }
  593. static void cqspi_delay(struct spi_nor *nor)
  594. {
  595. struct cqspi_flash_pdata *f_pdata = nor->priv;
  596. struct cqspi_st *cqspi = f_pdata->cqspi;
  597. void __iomem *iobase = cqspi->iobase;
  598. const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
  599. unsigned int tshsl, tchsh, tslch, tsd2d;
  600. unsigned int reg;
  601. unsigned int tsclk;
  602. /* calculate the number of ref ticks for one sclk tick */
  603. tsclk = DIV_ROUND_UP(ref_clk_hz, cqspi->sclk);
  604. tshsl = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tshsl_ns);
  605. /* this particular value must be at least one sclk */
  606. if (tshsl < tsclk)
  607. tshsl = tsclk;
  608. tchsh = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tchsh_ns);
  609. tslch = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tslch_ns);
  610. tsd2d = calculate_ticks_for_ns(ref_clk_hz, f_pdata->tsd2d_ns);
  611. reg = (tshsl & CQSPI_REG_DELAY_TSHSL_MASK)
  612. << CQSPI_REG_DELAY_TSHSL_LSB;
  613. reg |= (tchsh & CQSPI_REG_DELAY_TCHSH_MASK)
  614. << CQSPI_REG_DELAY_TCHSH_LSB;
  615. reg |= (tslch & CQSPI_REG_DELAY_TSLCH_MASK)
  616. << CQSPI_REG_DELAY_TSLCH_LSB;
  617. reg |= (tsd2d & CQSPI_REG_DELAY_TSD2D_MASK)
  618. << CQSPI_REG_DELAY_TSD2D_LSB;
  619. writel(reg, iobase + CQSPI_REG_DELAY);
  620. }
  621. static void cqspi_config_baudrate_div(struct cqspi_st *cqspi)
  622. {
  623. const unsigned int ref_clk_hz = cqspi->master_ref_clk_hz;
  624. void __iomem *reg_base = cqspi->iobase;
  625. u32 reg, div;
  626. /* Recalculate the baudrate divisor based on QSPI specification. */
  627. div = DIV_ROUND_UP(ref_clk_hz, 2 * cqspi->sclk) - 1;
  628. reg = readl(reg_base + CQSPI_REG_CONFIG);
  629. reg &= ~(CQSPI_REG_CONFIG_BAUD_MASK << CQSPI_REG_CONFIG_BAUD_LSB);
  630. reg |= (div & CQSPI_REG_CONFIG_BAUD_MASK) << CQSPI_REG_CONFIG_BAUD_LSB;
  631. writel(reg, reg_base + CQSPI_REG_CONFIG);
  632. }
  633. static void cqspi_readdata_capture(struct cqspi_st *cqspi,
  634. const unsigned int bypass,
  635. const unsigned int delay)
  636. {
  637. void __iomem *reg_base = cqspi->iobase;
  638. unsigned int reg;
  639. reg = readl(reg_base + CQSPI_REG_READCAPTURE);
  640. if (bypass)
  641. reg |= (1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
  642. else
  643. reg &= ~(1 << CQSPI_REG_READCAPTURE_BYPASS_LSB);
  644. reg &= ~(CQSPI_REG_READCAPTURE_DELAY_MASK
  645. << CQSPI_REG_READCAPTURE_DELAY_LSB);
  646. reg |= (delay & CQSPI_REG_READCAPTURE_DELAY_MASK)
  647. << CQSPI_REG_READCAPTURE_DELAY_LSB;
  648. writel(reg, reg_base + CQSPI_REG_READCAPTURE);
  649. }
  650. static void cqspi_controller_enable(struct cqspi_st *cqspi, bool enable)
  651. {
  652. void __iomem *reg_base = cqspi->iobase;
  653. unsigned int reg;
  654. reg = readl(reg_base + CQSPI_REG_CONFIG);
  655. if (enable)
  656. reg |= CQSPI_REG_CONFIG_ENABLE_MASK;
  657. else
  658. reg &= ~CQSPI_REG_CONFIG_ENABLE_MASK;
  659. writel(reg, reg_base + CQSPI_REG_CONFIG);
  660. }
  661. static void cqspi_configure(struct spi_nor *nor)
  662. {
  663. struct cqspi_flash_pdata *f_pdata = nor->priv;
  664. struct cqspi_st *cqspi = f_pdata->cqspi;
  665. const unsigned int sclk = f_pdata->clk_rate;
  666. int switch_cs = (cqspi->current_cs != f_pdata->cs);
  667. int switch_ck = (cqspi->sclk != sclk);
  668. if ((cqspi->current_page_size != nor->page_size) ||
  669. (cqspi->current_erase_size != nor->mtd.erasesize) ||
  670. (cqspi->current_addr_width != nor->addr_width))
  671. switch_cs = 1;
  672. if (switch_cs || switch_ck)
  673. cqspi_controller_enable(cqspi, 0);
  674. /* Switch chip select. */
  675. if (switch_cs) {
  676. cqspi->current_cs = f_pdata->cs;
  677. cqspi_configure_cs_and_sizes(nor);
  678. }
  679. /* Setup baudrate divisor and delays */
  680. if (switch_ck) {
  681. cqspi->sclk = sclk;
  682. cqspi_config_baudrate_div(cqspi);
  683. cqspi_delay(nor);
  684. cqspi_readdata_capture(cqspi, 1, f_pdata->read_delay);
  685. }
  686. if (switch_cs || switch_ck)
  687. cqspi_controller_enable(cqspi, 1);
  688. }
  689. static int cqspi_set_protocol(struct spi_nor *nor, const int read)
  690. {
  691. struct cqspi_flash_pdata *f_pdata = nor->priv;
  692. f_pdata->inst_width = CQSPI_INST_TYPE_SINGLE;
  693. f_pdata->addr_width = CQSPI_INST_TYPE_SINGLE;
  694. f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
  695. if (read) {
  696. switch (nor->flash_read) {
  697. case SPI_NOR_NORMAL:
  698. case SPI_NOR_FAST:
  699. f_pdata->data_width = CQSPI_INST_TYPE_SINGLE;
  700. break;
  701. case SPI_NOR_DUAL:
  702. f_pdata->data_width = CQSPI_INST_TYPE_DUAL;
  703. break;
  704. case SPI_NOR_QUAD:
  705. f_pdata->data_width = CQSPI_INST_TYPE_QUAD;
  706. break;
  707. default:
  708. return -EINVAL;
  709. }
  710. }
  711. cqspi_configure(nor);
  712. return 0;
  713. }
  714. static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
  715. size_t len, const u_char *buf)
  716. {
  717. int ret;
  718. ret = cqspi_set_protocol(nor, 0);
  719. if (ret)
  720. return ret;
  721. ret = cqspi_indirect_write_setup(nor, to);
  722. if (ret)
  723. return ret;
  724. ret = cqspi_indirect_write_execute(nor, buf, len);
  725. if (ret)
  726. return ret;
  727. return (ret < 0) ? ret : len;
  728. }
  729. static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
  730. size_t len, u_char *buf)
  731. {
  732. int ret;
  733. ret = cqspi_set_protocol(nor, 1);
  734. if (ret)
  735. return ret;
  736. ret = cqspi_indirect_read_setup(nor, from);
  737. if (ret)
  738. return ret;
  739. ret = cqspi_indirect_read_execute(nor, buf, len);
  740. if (ret)
  741. return ret;
  742. return (ret < 0) ? ret : len;
  743. }
  744. static int cqspi_erase(struct spi_nor *nor, loff_t offs)
  745. {
  746. int ret;
  747. ret = cqspi_set_protocol(nor, 0);
  748. if (ret)
  749. return ret;
  750. /* Send write enable, then erase commands. */
  751. ret = nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
  752. if (ret)
  753. return ret;
  754. /* Set up command buffer. */
  755. ret = cqspi_command_write_addr(nor, nor->erase_opcode, offs);
  756. if (ret)
  757. return ret;
  758. return 0;
  759. }
  760. static int cqspi_prep(struct spi_nor *nor, enum spi_nor_ops ops)
  761. {
  762. struct cqspi_flash_pdata *f_pdata = nor->priv;
  763. struct cqspi_st *cqspi = f_pdata->cqspi;
  764. mutex_lock(&cqspi->bus_mutex);
  765. return 0;
  766. }
  767. static void cqspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
  768. {
  769. struct cqspi_flash_pdata *f_pdata = nor->priv;
  770. struct cqspi_st *cqspi = f_pdata->cqspi;
  771. mutex_unlock(&cqspi->bus_mutex);
  772. }
  773. static int cqspi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
  774. {
  775. int ret;
  776. ret = cqspi_set_protocol(nor, 0);
  777. if (!ret)
  778. ret = cqspi_command_read(nor, &opcode, 1, buf, len);
  779. return ret;
  780. }
  781. static int cqspi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
  782. {
  783. int ret;
  784. ret = cqspi_set_protocol(nor, 0);
  785. if (!ret)
  786. ret = cqspi_command_write(nor, opcode, buf, len);
  787. return ret;
  788. }
  789. static int cqspi_of_get_flash_pdata(struct platform_device *pdev,
  790. struct cqspi_flash_pdata *f_pdata,
  791. struct device_node *np)
  792. {
  793. if (of_property_read_u32(np, "cdns,read-delay", &f_pdata->read_delay)) {
  794. dev_err(&pdev->dev, "couldn't determine read-delay\n");
  795. return -ENXIO;
  796. }
  797. if (of_property_read_u32(np, "cdns,tshsl-ns", &f_pdata->tshsl_ns)) {
  798. dev_err(&pdev->dev, "couldn't determine tshsl-ns\n");
  799. return -ENXIO;
  800. }
  801. if (of_property_read_u32(np, "cdns,tsd2d-ns", &f_pdata->tsd2d_ns)) {
  802. dev_err(&pdev->dev, "couldn't determine tsd2d-ns\n");
  803. return -ENXIO;
  804. }
  805. if (of_property_read_u32(np, "cdns,tchsh-ns", &f_pdata->tchsh_ns)) {
  806. dev_err(&pdev->dev, "couldn't determine tchsh-ns\n");
  807. return -ENXIO;
  808. }
  809. if (of_property_read_u32(np, "cdns,tslch-ns", &f_pdata->tslch_ns)) {
  810. dev_err(&pdev->dev, "couldn't determine tslch-ns\n");
  811. return -ENXIO;
  812. }
  813. if (of_property_read_u32(np, "spi-max-frequency", &f_pdata->clk_rate)) {
  814. dev_err(&pdev->dev, "couldn't determine spi-max-frequency\n");
  815. return -ENXIO;
  816. }
  817. return 0;
  818. }
  819. static int cqspi_of_get_pdata(struct platform_device *pdev)
  820. {
  821. struct device_node *np = pdev->dev.of_node;
  822. struct cqspi_st *cqspi = platform_get_drvdata(pdev);
  823. cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs");
  824. if (of_property_read_u32(np, "cdns,fifo-depth", &cqspi->fifo_depth)) {
  825. dev_err(&pdev->dev, "couldn't determine fifo-depth\n");
  826. return -ENXIO;
  827. }
  828. if (of_property_read_u32(np, "cdns,fifo-width", &cqspi->fifo_width)) {
  829. dev_err(&pdev->dev, "couldn't determine fifo-width\n");
  830. return -ENXIO;
  831. }
  832. if (of_property_read_u32(np, "cdns,trigger-address",
  833. &cqspi->trigger_address)) {
  834. dev_err(&pdev->dev, "couldn't determine trigger-address\n");
  835. return -ENXIO;
  836. }
  837. return 0;
  838. }
  839. static void cqspi_controller_init(struct cqspi_st *cqspi)
  840. {
  841. cqspi_controller_enable(cqspi, 0);
  842. /* Configure the remap address register, no remap */
  843. writel(0, cqspi->iobase + CQSPI_REG_REMAP);
  844. /* Disable all interrupts. */
  845. writel(0, cqspi->iobase + CQSPI_REG_IRQMASK);
  846. /* Configure the SRAM split to 1:1 . */
  847. writel(cqspi->fifo_depth / 2, cqspi->iobase + CQSPI_REG_SRAMPARTITION);
  848. /* Load indirect trigger address. */
  849. writel(cqspi->trigger_address,
  850. cqspi->iobase + CQSPI_REG_INDIRECTTRIGGER);
  851. /* Program read watermark -- 1/2 of the FIFO. */
  852. writel(cqspi->fifo_depth * cqspi->fifo_width / 2,
  853. cqspi->iobase + CQSPI_REG_INDIRECTRDWATERMARK);
  854. /* Program write watermark -- 1/8 of the FIFO. */
  855. writel(cqspi->fifo_depth * cqspi->fifo_width / 8,
  856. cqspi->iobase + CQSPI_REG_INDIRECTWRWATERMARK);
  857. cqspi_controller_enable(cqspi, 1);
  858. }
  859. static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np)
  860. {
  861. struct platform_device *pdev = cqspi->pdev;
  862. struct device *dev = &pdev->dev;
  863. struct cqspi_flash_pdata *f_pdata;
  864. struct spi_nor *nor;
  865. struct mtd_info *mtd;
  866. unsigned int cs;
  867. int i, ret;
  868. /* Get flash device data */
  869. for_each_available_child_of_node(dev->of_node, np) {
  870. ret = of_property_read_u32(np, "reg", &cs);
  871. if (ret) {
  872. dev_err(dev, "Couldn't determine chip select.\n");
  873. goto err;
  874. }
  875. if (cs >= CQSPI_MAX_CHIPSELECT) {
  876. ret = -EINVAL;
  877. dev_err(dev, "Chip select %d out of range.\n", cs);
  878. goto err;
  879. }
  880. f_pdata = &cqspi->f_pdata[cs];
  881. f_pdata->cqspi = cqspi;
  882. f_pdata->cs = cs;
  883. ret = cqspi_of_get_flash_pdata(pdev, f_pdata, np);
  884. if (ret)
  885. goto err;
  886. nor = &f_pdata->nor;
  887. mtd = &nor->mtd;
  888. mtd->priv = nor;
  889. nor->dev = dev;
  890. spi_nor_set_flash_node(nor, np);
  891. nor->priv = f_pdata;
  892. nor->read_reg = cqspi_read_reg;
  893. nor->write_reg = cqspi_write_reg;
  894. nor->read = cqspi_read;
  895. nor->write = cqspi_write;
  896. nor->erase = cqspi_erase;
  897. nor->prepare = cqspi_prep;
  898. nor->unprepare = cqspi_unprep;
  899. mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%s.%d",
  900. dev_name(dev), cs);
  901. if (!mtd->name) {
  902. ret = -ENOMEM;
  903. goto err;
  904. }
  905. ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD);
  906. if (ret)
  907. goto err;
  908. ret = mtd_device_register(mtd, NULL, 0);
  909. if (ret)
  910. goto err;
  911. f_pdata->registered = true;
  912. }
  913. return 0;
  914. err:
  915. for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++)
  916. if (cqspi->f_pdata[i].registered)
  917. mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd);
  918. return ret;
  919. }
  920. static int cqspi_probe(struct platform_device *pdev)
  921. {
  922. struct device_node *np = pdev->dev.of_node;
  923. struct device *dev = &pdev->dev;
  924. struct cqspi_st *cqspi;
  925. struct resource *res;
  926. struct resource *res_ahb;
  927. int ret;
  928. int irq;
  929. cqspi = devm_kzalloc(dev, sizeof(*cqspi), GFP_KERNEL);
  930. if (!cqspi)
  931. return -ENOMEM;
  932. mutex_init(&cqspi->bus_mutex);
  933. cqspi->pdev = pdev;
  934. platform_set_drvdata(pdev, cqspi);
  935. /* Obtain configuration from OF. */
  936. ret = cqspi_of_get_pdata(pdev);
  937. if (ret) {
  938. dev_err(dev, "Cannot get mandatory OF data.\n");
  939. return -ENODEV;
  940. }
  941. /* Obtain QSPI clock. */
  942. cqspi->clk = devm_clk_get(dev, NULL);
  943. if (IS_ERR(cqspi->clk)) {
  944. dev_err(dev, "Cannot claim QSPI clock.\n");
  945. return PTR_ERR(cqspi->clk);
  946. }
  947. /* Obtain and remap controller address. */
  948. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  949. cqspi->iobase = devm_ioremap_resource(dev, res);
  950. if (IS_ERR(cqspi->iobase)) {
  951. dev_err(dev, "Cannot remap controller address.\n");
  952. return PTR_ERR(cqspi->iobase);
  953. }
  954. /* Obtain and remap AHB address. */
  955. res_ahb = platform_get_resource(pdev, IORESOURCE_MEM, 1);
  956. cqspi->ahb_base = devm_ioremap_resource(dev, res_ahb);
  957. if (IS_ERR(cqspi->ahb_base)) {
  958. dev_err(dev, "Cannot remap AHB address.\n");
  959. return PTR_ERR(cqspi->ahb_base);
  960. }
  961. init_completion(&cqspi->transfer_complete);
  962. /* Obtain IRQ line. */
  963. irq = platform_get_irq(pdev, 0);
  964. if (irq < 0) {
  965. dev_err(dev, "Cannot obtain IRQ.\n");
  966. return -ENXIO;
  967. }
  968. ret = clk_prepare_enable(cqspi->clk);
  969. if (ret) {
  970. dev_err(dev, "Cannot enable QSPI clock.\n");
  971. return ret;
  972. }
  973. cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk);
  974. ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
  975. pdev->name, cqspi);
  976. if (ret) {
  977. dev_err(dev, "Cannot request IRQ.\n");
  978. goto probe_irq_failed;
  979. }
  980. cqspi_wait_idle(cqspi);
  981. cqspi_controller_init(cqspi);
  982. cqspi->current_cs = -1;
  983. cqspi->sclk = 0;
  984. ret = cqspi_setup_flash(cqspi, np);
  985. if (ret) {
  986. dev_err(dev, "Cadence QSPI NOR probe failed %d\n", ret);
  987. goto probe_setup_failed;
  988. }
  989. return ret;
  990. probe_irq_failed:
  991. cqspi_controller_enable(cqspi, 0);
  992. probe_setup_failed:
  993. clk_disable_unprepare(cqspi->clk);
  994. return ret;
  995. }
  996. static int cqspi_remove(struct platform_device *pdev)
  997. {
  998. struct cqspi_st *cqspi = platform_get_drvdata(pdev);
  999. int i;
  1000. for (i = 0; i < CQSPI_MAX_CHIPSELECT; i++)
  1001. if (cqspi->f_pdata[i].registered)
  1002. mtd_device_unregister(&cqspi->f_pdata[i].nor.mtd);
  1003. cqspi_controller_enable(cqspi, 0);
  1004. clk_disable_unprepare(cqspi->clk);
  1005. return 0;
  1006. }
  1007. #ifdef CONFIG_PM_SLEEP
  1008. static int cqspi_suspend(struct device *dev)
  1009. {
  1010. struct cqspi_st *cqspi = dev_get_drvdata(dev);
  1011. cqspi_controller_enable(cqspi, 0);
  1012. return 0;
  1013. }
  1014. static int cqspi_resume(struct device *dev)
  1015. {
  1016. struct cqspi_st *cqspi = dev_get_drvdata(dev);
  1017. cqspi_controller_enable(cqspi, 1);
  1018. return 0;
  1019. }
  1020. static const struct dev_pm_ops cqspi__dev_pm_ops = {
  1021. .suspend = cqspi_suspend,
  1022. .resume = cqspi_resume,
  1023. };
  1024. #define CQSPI_DEV_PM_OPS (&cqspi__dev_pm_ops)
  1025. #else
  1026. #define CQSPI_DEV_PM_OPS NULL
  1027. #endif
  1028. static struct of_device_id const cqspi_dt_ids[] = {
  1029. {.compatible = "cdns,qspi-nor",},
  1030. { /* end of table */ }
  1031. };
  1032. MODULE_DEVICE_TABLE(of, cqspi_dt_ids);
  1033. static struct platform_driver cqspi_platform_driver = {
  1034. .probe = cqspi_probe,
  1035. .remove = cqspi_remove,
  1036. .driver = {
  1037. .name = CQSPI_NAME,
  1038. .pm = CQSPI_DEV_PM_OPS,
  1039. .of_match_table = cqspi_dt_ids,
  1040. },
  1041. };
  1042. module_platform_driver(cqspi_platform_driver);
  1043. MODULE_DESCRIPTION("Cadence QSPI Controller Driver");
  1044. MODULE_LICENSE("GPL v2");
  1045. MODULE_ALIAS("platform:" CQSPI_NAME);
  1046. MODULE_AUTHOR("Ley Foon Tan <lftan@altera.com>");
  1047. MODULE_AUTHOR("Graham Moore <grmoore@opensource.altera.com>");