/* spi-ep93xx.c */
  1. /*
  2. * Driver for Cirrus Logic EP93xx SPI controller.
  3. *
  4. * Copyright (C) 2010-2011 Mika Westerberg
  5. *
  6. * Explicit FIFO handling code was inspired by amba-pl022 driver.
  7. *
  8. * Chip select support using other than built-in GPIOs by H. Hartley Sweeten.
  9. *
  10. * For more information about the SPI controller see documentation on Cirrus
  11. * Logic web site:
  12. * http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
  13. *
  14. * This program is free software; you can redistribute it and/or modify
  15. * it under the terms of the GNU General Public License version 2 as
  16. * published by the Free Software Foundation.
  17. */
  18. #include <linux/io.h>
  19. #include <linux/clk.h>
  20. #include <linux/err.h>
  21. #include <linux/delay.h>
  22. #include <linux/device.h>
  23. #include <linux/dmaengine.h>
  24. #include <linux/bitops.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/module.h>
  27. #include <linux/platform_device.h>
  28. #include <linux/workqueue.h>
  29. #include <linux/sched.h>
  30. #include <linux/scatterlist.h>
  31. #include <linux/spi/spi.h>
  32. #include <mach/dma.h>
  33. #include <mach/ep93xx_spi.h>
/* SSP register offsets and bit definitions (see the EP93xx User's Guide) */
#define SSPCR0			0x0000		/* control register 0 */
#define SSPCR0_MODE_SHIFT	6		/* SPI mode (CPOL/CPHA) field */
#define SSPCR0_SCR_SHIFT	8		/* serial clock rate (scr) field */

#define SSPCR1			0x0004		/* control register 1 */
#define SSPCR1_RIE		BIT(0)		/* RX FIFO interrupt enable */
#define SSPCR1_TIE		BIT(1)		/* TX FIFO interrupt enable */
#define SSPCR1_RORIE		BIT(2)		/* RX overrun interrupt enable */
#define SSPCR1_LBM		BIT(3)		/* loopback mode */
#define SSPCR1_SSE		BIT(4)		/* synchronous serial port enable */
#define SSPCR1_MS		BIT(5)		/* master/slave select */
#define SSPCR1_SOD		BIT(6)		/* slave output disable */

#define SSPDR			0x0008		/* data register (FIFO) */

#define SSPSR			0x000c		/* status register */
#define SSPSR_TFE		BIT(0)		/* TX FIFO empty */
#define SSPSR_TNF		BIT(1)		/* TX FIFO not full */
#define SSPSR_RNE		BIT(2)		/* RX FIFO not empty */
#define SSPSR_RFF		BIT(3)		/* RX FIFO full */
#define SSPSR_BSY		BIT(4)		/* controller busy */

#define SSPCPSR			0x0010		/* clock prescale register */

#define SSPIIR			0x0014		/* interrupt identification */
#define SSPIIR_RIS		BIT(0)		/* RX interrupt status */
#define SSPIIR_TIS		BIT(1)		/* TX interrupt status */
#define SSPIIR_RORIS		BIT(2)		/* RX overrun interrupt status */
#define SSPICR			SSPIIR		/* writing clears the overrun */

/* timeout in milliseconds */
#define SPI_TIMEOUT		5

/* maximum depth of RX/TX FIFO */
#define SPI_FIFO_SIZE		8
/**
 * struct ep93xx_spi - EP93xx SPI controller structure
 * @lock: spinlock that protects concurrent accesses to fields @running,
 *        @current_msg and @msg_queue
 * @pdev: pointer to platform device
 * @clk: clock for the controller
 * @regs_base: pointer to ioremap()'d registers
 * @sspdr_phys: physical address of the SSPDR register
 * @irq: IRQ number used by the driver
 * @min_rate: minimum clock rate (in Hz) supported by the controller
 * @max_rate: maximum clock rate (in Hz) supported by the controller
 * @running: is the queue running
 * @wq: workqueue used by the driver
 * @msg_work: work that is queued for the driver
 * @wait: wait here until given transfer is completed
 * @msg_queue: queue for the messages
 * @current_msg: message that is currently processed (or %NULL if none)
 * @tx: current byte in transfer to transmit
 * @rx: current byte in transfer to receive
 * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
 *              frame decreases this level and sending one frame increases it.
 * @dma_rx: RX DMA channel
 * @dma_tx: TX DMA channel
 * @dma_rx_data: RX parameters passed to the DMA engine
 * @dma_tx_data: TX parameters passed to the DMA engine
 * @rx_sgt: sg table for RX transfers
 * @tx_sgt: sg table for TX transfers
 * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
 *            the client
 *
 * This structure holds EP93xx SPI controller specific information. When
 * @running is %true, driver accepts transfer requests from protocol drivers.
 * @current_msg is used to hold pointer to the message that is currently
 * processed. If @current_msg is %NULL, it means that no processing is going
 * on.
 *
 * Most of the fields are only written once and they can be accessed without
 * taking the @lock. Fields that are accessed concurrently are: @current_msg,
 * @running, and @msg_queue.
 */
struct ep93xx_spi {
	spinlock_t			lock;
	const struct platform_device	*pdev;
	struct clk			*clk;
	void __iomem			*regs_base;
	unsigned long			sspdr_phys;
	int				irq;
	unsigned long			min_rate;
	unsigned long			max_rate;
	bool				running;
	struct workqueue_struct		*wq;
	struct work_struct		msg_work;
	struct completion		wait;
	struct list_head		msg_queue;
	struct spi_message		*current_msg;
	size_t				tx;
	size_t				rx;
	size_t				fifo_level;
	struct dma_chan			*dma_rx;
	struct dma_chan			*dma_tx;
	struct ep93xx_dma_data		dma_rx_data;
	struct ep93xx_dma_data		dma_tx_data;
	struct sg_table			rx_sgt;
	struct sg_table			tx_sgt;
	void				*zeropage;
};
/**
 * struct ep93xx_spi_chip - SPI device hardware settings
 * @spi: back pointer to the SPI device
 * @rate: max rate in hz this chip supports
 * @div_cpsr: cpsr (pre-scaler) divider
 * @div_scr: scr divider
 * @dss: bits per word (4 - 16 bits)
 * @ops: private chip operations
 *
 * This structure is used to store hardware register specific settings for each
 * SPI device. Settings are written to hardware by function
 * ep93xx_spi_chip_setup().
 */
struct ep93xx_spi_chip {
	const struct spi_device		*spi;
	unsigned long			rate;
	u8				div_cpsr;
	u8				div_scr;
	u8				dss;
	struct ep93xx_spi_chip_ops	*ops;
};
/*
 * converts bits per word to CR0.DSS value: the DSS field stores
 * (bits_per_word - 1); callers validate bpw is in the 4..16 range
 */
#define bits_per_word_to_dss(bpw)	((bpw) - 1)
  151. static inline void
  152. ep93xx_spi_write_u8(const struct ep93xx_spi *espi, u16 reg, u8 value)
  153. {
  154. __raw_writeb(value, espi->regs_base + reg);
  155. }
  156. static inline u8
  157. ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg)
  158. {
  159. return __raw_readb(spi->regs_base + reg);
  160. }
  161. static inline void
  162. ep93xx_spi_write_u16(const struct ep93xx_spi *espi, u16 reg, u16 value)
  163. {
  164. __raw_writew(value, espi->regs_base + reg);
  165. }
  166. static inline u16
  167. ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg)
  168. {
  169. return __raw_readw(spi->regs_base + reg);
  170. }
  171. static int ep93xx_spi_enable(const struct ep93xx_spi *espi)
  172. {
  173. u8 regval;
  174. int err;
  175. err = clk_enable(espi->clk);
  176. if (err)
  177. return err;
  178. regval = ep93xx_spi_read_u8(espi, SSPCR1);
  179. regval |= SSPCR1_SSE;
  180. ep93xx_spi_write_u8(espi, SSPCR1, regval);
  181. return 0;
  182. }
  183. static void ep93xx_spi_disable(const struct ep93xx_spi *espi)
  184. {
  185. u8 regval;
  186. regval = ep93xx_spi_read_u8(espi, SSPCR1);
  187. regval &= ~SSPCR1_SSE;
  188. ep93xx_spi_write_u8(espi, SSPCR1, regval);
  189. clk_disable(espi->clk);
  190. }
  191. static void ep93xx_spi_enable_interrupts(const struct ep93xx_spi *espi)
  192. {
  193. u8 regval;
  194. regval = ep93xx_spi_read_u8(espi, SSPCR1);
  195. regval |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
  196. ep93xx_spi_write_u8(espi, SSPCR1, regval);
  197. }
  198. static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi)
  199. {
  200. u8 regval;
  201. regval = ep93xx_spi_read_u8(espi, SSPCR1);
  202. regval &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
  203. ep93xx_spi_write_u8(espi, SSPCR1, regval);
  204. }
  205. /**
  206. * ep93xx_spi_calc_divisors() - calculates SPI clock divisors
  207. * @espi: ep93xx SPI controller struct
  208. * @chip: divisors are calculated for this chip
  209. * @rate: desired SPI output clock rate
  210. *
  211. * Function calculates cpsr (clock pre-scaler) and scr divisors based on
  212. * given @rate and places them to @chip->div_cpsr and @chip->div_scr. If,
  213. * for some reason, divisors cannot be calculated nothing is stored and
  214. * %-EINVAL is returned.
  215. */
  216. static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
  217. struct ep93xx_spi_chip *chip,
  218. unsigned long rate)
  219. {
  220. unsigned long spi_clk_rate = clk_get_rate(espi->clk);
  221. int cpsr, scr;
  222. /*
  223. * Make sure that max value is between values supported by the
  224. * controller. Note that minimum value is already checked in
  225. * ep93xx_spi_transfer().
  226. */
  227. rate = clamp(rate, espi->min_rate, espi->max_rate);
  228. /*
  229. * Calculate divisors so that we can get speed according the
  230. * following formula:
  231. * rate = spi_clock_rate / (cpsr * (1 + scr))
  232. *
  233. * cpsr must be even number and starts from 2, scr can be any number
  234. * between 0 and 255.
  235. */
  236. for (cpsr = 2; cpsr <= 254; cpsr += 2) {
  237. for (scr = 0; scr <= 255; scr++) {
  238. if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
  239. chip->div_scr = (u8)scr;
  240. chip->div_cpsr = (u8)cpsr;
  241. return 0;
  242. }
  243. }
  244. }
  245. return -EINVAL;
  246. }
  247. static void ep93xx_spi_cs_control(struct spi_device *spi, bool control)
  248. {
  249. struct ep93xx_spi_chip *chip = spi_get_ctldata(spi);
  250. int value = (spi->mode & SPI_CS_HIGH) ? control : !control;
  251. if (chip->ops && chip->ops->cs_control)
  252. chip->ops->cs_control(spi, value);
  253. }
/**
 * ep93xx_spi_setup() - setup an SPI device
 * @spi: SPI device to setup
 *
 * This function sets up SPI device mode, speed etc. Can be called multiple
 * times for a single device. Returns %0 in case of success, negative error in
 * case of failure. When this function returns success, the device is
 * deselected.
 */
static int ep93xx_spi_setup(struct spi_device *spi)
{
	struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
	struct ep93xx_spi_chip *chip;

	/* the DSS field can only express 4..16 bits per word */
	if (spi->bits_per_word < 4 || spi->bits_per_word > 16) {
		dev_err(&espi->pdev->dev, "invalid bits per word %d\n",
			spi->bits_per_word);
		return -EINVAL;
	}

	chip = spi_get_ctldata(spi);
	if (!chip) {
		/* first call for this device: allocate per-chip state */
		dev_dbg(&espi->pdev->dev, "initial setup for %s\n",
			spi->modalias);

		chip = kzalloc(sizeof(*chip), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;

		chip->spi = spi;
		chip->ops = spi->controller_data;

		if (chip->ops && chip->ops->setup) {
			int ret = chip->ops->setup(spi);
			if (ret) {
				kfree(chip);
				return ret;
			}
		}

		spi_set_ctldata(spi, chip);
	}

	/* recompute divisors only when the requested speed changed */
	if (spi->max_speed_hz != chip->rate) {
		int err;

		err = ep93xx_spi_calc_divisors(espi, chip, spi->max_speed_hz);
		if (err != 0) {
			/*
			 * NOTE(review): this frees @chip without invoking
			 * chip->ops->cleanup() even when ops->setup() ran
			 * above — confirm the ops contract allows that.
			 */
			spi_set_ctldata(spi, NULL);
			kfree(chip);
			return err;
		}
		chip->rate = spi->max_speed_hz;
	}

	chip->dss = bits_per_word_to_dss(spi->bits_per_word);

	/* leave the device deselected as documented above */
	ep93xx_spi_cs_control(spi, false);
	return 0;
}
  304. /**
  305. * ep93xx_spi_transfer() - queue message to be transferred
  306. * @spi: target SPI device
  307. * @msg: message to be transferred
  308. *
  309. * This function is called by SPI device drivers when they are going to transfer
  310. * a new message. It simply puts the message in the queue and schedules
  311. * workqueue to perform the actual transfer later on.
  312. *
  313. * Returns %0 on success and negative error in case of failure.
  314. */
  315. static int ep93xx_spi_transfer(struct spi_device *spi, struct spi_message *msg)
  316. {
  317. struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
  318. struct spi_transfer *t;
  319. unsigned long flags;
  320. if (!msg || !msg->complete)
  321. return -EINVAL;
  322. /* first validate each transfer */
  323. list_for_each_entry(t, &msg->transfers, transfer_list) {
  324. if (t->bits_per_word) {
  325. if (t->bits_per_word < 4 || t->bits_per_word > 16)
  326. return -EINVAL;
  327. }
  328. if (t->speed_hz && t->speed_hz < espi->min_rate)
  329. return -EINVAL;
  330. }
  331. /*
  332. * Now that we own the message, let's initialize it so that it is
  333. * suitable for us. We use @msg->status to signal whether there was
  334. * error in transfer and @msg->state is used to hold pointer to the
  335. * current transfer (or %NULL if no active current transfer).
  336. */
  337. msg->state = NULL;
  338. msg->status = 0;
  339. msg->actual_length = 0;
  340. spin_lock_irqsave(&espi->lock, flags);
  341. if (!espi->running) {
  342. spin_unlock_irqrestore(&espi->lock, flags);
  343. return -ESHUTDOWN;
  344. }
  345. list_add_tail(&msg->queue, &espi->msg_queue);
  346. queue_work(espi->wq, &espi->msg_work);
  347. spin_unlock_irqrestore(&espi->lock, flags);
  348. return 0;
  349. }
  350. /**
  351. * ep93xx_spi_cleanup() - cleans up master controller specific state
  352. * @spi: SPI device to cleanup
  353. *
  354. * This function releases master controller specific state for given @spi
  355. * device.
  356. */
  357. static void ep93xx_spi_cleanup(struct spi_device *spi)
  358. {
  359. struct ep93xx_spi_chip *chip;
  360. chip = spi_get_ctldata(spi);
  361. if (chip) {
  362. if (chip->ops && chip->ops->cleanup)
  363. chip->ops->cleanup(spi);
  364. spi_set_ctldata(spi, NULL);
  365. kfree(chip);
  366. }
  367. }
  368. /**
  369. * ep93xx_spi_chip_setup() - configures hardware according to given @chip
  370. * @espi: ep93xx SPI controller struct
  371. * @chip: chip specific settings
  372. *
  373. * This function sets up the actual hardware registers with settings given in
  374. * @chip. Note that no validation is done so make sure that callers validate
  375. * settings before calling this.
  376. */
  377. static void ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
  378. const struct ep93xx_spi_chip *chip)
  379. {
  380. u16 cr0;
  381. cr0 = chip->div_scr << SSPCR0_SCR_SHIFT;
  382. cr0 |= (chip->spi->mode & (SPI_CPHA|SPI_CPOL)) << SSPCR0_MODE_SHIFT;
  383. cr0 |= chip->dss;
  384. dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
  385. chip->spi->mode, chip->div_cpsr, chip->div_scr, chip->dss);
  386. dev_dbg(&espi->pdev->dev, "setup: cr0 %#x", cr0);
  387. ep93xx_spi_write_u8(espi, SSPCPSR, chip->div_cpsr);
  388. ep93xx_spi_write_u16(espi, SSPCR0, cr0);
  389. }
  390. static inline int bits_per_word(const struct ep93xx_spi *espi)
  391. {
  392. struct spi_message *msg = espi->current_msg;
  393. struct spi_transfer *t = msg->state;
  394. return t->bits_per_word ? t->bits_per_word : msg->spi->bits_per_word;
  395. }
  396. static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
  397. {
  398. if (bits_per_word(espi) > 8) {
  399. u16 tx_val = 0;
  400. if (t->tx_buf)
  401. tx_val = ((u16 *)t->tx_buf)[espi->tx];
  402. ep93xx_spi_write_u16(espi, SSPDR, tx_val);
  403. espi->tx += sizeof(tx_val);
  404. } else {
  405. u8 tx_val = 0;
  406. if (t->tx_buf)
  407. tx_val = ((u8 *)t->tx_buf)[espi->tx];
  408. ep93xx_spi_write_u8(espi, SSPDR, tx_val);
  409. espi->tx += sizeof(tx_val);
  410. }
  411. }
  412. static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t)
  413. {
  414. if (bits_per_word(espi) > 8) {
  415. u16 rx_val;
  416. rx_val = ep93xx_spi_read_u16(espi, SSPDR);
  417. if (t->rx_buf)
  418. ((u16 *)t->rx_buf)[espi->rx] = rx_val;
  419. espi->rx += sizeof(rx_val);
  420. } else {
  421. u8 rx_val;
  422. rx_val = ep93xx_spi_read_u8(espi, SSPDR);
  423. if (t->rx_buf)
  424. ((u8 *)t->rx_buf)[espi->rx] = rx_val;
  425. espi->rx += sizeof(rx_val);
  426. }
  427. }
  428. /**
  429. * ep93xx_spi_read_write() - perform next RX/TX transfer
  430. * @espi: ep93xx SPI controller struct
  431. *
  432. * This function transfers next bytes (or half-words) to/from RX/TX FIFOs. If
  433. * called several times, the whole transfer will be completed. Returns
  434. * %-EINPROGRESS when current transfer was not yet completed otherwise %0.
  435. *
  436. * When this function is finished, RX FIFO should be empty and TX FIFO should be
  437. * full.
  438. */
  439. static int ep93xx_spi_read_write(struct ep93xx_spi *espi)
  440. {
  441. struct spi_message *msg = espi->current_msg;
  442. struct spi_transfer *t = msg->state;
  443. /* read as long as RX FIFO has frames in it */
  444. while ((ep93xx_spi_read_u8(espi, SSPSR) & SSPSR_RNE)) {
  445. ep93xx_do_read(espi, t);
  446. espi->fifo_level--;
  447. }
  448. /* write as long as TX FIFO has room */
  449. while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) {
  450. ep93xx_do_write(espi, t);
  451. espi->fifo_level++;
  452. }
  453. if (espi->rx == t->len)
  454. return 0;
  455. return -EINPROGRESS;
  456. }
  457. static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi)
  458. {
  459. /*
  460. * Now everything is set up for the current transfer. We prime the TX
  461. * FIFO, enable interrupts, and wait for the transfer to complete.
  462. */
  463. if (ep93xx_spi_read_write(espi)) {
  464. ep93xx_spi_enable_interrupts(espi);
  465. wait_for_completion(&espi->wait);
  466. }
  467. }
  468. /**
  469. * ep93xx_spi_dma_prepare() - prepares a DMA transfer
  470. * @espi: ep93xx SPI controller struct
  471. * @dir: DMA transfer direction
  472. *
  473. * Function configures the DMA, maps the buffer and prepares the DMA
  474. * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR
  475. * in case of failure.
  476. */
  477. static struct dma_async_tx_descriptor *
  478. ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_transfer_direction dir)
  479. {
  480. struct spi_transfer *t = espi->current_msg->state;
  481. struct dma_async_tx_descriptor *txd;
  482. enum dma_slave_buswidth buswidth;
  483. struct dma_slave_config conf;
  484. struct scatterlist *sg;
  485. struct sg_table *sgt;
  486. struct dma_chan *chan;
  487. const void *buf, *pbuf;
  488. size_t len = t->len;
  489. int i, ret, nents;
  490. if (bits_per_word(espi) > 8)
  491. buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
  492. else
  493. buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
  494. memset(&conf, 0, sizeof(conf));
  495. conf.direction = dir;
  496. if (dir == DMA_DEV_TO_MEM) {
  497. chan = espi->dma_rx;
  498. buf = t->rx_buf;
  499. sgt = &espi->rx_sgt;
  500. conf.src_addr = espi->sspdr_phys;
  501. conf.src_addr_width = buswidth;
  502. } else {
  503. chan = espi->dma_tx;
  504. buf = t->tx_buf;
  505. sgt = &espi->tx_sgt;
  506. conf.dst_addr = espi->sspdr_phys;
  507. conf.dst_addr_width = buswidth;
  508. }
  509. ret = dmaengine_slave_config(chan, &conf);
  510. if (ret)
  511. return ERR_PTR(ret);
  512. /*
  513. * We need to split the transfer into PAGE_SIZE'd chunks. This is
  514. * because we are using @espi->zeropage to provide a zero RX buffer
  515. * for the TX transfers and we have only allocated one page for that.
  516. *
  517. * For performance reasons we allocate a new sg_table only when
  518. * needed. Otherwise we will re-use the current one. Eventually the
  519. * last sg_table is released in ep93xx_spi_release_dma().
  520. */
  521. nents = DIV_ROUND_UP(len, PAGE_SIZE);
  522. if (nents != sgt->nents) {
  523. sg_free_table(sgt);
  524. ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
  525. if (ret)
  526. return ERR_PTR(ret);
  527. }
  528. pbuf = buf;
  529. for_each_sg(sgt->sgl, sg, sgt->nents, i) {
  530. size_t bytes = min_t(size_t, len, PAGE_SIZE);
  531. if (buf) {
  532. sg_set_page(sg, virt_to_page(pbuf), bytes,
  533. offset_in_page(pbuf));
  534. } else {
  535. sg_set_page(sg, virt_to_page(espi->zeropage),
  536. bytes, 0);
  537. }
  538. pbuf += bytes;
  539. len -= bytes;
  540. }
  541. if (WARN_ON(len)) {
  542. dev_warn(&espi->pdev->dev, "len = %d expected 0!", len);
  543. return ERR_PTR(-EINVAL);
  544. }
  545. nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
  546. if (!nents)
  547. return ERR_PTR(-ENOMEM);
  548. txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK);
  549. if (!txd) {
  550. dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
  551. return ERR_PTR(-ENOMEM);
  552. }
  553. return txd;
  554. }
  555. /**
  556. * ep93xx_spi_dma_finish() - finishes with a DMA transfer
  557. * @espi: ep93xx SPI controller struct
  558. * @dir: DMA transfer direction
  559. *
  560. * Function finishes with the DMA transfer. After this, the DMA buffer is
  561. * unmapped.
  562. */
  563. static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
  564. enum dma_transfer_direction dir)
  565. {
  566. struct dma_chan *chan;
  567. struct sg_table *sgt;
  568. if (dir == DMA_DEV_TO_MEM) {
  569. chan = espi->dma_rx;
  570. sgt = &espi->rx_sgt;
  571. } else {
  572. chan = espi->dma_tx;
  573. sgt = &espi->tx_sgt;
  574. }
  575. dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
  576. }
  577. static void ep93xx_spi_dma_callback(void *callback_param)
  578. {
  579. complete(callback_param);
  580. }
  581. static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
  582. {
  583. struct spi_message *msg = espi->current_msg;
  584. struct dma_async_tx_descriptor *rxd, *txd;
  585. rxd = ep93xx_spi_dma_prepare(espi, DMA_DEV_TO_MEM);
  586. if (IS_ERR(rxd)) {
  587. dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
  588. msg->status = PTR_ERR(rxd);
  589. return;
  590. }
  591. txd = ep93xx_spi_dma_prepare(espi, DMA_MEM_TO_DEV);
  592. if (IS_ERR(txd)) {
  593. ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
  594. dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(rxd));
  595. msg->status = PTR_ERR(txd);
  596. return;
  597. }
  598. /* We are ready when RX is done */
  599. rxd->callback = ep93xx_spi_dma_callback;
  600. rxd->callback_param = &espi->wait;
  601. /* Now submit both descriptors and wait while they finish */
  602. dmaengine_submit(rxd);
  603. dmaengine_submit(txd);
  604. dma_async_issue_pending(espi->dma_rx);
  605. dma_async_issue_pending(espi->dma_tx);
  606. wait_for_completion(&espi->wait);
  607. ep93xx_spi_dma_finish(espi, DMA_MEM_TO_DEV);
  608. ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
  609. }
/**
 * ep93xx_spi_process_transfer() - processes one SPI transfer
 * @espi: ep93xx SPI controller struct
 * @msg: current message
 * @t: transfer to process
 *
 * This function processes one SPI transfer given in @t. Function waits until
 * transfer is complete (may sleep) and updates @msg->status based on whether
 * transfer was successfully processed or not.
 */
static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
					struct spi_message *msg,
					struct spi_transfer *t)
{
	struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi);

	/* interrupt/DMA paths find the active transfer through msg->state */
	msg->state = t;

	/*
	 * Handle any transfer specific settings if needed. We use
	 * temporary chip settings here and restore original later when
	 * the transfer is finished.
	 */
	if (t->speed_hz || t->bits_per_word) {
		struct ep93xx_spi_chip tmp_chip = *chip;

		if (t->speed_hz) {
			int err;

			err = ep93xx_spi_calc_divisors(espi, &tmp_chip,
						       t->speed_hz);
			if (err) {
				dev_err(&espi->pdev->dev,
					"failed to adjust speed\n");
				msg->status = err;
				return;
			}
		}

		if (t->bits_per_word)
			tmp_chip.dss = bits_per_word_to_dss(t->bits_per_word);

		/*
		 * Set up temporary new hw settings for this transfer.
		 */
		ep93xx_spi_chip_setup(espi, &tmp_chip);
	}

	/* reset byte counters for this transfer */
	espi->rx = 0;
	espi->tx = 0;

	/*
	 * There is no point of setting up DMA for the transfers which will
	 * fit into the FIFO and can be transferred with a single interrupt.
	 * So in these cases we will be using PIO and don't bother for DMA.
	 */
	if (espi->dma_rx && t->len > SPI_FIFO_SIZE)
		ep93xx_spi_dma_transfer(espi);
	else
		ep93xx_spi_pio_transfer(espi);

	/*
	 * In case of error during transmit, we bail out from processing
	 * the message.
	 */
	if (msg->status)
		return;

	msg->actual_length += t->len;

	/*
	 * After this transfer is finished, perform any possible
	 * post-transfer actions requested by the protocol driver.
	 */
	if (t->delay_usecs) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(t->delay_usecs));
	}
	if (t->cs_change) {
		if (!list_is_last(&t->transfer_list, &msg->transfers)) {
			/*
			 * In case protocol driver is asking us to drop the
			 * chipselect briefly, we let the scheduler to handle
			 * any "delay" here.
			 */
			ep93xx_spi_cs_control(msg->spi, false);
			cond_resched();
			ep93xx_spi_cs_control(msg->spi, true);
		}
	}

	/* restore the device-wide settings clobbered by the temporary ones */
	if (t->speed_hz || t->bits_per_word)
		ep93xx_spi_chip_setup(espi, chip);
}
  692. /*
  693. * ep93xx_spi_process_message() - process one SPI message
  694. * @espi: ep93xx SPI controller struct
  695. * @msg: message to process
  696. *
  697. * This function processes a single SPI message. We go through all transfers in
  698. * the message and pass them to ep93xx_spi_process_transfer(). Chipselect is
  699. * asserted during the whole message (unless per transfer cs_change is set).
  700. *
  701. * @msg->status contains %0 in case of success or negative error code in case of
  702. * failure.
  703. */
  704. static void ep93xx_spi_process_message(struct ep93xx_spi *espi,
  705. struct spi_message *msg)
  706. {
  707. unsigned long timeout;
  708. struct spi_transfer *t;
  709. int err;
  710. /*
  711. * Enable the SPI controller and its clock.
  712. */
  713. err = ep93xx_spi_enable(espi);
  714. if (err) {
  715. dev_err(&espi->pdev->dev, "failed to enable SPI controller\n");
  716. msg->status = err;
  717. return;
  718. }
  719. /*
  720. * Just to be sure: flush any data from RX FIFO.
  721. */
  722. timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
  723. while (ep93xx_spi_read_u16(espi, SSPSR) & SSPSR_RNE) {
  724. if (time_after(jiffies, timeout)) {
  725. dev_warn(&espi->pdev->dev,
  726. "timeout while flushing RX FIFO\n");
  727. msg->status = -ETIMEDOUT;
  728. return;
  729. }
  730. ep93xx_spi_read_u16(espi, SSPDR);
  731. }
  732. /*
  733. * We explicitly handle FIFO level. This way we don't have to check TX
  734. * FIFO status using %SSPSR_TNF bit which may cause RX FIFO overruns.
  735. */
  736. espi->fifo_level = 0;
  737. /*
  738. * Update SPI controller registers according to spi device and assert
  739. * the chipselect.
  740. */
  741. ep93xx_spi_chip_setup(espi, spi_get_ctldata(msg->spi));
  742. ep93xx_spi_cs_control(msg->spi, true);
  743. list_for_each_entry(t, &msg->transfers, transfer_list) {
  744. ep93xx_spi_process_transfer(espi, msg, t);
  745. if (msg->status)
  746. break;
  747. }
  748. /*
  749. * Now the whole message is transferred (or failed for some reason). We
  750. * deselect the device and disable the SPI controller.
  751. */
  752. ep93xx_spi_cs_control(msg->spi, false);
  753. ep93xx_spi_disable(espi);
  754. }
/* map the embedded work_struct back to its owning ep93xx_spi */
#define work_to_espi(work) (container_of((work), struct ep93xx_spi, msg_work))

/**
 * ep93xx_spi_work() - EP93xx SPI workqueue worker function
 * @work: work struct
 *
 * Workqueue worker function. This function is called when there are new
 * SPI messages to be processed. Message is taken out from the queue and then
 * passed to ep93xx_spi_process_message().
 *
 * After message is transferred, protocol driver is notified by calling
 * @msg->complete(). In case of error, @msg->status is set to negative error
 * number, otherwise it contains zero (and @msg->actual_length is updated).
 */
static void ep93xx_spi_work(struct work_struct *work)
{
	struct ep93xx_spi *espi = work_to_espi(work);
	struct spi_message *msg;

	spin_lock_irq(&espi->lock);
	/* nothing to do if shutting down, busy, or the queue is empty */
	if (!espi->running || espi->current_msg ||
	    list_empty(&espi->msg_queue)) {
		spin_unlock_irq(&espi->lock);
		return;
	}
	msg = list_first_entry(&espi->msg_queue, struct spi_message, queue);
	list_del_init(&msg->queue);
	espi->current_msg = msg;
	spin_unlock_irq(&espi->lock);

	/* may sleep; must not hold the lock here */
	ep93xx_spi_process_message(espi, msg);

	/*
	 * Update the current message and re-schedule ourselves if there are
	 * more messages in the queue.
	 */
	spin_lock_irq(&espi->lock);
	espi->current_msg = NULL;
	if (espi->running && !list_empty(&espi->msg_queue))
		queue_work(espi->wq, &espi->msg_work);
	spin_unlock_irq(&espi->lock);

	/* notify the protocol driver that we are done with this message */
	msg->complete(msg->context);
}
  795. static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
  796. {
  797. struct ep93xx_spi *espi = dev_id;
  798. u8 irq_status = ep93xx_spi_read_u8(espi, SSPIIR);
  799. /*
  800. * If we got ROR (receive overrun) interrupt we know that something is
  801. * wrong. Just abort the message.
  802. */
  803. if (unlikely(irq_status & SSPIIR_RORIS)) {
  804. /* clear the overrun interrupt */
  805. ep93xx_spi_write_u8(espi, SSPICR, 0);
  806. dev_warn(&espi->pdev->dev,
  807. "receive overrun, aborting the message\n");
  808. espi->current_msg->status = -EIO;
  809. } else {
  810. /*
  811. * Interrupt is either RX (RIS) or TX (TIS). For both cases we
  812. * simply execute next data transfer.
  813. */
  814. if (ep93xx_spi_read_write(espi)) {
  815. /*
  816. * In normal case, there still is some processing left
  817. * for current transfer. Let's wait for the next
  818. * interrupt then.
  819. */
  820. return IRQ_HANDLED;
  821. }
  822. }
  823. /*
  824. * Current transfer is finished, either with error or with success. In
  825. * any case we disable interrupts and notify the worker to handle
  826. * any post-processing of the message.
  827. */
  828. ep93xx_spi_disable_interrupts(espi);
  829. complete(&espi->wait);
  830. return IRQ_HANDLED;
  831. }
  832. static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
  833. {
  834. if (ep93xx_dma_chan_is_m2p(chan))
  835. return false;
  836. chan->private = filter_param;
  837. return true;
  838. }
  839. static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
  840. {
  841. dma_cap_mask_t mask;
  842. int ret;
  843. espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
  844. if (!espi->zeropage)
  845. return -ENOMEM;
  846. dma_cap_zero(mask);
  847. dma_cap_set(DMA_SLAVE, mask);
  848. espi->dma_rx_data.port = EP93XX_DMA_SSP;
  849. espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
  850. espi->dma_rx_data.name = "ep93xx-spi-rx";
  851. espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
  852. &espi->dma_rx_data);
  853. if (!espi->dma_rx) {
  854. ret = -ENODEV;
  855. goto fail_free_page;
  856. }
  857. espi->dma_tx_data.port = EP93XX_DMA_SSP;
  858. espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
  859. espi->dma_tx_data.name = "ep93xx-spi-tx";
  860. espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
  861. &espi->dma_tx_data);
  862. if (!espi->dma_tx) {
  863. ret = -ENODEV;
  864. goto fail_release_rx;
  865. }
  866. return 0;
  867. fail_release_rx:
  868. dma_release_channel(espi->dma_rx);
  869. espi->dma_rx = NULL;
  870. fail_free_page:
  871. free_page((unsigned long)espi->zeropage);
  872. return ret;
  873. }
/*
 * Release the DMA channels, their scatter-gather tables and the zero page
 * acquired by ep93xx_spi_setup_dma(). Safe to call when DMA setup failed
 * or was never attempted (all pointers are checked first).
 */
static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
{
	if (espi->dma_rx) {
		dma_release_channel(espi->dma_rx);
		sg_free_table(&espi->rx_sgt);
	}
	if (espi->dma_tx) {
		dma_release_channel(espi->dma_tx);
		sg_free_table(&espi->tx_sgt);
	}
	if (espi->zeropage)
		free_page((unsigned long)espi->zeropage);
}
  887. static int __devinit ep93xx_spi_probe(struct platform_device *pdev)
  888. {
  889. struct spi_master *master;
  890. struct ep93xx_spi_info *info;
  891. struct ep93xx_spi *espi;
  892. struct resource *res;
  893. int error;
  894. info = pdev->dev.platform_data;
  895. master = spi_alloc_master(&pdev->dev, sizeof(*espi));
  896. if (!master) {
  897. dev_err(&pdev->dev, "failed to allocate spi master\n");
  898. return -ENOMEM;
  899. }
  900. master->setup = ep93xx_spi_setup;
  901. master->transfer = ep93xx_spi_transfer;
  902. master->cleanup = ep93xx_spi_cleanup;
  903. master->bus_num = pdev->id;
  904. master->num_chipselect = info->num_chipselect;
  905. master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
  906. platform_set_drvdata(pdev, master);
  907. espi = spi_master_get_devdata(master);
  908. espi->clk = clk_get(&pdev->dev, NULL);
  909. if (IS_ERR(espi->clk)) {
  910. dev_err(&pdev->dev, "unable to get spi clock\n");
  911. error = PTR_ERR(espi->clk);
  912. goto fail_release_master;
  913. }
  914. spin_lock_init(&espi->lock);
  915. init_completion(&espi->wait);
  916. /*
  917. * Calculate maximum and minimum supported clock rates
  918. * for the controller.
  919. */
  920. espi->max_rate = clk_get_rate(espi->clk) / 2;
  921. espi->min_rate = clk_get_rate(espi->clk) / (254 * 256);
  922. espi->pdev = pdev;
  923. espi->irq = platform_get_irq(pdev, 0);
  924. if (espi->irq < 0) {
  925. error = -EBUSY;
  926. dev_err(&pdev->dev, "failed to get irq resources\n");
  927. goto fail_put_clock;
  928. }
  929. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  930. if (!res) {
  931. dev_err(&pdev->dev, "unable to get iomem resource\n");
  932. error = -ENODEV;
  933. goto fail_put_clock;
  934. }
  935. res = request_mem_region(res->start, resource_size(res), pdev->name);
  936. if (!res) {
  937. dev_err(&pdev->dev, "unable to request iomem resources\n");
  938. error = -EBUSY;
  939. goto fail_put_clock;
  940. }
  941. espi->sspdr_phys = res->start + SSPDR;
  942. espi->regs_base = ioremap(res->start, resource_size(res));
  943. if (!espi->regs_base) {
  944. dev_err(&pdev->dev, "failed to map resources\n");
  945. error = -ENODEV;
  946. goto fail_free_mem;
  947. }
  948. error = request_irq(espi->irq, ep93xx_spi_interrupt, 0,
  949. "ep93xx-spi", espi);
  950. if (error) {
  951. dev_err(&pdev->dev, "failed to request irq\n");
  952. goto fail_unmap_regs;
  953. }
  954. if (info->use_dma && ep93xx_spi_setup_dma(espi))
  955. dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");
  956. espi->wq = create_singlethread_workqueue("ep93xx_spid");
  957. if (!espi->wq) {
  958. dev_err(&pdev->dev, "unable to create workqueue\n");
  959. goto fail_free_dma;
  960. }
  961. INIT_WORK(&espi->msg_work, ep93xx_spi_work);
  962. INIT_LIST_HEAD(&espi->msg_queue);
  963. espi->running = true;
  964. /* make sure that the hardware is disabled */
  965. ep93xx_spi_write_u8(espi, SSPCR1, 0);
  966. error = spi_register_master(master);
  967. if (error) {
  968. dev_err(&pdev->dev, "failed to register SPI master\n");
  969. goto fail_free_queue;
  970. }
  971. dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
  972. (unsigned long)res->start, espi->irq);
  973. return 0;
  974. fail_free_queue:
  975. destroy_workqueue(espi->wq);
  976. fail_free_dma:
  977. ep93xx_spi_release_dma(espi);
  978. free_irq(espi->irq, espi);
  979. fail_unmap_regs:
  980. iounmap(espi->regs_base);
  981. fail_free_mem:
  982. release_mem_region(res->start, resource_size(res));
  983. fail_put_clock:
  984. clk_put(espi->clk);
  985. fail_release_master:
  986. spi_master_put(master);
  987. platform_set_drvdata(pdev, NULL);
  988. return error;
  989. }
/*
 * Remove the EP93xx SPI controller: stop the worker, fail all queued
 * messages with -ESHUTDOWN, then release DMA, IRQ, MMIO and clock resources
 * and unregister the master.
 */
static int __devexit ep93xx_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct ep93xx_spi *espi = spi_master_get_devdata(master);
	struct resource *res;

	/*
	 * Mark the controller as stopping under the lock first, then flush
	 * and destroy the workqueue so no worker runs past this point.
	 */
	spin_lock_irq(&espi->lock);
	espi->running = false;
	spin_unlock_irq(&espi->lock);

	destroy_workqueue(espi->wq);

	/*
	 * Complete remaining messages with %-ESHUTDOWN status.
	 */
	spin_lock_irq(&espi->lock);
	while (!list_empty(&espi->msg_queue)) {
		struct spi_message *msg;

		msg = list_first_entry(&espi->msg_queue,
				       struct spi_message, queue);
		list_del_init(&msg->queue);
		msg->status = -ESHUTDOWN;
		/*
		 * Drop the lock around the completion callback; presumably
		 * it may call back into this driver and take espi->lock
		 * itself — NOTE(review): confirm against protocol drivers.
		 */
		spin_unlock_irq(&espi->lock);
		msg->complete(msg->context);
		spin_lock_irq(&espi->lock);
	}
	spin_unlock_irq(&espi->lock);

	/* Tear down in reverse order of acquisition in probe. */
	ep93xx_spi_release_dma(espi);
	free_irq(espi->irq, espi);
	iounmap(espi->regs_base);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	clk_put(espi->clk);
	platform_set_drvdata(pdev, NULL);

	spi_unregister_master(master);
	return 0;
}
/* Platform driver glue; probed by name match against "ep93xx-spi" devices. */
static struct platform_driver ep93xx_spi_driver = {
	.driver		= {
		.name	= "ep93xx-spi",
		.owner	= THIS_MODULE,
	},
	.probe		= ep93xx_spi_probe,
	.remove		= __devexit_p(ep93xx_spi_remove),
};
module_platform_driver(ep93xx_spi_driver);

MODULE_DESCRIPTION("EP93xx SPI Controller driver");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ep93xx-spi");