/*
 * Driver for the Octeon bootbus compact flash.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2005 - 2009 Cavium Networks
 * Copyright (C) 2008 Wind River Systems
 */
  11. #include <linux/kernel.h>
  12. #include <linux/module.h>
  13. #include <linux/libata.h>
  14. #include <linux/irq.h>
  15. #include <linux/slab.h>
  16. #include <linux/platform_device.h>
  17. #include <linux/workqueue.h>
  18. #include <scsi/scsi_host.h>
  19. #include <asm/octeon/octeon.h>
/*
 * The Octeon bootbus compact flash interface is connected in at least
 * 3 different configurations on various evaluation boards:
 *
 * -- 8 bits no irq, no DMA
 * -- 16 bits no irq, no DMA
 * -- 16 bits True IDE mode with DMA, but no irq.
 *
 * In the last case the DMA engine can generate an interrupt when the
 * transfer is complete.  For the first two cases only PIO is supported.
 */
  32. #define DRV_NAME "pata_octeon_cf"
  33. #define DRV_VERSION "2.1"
/* Per-port driver state, stored in ata_port->private_data. */
struct octeon_cf_port {
	struct workqueue_struct *wq;	/* polls for not-busy after DMA completes */
	struct delayed_work delayed_finish;	/* deferred DMA-completion work */
	struct ata_port *ap;		/* back-pointer to the owning port */
	int dma_finished;		/* set when the DMA engine has finished */
};
/* SCSI host template: standard PIO-only libata defaults. */
static struct scsi_host_template octeon_cf_sht = {
	ATA_PIO_SHT(DRV_NAME),
};
  43. /**
  44. * Convert nanosecond based time to setting used in the
  45. * boot bus timing register, based on timing multiple
  46. */
  47. static unsigned int ns_to_tim_reg(unsigned int tim_mult, unsigned int nsecs)
  48. {
  49. unsigned int val;
  50. /*
  51. * Compute # of eclock periods to get desired duration in
  52. * nanoseconds.
  53. */
  54. val = DIV_ROUND_UP(nsecs * (octeon_get_io_clock_rate() / 1000000),
  55. 1000 * tim_mult);
  56. return val;
  57. }
  58. static void octeon_cf_set_boot_reg_cfg(int cs)
  59. {
  60. union cvmx_mio_boot_reg_cfgx reg_cfg;
  61. reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(cs));
  62. reg_cfg.s.dmack = 0; /* Don't assert DMACK on access */
  63. reg_cfg.s.tim_mult = 2; /* Timing mutiplier 2x */
  64. reg_cfg.s.rd_dly = 0; /* Sample on falling edge of BOOT_OE */
  65. reg_cfg.s.sam = 0; /* Don't combine write and output enable */
  66. reg_cfg.s.we_ext = 0; /* No write enable extension */
  67. reg_cfg.s.oe_ext = 0; /* No read enable extension */
  68. reg_cfg.s.en = 1; /* Enable this region */
  69. reg_cfg.s.orbit = 0; /* Don't combine with previous region */
  70. reg_cfg.s.ale = 0; /* Don't do address multiplexing */
  71. cvmx_write_csr(CVMX_MIO_BOOT_REG_CFGX(cs), reg_cfg.u64);
  72. }
/**
 * Called after libata determines the needed PIO mode.  This
 * function programs the Octeon bootbus regions to support the
 * timing requirements of the PIO mode.
 *
 * @ap: ATA port information
 * @dev: ATA device
 */
static void octeon_cf_set_piomode(struct ata_port *ap, struct ata_device *dev)
{
	struct octeon_cf_data *ocd = ap->dev->platform_data;
	union cvmx_mio_boot_reg_timx reg_tim;
	int cs = ocd->base_region;
	int T;
	struct ata_timing timing;

	int use_iordy;
	int trh;
	int pause;
	/* These names are timing parameters from the ATA spec */
	int t1;
	int t2;
	int t2i;

	/*
	 * Timing quantum for ata_timing_compute(): two eclock periods
	 * in picoseconds (the region uses a 2x timing multiplier, see
	 * octeon_cf_set_boot_reg_cfg()).
	 */
	T = (int)(2000000000000LL / octeon_get_clock_rate());

	if (ata_timing_compute(dev, dev->pio_mode, &timing, T, T))
		BUG();

	/* Register fields use minus-one notation, hence the decrements. */
	t1 = timing.setup;
	if (t1)
		t1--;
	t2 = timing.active;
	if (t2)
		t2--;
	/* NOTE(review): t2i is computed but never programmed below — confirm. */
	t2i = timing.act8b;
	if (t2i)
		t2i--;

	trh = ns_to_tim_reg(2, 20);
	if (trh)
		trh--;

	/* Whatever remains of the cycle becomes the inter-access pause. */
	pause = timing.cycle - timing.active - timing.setup - trh;
	if (pause)
		pause--;

	octeon_cf_set_boot_reg_cfg(cs);
	if (ocd->dma_engine >= 0)
		/* True IDE mode, program both chip selects. */
		octeon_cf_set_boot_reg_cfg(cs + 1);

	use_iordy = ata_pio_need_iordy(dev);

	reg_tim.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_TIMX(cs));
	/* Disable page mode */
	reg_tim.s.pagem = 0;
	/* Enable dynamic timing */
	reg_tim.s.waitm = use_iordy;
	/* Pages are disabled */
	reg_tim.s.pages = 0;
	/* We don't use multiplexed address mode */
	reg_tim.s.ale = 0;
	/* Not used */
	reg_tim.s.page = 0;
	/* Time after IORDY to continue to assert the data */
	reg_tim.s.wait = 0;
	/* Time to wait to complete the cycle. */
	reg_tim.s.pause = pause;
	/* How long to hold after a write to de-assert CE. */
	reg_tim.s.wr_hld = trh;
	/* How long to wait after a read to de-assert CE. */
	reg_tim.s.rd_hld = trh;
	/* How long write enable is asserted */
	reg_tim.s.we = t2;
	/* How long read enable is asserted */
	reg_tim.s.oe = t2;
	/* Time after CE that read/write starts */
	reg_tim.s.ce = ns_to_tim_reg(2, 5);
	/* Time before CE that address is valid */
	reg_tim.s.adr = 0;

	/* Program the bootbus region timing for the data port chip select. */
	cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs), reg_tim.u64);
	if (ocd->dma_engine >= 0)
		/* True IDE mode, program both chip selects. */
		cvmx_write_csr(CVMX_MIO_BOOT_REG_TIMX(cs + 1), reg_tim.u64);
}
/*
 * Program the bootbus DMA timing register for the device's selected
 * MWDMA mode.  Timing parameters come from the generic libata timing
 * table; names (T0, Td, Tkr) follow the CF specification.
 */
static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
{
	struct octeon_cf_data *ocd = dev->link->ap->dev->platform_data;
	union cvmx_mio_boot_dma_timx dma_tim;
	unsigned int oe_a;
	unsigned int oe_n;
	unsigned int dma_ackh;
	unsigned int dma_arq;
	unsigned int pause;
	unsigned int T0, Tkr, Td;
	unsigned int tim_mult;
	const struct ata_timing *timing;

	timing = ata_timing_find_mode(dev->dma_mode);
	T0 = timing->cycle;		/* total cycle time, ns */
	Td = timing->active;		/* OE assertion time, ns */
	Tkr = timing->recover;		/* recovery time, ns */
	dma_ackh = timing->dmack_hold;

	dma_tim.u64 = 0;
	/* dma_tim.s.tim_mult = 0 --> 4x */
	tim_mult = 4;

	/* not spec'ed, value in eclocks, not affected by tim_mult */
	dma_arq = 8;
	/* Tz: remaining pause after subtracting dma_arq eclocks (in ns). */
	pause = 25 - dma_arq * 1000 /
		(octeon_get_clock_rate() / 1000000); /* Tz */

	oe_a = Td;
	/* Tkr from cf spec, lengthened to meet T0 */
	oe_n = max(T0 - oe_a, Tkr);

	dma_tim.s.dmack_pi = 1;

	dma_tim.s.oe_n = ns_to_tim_reg(tim_mult, oe_n);
	dma_tim.s.oe_a = ns_to_tim_reg(tim_mult, oe_a);

	/*
	 * This is tI, C.F. spec. says 0, but Sony CF card requires
	 * more, we use 20 nS.
	 */
	dma_tim.s.dmack_s = ns_to_tim_reg(tim_mult, 20);
	dma_tim.s.dmack_h = ns_to_tim_reg(tim_mult, dma_ackh);

	dma_tim.s.dmarq = dma_arq;
	dma_tim.s.pause = ns_to_tim_reg(tim_mult, pause);

	dma_tim.s.rd_dly = 0;	/* Sample right on edge */

	/* writes only */
	dma_tim.s.we_n = ns_to_tim_reg(tim_mult, oe_n);
	dma_tim.s.we_a = ns_to_tim_reg(tim_mult, oe_a);

	pr_debug("ns to ticks (mult %d) of %d is: %d\n", tim_mult, 60,
		 ns_to_tim_reg(tim_mult, 60));
	pr_debug("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: "
		 "%d, dmarq: %d, pause: %d\n",
		 dma_tim.s.oe_n, dma_tim.s.oe_a, dma_tim.s.dmack_s,
		 dma_tim.s.dmack_h, dma_tim.s.dmarq, dma_tim.s.pause);

	cvmx_write_csr(CVMX_MIO_BOOT_DMA_TIMX(ocd->dma_engine),
		       dma_tim.u64);
}
  202. /**
  203. * Handle an 8 bit I/O request.
  204. *
  205. * @dev: Device to access
  206. * @buffer: Data buffer
  207. * @buflen: Length of the buffer.
  208. * @rw: True to write.
  209. */
  210. static unsigned int octeon_cf_data_xfer8(struct ata_device *dev,
  211. unsigned char *buffer,
  212. unsigned int buflen,
  213. int rw)
  214. {
  215. struct ata_port *ap = dev->link->ap;
  216. void __iomem *data_addr = ap->ioaddr.data_addr;
  217. unsigned long words;
  218. int count;
  219. words = buflen;
  220. if (rw) {
  221. count = 16;
  222. while (words--) {
  223. iowrite8(*buffer, data_addr);
  224. buffer++;
  225. /*
  226. * Every 16 writes do a read so the bootbus
  227. * FIFO doesn't fill up.
  228. */
  229. if (--count == 0) {
  230. ioread8(ap->ioaddr.altstatus_addr);
  231. count = 16;
  232. }
  233. }
  234. } else {
  235. ioread8_rep(data_addr, buffer, words);
  236. }
  237. return buflen;
  238. }
  239. /**
  240. * Handle a 16 bit I/O request.
  241. *
  242. * @dev: Device to access
  243. * @buffer: Data buffer
  244. * @buflen: Length of the buffer.
  245. * @rw: True to write.
  246. */
  247. static unsigned int octeon_cf_data_xfer16(struct ata_device *dev,
  248. unsigned char *buffer,
  249. unsigned int buflen,
  250. int rw)
  251. {
  252. struct ata_port *ap = dev->link->ap;
  253. void __iomem *data_addr = ap->ioaddr.data_addr;
  254. unsigned long words;
  255. int count;
  256. words = buflen / 2;
  257. if (rw) {
  258. count = 16;
  259. while (words--) {
  260. iowrite16(*(uint16_t *)buffer, data_addr);
  261. buffer += sizeof(uint16_t);
  262. /*
  263. * Every 16 writes do a read so the bootbus
  264. * FIFO doesn't fill up.
  265. */
  266. if (--count == 0) {
  267. ioread8(ap->ioaddr.altstatus_addr);
  268. count = 16;
  269. }
  270. }
  271. } else {
  272. while (words--) {
  273. *(uint16_t *)buffer = ioread16(data_addr);
  274. buffer += sizeof(uint16_t);
  275. }
  276. }
  277. /* Transfer trailing 1 byte, if any. */
  278. if (unlikely(buflen & 0x01)) {
  279. __le16 align_buf[1] = { 0 };
  280. if (rw == READ) {
  281. align_buf[0] = cpu_to_le16(ioread16(data_addr));
  282. memcpy(buffer, align_buf, 1);
  283. } else {
  284. memcpy(align_buf, buffer, 1);
  285. iowrite16(le16_to_cpu(align_buf[0]), data_addr);
  286. }
  287. words++;
  288. }
  289. return buflen;
  290. }
  291. /**
  292. * Read the taskfile for 16bit non-True IDE only.
  293. */
  294. static void octeon_cf_tf_read16(struct ata_port *ap, struct ata_taskfile *tf)
  295. {
  296. u16 blob;
  297. /* The base of the registers is at ioaddr.data_addr. */
  298. void __iomem *base = ap->ioaddr.data_addr;
  299. blob = __raw_readw(base + 0xc);
  300. tf->feature = blob >> 8;
  301. blob = __raw_readw(base + 2);
  302. tf->nsect = blob & 0xff;
  303. tf->lbal = blob >> 8;
  304. blob = __raw_readw(base + 4);
  305. tf->lbam = blob & 0xff;
  306. tf->lbah = blob >> 8;
  307. blob = __raw_readw(base + 6);
  308. tf->device = blob & 0xff;
  309. tf->command = blob >> 8;
  310. if (tf->flags & ATA_TFLAG_LBA48) {
  311. if (likely(ap->ioaddr.ctl_addr)) {
  312. iowrite8(tf->ctl | ATA_HOB, ap->ioaddr.ctl_addr);
  313. blob = __raw_readw(base + 0xc);
  314. tf->hob_feature = blob >> 8;
  315. blob = __raw_readw(base + 2);
  316. tf->hob_nsect = blob & 0xff;
  317. tf->hob_lbal = blob >> 8;
  318. blob = __raw_readw(base + 4);
  319. tf->hob_lbam = blob & 0xff;
  320. tf->hob_lbah = blob >> 8;
  321. iowrite8(tf->ctl, ap->ioaddr.ctl_addr);
  322. ap->last_ctl = tf->ctl;
  323. } else {
  324. WARN_ON(1);
  325. }
  326. }
  327. }
  328. static u8 octeon_cf_check_status16(struct ata_port *ap)
  329. {
  330. u16 blob;
  331. void __iomem *base = ap->ioaddr.data_addr;
  332. blob = __raw_readw(base + 6);
  333. return blob >> 8;
  334. }
/*
 * Software reset for 16bit non-True IDE mode: pulse SRST through the
 * device control register (at offset 0xe from the data register),
 * wait for the device, then classify it by signature.
 */
static int octeon_cf_softreset16(struct ata_link *link, unsigned int *classes,
				 unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	void __iomem *base = ap->ioaddr.data_addr;
	int rc;
	u8 err;

	DPRINTK("about to softreset\n");
	/* SRST pulse with settling delays around each edge. */
	__raw_writew(ap->ctl, base + 0xe);
	udelay(20);
	__raw_writew(ap->ctl | ATA_SRST, base + 0xe);
	udelay(20);
	__raw_writew(ap->ctl, base + 0xe);

	rc = ata_sff_wait_after_reset(link, 1, deadline);
	if (rc) {
		ata_link_err(link, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_sff_dev_classify(&link->device[0], 1, &err);
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
/**
 * Load the taskfile for 16bit non-True IDE only.  The device_addr is
 * not loaded, we do this as part of octeon_cf_exec_command16.
 *
 * Registers are packed two per 16-bit word; the control register is
 * updated first (if changed), then the high-order (LBA48) bytes, then
 * the low-order bytes, matching the standard SFF ordering.
 */
static void octeon_cf_tf_load16(struct ata_port *ap,
				const struct ata_taskfile *tf)
{
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
	/* The base of the registers is at ioaddr.data_addr. */
	void __iomem *base = ap->ioaddr.data_addr;

	if (tf->ctl != ap->last_ctl) {
		iowrite8(tf->ctl, ap->ioaddr.ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}
	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		__raw_writew(tf->hob_feature << 8, base + 0xc);
		__raw_writew(tf->hob_nsect | tf->hob_lbal << 8, base + 2);
		__raw_writew(tf->hob_lbam | tf->hob_lbah << 8, base + 4);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}
	if (is_addr) {
		__raw_writew(tf->feature << 8, base + 0xc);
		__raw_writew(tf->nsect | tf->lbal << 8, base + 2);
		__raw_writew(tf->lbam | tf->lbah << 8, base + 4);
		VPRINTK("feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}
	ata_wait_idle(ap);
}
  397. static void octeon_cf_dev_select(struct ata_port *ap, unsigned int device)
  398. {
  399. /* There is only one device, do nothing. */
  400. return;
  401. }
  402. /*
  403. * Issue ATA command to host controller. The device_addr is also sent
  404. * as it must be written in a combined write with the command.
  405. */
  406. static void octeon_cf_exec_command16(struct ata_port *ap,
  407. const struct ata_taskfile *tf)
  408. {
  409. /* The base of the registers is at ioaddr.data_addr. */
  410. void __iomem *base = ap->ioaddr.data_addr;
  411. u16 blob;
  412. if (tf->flags & ATA_TFLAG_DEVICE) {
  413. VPRINTK("device 0x%X\n", tf->device);
  414. blob = tf->device;
  415. } else {
  416. blob = 0;
  417. }
  418. DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
  419. blob |= (tf->command << 8);
  420. __raw_writew(blob, base + 6);
  421. ata_wait_idle(ap);
  422. }
/* No per-port interrupt enable exists; nothing to do. */
static void octeon_cf_irq_on(struct ata_port *ap)
{
}
  426. static void octeon_cf_irq_clear(struct ata_port *ap)
  427. {
  428. return;
  429. }
  430. static void octeon_cf_dma_setup(struct ata_queued_cmd *qc)
  431. {
  432. struct ata_port *ap = qc->ap;
  433. struct octeon_cf_port *cf_port;
  434. cf_port = ap->private_data;
  435. DPRINTK("ENTER\n");
  436. /* issue r/w command */
  437. qc->cursg = qc->sg;
  438. cf_port->dma_finished = 0;
  439. ap->ops->sff_exec_command(ap, &qc->tf);
  440. DPRINTK("EXIT\n");
  441. }
/**
 * Start a DMA transfer that was already setup
 *
 * @qc: Information about the DMA
 *
 * Programs one scatterlist entry into the bootbus DMA engine: clears
 * and re-enables the completion interrupt, then writes the config
 * register (direction, size, address), which starts the transfer.
 */
static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
{
	struct octeon_cf_data *ocd = qc->ap->dev->platform_data;
	union cvmx_mio_boot_dma_cfgx mio_boot_dma_cfg;
	union cvmx_mio_boot_dma_intx mio_boot_dma_int;
	struct scatterlist *sg;

	VPRINTK("%d scatterlists\n", qc->n_elem);

	/* Get the scatter list entry we need to DMA into */
	sg = qc->cursg;
	BUG_ON(!sg);

	/*
	 * Clear the DMA complete status.
	 */
	mio_boot_dma_int.u64 = 0;
	mio_boot_dma_int.s.done = 1;
	cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine),
		       mio_boot_dma_int.u64);

	/* Enable the interrupt. */
	cvmx_write_csr(CVMX_MIO_BOOT_DMA_INT_ENX(ocd->dma_engine),
		       mio_boot_dma_int.u64);

	/* Set the direction of the DMA */
	mio_boot_dma_cfg.u64 = 0;
	mio_boot_dma_cfg.s.en = 1;
	mio_boot_dma_cfg.s.rw = ((qc->tf.flags & ATA_TFLAG_WRITE) != 0);

	/*
	 * Don't stop the DMA if the device deasserts DMARQ. Many
	 * compact flashes deassert DMARQ for a short time between
	 * sectors. Instead of stopping and restarting the DMA, we'll
	 * let the hardware do it. If the DMA is really stopped early
	 * due to an error condition, a later timeout will force us to
	 * stop.
	 */
	mio_boot_dma_cfg.s.clr = 0;

	/* Size is specified in 16bit words and minus one notation */
	mio_boot_dma_cfg.s.size = sg_dma_len(sg) / 2 - 1;

	/* We need to swap the high and low bytes of every 16 bits */
	mio_boot_dma_cfg.s.swap8 = 1;

	mio_boot_dma_cfg.s.adr = sg_dma_address(sg);

	VPRINTK("%s %d bytes address=%p\n",
		(mio_boot_dma_cfg.s.rw) ? "write" : "read", sg->length,
		(void *)(unsigned long)mio_boot_dma_cfg.s.adr);

	cvmx_write_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine),
		       mio_boot_dma_cfg.u64);
}
/**
 * Tear down a completed (or failed) bootbus DMA transfer and advance
 * the libata host state machine.  Returns 1 if handled, 0 if the port
 * was not waiting for a DMA completion.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static unsigned int octeon_cf_dma_finished(struct ata_port *ap,
					   struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct octeon_cf_data *ocd = ap->dev->platform_data;
	union cvmx_mio_boot_dma_cfgx dma_cfg;
	union cvmx_mio_boot_dma_intx dma_int;
	struct octeon_cf_port *cf_port;
	u8 status;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	if (ap->hsm_task_state != HSM_ST_LAST)
		return 0;

	cf_port = ap->private_data;

	dma_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine));
	/* size counts down; anything but the wrapped -1 means short transfer */
	if (dma_cfg.s.size != 0xfffff) {
		/* Error, the transfer was not complete. */
		qc->err_mask |= AC_ERR_HOST_BUS;
		ap->hsm_task_state = HSM_ST_ERR;
	}

	/* Stop and clear the dma engine. */
	dma_cfg.u64 = 0;
	dma_cfg.s.size = -1;
	cvmx_write_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine), dma_cfg.u64);

	/* Disable the interrupt. */
	dma_int.u64 = 0;
	cvmx_write_csr(CVMX_MIO_BOOT_DMA_INT_ENX(ocd->dma_engine), dma_int.u64);

	/* Clear the DMA complete status */
	dma_int.s.done = 1;
	cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine), dma_int.u64);

	status = ap->ops->sff_check_status(ap);

	ata_sff_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA))
		ata_ehi_push_desc(ehi, "DMA stat 0x%x", status);

	return 1;
}
/*
 * Check if any queued commands have more DMAs, if so start the next
 * transfer, else do end of transfer handling.
 */
static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct octeon_cf_port *cf_port;
	int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	DPRINTK("ENTER\n");
	for (i = 0; i < host->n_ports; i++) {
		u8 status;
		struct ata_port *ap;
		struct ata_queued_cmd *qc;
		union cvmx_mio_boot_dma_intx dma_int;
		union cvmx_mio_boot_dma_cfgx dma_cfg;
		struct octeon_cf_data *ocd;

		ap = host->ports[i];
		ocd = ap->dev->platform_data;
		cf_port = ap->private_data;

		dma_int.u64 =
			cvmx_read_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine));
		dma_cfg.u64 =
			cvmx_read_csr(CVMX_MIO_BOOT_DMA_CFGX(ocd->dma_engine));

		qc = ata_qc_from_tag(ap, ap->link.active_tag);

		if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING)) {
			/* "done" set with engine idle: one sg entry finished */
			if (dma_int.s.done && !dma_cfg.s.en) {
				if (!sg_is_last(qc->cursg)) {
					/* chain the next scatterlist entry */
					qc->cursg = sg_next(qc->cursg);
					handled = 1;
					octeon_cf_dma_start(qc);
					continue;
				} else {
					cf_port->dma_finished = 1;
				}
			}
			if (!cf_port->dma_finished)
				continue;
			status = ioread8(ap->ioaddr.altstatus_addr);
			if (status & (ATA_BUSY | ATA_DRQ)) {
				/*
				 * We are busy, try to handle it
				 * later. This is the DMA finished
				 * interrupt, and it could take a
				 * little while for the card to be
				 * ready for more commands.
				 */
				/* Clear DMA irq. */
				dma_int.u64 = 0;
				dma_int.s.done = 1;
				cvmx_write_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine),
					       dma_int.u64);

				queue_delayed_work(cf_port->wq,
						   &cf_port->delayed_finish, 1);
				handled = 1;
			} else {
				handled |= octeon_cf_dma_finished(ap, qc);
			}
		}
	}
	spin_unlock_irqrestore(&host->lock, flags);
	DPRINTK("EXIT\n");
	return IRQ_RETVAL(handled);
}
/*
 * Delayed-work handler: the device was still busy when the DMA-done
 * interrupt fired.  Finish the command once the device is ready, or
 * re-queue this work if it is still busy.
 */
static void octeon_cf_delayed_finish(struct work_struct *work)
{
	struct octeon_cf_port *cf_port = container_of(work,
						      struct octeon_cf_port,
						      delayed_finish.work);
	struct ata_port *ap = cf_port->ap;
	struct ata_host *host = ap->host;
	struct ata_queued_cmd *qc;
	unsigned long flags;
	u8 status;

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * If the port is not waiting for completion, it must have
	 * handled it previously.  The hsm_task_state is
	 * protected by host->lock.
	 */
	if (ap->hsm_task_state != HSM_ST_LAST || !cf_port->dma_finished)
		goto out;

	status = ioread8(ap->ioaddr.altstatus_addr);
	if (status & (ATA_BUSY | ATA_DRQ)) {
		/* Still busy, try again. */
		queue_delayed_work(cf_port->wq,
				   &cf_port->delayed_finish, 1);
		goto out;
	}

	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
		octeon_cf_dma_finished(ap, qc);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}
/* Clamp per-command size to what the bootbus DMA engine can move. */
static void octeon_cf_dev_config(struct ata_device *dev)
{
	/*
	 * A maximum of 2^20 - 1 16 bit transfers are possible with
	 * the bootbus DMA.  So we need to throttle max_sectors to
	 * (2^12 - 1 == 4095) to assure that this can never happen.
	 */
	dev->max_sectors = min(dev->max_sectors, 4095U);
}
/*
 * We don't do ATAPI DMA so return 0 (use PIO for ATAPI commands).
 */
static int octeon_cf_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return 0;
}
/*
 * Issue a queued command: DMA protocol commands are driven by this
 * driver's bootbus DMA engine; everything else goes through the
 * standard SFF path.
 */
static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->sff_tf_load(ap, &qc->tf);  /* load tf registers */
		octeon_cf_dma_setup(qc);	    /* set up dma */
		octeon_cf_dma_start(qc);	    /* initiate dma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATAPI_PROT_DMA:
		dev_err(ap->dev, "Error, ATAPI not supported\n");
		BUG();
		/* BUG() does not return; the fall-through is unreachable. */

	default:
		return ata_sff_qc_issue(qc);
	}

	return 0;
}
/*
 * Default port operations.  The data-transfer hook (and, for non-True
 * IDE 16-bit mode, several register-access hooks) are patched at probe
 * time based on the detected bus configuration.
 */
static struct ata_port_operations octeon_cf_ops = {
	.inherits		= &ata_sff_port_ops,
	.check_atapi_dma	= octeon_cf_check_atapi_dma,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= octeon_cf_qc_issue,
	.sff_dev_select		= octeon_cf_dev_select,
	.sff_irq_on		= octeon_cf_irq_on,
	.sff_irq_clear		= octeon_cf_irq_clear,
	.cable_detect		= ata_cable_40wire,
	.set_piomode		= octeon_cf_set_piomode,
	.set_dmamode		= octeon_cf_set_dmamode,
	.dev_config		= octeon_cf_dev_config,
};
/*
 * Probe a bootbus compact flash device: map the chip-select region(s),
 * select the 8-bit, True IDE 16-bit, or plain 16-bit register layout
 * from the platform data, and register the ATA host.
 */
static int __devinit octeon_cf_probe(struct platform_device *pdev)
{
	struct resource *res_cs0, *res_cs1;
	void __iomem *cs0;
	void __iomem *cs1 = NULL;
	struct ata_host *host;
	struct ata_port *ap;
	struct octeon_cf_data *ocd;
	int irq = 0;
	irq_handler_t irq_handler = NULL;
	void __iomem *base;
	struct octeon_cf_port *cf_port;
	char version[32];

	res_cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res_cs0)
		return -EINVAL;

	ocd = pdev->dev.platform_data;

	cs0 = devm_ioremap_nocache(&pdev->dev, res_cs0->start,
				   resource_size(res_cs0));
	if (!cs0)
		return -ENOMEM;

	/* Determine from availability of DMA if True IDE mode or not */
	if (ocd->dma_engine >= 0) {
		res_cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!res_cs1)
			return -EINVAL;

		cs1 = devm_ioremap_nocache(&pdev->dev, res_cs1->start,
					   resource_size(res_cs1));
		if (!cs1)
			return -ENOMEM;
	}

	cf_port = kzalloc(sizeof(*cf_port), GFP_KERNEL);
	if (!cf_port)
		return -ENOMEM;

	/* allocate host */
	host = ata_host_alloc(&pdev->dev, 1);
	if (!host)
		goto free_cf_port;

	ap = host->ports[0];
	ap->private_data = cf_port;
	cf_port->ap = ap;
	ap->ops = &octeon_cf_ops;
	ap->pio_mask = ATA_PIO6;
	ap->flags |= ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING;

	base = cs0 + ocd->base_region_bias;
	if (!ocd->is16bit) {
		/* 8-bit bus: byte-wide registers at consecutive addresses. */
		ap->ioaddr.cmd_addr = base;
		ata_sff_std_ports(&ap->ioaddr);

		ap->ioaddr.altstatus_addr = base + 0xe;
		ap->ioaddr.ctl_addr = base + 0xe;
		octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer8;
	} else if (cs1) {
		/* Presence of cs1 indicates True IDE mode. */
		/*
		 * 16-bit bus: each register sits at (reg << 1); data is
		 * the low byte, the task-file registers the odd byte.
		 */
		ap->ioaddr.cmd_addr = base + (ATA_REG_CMD << 1) + 1;
		ap->ioaddr.data_addr = base + (ATA_REG_DATA << 1);
		ap->ioaddr.error_addr = base + (ATA_REG_ERR << 1) + 1;
		ap->ioaddr.feature_addr = base + (ATA_REG_FEATURE << 1) + 1;
		ap->ioaddr.nsect_addr = base + (ATA_REG_NSECT << 1) + 1;
		ap->ioaddr.lbal_addr = base + (ATA_REG_LBAL << 1) + 1;
		ap->ioaddr.lbam_addr = base + (ATA_REG_LBAM << 1) + 1;
		ap->ioaddr.lbah_addr = base + (ATA_REG_LBAH << 1) + 1;
		ap->ioaddr.device_addr = base + (ATA_REG_DEVICE << 1) + 1;
		ap->ioaddr.status_addr = base + (ATA_REG_STATUS << 1) + 1;
		ap->ioaddr.command_addr = base + (ATA_REG_CMD << 1) + 1;
		ap->ioaddr.altstatus_addr = cs1 + (6 << 1) + 1;
		ap->ioaddr.ctl_addr = cs1 + (6 << 1) + 1;
		octeon_cf_ops.sff_data_xfer = octeon_cf_data_xfer16;

		ap->mwdma_mask = ATA_MWDMA4;
		irq = platform_get_irq(pdev, 0);
		irq_handler = octeon_cf_interrupt;

		/* True IDE mode needs delayed work to poll for not-busy. */
		cf_port->wq = create_singlethread_workqueue(DRV_NAME);
		if (!cf_port->wq)
			goto free_cf_port;
		INIT_DELAYED_WORK(&cf_port->delayed_finish,
				  octeon_cf_delayed_finish);
	} else {
		/* 16 bit but not True IDE */
		octeon_cf_ops.sff_data_xfer	= octeon_cf_data_xfer16;
		octeon_cf_ops.softreset		= octeon_cf_softreset16;
		octeon_cf_ops.sff_check_status	= octeon_cf_check_status16;
		octeon_cf_ops.sff_tf_read	= octeon_cf_tf_read16;
		octeon_cf_ops.sff_tf_load	= octeon_cf_tf_load16;
		octeon_cf_ops.sff_exec_command	= octeon_cf_exec_command16;
		ap->ioaddr.data_addr	= base + ATA_REG_DATA;
		ap->ioaddr.nsect_addr	= base + ATA_REG_NSECT;
		ap->ioaddr.lbal_addr	= base + ATA_REG_LBAL;
		ap->ioaddr.ctl_addr	= base + 0xe;
		ap->ioaddr.altstatus_addr = base + 0xe;
	}

	ata_port_desc(ap, "cmd %p ctl %p", base, ap->ioaddr.ctl_addr);

	snprintf(version, sizeof(version), "%s %d bit%s",
		 DRV_VERSION,
		 (ocd->is16bit) ? 16 : 8,
		 (cs1) ? ", True IDE" : "");
	ata_print_version_once(&pdev->dev, version);

	return ata_host_activate(host, irq, irq_handler, 0, &octeon_cf_sht);

free_cf_port:
	kfree(cf_port);
	return -ENOMEM;
}
/* Platform driver; matched by name against the board's platform device. */
static struct platform_driver octeon_cf_driver = {
	.probe		= octeon_cf_probe,
	.driver		= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
	},
};
/* Module entry point: register the platform driver. */
static int __init octeon_cf_init(void)
{
	return platform_driver_register(&octeon_cf_driver);
}
  791. MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>");
  792. MODULE_DESCRIPTION("low-level driver for Cavium OCTEON Compact Flash PATA");
  793. MODULE_LICENSE("GPL");
  794. MODULE_VERSION(DRV_VERSION);
  795. MODULE_ALIAS("platform:" DRV_NAME);
  796. module_init(octeon_cf_init);