pata_pxa.c

/*
 * Generic PXA PATA driver
 *
 * Copyright (C) 2010 Marek Vasut <marek.vasut@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/ata.h>
#include <linux/libata.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma/pxa-dma.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <scsi/scsi_host.h>

#include <linux/platform_data/ata-pxa.h>

#define DRV_NAME	"pata_pxa"
#define DRV_VERSION	"0.1"

struct pata_pxa_data {
	struct dma_chan		*dma_chan;	/* DMA channel for BMDMA transfers */
	dma_cookie_t		dma_cookie;	/* cookie of the submitted descriptor */
	struct completion	dma_done;	/* completed from the DMA callback */
};
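
/*
 * DMA flow: ->qc_prep() wraps the request's scatterlist in a dmaengine
 * slave descriptor and submits it, ->bmdma_setup() only issues the ATA
 * command, ->bmdma_start() kicks the pending descriptor, and
 * ->bmdma_stop() waits for the completion signalled by the callback below
 * before terminating the channel.
 */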

/*
 * DMA interrupt handler.
 */
static void pxa_ata_dma_irq(void *d)
{
	struct pata_pxa_data *pd = d;
	enum dma_status status;

	status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, NULL);
	if (status == DMA_ERROR || status == DMA_COMPLETE)
		complete(&pd->dma_done);
}

/*
 * Prepare taskfile for submission.
 */
static enum ata_completion_errors pxa_qc_prep(struct ata_queued_cmd *qc)
{
	struct pata_pxa_data *pd = qc->ap->private_data;
	struct dma_async_tx_descriptor *tx;
	enum dma_transfer_direction dir;

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return AC_ERR_OK;

	dir = (qc->dma_dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
	tx = dmaengine_prep_slave_sg(pd->dma_chan, qc->sg, qc->n_elem, dir,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		ata_dev_err(qc->dev, "prep_slave_sg() failed\n");
		return AC_ERR_OK;
	}
	tx->callback = pxa_ata_dma_irq;
	tx->callback_param = pd;
	pd->dma_cookie = dmaengine_submit(tx);

	return AC_ERR_OK;
}
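
/*
 * Note that a failed descriptor preparation is only logged above;
 * pxa_qc_prep() still returns AC_ERR_OK, so the command is issued even
 * without a prepared DMA descriptor.
 */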

/*
 * Configure the DMA controller, load the DMA descriptors, but don't start the
 * DMA controller yet. Only issue the ATA command.
 */
static void pxa_bmdma_setup(struct ata_queued_cmd *qc)
{
	qc->ap->ops->sff_exec_command(qc->ap, &qc->tf);
}

/*
 * Execute the DMA transfer.
 */
static void pxa_bmdma_start(struct ata_queued_cmd *qc)
{
	struct pata_pxa_data *pd = qc->ap->private_data;

	init_completion(&pd->dma_done);
	dma_async_issue_pending(pd->dma_chan);
}

/*
 * Wait until the DMA transfer completes, then stop the DMA controller.
 */
static void pxa_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct pata_pxa_data *pd = qc->ap->private_data;
	enum dma_status status;

	status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, NULL);
	/* wait_for_completion_timeout() returns 0 on timeout */
	if (status != DMA_ERROR && status != DMA_COMPLETE &&
	    !wait_for_completion_timeout(&pd->dma_done, HZ))
		ata_dev_err(qc->dev, "Timeout waiting for DMA completion!");

	dmaengine_terminate_all(pd->dma_chan);
}

/*
 * Read DMA status. The bmdma_stop() will take care of properly finishing the
 * DMA transfer, so we always have a DMA-complete interrupt here.
 */
static unsigned char pxa_bmdma_status(struct ata_port *ap)
{
	struct pata_pxa_data *pd = ap->private_data;
	unsigned char ret = ATA_DMA_INTR;
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(pd->dma_chan, pd->dma_cookie, &state);
	if (status != DMA_COMPLETE)
		ret |= ATA_DMA_ERR;

	return ret;
}

/*
 * No IRQ register present so we do nothing.
 */
static void pxa_irq_clear(struct ata_port *ap)
{
}

/*
 * Check for ATAPI DMA. ATAPI DMA is unsupported by this driver. It's still
 * unclear why ATAPI has DMA issues.
 */
static int pxa_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return -EOPNOTSUPP;
}
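
/*
 * SFF/BMDMA host template and port operations. The port inherits
 * ata_bmdma_port_ops and replaces the BMDMA hooks and ->qc_prep() with the
 * dmaengine-based helpers above; ATAPI DMA is refused and there is no IRQ
 * status register to clear.
 */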
static struct scsi_host_template pxa_ata_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};

static struct ata_port_operations pxa_ata_port_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.cable_detect		= ata_cable_40wire,

	.bmdma_setup		= pxa_bmdma_setup,
	.bmdma_start		= pxa_bmdma_start,
	.bmdma_stop		= pxa_bmdma_stop,
	.bmdma_status		= pxa_bmdma_status,

	.check_atapi_dma	= pxa_check_atapi_dma,

	.sff_irq_clear		= pxa_irq_clear,

	.qc_prep		= pxa_qc_prep,
};
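
/*
 * Probe: map the CMD, CTL and DMA register windows described by the
 * platform resources, request and configure the PXA dmaengine channel used
 * for the transfers, and register the single-port ATA host.
 */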
static int pxa_ata_probe(struct platform_device *pdev)
{
	struct ata_host *host;
	struct ata_port *ap;
	struct pata_pxa_data *data;
	struct resource *cmd_res;
	struct resource *ctl_res;
	struct resource *dma_res;
	struct resource *irq_res;
	struct pata_pxa_pdata *pdata = dev_get_platdata(&pdev->dev);
	struct dma_slave_config config;
	dma_cap_mask_t mask;
	struct pxad_param param;
	int ret = 0;

	/*
	 * Resource validation, four resources are needed:
	 *  - CMD port base address
	 *  - CTL port base address
	 *  - DMA port base address
	 *  - IRQ pin
	 */
	if (pdev->num_resources != 4) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * CMD port base address
	 */
	cmd_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(cmd_res == NULL))
		return -EINVAL;

	/*
	 * CTL port base address
	 */
	ctl_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (unlikely(ctl_res == NULL))
		return -EINVAL;

	/*
	 * DMA port base address
	 */
	dma_res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (unlikely(dma_res == NULL))
		return -EINVAL;

	/*
	 * IRQ pin
	 */
	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (unlikely(irq_res == NULL))
		return -EINVAL;

	/*
	 * Allocate the host
	 */
	host = ata_host_alloc(&pdev->dev, 1);
	if (!host)
		return -ENOMEM;

	ap = host->ports[0];
	ap->ops = &pxa_ata_port_ops;
	ap->pio_mask = ATA_PIO4;
	ap->mwdma_mask = ATA_MWDMA2;

	ap->ioaddr.cmd_addr	= devm_ioremap(&pdev->dev, cmd_res->start,
						resource_size(cmd_res));
	ap->ioaddr.ctl_addr	= devm_ioremap(&pdev->dev, ctl_res->start,
						resource_size(ctl_res));
	ap->ioaddr.bmdma_addr	= devm_ioremap(&pdev->dev, dma_res->start,
						resource_size(dma_res));

	/*
	 * Adjust register offsets
	 */
	ap->ioaddr.altstatus_addr = ap->ioaddr.ctl_addr;
	ap->ioaddr.data_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_DATA << pdata->reg_shift);
	ap->ioaddr.error_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_ERR << pdata->reg_shift);
	ap->ioaddr.feature_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_FEATURE << pdata->reg_shift);
	ap->ioaddr.nsect_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_NSECT << pdata->reg_shift);
	ap->ioaddr.lbal_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_LBAL << pdata->reg_shift);
	ap->ioaddr.lbam_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_LBAM << pdata->reg_shift);
	ap->ioaddr.lbah_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_LBAH << pdata->reg_shift);
	ap->ioaddr.device_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_DEVICE << pdata->reg_shift);
	ap->ioaddr.status_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_STATUS << pdata->reg_shift);
	ap->ioaddr.command_addr	= ap->ioaddr.cmd_addr +
					(ATA_REG_CMD << pdata->reg_shift);

	/*
	 * Allocate and load driver's internal data structure
	 */
	data = devm_kzalloc(&pdev->dev, sizeof(struct pata_pxa_data),
							GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	ap->private_data = data;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	param.prio = PXAD_PRIO_LOWEST;
	param.drcmr = pdata->dma_dreq;

	/* DMA slave config: 16-bit transfers to/from the ATA DMA port */
	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	config.src_addr = dma_res->start;
	config.dst_addr = dma_res->start;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;

	/*
	 * Request the DMA channel
	 */
	data->dma_chan =
		dma_request_slave_channel_compat(mask, pxad_filter_fn,
						 &param, &pdev->dev, "data");
	if (!data->dma_chan)
		return -EBUSY;
	ret = dmaengine_slave_config(data->dma_chan, &config);
	if (ret < 0) {
		dev_err(&pdev->dev, "dma configuration failed: %d\n", ret);
		return ret;
	}

	/*
	 * Activate the ATA host
	 */
	ret = ata_host_activate(host, irq_res->start, ata_sff_interrupt,
				pdata->irq_flags, &pxa_ata_sht);
	if (ret)
		dma_release_channel(data->dma_chan);

	return ret;
}
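
/*
 * Unwind probe: release the DMA channel and detach the ATA host. The
 * register mappings and the private data are devm-managed and released
 * automatically.
 */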
static int pxa_ata_remove(struct platform_device *pdev)
{
	struct ata_host *host = platform_get_drvdata(pdev);
	struct pata_pxa_data *data = host->ports[0]->private_data;

	dma_release_channel(data->dma_chan);

	ata_host_detach(host);

	return 0;
}

static struct platform_driver pxa_ata_driver = {
	.probe		= pxa_ata_probe,
	.remove		= pxa_ata_remove,
	.driver		= {
		.name		= DRV_NAME,
	},
};

module_platform_driver(pxa_ata_driver);

MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
MODULE_DESCRIPTION("DMA-capable driver for PATA on PXA CPU");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);
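
/*
 * Example (not part of the driver): a minimal sketch of how a board file
 * might register this device with the four resources and the platform data
 * fields the probe above expects. All addresses, the GPIO/IRQ number and
 * the DRCMR line below are placeholders, and the pdata values are only
 * illustrative; real values are machine-specific.
 */
#if 0
static struct resource board_pata_resources[] = {
	[0] = {	/* CMD register block */
		.start	= 0x20000000,
		.end	= 0x2000003f,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {	/* CTL register block */
		.start	= 0x20000040,
		.end	= 0x2000004f,
		.flags	= IORESOURCE_MEM,
	},
	[2] = {	/* DMA data port */
		.start	= 0x20000080,
		.end	= 0x20000083,
		.flags	= IORESOURCE_DMA,
	},
	[3] = {	/* IDE interrupt pin */
		.start	= PXA_GPIO_TO_IRQ(20),
		.end	= PXA_GPIO_TO_IRQ(20),
		.flags	= IORESOURCE_IRQ,
	},
};

static struct pata_pxa_pdata board_pata_pdata = {
	.reg_shift	= 1,	/* registers spaced on 16-bit boundaries */
	.dma_dreq	= 1,	/* DREQ line wired to the CF/IDE socket */
	.irq_flags	= IRQF_TRIGGER_RISING,
};

static struct platform_device board_pata_device = {
	.name		= "pata_pxa",
	.id		= -1,
	.resource	= board_pata_resources,
	.num_resources	= ARRAY_SIZE(board_pata_resources),
	.dev		= {
		.platform_data		= &board_pata_pdata,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
};

/* registered from the board's init code with platform_device_register() */
#endif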