/*
 *  sata_qstor.c - Pacific Digital Corporation QStor SATA
 *
 *  Maintained by:  Mark Lord <mlord@pobox.com>
 *
 *  Copyright 2005 Pacific Digital Corporation.
 *  (OSL/GPL code release authorized by Jalil Fadavi).
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_qstor"
#define DRV_VERSION	"0.09"
enum {
	QS_MMIO_BAR		= 4,

	QS_PORTS		= 4,
	QS_MAX_PRD		= LIBATA_MAX_PRD,
	QS_CPB_ORDER		= 6,
	QS_CPB_BYTES		= (1 << QS_CPB_ORDER),
	QS_PRD_BYTES		= QS_MAX_PRD * 16,
	QS_PKT_BYTES		= QS_CPB_BYTES + QS_PRD_BYTES,

	/* global register offsets */
	QS_HCF_CNFG3		= 0x0003, /* host configuration offset */
	QS_HID_HPHY		= 0x0004, /* host physical interface info */
	QS_HCT_CTRL		= 0x00e4, /* global interrupt mask offset */
	QS_HST_SFF		= 0x0100, /* host status fifo offset */
	QS_HVS_SERD3		= 0x0393, /* PHY enable offset */

	/* global control bits */
	QS_HPHY_64BIT		= (1 << 1), /* 64-bit bus detected */
	QS_CNFG3_GSRST		= 0x01,     /* global chip reset */
	QS_SERD3_PHY_ENA	= 0xf0,     /* PHY detection ENAble */

	/* per-channel register offsets */
	QS_CCF_CPBA		= 0x0710, /* chan CPB base address */
	QS_CCF_CSEP		= 0x0718, /* chan CPB separation factor */
	QS_CFC_HUFT		= 0x0800, /* host upstream fifo threshold */
	QS_CFC_HDFT		= 0x0804, /* host downstream fifo threshold */
	QS_CFC_DUFT		= 0x0808, /* dev upstream fifo threshold */
	QS_CFC_DDFT		= 0x080c, /* dev downstream fifo threshold */
	QS_CCT_CTR0		= 0x0900, /* chan control-0 offset */
	QS_CCT_CTR1		= 0x0901, /* chan control-1 offset */
	QS_CCT_CFF		= 0x0a00, /* chan command fifo offset */

	/* channel control bits */
	QS_CTR0_REG		= (1 << 1), /* register mode (vs. pkt mode) */
	QS_CTR0_CLER		= (1 << 2), /* clear channel errors */
	QS_CTR1_RDEV		= (1 << 1), /* sata phy/comms reset */
	QS_CTR1_RCHN		= (1 << 4), /* reset channel logic */
	QS_CCF_RUN_PKT		= 0x107,    /* RUN a new dma PKT */

	/* pkt sub-field headers */
	QS_HCB_HDR		= 0x01, /* Host Control Block header */
	QS_DCB_HDR		= 0x02, /* Device Control Block header */

	/* pkt HCB flag bits */
	QS_HF_DIRO		= (1 << 0), /* data DIRection Out */
	QS_HF_DAT		= (1 << 3), /* DATa pkt */
	QS_HF_IEN		= (1 << 4), /* Interrupt ENable */
	QS_HF_VLD		= (1 << 5), /* VaLiD pkt */

	/* pkt DCB flag bits */
	QS_DF_PORD		= (1 << 2), /* Pio OR Dma */
	QS_DF_ELBA		= (1 << 3), /* Extended LBA (lba48) */

	/* PCI device IDs */
	board_2068_idx		= 0,	/* QStor 4-port SATA/RAID */
};
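/*
 * Per-port command packet layout, as built by qs_qc_prep() below:
 * a QS_CPB_BYTES (64-byte) control block holding the HCB, DCB, and
 * FIS, followed immediately by the PRD (scatter/gather) table of up
 * to QS_MAX_PRD entries, 16 bytes each.
 */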
enum {
	QS_DMA_BOUNDARY		= ~0UL
};
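/*
 * Each channel is either in "register" (MMIO/SFF taskfile) mode or in
 * "packet" (DMA) mode; qs_qc_issue() selects the state per command and
 * the interrupt handlers dispatch on it.
 */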
typedef enum { qs_state_mmio, qs_state_pkt } qs_state_t;

struct qs_port_priv {
	u8			*pkt;
	dma_addr_t		pkt_dma;
	qs_state_t		state;
};
static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static int qs_port_start(struct ata_port *ap);
static void qs_host_stop(struct ata_host *host);
static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc);
static unsigned int qs_qc_issue(struct ata_queued_cmd *qc);
static int qs_check_atapi_dma(struct ata_queued_cmd *qc);
static void qs_freeze(struct ata_port *ap);
static void qs_thaw(struct ata_port *ap);
static int qs_prereset(struct ata_link *link, unsigned long deadline);
static void qs_error_handler(struct ata_port *ap);

static struct scsi_host_template qs_ata_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= QS_MAX_PRD,
	.dma_boundary		= QS_DMA_BOUNDARY,
};

static struct ata_port_operations qs_ata_ops = {
	.inherits		= &ata_sff_port_ops,

	.check_atapi_dma	= qs_check_atapi_dma,
	.qc_prep		= qs_qc_prep,
	.qc_issue		= qs_qc_issue,

	.freeze			= qs_freeze,
	.thaw			= qs_thaw,
	.prereset		= qs_prereset,
	.softreset		= ATA_OP_NULL,
	.error_handler		= qs_error_handler,
	.lost_interrupt		= ATA_OP_NULL,

	.scr_read		= qs_scr_read,
	.scr_write		= qs_scr_write,

	.port_start		= qs_port_start,
	.host_stop		= qs_host_stop,
};

static const struct ata_port_info qs_port_info[] = {
	/* board_2068_idx */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,
		.pio_mask	= ATA_PIO4_ONLY,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &qs_ata_ops,
	},
};

static const struct pci_device_id qs_ata_pci_tbl[] = {
	{ PCI_VDEVICE(PDC, 0x2068), board_2068_idx },

	{ }	/* terminate list */
};

static struct pci_driver qs_ata_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= qs_ata_pci_tbl,
	.probe			= qs_ata_init_one,
	.remove			= ata_pci_remove_one,
};
static void __iomem *qs_mmio_base(struct ata_host *host)
{
	return host->iomap[QS_MMIO_BAR];
}

static int qs_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return 1;	/* ATAPI DMA not supported */
}
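/*
 * Each channel's registers occupy a 0x4000-byte window within the MMIO
 * BAR.  Writing QS_CTR0_REG switches the channel from packet (DMA) mode
 * back to register mode; the readb() following each write flushes the
 * posted MMIO write to the chip.
 */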
static inline void qs_enter_reg_mode(struct ata_port *ap)
{
	u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
	struct qs_port_priv *pp = ap->private_data;

	pp->state = qs_state_mmio;
	writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
	readb(chan + QS_CCT_CTR0);	/* flush */
}

static inline void qs_reset_channel_logic(struct ata_port *ap)
{
	u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);

	writeb(QS_CTR1_RCHN, chan + QS_CCT_CTR1);
	readb(chan + QS_CCT_CTR0);	/* flush */
	qs_enter_reg_mode(ap);
}
static void qs_freeze(struct ata_port *ap)
{
	u8 __iomem *mmio_base = qs_mmio_base(ap->host);

	writeb(0, mmio_base + QS_HCT_CTRL);	/* disable host interrupts */
	qs_enter_reg_mode(ap);
}

static void qs_thaw(struct ata_port *ap)
{
	u8 __iomem *mmio_base = qs_mmio_base(ap->host);

	qs_enter_reg_mode(ap);
	writeb(1, mmio_base + QS_HCT_CTRL);	/* enable host interrupts */
}

static int qs_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;

	qs_reset_channel_logic(ap);
	return ata_sff_prereset(link, deadline);
}

static int qs_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	*val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 8));
	return 0;
}

static void qs_error_handler(struct ata_port *ap)
{
	qs_enter_reg_mode(ap);
	ata_sff_error_handler(ap);
}

static int qs_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 8));
	return 0;
}
static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	struct ata_port *ap = qc->ap;
	struct qs_port_priv *pp = ap->private_data;
	u8 *prd = pp->pkt + QS_CPB_BYTES;
	unsigned int si;
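
	/*
	 * Each PRD entry is 16 bytes: a little-endian 64-bit bus address,
	 * a 32-bit length, and 4 unused pad bytes -- which is why prd
	 * advances by sizeof(u64) even after the 32-bit length store.
	 */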
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u64 addr;
		u32 len;

		addr = sg_dma_address(sg);
		*(__le64 *)prd = cpu_to_le64(addr);
		prd += sizeof(u64);

		len = sg_dma_len(sg);
		*(__le32 *)prd = cpu_to_le32(len);
		prd += sizeof(u64);

		VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", si,
			(unsigned long long)addr, len);
	}
	return si;
}
static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc)
{
	struct qs_port_priv *pp = qc->ap->private_data;
	u8 dflags = QS_DF_PORD, *buf = pp->pkt;
	u8 hflags = QS_HF_DAT | QS_HF_IEN | QS_HF_VLD;
	u64 addr;
	unsigned int nelem;

	VPRINTK("ENTER\n");

	qs_enter_reg_mode(qc->ap);
	if (qc->tf.protocol != ATA_PROT_DMA)
		return AC_ERR_OK;

	nelem = qs_fill_sg(qc);

	if ((qc->tf.flags & ATA_TFLAG_WRITE))
		hflags |= QS_HF_DIRO;
	if ((qc->tf.flags & ATA_TFLAG_LBA48))
		dflags |= QS_DF_ELBA;

	/* host control block (HCB) */
	buf[ 0] = QS_HCB_HDR;
	buf[ 1] = hflags;
	*(__le32 *)(&buf[ 4]) = cpu_to_le32(qc->nbytes);
	*(__le32 *)(&buf[ 8]) = cpu_to_le32(nelem);
	addr = ((u64)pp->pkt_dma) + QS_CPB_BYTES;
	*(__le64 *)(&buf[16]) = cpu_to_le64(addr);

	/* device control block (DCB) */
	buf[24] = QS_DCB_HDR;
	buf[28] = dflags;

	/* frame information structure (FIS) */
	ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]);

	return AC_ERR_OK;
}
static inline void qs_packet_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);

	VPRINTK("ENTER, ap %p\n", ap);

	writeb(QS_CTR0_CLER, chan + QS_CCT_CTR0);
	wmb();	/* flush PRDs and pkt to memory */
	writel(QS_CCF_RUN_PKT, chan + QS_CCT_CFF);
	readl(chan + QS_CCT_CFF);	/* flush */
}

static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
{
	struct qs_port_priv *pp = qc->ap->private_data;

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		pp->state = qs_state_pkt;
		qs_packet_start(qc);
		return 0;

	case ATAPI_PROT_DMA:
		BUG();
		break;

	default:
		break;
	}

	pp->state = qs_state_mmio;
	return ata_sff_qc_issue(qc);
}
static void qs_do_or_die(struct ata_queued_cmd *qc, u8 status)
{
	qc->err_mask |= ac_err_mask(status);

	if (!qc->err_mask) {
		ata_qc_complete(qc);
	} else {
		struct ata_port *ap = qc->ap;
		struct ata_eh_info *ehi = &ap->link.eh_info;

		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "status 0x%02X", status);

		if (qc->err_mask == AC_ERR_DEV)
			ata_port_abort(ap);
		else
			ata_port_freeze(ap);
	}
}
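/*
 * Completion entries are drained from the 64-bit host status FIFO at
 * QS_HST_SFF, read as two 32-bit halves.  In the high word: bit 31 is
 * the FIFO-empty flag, bit 30 the entry-valid flag, bits 9:8 the
 * channel number, and bits 5:0 the host (channel) status.  Bits 23:16
 * of the low word carry the device status register.  Polled commands
 * (ATA_TFLAG_POLLING) are completed by the polling path instead, so
 * both interrupt handlers skip them.
 */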
static inline unsigned int qs_intr_pkt(struct ata_host *host)
{
	unsigned int handled = 0;
	u8 sFFE;
	u8 __iomem *mmio_base = qs_mmio_base(host);

	do {
		u32 sff0 = readl(mmio_base + QS_HST_SFF);
		u32 sff1 = readl(mmio_base + QS_HST_SFF + 4);
		u8 sEVLD = (sff1 >> 30) & 0x01;	/* valid flag */
		sFFE  = sff1 >> 31;		/* empty flag */
		if (sEVLD) {
			u8 sDST = sff0 >> 16;	/* dev status */
			u8 sHST = sff1 & 0x3f;	/* host status */
			unsigned int port_no = (sff1 >> 8) & 0x03;
			struct ata_port *ap = host->ports[port_no];
			struct qs_port_priv *pp = ap->private_data;
			struct ata_queued_cmd *qc;

			DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
					sff1, sff0, port_no, sHST, sDST);
			handled = 1;
			if (!pp || pp->state != qs_state_pkt)
				continue;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
				switch (sHST) {
				case 0: /* successful CPB */
				case 3: /* device error */
					qs_enter_reg_mode(qc->ap);
					qs_do_or_die(qc, sDST);
					break;
				default:
					break;
				}
			}
		}
	} while (!sFFE);
	return handled;
}
static inline unsigned int qs_intr_mmio(struct ata_host *host)
{
	unsigned int handled = 0, port_no;

	for (port_no = 0; port_no < host->n_ports; ++port_no) {
		struct ata_port *ap = host->ports[port_no];
		struct qs_port_priv *pp = ap->private_data;
		struct ata_queued_cmd *qc;

		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (!qc) {
			/*
			 * The qstor hardware generates spurious
			 * interrupts from time to time when switching
			 * in and out of packet mode.  There's no
			 * obvious way to know if we're here now due
			 * to that, so just ack the irq and pretend we
			 * knew it was ours.. (ugh).  This does not
			 * affect packet mode.
			 */
			ata_sff_check_status(ap);
			handled = 1;
			continue;
		}
		if (!pp || pp->state != qs_state_mmio)
			continue;
		if (!(qc->tf.flags & ATA_TFLAG_POLLING))
			handled |= ata_sff_port_intr(ap, qc);
	}
	return handled;
}

static irqreturn_t qs_intr(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int handled = 0;
	unsigned long flags;

	VPRINTK("ENTER\n");

	spin_lock_irqsave(&host->lock, flags);
	handled = qs_intr_pkt(host) | qs_intr_mmio(host);
	spin_unlock_irqrestore(&host->lock, flags);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}
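/*
 * Taskfile registers are spaced 8 bytes apart in this MMIO layout, with
 * the lba48 "hob" shadow of each register at the following byte (hence
 * the hob_* offsets noted below).
 */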
static void qs_ata_setup_port(struct ata_ioports *port, void __iomem *base)
{
	port->cmd_addr		=
	port->data_addr		= base + 0x400;
	port->error_addr	=
	port->feature_addr	= base + 0x408; /* hob_feature = 0x409 */
	port->nsect_addr	= base + 0x410; /* hob_nsect   = 0x411 */
	port->lbal_addr		= base + 0x418; /* hob_lbal    = 0x419 */
	port->lbam_addr		= base + 0x420; /* hob_lbam    = 0x421 */
	port->lbah_addr		= base + 0x428; /* hob_lbah    = 0x429 */
	port->device_addr	= base + 0x430;
	port->status_addr	=
	port->command_addr	= base + 0x438;
	port->altstatus_addr	=
	port->ctl_addr		= base + 0x440;
	port->scr_addr		= base + 0xc00;
}
static int qs_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct qs_port_priv *pp;
	void __iomem *mmio_base = qs_mmio_base(ap->host);
	void __iomem *chan = mmio_base + (ap->port_no * 0x4000);
	u64 addr;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	pp->pkt = dmam_alloc_coherent(dev, QS_PKT_BYTES, &pp->pkt_dma,
				      GFP_KERNEL);
	if (!pp->pkt)
		return -ENOMEM;
	memset(pp->pkt, 0, QS_PKT_BYTES);
	ap->private_data = pp;

	qs_enter_reg_mode(ap);
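
	/*
	 * Hand the channel the bus address of its command packet; the
	 * upper half is simply zero when DMA is 32-bit limited (see
	 * qs_set_dma_masks() below).
	 */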
	addr = (u64)pp->pkt_dma;
	writel((u32) addr,        chan + QS_CCF_CPBA);
	writel((u32)(addr >> 32), chan + QS_CCF_CPBA + 4);
	return 0;
}

static void qs_host_stop(struct ata_host *host)
{
	void __iomem *mmio_base = qs_mmio_base(host);

	writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
	writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
}
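/*
 * One-time controller bring-up: mask interrupts, pulse the global
 * reset, reset each channel's phy and logic, enable the PHYs, and
 * program the FIFO thresholds before re-enabling interrupts.
 */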
static void qs_host_init(struct ata_host *host, unsigned int chip_id)
{
	void __iomem *mmio_base = host->iomap[QS_MMIO_BAR];
	unsigned int port_no;

	writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
	writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */

	/* reset each channel in turn */
	for (port_no = 0; port_no < host->n_ports; ++port_no) {
		u8 __iomem *chan = mmio_base + (port_no * 0x4000);
		writeb(QS_CTR1_RDEV|QS_CTR1_RCHN, chan + QS_CCT_CTR1);
		writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
		readb(chan + QS_CCT_CTR0);	/* flush */
	}
	writeb(QS_SERD3_PHY_ENA, mmio_base + QS_HVS_SERD3); /* enable phy */

	for (port_no = 0; port_no < host->n_ports; ++port_no) {
		u8 __iomem *chan = mmio_base + (port_no * 0x4000);
		/* set FIFO depths to same settings as Windows driver */
		writew(32, chan + QS_CFC_HUFT);
		writew(32, chan + QS_CFC_HDFT);
		writew(10, chan + QS_CFC_DUFT);
		writew( 8, chan + QS_CFC_DDFT);
		/* set CPB size in bytes, as a power of two */
		writeb(QS_CPB_ORDER, chan + QS_CCF_CSEP);
	}
	writeb(1, mmio_base + QS_HCT_CTRL); /* enable host interrupts */
}
/*
 * The QStor understands 64-bit buses, and uses 64-bit fields
 * for DMA pointers regardless of bus width.  We just have to
 * make sure our DMA masks are set appropriately for whatever
 * bridge lies between us and the QStor, and then the DMA mapping
 * code will ensure we only ever "see" appropriate buffer addresses.
 * If we're 32-bit limited somewhere, then our 64-bit fields will
 * just end up with zeros in the upper 32-bits, without any special
 * logic required outside of this routine (below).
 */
static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
{
	u32 bus_info = readl(mmio_base + QS_HID_HPHY);
	int rc, have_64bit_bus = (bus_info & QS_HPHY_64BIT);

	if (have_64bit_bus &&
	    !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
		if (rc) {
			rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
			if (rc) {
				dev_err(&pdev->dev,
					"64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev, "32-bit DMA enable failed\n");
			return rc;
		}
		rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev,
				"32-bit consistent DMA enable failed\n");
			return rc;
		}
	}
	return 0;
}
static int qs_ata_init_one(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	unsigned int board_idx = (unsigned int) ent->driver_data;
	const struct ata_port_info *ppi[] = { &qs_port_info[board_idx], NULL };
	struct ata_host *host;
	int rc, port_no;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* alloc host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, QS_PORTS);
	if (!host)
		return -ENOMEM;

	/* acquire resources and fill host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	if ((pci_resource_flags(pdev, QS_MMIO_BAR) & IORESOURCE_MEM) == 0)
		return -ENODEV;

	rc = pcim_iomap_regions(pdev, 1 << QS_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = qs_set_dma_masks(pdev, host->iomap[QS_MMIO_BAR]);
	if (rc)
		return rc;

	for (port_no = 0; port_no < host->n_ports; ++port_no) {
		struct ata_port *ap = host->ports[port_no];
		unsigned int offset = port_no * 0x4000;
		void __iomem *chan = host->iomap[QS_MMIO_BAR] + offset;

		qs_ata_setup_port(&ap->ioaddr, chan);

		ata_port_pbar_desc(ap, QS_MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, QS_MMIO_BAR, offset, "port");
	}

	/* initialize adapter */
	qs_host_init(host, board_idx);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, qs_intr, IRQF_SHARED,
				 &qs_ata_sht);
}

module_pci_driver(qs_ata_pci_driver);

MODULE_AUTHOR("Mark Lord");
MODULE_DESCRIPTION("Pacific Digital Corporation QStor SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, qs_ata_pci_tbl);
MODULE_VERSION(DRV_VERSION);