sun_esp.c 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662
  1. /* sun_esp.c: ESP front-end for Sparc SBUS systems.
  2. *
  3. * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
  4. */
  5. #include <linux/kernel.h>
  6. #include <linux/types.h>
  7. #include <linux/delay.h>
  8. #include <linux/module.h>
  9. #include <linux/mm.h>
  10. #include <linux/init.h>
  11. #include <linux/dma-mapping.h>
  12. #include <linux/of.h>
  13. #include <linux/of_device.h>
  14. #include <linux/gfp.h>
  15. #include <asm/irq.h>
  16. #include <asm/io.h>
  17. #include <asm/dma.h>
  18. #include <scsi/scsi_host.h>
  19. #include "esp_scsi.h"
/* Driver identification strings, used for log prefixes and module metadata. */
#define DRV_MODULE_NAME		"sun_esp"
#define PFX			DRV_MODULE_NAME ": "
#define DRV_VERSION		"1.100"
#define DRV_MODULE_RELDATE	"August 27, 2008"

/* DVMA controller register accessors.  NOTE: both macros implicitly
 * use a variable named 'esp' (struct esp *) from the enclosing scope;
 * REG is a byte offset into the DMA register block at esp->dma_regs.
 */
#define dma_read32(REG) \
	sbus_readl(esp->dma_regs + (REG))
#define dma_write32(VAL, REG) \
	sbus_writel((VAL), esp->dma_regs + (REG))
/* DVMA chip revisions, decoded from the DMA_DEVICE_ID field of DMA_CSR
 * in esp_sbus_setup_dma().
 */
enum dvma_rev {
	dvmarev0,
	dvmaesc1,	/* ESC variant; gets special burst/byte-count handling */
	dvmarev1,
	dvmarev2,
	dvmarev3,
	dvmarevplus,
	dvmahme		/* HME (FAS366) variant */
};
/* Map the DVMA controller's register block and identify its revision.
 *
 * @esp:    driver state to fill in (esp->dma, esp->dma_regs, esp->dmarev)
 * @dma_of: platform device of the DVMA node
 *
 * Returns 0 on success, -ENOMEM if the registers cannot be mapped.
 * NOTE(review): an unrecognized DMA_DEVICE_ID leaves esp->dmarev
 * untouched (presumably zeroed host memory => dvmarev0) — confirm
 * that is the intended fallback.
 */
static int __devinit esp_sbus_setup_dma(struct esp *esp,
					struct platform_device *dma_of)
{
	esp->dma = dma_of;

	esp->dma_regs = of_ioremap(&dma_of->resource[0], 0,
				   resource_size(&dma_of->resource[0]),
				   "espdma");
	if (!esp->dma_regs)
		return -ENOMEM;

	/* Decode the chip revision from the CSR's device-ID field. */
	switch (dma_read32(DMA_CSR) & DMA_DEVICE_ID) {
	case DMA_VERS0:
		esp->dmarev = dvmarev0;
		break;
	case DMA_ESCV1:
		esp->dmarev = dvmaesc1;
		break;
	case DMA_VERS1:
		esp->dmarev = dvmarev1;
		break;
	case DMA_VERS2:
		esp->dmarev = dvmarev2;
		break;
	case DMA_VERHME:
		esp->dmarev = dvmahme;
		break;
	case DMA_VERSPLUS:
		esp->dmarev = dvmarevplus;
		break;
	}

	return 0;
}
  69. static int __devinit esp_sbus_map_regs(struct esp *esp, int hme)
  70. {
  71. struct platform_device *op = esp->dev;
  72. struct resource *res;
  73. /* On HME, two reg sets exist, first is DVMA,
  74. * second is ESP registers.
  75. */
  76. if (hme)
  77. res = &op->resource[1];
  78. else
  79. res = &op->resource[0];
  80. esp->regs = of_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP");
  81. if (!esp->regs)
  82. return -ENOMEM;
  83. return 0;
  84. }
  85. static int __devinit esp_sbus_map_command_block(struct esp *esp)
  86. {
  87. struct platform_device *op = esp->dev;
  88. esp->command_block = dma_alloc_coherent(&op->dev, 16,
  89. &esp->command_block_dma,
  90. GFP_ATOMIC);
  91. if (!esp->command_block)
  92. return -ENOMEM;
  93. return 0;
  94. }
  95. static int __devinit esp_sbus_register_irq(struct esp *esp)
  96. {
  97. struct Scsi_Host *host = esp->host;
  98. struct platform_device *op = esp->dev;
  99. host->irq = op->archdata.irqs[0];
  100. return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
  101. }
  102. static void __devinit esp_get_scsi_id(struct esp *esp, struct platform_device *espdma)
  103. {
  104. struct platform_device *op = esp->dev;
  105. struct device_node *dp;
  106. dp = op->dev.of_node;
  107. esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff);
  108. if (esp->scsi_id != 0xff)
  109. goto done;
  110. esp->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 0xff);
  111. if (esp->scsi_id != 0xff)
  112. goto done;
  113. esp->scsi_id = of_getintprop_default(espdma->dev.of_node,
  114. "scsi-initiator-id", 7);
  115. done:
  116. esp->host->this_id = esp->scsi_id;
  117. esp->scsi_id_mask = (1 << esp->scsi_id);
  118. }
  119. static void __devinit esp_get_differential(struct esp *esp)
  120. {
  121. struct platform_device *op = esp->dev;
  122. struct device_node *dp;
  123. dp = op->dev.of_node;
  124. if (of_find_property(dp, "differential", NULL))
  125. esp->flags |= ESP_FLAG_DIFFERENTIAL;
  126. else
  127. esp->flags &= ~ESP_FLAG_DIFFERENTIAL;
  128. }
  129. static void __devinit esp_get_clock_params(struct esp *esp)
  130. {
  131. struct platform_device *op = esp->dev;
  132. struct device_node *bus_dp, *dp;
  133. int fmhz;
  134. dp = op->dev.of_node;
  135. bus_dp = dp->parent;
  136. fmhz = of_getintprop_default(dp, "clock-frequency", 0);
  137. if (fmhz == 0)
  138. fmhz = of_getintprop_default(bus_dp, "clock-frequency", 0);
  139. esp->cfreq = fmhz;
  140. }
  141. static void __devinit esp_get_bursts(struct esp *esp, struct platform_device *dma_of)
  142. {
  143. struct device_node *dma_dp = dma_of->dev.of_node;
  144. struct platform_device *op = esp->dev;
  145. struct device_node *dp;
  146. u8 bursts, val;
  147. dp = op->dev.of_node;
  148. bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
  149. val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
  150. if (val != 0xff)
  151. bursts &= val;
  152. val = of_getintprop_default(dma_dp->parent, "burst-sizes", 0xff);
  153. if (val != 0xff)
  154. bursts &= val;
  155. if (bursts == 0xff ||
  156. (bursts & DMA_BURST16) == 0 ||
  157. (bursts & DMA_BURST32) == 0)
  158. bursts = (DMA_BURST32 - 1);
  159. esp->bursts = bursts;
  160. }
/* Gather all OpenFirmware properties the driver needs (initiator id,
 * differential flag, clock frequency, burst sizes) before registering
 * with the ESP core.
 */
static void __devinit esp_sbus_get_props(struct esp *esp, struct platform_device *espdma)
{
	esp_get_scsi_id(esp, espdma);
	esp_get_differential(esp);
	esp_get_clock_params(esp);
	esp_get_bursts(esp, espdma);
}
  168. static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg)
  169. {
  170. sbus_writeb(val, esp->regs + (reg * 4UL));
  171. }
  172. static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
  173. {
  174. return sbus_readb(esp->regs + (reg * 4UL));
  175. }
  176. static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf,
  177. size_t sz, int dir)
  178. {
  179. struct platform_device *op = esp->dev;
  180. return dma_map_single(&op->dev, buf, sz, dir);
  181. }
  182. static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg,
  183. int num_sg, int dir)
  184. {
  185. struct platform_device *op = esp->dev;
  186. return dma_map_sg(&op->dev, sg, num_sg, dir);
  187. }
  188. static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr,
  189. size_t sz, int dir)
  190. {
  191. struct platform_device *op = esp->dev;
  192. dma_unmap_single(&op->dev, addr, sz, dir);
  193. }
  194. static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
  195. int num_sg, int dir)
  196. {
  197. struct platform_device *op = esp->dev;
  198. dma_unmap_sg(&op->dev, sg, num_sg, dir);
  199. }
  200. static int sbus_esp_irq_pending(struct esp *esp)
  201. {
  202. if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
  203. return 1;
  204. return 0;
  205. }
/* Reset and reinitialize the DVMA engine, applying the per-revision
 * quirks, and leave it with interrupts enabled.
 */
static void sbus_esp_reset_dma(struct esp *esp)
{
	int can_do_burst16, can_do_burst32, can_do_burst64;
	int can_do_sbus64, lim;
	struct platform_device *op;
	u32 val;

	/* Capabilities derived from the burst mask computed at probe
	 * time, further constrained by what the SBUS supports.
	 */
	can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
	can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
	can_do_burst64 = 0;
	can_do_sbus64 = 0;
	op = esp->dev;
	if (sbus_can_dma_64bit())
		can_do_sbus64 = 1;
	if (sbus_can_burst64())
		can_do_burst64 = (esp->bursts & DMA_BURST64) != 0;

	/* Put the DVMA into a known state (reset pulse); HME uses the
	 * dedicated FAS366 reset below instead.
	 */
	if (esp->dmarev != dvmahme) {
		val = dma_read32(DMA_CSR);
		dma_write32(val | DMA_RST_SCSI, DMA_CSR);
		dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
	}
	switch (esp->dmarev) {
	case dvmahme:
		/* Full FAS366 reset, then rebuild the cached CSR value
		 * (prev_hme_dmacsr) that later DMA commands start from.
		 */
		dma_write32(DMA_RESET_FAS366, DMA_CSR);
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = (DMA_PARITY_OFF | DMA_2CLKS |
					DMA_SCSI_DISAB | DMA_INT_ENAB);
		esp->prev_hme_dmacsr &= ~(DMA_ENABLE | DMA_ST_WRITE |
					  DMA_BRST_SZ);

		/* Pick the largest available burst size. */
		if (can_do_burst64)
			esp->prev_hme_dmacsr |= DMA_BRST64;
		else if (can_do_burst32)
			esp->prev_hme_dmacsr |= DMA_BRST32;

		if (can_do_sbus64) {
			esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64;
			sbus_set_sbus64(&op->dev, esp->bursts);
		}

		/* Bounded busy-wait for any pending read to clear
		 * before reprogramming the CSR.
		 */
		lim = 1000;
		while (dma_read32(DMA_CSR) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA_PEND_READ "
				       "will not clear!\n",
				       esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		dma_write32(0, DMA_ADDR);
		break;

	case dvmarev2:
		/* Rev 2 gets three-clock operation, except on ESP100. */
		if (esp->rev != ESP100) {
			val = dma_read32(DMA_CSR);
			dma_write32(val | DMA_3CLKS, DMA_CSR);
		}
		break;

	case dvmarev3:
		/* Rev 3 runs at two clocks; enable 32-byte bursts when
		 * available.
		 */
		val = dma_read32(DMA_CSR);
		val &= ~DMA_3CLKS;
		val |= DMA_2CLKS;
		if (can_do_burst32) {
			val &= ~DMA_BRST_SZ;
			val |= DMA_BRST32;
		}
		dma_write32(val, DMA_CSR);
		break;

	case dvmaesc1:
		/* ESC: enable address latching, disable the byte
		 * counter, and use 16-byte bursts only when 32-byte
		 * bursts are unavailable.
		 */
		val = dma_read32(DMA_CSR);
		val |= DMA_ADD_ENABLE;
		val &= ~DMA_BCNT_ENAB;
		if (!can_do_burst32 && can_do_burst16) {
			val |= DMA_ESC_BURST;
		} else {
			val &= ~(DMA_ESC_BURST);
		}
		dma_write32(val, DMA_CSR);
		break;

	default:
		break;
	}

	/* Enable interrupts. */
	val = dma_read32(DMA_CSR);
	dma_write32(val | DMA_INT_ENAB, DMA_CSR);
}
/* Wait for the DVMA FIFO to finish draining.  Not needed on HME. */
static void sbus_esp_dma_drain(struct esp *esp)
{
	u32 csr;
	int lim;

	if (esp->dmarev == dvmahme)
		return;

	csr = dma_read32(DMA_CSR);
	if (!(csr & DMA_FIFO_ISDRAIN))
		return;

	/* Rev 3 and ESC drain on their own; the other revisions must
	 * be told to start draining explicitly.
	 */
	if (esp->dmarev != dvmarev3 && esp->dmarev != dvmaesc1)
		dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);

	/* Bounded busy-wait for drain completion. */
	lim = 1000;
	while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
		if (--lim == 0) {
			printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
			       esp->host->unique_id);
			break;
		}
		udelay(1);
	}
}
/* Invalidate the DVMA state so the engine is clean before the next
 * transfer is programmed.
 */
static void sbus_esp_dma_invalidate(struct esp *esp)
{
	if (esp->dmarev == dvmahme) {
		/* HME: pulse a SCSI reset, then reprogram the cached
		 * CSR value with DMA/write direction cleared.
		 */
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
					 (DMA_PARITY_OFF | DMA_2CLKS |
					  DMA_SCSI_DISAB | DMA_INT_ENAB)) &
					~(DMA_ST_WRITE | DMA_ENABLE));

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		/* This is necessary to avoid having the SCSI channel
		 * engine lock up on us.
		 */
		dma_write32(0, DMA_ADDR);
	} else {
		u32 val;
		int lim;

		/* Bounded busy-wait for pending reads to finish. */
		lim = 1000;
		while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA will not "
				       "invalidate!\n", esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		/* Pulse the FIFO-invalidate bit with the engine
		 * disabled.
		 */
		val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
		val |= DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
		val &= ~DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
	}
}
/* Program a DMA transfer and issue the ESP command that starts it.
 *
 * @addr/@dma_count: DVMA bus address and length of the transfer
 * @esp_count:       transfer length programmed into the ESP chip
 * @write:           non-zero for device-to-memory transfers
 * @cmd:             ESP command; must have ESP_CMD_DMA set
 */
static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
				  u32 dma_count, int write, u8 cmd)
{
	u32 csr;

	BUG_ON(!(cmd & ESP_CMD_DMA));

	/* Low 16 bits of the transfer count go into the ESP chip. */
	sbus_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	sbus_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
	if (esp->rev == FASHME) {
		/* FASHME also takes a high count byte; the ESP command
		 * is issued before the DVMA engine is enabled.
		 */
		sbus_esp_write8(esp, (esp_count >> 16) & 0xff, FAS_RLO);
		sbus_esp_write8(esp, 0, FAS_RHI);

		scsi_esp_cmd(esp, cmd);

		/* Build the CSR from the cached value, cache it back,
		 * then kick off the transfer.
		 */
		csr = esp->prev_hme_dmacsr;
		csr |= DMA_SCSI_DISAB | DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		esp->prev_hme_dmacsr = csr;

		dma_write32(dma_count, DMA_COUNT);
		dma_write32(addr, DMA_ADDR);
		dma_write32(csr, DMA_CSR);
	} else {
		/* Non-HME: enable the engine first, then program the
		 * address, then issue the ESP command.
		 */
		csr = dma_read32(DMA_CSR);
		csr |= DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		dma_write32(csr, DMA_CSR);
		if (esp->dmarev == dvmaesc1) {
			/* ESC gets a byte count padded out to the next
			 * page boundary past addr + dma_count + 16.
			 */
			u32 end = PAGE_ALIGN(addr + dma_count + 16U);
			dma_write32(end - addr, DMA_COUNT);
		}
		dma_write32(addr, DMA_ADDR);

		scsi_esp_cmd(esp, cmd);
	}
}
  382. static int sbus_esp_dma_error(struct esp *esp)
  383. {
  384. u32 csr = dma_read32(DMA_CSR);
  385. if (csr & DMA_HNDL_ERROR)
  386. return 1;
  387. return 0;
  388. }
/* Hooks the generic ESP core (esp_scsi) uses to drive this SBUS
 * front-end.
 */
static const struct esp_driver_ops sbus_esp_ops = {
	.esp_write8	= sbus_esp_write8,
	.esp_read8	= sbus_esp_read8,
	.map_single	= sbus_esp_map_single,
	.map_sg		= sbus_esp_map_sg,
	.unmap_single	= sbus_esp_unmap_single,
	.unmap_sg	= sbus_esp_unmap_sg,
	.irq_pending	= sbus_esp_irq_pending,
	.reset_dma	= sbus_esp_reset_dma,
	.dma_drain	= sbus_esp_dma_drain,
	.dma_invalidate	= sbus_esp_dma_invalidate,
	.send_dma_cmd	= sbus_esp_send_dma_cmd,
	.dma_error	= sbus_esp_dma_error,
};
  403. static int __devinit esp_sbus_probe_one(struct platform_device *op,
  404. struct platform_device *espdma,
  405. int hme)
  406. {
  407. struct scsi_host_template *tpnt = &scsi_esp_template;
  408. struct Scsi_Host *host;
  409. struct esp *esp;
  410. int err;
  411. host = scsi_host_alloc(tpnt, sizeof(struct esp));
  412. err = -ENOMEM;
  413. if (!host)
  414. goto fail;
  415. host->max_id = (hme ? 16 : 8);
  416. esp = shost_priv(host);
  417. esp->host = host;
  418. esp->dev = op;
  419. esp->ops = &sbus_esp_ops;
  420. if (hme)
  421. esp->flags |= ESP_FLAG_WIDE_CAPABLE;
  422. err = esp_sbus_setup_dma(esp, espdma);
  423. if (err < 0)
  424. goto fail_unlink;
  425. err = esp_sbus_map_regs(esp, hme);
  426. if (err < 0)
  427. goto fail_unlink;
  428. err = esp_sbus_map_command_block(esp);
  429. if (err < 0)
  430. goto fail_unmap_regs;
  431. err = esp_sbus_register_irq(esp);
  432. if (err < 0)
  433. goto fail_unmap_command_block;
  434. esp_sbus_get_props(esp, espdma);
  435. /* Before we try to touch the ESP chip, ESC1 dma can
  436. * come up with the reset bit set, so make sure that
  437. * is clear first.
  438. */
  439. if (esp->dmarev == dvmaesc1) {
  440. u32 val = dma_read32(DMA_CSR);
  441. dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
  442. }
  443. dev_set_drvdata(&op->dev, esp);
  444. err = scsi_esp_register(esp, &op->dev);
  445. if (err)
  446. goto fail_free_irq;
  447. return 0;
  448. fail_free_irq:
  449. free_irq(host->irq, esp);
  450. fail_unmap_command_block:
  451. dma_free_coherent(&op->dev, 16,
  452. esp->command_block,
  453. esp->command_block_dma);
  454. fail_unmap_regs:
  455. of_iounmap(&op->resource[(hme ? 1 : 0)], esp->regs, SBUS_ESP_REG_SIZE);
  456. fail_unlink:
  457. scsi_host_put(host);
  458. fail:
  459. return err;
  460. }
  461. static int __devinit esp_sbus_probe(struct platform_device *op)
  462. {
  463. struct device_node *dma_node = NULL;
  464. struct device_node *dp = op->dev.of_node;
  465. struct platform_device *dma_of = NULL;
  466. int hme = 0;
  467. if (dp->parent &&
  468. (!strcmp(dp->parent->name, "espdma") ||
  469. !strcmp(dp->parent->name, "dma")))
  470. dma_node = dp->parent;
  471. else if (!strcmp(dp->name, "SUNW,fas")) {
  472. dma_node = op->dev.of_node;
  473. hme = 1;
  474. }
  475. if (dma_node)
  476. dma_of = of_find_device_by_node(dma_node);
  477. if (!dma_of)
  478. return -ENODEV;
  479. return esp_sbus_probe_one(op, dma_of, hme);
  480. }
  481. static int __devexit esp_sbus_remove(struct platform_device *op)
  482. {
  483. struct esp *esp = dev_get_drvdata(&op->dev);
  484. struct platform_device *dma_of = esp->dma;
  485. unsigned int irq = esp->host->irq;
  486. bool is_hme;
  487. u32 val;
  488. scsi_esp_unregister(esp);
  489. /* Disable interrupts. */
  490. val = dma_read32(DMA_CSR);
  491. dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);
  492. free_irq(irq, esp);
  493. is_hme = (esp->dmarev == dvmahme);
  494. dma_free_coherent(&op->dev, 16,
  495. esp->command_block,
  496. esp->command_block_dma);
  497. of_iounmap(&op->resource[(is_hme ? 1 : 0)], esp->regs,
  498. SBUS_ESP_REG_SIZE);
  499. of_iounmap(&dma_of->resource[0], esp->dma_regs,
  500. resource_size(&dma_of->resource[0]));
  501. scsi_host_put(esp->host);
  502. dev_set_drvdata(&op->dev, NULL);
  503. return 0;
  504. }
/* OpenFirmware node names this driver binds to. */
static const struct of_device_id esp_match[] = {
	{
		.name = "SUNW,esp",
	},
	{
		.name = "SUNW,fas",	/* HME variant */
	},
	{
		.name = "esp",
	},
	{},
};
MODULE_DEVICE_TABLE(of, esp_match);
/* Platform driver glue binding probe/remove to the OF match table. */
static struct platform_driver esp_sbus_driver = {
	.driver = {
		.name = "esp",
		.owner = THIS_MODULE,
		.of_match_table = esp_match,
	},
	.probe		= esp_sbus_probe,
	.remove		= __devexit_p(esp_sbus_remove),
};
/* Module entry point: register the platform driver. */
static int __init sunesp_init(void)
{
	return platform_driver_register(&esp_sbus_driver);
}
/* Module exit point: unregister the platform driver. */
static void __exit sunesp_exit(void)
{
	platform_driver_unregister(&esp_sbus_driver);
}
/* Module metadata and init/exit registration. */
MODULE_DESCRIPTION("Sun ESP SCSI driver");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_init(sunesp_init);
module_exit(sunesp_exit);