/*
 * sgiseeq.c: Seeq8003 ethernet driver for SGI machines.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 */

#undef DEBUG

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include <asm/sgi/hpc3.h>
#include <asm/sgi/ip22.h>
#include <asm/sgi/seeq.h>

#include "sgiseeq.h"

static char *sgiseeqstr = "SGI Seeq8003";

/*
 * If you want speed, you do something silly, it always has worked for me. So,
 * with that in mind, I've decided to make this driver look completely like a
 * stupid Lance from a driver architecture perspective. Only difference is that
 * here our "ring buffer" looks and acts like a real Lance one does but is
 * laid out like how the HPC DMA and the Seeq want it to. You'd be surprised
 * how a stupid idea like this can pay off in performance, not to mention
 * making this driver 2,000 times easier to write. ;-)
 */
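/*
 * Concretely, each ring below is a fixed array of descriptors whose
 * pnext fields are linked into a circle for the HPC3 DMA engine to
 * follow (see setup_rx_ring()/setup_tx_ring()), while the driver just
 * walks rx_new/tx_new indices through the same array, Lance-style.
 */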
/* Tune these if we tend to run out often etc. */
#define SEEQ_RX_BUFFERS  16
#define SEEQ_TX_BUFFERS  16

#define PKT_BUF_SZ       1584

#define NEXT_RX(i)  (((i) + 1) & (SEEQ_RX_BUFFERS - 1))
#define NEXT_TX(i)  (((i) + 1) & (SEEQ_TX_BUFFERS - 1))
#define PREV_RX(i)  (((i) - 1) & (SEEQ_RX_BUFFERS - 1))
#define PREV_TX(i)  (((i) - 1) & (SEEQ_TX_BUFFERS - 1))

#define TX_BUFFS_AVAIL(sp) ((sp->tx_old <= sp->tx_new) ? \
                            sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \
                            sp->tx_old - sp->tx_new - 1)
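/*
 * Both ring sizes are powers of two, so the NEXT/PREV macros reduce to
 * a mask: with 16 buffers, NEXT_TX(15) == 0 and PREV_TX(0) == 15.
 * TX_BUFFS_AVAIL() keeps one slot permanently unused so that
 * tx_old == tx_new always means "empty"; a full ring reports 0 with
 * tx_new sitting one slot behind tx_old.
 */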
#define VIRT_TO_DMA(sp, v) ((sp)->srings_dma + \
                            (dma_addr_t)((unsigned long)(v) - \
                                         (unsigned long)((sp)->rx_desc)))
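/*
 * VIRT_TO_DMA() converts a CPU pointer to any descriptor inside the
 * srings block into the bus address the HPC3 needs: the DMA base of
 * the block plus the pointer's byte offset from rx_desc, which points
 * at the start of the block.
 */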
/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;
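/*
 * This is the usual copybreak tradeoff: in sgiseeq_rx(), frames up to
 * this size are copied into a small freshly-allocated skb so the
 * full-sized DMA buffer goes straight back onto the ring, while larger
 * frames are passed up in the original buffer and a new full-sized
 * buffer is mapped in its place.
 */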
#define PAD_SIZE    (128 - sizeof(struct hpc_dma_desc) - sizeof(void *))

struct sgiseeq_rx_desc {
        volatile struct hpc_dma_desc rdma;
        u8 padding[PAD_SIZE];
        struct sk_buff *skb;
};

struct sgiseeq_tx_desc {
        volatile struct hpc_dma_desc tdma;
        u8 padding[PAD_SIZE];
        struct sk_buff *skb;
};

/*
 * Warning: This structure is laid out in a certain way because HPC dma
 * descriptors must be 8-byte aligned. So don't touch this without
 * some care.
 */
struct sgiseeq_init_block { /* Note the name ;-) */
        struct sgiseeq_rx_desc rxvector[SEEQ_RX_BUFFERS];
        struct sgiseeq_tx_desc txvector[SEEQ_TX_BUFFERS];
};

struct sgiseeq_private {
        struct sgiseeq_init_block *srings;
        dma_addr_t srings_dma;

        /* Ptrs to the descriptors in uncached space. */
        struct sgiseeq_rx_desc *rx_desc;
        struct sgiseeq_tx_desc *tx_desc;

        char *name;
        struct hpc3_ethregs *hregs;
        struct sgiseeq_regs *sregs;

        /* Ring entry counters. */
        unsigned int rx_new, tx_new;
        unsigned int rx_old, tx_old;

        int is_edlc;
        unsigned char control;
        unsigned char mode;

        spinlock_t tx_lock;
};

static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
{
        dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
                       DMA_FROM_DEVICE);
}

static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
{
        dma_cache_sync(dev->dev.parent, addr, sizeof(struct sgiseeq_rx_desc),
                       DMA_TO_DEVICE);
}
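/*
 * The descriptor rings live in noncoherent memory (allocated with
 * dma_alloc_noncoherent() in sgiseeq_probe()), so every CPU read of a
 * descriptor is preceded by a DMA_FROM_DEVICE sync and every CPU write
 * is followed by a DMA_TO_DEVICE sync. Both helpers sync
 * sizeof(struct sgiseeq_rx_desc) bytes, which is also correct for tx
 * descriptors: the padding makes both descriptor types the same size.
 */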
static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
{
        hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ;
        udelay(20);
        hregs->reset = 0;
}

static inline void reset_hpc3_and_seeq(struct hpc3_ethregs *hregs,
                                       struct sgiseeq_regs *sregs)
{
        hregs->rx_ctrl = hregs->tx_ctrl = 0;
        hpc3_eth_reset(hregs);
}

#define RSTAT_GO_BITS (SEEQ_RCMD_IGOOD | SEEQ_RCMD_IEOF | SEEQ_RCMD_ISHORT | \
                       SEEQ_RCMD_IDRIB | SEEQ_RCMD_ICRC)

static inline void seeq_go(struct sgiseeq_private *sp,
                           struct hpc3_ethregs *hregs,
                           struct sgiseeq_regs *sregs)
{
        sregs->rstat = sp->mode | RSTAT_GO_BITS;
        hregs->rx_ctrl = HPC3_ERXCTRL_ACTIVE;
}

static inline void __sgiseeq_set_mac_address(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct sgiseeq_regs *sregs = sp->sregs;
        int i;

        sregs->tstat = SEEQ_TCMD_RB0;
        for (i = 0; i < 6; i++)
                sregs->rw.eth_addr[i] = dev->dev_addr[i];
}

static int sgiseeq_set_mac_address(struct net_device *dev, void *addr)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct sockaddr *sa = addr;

        memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);

        spin_lock_irq(&sp->tx_lock);
        __sgiseeq_set_mac_address(dev);
        spin_unlock_irq(&sp->tx_lock);

        return 0;
}

#define TCNTINFO_INIT (HPCDMA_EOX | HPCDMA_ETXD)
#define RCNTCFG_INIT  (HPCDMA_OWN | HPCDMA_EORP | HPCDMA_XIE)
#define RCNTINFO_INIT (RCNTCFG_INIT | (PKT_BUF_SZ & HPCDMA_BCNT))
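/*
 * Fresh tx descriptors start out CPU-owned (HPCDMA_OWN clear) with
 * only EOX|ETXD set; fresh rx descriptors are handed to the hardware
 * immediately (HPCDMA_OWN), with interrupt-on-completion (HPCDMA_XIE)
 * and the buffer size seeded into the HPCDMA_BCNT byte-count field.
 */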
static int seeq_init_ring(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        int i;

        netif_stop_queue(dev);
        sp->rx_new = sp->tx_new = 0;
        sp->rx_old = sp->tx_old = 0;

        __sgiseeq_set_mac_address(dev);

        /* Setup tx ring. */
        for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
                sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
                dma_sync_desc_dev(dev, &sp->tx_desc[i]);
        }

        /* And now the rx ring. */
        for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
                if (!sp->rx_desc[i].skb) {
                        dma_addr_t dma_addr;
                        struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);

                        if (skb == NULL)
                                return -ENOMEM;
                        skb_reserve(skb, 2);
                        dma_addr = dma_map_single(dev->dev.parent,
                                                  skb->data - 2,
                                                  PKT_BUF_SZ, DMA_FROM_DEVICE);
                        sp->rx_desc[i].skb = skb;
                        sp->rx_desc[i].rdma.pbuf = dma_addr;
                }
                sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
                dma_sync_desc_dev(dev, &sp->rx_desc[i]);
        }
        sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
        dma_sync_desc_dev(dev, &sp->rx_desc[i - 1]);
        return 0;
}
static void seeq_purge_ring(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        int i;

        /* clear tx ring. */
        for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
                if (sp->tx_desc[i].skb) {
                        dev_kfree_skb(sp->tx_desc[i].skb);
                        sp->tx_desc[i].skb = NULL;
                }
        }

        /* And now the rx ring. */
        for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
                if (sp->rx_desc[i].skb) {
                        dev_kfree_skb(sp->rx_desc[i].skb);
                        sp->rx_desc[i].skb = NULL;
                }
        }
}

#ifdef DEBUG
static struct sgiseeq_private *gpriv;
static struct net_device *gdev;

static void sgiseeq_dump_rings(void)
{
        static int once;
        struct sgiseeq_rx_desc *r = gpriv->rx_desc;
        struct sgiseeq_tx_desc *t = gpriv->tx_desc;
        struct hpc3_ethregs *hregs = gpriv->hregs;
        int i;

        if (once)
                return;
        once++;
        printk("RING DUMP:\n");
        for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
                printk("RX [%d]: @(%p) [%08x,%08x,%08x] ",
                       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
                       r[i].rdma.pnext);
                i += 1;
                printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
                       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
                       r[i].rdma.pnext);
        }
        for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
                printk("TX [%d]: @(%p) [%08x,%08x,%08x] ",
                       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
                       t[i].tdma.pnext);
                i += 1;
                printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
                       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
                       t[i].tdma.pnext);
        }
        printk("INFO: [rx_new = %d rx_old=%d] [tx_new = %d tx_old = %d]\n",
               gpriv->rx_new, gpriv->rx_old, gpriv->tx_new, gpriv->tx_old);
        printk("RREGS: rx_cbptr[%08x] rx_ndptr[%08x] rx_ctrl[%08x]\n",
               hregs->rx_cbptr, hregs->rx_ndptr, hregs->rx_ctrl);
        printk("TREGS: tx_cbptr[%08x] tx_ndptr[%08x] tx_ctrl[%08x]\n",
               hregs->tx_cbptr, hregs->tx_ndptr, hregs->tx_ctrl);
}
#endif
#define TSTAT_INIT_SEEQ (SEEQ_TCMD_IPT|SEEQ_TCMD_I16|SEEQ_TCMD_IC|SEEQ_TCMD_IUF)
#define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2)

static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
                     struct sgiseeq_regs *sregs)
{
        struct hpc3_ethregs *hregs = sp->hregs;
        int err;

        reset_hpc3_and_seeq(hregs, sregs);
        err = seeq_init_ring(dev);
        if (err)
                return err;

        /* Setup to field the proper interrupt types. */
        if (sp->is_edlc) {
                sregs->tstat = TSTAT_INIT_EDLC;
                sregs->rw.wregs.control = sp->control;
                sregs->rw.wregs.frame_gap = 0;
        } else {
                sregs->tstat = TSTAT_INIT_SEEQ;
        }

        hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc);
        hregs->tx_ndptr = VIRT_TO_DMA(sp, sp->tx_desc);

        seeq_go(sp, hregs, sregs);
        return 0;
}

static void record_rx_errors(struct net_device *dev, unsigned char status)
{
        if (status & SEEQ_RSTAT_OVERF ||
            status & SEEQ_RSTAT_SFRAME)
                dev->stats.rx_over_errors++;
        if (status & SEEQ_RSTAT_CERROR)
                dev->stats.rx_crc_errors++;
        if (status & SEEQ_RSTAT_DERROR)
                dev->stats.rx_frame_errors++;
        if (status & SEEQ_RSTAT_REOF)
                dev->stats.rx_errors++;
}

static inline void rx_maybe_restart(struct sgiseeq_private *sp,
                                    struct hpc3_ethregs *hregs,
                                    struct sgiseeq_regs *sregs)
{
        if (!(hregs->rx_ctrl & HPC3_ERXCTRL_ACTIVE)) {
                hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc + sp->rx_new);
                seeq_go(sp, hregs, sregs);
        }
}
static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp,
                              struct hpc3_ethregs *hregs,
                              struct sgiseeq_regs *sregs)
{
        struct sgiseeq_rx_desc *rd;
        struct sk_buff *skb = NULL;
        struct sk_buff *newskb;
        unsigned char pkt_status;
        int len = 0;
        unsigned int orig_end = PREV_RX(sp->rx_new);

        /* Service every received packet. */
        rd = &sp->rx_desc[sp->rx_new];
        dma_sync_desc_cpu(dev, rd);
        while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
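                /* The HPCDMA_BCNT field holds the bytes the DMA engine
                 * did not use, so the frame length is the buffer size
                 * minus that remainder, minus 3 trailing bytes; the
                 * last trailing byte is the Seeq's receive status,
                 * fetched from data[len] just below.
                 */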
                len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
                dma_unmap_single(dev->dev.parent, rd->rdma.pbuf,
                                 PKT_BUF_SZ, DMA_FROM_DEVICE);
                pkt_status = rd->skb->data[len];
                if (pkt_status & SEEQ_RSTAT_FIG) {
                        /* Packet is OK. */
                        /* We don't want to receive our own packets */
                        if (memcmp(rd->skb->data + 6, dev->dev_addr, ETH_ALEN)) {
                                if (len > rx_copybreak) {
                                        skb = rd->skb;
                                        newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
                                        if (!newskb) {
                                                newskb = skb;
                                                skb = NULL;
                                                goto memory_squeeze;
                                        }
                                        skb_reserve(newskb, 2);
                                } else {
                                        skb = netdev_alloc_skb_ip_align(dev, len);
                                        if (skb)
                                                skb_copy_to_linear_data(skb, rd->skb->data, len);
                                        newskb = rd->skb;
                                }
memory_squeeze:
                                if (skb) {
                                        skb_put(skb, len);
                                        skb->protocol = eth_type_trans(skb, dev);
                                        netif_rx(skb);
                                        dev->stats.rx_packets++;
                                        dev->stats.rx_bytes += len;
                                } else {
                                        printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
                                               dev->name);
                                        dev->stats.rx_dropped++;
                                }
                        } else {
                                /* Silently drop my own packets */
                                newskb = rd->skb;
                        }
                } else {
                        record_rx_errors(dev, pkt_status);
                        newskb = rd->skb;
                }
                rd->skb = newskb;
                rd->rdma.pbuf = dma_map_single(dev->dev.parent,
                                               newskb->data - 2,
                                               PKT_BUF_SZ, DMA_FROM_DEVICE);

                /* Return the entry to the ring pool. */
                rd->rdma.cntinfo = RCNTINFO_INIT;
                sp->rx_new = NEXT_RX(sp->rx_new);
                dma_sync_desc_dev(dev, rd);
                rd = &sp->rx_desc[sp->rx_new];
                dma_sync_desc_cpu(dev, rd);
        }
        dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
        sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
        dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);
        dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
        sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
        dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
        rx_maybe_restart(sp, hregs, sregs);
}
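/*
 * On the EDLC the transmit collision counter is reset by dropping and
 * re-raising the SEEQ_CTRL_XCNT enable bit in the control register,
 * which is all the helper below does.
 */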
static inline void tx_maybe_reset_collisions(struct sgiseeq_private *sp,
                                             struct sgiseeq_regs *sregs)
{
        if (sp->is_edlc) {
                sregs->rw.wregs.control = sp->control & ~(SEEQ_CTRL_XCNT);
                sregs->rw.wregs.control = sp->control;
        }
}

static inline void kick_tx(struct net_device *dev,
                           struct sgiseeq_private *sp,
                           struct hpc3_ethregs *hregs)
{
        struct sgiseeq_tx_desc *td;
        int i = sp->tx_old;

        /* If the HPC aint doin nothin, and there are more packets
         * with ETXD cleared and XIU set we must make very certain
         * that we restart the HPC else we risk locking up the
         * adapter. The following code is only safe iff the HPCDMA
         * is not active!
         */
        td = &sp->tx_desc[i];
        dma_sync_desc_cpu(dev, td);
        while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) ==
               (HPCDMA_XIU | HPCDMA_ETXD)) {
                i = NEXT_TX(i);
                td = &sp->tx_desc[i];
                dma_sync_desc_cpu(dev, td);
        }
        if (td->tdma.cntinfo & HPCDMA_XIU) {
                hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
                hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
        }
}

static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp,
                              struct hpc3_ethregs *hregs,
                              struct sgiseeq_regs *sregs)
{
        struct sgiseeq_tx_desc *td;
        unsigned long status = hregs->tx_ctrl;
        int j;

        tx_maybe_reset_collisions(sp, sregs);

        if (!(status & (HPC3_ETXCTRL_ACTIVE | SEEQ_TSTAT_PTRANS))) {
                /* Oops, HPC detected some sort of error. */
                if (status & SEEQ_TSTAT_R16)
                        dev->stats.tx_aborted_errors++;
                if (status & SEEQ_TSTAT_UFLOW)
                        dev->stats.tx_fifo_errors++;
                if (status & SEEQ_TSTAT_LCLS)
                        dev->stats.collisions++;
        }

        /* Ack 'em... */
        for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) {
                td = &sp->tx_desc[j];

                dma_sync_desc_cpu(dev, td);
                if (!(td->tdma.cntinfo & (HPCDMA_XIU)))
                        break;
                if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) {
                        if (!(status & HPC3_ETXCTRL_ACTIVE)) {
                                hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
                                hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
                        }
                        break;
                }
                dev->stats.tx_packets++;
                sp->tx_old = NEXT_TX(sp->tx_old);
                td->tdma.cntinfo &= ~(HPCDMA_XIU | HPCDMA_XIE);
                td->tdma.cntinfo |= HPCDMA_EOX;
                if (td->skb) {
                        dev_kfree_skb_any(td->skb);
                        td->skb = NULL;
                }
                dma_sync_desc_dev(dev, td);
        }
}
static irqreturn_t sgiseeq_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct hpc3_ethregs *hregs = sp->hregs;
        struct sgiseeq_regs *sregs = sp->sregs;

        spin_lock(&sp->tx_lock);

        /* Ack the IRQ and set software state. */
        hregs->reset = HPC3_ERST_CLRIRQ;

        /* Always check for received packets. */
        sgiseeq_rx(dev, sp, hregs, sregs);

        /* Only check for tx acks if we have something queued. */
        if (sp->tx_old != sp->tx_new)
                sgiseeq_tx(dev, sp, hregs, sregs);

        if ((TX_BUFFS_AVAIL(sp) > 0) && netif_queue_stopped(dev)) {
                netif_wake_queue(dev);
        }
        spin_unlock(&sp->tx_lock);

        return IRQ_HANDLED;
}

static int sgiseeq_open(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct sgiseeq_regs *sregs = sp->sregs;
        unsigned int irq = dev->irq;
        int err;

        if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
                printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
                return -EAGAIN;
        }

        err = init_seeq(dev, sp, sregs);
        if (err)
                goto out_free_irq;

        netif_start_queue(dev);

        return 0;

out_free_irq:
        free_irq(irq, dev);

        return err;
}

static int sgiseeq_close(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct sgiseeq_regs *sregs = sp->sregs;
        unsigned int irq = dev->irq;

        netif_stop_queue(dev);

        /* Shutdown the Seeq. */
        reset_hpc3_and_seeq(sp->hregs, sregs);
        free_irq(irq, dev);
        seeq_purge_ring(dev);

        return 0;
}

static inline int sgiseeq_reset(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct sgiseeq_regs *sregs = sp->sregs;
        int err;

        err = init_seeq(dev, sp, sregs);
        if (err)
                return err;

        dev->trans_start = jiffies; /* prevent tx timeout */
        netif_wake_queue(dev);

        return 0;
}
static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        struct hpc3_ethregs *hregs = sp->hregs;
        unsigned long flags;
        struct sgiseeq_tx_desc *td;
        int len, entry;

        spin_lock_irqsave(&sp->tx_lock, flags);

        /* Setup... */
        len = skb->len;
        if (len < ETH_ZLEN) {
                if (skb_padto(skb, ETH_ZLEN)) {
                        spin_unlock_irqrestore(&sp->tx_lock, flags);
                        return NETDEV_TX_OK;
                }
                len = ETH_ZLEN;
        }

        dev->stats.tx_bytes += len;
        entry = sp->tx_new;
        td = &sp->tx_desc[entry];
        dma_sync_desc_cpu(dev, td);

        /* Create entry. There are so many races with adding a new
         * descriptor to the chain:
         * 1) Assume that the HPC is off processing a DMA chain while
         *    we are changing all of the following.
         * 2) Do not allow the HPC to look at a new descriptor until
         *    we have completely set up its state. This means, do
         *    not clear HPCDMA_EOX in the current last descriptor
         *    until the one we are adding looks consistent and could
         *    be processed right now.
         * 3) The tx interrupt code must notice when we've added a new
         *    entry and the HPC got to the end of the chain before we
         *    added this new entry and restarted it.
         */
        td->skb = skb;
        td->tdma.pbuf = dma_map_single(dev->dev.parent, skb->data,
                                       len, DMA_TO_DEVICE);
        td->tdma.cntinfo = (len & HPCDMA_BCNT) |
                           HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
        dma_sync_desc_dev(dev, td);
        if (sp->tx_old != sp->tx_new) {
                struct sgiseeq_tx_desc *backend;

                backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
                dma_sync_desc_cpu(dev, backend);
                backend->tdma.cntinfo &= ~HPCDMA_EOX;
                dma_sync_desc_dev(dev, backend);
        }
        sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */

        /* Maybe kick the HPC back into motion. */
        if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
                kick_tx(dev, sp, hregs);

        if (!TX_BUFFS_AVAIL(sp))
                netif_stop_queue(dev);
        spin_unlock_irqrestore(&sp->tx_lock, flags);

        return NETDEV_TX_OK;
}
static void timeout(struct net_device *dev)
{
        printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
        sgiseeq_reset(dev);

        dev->trans_start = jiffies; /* prevent tx timeout */
        netif_wake_queue(dev);
}

static void sgiseeq_set_multicast(struct net_device *dev)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        unsigned char oldmode = sp->mode;

        if (dev->flags & IFF_PROMISC)
                sp->mode = SEEQ_RCMD_RANY;
        else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
                sp->mode = SEEQ_RCMD_RBMCAST;
        else
                sp->mode = SEEQ_RCMD_RBCAST;

        /* XXX I know this sucks, but is there a better way to reprogram
         * XXX the receiver? At least, this shouldn't happen too often.
         */
        if (oldmode != sp->mode)
                sgiseeq_reset(dev);
}
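/*
 * The two ring constructors below differ only in descriptor type: each
 * links entry i to entry i + 1 through pnext, then points the final
 * entry back at the first, giving the HPC3 a circular chain it can
 * follow without further driver intervention.
 */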
static inline void setup_tx_ring(struct net_device *dev,
                                 struct sgiseeq_tx_desc *buf,
                                 int nbufs)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        int i = 0;

        while (i < (nbufs - 1)) {
                buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
                buf[i].tdma.pbuf = 0;
                dma_sync_desc_dev(dev, &buf[i]);
                i++;
        }
        buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf);
        dma_sync_desc_dev(dev, &buf[i]);
}

static inline void setup_rx_ring(struct net_device *dev,
                                 struct sgiseeq_rx_desc *buf,
                                 int nbufs)
{
        struct sgiseeq_private *sp = netdev_priv(dev);
        int i = 0;

        while (i < (nbufs - 1)) {
                buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
                buf[i].rdma.pbuf = 0;
                dma_sync_desc_dev(dev, &buf[i]);
                i++;
        }
        buf[i].rdma.pbuf = 0;
        buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf);
        dma_sync_desc_dev(dev, &buf[i]);
}

static const struct net_device_ops sgiseeq_netdev_ops = {
        .ndo_open               = sgiseeq_open,
        .ndo_stop               = sgiseeq_close,
        .ndo_start_xmit         = sgiseeq_start_xmit,
        .ndo_tx_timeout         = timeout,
        .ndo_set_rx_mode        = sgiseeq_set_multicast,
        .ndo_set_mac_address    = sgiseeq_set_mac_address,
        .ndo_change_mtu         = eth_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
};
static int __devinit sgiseeq_probe(struct platform_device *pdev)
{
        struct sgiseeq_platform_data *pd = pdev->dev.platform_data;
        struct hpc3_regs *hpcregs = pd->hpc;
        struct sgiseeq_init_block *sr;
        unsigned int irq = pd->irq;
        struct sgiseeq_private *sp;
        struct net_device *dev;
        int err;

        dev = alloc_etherdev(sizeof (struct sgiseeq_private));
        if (!dev) {
                err = -ENOMEM;
                goto err_out;
        }

        platform_set_drvdata(pdev, dev);
        sp = netdev_priv(dev);

        /* Make private data page aligned */
        sr = dma_alloc_noncoherent(&pdev->dev, sizeof(*sp->srings),
                                   &sp->srings_dma, GFP_KERNEL);
        if (!sr) {
                printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n");
                err = -ENOMEM;
                goto err_out_free_dev;
        }
        sp->srings = sr;
        sp->rx_desc = sp->srings->rxvector;
        sp->tx_desc = sp->srings->txvector;

        /* A couple calculations now, saves many cycles later. */
        setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS);
        setup_tx_ring(dev, sp->tx_desc, SEEQ_TX_BUFFERS);

        memcpy(dev->dev_addr, pd->mac, ETH_ALEN);

#ifdef DEBUG
        gpriv = sp;
        gdev = dev;
#endif
        sp->sregs = (struct sgiseeq_regs *) &hpcregs->eth_ext[0];
        sp->hregs = &hpcregs->ethregs;
        sp->name = sgiseeqstr;
        sp->mode = SEEQ_RCMD_RBCAST;

        /* Setup PIO and DMA transfer timing */
        sp->hregs->pconfig = 0x161;
        sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP |
                             HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026;
        /* Reset the chip. */
        hpc3_eth_reset(sp->hregs);
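        /* Detect the newer 80C03 "EDLC" part: a zero read-back from
         * the transmit collision counter after reset identifies it;
         * the driver assumes a plain 8003 reads back nonzero here.
         */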
        sp->is_edlc = !(sp->sregs->rw.rregs.collision_tx[0] & 0xff);
        if (sp->is_edlc)
                sp->control = SEEQ_CTRL_XCNT | SEEQ_CTRL_ACCNT |
                              SEEQ_CTRL_SFLAG | SEEQ_CTRL_ESHORT |
                              SEEQ_CTRL_ENCARR;

        dev->netdev_ops         = &sgiseeq_netdev_ops;
        dev->watchdog_timeo     = (200 * HZ) / 1000;
        dev->irq                = irq;

        if (register_netdev(dev)) {
                printk(KERN_ERR "Sgiseeq: Cannot register net device, "
                       "aborting.\n");
                err = -ENODEV;
                goto err_out_free_page;
        }

        printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);

        return 0;

err_out_free_page:
        dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
                             sp->srings_dma);
err_out_free_dev:
        free_netdev(dev);

err_out:
        return err;
}
static int __exit sgiseeq_remove(struct platform_device *pdev)
{
        struct net_device *dev = platform_get_drvdata(pdev);
        struct sgiseeq_private *sp = netdev_priv(dev);

        unregister_netdev(dev);
        dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
                             sp->srings_dma);
        free_netdev(dev);
        platform_set_drvdata(pdev, NULL);

        return 0;
}

static struct platform_driver sgiseeq_driver = {
        .probe  = sgiseeq_probe,
        .remove = __exit_p(sgiseeq_remove),
        .driver = {
                .name   = "sgiseeq",
                .owner  = THIS_MODULE,
        }
};

module_platform_driver(sgiseeq_driver);

MODULE_DESCRIPTION("SGI Seeq 8003 driver");
MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sgiseeq");