macmace.c

/*
 *	Driver for the Macintosh 68K onboard MACE controller with PSC
 *	driven DMA. The MACE driver code is derived from mace.c. The
 *	Mac68k theory of operation is courtesy of the MacBSD wizards.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Copyright (C) 1996 Paul Mackerras.
 *	Copyright (C) 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 *	Modified heavily by Joshua M. Thompson based on Dave Huang's NetBSD driver
 *
 *	Copyright (C) 2007 Finn Thain
 *
 *	Converted to DMA API, converted to unified driver model,
 *	sync'd some routines with mace.c and fixed various bugs.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/crc32.h>
#include <linux/bitrev.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <asm/macints.h>
#include <asm/mac_psc.h>
#include <asm/page.h>
#include "mace.h"

static char mac_mace_string[] = "macmace";

#define N_TX_BUFF_ORDER	0
#define N_TX_RING	(1 << N_TX_BUFF_ORDER)
#define N_RX_BUFF_ORDER	3
#define N_RX_RING	(1 << N_RX_BUFF_ORDER)

#define TX_TIMEOUT	HZ

#define MACE_BUFF_SIZE	0x800

/* Chip rev needs workaround on HW & multicast addr change */
#define BROKEN_ADDRCHG_REV	0x0941

/* The MACE is simply wired down on a Mac68K box */

#define MACE_BASE	(void *)(0x50F1C000)
#define MACE_PROM	(void *)(0x50F08001)

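/*
 * A note on buffer geometry, as implied by the defines above and by the
 * dma_alloc_coherent() calls in mace_open(): the driver uses one 0x800-byte
 * (2 KiB) transmit buffer (N_TX_RING == 1) and eight 0x800-byte receive
 * buffers (N_RX_RING == 8), all allocated as coherent DMA memory and handed
 * to the PSC DMA engine.
 */
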
struct mace_data {
	volatile struct mace *mace;
	unsigned char *tx_ring;
	dma_addr_t tx_ring_phys;
	unsigned char *rx_ring;
	dma_addr_t rx_ring_phys;
	int dma_intr;
	int rx_slot, rx_tail;
	int tx_slot, tx_sloti, tx_count;
	int chipid;
	struct device *device;
};

struct mace_frame {
	u8 rcvcnt;
	u8 pad1;
	u8 rcvsts;
	u8 pad2;
	u8 rntpc;
	u8 pad3;
	u8 rcvcc;
	u8 pad4;
	u32 pad5;
	u32 pad6;
	u8 data[1];
	/* And frame continues.. */
};

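/*
 * How this header is consumed (see mace_dma_rx_frame() below): rcvcnt holds
 * the low eight bits of the received byte count and the low nibble of rcvsts
 * supplies bits 8-11, so the frame length is
 *
 *	rcvcnt + ((rcvsts & 0x0F) << 8)
 *
 * while the remaining rcvsts bits carry the RS_OFLO/RS_CLSN/RS_FRAMERR/
 * RS_FCSERR error flags.
 */
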
#define PRIV_BYTES	sizeof(struct mace_data)

static int mace_open(struct net_device *dev);
static int mace_close(struct net_device *dev);
static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
static void mace_set_multicast(struct net_device *dev);
static int mace_set_address(struct net_device *dev, void *addr);
static void mace_reset(struct net_device *dev);
static irqreturn_t mace_interrupt(int irq, void *dev_id);
static irqreturn_t mace_dma_intr(int irq, void *dev_id);
static void mace_tx_timeout(struct net_device *dev);
static void __mace_set_address(struct net_device *dev, void *addr);

/*
 * Load a receive DMA channel with a base address and ring length
 */

static void mace_load_rxdma_base(struct net_device *dev, int set)
{
	struct mace_data *mp = netdev_priv(dev);

	psc_write_word(PSC_ENETRD_CMD + set, 0x0100);
	psc_write_long(PSC_ENETRD_ADDR + set, (u32) mp->rx_ring_phys);
	psc_write_long(PSC_ENETRD_LEN + set, N_RX_RING);
	psc_write_word(PSC_ENETRD_CMD + set, 0x9800);
	mp->rx_tail = 0;
}

/*
 * Reset the receive DMA subsystem
 */

static void mace_rxdma_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mace = mp->mace;
	u8 maccc = mace->maccc;

	mace->maccc = maccc & ~ENRCV;

	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x00);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	mace_load_rxdma_base(dev, 0x10);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace->maccc = maccc;
	mp->rx_slot = 0;

	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x9800);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x9800);
}

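/*
 * The PSC exposes two register sets per DMA channel (PSC_SET0 and PSC_SET1,
 * 0x10 apart), which this driver appears to use as a double buffer: both
 * sets are primed above, and rx_slot/tx_slot/tx_sloti are toggled with
 * "^= 0x10" elsewhere in the driver to flip between them as transfers
 * complete.
 */
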
/*
 * Reset the transmit DMA subsystem
 */

static void mace_txdma_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mace = mp->mace;
	u8 maccc;

	psc_write_word(PSC_ENETWR_CTL, 0x8800);

	maccc = mace->maccc;
	mace->maccc = maccc & ~ENXMT;

	mp->tx_slot = mp->tx_sloti = 0;
	mp->tx_count = N_TX_RING;

	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	mace->maccc = maccc;
}

/*
 * Disable DMA
 */

static void mace_dma_off(struct net_device *dev)
{
	psc_write_word(PSC_ENETRD_CTL, 0x8800);
	psc_write_word(PSC_ENETRD_CTL, 0x1000);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETRD_CMD + PSC_SET1, 0x1100);

	psc_write_word(PSC_ENETWR_CTL, 0x8800);
	psc_write_word(PSC_ENETWR_CTL, 0x1000);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET0, 0x1100);
	psc_write_word(PSC_ENETWR_CMD + PSC_SET1, 0x1100);
}

static const struct net_device_ops mace_netdev_ops = {
	.ndo_open		= mace_open,
	.ndo_stop		= mace_close,
	.ndo_start_xmit		= mace_xmit_start,
	.ndo_tx_timeout		= mace_tx_timeout,
	.ndo_set_rx_mode	= mace_set_multicast,
	.ndo_set_mac_address	= mace_set_address,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
};

/*
 * Not really much of a probe. The hardware table tells us if this
 * model of Macintrash has a MACE (AV macintoshes)
 */

static int __devinit mace_probe(struct platform_device *pdev)
{
	int j;
	struct mace_data *mp;
	unsigned char *addr;
	struct net_device *dev;
	unsigned char checksum = 0;
	int err;

	dev = alloc_etherdev(PRIV_BYTES);
	if (!dev)
		return -ENOMEM;

	mp = netdev_priv(dev);
	mp->device = &pdev->dev;
	platform_set_drvdata(pdev, dev);	/* mac_mace_device_remove() fetches this */
	SET_NETDEV_DEV(dev, &pdev->dev);

	dev->base_addr = (u32)MACE_BASE;
	mp->mace = MACE_BASE;

	dev->irq = IRQ_MAC_MACE;
	mp->dma_intr = IRQ_MAC_MACE_DMA;

	mp->chipid = mp->mace->chipid_hi << 8 | mp->mace->chipid_lo;

	/*
	 * The PROM contains 8 bytes which total 0xFF when XOR'd
	 * together. Due to the usual peculiar apple brain damage
	 * the bytes are spaced out at 16-byte intervals and the
	 * bits are reversed.
	 */

	addr = MACE_PROM;

	for (j = 0; j < 6; ++j) {
		u8 v = bitrev8(addr[j<<4]);
		checksum ^= v;
		dev->dev_addr[j] = v;
	}
	for (; j < 8; ++j) {
		checksum ^= bitrev8(addr[j<<4]);
	}

	if (checksum != 0xFF) {
		free_netdev(dev);
		return -ENODEV;
	}

	dev->netdev_ops = &mace_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	printk(KERN_INFO "%s: 68K MACE, hardware address %pM\n",
	       dev->name, dev->dev_addr);

	err = register_netdev(dev);
	if (!err)
		return 0;

	free_netdev(dev);
	return err;
}

/*
 * Reset the chip.
 */

static void mace_reset(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int i;

	/* soft-reset the chip */
	i = 200;
	while (--i) {
		mb->biucc = SWRST;
		if (mb->biucc & SWRST) {
			udelay(10);
			continue;
		}
		break;
	}
	if (!i) {
		printk(KERN_ERR "macmace: cannot reset chip!\n");
		return;
	}

	mb->maccc = 0;	/* turn off tx, rx */
	mb->imr = 0xFF;	/* disable all intrs for now */
	i = mb->ir;

	mb->biucc = XMTSP_64;
	mb->utr = RTRD;
	mb->fifocc = XMTFW_8 | RCVFW_64 | XMTFWU | RCVFWU;

	mb->xmtfc = AUTO_PAD_XMIT;	/* auto-pad short frames */
	mb->rcvfc = 0;

	/* load up the hardware address */
	__mace_set_address(dev, dev->dev_addr);

	/* clear the multicast filter */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		mb->iac = LOGADDR;
	else {
		mb->iac = ADDRCHG | LOGADDR;
		while ((mb->iac & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 8; ++i)
		mb->ladrf = 0;

	/* done changing address */
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		mb->iac = 0;

	mb->plscc = PORTSEL_AUI;
}

/*
 * Load the address on a mace controller.
 */

static void __mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned char *p = addr;
	int i;

	/* load up the hardware address */
	if (mp->chipid == BROKEN_ADDRCHG_REV)
		mb->iac = PHYADDR;
	else {
		mb->iac = ADDRCHG | PHYADDR;
		while ((mb->iac & ADDRCHG) != 0)
			;
	}
	for (i = 0; i < 6; ++i)
		mb->padr = dev->dev_addr[i] = p[i];
	if (mp->chipid != BROKEN_ADDRCHG_REV)
		mb->iac = 0;
}

static int mace_set_address(struct net_device *dev, void *addr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned long flags;
	u8 maccc;

	local_irq_save(flags);

	maccc = mb->maccc;

	__mace_set_address(dev, addr);

	mb->maccc = maccc;

	local_irq_restore(flags);

	return 0;
}

/*
 * Open the Macintosh MACE. Most of this is playing with the DMA
 * engine. The ethernet chip is quite friendly.
 */

static int mace_open(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	/* reset the chip */
	mace_reset(dev);

	if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
		return -EAGAIN;
	}
	if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
		printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
		free_irq(dev->irq, dev);
		return -EAGAIN;
	}

	/* Allocate the DMA ring buffers */

	mp->tx_ring = dma_alloc_coherent(mp->device,
					 N_TX_RING * MACE_BUFF_SIZE,
					 &mp->tx_ring_phys, GFP_KERNEL);
	if (mp->tx_ring == NULL) {
		printk(KERN_ERR "%s: unable to allocate DMA tx buffers\n", dev->name);
		goto out1;
	}

	mp->rx_ring = dma_alloc_coherent(mp->device,
					 N_RX_RING * MACE_BUFF_SIZE,
					 &mp->rx_ring_phys, GFP_KERNEL);
	if (mp->rx_ring == NULL) {
		printk(KERN_ERR "%s: unable to allocate DMA rx buffers\n", dev->name);
		goto out2;
	}

	mace_dma_off(dev);

	/* Not sure what these do */

	psc_write_word(PSC_ENETWR_CTL, 0x9000);
	psc_write_word(PSC_ENETRD_CTL, 0x9000);
	psc_write_word(PSC_ENETWR_CTL, 0x0400);
	psc_write_word(PSC_ENETRD_CTL, 0x0400);

	mace_rxdma_reset(dev);
	mace_txdma_reset(dev);

	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;
	return 0;

out2:
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
			  mp->tx_ring, mp->tx_ring_phys);
out1:
	free_irq(dev->irq, dev);
	free_irq(mp->dma_intr, dev);
	return -ENOMEM;
}

/*
 * Shut down the mace and its interrupt channel
 */

static int mace_close(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;

	mb->maccc = 0;		/* disable rx and tx */
	mb->imr = 0xFF;		/* disable all irqs */
	mace_dma_off(dev);	/* disable rx and tx dma */

	return 0;
}

/*
 * Transmit a frame
 */

static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	unsigned long flags;

	/* Stop the queue since there's only the one buffer */

	local_irq_save(flags);
	netif_stop_queue(dev);
	if (!mp->tx_count) {
		printk(KERN_ERR "macmace: tx queue running but no free buffers.\n");
		local_irq_restore(flags);
		return NETDEV_TX_BUSY;
	}
	mp->tx_count--;
	local_irq_restore(flags);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	/* We need to copy into our xmit buffer to take care of alignment and caching issues */
	skb_copy_from_linear_data(skb, mp->tx_ring, skb->len);

	/* load the Tx DMA and fire it off */

	psc_write_long(PSC_ENETWR_ADDR + mp->tx_slot, (u32) mp->tx_ring_phys);
	psc_write_long(PSC_ENETWR_LEN + mp->tx_slot, skb->len);
	psc_write_word(PSC_ENETWR_CMD + mp->tx_slot, 0x9800);

	mp->tx_slot ^= 0x10;

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

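/*
 * Transmit flow control in brief: because there is only the one Tx buffer,
 * the queue is stopped on every transmit. It is woken again from
 * mace_interrupt() once tx_count shows a free buffer, and tx_count itself is
 * replenished by the write-queue handling in mace_dma_intr() when the PSC
 * reports the DMA transfer complete.
 */
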
static void mace_set_multicast(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int i;
	u32 crc;
	u8 maccc;
	unsigned long flags;

	local_irq_save(flags);
	maccc = mb->maccc;
	mb->maccc &= ~PROM;

	if (dev->flags & IFF_PROMISC) {
		mb->maccc |= PROM;
	} else {
		unsigned char multicast_filter[8];
		struct netdev_hw_addr *ha;

		if (dev->flags & IFF_ALLMULTI) {
			for (i = 0; i < 8; i++) {
				multicast_filter[i] = 0xFF;
			}
		} else {
			for (i = 0; i < 8; i++)
				multicast_filter[i] = 0;
			netdev_for_each_mc_addr(ha, dev) {
				crc = ether_crc_le(6, ha->addr);
				/* bit number in multicast_filter */
				i = crc >> 26;
				multicast_filter[i >> 3] |= 1 << (i & 7);
			}
		}

		if (mp->chipid == BROKEN_ADDRCHG_REV)
			mb->iac = LOGADDR;
		else {
			mb->iac = ADDRCHG | LOGADDR;
			while ((mb->iac & ADDRCHG) != 0)
				;
		}
		for (i = 0; i < 8; ++i)
			mb->ladrf = multicast_filter[i];
		if (mp->chipid != BROKEN_ADDRCHG_REV)
			mb->iac = 0;
	}

	mb->maccc = maccc;
	local_irq_restore(flags);
}

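/*
 * The logical-address hash above works as follows: the top six bits of the
 * little-endian CRC-32 of each multicast address (crc >> 26) give a bit
 * index in the range 0..63; i >> 3 then selects one of the eight filter
 * bytes and i & 7 the bit within it. For example, a CRC of 0xF0000000
 * yields index 60, i.e. bit 4 of filter byte 7.
 */
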
static void mace_handle_misc_intrs(struct net_device *dev, int intr)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	static int mace_babbles, mace_jabbers;

	if (intr & MPCO)
		dev->stats.rx_missed_errors += 256;
	dev->stats.rx_missed_errors += mb->mpc;		/* reading clears it */
	if (intr & RNTPCO)
		dev->stats.rx_length_errors += 256;
	dev->stats.rx_length_errors += mb->rntpc;	/* reading clears it */
	if (intr & CERR)
		++dev->stats.tx_heartbeat_errors;
	if (intr & BABBLE)
		if (mace_babbles++ < 4)
			printk(KERN_DEBUG "macmace: babbling transmitter\n");
	if (intr & JABBER)
		if (mace_jabbers++ < 4)
			printk(KERN_DEBUG "macmace: jabbering transceiver\n");
}

static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	int intr, fs;
	unsigned long flags;

	/* don't want the dma interrupt handler to fire */
	local_irq_save(flags);

	intr = mb->ir; /* read interrupt register */
	mace_handle_misc_intrs(dev, intr);

	if (intr & XMTINT) {
		fs = mb->xmtfs;
		if ((fs & XMTSV) == 0) {
			printk(KERN_ERR "macmace: xmtfs not valid! (fs=%x)\n", fs);
			mace_reset(dev);
			/*
			 * XXX mace likes to hang the machine after a xmtfs error.
			 * This is hard to reproduce, resetting *may* help
			 */
		}
		/* dma should have finished */
		if (!mp->tx_count) {
			printk(KERN_DEBUG "macmace: tx ring ran out? (fs=%x)\n", fs);
		}
		/* Update stats */
		if (fs & (UFLO|LCOL|LCAR|RTRY)) {
			++dev->stats.tx_errors;
			if (fs & LCAR)
				++dev->stats.tx_carrier_errors;
			else if (fs & (UFLO|LCOL|RTRY)) {
				++dev->stats.tx_aborted_errors;
				if (mb->xmtfs & UFLO) {
					printk(KERN_ERR "%s: DMA underrun.\n", dev->name);
					dev->stats.tx_fifo_errors++;
					mace_txdma_reset(dev);
				}
			}
		}
	}

	if (mp->tx_count)
		netif_wake_queue(dev);

	local_irq_restore(flags);

	return IRQ_HANDLED;
}

static void mace_tx_timeout(struct net_device *dev)
{
	struct mace_data *mp = netdev_priv(dev);
	volatile struct mace *mb = mp->mace;
	unsigned long flags;

	local_irq_save(flags);

	/* turn off both tx and rx and reset the chip */
	mb->maccc = 0;
	printk(KERN_ERR "macmace: transmit timeout - resetting\n");
	mace_txdma_reset(dev);
	mace_reset(dev);

	/* restart rx dma */
	mace_rxdma_reset(dev);

	mp->tx_count = N_TX_RING;
	netif_wake_queue(dev);

	/* turn it on! */
	mb->maccc = ENXMT | ENRCV;
	/* enable all interrupts except receive interrupts */
	mb->imr = RCVINT;

	local_irq_restore(flags);
}

/*
 * Handle a newly arrived frame
 */

static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
{
	struct sk_buff *skb;
	unsigned int frame_status = mf->rcvsts;

	if (frame_status & (RS_OFLO | RS_CLSN | RS_FRAMERR | RS_FCSERR)) {
		dev->stats.rx_errors++;
		if (frame_status & RS_OFLO) {
			printk(KERN_DEBUG "%s: fifo overflow.\n", dev->name);
			dev->stats.rx_fifo_errors++;
		}
		if (frame_status & RS_CLSN)
			dev->stats.collisions++;
		if (frame_status & RS_FRAMERR)
			dev->stats.rx_frame_errors++;
		if (frame_status & RS_FCSERR)
			dev->stats.rx_crc_errors++;
	} else {
		unsigned int frame_length = mf->rcvcnt + ((frame_status & 0x0F) << 8);

		skb = netdev_alloc_skb(dev, frame_length + 2);
		if (!skb) {
			dev->stats.rx_dropped++;
			return;
		}
		skb_reserve(skb, 2);
		memcpy(skb_put(skb, frame_length), mf->data, frame_length);

		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += frame_length;
	}
}

/*
 * The PSC has passed us a DMA interrupt event.
 */

static irqreturn_t mace_dma_intr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct mace_data *mp = netdev_priv(dev);
	int left, head;
	u16 status;
	u32 baka;

	/* Not sure what this does */

	while ((baka = psc_read_long(PSC_MYSTERY)) != psc_read_long(PSC_MYSTERY));
	if (!(baka & 0x60000000))
		return IRQ_NONE;

	/*
	 * Process the read queue
	 */

	status = psc_read_word(PSC_ENETRD_CTL);

	if (status & 0x2000) {
		mace_rxdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x1100);

		left = psc_read_long(PSC_ENETRD_LEN + mp->rx_slot);
		head = N_RX_RING - left;

		/* Loop through the ring buffer and process new packets */

		while (mp->rx_tail < head) {
			mace_dma_rx_frame(dev, (struct mace_frame *) (mp->rx_ring
				+ (mp->rx_tail * MACE_BUFF_SIZE)));
			mp->rx_tail++;
		}

		/* If we're out of buffers in this ring then switch to */
		/* the other set, otherwise just reactivate this one.  */

		if (!left) {
			mace_load_rxdma_base(dev, mp->rx_slot);
			mp->rx_slot ^= 0x10;
		} else {
			psc_write_word(PSC_ENETRD_CMD + mp->rx_slot, 0x9800);
		}
	}

	/*
	 * Process the write queue
	 */

	status = psc_read_word(PSC_ENETWR_CTL);

	if (status & 0x2000) {
		mace_txdma_reset(dev);
	} else if (status & 0x0100) {
		psc_write_word(PSC_ENETWR_CMD + mp->tx_sloti, 0x0100);
		mp->tx_sloti ^= 0x10;
		mp->tx_count++;
	}
	return IRQ_HANDLED;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Macintosh MACE ethernet driver");
MODULE_ALIAS("platform:macmace");

static int __devexit mac_mace_device_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mace_data *mp = netdev_priv(dev);

	unregister_netdev(dev);

	free_irq(dev->irq, dev);
	free_irq(IRQ_MAC_MACE_DMA, dev);

	dma_free_coherent(mp->device, N_RX_RING * MACE_BUFF_SIZE,
			  mp->rx_ring, mp->rx_ring_phys);
	dma_free_coherent(mp->device, N_TX_RING * MACE_BUFF_SIZE,
			  mp->tx_ring, mp->tx_ring_phys);

	free_netdev(dev);

	return 0;
}

static struct platform_driver mac_mace_driver = {
	.probe  = mace_probe,
	.remove = __devexit_p(mac_mace_device_remove),
	.driver	= {
		.name	= mac_mace_string,
		.owner	= THIS_MODULE,
	},
};

static int __init mac_mace_init_module(void)
{
	if (!MACH_IS_MAC)
		return -ENODEV;

	return platform_driver_register(&mac_mace_driver);
}

static void __exit mac_mace_cleanup_module(void)
{
	platform_driver_unregister(&mac_mace_driver);
}

module_init(mac_mace_init_module);
module_exit(mac_mace_cleanup_module);