3c527.c 42 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661
  1. /* 3c527.c: 3Com Etherlink/MC32 driver for Linux 2.4 and 2.6.
  2. *
  3. * (c) Copyright 1998 Red Hat Software Inc
  4. * Written by Alan Cox.
  5. * Further debugging by Carl Drougge.
  6. * Initial SMP support by Felipe W Damasio <felipewd@terra.com.br>
  7. * Heavily modified by Richard Procter <rnp@paradise.net.nz>
  8. *
  9. * Based on skeleton.c written 1993-94 by Donald Becker and ne2.c
  10. * (for the MCA stuff) written by Wim Dumon.
  11. *
  12. * Thanks to 3Com for making this possible by providing me with the
  13. * documentation.
  14. *
  15. * This software may be used and distributed according to the terms
  16. * of the GNU General Public License, incorporated herein by reference.
  17. *
  18. */
  19. #define DRV_NAME "3c527"
  20. #define DRV_VERSION "0.7-SMP"
  21. #define DRV_RELDATE "2003/09/21"
/* Module banner; printed once from mc32_probe1() when mc32_debug is set. */
static const char *version =
DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Richard Procter <rnp@paradise.net.nz>\n";
  24. /**
  25. * DOC: Traps for the unwary
  26. *
  27. * The diagram (Figure 1-1) and the POS summary disagree with the
  28. * "Interrupt Level" section in the manual.
  29. *
  30. * The manual contradicts itself when describing the minimum number
  31. * buffers in the 'configure lists' command.
  32. * My card accepts a buffer config of 4/4.
  33. *
  34. * Setting the SAV BP bit does not save bad packets, but
  35. * only enables RX on-card stats collection.
  36. *
  37. * The documentation in places seems to miss things. In actual fact
  38. * I've always eventually found everything is documented, it just
  39. * requires careful study.
  40. *
  41. * DOC: Theory Of Operation
  42. *
  43. * The 3com 3c527 is a 32bit MCA bus mastering adapter with a large
  44. * amount of on board intelligence that housekeeps a somewhat dumber
  45. * Intel NIC. For performance we want to keep the transmit queue deep
  46. * as the card can transmit packets while fetching others from main
  47. * memory by bus master DMA. Transmission and reception are driven by
  48. * circular buffer queues.
  49. *
  50. * The mailboxes can be used for controlling how the card traverses
  51. * its buffer rings, but are used only for initial setup in this
  52. * implementation. The exec mailbox allows a variety of commands to
  53. * be executed. Each command must complete before the next is
  54. * executed. Primarily we use the exec mailbox for controlling the
  55. * multicast lists. We have to do a certain amount of interesting
  56. * hoop jumping as the multicast list changes can occur in interrupt
  57. * state when the card has an exec command pending. We defer such
  58. * events until the command completion interrupt.
  59. *
  60. * A copy break scheme (taken from 3c59x.c) is employed whereby
  61. * received frames exceeding a configurable length are passed
  62. * directly to the higher networking layers without incurring a copy,
  63. * in what amounts to a time/space trade-off.
  64. *
  65. * The card also keeps a large amount of statistical information
  66. * on-board. In a perfect world, these could be used safely at no
  67. * cost. However, lacking information to the contrary, processing
  68. * them without races would involve so much extra complexity as to
  69. * make it unworthwhile to do so. In the end, a hybrid SW/HW
  70. * implementation was made necessary --- see mc32_update_stats().
  71. *
  72. * DOC: Notes
  73. *
  74. * It should be possible to use two or more cards, but at this stage
  75. * only by loading two copies of the same module.
  76. *
  77. * The on-board 82586 NIC has trouble receiving multiple
  78. * back-to-back frames and so is likely to drop packets from fast
  79. * senders.
  80. **/
  81. #include <linux/module.h>
  82. #include <linux/errno.h>
  83. #include <linux/netdevice.h>
  84. #include <linux/etherdevice.h>
  85. #include <linux/if_ether.h>
  86. #include <linux/init.h>
  87. #include <linux/kernel.h>
  88. #include <linux/types.h>
  89. #include <linux/fcntl.h>
  90. #include <linux/interrupt.h>
  91. #include <linux/mca-legacy.h>
  92. #include <linux/ioport.h>
  93. #include <linux/in.h>
  94. #include <linux/skbuff.h>
  95. #include <linux/slab.h>
  96. #include <linux/string.h>
  97. #include <linux/wait.h>
  98. #include <linux/ethtool.h>
  99. #include <linux/completion.h>
  100. #include <linux/bitops.h>
  101. #include <linux/semaphore.h>
  102. #include <asm/uaccess.h>
  103. #include <asm/io.h>
  104. #include <asm/dma.h>
  105. #include "3c527.h"
  106. MODULE_LICENSE("GPL");
  107. /*
  108. * The name of the card. Is used for messages and in the requests for
  109. * io regions, irqs and dma channels
  110. */
  111. static const char* cardname = DRV_NAME;
  112. /* use 0 for production, 1 for verification, >2 for debug */
  113. #ifndef NET_DEBUG
  114. #define NET_DEBUG 2
  115. #endif
  116. static unsigned int mc32_debug = NET_DEBUG;
  117. /* The number of low I/O ports used by the ethercard. */
  118. #define MC32_IO_EXTENT 8
  119. /* As implemented, values must be a power-of-2 -- 4/8/16/32 */
  120. #define TX_RING_LEN 32 /* Typically the card supports 37 */
  121. #define RX_RING_LEN 8 /* " " " */
  122. /* Copy break point, see above for details.
  123. * Setting to > 1512 effectively disables this feature. */
  124. #define RX_COPYBREAK 200 /* Value from 3c59x.c */
  125. /* Issue the 82586 workaround command - this is for "busy lans", but
  126. * basically means for all lans now days - has a performance (latency)
  127. * cost, but best set. */
  128. static const int WORKAROUND_82586=1;
/* Pointers to buffers and their on-card records */
struct mc32_ring_desc
{
	volatile struct skb_header *p;	/* On-card descriptor in shared memory */
	struct sk_buff *skb;		/* Host sk_buff backing this ring slot */
};
/* Information that needs to be kept for each board. */
struct mc32_local
{
	int slot;			/* MCA slot occupied by this board */
	u32 base;			/* Bus address of card shared memory */
	volatile struct mc32_mailbox *rx_box;	/* Receive command mailbox */
	volatile struct mc32_mailbox *tx_box;	/* Transmit command mailbox */
	volatile struct mc32_mailbox *exec_box;	/* Execute command mailbox */
	volatile struct mc32_stats *stats;	/* Start of on-card statistics */
	u16 tx_chain;			/* Transmit list start offset */
	u16 rx_chain;			/* Receive list start offset */
	u16 tx_len;			/* Transmit list count */
	u16 rx_len;			/* Receive list count */
	u16 xceiver_desired_state;	/* HALTED or RUNNING */
	u16 cmd_nonblocking;		/* Thread is uninterested in command result */
	u16 mc_reload_wait;		/* A multicast load request is pending */
	u32 mc_list_valid;		/* True when the mclist is set */
	struct mc32_ring_desc tx_ring[TX_RING_LEN];	/* Host Transmit ring */
	struct mc32_ring_desc rx_ring[RX_RING_LEN];	/* Host Receive ring */
	atomic_t tx_count;		/* buffers left */
	atomic_t tx_ring_head;		/* index to tx en-queue end */
	u16 tx_ring_tail;		/* index to tx de-queue end */
	u16 rx_ring_tail;		/* index to rx de-queue end */
	struct semaphore cmd_mutex;	/* Serialises issuing of execute commands */
	struct completion execution_cmd;	/* Card has completed an execute command */
	struct completion xceiver_cmd;		/* Card has completed a tx or rx command */
};
  162. /* The station (ethernet) address prefix, used for a sanity check. */
  163. #define SA_ADDR0 0x02
  164. #define SA_ADDR1 0x60
  165. #define SA_ADDR2 0xAC
/* MCA POS adapter id -> display name for the boards this driver supports. */
struct mca_adapters_t {
	unsigned int id;	/* MCA POS adapter id */
	char *name;		/* Name passed to mca_set_adapter_name() */
};
static const struct mca_adapters_t mc32_adapters[] = {
	{ 0x0041, "3COM EtherLink MC/32" },
	{ 0x8EF5, "IBM High Performance Lan Adapter" },
	{ 0x0000, NULL }	/* sentinel: terminates the probe loop */
};
  175. /* Macros for ring index manipulations */
  176. static inline u16 next_rx(u16 rx) { return (rx+1)&(RX_RING_LEN-1); };
  177. static inline u16 prev_rx(u16 rx) { return (rx-1)&(RX_RING_LEN-1); };
  178. static inline u16 next_tx(u16 tx) { return (tx+1)&(TX_RING_LEN-1); };
  179. /* Index to functions, as function prototypes. */
  180. static int mc32_probe1(struct net_device *dev, int ioaddr);
  181. static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len);
  182. static int mc32_open(struct net_device *dev);
  183. static void mc32_timeout(struct net_device *dev);
  184. static netdev_tx_t mc32_send_packet(struct sk_buff *skb,
  185. struct net_device *dev);
  186. static irqreturn_t mc32_interrupt(int irq, void *dev_id);
  187. static int mc32_close(struct net_device *dev);
  188. static struct net_device_stats *mc32_get_stats(struct net_device *dev);
  189. static void mc32_set_multicast_list(struct net_device *dev);
  190. static void mc32_reset_multicast_list(struct net_device *dev);
  191. static const struct ethtool_ops netdev_ethtool_ops;
  192. static void cleanup_card(struct net_device *dev)
  193. {
  194. struct mc32_local *lp = netdev_priv(dev);
  195. unsigned slot = lp->slot;
  196. mca_mark_as_unused(slot);
  197. mca_set_adapter_name(slot, NULL);
  198. free_irq(dev->irq, dev);
  199. release_region(dev->base_addr, MC32_IO_EXTENT);
  200. }
  201. /**
  202. * mc32_probe - Search for supported boards
  203. * @unit: interface number to use
  204. *
  205. * Because MCA bus is a real bus and we can scan for cards we could do a
  206. * single scan for all boards here. Right now we use the passed in device
  207. * structure and scan for only one board. This needs fixing for modules
  208. * in particular.
  209. */
  210. struct net_device *__init mc32_probe(int unit)
  211. {
  212. struct net_device *dev = alloc_etherdev(sizeof(struct mc32_local));
  213. static int current_mca_slot = -1;
  214. int i;
  215. int err;
  216. if (!dev)
  217. return ERR_PTR(-ENOMEM);
  218. if (unit >= 0)
  219. sprintf(dev->name, "eth%d", unit);
  220. /* Do not check any supplied i/o locations.
  221. POS registers usually don't fail :) */
  222. /* MCA cards have POS registers.
  223. Autodetecting MCA cards is extremely simple.
  224. Just search for the card. */
  225. for(i = 0; (mc32_adapters[i].name != NULL); i++) {
  226. current_mca_slot =
  227. mca_find_unused_adapter(mc32_adapters[i].id, 0);
  228. if(current_mca_slot != MCA_NOTFOUND) {
  229. if(!mc32_probe1(dev, current_mca_slot))
  230. {
  231. mca_set_adapter_name(current_mca_slot,
  232. mc32_adapters[i].name);
  233. mca_mark_as_used(current_mca_slot);
  234. err = register_netdev(dev);
  235. if (err) {
  236. cleanup_card(dev);
  237. free_netdev(dev);
  238. dev = ERR_PTR(err);
  239. }
  240. return dev;
  241. }
  242. }
  243. }
  244. free_netdev(dev);
  245. return ERR_PTR(-ENODEV);
  246. }
/* net_device method table: driver handlers plus generic ethernet helpers
 * for MTU, MAC address and address validation. */
static const struct net_device_ops netdev_ops = {
	.ndo_open		= mc32_open,
	.ndo_stop		= mc32_close,
	.ndo_start_xmit		= mc32_send_packet,
	.ndo_get_stats		= mc32_get_stats,
	.ndo_set_rx_mode	= mc32_set_multicast_list,
	.ndo_tx_timeout		= mc32_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
  258. /**
  259. * mc32_probe1 - Check a given slot for a board and test the card
  260. * @dev: Device structure to fill in
  261. * @slot: The MCA bus slot being used by this card
  262. *
  263. * Decode the slot data and configure the card structures. Having done this we
  264. * can reset the card and configure it. The card does a full self test cycle
  265. * in firmware so we have to wait for it to return and post us either a
  266. * failure case or some addresses we use to find the board internals.
  267. */
  268. static int __init mc32_probe1(struct net_device *dev, int slot)
  269. {
  270. static unsigned version_printed;
  271. int i, err;
  272. u8 POS;
  273. u32 base;
  274. struct mc32_local *lp = netdev_priv(dev);
  275. static const u16 mca_io_bases[] = {
  276. 0x7280,0x7290,
  277. 0x7680,0x7690,
  278. 0x7A80,0x7A90,
  279. 0x7E80,0x7E90
  280. };
  281. static const u32 mca_mem_bases[] = {
  282. 0x00C0000,
  283. 0x00C4000,
  284. 0x00C8000,
  285. 0x00CC000,
  286. 0x00D0000,
  287. 0x00D4000,
  288. 0x00D8000,
  289. 0x00DC000
  290. };
  291. static const char * const failures[] = {
  292. "Processor instruction",
  293. "Processor data bus",
  294. "Processor data bus",
  295. "Processor data bus",
  296. "Adapter bus",
  297. "ROM checksum",
  298. "Base RAM",
  299. "Extended RAM",
  300. "82586 internal loopback",
  301. "82586 initialisation failure",
  302. "Adapter list configuration error"
  303. };
  304. /* Time to play MCA games */
  305. if (mc32_debug && version_printed++ == 0)
  306. pr_debug("%s", version);
  307. pr_info("%s: %s found in slot %d: ", dev->name, cardname, slot);
  308. POS = mca_read_stored_pos(slot, 2);
  309. if(!(POS&1))
  310. {
  311. pr_cont("disabled.\n");
  312. return -ENODEV;
  313. }
  314. /* Fill in the 'dev' fields. */
  315. dev->base_addr = mca_io_bases[(POS>>1)&7];
  316. dev->mem_start = mca_mem_bases[(POS>>4)&7];
  317. POS = mca_read_stored_pos(slot, 4);
  318. if(!(POS&1))
  319. {
  320. pr_cont("memory window disabled.\n");
  321. return -ENODEV;
  322. }
  323. POS = mca_read_stored_pos(slot, 5);
  324. i=(POS>>4)&3;
  325. if(i==3)
  326. {
  327. pr_cont("invalid memory window.\n");
  328. return -ENODEV;
  329. }
  330. i*=16384;
  331. i+=16384;
  332. dev->mem_end=dev->mem_start + i;
  333. dev->irq = ((POS>>2)&3)+9;
  334. if(!request_region(dev->base_addr, MC32_IO_EXTENT, cardname))
  335. {
  336. pr_cont("io 0x%3lX, which is busy.\n", dev->base_addr);
  337. return -EBUSY;
  338. }
  339. pr_cont("io 0x%3lX irq %d mem 0x%lX (%dK)\n",
  340. dev->base_addr, dev->irq, dev->mem_start, i/1024);
  341. /* We ought to set the cache line size here.. */
  342. /*
  343. * Go PROM browsing
  344. */
  345. /* Retrieve and print the ethernet address. */
  346. for (i = 0; i < 6; i++)
  347. {
  348. mca_write_pos(slot, 6, i+12);
  349. mca_write_pos(slot, 7, 0);
  350. dev->dev_addr[i] = mca_read_pos(slot,3);
  351. }
  352. pr_info("%s: Address %pM ", dev->name, dev->dev_addr);
  353. mca_write_pos(slot, 6, 0);
  354. mca_write_pos(slot, 7, 0);
  355. POS = mca_read_stored_pos(slot, 4);
  356. if(POS&2)
  357. pr_cont(": BNC port selected.\n");
  358. else
  359. pr_cont(": AUI port selected.\n");
  360. POS=inb(dev->base_addr+HOST_CTRL);
  361. POS|=HOST_CTRL_ATTN|HOST_CTRL_RESET;
  362. POS&=~HOST_CTRL_INTE;
  363. outb(POS, dev->base_addr+HOST_CTRL);
  364. /* Reset adapter */
  365. udelay(100);
  366. /* Reset off */
  367. POS&=~(HOST_CTRL_ATTN|HOST_CTRL_RESET);
  368. outb(POS, dev->base_addr+HOST_CTRL);
  369. udelay(300);
  370. /*
  371. * Grab the IRQ
  372. */
  373. err = request_irq(dev->irq, mc32_interrupt, IRQF_SHARED, DRV_NAME, dev);
  374. if (err) {
  375. release_region(dev->base_addr, MC32_IO_EXTENT);
  376. pr_err("%s: unable to get IRQ %d.\n", DRV_NAME, dev->irq);
  377. goto err_exit_ports;
  378. }
  379. memset(lp, 0, sizeof(struct mc32_local));
  380. lp->slot = slot;
  381. i=0;
  382. base = inb(dev->base_addr);
  383. while(base == 0xFF)
  384. {
  385. i++;
  386. if(i == 1000)
  387. {
  388. pr_err("%s: failed to boot adapter.\n", dev->name);
  389. err = -ENODEV;
  390. goto err_exit_irq;
  391. }
  392. udelay(1000);
  393. if(inb(dev->base_addr+2)&(1<<5))
  394. base = inb(dev->base_addr);
  395. }
  396. if(base>0)
  397. {
  398. if(base < 0x0C)
  399. pr_err("%s: %s%s.\n", dev->name, failures[base-1],
  400. base<0x0A?" test failure":"");
  401. else
  402. pr_err("%s: unknown failure %d.\n", dev->name, base);
  403. err = -ENODEV;
  404. goto err_exit_irq;
  405. }
  406. base=0;
  407. for(i=0;i<4;i++)
  408. {
  409. int n=0;
  410. while(!(inb(dev->base_addr+2)&(1<<5)))
  411. {
  412. n++;
  413. udelay(50);
  414. if(n>100)
  415. {
  416. pr_err("%s: mailbox read fail (%d).\n", dev->name, i);
  417. err = -ENODEV;
  418. goto err_exit_irq;
  419. }
  420. }
  421. base|=(inb(dev->base_addr)<<(8*i));
  422. }
  423. lp->exec_box=isa_bus_to_virt(dev->mem_start+base);
  424. base=lp->exec_box->data[1]<<16|lp->exec_box->data[0];
  425. lp->base = dev->mem_start+base;
  426. lp->rx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[2]);
  427. lp->tx_box=isa_bus_to_virt(lp->base + lp->exec_box->data[3]);
  428. lp->stats = isa_bus_to_virt(lp->base + lp->exec_box->data[5]);
  429. /*
  430. * Descriptor chains (card relative)
  431. */
  432. lp->tx_chain = lp->exec_box->data[8]; /* Transmit list start offset */
  433. lp->rx_chain = lp->exec_box->data[10]; /* Receive list start offset */
  434. lp->tx_len = lp->exec_box->data[9]; /* Transmit list count */
  435. lp->rx_len = lp->exec_box->data[11]; /* Receive list count */
  436. sema_init(&lp->cmd_mutex, 0);
  437. init_completion(&lp->execution_cmd);
  438. init_completion(&lp->xceiver_cmd);
  439. pr_info("%s: Firmware Rev %d. %d RX buffers, %d TX buffers. Base of 0x%08X.\n",
  440. dev->name, lp->exec_box->data[12], lp->rx_len, lp->tx_len, lp->base);
  441. dev->netdev_ops = &netdev_ops;
  442. dev->watchdog_timeo = HZ*5; /* Board does all the work */
  443. dev->ethtool_ops = &netdev_ethtool_ops;
  444. return 0;
  445. err_exit_irq:
  446. free_irq(dev->irq, dev);
  447. err_exit_ports:
  448. release_region(dev->base_addr, MC32_IO_EXTENT);
  449. return err;
  450. }
  451. /**
  452. * mc32_ready_poll - wait until we can feed it a command
  453. * @dev: The device to wait for
  454. *
  455. * Wait until the card becomes ready to accept a command via the
  456. * command register. This tells us nothing about the completion
  457. * status of any pending commands and takes very little time at all.
  458. */
  459. static inline void mc32_ready_poll(struct net_device *dev)
  460. {
  461. int ioaddr = dev->base_addr;
  462. while(!(inb(ioaddr+HOST_STATUS)&HOST_STATUS_CRR));
  463. }
  464. /**
  465. * mc32_command_nowait - send a command non blocking
  466. * @dev: The 3c527 to issue the command to
  467. * @cmd: The command word to write to the mailbox
  468. * @data: A data block if the command expects one
  469. * @len: Length of the data block
  470. *
  471. * Send a command from interrupt state. If there is a command
  472. * currently being executed then we return an error of -1. It
  473. * simply isn't viable to wait around as commands may be
  474. * slow. This can theoretically be starved on SMP, but it's hard
  475. * to see a realistic situation. We do not wait for the command
  476. * to complete --- we rely on the interrupt handler to tidy up
  477. * after us.
  478. */
  479. static int mc32_command_nowait(struct net_device *dev, u16 cmd, void *data, int len)
  480. {
  481. struct mc32_local *lp = netdev_priv(dev);
  482. int ioaddr = dev->base_addr;
  483. int ret = -1;
  484. if (down_trylock(&lp->cmd_mutex) == 0)
  485. {
  486. lp->cmd_nonblocking=1;
  487. lp->exec_box->mbox=0;
  488. lp->exec_box->mbox=cmd;
  489. memcpy((void *)lp->exec_box->data, data, len);
  490. barrier(); /* the memcpy forgot the volatile so be sure */
  491. /* Send the command */
  492. mc32_ready_poll(dev);
  493. outb(1<<6, ioaddr+HOST_CMD);
  494. ret = 0;
  495. /* Interrupt handler will signal mutex on completion */
  496. }
  497. return ret;
  498. }
  499. /**
  500. * mc32_command - send a command and sleep until completion
  501. * @dev: The 3c527 card to issue the command to
  502. * @cmd: The command word to write to the mailbox
  503. * @data: A data block if the command expects one
  504. * @len: Length of the data block
  505. *
  506. * Sends exec commands in a user context. This permits us to wait around
  507. * for the replies and also to wait for the command buffer to complete
  508. * from a previous command before we execute our command. After our
  509. * command completes we will attempt any pending multicast reload
  510. * we blocked off by hogging the exec buffer.
  511. *
  512. * You feed the card a command, you wait, it interrupts you get a
  513. * reply. All well and good. The complication arises because you use
  514. * commands for filter list changes which come in at bh level from things
  515. * like IPV6 group stuff.
  516. */
static int mc32_command(struct net_device *dev, u16 cmd, void *data, int len)
{
	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;
	int ret = 0;

	/* Blocks until any previous command has completed */
	down(&lp->cmd_mutex);

	/*
	 *	My Turn
	 */

	lp->cmd_nonblocking = 0;
	/* Clear the mailbox before writing the command word */
	lp->exec_box->mbox = 0;
	lp->exec_box->mbox = cmd;
	memcpy((void *)lp->exec_box->data, data, len);
	barrier();	/* the memcpy forgot the volatile so be sure */

	mc32_ready_poll(dev);
	outb(1 << 6, ioaddr + HOST_CMD);

	/* Interrupt handler completes this when the card answers */
	wait_for_completion(&lp->execution_cmd);

	/* Bit 13 set in the returned mailbox indicates the command failed */
	if (lp->exec_box->mbox & (1 << 13))
		ret = -1;

	up(&lp->cmd_mutex);

	/*
	 *	A multicast set got blocked - try it now
	 */

	if (lp->mc_reload_wait) {
		mc32_reset_multicast_list(dev);
	}

	return ret;
}
  546. /**
  547. * mc32_start_transceiver - tell board to restart tx/rx
  548. * @dev: The 3c527 card to issue the command to
  549. *
  550. * This may be called from the interrupt state, where it is used
  551. * to restart the rx ring if the card runs out of rx buffers.
  552. *
  553. * We must first check if it's ok to (re)start the transceiver. See
  554. * mc32_close for details.
  555. */
static void mc32_start_transceiver(struct net_device *dev) {

	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	/* Ignore RX overflow on device closure */
	if (lp->xceiver_desired_state == HALTED)
		return;

	/* Give the card the offset to the post-EOL-bit RX descriptor */
	mc32_ready_poll(dev);
	lp->rx_box->mbox = 0;
	lp->rx_box->data[0] = lp->rx_ring[prev_rx(lp->rx_ring_tail)].p->next;
	outb(HOST_CMD_START_RX, ioaddr + HOST_CMD);

	/* Restart transmit as well; no mailbox data needed */
	mc32_ready_poll(dev);
	lp->tx_box->mbox = 0;
	outb(HOST_CMD_RESTRT_TX, ioaddr + HOST_CMD);	/* card ignores this on RX restart */

	/* We are not interrupted on start completion */
}
  572. /**
  573. * mc32_halt_transceiver - tell board to stop tx/rx
  574. * @dev: The 3c527 card to issue the command to
  575. *
  576. * We issue the commands to halt the card's transceiver. In fact,
  577. * after some experimenting we now simply tell the card to
  578. * suspend. When issuing aborts occasionally odd things happened.
  579. *
  580. * We then sleep until the card has notified us that both rx and
  581. * tx have been suspended.
  582. */
static void mc32_halt_transceiver(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	/* Suspend RX, then sleep until the card signals xceiver_cmd */
	mc32_ready_poll(dev);
	lp->rx_box->mbox = 0;
	outb(HOST_CMD_SUSPND_RX, ioaddr + HOST_CMD);
	wait_for_completion(&lp->xceiver_cmd);

	/* Then the same for TX */
	mc32_ready_poll(dev);
	lp->tx_box->mbox = 0;
	outb(HOST_CMD_SUSPND_TX, ioaddr + HOST_CMD);
	wait_for_completion(&lp->xceiver_cmd);
}
  596. /**
  597. * mc32_load_rx_ring - load the ring of receive buffers
  598. * @dev: 3c527 to build the ring for
  599. *
  600. * This initialises the on-card and driver datastructures to
  601. * the point where mc32_start_transceiver() can be called.
  602. *
  603. * The card sets up the receive ring for us. We are required to use the
  604. * ring it provides, although the size of the ring is configurable.
  605. *
  606. * We allocate an sk_buff for each ring entry in turn and
  607. * initialise its house-keeping info. At the same time, we read
  608. * each 'next' pointer in our rx_ring array. This reduces slow
  609. * shared-memory reads and makes it easy to access predecessor
  610. * descriptors.
  611. *
  612. * We then set the end-of-list bit for the last entry so that the
  613. * card will know when it has run out of buffers.
  614. */
  615. static int mc32_load_rx_ring(struct net_device *dev)
  616. {
  617. struct mc32_local *lp = netdev_priv(dev);
  618. int i;
  619. u16 rx_base;
  620. volatile struct skb_header *p;
  621. rx_base=lp->rx_chain;
  622. for(i=0; i<RX_RING_LEN; i++) {
  623. lp->rx_ring[i].skb=alloc_skb(1532, GFP_KERNEL);
  624. if (lp->rx_ring[i].skb==NULL) {
  625. for (;i>=0;i--)
  626. kfree_skb(lp->rx_ring[i].skb);
  627. return -ENOBUFS;
  628. }
  629. skb_reserve(lp->rx_ring[i].skb, 18);
  630. p=isa_bus_to_virt(lp->base+rx_base);
  631. p->control=0;
  632. p->data=isa_virt_to_bus(lp->rx_ring[i].skb->data);
  633. p->status=0;
  634. p->length=1532;
  635. lp->rx_ring[i].p=p;
  636. rx_base=p->next;
  637. }
  638. lp->rx_ring[i-1].p->control |= CONTROL_EOL;
  639. lp->rx_ring_tail=0;
  640. return 0;
  641. }
  642. /**
  643. * mc32_flush_rx_ring - free the ring of receive buffers
  644. * @lp: Local data of 3c527 to flush the rx ring of
  645. *
  646. * Free the buffer for each ring slot. This may be called
  647. * before mc32_load_rx_ring(), eg. on error in mc32_open().
  648. * Requires rx skb pointers to point to a valid skb, or NULL.
  649. */
  650. static void mc32_flush_rx_ring(struct net_device *dev)
  651. {
  652. struct mc32_local *lp = netdev_priv(dev);
  653. int i;
  654. for(i=0; i < RX_RING_LEN; i++)
  655. {
  656. if (lp->rx_ring[i].skb) {
  657. dev_kfree_skb(lp->rx_ring[i].skb);
  658. lp->rx_ring[i].skb = NULL;
  659. }
  660. lp->rx_ring[i].p=NULL;
  661. }
  662. }
  663. /**
  664. * mc32_load_tx_ring - load transmit ring
  665. * @dev: The 3c527 card to issue the command to
  666. *
  667. * This sets up the host transmit data-structures.
  668. *
  669. * First, we obtain from the card its current position in the tx
  670. * ring, so that we will know where to begin transmitting
  671. * packets.
  672. *
  673. * Then, we read the 'next' pointers from the on-card tx ring into
  674. * our tx_ring array to reduce slow shared-mem reads. Finally, we
  675. * initialise the tx house-keeping variables.
  676. *
  677. */
  678. static void mc32_load_tx_ring(struct net_device *dev)
  679. {
  680. struct mc32_local *lp = netdev_priv(dev);
  681. volatile struct skb_header *p;
  682. int i;
  683. u16 tx_base;
  684. tx_base=lp->tx_box->data[0];
  685. for(i=0 ; i<TX_RING_LEN ; i++)
  686. {
  687. p=isa_bus_to_virt(lp->base+tx_base);
  688. lp->tx_ring[i].p=p;
  689. lp->tx_ring[i].skb=NULL;
  690. tx_base=p->next;
  691. }
  692. /* -1 so that tx_ring_head cannot "lap" tx_ring_tail */
  693. /* see mc32_tx_ring */
  694. atomic_set(&lp->tx_count, TX_RING_LEN-1);
  695. atomic_set(&lp->tx_ring_head, 0);
  696. lp->tx_ring_tail=0;
  697. }
  698. /**
  699. * mc32_flush_tx_ring - free transmit ring
  700. * @lp: Local data of 3c527 to flush the tx ring of
  701. *
  702. * If the ring is non-empty, zip over the it, freeing any
  703. * allocated skb_buffs. The tx ring house-keeping variables are
  704. * then reset. Requires rx skb pointers to point to a valid skb,
  705. * or NULL.
  706. */
  707. static void mc32_flush_tx_ring(struct net_device *dev)
  708. {
  709. struct mc32_local *lp = netdev_priv(dev);
  710. int i;
  711. for (i=0; i < TX_RING_LEN; i++)
  712. {
  713. if (lp->tx_ring[i].skb)
  714. {
  715. dev_kfree_skb(lp->tx_ring[i].skb);
  716. lp->tx_ring[i].skb = NULL;
  717. }
  718. }
  719. atomic_set(&lp->tx_count, 0);
  720. atomic_set(&lp->tx_ring_head, 0);
  721. lp->tx_ring_tail=0;
  722. }
/**
 *	mc32_open	-	handle 'up' of card
 *	@dev: device to open
 *
 *	The user is trying to bring the card into ready state. This requires
 *	a brief dialogue with the card. Firstly we enable interrupts and then
 *	'indications'. Without these enabled the card doesn't bother telling
 *	us what it has done. This had me puzzled for a week.
 *
 *	We configure the number of card descriptors, then load the network
 *	address and multicast filters. Turn on the workaround mode. This
 *	works around a bug in the 82586 - it asks the firmware to do
 *	so. It has a performance (latency) hit but is needed on busy
 *	[read most] lans. We load the ring with buffers then we kick it
 *	all off.
 *
 *	Returns 0 on success, -ENOBUFS if the card rejects our descriptor
 *	configuration or the receive ring cannot be populated.
 */

static int mc32_open(struct net_device *dev)
{
	int ioaddr = dev->base_addr;
	struct mc32_local *lp = netdev_priv(dev);
	u8 one=1;
	u8 regs;
	u16 descnumbuffs[2] = {TX_RING_LEN, RX_RING_LEN};

	/*
	 *	Interrupts enabled
	 */
	regs=inb(ioaddr+HOST_CTRL);
	regs|=HOST_CTRL_INTE;
	outb(regs, ioaddr+HOST_CTRL);

	/*
	 *	Allow ourselves to issue commands (cmd_mutex is taken
	 *	again by mc32_close, so open/close bracket command access)
	 */
	up(&lp->cmd_mutex);

	/*
	 *	Send the indications on command
	 */
	mc32_command(dev, 4, &one, 2);

	/*
	 *	Poke it to make sure it's really dead.
	 */
	mc32_halt_transceiver(dev);
	mc32_flush_tx_ring(dev);

	/*
	 *	Ask card to set up on-card descriptors to our spec
	 */
	if(mc32_command(dev, 8, descnumbuffs, 4)) {
		pr_info("%s: %s rejected our buffer configuration!\n",
			dev->name, cardname);
		mc32_close(dev);
		return -ENOBUFS;
	}

	/* Report new configuration */
	mc32_command(dev, 6, NULL, 0);

	/* Cache the descriptor-chain offsets and lengths the card just
	   reported back in its exec mailbox */
	lp->tx_chain = lp->exec_box->data[8];	/* Transmit list start offset */
	lp->rx_chain = lp->exec_box->data[10];	/* Receive list start offset */
	lp->tx_len = lp->exec_box->data[9];	/* Transmit list count */
	lp->rx_len = lp->exec_box->data[11];	/* Receive list count */

	/* Set Network Address */
	mc32_command(dev, 1, dev->dev_addr, 6);

	/* Set the filters */
	mc32_set_multicast_list(dev);

	if (WORKAROUND_82586) {
		u16 zero_word=0;
		mc32_command(dev, 0x0D, &zero_word, 2); /* 82586 bug workaround on */
	}

	mc32_load_tx_ring(dev);

	if(mc32_load_rx_ring(dev))
	{
		mc32_close(dev);
		return -ENOBUFS;
	}

	lp->xceiver_desired_state = RUNNING;

	/* And finally, set the ball rolling... */
	mc32_start_transceiver(dev);

	netif_start_queue(dev);

	return 0;
}
  800. /**
  801. * mc32_timeout - handle a timeout from the network layer
  802. * @dev: 3c527 that timed out
  803. *
  804. * Handle a timeout on transmit from the 3c527. This normally means
  805. * bad things as the hardware handles cable timeouts and mess for
  806. * us.
  807. *
  808. */
  809. static void mc32_timeout(struct net_device *dev)
  810. {
  811. pr_warning("%s: transmit timed out?\n", dev->name);
  812. /* Try to restart the adaptor. */
  813. netif_wake_queue(dev);
  814. }
/**
 *	mc32_send_packet	-	queue a frame for transmit
 *	@skb: buffer to transmit
 *	@dev: 3c527 to send it out of
 *
 *	Transmit a buffer. This normally means throwing the buffer onto
 *	the transmit queue as the queue is quite large. If the queue is
 *	full then we set tx_busy and return. Once the interrupt handler
 *	gets messages telling it to reclaim transmit queue entries, we will
 *	clear tx_busy and the kernel will start calling this again.
 *
 *	We do not disable interrupts or acquire any locks; this can
 *	run concurrently with mc32_tx_ring(), and the function itself
 *	is serialised at a higher layer. However, similarly for the
 *	card itself, we must ensure that we update tx_ring_head only
 *	after we've established a valid packet on the tx ring (and
 *	before we let the card "see" it, to prevent it racing with the
 *	irq handler).
 */

static netdev_tx_t mc32_send_packet(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	u32 head = atomic_read(&lp->tx_ring_head);

	volatile struct skb_header *p, *np;

	netif_stop_queue(dev);

	/* Ring full: leave the queue stopped and ask the stack to retry;
	   mc32_tx_ring() will wake us when slots are reclaimed */
	if(atomic_read(&lp->tx_count)==0) {
		return NETDEV_TX_BUSY;
	}

	if (skb_padto(skb, ETH_ZLEN)) {
		netif_wake_queue(dev);
		return NETDEV_TX_OK;
	}

	atomic_dec(&lp->tx_count);

	/* P is the last sending/sent buffer as a pointer */
	p=lp->tx_ring[head].p;

	head = next_tx(head);

	/* NP is the buffer we will be loading */
	np=lp->tx_ring[head].p;

	/* We will need this to flush the buffer out */
	lp->tx_ring[head].skb=skb;

	/* skb_padto() pads the data area but may leave skb->len below
	   ETH_ZLEN, so clamp the descriptor length up to the minimum
	   Ethernet frame size */
	np->length = unlikely(skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;
	np->data = isa_virt_to_bus(skb->data);
	np->status = 0;
	np->control = CONTROL_EOP | CONTROL_EOL;
	/* Ensure the descriptor writes above are visible before the
	   card or the irq handler can observe the new head */
	wmb();

	/*
	 * The new frame has been setup; we can now
	 * let the interrupt handler and card "see" it
	 */

	atomic_set(&lp->tx_ring_head, head);

	/* Clearing EOL on the previous descriptor lets the card advance
	   onto the frame we just built */
	p->control &= ~CONTROL_EOL;

	netif_wake_queue(dev);
	return NETDEV_TX_OK;
}
/**
 *	mc32_update_stats	-	pull off the on board statistics
 *	@dev: 3c527 to service
 *
 *
 *	Query and reset the on-card stats. There's the small possibility
 *	of a race here, which would result in an underestimation of
 *	actual errors. As such, we'd prefer to keep all our stats
 *	collection in software. As a rule, we do. However it can't be
 *	used for rx errors and collisions as, by default, the card discards
 *	bad rx packets.
 *
 *	Setting the SAV BP in the rx filter command supposedly
 *	stops this behaviour. However, testing shows that it only seems to
 *	enable the collation of on-card rx statistics --- the driver
 *	never sees an RX descriptor with an error status set.
 */

static void mc32_update_stats(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct mc32_stats *st = lp->stats;

	u32 rx_errors=0;

	/* Each chained "a += b += c" below folds the on-card counter c
	   into the cumulative dev->stats field b AND into the running
	   rx_errors total a, after which the on-card counter is zeroed */
	rx_errors+=dev->stats.rx_crc_errors +=st->rx_crc_errors;
	st->rx_crc_errors=0;
	rx_errors+=dev->stats.rx_fifo_errors +=st->rx_overrun_errors;
	st->rx_overrun_errors=0;
	rx_errors+=dev->stats.rx_frame_errors +=st->rx_alignment_errors;
	st->rx_alignment_errors=0;
	rx_errors+=dev->stats.rx_length_errors+=st->rx_tooshort_errors;
	st->rx_tooshort_errors=0;
	rx_errors+=dev->stats.rx_missed_errors+=st->rx_outofresource_errors;
	st->rx_outofresource_errors=0;
	dev->stats.rx_errors=rx_errors;

	/* Number of packets which saw one collision */
	dev->stats.collisions+=st->dataC[10];
	st->dataC[10]=0;

	/* Number of packets which saw 2--15 collisions */
	dev->stats.collisions+=st->dataC[11];
	st->dataC[11]=0;
}
/**
 *	mc32_rx_ring	-	process the receive ring
 *	@dev: 3c527 that needs its receive ring processing
 *
 *
 *	We have received one or more indications from the card that a
 *	receive has completed. The buffer ring thus contains dirty
 *	entries. We walk the ring by iterating over the circular rx_ring
 *	array, starting at the next dirty buffer (which happens to be the
 *	one we finished up at last time around).
 *
 *	For each completed packet, we will either copy it and pass it up
 *	the stack or, if the packet is near MTU sized, we allocate
 *	another buffer and flip the old one up the stack.
 *
 *	We must succeed in keeping a buffer on the ring. If necessary we
 *	will toss a received packet rather than lose a ring entry. Once
 *	the first uncompleted descriptor is found, we move the
 *	End-Of-List bit to include the buffers just processed.
 */

static void mc32_rx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct skb_header *p;
	u16 rx_ring_tail;
	u16 rx_old_tail;
	int x=0;

	rx_old_tail = rx_ring_tail = lp->rx_ring_tail;

	do
	{
		p=lp->rx_ring[rx_ring_tail].p;

		if(!(p->status & (1<<7))) {	/* Not COMPLETED */
			break;
		}
		if(p->status & (1<<6)) /* COMPLETED_OK */
		{
			u16 length=p->length;
			struct sk_buff *skb;
			struct sk_buff *newskb;

			/* Try to save time by avoiding a copy on big frames */
			if ((length > RX_COPYBREAK) &&
			    ((newskb = netdev_alloc_skb(dev, 1532)) != NULL))
			{
				/* Flip: hand the filled skb up the stack and
				   park the fresh one on the ring slot */
				skb=lp->rx_ring[rx_ring_tail].skb;
				skb_put(skb, length);

				/* 18-byte reserve matches the layout used when
				   the ring was first loaded -- presumably to
				   align the payload; TODO confirm */
				skb_reserve(newskb,18);
				lp->rx_ring[rx_ring_tail].skb=newskb;
				p->data=isa_virt_to_bus(newskb->data);
			}
			else
			{
				/* Small frame: copy it out and recycle the
				   existing ring buffer in place */
				skb = netdev_alloc_skb(dev, length + 2);

				if(skb==NULL) {
					dev->stats.rx_dropped++;
					goto dropped;
				}

				skb_reserve(skb,2);
				memcpy(skb_put(skb, length),
				       lp->rx_ring[rx_ring_tail].skb->data, length);
			}

			skb->protocol=eth_type_trans(skb,dev);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += length;
			netif_rx(skb);
		}

	dropped:
		/* Re-arm the descriptor for the next receive */
		p->length = 1532;
		p->status = 0;

		rx_ring_tail=next_rx(rx_ring_tail);
	}
	while(x++<48);	/* bound the work done per invocation */

	/* If there was actually a frame to be processed, place the EOL bit */
	/* at the descriptor prior to the one to be filled next */

	if (rx_ring_tail != rx_old_tail)
	{
		lp->rx_ring[prev_rx(rx_ring_tail)].p->control |= CONTROL_EOL;
		lp->rx_ring[prev_rx(rx_old_tail)].p->control &= ~CONTROL_EOL;

		lp->rx_ring_tail=rx_ring_tail;
	}
}
/**
 *	mc32_tx_ring	-	process completed transmits
 *	@dev: 3c527 that needs its transmit ring processing
 *
 *
 *	This operates in a similar fashion to mc32_rx_ring. We iterate
 *	over the transmit ring. For each descriptor which has been
 *	processed by the card, we free its associated buffer and note
 *	any errors. This continues until the transmit ring is emptied
 *	or we reach a descriptor that hasn't yet been processed by the
 *	card.
 */

static void mc32_tx_ring(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	volatile struct skb_header *np;

	/*
	 * We rely on head==tail to mean 'queue empty'.
	 * This is why lp->tx_count=TX_RING_LEN-1: in order to prevent
	 * tx_ring_head wrapping to tail and confusing a 'queue empty'
	 * condition with 'queue full'
	 */
	while (lp->tx_ring_tail != atomic_read(&lp->tx_ring_head))
	{
		u16 t;

		/* The slot after the tail is the oldest in-flight frame */
		t=next_tx(lp->tx_ring_tail);
		np=lp->tx_ring[t].p;

		if(!(np->status & (1<<7)))
		{
			/* Not COMPLETED */
			break;
		}
		dev->stats.tx_packets++;
		if(!(np->status & (1<<6))) /* Not COMPLETED_OK */
		{
			dev->stats.tx_errors++;

			/* Low nibble of status encodes the failure cause */
			switch(np->status&0x0F)
			{
				case 1:
					dev->stats.tx_aborted_errors++;
					break; /* Max collisions */
				case 2:
					dev->stats.tx_fifo_errors++;
					break;
				case 3:
					dev->stats.tx_carrier_errors++;
					break;
				case 4:
					dev->stats.tx_window_errors++;
					break; /* CTS Lost */
				case 5:
					dev->stats.tx_aborted_errors++;
					break; /* Transmit timeout */
			}
		}
		/* Packets are sent in order - this is
		   basically a FIFO queue of buffers matching
		   the card ring */
		dev->stats.tx_bytes+=lp->tx_ring[t].skb->len;
		dev_kfree_skb_irq(lp->tx_ring[t].skb);
		lp->tx_ring[t].skb=NULL;
		atomic_inc(&lp->tx_count);
		netif_wake_queue(dev);

		lp->tx_ring_tail=t;
	}
}
/**
 *	mc32_interrupt	-	handle an interrupt from a 3c527
 *	@irq: Interrupt number
 *	@dev_id: 3c527 that requires servicing
 *
 *
 *	An interrupt is raised whenever the 3c527 writes to the command
 *	register. This register contains the message it wishes to send us
 *	packed into a single byte field. We keep reading status entries
 *	until we have processed all the control items, but simply count
 *	transmit and receive reports. When all reports are in we empty the
 *	transceiver rings as appropriate. This saves the overhead of
 *	multiple command requests.
 *
 *	Because MCA is level-triggered, we shouldn't miss indications.
 *	Therefore, we needn't ask the card to suspend interrupts within
 *	this handler. The card receives an implicit acknowledgment of the
 *	current interrupt when we read the command register.
 */

static irqreturn_t mc32_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct mc32_local *lp;
	int ioaddr, status, boguscount = 0;
	int rx_event = 0;
	int tx_event = 0;

	ioaddr = dev->base_addr;
	lp = netdev_priv(dev);

	/* See whats cooking; boguscount caps the loop in case the
	   status bit never clears */
	while((inb(ioaddr+HOST_STATUS)&HOST_STATUS_CWR) && boguscount++<2000)
	{
		status=inb(ioaddr+HOST_CMD);

		pr_debug("Status TX%d RX%d EX%d OV%d BC%d\n",
			(status&7), (status>>3)&7, (status>>6)&1,
			(status>>7)&1, boguscount);

		/* Bits 0-2: transmit event code */
		switch(status&7)
		{
			case 0:
				break;
			case 6: /* TX fail */
			case 2:	/* TX ok */
				tx_event = 1;
				break;
			case 3: /* Halt */
			case 4: /* Abort */
				complete(&lp->xceiver_cmd);
				break;
			default:
				pr_notice("%s: strange tx ack %d\n", dev->name, status&7);
		}
		status>>=3;

		/* Bits 3-5: receive event code */
		switch(status&7)
		{
			case 0:
				break;
			case 2:	/* RX */
				rx_event=1;
				break;
			case 3: /* Halt */
			case 4: /* Abort */
				complete(&lp->xceiver_cmd);
				break;
			case 6:
				/* Out of RX buffers stat */
				/* Must restart rx */
				dev->stats.rx_dropped++;
				mc32_rx_ring(dev);	/* Refill */
				mc32_start_transceiver(dev); /* Kick it */
				break;
			default:
				pr_notice("%s: strange rx ack %d\n",
					dev->name, status&7);
		}
		status>>=3;

		/* Bit 6: execution command completed */
		if(status&1)
		{
			/*
			 * No thread is waiting: we need to tidy
			 * up ourself.
			 */
			if (lp->cmd_nonblocking) {
				up(&lp->cmd_mutex);
				if (lp->mc_reload_wait)
					mc32_reset_multicast_list(dev);
			}
			else complete(&lp->execution_cmd);
		}

		/* Bit 7: statistics counter about to overflow */
		if(status&2)
		{
			/*
			 * We get interrupted once per
			 * counter that is about to overflow.
			 */
			mc32_update_stats(dev);
		}
	}

	/*
	 * Process the transmit and receive rings
	 */
	if(tx_event)
		mc32_tx_ring(dev);

	if(rx_event)
		mc32_rx_ring(dev);

	return IRQ_HANDLED;
}
/**
 *	mc32_close	-	user configuring the 3c527 down
 *	@dev: 3c527 card to shut down
 *
 *	The 3c527 is a bus mastering device. We must be careful how we
 *	shut it down. It may also be running shared interrupt so we have
 *	to be sure to silence it properly
 *
 *	We indicate that the card is closing to the rest of the
 *	driver. Otherwise, it is possible that the card may run out
 *	of receive buffers and restart the transceiver while we're
 *	trying to close it.
 *
 *	We abort any receive and transmits going on and then wait until
 *	any pending exec commands have completed in other code threads.
 *	In theory we can't get here while that is true, in practice I am
 *	paranoid
 *
 *	We turn off the interrupt enable for the board to be sure it can't
 *	intefere with other devices.
 */

static int mc32_close(struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	int ioaddr = dev->base_addr;

	u8 regs;
	u16 one=1;

	/* Stop the interrupt path restarting the transceiver on
	   buffer exhaustion */
	lp->xceiver_desired_state = HALTED;
	netif_stop_queue(dev);

	/*
	 * Send the indications on command (handy debug check)
	 */
	mc32_command(dev, 4, &one, 2);

	/* Shut down the transceiver */
	mc32_halt_transceiver(dev);

	/* Ensure we issue no more commands beyond this point */
	down(&lp->cmd_mutex);

	/* Ok the card is now stopping */
	regs=inb(ioaddr+HOST_CTRL);
	regs&=~HOST_CTRL_INTE;
	outb(regs, ioaddr+HOST_CTRL);

	mc32_flush_rx_ring(dev);
	mc32_flush_tx_ring(dev);

	/* Grab the final on-card counters before the card goes quiet */
	mc32_update_stats(dev);

	return 0;
}
  1213. /**
  1214. * mc32_get_stats - hand back stats to network layer
  1215. * @dev: The 3c527 card to handle
  1216. *
  1217. * We've collected all the stats we can in software already. Now
  1218. * it's time to update those kept on-card and return the lot.
  1219. *
  1220. */
  1221. static struct net_device_stats *mc32_get_stats(struct net_device *dev)
  1222. {
  1223. mc32_update_stats(dev);
  1224. return &dev->stats;
  1225. }
  1226. /**
  1227. * do_mc32_set_multicast_list - attempt to update multicasts
  1228. * @dev: 3c527 device to load the list on
  1229. * @retry: indicates this is not the first call.
  1230. *
  1231. *
  1232. * Actually set or clear the multicast filter for this adaptor. The
  1233. * locking issues are handled by this routine. We have to track
  1234. * state as it may take multiple calls to get the command sequence
  1235. * completed. We just keep trying to schedule the loads until we
  1236. * manage to process them all.
  1237. *
  1238. * num_addrs == -1 Promiscuous mode, receive all packets
  1239. *
  1240. * num_addrs == 0 Normal mode, clear multicast list
  1241. *
  1242. * num_addrs > 0 Multicast mode, receive normal and MC packets,
  1243. * and do best-effort filtering.
  1244. *
  1245. * See mc32_update_stats() regards setting the SAV BP bit.
  1246. *
  1247. */
  1248. static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
  1249. {
  1250. struct mc32_local *lp = netdev_priv(dev);
  1251. u16 filt = (1<<2); /* Save Bad Packets, for stats purposes */
  1252. if ((dev->flags&IFF_PROMISC) ||
  1253. (dev->flags&IFF_ALLMULTI) ||
  1254. netdev_mc_count(dev) > 10)
  1255. /* Enable promiscuous mode */
  1256. filt |= 1;
  1257. else if (!netdev_mc_empty(dev))
  1258. {
  1259. unsigned char block[62];
  1260. unsigned char *bp;
  1261. struct netdev_hw_addr *ha;
  1262. if(retry==0)
  1263. lp->mc_list_valid = 0;
  1264. if(!lp->mc_list_valid)
  1265. {
  1266. block[1]=0;
  1267. block[0]=netdev_mc_count(dev);
  1268. bp=block+2;
  1269. netdev_for_each_mc_addr(ha, dev) {
  1270. memcpy(bp, ha->addr, 6);
  1271. bp+=6;
  1272. }
  1273. if(mc32_command_nowait(dev, 2, block,
  1274. 2+6*netdev_mc_count(dev))==-1)
  1275. {
  1276. lp->mc_reload_wait = 1;
  1277. return;
  1278. }
  1279. lp->mc_list_valid=1;
  1280. }
  1281. }
  1282. if(mc32_command_nowait(dev, 0, &filt, 2)==-1)
  1283. {
  1284. lp->mc_reload_wait = 1;
  1285. }
  1286. else {
  1287. lp->mc_reload_wait = 0;
  1288. }
  1289. }
  1290. /**
  1291. * mc32_set_multicast_list - queue multicast list update
  1292. * @dev: The 3c527 to use
  1293. *
  1294. * Commence loading the multicast list. This is called when the kernel
  1295. * changes the lists. It will override any pending list we are trying to
  1296. * load.
  1297. */
  1298. static void mc32_set_multicast_list(struct net_device *dev)
  1299. {
  1300. do_mc32_set_multicast_list(dev,0);
  1301. }
  1302. /**
  1303. * mc32_reset_multicast_list - reset multicast list
  1304. * @dev: The 3c527 to use
  1305. *
  1306. * Attempt the next step in loading the multicast lists. If this attempt
  1307. * fails to complete then it will be scheduled and this function called
  1308. * again later from elsewhere.
  1309. */
  1310. static void mc32_reset_multicast_list(struct net_device *dev)
  1311. {
  1312. do_mc32_set_multicast_list(dev,1);
  1313. }
  1314. static void netdev_get_drvinfo(struct net_device *dev,
  1315. struct ethtool_drvinfo *info)
  1316. {
  1317. strcpy(info->driver, DRV_NAME);
  1318. strcpy(info->version, DRV_VERSION);
  1319. sprintf(info->bus_info, "MCA 0x%lx", dev->base_addr);
  1320. }
  1321. static u32 netdev_get_msglevel(struct net_device *dev)
  1322. {
  1323. return mc32_debug;
  1324. }
  1325. static void netdev_set_msglevel(struct net_device *dev, u32 level)
  1326. {
  1327. mc32_debug = level;
  1328. }
/* ethtool support: driver identification and message level only. */
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
};
  1334. #ifdef MODULE
  1335. static struct net_device *this_device;
  1336. /**
  1337. * init_module - entry point
  1338. *
  1339. * Probe and locate a 3c527 card. This really should probe and locate
  1340. * all the 3c527 cards in the machine not just one of them. Yes you can
  1341. * insmod multiple modules for now but it's a hack.
  1342. */
  1343. int __init init_module(void)
  1344. {
  1345. this_device = mc32_probe(-1);
  1346. if (IS_ERR(this_device))
  1347. return PTR_ERR(this_device);
  1348. return 0;
  1349. }
  1350. /**
  1351. * cleanup_module - free resources for an unload
  1352. *
  1353. * Unloading time. We release the MCA bus resources and the interrupt
  1354. * at which point everything is ready to unload. The card must be stopped
  1355. * at this point or we would not have been called. When we unload we
  1356. * leave the card stopped but not totally shut down. When the card is
  1357. * initialized it must be rebooted or the rings reloaded before any
  1358. * transmit operations are allowed to start scribbling into memory.
  1359. */
  1360. void __exit cleanup_module(void)
  1361. {
  1362. unregister_netdev(this_device);
  1363. cleanup_card(this_device);
  1364. free_netdev(this_device);
  1365. }
  1366. #endif /* MODULE */