  1. /* $Id: plip.c,v 1.3.6.2 1997/04/16 15:07:56 phil Exp $ */
  2. /* PLIP: A parallel port "network" driver for Linux. */
  3. /* This driver is for parallel port with 5-bit cable (LapLink (R) cable). */
  4. /*
  5. * Authors: Donald Becker <becker@scyld.com>
  6. * Tommy Thorn <thorn@daimi.aau.dk>
  7. * Tanabe Hiroyasu <hiro@sanpo.t.u-tokyo.ac.jp>
  8. * Alan Cox <gw4pts@gw4pts.ampr.org>
  9. * Peter Bauer <100136.3530@compuserve.com>
  10. * Niibe Yutaka <gniibe@mri.co.jp>
  11. * Nimrod Zimerman <zimerman@mailandnews.com>
  12. *
  13. * Enhancements:
  14. * Modularization and ifreq/ifmap support by Alan Cox.
  15. * Rewritten by Niibe Yutaka.
  16. * parport-sharing awareness code by Philip Blundell.
  17. * SMP locking by Niibe Yutaka.
  18. * Support for parallel ports with no IRQ (poll mode),
  19. * Modifications to use the parallel port API
  20. * by Nimrod Zimerman.
  21. *
  22. * Fixes:
  23. * Niibe Yutaka
  24. * - Module initialization.
  25. * - MTU fix.
  26. * - Make sure other end is OK, before sending a packet.
  27. * - Fix immediate timer problem.
  28. *
  29. * Al Viro
  30. * - Changed {enable,disable}_irq handling to make it work
  31. * with new ("stack") semantics.
  32. *
  33. * This program is free software; you can redistribute it and/or
  34. * modify it under the terms of the GNU General Public License
  35. * as published by the Free Software Foundation; either version
  36. * 2 of the License, or (at your option) any later version.
  37. */
  38. /*
  39. * Original version and the name 'PLIP' from Donald Becker <becker@scyld.com>
  40. * inspired by Russ Nelson's parallel port packet driver.
  41. *
  42. * NOTE:
  43. * Tanabe Hiroyasu had changed the protocol, and it was in Linux v1.0.
  44. * Because of the necessity to communicate to DOS machines with the
  45. * Crynwr packet driver, Peter Bauer changed the protocol again
  46. * back to original protocol.
  47. *
  48. * This version follows original PLIP protocol.
  49. * So, this PLIP can't communicate the PLIP of Linux v1.0.
  50. */
  51. /*
  52. * To use with DOS box, please do (Turn on ARP switch):
  53. * # ifconfig plip[0-2] arp
  54. */
  55. static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n";
  56. /*
  57. Sources:
  58. Ideas and protocols came from Russ Nelson's <nelson@crynwr.com>
  59. "parallel.asm" parallel port packet driver.
  60. The "Crynwr" parallel port standard specifies the following protocol:
  61. Trigger by sending nibble '0x8' (this causes interrupt on other end)
  62. count-low octet
  63. count-high octet
  64. ... data octets
  65. checksum octet
  66. Each octet is sent as <wait for rx. '0x1?'> <send 0x10+(octet&0x0F)>
  67. <wait for rx. '0x0?'> <send 0x00+((octet>>4)&0x0F)>
  68. The packet is encapsulated as if it were ethernet.
  69. The cable used is a de facto standard parallel null cable -- sold as
  70. a "LapLink" cable by various places. You'll need a 12-conductor cable to
  71. make one yourself. The wiring is:
  72. SLCTIN 17 - 17
  73. GROUND 25 - 25
  74. D0->ERROR 2 - 15 15 - 2
  75. D1->SLCT 3 - 13 13 - 3
  76. D2->PAPOUT 4 - 12 12 - 4
  77. D3->ACK 5 - 10 10 - 5
  78. D4->BUSY 6 - 11 11 - 6
  79. Do not connect the other pins. They are
  80. D5,D6,D7 are 7,8,9
  81. STROBE is 1, FEED is 14, INIT is 16
  82. extra grounds are 18,19,20,21,22,23,24
  83. */
  84. #include <linux/module.h>
  85. #include <linux/kernel.h>
  86. #include <linux/types.h>
  87. #include <linux/fcntl.h>
  88. #include <linux/interrupt.h>
  89. #include <linux/string.h>
  90. #include <linux/slab.h>
  91. #include <linux/if_ether.h>
  92. #include <linux/in.h>
  93. #include <linux/errno.h>
  94. #include <linux/delay.h>
  95. #include <linux/init.h>
  96. #include <linux/netdevice.h>
  97. #include <linux/etherdevice.h>
  98. #include <linux/inetdevice.h>
  99. #include <linux/skbuff.h>
  100. #include <linux/if_plip.h>
  101. #include <linux/workqueue.h>
  102. #include <linux/spinlock.h>
  103. #include <linux/completion.h>
  104. #include <linux/parport.h>
  105. #include <linux/bitops.h>
  106. #include <net/neighbour.h>
  107. #include <asm/system.h>
  108. #include <asm/irq.h>
  109. #include <asm/byteorder.h>
  110. /* Maximum number of devices to support. */
  111. #define PLIP_MAX 8
  112. /* Use 0 for production, 1 for verification, >2 for debug */
  113. #ifndef NET_DEBUG
  114. #define NET_DEBUG 1
  115. #endif
  116. static const unsigned int net_debug = NET_DEBUG;
  117. #define ENABLE(irq) if (irq != -1) enable_irq(irq)
  118. #define DISABLE(irq) if (irq != -1) disable_irq(irq)
  119. /* In micro second */
  120. #define PLIP_DELAY_UNIT 1
  121. /* Connection time out = PLIP_TRIGGER_WAIT * PLIP_DELAY_UNIT usec */
  122. #define PLIP_TRIGGER_WAIT 500
  123. /* Nibble time out = PLIP_NIBBLE_WAIT * PLIP_DELAY_UNIT usec */
  124. #define PLIP_NIBBLE_WAIT 3000
  125. /* Bottom halves */
  126. static void plip_kick_bh(struct work_struct *work);
  127. static void plip_bh(struct work_struct *work);
  128. static void plip_timer_bh(struct work_struct *work);
  129. /* Interrupt handler */
  130. static void plip_interrupt(void *dev_id);
  131. /* Functions for DEV methods */
  132. static int plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
  133. static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
  134. unsigned short type, const void *daddr,
  135. const void *saddr, unsigned len);
  136. static int plip_hard_header_cache(const struct neighbour *neigh,
  137. struct hh_cache *hh);
  138. static int plip_open(struct net_device *dev);
  139. static int plip_close(struct net_device *dev);
  140. static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
  141. static int plip_preempt(void *handle);
  142. static void plip_wakeup(void *handle);
/* Overall link state of a PLIP connection (kept in net_local.connection;
   indexes connection_state_table in plip_bh). */
enum plip_connection_state {
	PLIP_CN_NONE=0,		/* idle: no transfer in progress */
	PLIP_CN_RECEIVE,	/* receiving a packet from the peer */
	PLIP_CN_SEND,		/* transmitting a packet to the peer */
	PLIP_CN_CLOSING,	/* transfer done, waiting to go idle */
	PLIP_CN_ERROR		/* fault seen; wait for the other end to settle */
};
/* Progress through one packet transfer, mirroring the Crynwr wire
   format: trigger, length LSB/MSB, data octets, checksum octet. */
enum plip_packet_state {
	PLIP_PK_DONE=0,		/* no packet in flight / finished */
	PLIP_PK_TRIGGER,	/* handshaking the initial trigger nibble */
	PLIP_PK_LENGTH_LSB,	/* transferring low byte of the length */
	PLIP_PK_LENGTH_MSB,	/* transferring high byte of the length */
	PLIP_PK_DATA,		/* transferring payload octets */
	PLIP_PK_CHECKSUM	/* transferring the trailing checksum octet */
};
/* Sub-state of the two-nibble-per-byte handshake in plip_send/plip_receive. */
enum plip_nibble_state {
	PLIP_NB_BEGIN,		/* ready to transfer the low nibble */
	PLIP_NB_1,		/* low nibble done, high nibble pending */
	PLIP_NB_2,		/* byte complete */
};
/* Per-direction transfer state; net_local holds one for send and one
   for receive so an interrupted transfer can be resumed byte by byte. */
struct plip_local {
	enum plip_packet_state state;	/* where we are in the packet */
	enum plip_nibble_state nibble;	/* where we are in the current byte */
	union {
		struct {
#if defined(__LITTLE_ENDIAN)
			unsigned char lsb;
			unsigned char msb;
#elif defined(__BIG_ENDIAN)
			unsigned char msb;
			unsigned char lsb;
#else
#error	"Please fix the endianness defines in <asm/byteorder.h>"
#endif
		} b;			/* wire order: LSB first on the link */
		unsigned short h;	/* host-order view of the same length */
	} length;
	unsigned short byte;		/* index of byte being transferred */
	unsigned char checksum;		/* running 8-bit sum of the payload */
	unsigned char data;		/* scratch byte (e.g. received checksum) */
	struct sk_buff *skb;		/* the packet buffer in flight */
};
/* Per-device private data (netdev_priv(dev)). */
struct net_local {
	struct net_device *dev;			/* back-pointer to our netdev */
	struct work_struct immediate;		/* runs plip_bh */
	struct delayed_work deferred;		/* runs plip_kick_bh (retry path) */
	struct delayed_work timer;		/* poll tick when dev->irq == -1 */
	struct plip_local snd_data;		/* transmit-side transfer state */
	struct plip_local rcv_data;		/* receive-side transfer state */
	struct pardevice *pardev;		/* our parport registration */
	unsigned long trigger;			/* trigger handshake timeout count */
	unsigned long nibble;			/* per-nibble timeout count */
	enum plip_connection_state connection;	/* overall link state */
	unsigned short timeout_count;		/* consecutive timeouts seen */
	int is_deferred;			/* retry scheduled via deferred work */
	int port_owner;				/* nonzero while we hold the parport */
	int should_relinquish;			/* release the port once idle */
	spinlock_t lock;			/* guards connection/state changes */
	atomic_t kill_timer;			/* ask the poll timer to stop */
	struct completion killed_timer_cmp;	/* signalled when poll timer dies */
};
  204. static inline void enable_parport_interrupts (struct net_device *dev)
  205. {
  206. if (dev->irq != -1)
  207. {
  208. struct parport *port =
  209. ((struct net_local *)netdev_priv(dev))->pardev->port;
  210. port->ops->enable_irq (port);
  211. }
  212. }
  213. static inline void disable_parport_interrupts (struct net_device *dev)
  214. {
  215. if (dev->irq != -1)
  216. {
  217. struct parport *port =
  218. ((struct net_local *)netdev_priv(dev))->pardev->port;
  219. port->ops->disable_irq (port);
  220. }
  221. }
  222. static inline void write_data (struct net_device *dev, unsigned char data)
  223. {
  224. struct parport *port =
  225. ((struct net_local *)netdev_priv(dev))->pardev->port;
  226. port->ops->write_data (port, data);
  227. }
  228. static inline unsigned char read_status (struct net_device *dev)
  229. {
  230. struct parport *port =
  231. ((struct net_local *)netdev_priv(dev))->pardev->port;
  232. return port->ops->read_status (port);
  233. }
/* Link-layer header operations: PLIP frames look like ethernet, but
   header creation/caching is customized (see plip_hard_header*). */
static const struct header_ops plip_header_ops = {
	.create  = plip_hard_header,
	.cache   = plip_hard_header_cache,
};
/* Standard netdev entry points; MTU/MAC handling is plain ethernet. */
static const struct net_device_ops plip_netdev_ops = {
	.ndo_open		 = plip_open,
	.ndo_stop		 = plip_close,
	.ndo_start_xmit		 = plip_tx_packet,
	.ndo_do_ioctl		 = plip_ioctl,
	.ndo_change_mtu		 = eth_change_mtu,
	.ndo_set_mac_address	 = eth_mac_addr,
	.ndo_validate_addr	 = eth_validate_addr,
};
  247. /* Entry point of PLIP driver.
  248. Probe the hardware, and register/initialize the driver.
  249. PLIP is rather weird, because of the way it interacts with the parport
  250. system. It is _not_ initialised from Space.c. Instead, plip_init()
  251. is called, and that function makes up a "struct net_device" for each port, and
  252. then calls us here.
  253. */
/* Set up one PLIP net_device: link-level defaults, ops tables, timing
   constants, and the work items that implement the bottom halves. */
static void
plip_init_netdev(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);

	/* Then, override parts of it */
	dev->tx_queue_len	 = 10;
	dev->flags		 = IFF_POINTOPOINT|IFF_NOARP;
	/* Fixed pseudo-MAC; real addressing is point-to-point anyway. */
	memset(dev->dev_addr, 0xfc, ETH_ALEN);

	dev->netdev_ops		 = &plip_netdev_ops;
	dev->header_ops		 = &plip_header_ops;

	nl->port_owner = 0;

	/* Initialize constants */
	nl->trigger	= PLIP_TRIGGER_WAIT;
	nl->nibble	= PLIP_NIBBLE_WAIT;

	/* Initialize task queue structures */
	INIT_WORK(&nl->immediate, plip_bh);
	INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);

	/* No IRQ line available: fall back to the polling timer. */
	if (dev->irq == -1)
		INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);

	spin_lock_init(&nl->lock);
}
  275. /* Bottom half handler for the delayed request.
  276. This routine is kicked by do_timer().
  277. Request `plip_bh' to be invoked. */
  278. static void
  279. plip_kick_bh(struct work_struct *work)
  280. {
  281. struct net_local *nl =
  282. container_of(work, struct net_local, deferred.work);
  283. if (nl->is_deferred)
  284. schedule_work(&nl->immediate);
  285. }
  286. /* Forward declarations of internal routines */
  287. static int plip_none(struct net_device *, struct net_local *,
  288. struct plip_local *, struct plip_local *);
  289. static int plip_receive_packet(struct net_device *, struct net_local *,
  290. struct plip_local *, struct plip_local *);
  291. static int plip_send_packet(struct net_device *, struct net_local *,
  292. struct plip_local *, struct plip_local *);
  293. static int plip_connection_close(struct net_device *, struct net_local *,
  294. struct plip_local *, struct plip_local *);
  295. static int plip_error(struct net_device *, struct net_local *,
  296. struct plip_local *, struct plip_local *);
  297. static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
  298. struct plip_local *snd,
  299. struct plip_local *rcv,
  300. int error);
  301. #define OK 0
  302. #define TIMEOUT 1
  303. #define ERROR 2
  304. #define HS_TIMEOUT 3
  305. typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
  306. struct plip_local *snd, struct plip_local *rcv);
/* Dispatch table for plip_bh, indexed by enum plip_connection_state
   (PLIP_CN_NONE..PLIP_CN_ERROR — order must match the enum). */
static const plip_func connection_state_table[] =
{
	plip_none,
	plip_receive_packet,
	plip_send_packet,
	plip_connection_close,
	plip_error
};
  315. /* Bottom half handler of PLIP. */
  316. static void
  317. plip_bh(struct work_struct *work)
  318. {
  319. struct net_local *nl = container_of(work, struct net_local, immediate);
  320. struct plip_local *snd = &nl->snd_data;
  321. struct plip_local *rcv = &nl->rcv_data;
  322. plip_func f;
  323. int r;
  324. nl->is_deferred = 0;
  325. f = connection_state_table[nl->connection];
  326. if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK &&
  327. (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
  328. nl->is_deferred = 1;
  329. schedule_delayed_work(&nl->deferred, 1);
  330. }
  331. }
  332. static void
  333. plip_timer_bh(struct work_struct *work)
  334. {
  335. struct net_local *nl =
  336. container_of(work, struct net_local, timer.work);
  337. if (!(atomic_read (&nl->kill_timer))) {
  338. plip_interrupt (nl->dev);
  339. schedule_delayed_work(&nl->timer, 1);
  340. }
  341. else {
  342. complete(&nl->killed_timer_cmp);
  343. }
  344. }
/* Error path for plip_bh: count retries for the failing direction and,
   once the retry budget is exhausted, drop any in-flight skbs, quiesce
   the port and park the link in PLIP_CN_ERROR until the peer settles.
   Returns TIMEOUT to request a later retry, or OK when the condition
   resolved itself (e.g. send interrupted by an incoming packet). */
static int
plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv,
		      int error)
{
	unsigned char c0;

	/*
	 * This is tricky. If we got here from the beginning of send (either
	 * with ERROR or HS_TIMEOUT) we have IRQ enabled. Otherwise it's
	 * already disabled. With the old variant of {enable,disable}_irq()
	 * extra disable_irq() was a no-op. Now it became mortal - it's
	 * unbalanced and thus we'll never re-enable IRQ (until rmmod plip,
	 * that is). So we have to treat HS_TIMEOUT and ERROR from send
	 * in a special way.
	 */
	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_SEND) {

		if (error != ERROR) { /* Timeout */
			nl->timeout_count++;
			/* Handshake timeouts get a larger budget (10) than
			   mid-packet ones (3) before we give up. */
			if ((error == HS_TIMEOUT && nl->timeout_count <= 10) ||
			    nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
			       dev->name, snd->state, c0);
		} else
			/* ERROR during send is handled like a handshake
			   timeout for the IRQ bookkeeping below. */
			error = HS_TIMEOUT;
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
	} else if (nl->connection == PLIP_CN_RECEIVE) {
		if (rcv->state == PLIP_PK_TRIGGER) {
			/* Transmission was interrupted. */
			spin_unlock_irq(&nl->lock);
			return OK;
		}
		if (error != ERROR) { /* Timeout */
			if (++nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
			       dev->name, rcv->state, c0);
		}
		dev->stats.rx_dropped++;
	}
	/* Give up: discard both directions' in-flight packets. */
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	spin_unlock_irq(&nl->lock);
	if (error == HS_TIMEOUT) {
		/* Only this path still has the IRQ enabled (see comment
		   above) — balance it here. */
		DISABLE(dev->irq);
		synchronize_irq(dev->irq);
	}
	disable_parport_interrupts (dev);
	netif_stop_queue (dev);
	nl->connection = PLIP_CN_ERROR;
	write_data (dev, 0x00);

	return TIMEOUT;
}
/* PLIP_CN_NONE handler: idle state, nothing to do. */
static int
plip_none(struct net_device *dev, struct net_local *nl,
	  struct plip_local *snd, struct plip_local *rcv)
{
	return OK;
}
  422. /* PLIP_RECEIVE --- receive a byte(two nibbles)
  423. Returns OK on success, TIMEOUT on timeout */
/* Receive one byte as two 4-bit nibbles, resumable via *ns_p.
   Each wait loop double-reads the status lines to debounce them and
   gives up after nibble_timeout iterations of PLIP_DELAY_UNIT usec. */
static inline int
plip_receive(unsigned short nibble_timeout, struct net_device *dev,
	     enum plip_nibble_state *ns_p, unsigned char *data_p)
{
	unsigned char c0, c1;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		/* Wait for the sender to present the low nibble
		   (status bit 0x80 low, reading stable). */
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if ((c0 & 0x80) == 0) {
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		/* Nibble arrives on status bits 3..6. */
		*data_p = (c0 >> 3) & 0x0f;
		write_data (dev, 0x10); /* send ACK */
		*ns_p = PLIP_NB_1;
		/* fall through */

	case PLIP_NB_1:
		/* Wait for the high nibble (status bit 0x80 high, stable). */
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if (c0 & 0x80) {
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p |= (c0 << 1) & 0xf0;
		write_data (dev, 0x00); /* send ACK */
		*ns_p = PLIP_NB_BEGIN;
		/* fall through */

	case PLIP_NB_2:
		break;
	}
	return OK;
}
  468. /*
  469. * Determine the packet's protocol ID. The rule here is that we
  470. * assume 802.3 if the type field is short enough to be a length.
  471. * This is normal practice and works for any 'now in use' protocol.
  472. *
  473. * PLIP is ethernet ish but the daddr might not be valid if unicast.
  474. * PLIP fortunately has no bus architecture (its Point-to-point).
  475. *
  476. * We can't fix the daddr thing as that quirk (more bug) is embedded
  477. * in far too many old systems not all even running Linux.
  478. */
static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *eth;
	unsigned char *rawp;

	skb_reset_mac_header(skb);
	skb_pull(skb,dev->hard_header_len);
	eth = eth_hdr(skb);

	/* Group bit set in the destination: classify as broadcast or
	   multicast by comparing against the device broadcast address. */
	if(*eth->h_dest&1)
	{
		if(memcmp(eth->h_dest,dev->broadcast, ETH_ALEN)==0)
			skb->pkt_type=PACKET_BROADCAST;
		else
			skb->pkt_type=PACKET_MULTICAST;
	}

	/*
	 *	This ALLMULTI check should be redundant by 1.4
	 *	so don't forget to remove it.
	 */

	/* Values >= 1536 in the type field are real ethertypes; smaller
	   values mean an 802.3 length field instead. */
	if (ntohs(eth->h_proto) >= 1536)
		return eth->h_proto;

	rawp = skb->data;

	/*
	 *	This is a magic hack to spot IPX packets. Older Novell breaks
	 *	the protocol design and runs IPX over 802.3 without an 802.2 LLC
	 *	layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
	 *	won't work for fault tolerant netware but does for the rest.
	 */
	if (*(unsigned short *)rawp == 0xFFFF)
		return htons(ETH_P_802_3);

	/*
	 *	Real 802.2 LLC
	 */
	return htons(ETH_P_802_2);
}
  513. /* PLIP_RECEIVE_PACKET --- receive a packet */
/* PLIP_CN_RECEIVE handler: resumable state machine that pulls one
   whole packet (length, payload, checksum) off the wire, hands it to
   the stack, then either goes idle or switches to a pending send.
   Cases deliberately fall through as each stage completes. */
static int
plip_receive_packet(struct net_device *dev, struct net_local *nl,
		    struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;

	switch (rcv->state) {
	case PLIP_PK_TRIGGER:
		DISABLE(dev->irq);
		/* Don't need to synchronize irq, as we can safely ignore it */
		disable_parport_interrupts (dev);
		write_data (dev, 0x01); /* send ACK */
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive start\n", dev->name);
		rcv->state = PLIP_PK_LENGTH_LSB;
		rcv->nibble = PLIP_NB_BEGIN;
		/* fall through */

	case PLIP_PK_LENGTH_LSB:
		if (snd->state != PLIP_PK_DONE) {
			/* We were mid-send: use the short trigger timeout so
			   a collision is detected and resolved quickly. */
			if (plip_receive(nl->trigger, dev,
					 &rcv->nibble, &rcv->length.b.lsb)) {
				/* collision, here dev->tbusy == 1 */
				rcv->state = PLIP_PK_DONE;
				nl->is_deferred = 1;
				nl->connection = PLIP_CN_SEND;
				schedule_delayed_work(&nl->deferred, 1);
				enable_parport_interrupts (dev);
				ENABLE(dev->irq);
				return OK;
			}
		} else {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &rcv->length.b.lsb))
				return TIMEOUT;
		}
		rcv->state = PLIP_PK_LENGTH_MSB;
		/* fall through */

	case PLIP_PK_LENGTH_MSB:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->length.b.msb))
			return TIMEOUT;
		/* Sanity-check the announced length against the MTU. */
		if (rcv->length.h > dev->mtu + dev->hard_header_len ||
		    rcv->length.h < 8) {
			printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
			return ERROR;
		}
		/* Malloc up new buffer. */
		rcv->skb = dev_alloc_skb(rcv->length.h + 2);
		if (rcv->skb == NULL) {
			printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
			return ERROR;
		}
		skb_reserve(rcv->skb, 2);	/* Align IP on 16 byte boundaries */
		skb_put(rcv->skb,rcv->length.h);
		rcv->skb->dev = dev;
		rcv->state = PLIP_PK_DATA;
		rcv->byte = 0;
		rcv->checksum = 0;
		/* fall through */

	case PLIP_PK_DATA:
		lbuf = rcv->skb->data;
		do {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &lbuf[rcv->byte]))
				return TIMEOUT;
		} while (++rcv->byte < rcv->length.h);
		/* Sum the payload for comparison with the checksum octet. */
		do {
			rcv->checksum += lbuf[--rcv->byte];
		} while (rcv->byte);
		rcv->state = PLIP_PK_CHECKSUM;
		/* fall through */

	case PLIP_PK_CHECKSUM:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->data))
			return TIMEOUT;
		if (rcv->data != rcv->checksum) {
			dev->stats.rx_crc_errors++;
			if (net_debug)
				printk(KERN_DEBUG "%s: checksum error\n", dev->name);
			return ERROR;
		}
		rcv->state = PLIP_PK_DONE;
		/* fall through */

	case PLIP_PK_DONE:
		/* Inform the upper layer for the arrival of a packet. */
		rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
		netif_rx_ni(rcv->skb);
		dev->stats.rx_bytes += rcv->length.h;
		dev->stats.rx_packets++;
		rcv->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive end\n", dev->name);

		/* Close the connection. */
		write_data (dev, 0x00);
		spin_lock_irq(&nl->lock);
		if (snd->state != PLIP_PK_DONE) {
			/* A transmit is pending: go straight to send mode. */
			nl->connection = PLIP_CN_SEND;
			spin_unlock_irq(&nl->lock);
			schedule_work(&nl->immediate);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		} else {
			nl->connection = PLIP_CN_NONE;
			spin_unlock_irq(&nl->lock);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		}
	}
	return OK;
}
  621. /* PLIP_SEND --- send a byte (two nibbles)
  622. Returns OK on success, TIMEOUT when timeout */
/* Send one byte as two 4-bit nibbles, resumable via *ns_p.
   Each wait loop gives up after nibble_timeout iterations of
   PLIP_DELAY_UNIT usec.  Cases fall through as each stage completes. */
static inline int
plip_send(unsigned short nibble_timeout, struct net_device *dev,
	  enum plip_nibble_state *ns_p, unsigned char data)
{
	unsigned char c0;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		/* Present the low nibble on the data lines. */
		write_data (dev, data & 0x0f);
		*ns_p = PLIP_NB_1;
		/* fall through */

	case PLIP_NB_1:
		/* Raise bit 4 with the low nibble, then wait for the
		   receiver's ACK (status bit 0x80 going low). */
		write_data (dev, 0x10 | (data & 0x0f));
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if ((c0 & 0x80) == 0)
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		write_data (dev, 0x10 | (data >> 4));
		*ns_p = PLIP_NB_2;
		/* fall through */

	case PLIP_NB_2:
		/* Drop bit 4 with the high nibble, then wait for the
		   receiver's ACK (status bit 0x80 going high). */
		write_data (dev, (data >> 4));
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if (c0 & 0x80)
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		*ns_p = PLIP_NB_BEGIN;
		return OK;
	}
	return OK;
}
  662. /* PLIP_SEND_PACKET --- send a packet */
/* PLIP_CN_SEND handler: resumable state machine that triggers the
   peer, then pushes length, payload and checksum.  Trigger handshake
   detects collisions with an incoming packet (peer triggered us
   first) and yields via the collisions counter.  Cases deliberately
   fall through as each stage completes. */
static int
plip_send_packet(struct net_device *dev, struct net_local *nl,
		 struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;
	unsigned char c0;
	unsigned int cx;

	if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
		printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
		snd->state = PLIP_PK_DONE;
		snd->skb = NULL;
		return ERROR;
	}

	switch (snd->state) {
	case PLIP_PK_TRIGGER:
		/* Peer's status lines must read idle (0x80) before we
		   may start a handshake. */
		if ((read_status(dev) & 0xf8) != 0x80)
			return HS_TIMEOUT;

		/* Trigger remote rx interrupt. */
		write_data (dev, 0x08);
		cx = nl->trigger;
		while (1) {
			udelay(PLIP_DELAY_UNIT);
			spin_lock_irq(&nl->lock);
			if (nl->connection == PLIP_CN_RECEIVE) {
				spin_unlock_irq(&nl->lock);
				/* Interrupted. */
				dev->stats.collisions++;
				return OK;
			}
			c0 = read_status(dev);
			if (c0 & 0x08) {
				/* Peer acknowledged the trigger. */
				spin_unlock_irq(&nl->lock);
				DISABLE(dev->irq);
				synchronize_irq(dev->irq);
				if (nl->connection == PLIP_CN_RECEIVE) {
					/* Interrupted.
					   We don't need to enable irq,
					   as it is soon disabled. */
					/* Yes, we do. New variant of
					   {enable,disable}_irq *counts*
					   them.  -- AV  */
					ENABLE(dev->irq);
					dev->stats.collisions++;
					return OK;
				}
				disable_parport_interrupts (dev);
				if (net_debug > 2)
					printk(KERN_DEBUG "%s: send start\n", dev->name);
				snd->state = PLIP_PK_LENGTH_LSB;
				snd->nibble = PLIP_NB_BEGIN;
				nl->timeout_count = 0;
				break;
			}
			spin_unlock_irq(&nl->lock);
			if (--cx == 0) {
				write_data (dev, 0x00);
				return HS_TIMEOUT;
			}
		}
		/* fall through */

	case PLIP_PK_LENGTH_LSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.lsb))
			return TIMEOUT;
		snd->state = PLIP_PK_LENGTH_MSB;
		/* fall through */

	case PLIP_PK_LENGTH_MSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.msb))
			return TIMEOUT;
		snd->state = PLIP_PK_DATA;
		snd->byte = 0;
		snd->checksum = 0;
		/* fall through */

	case PLIP_PK_DATA:
		do {
			if (plip_send(nibble_timeout, dev,
				      &snd->nibble, lbuf[snd->byte]))
				return TIMEOUT;
		} while (++snd->byte < snd->length.h);
		/* Sum the payload for the trailing checksum octet. */
		do {
			snd->checksum += lbuf[--snd->byte];
		} while (snd->byte);
		snd->state = PLIP_PK_CHECKSUM;
		/* fall through */

	case PLIP_PK_CHECKSUM:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->checksum))
			return TIMEOUT;

		dev->stats.tx_bytes += snd->skb->len;
		dev_kfree_skb(snd->skb);
		dev->stats.tx_packets++;
		snd->state = PLIP_PK_DONE;
		/* fall through */

	case PLIP_PK_DONE:
		/* Close the connection */
		write_data (dev, 0x00);
		snd->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: send end\n", dev->name);
		nl->connection = PLIP_CN_CLOSING;
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
		enable_parport_interrupts (dev);
		ENABLE(dev->irq);
		return OK;
	}
	return OK;
}
/* PLIP_CN_CLOSING handler: transition back to idle and restart the
   transmit queue; if another parport driver asked for the port while
   we were busy, release it now that the transfer is finished. */
static int
plip_connection_close(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv)
{
	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_CLOSING) {
		nl->connection = PLIP_CN_NONE;
		netif_wake_queue (dev);
	}
	spin_unlock_irq(&nl->lock);
	if (nl->should_relinquish) {
		nl->should_relinquish = nl->port_owner = 0;
		parport_release(nl->pardev);
	}
	return OK;
}
  784. /* PLIP_ERROR --- wait till other end settled */
/* PLIP_CN_ERROR handler: poll until the peer's status lines return to
   idle (0x80), then reset the interface; otherwise retry next tick. */
static int
plip_error(struct net_device *dev, struct net_local *nl,
	   struct plip_local *snd, struct plip_local *rcv)
{
	unsigned char status;

	status = read_status(dev);
	if ((status & 0xf8) == 0x80) {
		/* Peer settled: bring the interface back up. */
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
		nl->connection = PLIP_CN_NONE;
		nl->should_relinquish = 0;
		netif_start_queue (dev);
		enable_parport_interrupts (dev);
		ENABLE(dev->irq);
		netif_wake_queue (dev);
	} else {
		/* Lines still busy: check again one tick later. */
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
	}

	return OK;
}
/* Handle the parallel port interrupts. */
static void
plip_interrupt(void *dev_id)
{
	struct net_device *dev = dev_id;
	struct net_local *nl;
	struct plip_local *rcv;
	unsigned char c0;
	unsigned long flags;

	nl = netdev_priv(dev);
	rcv = &nl->rcv_data;

	spin_lock_irqsave (&nl->lock, flags);

	/* A real receive interrupt has the peer's trigger pattern on the
	   status lines; anything else is spurious. */
	c0 = read_status(dev);
	if ((c0 & 0xf8) != 0xc0) {
		if ((dev->irq != -1) && (net_debug > 1))
			printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
		spin_unlock_irqrestore (&nl->lock, flags);
		return;
	}

	if (net_debug > 3)
		printk(KERN_DEBUG "%s: interrupt.\n", dev->name);

	switch (nl->connection) {
	case PLIP_CN_CLOSING:
		netif_wake_queue (dev);
		/* fall through -- start receiving straight away */
	case PLIP_CN_NONE:
	case PLIP_CN_SEND:
		/* Switch the state machine to receive and kick the
		   bottom-half worker. */
		rcv->state = PLIP_PK_TRIGGER;
		nl->connection = PLIP_CN_RECEIVE;
		nl->timeout_count = 0;
		schedule_work(&nl->immediate);
		break;

	case PLIP_CN_RECEIVE:
		/* May occur because there is race condition
		   around test and set of dev->interrupt.
		   Ignore this interrupt. */
		break;

	case PLIP_CN_ERROR:
		printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
		break;
	}

	spin_unlock_irqrestore(&nl->lock, flags);
}
  848. static int
  849. plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
  850. {
  851. struct net_local *nl = netdev_priv(dev);
  852. struct plip_local *snd = &nl->snd_data;
  853. if (netif_queue_stopped(dev))
  854. return NETDEV_TX_BUSY;
  855. /* We may need to grab the bus */
  856. if (!nl->port_owner) {
  857. if (parport_claim(nl->pardev))
  858. return NETDEV_TX_BUSY;
  859. nl->port_owner = 1;
  860. }
  861. netif_stop_queue (dev);
  862. if (skb->len > dev->mtu + dev->hard_header_len) {
  863. printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
  864. netif_start_queue (dev);
  865. return NETDEV_TX_BUSY;
  866. }
  867. if (net_debug > 2)
  868. printk(KERN_DEBUG "%s: send request\n", dev->name);
  869. spin_lock_irq(&nl->lock);
  870. snd->skb = skb;
  871. snd->length.h = skb->len;
  872. snd->state = PLIP_PK_TRIGGER;
  873. if (nl->connection == PLIP_CN_NONE) {
  874. nl->connection = PLIP_CN_SEND;
  875. nl->timeout_count = 0;
  876. }
  877. schedule_work(&nl->immediate);
  878. spin_unlock_irq(&nl->lock);
  879. return NETDEV_TX_OK;
  880. }
  881. static void
  882. plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
  883. {
  884. const struct in_device *in_dev;
  885. rcu_read_lock();
  886. in_dev = __in_dev_get_rcu(dev);
  887. if (in_dev) {
  888. /* Any address will do - we take the first */
  889. const struct in_ifaddr *ifa = in_dev->ifa_list;
  890. if (ifa) {
  891. memcpy(eth->h_source, dev->dev_addr, 6);
  892. memset(eth->h_dest, 0xfc, 2);
  893. memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
  894. }
  895. }
  896. rcu_read_unlock();
  897. }
  898. static int
  899. plip_hard_header(struct sk_buff *skb, struct net_device *dev,
  900. unsigned short type, const void *daddr,
  901. const void *saddr, unsigned len)
  902. {
  903. int ret;
  904. ret = eth_header(skb, dev, type, daddr, saddr, len);
  905. if (ret >= 0)
  906. plip_rewrite_address (dev, (struct ethhdr *)skb->data);
  907. return ret;
  908. }
  909. static int plip_hard_header_cache(const struct neighbour *neigh,
  910. struct hh_cache *hh)
  911. {
  912. int ret;
  913. ret = eth_header_cache(neigh, hh);
  914. if (ret == 0) {
  915. struct ethhdr *eth;
  916. eth = (struct ethhdr*)(((u8*)hh->hh_data) +
  917. HH_DATA_OFF(sizeof(*eth)));
  918. plip_rewrite_address (neigh->dev, eth);
  919. }
  920. return ret;
  921. }
/* Open/initialize the board. This is called (in the current kernel)
   sometime after booting when the 'ifconfig' program is run.

   This routine gets exclusive access to the parallel port by allocating
   its IRQ line.
 */
static int
plip_open(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct in_device *in_dev;

	/* Grab the port */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev)) return -EAGAIN;
		nl->port_owner = 1;
	}

	nl->should_relinquish = 0;

	/* Clear the data port. */
	write_data (dev, 0x00);

	/* Enable rx interrupt. */
	enable_parport_interrupts (dev);
	if (dev->irq == -1)
	{
		/* No IRQ line available: fall back to polling the port
		   from a self-rescheduling delayed work "timer". */
		atomic_set (&nl->kill_timer, 0);
		schedule_delayed_work(&nl->timer, 1);
	}

	/* Initialize the state machine. */
	nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
	nl->rcv_data.skb = nl->snd_data.skb = NULL;
	nl->connection = PLIP_CN_NONE;
	nl->is_deferred = 0;

	/* Fill in the MAC-level header.
	   We used to abuse dev->broadcast to store the point-to-point
	   MAC address, but we no longer do it. Instead, we fetch the
	   interface address whenever it is needed, which is cheap enough
	   because we use the hh_cache. Actually, abusing dev->broadcast
	   didn't work, because when using plip_open the point-to-point
	   address isn't yet known.

	   PLIP doesn't have a real MAC address, but we need it to be
	   DOS compatible, and to properly support taps (otherwise,
	   when the device address isn't identical to the address of a
	   received frame, the kernel incorrectly drops it). */

	in_dev=__in_dev_get_rtnl(dev);
	if (in_dev) {
		/* Any address will do - we take the first. We already
		   have the first two bytes filled with 0xfc, from
		   plip_init_dev(). */
		struct in_ifaddr *ifa=in_dev->ifa_list;
		if (ifa != NULL) {
			/* Last four bytes of the fake MAC come from the
			   interface's own IPv4 address. */
			memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
		}
	}

	netif_start_queue (dev);

	return 0;
}
/* The inverse routine to plip_open (). */
static int
plip_close(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;
	struct plip_local *rcv = &nl->rcv_data;

	netif_stop_queue (dev);
	/* Disable and drain the interrupt before tearing down state. */
	DISABLE(dev->irq);
	synchronize_irq(dev->irq);

	if (dev->irq == -1)
	{
		/* IRQ-less mode: ask the polling work to stop and wait
		   until it confirms it has exited. */
		init_completion(&nl->killed_timer_cmp);
		atomic_set (&nl->kill_timer, 1);
		wait_for_completion(&nl->killed_timer_cmp);
	}

#ifdef NOTDEF
	outb(0x00, PAR_DATA(dev));
#endif
	nl->is_deferred = 0;
	nl->connection = PLIP_CN_NONE;
	if (nl->port_owner) {
		parport_release(nl->pardev);
		nl->port_owner = 0;
	}

	/* Reset both halves of the state machine and free any skbs
	   still held by an in-flight transfer. */
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}

#ifdef NOTDEF
	/* Reset. */
	outb(0x00, PAR_CONTROL(dev));
#endif
	return 0;
}
  1017. static int
  1018. plip_preempt(void *handle)
  1019. {
  1020. struct net_device *dev = (struct net_device *)handle;
  1021. struct net_local *nl = netdev_priv(dev);
  1022. /* Stand our ground if a datagram is on the wire */
  1023. if (nl->connection != PLIP_CN_NONE) {
  1024. nl->should_relinquish = 1;
  1025. return 1;
  1026. }
  1027. nl->port_owner = 0; /* Remember that we released the bus */
  1028. return 0;
  1029. }
  1030. static void
  1031. plip_wakeup(void *handle)
  1032. {
  1033. struct net_device *dev = (struct net_device *)handle;
  1034. struct net_local *nl = netdev_priv(dev);
  1035. if (nl->port_owner) {
  1036. /* Why are we being woken up? */
  1037. printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
  1038. if (!parport_claim(nl->pardev))
  1039. /* bus_owner is already set (but why?) */
  1040. printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
  1041. else
  1042. return;
  1043. }
  1044. if (!(dev->flags & IFF_UP))
  1045. /* Don't need the port when the interface is down */
  1046. return;
  1047. if (!parport_claim(nl->pardev)) {
  1048. nl->port_owner = 1;
  1049. /* Clear the data port. */
  1050. write_data (dev, 0x00);
  1051. }
  1052. }
  1053. static int
  1054. plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
  1055. {
  1056. struct net_local *nl = netdev_priv(dev);
  1057. struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru;
  1058. if (cmd != SIOCDEVPLIP)
  1059. return -EOPNOTSUPP;
  1060. switch(pc->pcmd) {
  1061. case PLIP_GET_TIMEOUT:
  1062. pc->trigger = nl->trigger;
  1063. pc->nibble = nl->nibble;
  1064. break;
  1065. case PLIP_SET_TIMEOUT:
  1066. if(!capable(CAP_NET_ADMIN))
  1067. return -EPERM;
  1068. nl->trigger = pc->trigger;
  1069. nl->nibble = pc->nibble;
  1070. break;
  1071. default:
  1072. return -EOPNOTSUPP;
  1073. }
  1074. return 0;
  1075. }
  1076. static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
  1077. static int timid;
  1078. module_param_array(parport, int, NULL, 0);
  1079. module_param(timid, int, 0);
  1080. MODULE_PARM_DESC(parport, "List of parport device numbers to use by plip");
  1081. static struct net_device *dev_plip[PLIP_MAX] = { NULL, };
  1082. static inline int
  1083. plip_searchfor(int list[], int a)
  1084. {
  1085. int i;
  1086. for (i = 0; i < PLIP_MAX && list[i] != -1; i++) {
  1087. if (list[i] == a) return 1;
  1088. }
  1089. return 0;
  1090. }
  1091. /* plip_attach() is called (by the parport code) when a port is
  1092. * available to use. */
  1093. static void plip_attach (struct parport *port)
  1094. {
  1095. static int unit;
  1096. struct net_device *dev;
  1097. struct net_local *nl;
  1098. char name[IFNAMSIZ];
  1099. if ((parport[0] == -1 && (!timid || !port->devices)) ||
  1100. plip_searchfor(parport, port->number)) {
  1101. if (unit == PLIP_MAX) {
  1102. printk(KERN_ERR "plip: too many devices\n");
  1103. return;
  1104. }
  1105. sprintf(name, "plip%d", unit);
  1106. dev = alloc_etherdev(sizeof(struct net_local));
  1107. if (!dev) {
  1108. printk(KERN_ERR "plip: memory squeeze\n");
  1109. return;
  1110. }
  1111. strcpy(dev->name, name);
  1112. dev->irq = port->irq;
  1113. dev->base_addr = port->base;
  1114. if (port->irq == -1) {
  1115. printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode,"
  1116. "which is fairly inefficient!\n", port->name);
  1117. }
  1118. nl = netdev_priv(dev);
  1119. nl->dev = dev;
  1120. nl->pardev = parport_register_device(port, dev->name, plip_preempt,
  1121. plip_wakeup, plip_interrupt,
  1122. 0, dev);
  1123. if (!nl->pardev) {
  1124. printk(KERN_ERR "%s: parport_register failed\n", name);
  1125. goto err_free_dev;
  1126. }
  1127. plip_init_netdev(dev);
  1128. if (register_netdev(dev)) {
  1129. printk(KERN_ERR "%s: network register failed\n", name);
  1130. goto err_parport_unregister;
  1131. }
  1132. printk(KERN_INFO "%s", version);
  1133. if (dev->irq != -1)
  1134. printk(KERN_INFO "%s: Parallel port at %#3lx, "
  1135. "using IRQ %d.\n",
  1136. dev->name, dev->base_addr, dev->irq);
  1137. else
  1138. printk(KERN_INFO "%s: Parallel port at %#3lx, "
  1139. "not using IRQ.\n",
  1140. dev->name, dev->base_addr);
  1141. dev_plip[unit++] = dev;
  1142. }
  1143. return;
  1144. err_parport_unregister:
  1145. parport_unregister_device(nl->pardev);
  1146. err_free_dev:
  1147. free_netdev(dev);
  1148. }
  1149. /* plip_detach() is called (by the parport code) when a port is
  1150. * no longer available to use. */
  1151. static void plip_detach (struct parport *port)
  1152. {
  1153. /* Nothing to do */
  1154. }
/* Hooks registered with the parport layer: called when ports appear
   or disappear. */
static struct parport_driver plip_driver = {
	.name	= "plip",
	.attach = plip_attach,
	.detach = plip_detach
};
  1160. static void __exit plip_cleanup_module (void)
  1161. {
  1162. struct net_device *dev;
  1163. int i;
  1164. parport_unregister_driver (&plip_driver);
  1165. for (i=0; i < PLIP_MAX; i++) {
  1166. if ((dev = dev_plip[i])) {
  1167. struct net_local *nl = netdev_priv(dev);
  1168. unregister_netdev(dev);
  1169. if (nl->port_owner)
  1170. parport_release(nl->pardev);
  1171. parport_unregister_device(nl->pardev);
  1172. free_netdev(dev);
  1173. dev_plip[i] = NULL;
  1174. }
  1175. }
  1176. }
  1177. #ifndef MODULE
  1178. static int parport_ptr;
  1179. static int __init plip_setup(char *str)
  1180. {
  1181. int ints[4];
  1182. str = get_options(str, ARRAY_SIZE(ints), ints);
  1183. /* Ugh. */
  1184. if (!strncmp(str, "parport", 7)) {
  1185. int n = simple_strtoul(str+7, NULL, 10);
  1186. if (parport_ptr < PLIP_MAX)
  1187. parport[parport_ptr++] = n;
  1188. else
  1189. printk(KERN_INFO "plip: too many ports, %s ignored.\n",
  1190. str);
  1191. } else if (!strcmp(str, "timid")) {
  1192. timid = 1;
  1193. } else {
  1194. if (ints[0] == 0 || ints[1] == 0) {
  1195. /* disable driver on "plip=" or "plip=0" */
  1196. parport[0] = -2;
  1197. } else {
  1198. printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
  1199. ints[1]);
  1200. }
  1201. }
  1202. return 1;
  1203. }
  1204. __setup("plip=", plip_setup);
  1205. #endif /* !MODULE */
  1206. static int __init plip_init (void)
  1207. {
  1208. if (parport[0] == -2)
  1209. return 0;
  1210. if (parport[0] != -1 && timid) {
  1211. printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");
  1212. timid = 0;
  1213. }
  1214. if (parport_register_driver (&plip_driver)) {
  1215. printk (KERN_WARNING "plip: couldn't register driver\n");
  1216. return 1;
  1217. }
  1218. return 0;
  1219. }
/* Module loader entry/exit points and license declaration. */
module_init(plip_init);
module_exit(plip_cleanup_module);
MODULE_LICENSE("GPL");