/*********************************************************************
 *
 * Filename:      w83977af_ir.c
 * Version:       1.0
 * Description:   FIR driver for the Winbond W83977AF Super I/O chip
 * Status:        Experimental.
 * Author:        Paul VanderSpek
 * Created at:    Wed Nov  4 11:46:16 1998
 * Modified at:   Fri Jan 28 12:10:59 2000
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 *
 *     Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
 *     Copyright (c) 1998-1999 Rebel.com
 *
 *     This program is free software; you can redistribute it and/or
 *     modify it under the terms of the GNU General Public License as
 *     published by the Free Software Foundation; either version 2 of
 *     the License, or (at your option) any later version.
 *
 *     Neither Paul VanderSpek nor Rebel.com admit liability nor provide
 *     warranty for any of this software. This material is provided "AS-IS"
 *     and at no charge.
 *
 *     If you find bugs in this file, it's very likely that the same bug
 *     will also be in pc87108.c, since the implementations are quite
 *     similar.
 *
 *     Notice that all functions that need to access the chip in _any_
 *     way must save the BSR register on entry and restore it on exit.
 *     It is _very_ important to follow this policy!
 *
 *         __u8 bank;
 *
 *         bank = inb(iobase + BSR);
 *
 *         do_your_stuff_here();
 *
 *         outb(bank, iobase + BSR);
 *
 ********************************************************************/
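/*
 * Note: in the functions below the bank register is saved and restored
 * through the SSR offset, e.g.
 *
 *         set = inb(iobase + SSR);
 *         ...
 *         outb(set, iobase + SSR);
 *
 * which presumably is the same register the policy above calls BSR (both
 * offsets come from the w83977af_ir.h header included here).
 */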
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/rtnetlink.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>

#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#include "w83977af.h"
#include "w83977af_ir.h"

#ifdef  CONFIG_ARCH_NETWINDER            /* Adjust to NetWinder differences */
#undef  CONFIG_NETWINDER_TX_DMA_PROBLEMS /* Not needed */
#define CONFIG_NETWINDER_RX_DMA_PROBLEMS /* Must have this one! */
#endif

#define CONFIG_USE_W977_PNP /* Currently needed */
#define PIO_MAX_SPEED 115200

static char *driver_name = "w83977af_ir";
static int qos_mtt_bits = 0x07; /* 1 ms or more */
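/*
 * Note: the min-turn-time QoS field is a bit mask; assuming the usual IrLAP
 * encoding (bit 0 = 10 ms, bit 1 = 5 ms, bit 2 = 1 ms), 0x07 advertises that
 * this device needs a turnaround time of 1 ms or more.
 */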
#define CHIP_IO_EXTENT 8

static unsigned int io[] = { 0x180, ~0, ~0, ~0 };
#ifdef CONFIG_ARCH_NETWINDER             /* Adjust to NetWinder differences */
static unsigned int irq[] = { 6, 0, 0, 0 };
#else
static unsigned int irq[] = { 11, 0, 0, 0 };
#endif
static unsigned int dma[] = { 1, 0, 0, 0 };
static unsigned int efbase[] = { W977_EFIO_BASE, W977_EFIO2_BASE };
static unsigned int efio = W977_EFIO_BASE;

static struct w83977af_ir *dev_self[] = { NULL, NULL, NULL, NULL };

/* Some prototypes */
static int  w83977af_open(int i, unsigned int iobase, unsigned int irq,
			  unsigned int dma);
static int  w83977af_close(struct w83977af_ir *self);
static int  w83977af_probe(int iobase, int irq, int dma);
static int  w83977af_dma_receive(struct w83977af_ir *self);
static int  w83977af_dma_receive_complete(struct w83977af_ir *self);
static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
				      struct net_device *dev);
static int  w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
static void w83977af_dma_write(struct w83977af_ir *self, int iobase);
static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed);
static int  w83977af_is_receiving(struct w83977af_ir *self);

static int  w83977af_net_open(struct net_device *dev);
static int  w83977af_net_close(struct net_device *dev);
static int  w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

/*
 * Function w83977af_init ()
 *
 *    Initialize chip. Just try to find out how many chips we are dealing
 *    with and where they are.
 */
static int __init w83977af_init(void)
{
	int i;

	IRDA_DEBUG(0, "%s()\n", __func__);

	for (i = 0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) {
		if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
			return 0;
	}
	return -ENODEV;
}

/*
 * Function w83977af_cleanup ()
 *
 *    Close all configured chips
 *
 */
static void __exit w83977af_cleanup(void)
{
	int i;

	IRDA_DEBUG(4, "%s()\n", __func__);

	for (i = 0; i < ARRAY_SIZE(dev_self); i++) {
		if (dev_self[i])
			w83977af_close(dev_self[i]);
	}
}

static const struct net_device_ops w83977_netdev_ops = {
	.ndo_open       = w83977af_net_open,
	.ndo_stop       = w83977af_net_close,
	.ndo_start_xmit = w83977af_hard_xmit,
	.ndo_do_ioctl   = w83977af_net_ioctl,
};

/*
 * Function w83977af_open (iobase, irq)
 *
 *    Open driver instance
 *
 */
static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
			 unsigned int dma)
{
	struct net_device *dev;
	struct w83977af_ir *self;
	int err;

	IRDA_DEBUG(0, "%s()\n", __func__);

	/* Lock the port that we need */
	if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
		IRDA_DEBUG(0, "%s(), can't get iobase of 0x%03x\n",
			   __func__, iobase);
		return -ENODEV;
	}

	if (w83977af_probe(iobase, irq, dma) == -1) {
		err = -1;
		goto err_out;
	}

	/*
	 * Allocate new instance of the driver
	 */
	dev = alloc_irdadev(sizeof(struct w83977af_ir));
	if (dev == NULL) {
		printk(KERN_ERR "IrDA: Can't allocate memory for "
		       "IrDA control block!\n");
		err = -ENOMEM;
		goto err_out;
	}

	self = netdev_priv(dev);
	spin_lock_init(&self->lock);

	/* Initialize IO */
	self->io.fir_base  = iobase;
	self->io.irq       = irq;
	self->io.fir_ext   = CHIP_IO_EXTENT;
	self->io.dma       = dma;
	self->io.fifo_size = 32;

	/* Initialize QoS for this device */
	irda_init_max_qos_capabilies(&self->qos);

	/* The only value we must override is the baudrate */

	/* FIXME: The HP HSDL-1100 does not support 1152000! */
	self->qos.baud_rate.bits = IR_9600 | IR_19200 | IR_38400 | IR_57600 |
		IR_115200 | IR_576000 | IR_1152000 | (IR_4000000 << 8);
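	/*
	 * Note: the IrLAP baud-rate field is 16 bits wide; the rates up to
	 * 1152000 live in the low byte, and the 4 Mb/s bit in the high byte,
	 * which is presumably why IR_4000000 is shifted left by 8 here.
	 */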
	/* The HP HSDL-1100 needs 1 ms according to the specs */
	self->qos.min_turn_time.bits = qos_mtt_bits;
	irda_qos_bits_to_value(&self->qos);

	/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
	self->rx_buff.truesize = 14384;
	self->tx_buff.truesize = 4000;
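	/*
	 * Note: 14384 = (2048 + 6) * 7 + 6, i.e. room for a window of seven
	 * maximum-sized (2048 byte) frames plus wrapping overhead, assuming
	 * the formula above with window_size = 7. The Tx buffer only ever
	 * holds a single wrapped frame, so 4000 bytes is enough there.
	 */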
	/* Allocate memory if needed */
	self->rx_buff.head =
		dma_alloc_coherent(NULL, self->rx_buff.truesize,
				   &self->rx_buff_dma, GFP_KERNEL);
	if (self->rx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out1;
	}

	memset(self->rx_buff.head, 0, self->rx_buff.truesize);

	self->tx_buff.head =
		dma_alloc_coherent(NULL, self->tx_buff.truesize,
				   &self->tx_buff_dma, GFP_KERNEL);
	if (self->tx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out2;
	}
	memset(self->tx_buff.head, 0, self->tx_buff.truesize);

	self->rx_buff.in_frame = FALSE;
	self->rx_buff.state = OUTSIDE_FRAME;
	self->tx_buff.data = self->tx_buff.head;
	self->rx_buff.data = self->rx_buff.head;
	self->netdev = dev;

	dev->netdev_ops = &w83977_netdev_ops;

	err = register_netdev(dev);
	if (err) {
		IRDA_ERROR("%s(), register_netdevice() failed!\n", __func__);
		goto err_out3;
	}
	IRDA_MESSAGE("IrDA: Registered device %s\n", dev->name);

	/* Need to store self somewhere */
	dev_self[i] = self;

	return 0;
err_out3:
	dma_free_coherent(NULL, self->tx_buff.truesize,
			  self->tx_buff.head, self->tx_buff_dma);
err_out2:
	dma_free_coherent(NULL, self->rx_buff.truesize,
			  self->rx_buff.head, self->rx_buff_dma);
err_out1:
	free_netdev(dev);
err_out:
	release_region(iobase, CHIP_IO_EXTENT);
	return err;
}

/*
 * Function w83977af_close (self)
 *
 *    Close driver instance
 *
 */
static int w83977af_close(struct w83977af_ir *self)
{
	int iobase;

	IRDA_DEBUG(0, "%s()\n", __func__);

	iobase = self->io.fir_base;

#ifdef CONFIG_USE_W977_PNP
	/* enter PnP configuration mode */
	w977_efm_enter(efio);

	w977_select_device(W977_DEVICE_IR, efio);

	/* Deactivate device */
	w977_write_reg(0x30, 0x00, efio);

	w977_efm_exit(efio);
#endif /* CONFIG_USE_W977_PNP */

	/* Remove netdevice */
	unregister_netdev(self->netdev);

	/* Release the PORT that this driver is using */
	IRDA_DEBUG(0, "%s(), Releasing Region %03x\n",
		   __func__, self->io.fir_base);
	release_region(self->io.fir_base, self->io.fir_ext);

	if (self->tx_buff.head)
		dma_free_coherent(NULL, self->tx_buff.truesize,
				  self->tx_buff.head, self->tx_buff_dma);

	if (self->rx_buff.head)
		dma_free_coherent(NULL, self->rx_buff.truesize,
				  self->rx_buff.head, self->rx_buff_dma);

	free_netdev(self->netdev);

	return 0;
}

static int w83977af_probe(int iobase, int irq, int dma)
{
	int version;
	int i;

	for (i = 0; i < 2; i++) {
		IRDA_DEBUG(0, "%s()\n", __func__);
#ifdef CONFIG_USE_W977_PNP
		/* Enter PnP configuration mode */
		w977_efm_enter(efbase[i]);

		w977_select_device(W977_DEVICE_IR, efbase[i]);

		/* Configure PnP port, IRQ, and DMA channel */
		w977_write_reg(0x60, (iobase >> 8) & 0xff, efbase[i]);
		w977_write_reg(0x61, (iobase) & 0xff, efbase[i]);

		w977_write_reg(0x70, irq, efbase[i]);
#ifdef CONFIG_ARCH_NETWINDER
		/* Netwinder uses 1 higher than Linux */
		w977_write_reg(0x74, dma + 1, efbase[i]);
#else
		w977_write_reg(0x74, dma, efbase[i]);
#endif /* CONFIG_ARCH_NETWINDER */
		w977_write_reg(0x75, 0x04, efbase[i]); /* Disable Tx DMA */

		/* Set append hardware CRC, enable IR bank selection */
		w977_write_reg(0xf0, APEDCRC | ENBNKSEL, efbase[i]);

		/* Activate device */
		w977_write_reg(0x30, 0x01, efbase[i]);

		w977_efm_exit(efbase[i]);
#endif /* CONFIG_USE_W977_PNP */
		/* Disable Advanced mode */
		switch_bank(iobase, SET2);
		outb(0x00, iobase + 2);

		/* Turn on UART (global) interrupts */
		switch_bank(iobase, SET0);
		outb(HCR_EN_IRQ, iobase + HCR);

		/* Switch to advanced mode */
		switch_bank(iobase, SET2);
		outb(inb(iobase + ADCR1) | ADCR1_ADV_SL, iobase + ADCR1);

		/* Set default IR-mode */
		switch_bank(iobase, SET0);
		outb(HCR_SIR, iobase + HCR);

		/* Read the Advanced IR ID */
		switch_bank(iobase, SET3);
		version = inb(iobase + AUID);

		/* Should be 0x1? */
		if (0x10 == (version & 0xf0)) {
			efio = efbase[i];

			/* Set FIFO size to 32 */
			switch_bank(iobase, SET2);
			outb(ADCR2_RXFS32 | ADCR2_TXFS32, iobase + ADCR2);

			/* Set FIFO threshold to TX17, RX16 */
			switch_bank(iobase, SET0);
			outb(UFR_RXTL | UFR_TXTL | UFR_TXF_RST | UFR_RXF_RST |
			     UFR_EN_FIFO, iobase + UFR);

			/* Receiver frame length */
			switch_bank(iobase, SET4);
			outb(2048 & 0xff, iobase + 6);
			outb((2048 >> 8) & 0x1f, iobase + 7);
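			/*
			 * Note: 2048 is presumably the maximum receive frame
			 * length; the low byte goes into offset 6 and the
			 * upper five bits into offset 7 of bank SET4.
			 */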
			/*
			 * Init HP HSDL-1100 transceiver.
			 *
			 * Set IRX_MSL since we have two receive paths, IRRX
			 * and IRRXH. Clear IRSL0D since we want IRSL0 to be
			 * an input pin used for IRRXH.
			 *
			 *   IRRX  pin 37 connected to receiver
			 *   IRTX  pin 38 connected to transmitter
			 *   FIRRX pin 39 connected to receiver (IRSL0)
			 *   CIRRX pin 40 connected to pin 37
			 */
			switch_bank(iobase, SET7);
			outb(0x40, iobase + 7);

			IRDA_MESSAGE("W83977AF (IR) driver loaded. "
				     "Version: 0x%02x\n", version);

			return 0;
		} else {
			/* Try next extended function register address */
			IRDA_DEBUG(0, "%s(), Wrong chip version\n", __func__);
		}
	}
	return -1;
}

static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
{
	int ir_mode = HCR_SIR;
	int iobase;
	__u8 set;

	iobase = self->io.fir_base;

	/* Update accounting for new speed */
	self->io.speed = speed;

	/* Save current bank */
	set = inb(iobase + SSR);

	/* Disable interrupts */
	switch_bank(iobase, SET0);
	outb(0, iobase + ICR);

	/* Select Set 2 */
	switch_bank(iobase, SET2);
	outb(0x00, iobase + ABHL);
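	/*
	 * Note: ABHL/ABLL apparently hold the baud-rate divisor relative to
	 * 115200 baud, so the values below are simply 115200 / speed:
	 * 9600 -> 12 (0x0c), 19200 -> 6, 38400 -> 3, 57600 -> 2, 115200 -> 1.
	 */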
	switch (speed) {
	case 9600:   outb(0x0c, iobase + ABLL); break;
	case 19200:  outb(0x06, iobase + ABLL); break;
	case 38400:  outb(0x03, iobase + ABLL); break;
	case 57600:  outb(0x02, iobase + ABLL); break;
	case 115200: outb(0x01, iobase + ABLL); break;
	case 576000:
		ir_mode = HCR_MIR_576;
		IRDA_DEBUG(0, "%s(), handling baud of 576000\n", __func__);
		break;
	case 1152000:
		ir_mode = HCR_MIR_1152;
		IRDA_DEBUG(0, "%s(), handling baud of 1152000\n", __func__);
		break;
	case 4000000:
		ir_mode = HCR_FIR;
		IRDA_DEBUG(0, "%s(), handling baud of 4000000\n", __func__);
		break;
	default:
		ir_mode = HCR_FIR;
		IRDA_DEBUG(0, "%s(), unknown baud rate of %d\n", __func__, speed);
		break;
	}

	/* Set speed mode */
	switch_bank(iobase, SET0);
	outb(ir_mode, iobase + HCR);

	/* set FIFO size to 32 */
	switch_bank(iobase, SET2);
	outb(ADCR2_RXFS32 | ADCR2_TXFS32, iobase + ADCR2);

	/* set FIFO threshold to TX17, RX16 */
	switch_bank(iobase, SET0);
	outb(0x00, iobase + UFR);        /* Reset */
	outb(UFR_EN_FIFO, iobase + UFR); /* First we must enable FIFO */
	outb(0xa7, iobase + UFR);

	netif_wake_queue(self->netdev);

	/* Enable some interrupts so we can receive frames */
	switch_bank(iobase, SET0);
	if (speed > PIO_MAX_SPEED) {
		outb(ICR_EFSFI, iobase + ICR);
		w83977af_dma_receive(self);
	} else
		outb(ICR_ERBRI, iobase + ICR);

	/* Restore SSR */
	outb(set, iobase + SSR);
}

/*
 * Function w83977af_hard_xmit (skb, dev)
 *
 *    Sets up a DMA transfer to send the current frame.
 *
 */
static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct w83977af_ir *self;
	__s32 speed;
	int iobase;
	__u8 set;
	int mtt;

	self = netdev_priv(dev);

	iobase = self->io.fir_base;

	IRDA_DEBUG(4, "%s(%ld), skb->len=%d\n", __func__, jiffies,
		   (int)skb->len);

	/* Lock transmit buffer */
	netif_stop_queue(dev);

	/* Check if we need to change the speed */
	speed = irda_get_next_speed(skb);
	if ((speed != self->io.speed) && (speed != -1)) {
		/* Check for empty frame */
		if (!skb->len) {
			w83977af_change_speed(self, speed);
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		} else
			self->new_speed = speed;
	}

	/* Save current set */
	set = inb(iobase + SSR);

	/* Decide if we should use PIO or DMA transfer */
	if (self->io.speed > PIO_MAX_SPEED) {
		self->tx_buff.data = self->tx_buff.head;
		skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len);
		self->tx_buff.len = skb->len;

		mtt = irda_get_mtt(skb);
		IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __func__, jiffies, mtt);
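		/*
		 * Note: irda_get_mtt() returns the required minimum turnaround
		 * time in microseconds, so it is honoured here by simply
		 * busy-waiting before the DMA transfer is started.
		 */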
		if (mtt)
			udelay(mtt);

		/* Enable DMA interrupt */
		switch_bank(iobase, SET0);
		outb(ICR_EDMAI, iobase + ICR);
		w83977af_dma_write(self, iobase);
	} else {
		self->tx_buff.data = self->tx_buff.head;
		self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
						   self->tx_buff.truesize);

		/* Add interrupt on tx low level (will fire immediately) */
		switch_bank(iobase, SET0);
		outb(ICR_ETXTHI, iobase + ICR);
	}
	dev_kfree_skb(skb);

	/* Restore set register */
	outb(set, iobase + SSR);

	return NETDEV_TX_OK;
}

/*
 * Function w83977af_dma_write (self, iobase)
 *
 *    Send frame using DMA
 *
 */
static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
{
	__u8 set;
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
	unsigned long flags;
	__u8 hcr;
#endif
	IRDA_DEBUG(4, "%s(), len=%d\n", __func__, self->tx_buff.len);

	/* Save current set */
	set = inb(iobase + SSR);

	/* Disable DMA */
	switch_bank(iobase, SET0);
	outb(inb(iobase + HCR) & ~HCR_EN_DMA, iobase + HCR);

	/* Choose transmit DMA channel */
	switch_bank(iobase, SET2);
	outb(ADCR1_D_CHSW | /*ADCR1_DMA_F|*/ ADCR1_ADV_SL, iobase + ADCR1);
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
	spin_lock_irqsave(&self->lock, flags);

	disable_dma(self->io.dma);
	clear_dma_ff(self->io.dma);
	set_dma_mode(self->io.dma, DMA_MODE_READ);
	set_dma_addr(self->io.dma, self->tx_buff_dma);
	set_dma_count(self->io.dma, self->tx_buff.len);
#else
	irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
		       DMA_MODE_WRITE);
#endif
	self->io.direction = IO_XMIT;

	/* Enable DMA */
	switch_bank(iobase, SET0);
#ifdef CONFIG_NETWINDER_TX_DMA_PROBLEMS
	hcr = inb(iobase + HCR);
	outb(hcr | HCR_EN_DMA, iobase + HCR);
	enable_dma(self->io.dma);
	spin_unlock_irqrestore(&self->lock, flags);
#else
	outb(inb(iobase + HCR) | HCR_EN_DMA | HCR_TX_WT, iobase + HCR);
#endif
	/* Restore set register */
	outb(set, iobase + SSR);
}

/*
 * Function w83977af_pio_write (iobase, buf, len, fifo_size)
 *
 *    Write a frame to the transmit FIFO using PIO and return the number
 *    of bytes actually written.
 *
 */
static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
{
	int actual = 0;
	__u8 set;

	IRDA_DEBUG(4, "%s()\n", __func__);

	/* Save current bank */
	set = inb(iobase + SSR);

	switch_bank(iobase, SET0);
	if (!(inb_p(iobase + USR) & USR_TSRE)) {
		IRDA_DEBUG(4,
			   "%s(), warning, FIFO not empty yet!\n", __func__);

		fifo_size -= 17;
		IRDA_DEBUG(4, "%s(), %d bytes left in tx fifo\n",
			   __func__, fifo_size);
	}
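	/*
	 * Note: the 17 subtracted above matches the TX FIFO threshold
	 * ("TX17") programmed in w83977af_probe()/w83977af_change_speed(),
	 * so when the transmitter is not yet idle we presumably have to
	 * assume that up to 17 bytes are still queued in the FIFO.
	 */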
	/* Fill FIFO with current frame */
	while ((fifo_size-- > 0) && (actual < len)) {
		/* Transmit next byte */
		outb(buf[actual++], iobase + TBR);
	}

	IRDA_DEBUG(4, "%s(), fifo_size %d ; %d sent of %d\n",
		   __func__, fifo_size, actual, len);

	/* Restore bank */
	outb(set, iobase + SSR);

	return actual;
}

/*
 * Function w83977af_dma_xmit_complete (self)
 *
 *    The transfer of a frame is finished. So do the necessary things
 *
 *
 */
static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
{
	int iobase;
	__u8 set;

	IRDA_DEBUG(4, "%s(%ld)\n", __func__, jiffies);

	IRDA_ASSERT(self != NULL, return;);

	iobase = self->io.fir_base;

	/* Save current set */
	set = inb(iobase + SSR);

	/* Disable DMA */
	switch_bank(iobase, SET0);
	outb(inb(iobase + HCR) & ~HCR_EN_DMA, iobase + HCR);

	/* Check for underrun! */
	if (inb(iobase + AUDR) & AUDR_UNDR) {
		IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __func__);

		self->netdev->stats.tx_errors++;
		self->netdev->stats.tx_fifo_errors++;

		/* Clear bit, by writing 1 to it */
		outb(AUDR_UNDR, iobase + AUDR);
	} else
		self->netdev->stats.tx_packets++;

	if (self->new_speed) {
		w83977af_change_speed(self, self->new_speed);
		self->new_speed = 0;
	}

	/* Unlock tx_buff and request another frame */
	/* Tell the network layer, that we want more frames */
	netif_wake_queue(self->netdev);

	/* Restore set */
	outb(set, iobase + SSR);
}

/*
 * Function w83977af_dma_receive (self)
 *
 *    Get ready for receiving a frame. The device will initiate a DMA
 *    if it starts to receive a frame.
 *
 */
static int w83977af_dma_receive(struct w83977af_ir *self)
{
	int iobase;
	__u8 set;
#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
	unsigned long flags;
	__u8 hcr;
#endif
	IRDA_ASSERT(self != NULL, return -1;);

	IRDA_DEBUG(4, "%s\n", __func__);

	iobase = self->io.fir_base;

	/* Save current set */
	set = inb(iobase + SSR);

	/* Disable DMA */
	switch_bank(iobase, SET0);
	outb(inb(iobase + HCR) & ~HCR_EN_DMA, iobase + HCR);

	/* Choose DMA Rx, DMA Fairness, and Advanced mode */
	switch_bank(iobase, SET2);
	outb((inb(iobase + ADCR1) & ~ADCR1_D_CHSW) /*|ADCR1_DMA_F*/ | ADCR1_ADV_SL,
	     iobase + ADCR1);

	self->io.direction = IO_RECV;
	self->rx_buff.data = self->rx_buff.head;

#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
	spin_lock_irqsave(&self->lock, flags);

	disable_dma(self->io.dma);
	clear_dma_ff(self->io.dma);
	set_dma_mode(self->io.dma, DMA_MODE_READ);
	set_dma_addr(self->io.dma, self->rx_buff_dma);
	set_dma_count(self->io.dma, self->rx_buff.truesize);
#else
	irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
		       DMA_MODE_READ);
#endif
	/*
	 * Reset Rx FIFO. This will also flush the ST_FIFO, it's very
	 * important that we don't reset the Tx FIFO since it might not
	 * be finished transmitting yet
	 */
	switch_bank(iobase, SET0);
	outb(UFR_RXTL | UFR_TXTL | UFR_RXF_RST | UFR_EN_FIFO, iobase + UFR);
	self->st_fifo.len = self->st_fifo.tail = self->st_fifo.head = 0;

	/* Enable DMA */
	switch_bank(iobase, SET0);
#ifdef CONFIG_NETWINDER_RX_DMA_PROBLEMS
	hcr = inb(iobase + HCR);
	outb(hcr | HCR_EN_DMA, iobase + HCR);
	enable_dma(self->io.dma);
	spin_unlock_irqrestore(&self->lock, flags);
#else
	outb(inb(iobase + HCR) | HCR_EN_DMA, iobase + HCR);
#endif
	/* Restore set */
	outb(set, iobase + SSR);

	return 0;
}

/*
 * Function w83977af_dma_receive_complete (self)
 *
 *    Finished with receiving a frame
 *
 */
static int w83977af_dma_receive_complete(struct w83977af_ir *self)
{
	struct sk_buff *skb;
	struct st_fifo *st_fifo;
	int len;
	int iobase;
	__u8 set;
	__u8 status;

	IRDA_DEBUG(4, "%s\n", __func__);

	st_fifo = &self->st_fifo;

	iobase = self->io.fir_base;

	/* Save current set */
	set = inb(iobase + SSR);

	iobase = self->io.fir_base;

	/* Read status FIFO */
	switch_bank(iobase, SET5);
	while ((status = inb(iobase + FS_FO)) & FS_FO_FSFDR) {
		st_fifo->entries[st_fifo->tail].status = status;

		st_fifo->entries[st_fifo->tail].len = inb(iobase + RFLFL);
		st_fifo->entries[st_fifo->tail].len |= inb(iobase + RFLFH) << 8;

		st_fifo->tail++;
		st_fifo->len++;
	}

	while (st_fifo->len) {
		/* Get first entry */
		status = st_fifo->entries[st_fifo->head].status;
		len    = st_fifo->entries[st_fifo->head].len;
		st_fifo->head++;
		st_fifo->len--;

		/* Check for errors */
		if (status & FS_FO_ERR_MSK) {
			if (status & FS_FO_LST_FR) {
				/* Add number of lost frames to stats */
				self->netdev->stats.rx_errors += len;
			} else {
				/* Skip frame */
				self->netdev->stats.rx_errors++;

				self->rx_buff.data += len;

				if (status & FS_FO_MX_LEX)
					self->netdev->stats.rx_length_errors++;

				if (status & FS_FO_PHY_ERR)
					self->netdev->stats.rx_frame_errors++;

				if (status & FS_FO_CRC_ERR)
					self->netdev->stats.rx_crc_errors++;
			}
			/* The errors below can be reported in both cases */
			if (status & FS_FO_RX_OV)
				self->netdev->stats.rx_fifo_errors++;

			if (status & FS_FO_FSF_OV)
				self->netdev->stats.rx_fifo_errors++;
		} else {
			/* Check if we have transferred all data to memory */
			switch_bank(iobase, SET0);
			if (inb(iobase + USR) & USR_RDR) {
				udelay(80); /* Should be enough!? */
			}

			skb = dev_alloc_skb(len + 1);
			if (skb == NULL) {
				printk(KERN_INFO
				       "%s(), memory squeeze, dropping frame.\n", __func__);
				/* Restore set register */
				outb(set, iobase + SSR);

				return FALSE;
			}

			/* Align to 20 bytes */
			skb_reserve(skb, 1);

			/* Copy frame without CRC */
			if (self->io.speed < 4000000) {
				skb_put(skb, len - 2);
				skb_copy_to_linear_data(skb,
							self->rx_buff.data,
							len - 2);
			} else {
				skb_put(skb, len - 4);
				skb_copy_to_linear_data(skb,
							self->rx_buff.data,
							len - 4);
			}
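			/*
			 * Note: at SIR/MIR speeds the link uses a 16-bit CRC
			 * (2 bytes), while 4 Mb/s FIR uses a 32-bit CRC
			 * (4 bytes), which is why 2 or 4 bytes are stripped
			 * from the received frame above.
			 */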
			/* Move to next frame */
			self->rx_buff.data += len;
			self->netdev->stats.rx_packets++;

			skb->dev = self->netdev;
			skb_reset_mac_header(skb);
			skb->protocol = htons(ETH_P_IRDA);
			netif_rx(skb);
		}
	}
	/* Restore set register */
	outb(set, iobase + SSR);

	return TRUE;
}

/*
 * Function w83977af_pio_receive (self)
 *
 *    Receive all data in receiver FIFO
 *
 */
static void w83977af_pio_receive(struct w83977af_ir *self)
{
	__u8 byte = 0x00;
	int iobase;

	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);

	iobase = self->io.fir_base;

	/* Receive all characters in Rx FIFO */
	do {
		byte = inb(iobase + RBR);
		async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff,
				  byte);
	} while (inb(iobase + USR) & USR_RDR); /* Data available */
}

/*
 * Function w83977af_sir_interrupt (self, isr)
 *
 *    Handle SIR interrupt
 *
 */
static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
{
	int actual;
	__u8 new_icr = 0;
	__u8 set;
	int iobase;

	IRDA_DEBUG(4, "%s(), isr=%#x\n", __func__, isr);

	iobase = self->io.fir_base;
	/* Transmit FIFO low on data */
	if (isr & ISR_TXTH_I) {
		/* Write data left in transmit buffer */
		actual = w83977af_pio_write(self->io.fir_base,
					    self->tx_buff.data,
					    self->tx_buff.len,
					    self->io.fifo_size);

		self->tx_buff.data += actual;
		self->tx_buff.len  -= actual;

		self->io.direction = IO_XMIT;

		/* Check if finished */
		if (self->tx_buff.len > 0) {
			new_icr |= ICR_ETXTHI;
		} else {
			set = inb(iobase + SSR);
			switch_bank(iobase, SET0);
			outb(AUDR_SFEND, iobase + AUDR);
			outb(set, iobase + SSR);

			self->netdev->stats.tx_packets++;

			/* Feed me more packets */
			netif_wake_queue(self->netdev);
			new_icr |= ICR_ETBREI;
		}
	}
	/* Check if transmission has completed */
	if (isr & ISR_TXEMP_I) {
		/* Check if we need to change the speed? */
		if (self->new_speed) {
			IRDA_DEBUG(2,
				   "%s(), Changing speed!\n", __func__);
			w83977af_change_speed(self, self->new_speed);
			self->new_speed = 0;
		}

		/* Turn around and get ready to receive some data */
		self->io.direction = IO_RECV;
		new_icr |= ICR_ERBRI;
	}

	/* Rx FIFO threshold or timeout */
	if (isr & ISR_RXTH_I) {
		w83977af_pio_receive(self);

		/* Keep receiving */
		new_icr |= ICR_ERBRI;
	}
	return new_icr;
}

/*
 * Function w83977af_fir_interrupt (self, isr)
 *
 *    Handle MIR/FIR interrupt
 *
 */
static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
{
	__u8 new_icr = 0;
	__u8 set;
	int iobase;

	iobase = self->io.fir_base;
	set = inb(iobase + SSR);

	/* End of frame detected in FIFO */
	if (isr & (ISR_FEND_I | ISR_FSF_I)) {
		if (w83977af_dma_receive_complete(self)) {
			/* Wait for next status FIFO interrupt */
			new_icr |= ICR_EFSFI;
		} else {
			/* DMA not finished yet */

			/* Set timer value, resolution 1 ms */
			switch_bank(iobase, SET4);
			outb(0x01, iobase + TMRL); /* 1 ms */
			outb(0x00, iobase + TMRH);

			/* Start timer */
			outb(IR_MSL_EN_TMR, iobase + IR_MSL);

			new_icr |= ICR_ETMRI;
		}
	}
	/* Timer finished */
	if (isr & ISR_TMR_I) {
		/* Disable timer */
		switch_bank(iobase, SET4);
		outb(0, iobase + IR_MSL);

		/* Clear timer event */
		/* switch_bank(iobase, SET0); */
		/* outb(ASCR_CTE, iobase+ASCR); */

		/* Check if this is a TX timer interrupt */
		if (self->io.direction == IO_XMIT) {
			w83977af_dma_write(self, iobase);

			new_icr |= ICR_EDMAI;
		} else {
			/* Check if DMA has now finished */
			w83977af_dma_receive_complete(self);

			new_icr |= ICR_EFSFI;
		}
	}
	/* Finished with DMA */
	if (isr & ISR_DMA_I) {
		w83977af_dma_xmit_complete(self);

		/* Check if there are more frames to be transmitted */
		/* if (irda_device_txqueue_empty(self)) { */

		/*
		 * Prepare for receive
		 *
		 * ** Netwinder Tx DMA likes that we do this anyway **
		 */
		w83977af_dma_receive(self);
		new_icr = ICR_EFSFI;
		/* } */
	}

	/* Restore set */
	outb(set, iobase + SSR);

	return new_icr;
}

/*
 * Function w83977af_interrupt (irq, dev_id)
 *
 *    An interrupt from the chip has arrived. Time to do some work
 *
 */
static irqreturn_t w83977af_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct w83977af_ir *self;
	__u8 set, icr, isr;
	int iobase;

	self = netdev_priv(dev);

	iobase = self->io.fir_base;

	/* Save current bank */
	set = inb(iobase + SSR);
	switch_bank(iobase, SET0);

	icr = inb(iobase + ICR);
	isr = inb(iobase + ISR) & icr; /* Mask out the interesting ones */

	outb(0, iobase + ICR); /* Disable interrupts */

	if (isr) {
		/* Dispatch interrupt handler for the current speed */
		if (self->io.speed > PIO_MAX_SPEED)
			icr = w83977af_fir_interrupt(self, isr);
		else
			icr = w83977af_sir_interrupt(self, isr);
	}

	outb(icr, iobase + ICR); /* Restore (new) interrupts */
	outb(set, iobase + SSR); /* Restore bank register */
	return IRQ_RETVAL(isr);
}

/*
 * Function w83977af_is_receiving (self)
 *
 *    Return TRUE if we are currently receiving a frame
 *
 */
static int w83977af_is_receiving(struct w83977af_ir *self)
{
	int status = FALSE;
	int iobase;
	__u8 set;

	IRDA_ASSERT(self != NULL, return FALSE;);

	if (self->io.speed > 115200) {
		iobase = self->io.fir_base;

		/* Check if rx FIFO is not empty */
		set = inb(iobase + SSR);
		switch_bank(iobase, SET2);
		if ((inb(iobase + RXFDTH) & 0x3f) != 0) {
			/* We are receiving something */
			status = TRUE;
		}
		outb(set, iobase + SSR);
	} else
		status = (self->rx_buff.state != OUTSIDE_FRAME);

	return status;
}

/*
 * Function w83977af_net_open (dev)
 *
 *    Start the device
 *
 */
static int w83977af_net_open(struct net_device *dev)
{
	struct w83977af_ir *self;
	int iobase;
	char hwname[32];
	__u8 set;

	IRDA_DEBUG(0, "%s()\n", __func__);

	IRDA_ASSERT(dev != NULL, return -1;);
	self = netdev_priv(dev);

	IRDA_ASSERT(self != NULL, return 0;);

	iobase = self->io.fir_base;

	if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name,
			(void *)dev)) {
		return -EAGAIN;
	}
	/*
	 * Always allocate the DMA channel after the IRQ,
	 * and clean up on failure.
	 */
	if (request_dma(self->io.dma, dev->name)) {
		free_irq(self->io.irq, dev);
		return -EAGAIN;
	}

	/* Save current set */
	set = inb(iobase + SSR);

	/* Enable some interrupts so we can receive frames again */
	switch_bank(iobase, SET0);
	if (self->io.speed > 115200) {
		outb(ICR_EFSFI, iobase + ICR);
		w83977af_dma_receive(self);
	} else
		outb(ICR_ERBRI, iobase + ICR);

	/* Restore bank register */
	outb(set, iobase + SSR);

	/* Ready to play! */
	netif_start_queue(dev);

	/* Give self a hardware name */
	sprintf(hwname, "w83977af @ 0x%03x", self->io.fir_base);

	/*
	 * Open new IrLAP layer instance, now that everything should be
	 * initialized properly
	 */
	self->irlap = irlap_open(dev, &self->qos, hwname);

	return 0;
}

/*
 * Function w83977af_net_close (dev)
 *
 *    Stop the device
 *
 */
static int w83977af_net_close(struct net_device *dev)
{
	struct w83977af_ir *self;
	int iobase;
	__u8 set;

	IRDA_DEBUG(0, "%s()\n", __func__);

	IRDA_ASSERT(dev != NULL, return -1;);

	self = netdev_priv(dev);

	IRDA_ASSERT(self != NULL, return 0;);

	iobase = self->io.fir_base;

	/* Stop device */
	netif_stop_queue(dev);

	/* Stop and remove instance of IrLAP */
	if (self->irlap)
		irlap_close(self->irlap);
	self->irlap = NULL;

	disable_dma(self->io.dma);

	/* Save current set */
	set = inb(iobase + SSR);

	/* Disable interrupts */
	switch_bank(iobase, SET0);
	outb(0, iobase + ICR);

	free_irq(self->io.irq, dev);
	free_dma(self->io.dma);

	/* Restore bank register */
	outb(set, iobase + SSR);

	return 0;
}

/*
 * Function w83977af_net_ioctl (dev, rq, cmd)
 *
 *    Process IOCTL commands for this device
 *
 */
static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct if_irda_req *irq = (struct if_irda_req *)rq;
	struct w83977af_ir *self;
	unsigned long flags;
	int ret = 0;

	IRDA_ASSERT(dev != NULL, return -1;);

	self = netdev_priv(dev);

	IRDA_ASSERT(self != NULL, return -1;);

	IRDA_DEBUG(2, "%s(), %s, (cmd=0x%X)\n", __func__, dev->name, cmd);

	spin_lock_irqsave(&self->lock, flags);

	switch (cmd) {
	case SIOCSBANDWIDTH: /* Set bandwidth */
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		w83977af_change_speed(self, irq->ifr_baudrate);
		break;
	case SIOCSMEDIABUSY: /* Set media busy */
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			goto out;
		}
		irda_device_set_media_busy(self->netdev, TRUE);
		break;
	case SIOCGRECEIVING: /* Check if we are receiving right now */
		irq->ifr_receiving = w83977af_is_receiving(self);
		break;
	default:
		ret = -EOPNOTSUPP;
	}
out:
	spin_unlock_irqrestore(&self->lock, flags);
	return ret;
}

MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
MODULE_DESCRIPTION("Winbond W83977AF IrDA Device Driver");
MODULE_LICENSE("GPL");

module_param(qos_mtt_bits, int, 0);
MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
module_param_array(io, int, NULL, 0);
MODULE_PARM_DESC(io, "Base I/O addresses");
module_param_array(irq, int, NULL, 0);
MODULE_PARM_DESC(irq, "IRQ lines");
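/*
 * Example (typical usage, not part of the original source): the module can
 * be loaded with explicit resources, e.g.
 *
 *     modprobe w83977af_ir io=0x180 irq=11
 *
 * Parameters that are not given fall back to the defaults in the io[], irq[]
 * and dma[] arrays above.
 */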
/*
 * Function init_module (void)
 *
 *    Called when the driver module is loaded.
 *
 */
module_init(w83977af_init);

/*
 * Function cleanup_module (void)
 *
 *    Called when the driver module is unloaded.
 *
 */
module_exit(w83977af_cleanup);