/* wanxl.c */
  1. /*
  2. * wanXL serial card driver for Linux
  3. * host part
  4. *
  5. * Copyright (C) 2003 Krzysztof Halasa <khc@pm.waw.pl>
  6. *
  7. * This program is free software; you can redistribute it and/or modify it
  8. * under the terms of version 2 of the GNU General Public License
  9. * as published by the Free Software Foundation.
  10. *
  11. * Status:
  12. * - Only DTE (external clock) support with NRZ and NRZI encodings
  13. * - wanXL100 will require minor driver modifications, no access to hw
  14. */
  15. #include <linux/module.h>
  16. #include <linux/kernel.h>
  17. #include <linux/slab.h>
  18. #include <linux/sched.h>
  19. #include <linux/types.h>
  20. #include <linux/fcntl.h>
  21. #include <linux/string.h>
  22. #include <linux/errno.h>
  23. #include <linux/init.h>
  24. #include <linux/ioport.h>
  25. #include <linux/netdevice.h>
  26. #include <linux/hdlc.h>
  27. #include <linux/pci.h>
  28. #include <linux/dma-mapping.h>
  29. #include <linux/delay.h>
  30. #include <asm/io.h>
  31. #include "wanxl.h"
  32. static const char* version = "wanXL serial card driver version: 0.48";
/* Bit in the PLX9060 CONTROL register that holds the adapter in reset. */
#define PLX_CTL_RESET	0x40000000 /* adapter reset */

/* Compile-time debug switches - both disabled by default. */
#undef DEBUG_PKT
#undef DEBUG_PCI

/* MAILBOX #1 - PUTS COMMANDS */
#define MBX1_CMD_ABORTJ	0x85000000 /* Abort and Jump */
#ifdef __LITTLE_ENDIAN
#define MBX1_CMD_BSWAP	0x8C000001 /* little-endian Byte Swap Mode */
#else
#define MBX1_CMD_BSWAP	0x8C000000 /* big-endian Byte Swap Mode */
#endif

/* MAILBOX #2 - DRAM SIZE */
#define MBX2_MEMSZ_MASK	0xFFFF0000 /* PUTS Memory Size Register mask */
  45. typedef struct {
  46. struct net_device *dev;
  47. struct card_t *card;
  48. spinlock_t lock; /* for wanxl_xmit */
  49. int node; /* physical port #0 - 3 */
  50. unsigned int clock_type;
  51. int tx_in, tx_out;
  52. struct sk_buff *tx_skbs[TX_BUFFERS];
  53. }port_t;
  54. typedef struct {
  55. desc_t rx_descs[RX_QUEUE_LENGTH];
  56. port_status_t port_status[4];
  57. }card_status_t;
  58. typedef struct card_t {
  59. int n_ports; /* 1, 2 or 4 ports */
  60. u8 irq;
  61. u8 __iomem *plx; /* PLX PCI9060 virtual base address */
  62. struct pci_dev *pdev; /* for pci_name(pdev) */
  63. int rx_in;
  64. struct sk_buff *rx_skbs[RX_QUEUE_LENGTH];
  65. card_status_t *status; /* shared between host and card */
  66. dma_addr_t status_address;
  67. port_t ports[0]; /* 1 - 4 port_t structures follow */
  68. }card_t;
  69. static inline port_t* dev_to_port(struct net_device *dev)
  70. {
  71. return (port_t *)dev_to_hdlc(dev)->priv;
  72. }
  73. static inline port_status_t* get_status(port_t *port)
  74. {
  75. return &port->card->status->port_status[port->node];
  76. }
#ifdef DEBUG_PCI
/* Debug wrapper around pci_map_single(): warn if a streaming mapping
 * ends above the 4 GB boundary (addr + size crosses 2^32), since the
 * card's DMA engine is 32-bit.  Installed via the macro below. */
static inline dma_addr_t pci_map_single_debug(struct pci_dev *pdev, void *ptr,
					      size_t size, int direction)
{
	dma_addr_t addr = pci_map_single(pdev, ptr, size, direction);
	if (addr + size > 0x100000000LL)
		printk(KERN_CRIT "wanXL %s: pci_map_single() returned memory"
		       " at 0x%LX!\n", pci_name(pdev),
		       (unsigned long long)addr);
	return addr;
}

/* Redirect every pci_map_single() call in this file to the checker. */
#undef pci_map_single
#define pci_map_single pci_map_single_debug
#endif
/* Cable and/or personality module change interrupt service.
 * Decodes the cable status word the firmware placed in shared memory,
 * logs the cable/module combination and updates the carrier state. */
static inline void wanxl_cable_intr(port_t *port)
{
	u32 value = get_status(port)->cable;
	int valid = 1;
	const char *cable, *pm, *dte = "", *dsr = "", *dcd = "";

	/* bits 0-2: attached cable type */
	switch(value & 0x7) {
	case STATUS_CABLE_V35: cable = "V.35"; break;
	case STATUS_CABLE_X21: cable = "X.21"; break;
	case STATUS_CABLE_V24: cable = "V.24"; break;
	case STATUS_CABLE_EIA530: cable = "EIA530"; break;
	case STATUS_CABLE_NONE: cable = "no"; break;
	default: cable = "invalid";
	}

	/* same 3-bit code, shifted: installed personality module type */
	switch((value >> STATUS_CABLE_PM_SHIFT) & 0x7) {
	case STATUS_CABLE_V35: pm = "V.35"; break;
	case STATUS_CABLE_X21: pm = "X.21"; break;
	case STATUS_CABLE_V24: pm = "V.24"; break;
	case STATUS_CABLE_EIA530: pm = "EIA530"; break;
	case STATUS_CABLE_NONE: pm = "no personality"; valid = 0; break;
	default: pm = "invalid personality"; valid = 0;
	}

	if (valid) {
		/* report modem leads only when cable and module types agree */
		if ((value & 7) == ((value >> STATUS_CABLE_PM_SHIFT) & 7)) {
			dsr = (value & STATUS_CABLE_DSR) ? ", DSR ON" :
				", DSR off";
			dcd = (value & STATUS_CABLE_DCD) ? ", carrier ON" :
				", carrier off";
		}
		dte = (value & STATUS_CABLE_DCE) ? " DCE" : " DTE";
	}
	printk(KERN_INFO "%s: %s%s module, %s cable%s%s\n",
	       port->dev->name, pm, dte, cable, dsr, dcd);

	/* propagate DCD to the net core so routing daemons see link state */
	if (value & STATUS_CABLE_DCD)
		netif_carrier_on(port->dev);
	else
		netif_carrier_off(port->dev);
}
/* Transmit complete interrupt service.
 * Reaps finished TX descriptors at port->tx_in, updates stats, unmaps
 * and frees the skbs, then wakes the queue once the ring has room. */
static inline void wanxl_tx_intr(port_t *port)
{
	struct net_device *dev = port->dev;
	while (1) {
		desc_t *desc = &get_status(port)->tx_descs[port->tx_in];
		struct sk_buff *skb = port->tx_skbs[port->tx_in];

		switch (desc->stat) {
		case PACKET_FULL:	/* still owned by the card */
		case PACKET_EMPTY:	/* ring fully reaped */
			netif_wake_queue(dev);
			return;

		case PACKET_UNDERRUN:
			dev->stats.tx_errors++;
			dev->stats.tx_fifo_errors++;
			break;

		default:		/* transmitted successfully */
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += skb->len;
		}
		desc->stat = PACKET_EMPTY; /* Free descriptor */
		pci_unmap_single(port->card->pdev, desc->address, skb->len,
				 PCI_DMA_TODEVICE);
		dev_kfree_skb_irq(skb);	/* IRQ-context-safe free */
		port->tx_in = (port->tx_in + 1) % TX_BUFFERS;
	}
}
/* Receive complete interrupt service.
 * Walks the shared RX descriptor ring at card->rx_in, hands completed
 * frames to the HDLC stack and refills each slot with a fresh skb
 * (or leaves it skb-less and counts a drop if allocation fails). */
static inline void wanxl_rx_intr(card_t *card)
{
	desc_t *desc;
	while (desc = &card->status->rx_descs[card->rx_in],
	       desc->stat != PACKET_EMPTY) {
		/* NOTE(review): ports are indexed 0..n_ports-1, so a value
		 * equal to n_ports would also be out of range - this check
		 * looks like it should be >=; confirm the range the firmware
		 * can put in PACKET_PORT_MASK before changing it. */
		if ((desc->stat & PACKET_PORT_MASK) > card->n_ports)
			printk(KERN_CRIT "wanXL %s: received packet for"
			       " nonexistent port\n", pci_name(card->pdev));
		else {
			struct sk_buff *skb = card->rx_skbs[card->rx_in];
			port_t *port = &card->ports[desc->stat &
						    PACKET_PORT_MASK];
			struct net_device *dev = port->dev;

			/* slot had no buffer (earlier alloc failure) */
			if (!skb)
				dev->stats.rx_dropped++;
			else {
				pci_unmap_single(card->pdev, desc->address,
						 BUFFER_LENGTH,
						 PCI_DMA_FROMDEVICE);
				skb_put(skb, desc->length);

#ifdef DEBUG_PKT
				printk(KERN_DEBUG "%s RX(%i):", dev->name,
				       skb->len);
				debug_frame(skb);
#endif
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += skb->len;
				skb->protocol = hdlc_type_trans(skb, dev);
				netif_rx(skb);
				skb = NULL;	/* slot needs a new buffer */
			}

			if (!skb) {
				/* refill; address 0 marks an empty slot */
				skb = dev_alloc_skb(BUFFER_LENGTH);
				desc->address = skb ?
					pci_map_single(card->pdev, skb->data,
						       BUFFER_LENGTH,
						       PCI_DMA_FROMDEVICE) : 0;
				card->rx_skbs[card->rx_in] = skb;
			}
		}
		desc->stat = PACKET_EMPTY; /* Free descriptor */
		card->rx_in = (card->rx_in + 1) % RX_QUEUE_LENGTH;
	}
}
/* Shared interrupt handler: read and acknowledge the card's doorbell
 * register, then dispatch per-port TX/cable events and the global RX
 * event.  Loops until the doorbell reads zero (IRQ line is shared). */
static irqreturn_t wanxl_intr(int irq, void* dev_id)
{
	card_t *card = dev_id;
	int i;
	u32 stat;
	int handled = 0;

	while((stat = readl(card->plx + PLX_DOORBELL_FROM_CARD)) != 0) {
		handled = 1;
		/* writing the bits back clears them (ack) */
		writel(stat, card->plx + PLX_DOORBELL_FROM_CARD);

		for (i = 0; i < card->n_ports; i++) {
			if (stat & (1 << (DOORBELL_FROM_CARD_TX_0 + i)))
				wanxl_tx_intr(&card->ports[i]);
			if (stat & (1 << (DOORBELL_FROM_CARD_CABLE_0 + i)))
				wanxl_cable_intr(&card->ports[i]);
		}
		if (stat & (1 << DOORBELL_FROM_CARD_RX))
			wanxl_rx_intr(card);
	}

	return IRQ_RETVAL(handled);
}
  221. static netdev_tx_t wanxl_xmit(struct sk_buff *skb, struct net_device *dev)
  222. {
  223. port_t *port = dev_to_port(dev);
  224. desc_t *desc;
  225. spin_lock(&port->lock);
  226. desc = &get_status(port)->tx_descs[port->tx_out];
  227. if (desc->stat != PACKET_EMPTY) {
  228. /* should never happen - previous xmit should stop queue */
  229. #ifdef DEBUG_PKT
  230. printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
  231. #endif
  232. netif_stop_queue(dev);
  233. spin_unlock_irq(&port->lock);
  234. return NETDEV_TX_BUSY; /* request packet to be queued */
  235. }
  236. #ifdef DEBUG_PKT
  237. printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
  238. debug_frame(skb);
  239. #endif
  240. port->tx_skbs[port->tx_out] = skb;
  241. desc->address = pci_map_single(port->card->pdev, skb->data, skb->len,
  242. PCI_DMA_TODEVICE);
  243. desc->length = skb->len;
  244. desc->stat = PACKET_FULL;
  245. writel(1 << (DOORBELL_TO_CARD_TX_0 + port->node),
  246. port->card->plx + PLX_DOORBELL_TO_CARD);
  247. port->tx_out = (port->tx_out + 1) % TX_BUFFERS;
  248. if (get_status(port)->tx_descs[port->tx_out].stat != PACKET_EMPTY) {
  249. netif_stop_queue(dev);
  250. #ifdef DEBUG_PKT
  251. printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
  252. #endif
  253. }
  254. spin_unlock(&port->lock);
  255. return NETDEV_TX_OK;
  256. }
  257. static int wanxl_attach(struct net_device *dev, unsigned short encoding,
  258. unsigned short parity)
  259. {
  260. port_t *port = dev_to_port(dev);
  261. if (encoding != ENCODING_NRZ &&
  262. encoding != ENCODING_NRZI)
  263. return -EINVAL;
  264. if (parity != PARITY_NONE &&
  265. parity != PARITY_CRC32_PR1_CCITT &&
  266. parity != PARITY_CRC16_PR1_CCITT &&
  267. parity != PARITY_CRC32_PR0_CCITT &&
  268. parity != PARITY_CRC16_PR0_CCITT)
  269. return -EINVAL;
  270. get_status(port)->encoding = encoding;
  271. get_status(port)->parity = parity;
  272. return 0;
  273. }
/* SIOCWANDEV ioctl handler: report (IF_GET_IFACE) or set
 * (IF_IFACE_SYNC_SERIAL) the synchronous-serial line parameters;
 * everything else is delegated to the generic HDLC layer. */
static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	const size_t size = sizeof(sync_serial_settings);
	sync_serial_settings line;
	port_t *port = dev_to_port(dev);

	if (cmd != SIOCWANDEV)
		return hdlc_ioctl(dev, ifr, cmd);

	switch (ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
		/* caller's buffer too small: tell it how much is needed */
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		line.clock_type = get_status(port)->clocking;
		line.clock_rate = 0;
		line.loopback = 0;

		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size))
			return -EFAULT;
		return 0;

	case IF_IFACE_SYNC_SERIAL:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		/* refuse to reconfigure a running interface */
		if (dev->flags & IFF_UP)
			return -EBUSY;

		if (copy_from_user(&line, ifr->ifr_settings.ifs_ifsu.sync,
				   size))
			return -EFAULT;

		/* hardware supports external clock / TX-from-RX only */
		if (line.clock_type != CLOCK_EXT &&
		    line.clock_type != CLOCK_TXFROMRX)
			return -EINVAL; /* No such clock setting */

		if (line.loopback != 0)
			return -EINVAL;

		get_status(port)->clocking = line.clock_type;
		return 0;

	default:
		return hdlc_ioctl(dev, ifr, cmd);
	}
}
/* Bring a port up: reset the TX ring, ring the OPEN doorbell and busy-
 * wait up to 1 s for the firmware to flag the port open in shared
 * memory.  On timeout, asks the card to close the port again. */
static int wanxl_open(struct net_device *dev)
{
	port_t *port = dev_to_port(dev);
	u8 __iomem *dbr = port->card->plx + PLX_DOORBELL_TO_CARD;
	unsigned long timeout;
	int i;

	if (get_status(port)->open) {
		printk(KERN_ERR "%s: port already open\n", dev->name);
		return -EIO;
	}
	if ((i = hdlc_open(dev)) != 0)
		return i;

	port->tx_in = port->tx_out = 0;
	for (i = 0; i < TX_BUFFERS; i++)
		get_status(port)->tx_descs[i].stat = PACKET_EMPTY;
	/* signal the card */
	writel(1 << (DOORBELL_TO_CARD_OPEN_0 + port->node), dbr);

	timeout = jiffies + HZ;
	do {
		if (get_status(port)->open) {
			netif_start_queue(dev);
			return 0;
		}
	} while (time_after(timeout, jiffies));

	printk(KERN_ERR "%s: unable to open port\n", dev->name);
	/* ask the card to close the port, should it be still alive */
	writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node), dbr);
	return -EFAULT;
}
/* Bring a port down: ring the CLOSE doorbell, wait up to 1 s for the
 * firmware to acknowledge, then reclaim any TX skbs the card never
 * transmitted (unmap DMA and free them). */
static int wanxl_close(struct net_device *dev)
{
	port_t *port = dev_to_port(dev);
	unsigned long timeout;
	int i;

	hdlc_close(dev);
	/* signal the card */
	writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node),
	       port->card->plx + PLX_DOORBELL_TO_CARD);

	timeout = jiffies + HZ;
	do {
		if (!get_status(port)->open)
			break;
	} while (time_after(timeout, jiffies));

	/* proceed with teardown either way; just log the failure */
	if (get_status(port)->open)
		printk(KERN_ERR "%s: unable to close port\n", dev->name);

	netif_stop_queue(dev);

	/* free TX buffers still owned by the (now closed) card */
	for (i = 0; i < TX_BUFFERS; i++) {
		desc_t *desc = &get_status(port)->tx_descs[i];

		if (desc->stat != PACKET_EMPTY) {
			desc->stat = PACKET_EMPTY;
			pci_unmap_single(port->card->pdev, desc->address,
					 port->tx_skbs[i]->len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb(port->tx_skbs[i]);
		}
	}
	return 0;
}
  371. static struct net_device_stats *wanxl_get_stats(struct net_device *dev)
  372. {
  373. port_t *port = dev_to_port(dev);
  374. dev->stats.rx_over_errors = get_status(port)->rx_overruns;
  375. dev->stats.rx_frame_errors = get_status(port)->rx_frame_errors;
  376. dev->stats.rx_errors = dev->stats.rx_over_errors +
  377. dev->stats.rx_frame_errors;
  378. return &dev->stats;
  379. }
  380. static int wanxl_puts_command(card_t *card, u32 cmd)
  381. {
  382. unsigned long timeout = jiffies + 5 * HZ;
  383. writel(cmd, card->plx + PLX_MAILBOX_1);
  384. do {
  385. if (readl(card->plx + PLX_MAILBOX_1) == 0)
  386. return 0;
  387. schedule();
  388. }while (time_after(timeout, jiffies));
  389. return -1;
  390. }
/* Hardware-reset the adapter: mark PUTS as in-progress in mailbox #0,
 * pulse the RESET bit in the PLX control register, and flush each
 * posted write with a read-back before/after the 1 us reset pulse. */
static void wanxl_reset(card_t *card)
{
	u32 old_value = readl(card->plx + PLX_CONTROL) & ~PLX_CTL_RESET;

	writel(0x80, card->plx + PLX_MAILBOX_0);
	writel(old_value | PLX_CTL_RESET, card->plx + PLX_CONTROL);
	readl(card->plx + PLX_CONTROL); /* wait for posted write */
	udelay(1);
	writel(old_value, card->plx + PLX_CONTROL);
	readl(card->plx + PLX_CONTROL); /* wait for posted write */
}
/* PCI remove callback; also used as the error-unwind path by
 * wanxl_pci_init_one(), so every step tolerates partially-initialized
 * state (NULL checks on irq/plx/status). */
static void wanxl_pci_remove_one(struct pci_dev *pdev)
{
	card_t *card = pci_get_drvdata(pdev);
	int i;

	/* tear down netdevs first so no new I/O can start */
	for (i = 0; i < card->n_ports; i++) {
		unregister_hdlc_device(card->ports[i].dev);
		free_netdev(card->ports[i].dev);
	}

	/* unregister and free all host resources */
	if (card->irq)
		free_irq(card->irq, card);

	/* stop the card before unmapping its DMA buffers */
	wanxl_reset(card);

	for (i = 0; i < RX_QUEUE_LENGTH; i++)
		if (card->rx_skbs[i]) {
			pci_unmap_single(card->pdev,
					 card->status->rx_descs[i].address,
					 BUFFER_LENGTH, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(card->rx_skbs[i]);
		}

	if (card->plx)
		iounmap(card->plx);

	if (card->status)
		pci_free_consistent(pdev, sizeof(card_status_t),
				    card->status, card->status_address);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	kfree(card);
}
  430. #include "wanxlfw.inc"
/* netdev callbacks; TX goes through the HDLC layer, which in turn
 * calls wanxl_xmit via the hdlc->xmit hook set in probe. */
static const struct net_device_ops wanxl_ops = {
	.ndo_open       = wanxl_open,
	.ndo_stop       = wanxl_close,
	.ndo_change_mtu = hdlc_change_mtu,
	.ndo_start_xmit = hdlc_start_xmit,
	.ndo_do_ioctl   = wanxl_ioctl,
	.ndo_get_stats  = wanxl_get_stats,
};
/* PCI probe: enable the device, allocate the shared status block,
 * map the PLX registers, wait for the card's PUTS self-test, upload
 * the firmware, start it, then register one hdlc netdev per port.
 * All error paths unwind through wanxl_pci_remove_one(). */
static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
					const struct pci_device_id *ent)
{
	card_t *card;
	u32 ramsize, stat;
	unsigned long timeout;
	u32 plx_phy;		/* PLX PCI base address */
	u32 mem_phy;		/* memory PCI base addr */
	u8 __iomem *mem;	/* memory virtual base addr */
	int i, ports, alloc_size;

#ifndef MODULE
	/* built-in: print the banner once, on first probe */
	static int printed_version;
	if (!printed_version) {
		printed_version++;
		printk(KERN_INFO "%s\n", version);
	}
#endif

	i = pci_enable_device(pdev);
	if (i)
		return i;

	/* QUICC can only access first 256 MB of host RAM directly,
	   but PLX9060 DMA does 32-bits for actual packet data transfers */

	/* FIXME when PCI/DMA subsystems are fixed.
	   We set both dma_mask and consistent_dma_mask to 28 bits
	   and pray pci_alloc_consistent() will use this info. It should
	   work on most platforms */
	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(28)) ||
	    pci_set_dma_mask(pdev, DMA_BIT_MASK(28))) {
		printk(KERN_ERR "wanXL: No usable DMA configuration\n");
		return -EIO;
	}

	i = pci_request_regions(pdev, "wanXL");
	if (i) {
		pci_disable_device(pdev);
		return i;
	}

	/* port count is implied by the PCI device ID */
	switch (pdev->device) {
	case PCI_DEVICE_ID_SBE_WANXL100: ports = 1; break;
	case PCI_DEVICE_ID_SBE_WANXL200: ports = 2; break;
	default: ports = 4;
	}

	/* card_t is followed in memory by one port_t per port */
	alloc_size = sizeof(card_t) + ports * sizeof(port_t);
	card = kzalloc(alloc_size, GFP_KERNEL);
	if (card == NULL) {
		printk(KERN_ERR "wanXL %s: unable to allocate memory\n",
		       pci_name(pdev));
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		return -ENOBUFS;
	}

	pci_set_drvdata(pdev, card);
	card->pdev = pdev;

	/* coherent buffer shared with the firmware (28-bit constrained) */
	card->status = pci_alloc_consistent(pdev, sizeof(card_status_t),
					    &card->status_address);
	if (card->status == NULL) {
		wanxl_pci_remove_one(pdev);
		return -ENOBUFS;
	}

#ifdef DEBUG_PCI
	printk(KERN_DEBUG "wanXL %s: pci_alloc_consistent() returned memory"
	       " at 0x%LX\n", pci_name(pdev),
	       (unsigned long long)card->status_address);
#endif

	/* FIXME when PCI/DMA subsystems are fixed.
	   We set both dma_mask and consistent_dma_mask back to 32 bits
	   to indicate the card can do 32-bit DMA addressing */
	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) ||
	    pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		printk(KERN_ERR "wanXL: No usable DMA configuration\n");
		wanxl_pci_remove_one(pdev);
		return -EIO;
	}

	/* set up PLX mapping */
	plx_phy = pci_resource_start(pdev, 0);

	card->plx = ioremap_nocache(plx_phy, 0x70);
	if (!card->plx) {
		printk(KERN_ERR "wanxl: ioremap() failed\n");
		wanxl_pci_remove_one(pdev);
		return -EFAULT;
	}

#if RESET_WHILE_LOADING
	wanxl_reset(card);
#endif

	/* wait (up to 20 s) for the on-card PUTS self-test to finish;
	   mailbox #0 goes to zero on success */
	timeout = jiffies + 20 * HZ;
	while ((stat = readl(card->plx + PLX_MAILBOX_0)) != 0) {
		if (time_before(timeout, jiffies)) {
			printk(KERN_WARNING "wanXL %s: timeout waiting for"
			       " PUTS to complete\n", pci_name(pdev));
			wanxl_pci_remove_one(pdev);
			return -ENODEV;
		}

		switch(stat & 0xC0) {
		case 0x00:	/* hmm - PUTS completed with non-zero code? */
		case 0x80:	/* PUTS still testing the hardware */
			break;

		default:
			/* NOTE(review): message prints stat & 0x30 while the
			   switch tests stat & 0xC0 - confirm which bits carry
			   the failing test number */
			printk(KERN_WARNING "wanXL %s: PUTS test 0x%X"
			       " failed\n", pci_name(pdev), stat & 0x30);
			wanxl_pci_remove_one(pdev);
			return -ENODEV;
		}

		schedule();
	}

	/* get on-board memory size (PUTS detects no more than 4 MB) */
	ramsize = readl(card->plx + PLX_MAILBOX_2) & MBX2_MEMSZ_MASK;

	/* set up on-board RAM mapping */
	mem_phy = pci_resource_start(pdev, 2);

	/* sanity check the board's reported memory size */
	if (ramsize < BUFFERS_ADDR +
	    (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports) {
		printk(KERN_WARNING "wanXL %s: no enough on-board RAM"
		       " (%u bytes detected, %u bytes required)\n",
		       pci_name(pdev), ramsize, BUFFERS_ADDR +
		       (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports);
		wanxl_pci_remove_one(pdev);
		return -ENODEV;
	}

	/* select host byte order for card<->host transfers */
	if (wanxl_puts_command(card, MBX1_CMD_BSWAP)) {
		printk(KERN_WARNING "wanXL %s: unable to Set Byte Swap"
		       " Mode\n", pci_name(pdev));
		wanxl_pci_remove_one(pdev);
		return -ENODEV;
	}

	/* pre-fill the RX ring; a NULL skb leaves address 0 (empty slot) */
	for (i = 0; i < RX_QUEUE_LENGTH; i++) {
		struct sk_buff *skb = dev_alloc_skb(BUFFER_LENGTH);
		card->rx_skbs[i] = skb;
		if (skb)
			card->status->rx_descs[i].address =
				pci_map_single(card->pdev, skb->data,
					       BUFFER_LENGTH,
					       PCI_DMA_FROMDEVICE);
	}

	/* copy the firmware (byte-swapped to card order) into on-board RAM */
	mem = ioremap_nocache(mem_phy, PDM_OFFSET + sizeof(firmware));
	if (!mem) {
		printk(KERN_ERR "wanxl: ioremap() failed\n");
		wanxl_pci_remove_one(pdev);
		return -EFAULT;
	}

	for (i = 0; i < sizeof(firmware); i += 4)
		writel(ntohl(*(__be32*)(firmware + i)), mem + PDM_OFFSET + i);

	/* tell the firmware where each port's status block lives (bus addr) */
	for (i = 0; i < ports; i++)
		writel(card->status_address +
		       (void *)&card->status->port_status[i] -
		       (void *)card->status, mem + PDM_OFFSET + 4 + i * 4);
	writel(card->status_address, mem + PDM_OFFSET + 20);
	writel(PDM_OFFSET, mem);
	iounmap(mem);

	/* mailbox #5 will be set non-zero by the running firmware */
	writel(0, card->plx + PLX_MAILBOX_5);

	if (wanxl_puts_command(card, MBX1_CMD_ABORTJ)) {
		printk(KERN_WARNING "wanXL %s: unable to Abort and Jump\n",
		       pci_name(pdev));
		wanxl_pci_remove_one(pdev);
		return -ENODEV;
	}

	/* wait up to 5 s for the firmware to come alive */
	stat = 0;
	timeout = jiffies + 5 * HZ;
	do {
		if ((stat = readl(card->plx + PLX_MAILBOX_5)) != 0)
			break;
		schedule();
	}while (time_after(timeout, jiffies));

	if (!stat) {
		printk(KERN_WARNING "wanXL %s: timeout while initializing card "
		       "firmware\n", pci_name(pdev));
		wanxl_pci_remove_one(pdev);
		return -ENODEV;
	}

#if DETECT_RAM
	ramsize = stat;	/* firmware reports the true RAM size here */
#endif

	printk(KERN_INFO "wanXL %s: at 0x%X, %u KB of RAM at 0x%X, irq %u\n",
	       pci_name(pdev), plx_phy, ramsize / 1024, mem_phy, pdev->irq);

	/* Allocate IRQ */
	if (request_irq(pdev->irq, wanxl_intr, IRQF_SHARED, "wanXL", card)) {
		printk(KERN_WARNING "wanXL %s: could not allocate IRQ%i.\n",
		       pci_name(pdev), pdev->irq);
		wanxl_pci_remove_one(pdev);
		return -EBUSY;
	}
	card->irq = pdev->irq;

	/* one hdlc netdev per physical port; n_ports counts successes so
	   the unwind path only unregisters what was registered */
	for (i = 0; i < ports; i++) {
		hdlc_device *hdlc;
		port_t *port = &card->ports[i];
		struct net_device *dev = alloc_hdlcdev(port);
		if (!dev) {
			printk(KERN_ERR "wanXL %s: unable to allocate"
			       " memory\n", pci_name(pdev));
			wanxl_pci_remove_one(pdev);
			return -ENOMEM;
		}

		port->dev = dev;
		hdlc = dev_to_hdlc(dev);
		spin_lock_init(&port->lock);
		dev->tx_queue_len = 50;
		dev->netdev_ops = &wanxl_ops;
		hdlc->attach = wanxl_attach;
		hdlc->xmit = wanxl_xmit;
		port->card = card;
		port->node = i;
		get_status(port)->clocking = CLOCK_EXT;
		if (register_hdlc_device(dev)) {
			printk(KERN_ERR "wanXL %s: unable to register hdlc"
			       " device\n", pci_name(pdev));
			free_netdev(dev);
			wanxl_pci_remove_one(pdev);
			return -ENOBUFS;
		}
		card->n_ports++;
	}

	printk(KERN_INFO "wanXL %s: port", pci_name(pdev));
	for (i = 0; i < ports; i++)
		printk("%s #%i: %s", i ? "," : "", i,
		       card->ports[i].dev->name);
	printk("\n");

	for (i = 0; i < ports; i++)
		wanxl_cable_intr(&card->ports[i]); /* get carrier status etc.*/

	return 0;
}
/* SBE wanXL 1/2/4-port variants; the device ID selects the port count
 * in wanxl_pci_init_one(). */
static DEFINE_PCI_DEVICE_TABLE(wanxl_pci_tbl) = {
	{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL100, PCI_ANY_ID,
	  PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL200, PCI_ANY_ID,
	  PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL400, PCI_ANY_ID,
	  PCI_ANY_ID, 0, 0, 0 },
	{ 0, }			/* terminator */
};
/* PCI driver glue: probe/remove registered from wanxl_init_module(). */
static struct pci_driver wanxl_pci_driver = {
	.name		= "wanXL",
	.id_table	= wanxl_pci_tbl,
	.probe		= wanxl_pci_init_one,
	.remove		= wanxl_pci_remove_one,
};
/* Module entry point: print the banner (modular builds only; built-in
 * kernels print it on first probe) and register the PCI driver. */
static int __init wanxl_init_module(void)
{
#ifdef MODULE
	printk(KERN_INFO "%s\n", version);
#endif
	return pci_register_driver(&wanxl_pci_driver);
}
/* Module exit point: unregistering the driver triggers
 * wanxl_pci_remove_one() for every bound card. */
static void __exit wanxl_cleanup_module(void)
{
	pci_unregister_driver(&wanxl_pci_driver);
}
/* Standard module metadata and entry/exit registration. */
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("SBE Inc. wanXL serial port driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, wanxl_pci_tbl);

module_init(wanxl_init_module);
module_exit(wanxl_cleanup_module);