/*
 * Dave DNET Ethernet Controller driver
 *
 * Copyright (C) 2008 Dave S.r.l. <www.dave.eu>
 * Copyright (C) 2009 Ilya Yanok, Emcraft Systems Ltd, <yanok@emcraft.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/io.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/phy.h>

#include "dnet.h"

#undef DEBUG
/* function for reading internal MAC register */
static u16 dnet_readw_mac(struct dnet *bp, u16 reg)
{
	u16 data_read;

	/* issue a read */
	dnet_writel(bp, reg, MACREG_ADDR);

	/* since a read/write op to the MAC is very slow,
	 * we must wait before reading the data */
	ndelay(500);

	/* read data read from the MAC register */
	data_read = dnet_readl(bp, MACREG_DATA);

	/* all done */
	return data_read;
}
/* function for writing internal MAC register */
static void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val)
{
	/* load data to write */
	dnet_writel(bp, val, MACREG_DATA);

	/* issue a write */
	dnet_writel(bp, reg | DNET_INTERNAL_WRITE, MACREG_ADDR);

	/* since a read/write op to the MAC is very slow,
	 * we must wait before exiting */
	ndelay(500);
}
/*
 * Program dev->dev_addr into the three 16-bit internal MAC address
 * registers, two octets (big-endian / network order) per register.
 */
static void __dnet_set_hwaddr(struct dnet *bp)
{
	u16 tmp;

	tmp = be16_to_cpup((__be16 *)bp->dev->dev_addr);
	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp);
	tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 2));
	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp);
	tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 4));
	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp);
}
/*
 * Read the MAC address back out of the internal registers; adopt it as
 * dev->dev_addr only if it is a valid ethernet address (e.g. one that
 * was programmed by a bootloader).
 */
static void __devinit dnet_get_hwaddr(struct dnet *bp)
{
	u16 tmp;
	u8 addr[6];

	/*
	 * from MAC docs:
	 * "Note that the MAC address is stored in the registers in Hexadecimal
	 * form. For example, to set the MAC Address to: AC-DE-48-00-00-80
	 * would require writing 0xAC (octet 0) to address 0x0B (high byte of
	 * Mac_addr[15:0]), 0xDE (octet 1) to address 0x0A (Low byte of
	 * Mac_addr[15:0]), 0x48 (octet 2) to address 0x0D (high byte of
	 * Mac_addr[15:0]), 0x00 (octet 3) to address 0x0C (Low byte of
	 * Mac_addr[15:0]), 0x00 (octet 4) to address 0x0F (high byte of
	 * Mac_addr[15:0]), and 0x80 (octet 5) to address * 0x0E (Low byte of
	 * Mac_addr[15:0]).
	 */
	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG);
	*((__be16 *)addr) = cpu_to_be16(tmp);
	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG);
	*((__be16 *)(addr + 2)) = cpu_to_be16(tmp);
	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG);
	*((__be16 *)(addr + 4)) = cpu_to_be16(tmp);

	if (is_valid_ether_addr(addr))
		memcpy(bp->dev->dev_addr, addr, sizeof(addr));
}
/*
 * MDIO bus read: read PHY register @regnum of PHY @mii_id through the
 * MAC's GMII management interface.
 *
 * NOTE(review): both wait loops poll CMD_FIN with no timeout, so a
 * wedged management block would hang here — confirm acceptable.
 */
static int dnet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct dnet *bp = bus->priv;
	u16 value;

	/* wait for any previous management operation to finish */
	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
	       & DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	/* only 5 bits allowed for phy-addr and reg_offset */
	mii_id &= 0x1f;
	regnum &= 0x1f;

	/* prepare reg_value for a read */
	value = (mii_id << 8);
	value |= regnum;

	/* write control word */
	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, value);

	/* wait for end of transfer */
	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
	       & DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	value = dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG);

	pr_debug("mdio_read %02x:%02x <- %04x\n", mii_id, regnum, value);

	return value;
}
/*
 * MDIO bus write: write @value to PHY register @regnum of PHY @mii_id
 * through the MAC's GMII management interface.  Always returns 0.
 */
static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct dnet *bp = bus->priv;
	u16 tmp;

	pr_debug("mdio_write %02x:%02x <- %04x\n", mii_id, regnum, value);

	/* wait for any previous management operation to finish */
	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
	       & DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	/* prepare for a write operation (bit 13 of the control word) */
	tmp = (1 << 13);

	/* only 5 bits allowed for phy-addr and reg_offset */
	mii_id &= 0x1f;
	regnum &= 0x1f;

	/* only 16 bits on data */
	value &= 0xffff;

	/* prepare reg_value for a write */
	tmp |= (mii_id << 8);
	tmp |= regnum;

	/* write data to write first */
	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG, value);

	/* write control word */
	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, tmp);

	/* wait for the transfer to complete */
	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
	       & DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	return 0;
}
/* No controller-specific MDIO bus reset is performed. */
static int dnet_mdio_reset(struct mii_bus *bus)
{
	return 0;
}
/*
 * PHY link status callback (invoked by the phylib state machine).
 *
 * Mirrors the PHY's duplex/speed/link state into the MAC's MODE and
 * RXTX_CONTROL registers.  Register updates happen under bp->lock;
 * the informational printk is issued after the lock is dropped.
 */
static void dnet_handle_link_change(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	struct phy_device *phydev = bp->phy_dev;
	unsigned long flags;
	u32 mode_reg, ctl_reg;
	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	mode_reg = dnet_readw_mac(bp, DNET_INTERNAL_MODE_REG);
	ctl_reg = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);

	if (phydev->link) {
		/* duplex change: half duplex is enabled by setting the bit */
		if (bp->duplex != phydev->duplex) {
			if (phydev->duplex)
				ctl_reg &=
				    ~(DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP);
			else
				ctl_reg |=
				    DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP;
			bp->duplex = phydev->duplex;
			status_change = 1;
		}

		/* speed change: only the gigabit-enable bit is switched */
		if (bp->speed != phydev->speed) {
			status_change = 1;
			switch (phydev->speed) {
			case 1000:
				mode_reg |= DNET_INTERNAL_MODE_GBITEN;
				break;
			case 100:
			case 10:
				mode_reg &= ~DNET_INTERNAL_MODE_GBITEN;
				break;
			default:
				printk(KERN_WARNING
				       "%s: Ack! Speed (%d) is not "
				       "10/100/1000!\n", dev->name,
				       phydev->speed);
				break;
			}
			bp->speed = phydev->speed;
		}
	}

	if (phydev->link != bp->link) {
		if (phydev->link) {
			mode_reg |=
			    (DNET_INTERNAL_MODE_RXEN | DNET_INTERNAL_MODE_TXEN);
		} else {
			/* link down: stop rx/tx and reset cached state */
			mode_reg &=
			    ~(DNET_INTERNAL_MODE_RXEN |
			      DNET_INTERNAL_MODE_TXEN);
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, ctl_reg);
		dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, mode_reg);
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
		if (phydev->link)
			printk(KERN_INFO "%s: link up (%d/%s)\n",
			       dev->name, phydev->speed,
			       DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
		else
			printk(KERN_INFO "%s: link down\n", dev->name);
	}
}
/*
 * Find the first PHY on the MDIO bus and connect it to the MAC, using
 * RMII or MII depending on the capabilities read from the hardware.
 * Returns 0 on success or a negative errno.
 */
static int dnet_mii_probe(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	struct phy_device *phydev = NULL;
	int phy_addr;

	/* find the first phy */
	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
		if (bp->mii_bus->phy_map[phy_addr]) {
			phydev = bp->mii_bus->phy_map[phy_addr];
			break;
		}
	}

	if (!phydev) {
		printk(KERN_ERR "%s: no PHY found\n", dev->name);
		return -ENODEV;
	}

	/* TODO : add pin_irq */

	/* attach the mac to the phy */
	if (bp->capabilities & DNET_HAS_RMII) {
		phydev = phy_connect(dev, dev_name(&phydev->dev),
				     &dnet_handle_link_change, 0,
				     PHY_INTERFACE_MODE_RMII);
	} else {
		phydev = phy_connect(dev, dev_name(&phydev->dev),
				     &dnet_handle_link_change, 0,
				     PHY_INTERFACE_MODE_MII);
	}

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	/* mask with MAC supported features */
	if (bp->capabilities & DNET_HAS_GIGABIT)
		phydev->supported &= PHY_GBIT_FEATURES;
	else
		phydev->supported &= PHY_BASIC_FEATURES;

	phydev->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause;

	phydev->advertising = phydev->supported;

	/* cached link state starts as "down, unknown" */
	bp->link = 0;
	bp->speed = 0;
	bp->duplex = -1;
	bp->phy_dev = phydev;

	return 0;
}
/*
 * Allocate and register the driver's MDIO bus, then probe for the PHY.
 * On failure resources are released in reverse order via the goto
 * cleanup chain.  Returns 0 on success or a negative errno.
 */
static int dnet_mii_init(struct dnet *bp)
{
	int err, i;

	bp->mii_bus = mdiobus_alloc();
	if (bp->mii_bus == NULL)
		return -ENOMEM;

	bp->mii_bus->name = "dnet_mii_bus";
	bp->mii_bus->read = &dnet_mdio_read;
	bp->mii_bus->write = &dnet_mdio_write;
	bp->mii_bus->reset = &dnet_mdio_reset;
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
	bp->mii_bus->priv = bp;

	/* one irq slot per possible PHY address, all polled */
	bp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!bp->mii_bus->irq) {
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		bp->mii_bus->irq[i] = PHY_POLL;

	if (mdiobus_register(bp->mii_bus)) {
		err = -ENXIO;
		goto err_out_free_mdio_irq;
	}

	if (dnet_mii_probe(bp->dev) != 0) {
		err = -ENXIO;
		goto err_out_unregister_bus;
	}

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out_free_mdio_irq:
	kfree(bp->mii_bus->irq);
err_out:
	mdiobus_free(bp->mii_bus);
	return err;
}
/* For Neptune board: LINK1000 as Link LED and TX as activity LED */
static int dnet_phy_marvell_fixup(struct phy_device *phydev)
{
	/* program PHY register 0x18 (LED control on the Marvell 88E1111) */
	return phy_write(phydev, 0x18, 0x4148);
}
/*
 * Accumulate the hardware statistics counters into bp->hw_stats.
 *
 * The dnet_stats fields are laid out in the same order as the counter
 * registers, so each register range is walked with a single pointer
 * loop; the WARN_ONs verify the struct and register layouts agree.
 */
static void dnet_update_stats(struct dnet *bp)
{
	u32 __iomem *reg = bp->regs + DNET_RX_PKT_IGNR_CNT;
	u32 *p = &bp->hw_stats.rx_pkt_ignr;
	u32 *end = &bp->hw_stats.rx_byte + 1;

	WARN_ON((unsigned long)(end - p - 1) !=
		(DNET_RX_BYTE_CNT - DNET_RX_PKT_IGNR_CNT) / 4);

	for (; p < end; p++, reg++)
		*p += readl(reg);

	reg = bp->regs + DNET_TX_UNICAST_CNT;
	p = &bp->hw_stats.tx_unicast;
	end = &bp->hw_stats.tx_byte + 1;

	WARN_ON((unsigned long)(end - p - 1) !=
		(DNET_TX_BYTE_CNT - DNET_TX_UNICAST_CNT) / 4);

	for (; p < end; p++, reg++)
		*p += readl(reg);
}
  312. static int dnet_poll(struct napi_struct *napi, int budget)
  313. {
  314. struct dnet *bp = container_of(napi, struct dnet, napi);
  315. struct net_device *dev = bp->dev;
  316. int npackets = 0;
  317. unsigned int pkt_len;
  318. struct sk_buff *skb;
  319. unsigned int *data_ptr;
  320. u32 int_enable;
  321. u32 cmd_word;
  322. int i;
  323. while (npackets < budget) {
  324. /*
  325. * break out of while loop if there are no more
  326. * packets waiting
  327. */
  328. if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) {
  329. napi_complete(napi);
  330. int_enable = dnet_readl(bp, INTR_ENB);
  331. int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
  332. dnet_writel(bp, int_enable, INTR_ENB);
  333. return 0;
  334. }
  335. cmd_word = dnet_readl(bp, RX_LEN_FIFO);
  336. pkt_len = cmd_word & 0xFFFF;
  337. if (cmd_word & 0xDF180000)
  338. printk(KERN_ERR "%s packet receive error %x\n",
  339. __func__, cmd_word);
  340. skb = dev_alloc_skb(pkt_len + 5);
  341. if (skb != NULL) {
  342. /* Align IP on 16 byte boundaries */
  343. skb_reserve(skb, 2);
  344. /*
  345. * 'skb_put()' points to the start of sk_buff
  346. * data area.
  347. */
  348. data_ptr = (unsigned int *)skb_put(skb, pkt_len);
  349. for (i = 0; i < (pkt_len + 3) >> 2; i++)
  350. *data_ptr++ = dnet_readl(bp, RX_DATA_FIFO);
  351. skb->protocol = eth_type_trans(skb, dev);
  352. netif_receive_skb(skb);
  353. npackets++;
  354. } else
  355. printk(KERN_NOTICE
  356. "%s: No memory to allocate a sk_buff of "
  357. "size %u.\n", dev->name, pkt_len);
  358. }
  359. budget -= npackets;
  360. if (npackets < budget) {
  361. /* We processed all packets available. Tell NAPI it can
  362. * stop polling then re-enable rx interrupts */
  363. napi_complete(napi);
  364. int_enable = dnet_readl(bp, INTR_ENB);
  365. int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
  366. dnet_writel(bp, int_enable, INTR_ENB);
  367. return 0;
  368. }
  369. /* There are still packets waiting */
  370. return 1;
  371. }
/*
 * Interrupt handler.  INTR_SRC is clear-on-read; only currently
 * enabled sources are acted upon:
 *  - TX FIFO almost empty: restart the transmit queue,
 *  - RX/TX FIFO overflow errors: flush the affected FIFOs,
 *  - RX command FIFO almost full: mask RX irqs and schedule NAPI.
 */
static irqreturn_t dnet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct dnet *bp = netdev_priv(dev);
	u32 int_src, int_enable, int_current;
	unsigned long flags;
	unsigned int handled = 0;

	spin_lock_irqsave(&bp->lock, flags);

	/* read and clear the DNET irq (clear on read) */
	int_src = dnet_readl(bp, INTR_SRC);
	int_enable = dnet_readl(bp, INTR_ENB);
	int_current = int_src & int_enable;

	/* restart the queue if we had stopped it for TX fifo almost full */
	if (int_current & DNET_INTR_SRC_TX_FIFOAE) {
		int_enable = dnet_readl(bp, INTR_ENB);
		int_enable &= ~DNET_INTR_ENB_TX_FIFOAE;
		dnet_writel(bp, int_enable, INTR_ENB);
		netif_wake_queue(dev);
		handled = 1;
	}

	/* RX FIFO error checking */
	if (int_current &
	    (DNET_INTR_SRC_RX_CMDFIFOFF | DNET_INTR_SRC_RX_DATAFIFOFF)) {
		printk(KERN_ERR "%s: RX fifo error %x, irq %x\n", __func__,
		       dnet_readl(bp, RX_STATUS), int_current);
		/* we can only flush the RX FIFOs */
		dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH, SYS_CTL);
		ndelay(500);
		dnet_writel(bp, 0, SYS_CTL);
		handled = 1;
	}

	/* TX FIFO error checking */
	if (int_current &
	    (DNET_INTR_SRC_TX_FIFOFULL | DNET_INTR_SRC_TX_DISCFRM)) {
		printk(KERN_ERR "%s: TX fifo error %x, irq %x\n", __func__,
		       dnet_readl(bp, TX_STATUS), int_current);
		/* we can only flush the TX FIFOs */
		dnet_writel(bp, DNET_SYS_CTL_TXFIFOFLUSH, SYS_CTL);
		ndelay(500);
		dnet_writel(bp, 0, SYS_CTL);
		handled = 1;
	}

	if (int_current & DNET_INTR_SRC_RX_CMDFIFOAF) {
		if (napi_schedule_prep(&bp->napi)) {
			/*
			 * There's no point taking any more interrupts
			 * until we have processed the buffers
			 */
			/* Disable Rx interrupts and schedule NAPI poll */
			int_enable = dnet_readl(bp, INTR_ENB);
			int_enable &= ~DNET_INTR_SRC_RX_CMDFIFOAF;
			dnet_writel(bp, int_enable, INTR_ENB);
			__napi_schedule(&bp->napi);
		}
		handled = 1;
	}

	if (!handled)
		pr_debug("%s: irq %x remains\n", __func__, int_current);

	spin_unlock_irqrestore(&bp->lock, flags);

	return IRQ_RETVAL(handled);
}
#ifdef DEBUG
/* Dump the full skb payload as hex bytes (debug builds only). */
static inline void dnet_print_skb(struct sk_buff *skb)
{
	int k;
	printk(KERN_DEBUG PFX "data:");
	for (k = 0; k < skb->len; k++)
		printk(" %02x", (unsigned int)skb->data[k]);
	printk("\n");
}
#else
#define dnet_print_skb(skb) do {} while (0)
#endif
/*
 * Transmit one frame by PIO into the TX data FIFO.
 *
 * The frame is copied 32 bits at a time from a word-aligned address;
 * the low two bits of the original data pointer are encoded into the
 * command word written to TX_LEN_FIFO.
 *
 * NOTE(review): if the TX FIFO lacks room the frame is silently
 * dropped — the FIFO write is skipped, the skb is still freed and
 * NETDEV_TX_OK returned, with no drop counter incremented.
 */
static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	u32 tx_status, irq_enable;
	unsigned int len, i, tx_cmd, wrsz;
	unsigned long flags;
	unsigned int *bufp;

	tx_status = dnet_readl(bp, TX_STATUS);

	pr_debug("start_xmit: len %u head %p data %p\n",
		 skb->len, skb->head, skb->data);
	dnet_print_skb(skb);

	/* frame size (words) */
	len = (skb->len + 3) >> 2;

	spin_lock_irqsave(&bp->lock, flags);

	tx_status = dnet_readl(bp, TX_STATUS);

	/* round the source pointer down to a 32-bit boundary */
	bufp = (unsigned int *)(((unsigned long) skb->data) & ~0x3UL);
	wrsz = (u32) skb->len + 3;
	wrsz += ((unsigned long) skb->data) & 0x3;
	wrsz >>= 2;
	tx_cmd = ((((unsigned long)(skb->data)) & 0x03) << 16) | (u32) skb->len;

	/* check if there is enough room for the current frame */
	if (wrsz < (DNET_FIFO_SIZE - dnet_readl(bp, TX_FIFO_WCNT))) {
		for (i = 0; i < wrsz; i++)
			dnet_writel(bp, *bufp++, TX_DATA_FIFO);

		/*
		 * inform MAC that a packet's written and ready to be
		 * shipped out
		 */
		dnet_writel(bp, tx_cmd, TX_LEN_FIFO);
	}

	if (dnet_readl(bp, TX_FIFO_WCNT) > DNET_FIFO_TX_DATA_AF_TH) {
		/* FIFO nearly full: stop the queue until the TX
		 * almost-empty interrupt wakes it again */
		netif_stop_queue(dev);
		tx_status = dnet_readl(bp, INTR_SRC);
		irq_enable = dnet_readl(bp, INTR_ENB);
		irq_enable |= DNET_INTR_ENB_TX_FIFOAE;
		dnet_writel(bp, irq_enable, INTR_ENB);
	}

	/* free the buffer */
	dev_kfree_skb(skb);

	spin_unlock_irqrestore(&bp->lock, flags);

	return NETDEV_TX_OK;
}
/*
 * Put the MAC into a known idle state: rx/tx disabled (only the flow
 * control enable bit kept), FIFO thresholds programmed, FIFOs flushed.
 */
static void dnet_reset_hw(struct dnet *bp)
{
	/* put ts_mac in IDLE state i.e. disable rx/tx */
	dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, DNET_INTERNAL_MODE_FCEN);

	/*
	 * RX FIFO almost full threshold: only cmd FIFO almost full is
	 * implemented for RX side
	 */
	dnet_writel(bp, DNET_FIFO_RX_CMD_AF_TH, RX_FIFO_TH);
	/*
	 * TX FIFO almost empty threshold: only data FIFO almost empty
	 * is implemented for TX side
	 */
	dnet_writel(bp, DNET_FIFO_TX_DATA_AE_TH, TX_FIFO_TH);

	/* flush rx/tx fifos */
	dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH | DNET_SYS_CTL_TXFIFOFLUSH,
		    SYS_CTL);
	msleep(1);
	dnet_writel(bp, 0, SYS_CTL);
}
/*
 * Reset the MAC, program the station address, configure rx/tx control
 * from the netdevice flags, then clear and enable interrupts.
 */
static void dnet_init_hw(struct dnet *bp)
{
	u32 config;

	dnet_reset_hw(bp);
	__dnet_set_hwaddr(bp);

	config = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);

	if (bp->dev->flags & IFF_PROMISC)
		/* Copy All Frames */
		config |= DNET_INTERNAL_RXTX_CONTROL_ENPROMISC;
	if (!(bp->dev->flags & IFF_BROADCAST))
		/* No BroadCast */
		config |= DNET_INTERNAL_RXTX_CONTROL_RXMULTICAST;

	config |= DNET_INTERNAL_RXTX_CONTROL_RXPAUSE |
	    DNET_INTERNAL_RXTX_CONTROL_RXBROADCAST |
	    DNET_INTERNAL_RXTX_CONTROL_DROPCONTROL |
	    DNET_INTERNAL_RXTX_CONTROL_DISCFXFCS;
	dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, config);

	/* clear irq before enabling them */
	config = dnet_readl(bp, INTR_SRC);

	/* enable RX/TX interrupt, recv packet ready interrupt */
	dnet_writel(bp, DNET_INTR_ENB_GLOBAL_ENABLE | DNET_INTR_ENB_RX_SUMMARY |
		    DNET_INTR_ENB_TX_SUMMARY | DNET_INTR_ENB_RX_FIFOERR |
		    DNET_INTR_ENB_RX_ERROR | DNET_INTR_ENB_RX_FIFOFULL |
		    DNET_INTR_ENB_TX_FIFOFULL | DNET_INTR_ENB_TX_DISCFRM |
		    DNET_INTR_ENB_RX_PKTRDY, INTR_ENB);
}
/* ndo_open: bring the interface up (NAPI, hardware, PHY, queue). */
static int dnet_open(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);

	/* if the phy is not yet register, retry later */
	if (!bp->phy_dev)
		return -EAGAIN;

	if (!is_valid_ether_addr(dev->dev_addr))
		return -EADDRNOTAVAIL;

	napi_enable(&bp->napi);
	dnet_init_hw(bp);

	phy_start_aneg(bp->phy_dev);

	/* schedule a link state check */
	phy_start(bp->phy_dev);

	netif_start_queue(dev);

	return 0;
}
/* ndo_stop: quiesce queue, NAPI and PHY, then reset the hardware. */
static int dnet_close(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&bp->napi);

	if (bp->phy_dev)
		phy_stop(bp->phy_dev);

	dnet_reset_hw(bp);
	netif_carrier_off(dev);

	return 0;
}
/* Dump every accumulated hardware counter via pr_debug. */
static inline void dnet_print_pretty_hwstats(struct dnet_stats *hwstat)
{
	pr_debug("%s\n", __func__);
	pr_debug("----------------------------- RX statistics "
		 "-------------------------------\n");
	pr_debug("RX_PKT_IGNR_CNT %-8x\n", hwstat->rx_pkt_ignr);
	pr_debug("RX_LEN_CHK_ERR_CNT %-8x\n", hwstat->rx_len_chk_err);
	pr_debug("RX_LNG_FRM_CNT %-8x\n", hwstat->rx_lng_frm);
	pr_debug("RX_SHRT_FRM_CNT %-8x\n", hwstat->rx_shrt_frm);
	pr_debug("RX_IPG_VIOL_CNT %-8x\n", hwstat->rx_ipg_viol);
	pr_debug("RX_CRC_ERR_CNT %-8x\n", hwstat->rx_crc_err);
	pr_debug("RX_OK_PKT_CNT %-8x\n", hwstat->rx_ok_pkt);
	pr_debug("RX_CTL_FRM_CNT %-8x\n", hwstat->rx_ctl_frm);
	pr_debug("RX_PAUSE_FRM_CNT %-8x\n", hwstat->rx_pause_frm);
	pr_debug("RX_MULTICAST_CNT %-8x\n", hwstat->rx_multicast);
	pr_debug("RX_BROADCAST_CNT %-8x\n", hwstat->rx_broadcast);
	pr_debug("RX_VLAN_TAG_CNT %-8x\n", hwstat->rx_vlan_tag);
	pr_debug("RX_PRE_SHRINK_CNT %-8x\n", hwstat->rx_pre_shrink);
	pr_debug("RX_DRIB_NIB_CNT %-8x\n", hwstat->rx_drib_nib);
	pr_debug("RX_UNSUP_OPCD_CNT %-8x\n", hwstat->rx_unsup_opcd);
	pr_debug("RX_BYTE_CNT %-8x\n", hwstat->rx_byte);
	pr_debug("----------------------------- TX statistics "
		 "-------------------------------\n");
	pr_debug("TX_UNICAST_CNT %-8x\n", hwstat->tx_unicast);
	pr_debug("TX_PAUSE_FRM_CNT %-8x\n", hwstat->tx_pause_frm);
	pr_debug("TX_MULTICAST_CNT %-8x\n", hwstat->tx_multicast);
	pr_debug("TX_BRDCAST_CNT %-8x\n", hwstat->tx_brdcast);
	pr_debug("TX_VLAN_TAG_CNT %-8x\n", hwstat->tx_vlan_tag);
	pr_debug("TX_BAD_FCS_CNT %-8x\n", hwstat->tx_bad_fcs);
	pr_debug("TX_JUMBO_CNT %-8x\n", hwstat->tx_jumbo);
	pr_debug("TX_BYTE_CNT %-8x\n", hwstat->tx_byte);
}
/*
 * ndo_get_stats: refresh the accumulated hardware counters and map
 * them onto the generic netdevice statistics.
 */
static struct net_device_stats *dnet_get_stats(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &dev->stats;
	struct dnet_stats *hwstat = &bp->hw_stats;

	/* read stats from hardware */
	dnet_update_stats(bp);

	/* Convert HW stats into netdevice stats */
	nstat->rx_errors = (hwstat->rx_len_chk_err +
			    hwstat->rx_lng_frm + hwstat->rx_shrt_frm +
			    /* ignore IGP violation error
			    hwstat->rx_ipg_viol + */
			    hwstat->rx_crc_err +
			    hwstat->rx_pre_shrink +
			    hwstat->rx_drib_nib + hwstat->rx_unsup_opcd);
	nstat->tx_errors = hwstat->tx_bad_fcs;
	nstat->rx_length_errors = (hwstat->rx_len_chk_err +
				   hwstat->rx_lng_frm +
				   hwstat->rx_shrt_frm + hwstat->rx_pre_shrink);
	nstat->rx_crc_errors = hwstat->rx_crc_err;
	nstat->rx_frame_errors = hwstat->rx_pre_shrink + hwstat->rx_drib_nib;
	nstat->rx_packets = hwstat->rx_ok_pkt;
	nstat->tx_packets = (hwstat->tx_unicast +
			     hwstat->tx_multicast + hwstat->tx_brdcast);
	nstat->rx_bytes = hwstat->rx_byte;
	nstat->tx_bytes = hwstat->tx_byte;
	nstat->multicast = hwstat->rx_multicast;
	nstat->rx_missed_errors = hwstat->rx_pkt_ignr;

	dnet_print_pretty_hwstats(hwstat);

	return nstat;
}
  623. static int dnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  624. {
  625. struct dnet *bp = netdev_priv(dev);
  626. struct phy_device *phydev = bp->phy_dev;
  627. if (!phydev)
  628. return -ENODEV;
  629. return phy_ethtool_gset(phydev, cmd);
  630. }
  631. static int dnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  632. {
  633. struct dnet *bp = netdev_priv(dev);
  634. struct phy_device *phydev = bp->phy_dev;
  635. if (!phydev)
  636. return -ENODEV;
  637. return phy_ethtool_sset(phydev, cmd);
  638. }
  639. static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
  640. {
  641. struct dnet *bp = netdev_priv(dev);
  642. struct phy_device *phydev = bp->phy_dev;
  643. if (!netif_running(dev))
  644. return -EINVAL;
  645. if (!phydev)
  646. return -ENODEV;
  647. return phy_mii_ioctl(phydev, rq, cmd);
  648. }
  649. static void dnet_get_drvinfo(struct net_device *dev,
  650. struct ethtool_drvinfo *info)
  651. {
  652. strcpy(info->driver, DRV_NAME);
  653. strcpy(info->version, DRV_VERSION);
  654. strcpy(info->bus_info, "0");
  655. }
/* ethtool operations: link settings and driver identification only */
static const struct ethtool_ops dnet_ethtool_ops = {
	.get_settings = dnet_get_settings,
	.set_settings = dnet_set_settings,
	.get_drvinfo = dnet_get_drvinfo,
	.get_link = ethtool_op_get_link,
};
/* Net device operations; MAC address and MTU handling use the
 * generic ethernet helpers. */
static const struct net_device_ops dnet_netdev_ops = {
	.ndo_open = dnet_open,
	.ndo_stop = dnet_close,
	.ndo_get_stats = dnet_get_stats,
	.ndo_start_xmit = dnet_start_xmit,
	.ndo_do_ioctl = dnet_ioctl,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = eth_change_mtu,
};
  672. static int __devinit dnet_probe(struct platform_device *pdev)
  673. {
  674. struct resource *res;
  675. struct net_device *dev;
  676. struct dnet *bp;
  677. struct phy_device *phydev;
  678. int err = -ENXIO;
  679. unsigned int mem_base, mem_size, irq;
  680. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  681. if (!res) {
  682. dev_err(&pdev->dev, "no mmio resource defined\n");
  683. goto err_out;
  684. }
  685. mem_base = res->start;
  686. mem_size = resource_size(res);
  687. irq = platform_get_irq(pdev, 0);
  688. if (!request_mem_region(mem_base, mem_size, DRV_NAME)) {
  689. dev_err(&pdev->dev, "no memory region available\n");
  690. err = -EBUSY;
  691. goto err_out;
  692. }
  693. err = -ENOMEM;
  694. dev = alloc_etherdev(sizeof(*bp));
  695. if (!dev) {
  696. dev_err(&pdev->dev, "etherdev alloc failed, aborting.\n");
  697. goto err_out_release_mem;
  698. }
  699. /* TODO: Actually, we have some interesting features... */
  700. dev->features |= 0;
  701. bp = netdev_priv(dev);
  702. bp->dev = dev;
  703. platform_set_drvdata(pdev, dev);
  704. SET_NETDEV_DEV(dev, &pdev->dev);
  705. spin_lock_init(&bp->lock);
  706. bp->regs = ioremap(mem_base, mem_size);
  707. if (!bp->regs) {
  708. dev_err(&pdev->dev, "failed to map registers, aborting.\n");
  709. err = -ENOMEM;
  710. goto err_out_free_dev;
  711. }
  712. dev->irq = irq;
  713. err = request_irq(dev->irq, dnet_interrupt, 0, DRV_NAME, dev);
  714. if (err) {
  715. dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
  716. irq, err);
  717. goto err_out_iounmap;
  718. }
  719. dev->netdev_ops = &dnet_netdev_ops;
  720. netif_napi_add(dev, &bp->napi, dnet_poll, 64);
  721. dev->ethtool_ops = &dnet_ethtool_ops;
  722. dev->base_addr = (unsigned long)bp->regs;
  723. bp->capabilities = dnet_readl(bp, VERCAPS) & DNET_CAPS_MASK;
  724. dnet_get_hwaddr(bp);
  725. if (!is_valid_ether_addr(dev->dev_addr)) {
  726. /* choose a random ethernet address */
  727. random_ether_addr(dev->dev_addr);
  728. __dnet_set_hwaddr(bp);
  729. }
  730. err = register_netdev(dev);
  731. if (err) {
  732. dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
  733. goto err_out_free_irq;
  734. }
  735. /* register the PHY board fixup (for Marvell 88E1111) */
  736. err = phy_register_fixup_for_uid(0x01410cc0, 0xfffffff0,
  737. dnet_phy_marvell_fixup);
  738. /* we can live without it, so just issue a warning */
  739. if (err)
  740. dev_warn(&pdev->dev, "Cannot register PHY board fixup.\n");
  741. err = dnet_mii_init(bp);
  742. if (err)
  743. goto err_out_unregister_netdev;
  744. dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n",
  745. bp->regs, mem_base, dev->irq, dev->dev_addr);
  746. dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma\n",
  747. (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ",
  748. (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ",
  749. (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ",
  750. (bp->capabilities & DNET_HAS_DMA) ? "" : "no ");
  751. phydev = bp->phy_dev;
  752. dev_info(&pdev->dev, "attached PHY driver [%s] "
  753. "(mii_bus:phy_addr=%s, irq=%d)\n",
  754. phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
  755. return 0;
  756. err_out_unregister_netdev:
  757. unregister_netdev(dev);
  758. err_out_free_irq:
  759. free_irq(dev->irq, dev);
  760. err_out_iounmap:
  761. iounmap(bp->regs);
  762. err_out_free_dev:
  763. free_netdev(dev);
  764. err_out_release_mem:
  765. release_mem_region(mem_base, mem_size);
  766. err_out:
  767. return err;
  768. }
/*
 * Remove: undo probe.  The PHY/MDIO bus is torn down first (mirroring
 * dnet_mii_init() being the last probe step), then the netdevice and
 * its resources.
 *
 * NOTE(review): the mem region requested in probe is not released
 * here — confirm whether that is intentional.
 */
static int __devexit dnet_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct dnet *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		if (bp->phy_dev)
			phy_disconnect(bp->phy_dev);
		mdiobus_unregister(bp->mii_bus);
		kfree(bp->mii_bus->irq);
		mdiobus_free(bp->mii_bus);
		unregister_netdev(dev);
		free_irq(dev->irq, dev);
		iounmap(bp->regs);
		free_netdev(dev);
	}

	return 0;
}
/* Platform driver glue; bound by name to the "dnet" platform device. */
static struct platform_driver dnet_driver = {
	.probe = dnet_probe,
	.remove = __devexit_p(dnet_remove),
	.driver = {
		.name = "dnet",
	},
};
/* Module init: register the platform driver. */
static int __init dnet_init(void)
{
	return platform_driver_register(&dnet_driver);
}
/* Module exit: unregister the platform driver. */
static void __exit dnet_exit(void)
{
	platform_driver_unregister(&dnet_driver);
}
/* module entry points and metadata */
module_init(dnet_init);
module_exit(dnet_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Dave DNET Ethernet driver");
MODULE_AUTHOR("Ilya Yanok <yanok@emcraft.com>, "
	      "Matteo Vit <matteo.vit@dave.eu>");