ixp4xx_eth.c

  1. /*
  2. * Intel IXP4xx Ethernet driver for Linux
  3. *
  4. * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of version 2 of the GNU General Public License
  8. * as published by the Free Software Foundation.
  9. *
  10. * Ethernet port config (0x00 is not present on IXP42X):
  11. *
  12. * logical port     0x00        0x10        0x20
  13. * NPE              0 (NPE-A)   1 (NPE-B)   2 (NPE-C)
  14. * physical PortId  2           0           1
  15. * TX queue         23          24          25
  16. * RX-free queue    26          27          28
  17. * TX-done queue is always 31, per-port RX and TX-ready queues are configurable
  18. *
  19. *
  20. * Queue entries:
  21. * bits 0 -> 1 - NPE ID (RX and TX-done)
  22. * bits 0 -> 2 - priority (TX, per 802.1D)
  23. * bits 3 -> 4 - port ID (user-set?)
  24. * bits 5 -> 31 - physical descriptor address
  25. */
  26. #include <linux/delay.h>
  27. #include <linux/dma-mapping.h>
  28. #include <linux/dmapool.h>
  29. #include <linux/etherdevice.h>
  30. #include <linux/io.h>
  31. #include <linux/kernel.h>
  32. #include <linux/net_tstamp.h>
  33. #include <linux/phy.h>
  34. #include <linux/platform_device.h>
  35. #include <linux/ptp_classify.h>
  36. #include <linux/slab.h>
  37. #include <linux/module.h>
  38. #include <mach/ixp46x_ts.h>
  39. #include <mach/npe.h>
  40. #include <mach/qmgr.h>
  41. #define DEBUG_DESC 0
  42. #define DEBUG_RX 0
  43. #define DEBUG_TX 0
  44. #define DEBUG_PKT_BYTES 0
  45. #define DEBUG_MDIO 0
  46. #define DEBUG_CLOSE 0
  47. #define DRV_NAME "ixp4xx_eth"
  48. #define MAX_NPES 3
  49. #define RX_DESCS 64 /* also length of all RX queues */
  50. #define TX_DESCS 16 /* also length of all TX queues */
  51. #define TXDONE_QUEUE_LEN 64 /* dwords */
  52. #define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS))
  53. #define REGS_SIZE 0x1000
  54. #define MAX_MRU 1536 /* 0x600 */
  55. #define RX_BUFF_SIZE ALIGN((NET_IP_ALIGN) + MAX_MRU, 4)
  56. #define NAPI_WEIGHT 16
  57. #define MDIO_INTERVAL (3 * HZ)
  58. #define MAX_MDIO_RETRIES 100 /* microseconds, typically 30 cycles */
  59. #define MAX_CLOSE_WAIT 1000 /* microseconds, typically 2-3 cycles */
  60. #define NPE_ID(port_id) ((port_id) >> 4)
  61. #define PHYSICAL_ID(port_id) ((NPE_ID(port_id) + 2) % 3)
  62. #define TX_QUEUE(port_id) (NPE_ID(port_id) + 23)
  63. #define RXFREE_QUEUE(port_id) (NPE_ID(port_id) + 26)
  64. #define TXDONE_QUEUE 31
  65. #define PTP_SLAVE_MODE 1
  66. #define PTP_MASTER_MODE 2
  67. #define PORT2CHANNEL(p) NPE_ID(p->id)
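/*
 * The logical port ID keeps the NPE number in its high nibble, so the macros
 * above reproduce the table from the header comment: for port 0x10, NPE_ID()
 * is 1 (NPE-B), PHYSICAL_ID() is 0, TX_QUEUE() is 24 and RXFREE_QUEUE() is 27.
 * PORT2CHANNEL() reuses the NPE number as the IXP46x time-sync channel index.
 */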
  68. /* TX Control Registers */
  69. #define TX_CNTRL0_TX_EN 0x01
  70. #define TX_CNTRL0_HALFDUPLEX 0x02
  71. #define TX_CNTRL0_RETRY 0x04
  72. #define TX_CNTRL0_PAD_EN 0x08
  73. #define TX_CNTRL0_APPEND_FCS 0x10
  74. #define TX_CNTRL0_2DEFER 0x20
  75. #define TX_CNTRL0_RMII 0x40 /* reduced MII */
  76. #define TX_CNTRL1_RETRIES 0x0F /* 4 bits */
  77. /* RX Control Registers */
  78. #define RX_CNTRL0_RX_EN 0x01
  79. #define RX_CNTRL0_PADSTRIP_EN 0x02
  80. #define RX_CNTRL0_SEND_FCS 0x04
  81. #define RX_CNTRL0_PAUSE_EN 0x08
  82. #define RX_CNTRL0_LOOP_EN 0x10
  83. #define RX_CNTRL0_ADDR_FLTR_EN 0x20
  84. #define RX_CNTRL0_RX_RUNT_EN 0x40
  85. #define RX_CNTRL0_BCAST_DIS 0x80
  86. #define RX_CNTRL1_DEFER_EN 0x01
  87. /* Core Control Register */
  88. #define CORE_RESET 0x01
  89. #define CORE_RX_FIFO_FLUSH 0x02
  90. #define CORE_TX_FIFO_FLUSH 0x04
  91. #define CORE_SEND_JAM 0x08
  92. #define CORE_MDC_EN 0x10 /* MDIO using NPE-B ETH-0 only */
  93. #define DEFAULT_TX_CNTRL0 (TX_CNTRL0_TX_EN | TX_CNTRL0_RETRY | \
  94. TX_CNTRL0_PAD_EN | TX_CNTRL0_APPEND_FCS | \
  95. TX_CNTRL0_2DEFER)
  96. #define DEFAULT_RX_CNTRL0 RX_CNTRL0_RX_EN
  97. #define DEFAULT_CORE_CNTRL CORE_MDC_EN
  98. /* NPE message codes */
  99. #define NPE_GETSTATUS 0x00
  100. #define NPE_EDB_SETPORTADDRESS 0x01
  101. #define NPE_EDB_GETMACADDRESSDATABASE 0x02
  102. #define NPE_EDB_SETMACADDRESSSDATABASE 0x03
  103. #define NPE_GETSTATS 0x04
  104. #define NPE_RESETSTATS 0x05
  105. #define NPE_SETMAXFRAMELENGTHS 0x06
  106. #define NPE_VLAN_SETRXTAGMODE 0x07
  107. #define NPE_VLAN_SETDEFAULTRXVID 0x08
  108. #define NPE_VLAN_SETPORTVLANTABLEENTRY 0x09
  109. #define NPE_VLAN_SETPORTVLANTABLERANGE 0x0A
  110. #define NPE_VLAN_SETRXQOSENTRY 0x0B
  111. #define NPE_VLAN_SETPORTIDEXTRACTIONMODE 0x0C
  112. #define NPE_STP_SETBLOCKINGSTATE 0x0D
  113. #define NPE_FW_SETFIREWALLMODE 0x0E
  114. #define NPE_PC_SETFRAMECONTROLDURATIONID 0x0F
  115. #define NPE_PC_SETAPMACTABLE 0x11
  116. #define NPE_SETLOOPBACK_MODE 0x12
  117. #define NPE_PC_SETBSSIDTABLE 0x13
  118. #define NPE_ADDRESS_FILTER_CONFIG 0x14
  119. #define NPE_APPENDFCSCONFIG 0x15
  120. #define NPE_NOTIFY_MAC_RECOVERY_DONE 0x16
  121. #define NPE_MAC_RECOVERY_START 0x17
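/*
 * Buffer handling differs by endianness: on big-endian ARM (__ARMEB__) the
 * NPEs can work on skb data directly, so buffer_t is a struct sk_buff.  On
 * little-endian kernels packet data is staged through plain kmalloc()ed
 * bounce buffers copied with memcpy_swab32(), so buffer_t is void and is
 * released with kfree().
 */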
  122. #ifdef __ARMEB__
  123. typedef struct sk_buff buffer_t;
  124. #define free_buffer dev_kfree_skb
  125. #define free_buffer_irq dev_kfree_skb_irq
  126. #else
  127. typedef void buffer_t;
  128. #define free_buffer kfree
  129. #define free_buffer_irq kfree
  130. #endif
  131. struct eth_regs {
  132. u32 tx_control[2], __res1[2]; /* 000 */
  133. u32 rx_control[2], __res2[2]; /* 010 */
  134. u32 random_seed, __res3[3]; /* 020 */
  135. u32 partial_empty_threshold, __res4; /* 030 */
  136. u32 partial_full_threshold, __res5; /* 038 */
  137. u32 tx_start_bytes, __res6[3]; /* 040 */
  138. u32 tx_deferral, rx_deferral, __res7[2];/* 050 */
  139. u32 tx_2part_deferral[2], __res8[2]; /* 060 */
  140. u32 slot_time, __res9[3]; /* 070 */
  141. u32 mdio_command[4]; /* 080 */
  142. u32 mdio_status[4]; /* 090 */
  143. u32 mcast_mask[6], __res10[2]; /* 0A0 */
  144. u32 mcast_addr[6], __res11[2]; /* 0C0 */
  145. u32 int_clock_threshold, __res12[3]; /* 0E0 */
  146. u32 hw_addr[6], __res13[61]; /* 0F0 */
  147. u32 core_control; /* 1FC */
  148. };
  149. struct port {
  150. struct resource *mem_res;
  151. struct eth_regs __iomem *regs;
  152. struct npe *npe;
  153. struct net_device *netdev;
  154. struct napi_struct napi;
  155. struct phy_device *phydev;
  156. struct eth_plat_info *plat;
  157. buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
  158. struct desc *desc_tab; /* coherent */
  159. u32 desc_tab_phys;
  160. int id; /* logical port ID */
  161. int speed, duplex;
  162. u8 firmware[4];
  163. int hwts_tx_en;
  164. int hwts_rx_en;
  165. };
  166. /* NPE message structure */
  167. struct msg {
  168. #ifdef __ARMEB__
  169. u8 cmd, eth_id, byte2, byte3;
  170. u8 byte4, byte5, byte6, byte7;
  171. #else
  172. u8 byte3, byte2, eth_id, cmd;
  173. u8 byte7, byte6, byte5, byte4;
  174. #endif
  175. };
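/*
 * The reversed field order in the little-endian variant of struct msg (and
 * of struct desc below) compensates for the CPU byte order, so each 32-bit
 * word presents the same layout to the NPE on either endianness.
 */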
  176. /* Ethernet packet descriptor */
  177. struct desc {
  178. u32 next; /* pointer to next buffer, unused */
  179. #ifdef __ARMEB__
  180. u16 buf_len; /* buffer length */
  181. u16 pkt_len; /* packet length */
  182. u32 data; /* pointer to data buffer in RAM */
  183. u8 dest_id;
  184. u8 src_id;
  185. u16 flags;
  186. u8 qos;
  187. u8 padlen;
  188. u16 vlan_tci;
  189. #else
  190. u16 pkt_len; /* packet length */
  191. u16 buf_len; /* buffer length */
  192. u32 data; /* pointer to data buffer in RAM */
  193. u16 flags;
  194. u8 src_id;
  195. u8 dest_id;
  196. u16 vlan_tci;
  197. u8 padlen;
  198. u8 qos;
  199. #endif
  200. #ifdef __ARMEB__
  201. u8 dst_mac_0, dst_mac_1, dst_mac_2, dst_mac_3;
  202. u8 dst_mac_4, dst_mac_5, src_mac_0, src_mac_1;
  203. u8 src_mac_2, src_mac_3, src_mac_4, src_mac_5;
  204. #else
  205. u8 dst_mac_3, dst_mac_2, dst_mac_1, dst_mac_0;
  206. u8 src_mac_1, src_mac_0, dst_mac_5, dst_mac_4;
  207. u8 src_mac_5, src_mac_4, src_mac_3, src_mac_2;
  208. #endif
  209. };
  210. #define rx_desc_phys(port, n) ((port)->desc_tab_phys + \
  211. (n) * sizeof(struct desc))
  212. #define rx_desc_ptr(port, n) (&(port)->desc_tab[n])
  213. #define tx_desc_phys(port, n) ((port)->desc_tab_phys + \
  214. ((n) + RX_DESCS) * sizeof(struct desc))
  215. #define tx_desc_ptr(port, n) (&(port)->desc_tab[(n) + RX_DESCS])
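/*
 * All descriptors of a port live in one coherent block: RX_DESCS RX
 * descriptors followed by TX_DESCS TX descriptors, which is why the TX
 * macros offset by RX_DESCS and why POOL_ALLOC_SIZE covers both.
 */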
  216. #ifndef __ARMEB__
  217. static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
  218. {
  219. int i;
  220. for (i = 0; i < cnt; i++)
  221. dest[i] = swab32(src[i]);
  222. }
  223. #endif
  224. static spinlock_t mdio_lock;
  225. static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
  226. static struct mii_bus *mdio_bus;
  227. static int ports_open;
  228. static struct port *npe_port_tab[MAX_NPES];
  229. static struct dma_pool *dma_pool;
  230. static struct sock_filter ptp_filter[] = {
  231. PTP_FILTER
  232. };
  233. static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
  234. {
  235. u8 *data = skb->data;
  236. unsigned int offset;
  237. u16 *hi, *id;
  238. u32 lo;
  239. if (sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4)
  240. return 0;
  241. offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
  242. if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
  243. return 0;
  244. hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
  245. id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
  246. memcpy(&lo, &hi[1], sizeof(lo));
  247. return (uid_hi == ntohs(*hi) &&
  248. uid_lo == ntohl(lo) &&
  249. seqid == ntohs(*id));
  250. }
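/*
 * The IXP46x time-sync unit latches one RX snapshot per channel along with
 * the source UUID and sequence ID of the frame it captured.
 * ixp_rx_timestamp() attaches that snapshot only when ixp_ptp_match()
 * confirms this skb is the captured frame, then writes RX_SNAPSHOT_LOCKED
 * back to the event register to release the snapshot for the next frame.
 */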
  251. static void ixp_rx_timestamp(struct port *port, struct sk_buff *skb)
  252. {
  253. struct skb_shared_hwtstamps *shhwtstamps;
  254. struct ixp46x_ts_regs *regs;
  255. u64 ns;
  256. u32 ch, hi, lo, val;
  257. u16 uid, seq;
  258. if (!port->hwts_rx_en)
  259. return;
  260. ch = PORT2CHANNEL(port);
  261. regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;
  262. val = __raw_readl(&regs->channel[ch].ch_event);
  263. if (!(val & RX_SNAPSHOT_LOCKED))
  264. return;
  265. lo = __raw_readl(&regs->channel[ch].src_uuid_lo);
  266. hi = __raw_readl(&regs->channel[ch].src_uuid_hi);
  267. uid = hi & 0xffff;
  268. seq = (hi >> 16) & 0xffff;
  269. if (!ixp_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
  270. goto out;
  271. lo = __raw_readl(&regs->channel[ch].rx_snap_lo);
  272. hi = __raw_readl(&regs->channel[ch].rx_snap_hi);
  273. ns = ((u64) hi) << 32;
  274. ns |= lo;
  275. ns <<= TICKS_NS_SHIFT;
  276. shhwtstamps = skb_hwtstamps(skb);
  277. memset(shhwtstamps, 0, sizeof(*shhwtstamps));
  278. shhwtstamps->hwtstamp = ns_to_ktime(ns);
  279. out:
  280. __raw_writel(RX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
  281. }
  282. static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb)
  283. {
  284. struct skb_shared_hwtstamps shhwtstamps;
  285. struct ixp46x_ts_regs *regs;
  286. struct skb_shared_info *shtx;
  287. u64 ns;
  288. u32 ch, cnt, hi, lo, val;
  289. shtx = skb_shinfo(skb);
  290. if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && port->hwts_tx_en))
  291. shtx->tx_flags |= SKBTX_IN_PROGRESS;
  292. else
  293. return;
  294. ch = PORT2CHANNEL(port);
  295. regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;
  296. /*
  297. * This really stinks, but we have to poll for the Tx time stamp.
  298. * Usually, the time stamp is ready after 4 to 6 microseconds.
  299. */
  300. for (cnt = 0; cnt < 100; cnt++) {
  301. val = __raw_readl(&regs->channel[ch].ch_event);
  302. if (val & TX_SNAPSHOT_LOCKED)
  303. break;
  304. udelay(1);
  305. }
  306. if (!(val & TX_SNAPSHOT_LOCKED)) {
  307. shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
  308. return;
  309. }
  310. lo = __raw_readl(&regs->channel[ch].tx_snap_lo);
  311. hi = __raw_readl(&regs->channel[ch].tx_snap_hi);
  312. ns = ((u64) hi) << 32;
  313. ns |= lo;
  314. ns <<= TICKS_NS_SHIFT;
  315. memset(&shhwtstamps, 0, sizeof(shhwtstamps));
  316. shhwtstamps.hwtstamp = ns_to_ktime(ns);
  317. skb_tstamp_tx(skb, &shhwtstamps);
  318. __raw_writel(TX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
  319. }
  320. static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
  321. {
  322. struct hwtstamp_config cfg;
  323. struct ixp46x_ts_regs *regs;
  324. struct port *port = netdev_priv(netdev);
  325. int ch;
  326. if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
  327. return -EFAULT;
  328. if (cfg.flags) /* reserved for future extensions */
  329. return -EINVAL;
  330. ch = PORT2CHANNEL(port);
  331. regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;
  332. switch (cfg.tx_type) {
  333. case HWTSTAMP_TX_OFF:
  334. port->hwts_tx_en = 0;
  335. break;
  336. case HWTSTAMP_TX_ON:
  337. port->hwts_tx_en = 1;
  338. break;
  339. default:
  340. return -ERANGE;
  341. }
  342. switch (cfg.rx_filter) {
  343. case HWTSTAMP_FILTER_NONE:
  344. port->hwts_rx_en = 0;
  345. break;
  346. case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
  347. port->hwts_rx_en = PTP_SLAVE_MODE;
  348. __raw_writel(0, &regs->channel[ch].ch_control);
  349. break;
  350. case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
  351. port->hwts_rx_en = PTP_MASTER_MODE;
  352. __raw_writel(MASTER_MODE, &regs->channel[ch].ch_control);
  353. break;
  354. default:
  355. return -ERANGE;
  356. }
  357. /* Clear out any old time stamps. */
  358. __raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED,
  359. &regs->channel[ch].ch_event);
  360. return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
  361. }
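/*
 * MDIO goes through four byte-wide command registers: mdio_command[0..1]
 * hold the data to be written, mdio_command[2] the low bits of
 * (phy_id << 5) | location, and mdio_command[3] the remaining PHY ID bits,
 * the write flag (bit 2) and the GO/busy bit (0x80), which is polled until
 * the cycle completes.  Read data comes back in mdio_status[0..1], with
 * bit 7 of mdio_status[3] flagging a failed read.
 */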
  362. static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
  363. int write, u16 cmd)
  364. {
  365. int cycles = 0;
  366. if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
  367. printk(KERN_ERR "%s: MII not ready to transmit\n", bus->name);
  368. return -1;
  369. }
  370. if (write) {
  371. __raw_writel(cmd & 0xFF, &mdio_regs->mdio_command[0]);
  372. __raw_writel(cmd >> 8, &mdio_regs->mdio_command[1]);
  373. }
  374. __raw_writel(((phy_id << 5) | location) & 0xFF,
  375. &mdio_regs->mdio_command[2]);
  376. __raw_writel((phy_id >> 3) | (write << 2) | 0x80 /* GO */,
  377. &mdio_regs->mdio_command[3]);
  378. while ((cycles < MAX_MDIO_RETRIES) &&
  379. (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80)) {
  380. udelay(1);
  381. cycles++;
  382. }
  383. if (cycles == MAX_MDIO_RETRIES) {
  384. printk(KERN_ERR "%s #%i: MII command failed\n", bus->name,
  385. phy_id);
  386. return -1;
  387. }
  388. #if DEBUG_MDIO
  389. printk(KERN_DEBUG "%s #%i: mdio_%s() took %i cycles\n", bus->name,
  390. phy_id, write ? "write" : "read", cycles);
  391. #endif
  392. if (write)
  393. return 0;
  394. if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
  395. #if DEBUG_MDIO
  396. printk(KERN_DEBUG "%s #%i: MII read failed\n", bus->name,
  397. phy_id);
  398. #endif
  399. return 0xFFFF; /* don't return error */
  400. }
  401. return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
  402. ((__raw_readl(&mdio_regs->mdio_status[1]) & 0xFF) << 8);
  403. }
  404. static int ixp4xx_mdio_read(struct mii_bus *bus, int phy_id, int location)
  405. {
  406. unsigned long flags;
  407. int ret;
  408. spin_lock_irqsave(&mdio_lock, flags);
  409. ret = ixp4xx_mdio_cmd(bus, phy_id, location, 0, 0);
  410. spin_unlock_irqrestore(&mdio_lock, flags);
  411. #if DEBUG_MDIO
  412. printk(KERN_DEBUG "%s #%i: MII read [%i] -> 0x%X\n", bus->name,
  413. phy_id, location, ret);
  414. #endif
  415. return ret;
  416. }
  417. static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
  418. u16 val)
  419. {
  420. unsigned long flags;
  421. int ret;
  422. spin_lock_irqsave(&mdio_lock, flags);
  423. ret = ixp4xx_mdio_cmd(bus, phy_id, location, 1, val);
  424. spin_unlock_irqrestore(&mdio_lock, flags);
  425. #if DEBUG_MDIO
  426. printk(KERN_DEBUG "%s #%i: MII write [%i] <- 0x%X, err = %i\n",
  427. bus->name, phy_id, location, val, ret);
  428. #endif
  429. return ret;
  430. }
  431. static int ixp4xx_mdio_register(void)
  432. {
  433. int err;
  434. if (!(mdio_bus = mdiobus_alloc()))
  435. return -ENOMEM;
  436. if (cpu_is_ixp43x()) {
  437. /* IXP43x lacks NPE-B and uses NPE-C for MII PHY access */
  438. if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEC_ETH))
  439. return -ENODEV;
  440. mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
  441. } else {
  442. /* All MII PHY accesses use NPE-B Ethernet registers */
  443. if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
  444. return -ENODEV;
  445. mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
  446. }
  447. __raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
  448. spin_lock_init(&mdio_lock);
  449. mdio_bus->name = "IXP4xx MII Bus";
  450. mdio_bus->read = &ixp4xx_mdio_read;
  451. mdio_bus->write = &ixp4xx_mdio_write;
  452. snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "ixp4xx-eth-0");
  453. if ((err = mdiobus_register(mdio_bus)))
  454. mdiobus_free(mdio_bus);
  455. return err;
  456. }
  457. static void ixp4xx_mdio_remove(void)
  458. {
  459. mdiobus_unregister(mdio_bus);
  460. mdiobus_free(mdio_bus);
  461. }
  462. static void ixp4xx_adjust_link(struct net_device *dev)
  463. {
  464. struct port *port = netdev_priv(dev);
  465. struct phy_device *phydev = port->phydev;
  466. if (!phydev->link) {
  467. if (port->speed) {
  468. port->speed = 0;
  469. printk(KERN_INFO "%s: link down\n", dev->name);
  470. }
  471. return;
  472. }
  473. if (port->speed == phydev->speed && port->duplex == phydev->duplex)
  474. return;
  475. port->speed = phydev->speed;
  476. port->duplex = phydev->duplex;
  477. if (port->duplex)
  478. __raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
  479. &port->regs->tx_control[0]);
  480. else
  481. __raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
  482. &port->regs->tx_control[0]);
  483. printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
  484. dev->name, port->speed, port->duplex ? "full" : "half");
  485. }
  486. static inline void debug_pkt(struct net_device *dev, const char *func,
  487. u8 *data, int len)
  488. {
  489. #if DEBUG_PKT_BYTES
  490. int i;
  491. printk(KERN_DEBUG "%s: %s(%i) ", dev->name, func, len);
  492. for (i = 0; i < len; i++) {
  493. if (i >= DEBUG_PKT_BYTES)
  494. break;
  495. printk("%s%02X",
  496. ((i == 6) || (i == 12) || (i >= 14)) ? " " : "",
  497. data[i]);
  498. }
  499. printk("\n");
  500. #endif
  501. }
  502. static inline void debug_desc(u32 phys, struct desc *desc)
  503. {
  504. #if DEBUG_DESC
  505. printk(KERN_DEBUG "%X: %X %3X %3X %08X %2X < %2X %4X %X"
  506. " %X %X %02X%02X%02X%02X%02X%02X < %02X%02X%02X%02X%02X%02X\n",
  507. phys, desc->next, desc->buf_len, desc->pkt_len,
  508. desc->data, desc->dest_id, desc->src_id, desc->flags,
  509. desc->qos, desc->padlen, desc->vlan_tci,
  510. desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2,
  511. desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5,
  512. desc->src_mac_0, desc->src_mac_1, desc->src_mac_2,
  513. desc->src_mac_3, desc->src_mac_4, desc->src_mac_5);
  514. #endif
  515. }
  516. static inline int queue_get_desc(unsigned int queue, struct port *port,
  517. int is_tx)
  518. {
  519. u32 phys, tab_phys, n_desc;
  520. struct desc *tab;
  521. if (!(phys = qmgr_get_entry(queue)))
  522. return -1;
  523. phys &= ~0x1F; /* mask out non-address bits */
  524. tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
  525. tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
  526. n_desc = (phys - tab_phys) / sizeof(struct desc);
  527. BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
  528. debug_desc(phys, &tab[n_desc]);
  529. BUG_ON(tab[n_desc].next);
  530. return n_desc;
  531. }
  532. static inline void queue_put_desc(unsigned int queue, u32 phys,
  533. struct desc *desc)
  534. {
  535. debug_desc(phys, desc);
  536. BUG_ON(phys & 0x1F);
  537. qmgr_put_entry(queue, phys);
  538. /* Don't check for queue overflow here, we've allocated sufficient
  539. length and queues >= 32 don't support this check anyway. */
  540. }
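/*
 * On little-endian kernels desc->data carries the original buffer's low two
 * alignment bits (set up in eth_xmit()), so dma_unmap_tx() masks them off to
 * recover the mapped address and rounds the length back up to whole words.
 */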
  541. static inline void dma_unmap_tx(struct port *port, struct desc *desc)
  542. {
  543. #ifdef __ARMEB__
  544. dma_unmap_single(&port->netdev->dev, desc->data,
  545. desc->buf_len, DMA_TO_DEVICE);
  546. #else
  547. dma_unmap_single(&port->netdev->dev, desc->data & ~3,
  548. ALIGN((desc->data & 3) + desc->buf_len, 4),
  549. DMA_TO_DEVICE);
  550. #endif
  551. }
  552. static void eth_rx_irq(void *pdev)
  553. {
  554. struct net_device *dev = pdev;
  555. struct port *port = netdev_priv(dev);
  556. #if DEBUG_RX
  557. printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
  558. #endif
  559. qmgr_disable_irq(port->plat->rxq);
  560. napi_schedule(&port->napi);
  561. }
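/*
 * NAPI RX: descriptors are pulled from the per-port RX queue.  On big-endian
 * ARM the filled skb is handed up directly and replaced by a freshly mapped
 * one; on little-endian kernels the permanent bounce buffer is synced and its
 * contents copied into a new skb with memcpy_swab32().  Either way the
 * descriptor is reset to MAX_MRU and pushed back onto the RX-free queue.
 */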
  562. static int eth_poll(struct napi_struct *napi, int budget)
  563. {
  564. struct port *port = container_of(napi, struct port, napi);
  565. struct net_device *dev = port->netdev;
  566. unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
  567. int received = 0;
  568. #if DEBUG_RX
  569. printk(KERN_DEBUG "%s: eth_poll\n", dev->name);
  570. #endif
  571. while (received < budget) {
  572. struct sk_buff *skb;
  573. struct desc *desc;
  574. int n;
  575. #ifdef __ARMEB__
  576. struct sk_buff *temp;
  577. u32 phys;
  578. #endif
  579. if ((n = queue_get_desc(rxq, port, 0)) < 0) {
  580. #if DEBUG_RX
  581. printk(KERN_DEBUG "%s: eth_poll napi_complete\n",
  582. dev->name);
  583. #endif
  584. napi_complete(napi);
  585. qmgr_enable_irq(rxq);
  586. if (!qmgr_stat_below_low_watermark(rxq) &&
  587. napi_reschedule(napi)) { /* not empty again */
  588. #if DEBUG_RX
  589. printk(KERN_DEBUG "%s: eth_poll"
  590. " napi_reschedule successed\n",
  591. dev->name);
  592. #endif
  593. qmgr_disable_irq(rxq);
  594. continue;
  595. }
  596. #if DEBUG_RX
  597. printk(KERN_DEBUG "%s: eth_poll all done\n",
  598. dev->name);
  599. #endif
  600. return received; /* all work done */
  601. }
  602. desc = rx_desc_ptr(port, n);
  603. #ifdef __ARMEB__
  604. if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
  605. phys = dma_map_single(&dev->dev, skb->data,
  606. RX_BUFF_SIZE, DMA_FROM_DEVICE);
  607. if (dma_mapping_error(&dev->dev, phys)) {
  608. dev_kfree_skb(skb);
  609. skb = NULL;
  610. }
  611. }
  612. #else
  613. skb = netdev_alloc_skb(dev,
  614. ALIGN(NET_IP_ALIGN + desc->pkt_len, 4));
  615. #endif
  616. if (!skb) {
  617. dev->stats.rx_dropped++;
  618. /* put the desc back on RX-ready queue */
  619. desc->buf_len = MAX_MRU;
  620. desc->pkt_len = 0;
  621. queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
  622. continue;
  623. }
  624. /* process received frame */
  625. #ifdef __ARMEB__
  626. temp = skb;
  627. skb = port->rx_buff_tab[n];
  628. dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
  629. RX_BUFF_SIZE, DMA_FROM_DEVICE);
  630. #else
  631. dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN,
  632. RX_BUFF_SIZE, DMA_FROM_DEVICE);
  633. memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
  634. ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
  635. #endif
  636. skb_reserve(skb, NET_IP_ALIGN);
  637. skb_put(skb, desc->pkt_len);
  638. debug_pkt(dev, "eth_poll", skb->data, skb->len);
  639. ixp_rx_timestamp(port, skb);
  640. skb->protocol = eth_type_trans(skb, dev);
  641. dev->stats.rx_packets++;
  642. dev->stats.rx_bytes += skb->len;
  643. netif_receive_skb(skb);
  644. /* put the new buffer on RX-free queue */
  645. #ifdef __ARMEB__
  646. port->rx_buff_tab[n] = temp;
  647. desc->data = phys + NET_IP_ALIGN;
  648. #endif
  649. desc->buf_len = MAX_MRU;
  650. desc->pkt_len = 0;
  651. queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
  652. received++;
  653. }
  654. #if DEBUG_RX
  655. printk(KERN_DEBUG "eth_poll(): end, not all work done\n");
  656. #endif
  657. return received; /* not all work done */
  658. }
  659. static void eth_txdone_irq(void *unused)
  660. {
  661. u32 phys;
  662. #if DEBUG_TX
  663. printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n");
  664. #endif
  665. while ((phys = qmgr_get_entry(TXDONE_QUEUE)) != 0) {
  666. u32 npe_id, n_desc;
  667. struct port *port;
  668. struct desc *desc;
  669. int start;
  670. npe_id = phys & 3;
  671. BUG_ON(npe_id >= MAX_NPES);
  672. port = npe_port_tab[npe_id];
  673. BUG_ON(!port);
  674. phys &= ~0x1F; /* mask out non-address bits */
  675. n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
  676. BUG_ON(n_desc >= TX_DESCS);
  677. desc = tx_desc_ptr(port, n_desc);
  678. debug_desc(phys, desc);
  679. if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
  680. port->netdev->stats.tx_packets++;
  681. port->netdev->stats.tx_bytes += desc->pkt_len;
  682. dma_unmap_tx(port, desc);
  683. #if DEBUG_TX
  684. printk(KERN_DEBUG "%s: eth_txdone_irq free %p\n",
  685. port->netdev->name, port->tx_buff_tab[n_desc]);
  686. #endif
  687. free_buffer_irq(port->tx_buff_tab[n_desc]);
  688. port->tx_buff_tab[n_desc] = NULL;
  689. }
  690. start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
  691. queue_put_desc(port->plat->txreadyq, phys, desc);
  692. if (start) { /* TX-ready queue was empty */
  693. #if DEBUG_TX
  694. printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n",
  695. port->netdev->name);
  696. #endif
  697. netif_wake_queue(port->netdev);
  698. }
  699. }
  700. }
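/*
 * TX: a descriptor is taken from the TX-ready queue and handed to the NPE
 * via TX_QUEUE().  On little-endian kernels the skb is copied into a
 * word-swapped bounce buffer and freed right away; on big-endian ARM the skb
 * itself is kept in tx_buff_tab[] until eth_txdone_irq() reclaims it.  When
 * the TX-ready queue drops below its low watermark the net queue is stopped,
 * then rechecked to close the race with a TX-done interrupt that may have
 * refilled it in the meantime.
 */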
  701. static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
  702. {
  703. struct port *port = netdev_priv(dev);
  704. unsigned int txreadyq = port->plat->txreadyq;
  705. int len, offset, bytes, n;
  706. void *mem;
  707. u32 phys;
  708. struct desc *desc;
  709. #if DEBUG_TX
  710. printk(KERN_DEBUG "%s: eth_xmit\n", dev->name);
  711. #endif
  712. if (unlikely(skb->len > MAX_MRU)) {
  713. dev_kfree_skb(skb);
  714. dev->stats.tx_errors++;
  715. return NETDEV_TX_OK;
  716. }
  717. debug_pkt(dev, "eth_xmit", skb->data, skb->len);
  718. len = skb->len;
  719. #ifdef __ARMEB__
  720. offset = 0; /* no need to keep alignment */
  721. bytes = len;
  722. mem = skb->data;
  723. #else
  724. offset = (int)skb->data & 3; /* keep 32-bit alignment */
  725. bytes = ALIGN(offset + len, 4);
  726. if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
  727. dev_kfree_skb(skb);
  728. dev->stats.tx_dropped++;
  729. return NETDEV_TX_OK;
  730. }
  731. memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
  732. #endif
  733. phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
  734. if (dma_mapping_error(&dev->dev, phys)) {
  735. dev_kfree_skb(skb);
  736. #ifndef __ARMEB__
  737. kfree(mem);
  738. #endif
  739. dev->stats.tx_dropped++;
  740. return NETDEV_TX_OK;
  741. }
  742. n = queue_get_desc(txreadyq, port, 1);
  743. BUG_ON(n < 0);
  744. desc = tx_desc_ptr(port, n);
  745. #ifdef __ARMEB__
  746. port->tx_buff_tab[n] = skb;
  747. #else
  748. port->tx_buff_tab[n] = mem;
  749. #endif
  750. desc->data = phys + offset;
  751. desc->buf_len = desc->pkt_len = len;
  752. /* NPE firmware pads short frames with zeros internally */
  753. wmb();
  754. queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);
  755. if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
  756. #if DEBUG_TX
  757. printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name);
  758. #endif
  759. netif_stop_queue(dev);
  760. /* we could miss TX ready interrupt */
  761. /* really empty in fact */
  762. if (!qmgr_stat_below_low_watermark(txreadyq)) {
  763. #if DEBUG_TX
  764. printk(KERN_DEBUG "%s: eth_xmit ready again\n",
  765. dev->name);
  766. #endif
  767. netif_wake_queue(dev);
  768. }
  769. }
  770. #if DEBUG_TX
  771. printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name);
  772. #endif
  773. ixp_tx_timestamp(port, skb);
  774. skb_tx_timestamp(skb);
  775. #ifndef __ARMEB__
  776. dev_kfree_skb(skb);
  777. #endif
  778. return NETDEV_TX_OK;
  779. }
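/*
 * The MAC has a single address/mask pair for multicast filtering.
 * eth_set_mcast_list() ORs together the bit positions that differ across the
 * multicast list (diffs) and programs the inverted result as the mask, so
 * differing bits become don't-care; ALLMULTI matches any address with the
 * multicast bit set, and promiscuous mode simply disables the filter.
 */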
  780. static void eth_set_mcast_list(struct net_device *dev)
  781. {
  782. struct port *port = netdev_priv(dev);
  783. struct netdev_hw_addr *ha;
  784. u8 diffs[ETH_ALEN], *addr;
  785. int i;
  786. static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
  787. if (dev->flags & IFF_ALLMULTI) {
  788. for (i = 0; i < ETH_ALEN; i++) {
  789. __raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
  790. __raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
  791. }
  792. __raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
  793. &port->regs->rx_control[0]);
  794. return;
  795. }
  796. if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) {
  797. __raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
  798. &port->regs->rx_control[0]);
  799. return;
  800. }
  801. memset(diffs, 0, ETH_ALEN);
  802. addr = NULL;
  803. netdev_for_each_mc_addr(ha, dev) {
  804. if (!addr)
  805. addr = ha->addr; /* first MAC address */
  806. for (i = 0; i < ETH_ALEN; i++)
  807. diffs[i] |= addr[i] ^ ha->addr[i];
  808. }
  809. for (i = 0; i < ETH_ALEN; i++) {
  810. __raw_writel(addr[i], &port->regs->mcast_addr[i]);
  811. __raw_writel(~diffs[i], &port->regs->mcast_mask[i]);
  812. }
  813. __raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
  814. &port->regs->rx_control[0]);
  815. }
  816. static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
  817. {
  818. struct port *port = netdev_priv(dev);
  819. if (!netif_running(dev))
  820. return -EINVAL;
  821. if (cpu_is_ixp46x() && cmd == SIOCSHWTSTAMP)
  822. return hwtstamp_ioctl(dev, req, cmd);
  823. return phy_mii_ioctl(port->phydev, req, cmd);
  824. }
  825. /* ethtool support */
  826. static void ixp4xx_get_drvinfo(struct net_device *dev,
  827. struct ethtool_drvinfo *info)
  828. {
  829. struct port *port = netdev_priv(dev);
  830. strcpy(info->driver, DRV_NAME);
  831. snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u",
  832. port->firmware[0], port->firmware[1],
  833. port->firmware[2], port->firmware[3]);
  834. strcpy(info->bus_info, "internal");
  835. }
  836. static int ixp4xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  837. {
  838. struct port *port = netdev_priv(dev);
  839. return phy_ethtool_gset(port->phydev, cmd);
  840. }
  841. static int ixp4xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  842. {
  843. struct port *port = netdev_priv(dev);
  844. return phy_ethtool_sset(port->phydev, cmd);
  845. }
  846. static int ixp4xx_nway_reset(struct net_device *dev)
  847. {
  848. struct port *port = netdev_priv(dev);
  849. return phy_start_aneg(port->phydev);
  850. }
  851. static const struct ethtool_ops ixp4xx_ethtool_ops = {
  852. .get_drvinfo = ixp4xx_get_drvinfo,
  853. .get_settings = ixp4xx_get_settings,
  854. .set_settings = ixp4xx_set_settings,
  855. .nway_reset = ixp4xx_nway_reset,
  856. .get_link = ethtool_op_get_link,
  857. };
  858. static int request_queues(struct port *port)
  859. {
  860. int err;
  861. err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0,
  862. "%s:RX-free", port->netdev->name);
  863. if (err)
  864. return err;
  865. err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0,
  866. "%s:RX", port->netdev->name);
  867. if (err)
  868. goto rel_rxfree;
  869. err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0,
  870. "%s:TX", port->netdev->name);
  871. if (err)
  872. goto rel_rx;
  873. err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
  874. "%s:TX-ready", port->netdev->name);
  875. if (err)
  876. goto rel_tx;
  877. /* TX-done queue handles skbs sent out by the NPEs */
  878. if (!ports_open) {
  879. err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0,
  880. "%s:TX-done", DRV_NAME);
  881. if (err)
  882. goto rel_txready;
  883. }
  884. return 0;
  885. rel_txready:
  886. qmgr_release_queue(port->plat->txreadyq);
  887. rel_tx:
  888. qmgr_release_queue(TX_QUEUE(port->id));
  889. rel_rx:
  890. qmgr_release_queue(port->plat->rxq);
  891. rel_rxfree:
  892. qmgr_release_queue(RXFREE_QUEUE(port->id));
  893. printk(KERN_DEBUG "%s: unable to request hardware queues\n",
  894. port->netdev->name);
  895. return err;
  896. }
  897. static void release_queues(struct port *port)
  898. {
  899. qmgr_release_queue(RXFREE_QUEUE(port->id));
  900. qmgr_release_queue(port->plat->rxq);
  901. qmgr_release_queue(TX_QUEUE(port->id));
  902. qmgr_release_queue(port->plat->txreadyq);
  903. if (!ports_open)
  904. qmgr_release_queue(TXDONE_QUEUE);
  905. }
  906. static int init_queues(struct port *port)
  907. {
  908. int i;
  909. if (!ports_open)
  910. if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
  911. POOL_ALLOC_SIZE, 32, 0)))
  912. return -ENOMEM;
  913. if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
  914. &port->desc_tab_phys)))
  915. return -ENOMEM;
  916. memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
  917. memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
  918. memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));
  919. /* Setup RX buffers */
  920. for (i = 0; i < RX_DESCS; i++) {
  921. struct desc *desc = rx_desc_ptr(port, i);
  922. buffer_t *buff; /* skb or kmalloc()ated memory */
  923. void *data;
  924. #ifdef __ARMEB__
  925. if (!(buff = netdev_alloc_skb(port->netdev, RX_BUFF_SIZE)))
  926. return -ENOMEM;
  927. data = buff->data;
  928. #else
  929. if (!(buff = kmalloc(RX_BUFF_SIZE, GFP_KERNEL)))
  930. return -ENOMEM;
  931. data = buff;
  932. #endif
  933. desc->buf_len = MAX_MRU;
  934. desc->data = dma_map_single(&port->netdev->dev, data,
  935. RX_BUFF_SIZE, DMA_FROM_DEVICE);
  936. if (dma_mapping_error(&port->netdev->dev, desc->data)) {
  937. free_buffer(buff);
  938. return -EIO;
  939. }
  940. desc->data += NET_IP_ALIGN;
  941. port->rx_buff_tab[i] = buff;
  942. }
  943. return 0;
  944. }
  945. static void destroy_queues(struct port *port)
  946. {
  947. int i;
  948. if (port->desc_tab) {
  949. for (i = 0; i < RX_DESCS; i++) {
  950. struct desc *desc = rx_desc_ptr(port, i);
  951. buffer_t *buff = port->rx_buff_tab[i];
  952. if (buff) {
  953. dma_unmap_single(&port->netdev->dev,
  954. desc->data - NET_IP_ALIGN,
  955. RX_BUFF_SIZE, DMA_FROM_DEVICE);
  956. free_buffer(buff);
  957. }
  958. }
  959. for (i = 0; i < TX_DESCS; i++) {
  960. struct desc *desc = tx_desc_ptr(port, i);
  961. buffer_t *buff = port->tx_buff_tab[i];
  962. if (buff) {
  963. dma_unmap_tx(port, desc);
  964. free_buffer(buff);
  965. }
  966. }
  967. dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
  968. port->desc_tab = NULL;
  969. }
  970. if (!ports_open && dma_pool) {
  971. dma_pool_destroy(dma_pool);
  972. dma_pool = NULL;
  973. }
  974. }
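/*
 * Bringing a port up: load and query the NPE firmware if it is not already
 * running, point all eight RX QoS classes at this port's RX queue, set the
 * port MAC address and firewall mode via NPE messages, allocate and prime
 * the descriptor queues, then program the MAC registers and enable the RX
 * and TX-done queue interrupts.
 */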
  975. static int eth_open(struct net_device *dev)
  976. {
  977. struct port *port = netdev_priv(dev);
  978. struct npe *npe = port->npe;
  979. struct msg msg;
  980. int i, err;
  981. if (!npe_running(npe)) {
  982. err = npe_load_firmware(npe, npe_name(npe), &dev->dev);
  983. if (err)
  984. return err;
  985. if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) {
  986. printk(KERN_ERR "%s: %s not responding\n", dev->name,
  987. npe_name(npe));
  988. return -EIO;
  989. }
  990. port->firmware[0] = msg.byte4;
  991. port->firmware[1] = msg.byte5;
  992. port->firmware[2] = msg.byte6;
  993. port->firmware[3] = msg.byte7;
  994. }
  995. memset(&msg, 0, sizeof(msg));
  996. msg.cmd = NPE_VLAN_SETRXQOSENTRY;
  997. msg.eth_id = port->id;
  998. msg.byte5 = port->plat->rxq | 0x80;
  999. msg.byte7 = port->plat->rxq << 4;
  1000. for (i = 0; i < 8; i++) {
  1001. msg.byte3 = i;
  1002. if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ"))
  1003. return -EIO;
  1004. }
  1005. msg.cmd = NPE_EDB_SETPORTADDRESS;
  1006. msg.eth_id = PHYSICAL_ID(port->id);
  1007. msg.byte2 = dev->dev_addr[0];
  1008. msg.byte3 = dev->dev_addr[1];
  1009. msg.byte4 = dev->dev_addr[2];
  1010. msg.byte5 = dev->dev_addr[3];
  1011. msg.byte6 = dev->dev_addr[4];
  1012. msg.byte7 = dev->dev_addr[5];
  1013. if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC"))
  1014. return -EIO;
  1015. memset(&msg, 0, sizeof(msg));
  1016. msg.cmd = NPE_FW_SETFIREWALLMODE;
  1017. msg.eth_id = port->id;
  1018. if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE"))
  1019. return -EIO;
  1020. if ((err = request_queues(port)) != 0)
  1021. return err;
  1022. if ((err = init_queues(port)) != 0) {
  1023. destroy_queues(port);
  1024. release_queues(port);
  1025. return err;
  1026. }
  1027. port->speed = 0; /* force "link up" message */
  1028. phy_start(port->phydev);
  1029. for (i = 0; i < ETH_ALEN; i++)
  1030. __raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
  1031. __raw_writel(0x08, &port->regs->random_seed);
  1032. __raw_writel(0x12, &port->regs->partial_empty_threshold);
  1033. __raw_writel(0x30, &port->regs->partial_full_threshold);
  1034. __raw_writel(0x08, &port->regs->tx_start_bytes);
  1035. __raw_writel(0x15, &port->regs->tx_deferral);
  1036. __raw_writel(0x08, &port->regs->tx_2part_deferral[0]);
  1037. __raw_writel(0x07, &port->regs->tx_2part_deferral[1]);
  1038. __raw_writel(0x80, &port->regs->slot_time);
  1039. __raw_writel(0x01, &port->regs->int_clock_threshold);
  1040. /* Populate queues with buffers, no failure after this point */
  1041. for (i = 0; i < TX_DESCS; i++)
  1042. queue_put_desc(port->plat->txreadyq,
  1043. tx_desc_phys(port, i), tx_desc_ptr(port, i));
  1044. for (i = 0; i < RX_DESCS; i++)
  1045. queue_put_desc(RXFREE_QUEUE(port->id),
  1046. rx_desc_phys(port, i), rx_desc_ptr(port, i));
  1047. __raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]);
  1048. __raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]);
  1049. __raw_writel(0, &port->regs->rx_control[1]);
  1050. __raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);
  1051. napi_enable(&port->napi);
  1052. eth_set_mcast_list(dev);
  1053. netif_start_queue(dev);
  1054. qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
  1055. eth_rx_irq, dev);
  1056. if (!ports_open) {
  1057. qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY,
  1058. eth_txdone_irq, NULL);
  1059. qmgr_enable_irq(TXDONE_QUEUE);
  1060. }
  1061. ports_open++;
  1062. /* we may already have RX data, enables IRQ */
  1063. napi_schedule(&port->napi);
  1064. return 0;
  1065. }
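/*
 * Shutting down has to recover every buffer still owned by the NPE: the port
 * is switched into loopback and, whenever the TX queue runs empty, a dummy
 * one-byte descriptor is injected so the NPE keeps cycling and hands back the
 * outstanding RX buffers; the loop gives up after MAX_CLOSE_WAIT iterations.
 * TX descriptors are then reclaimed from the TX and TX-ready queues the same
 * way before loopback is disabled again.
 */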
  1066. static int eth_close(struct net_device *dev)
  1067. {
  1068. struct port *port = netdev_priv(dev);
  1069. struct msg msg;
  1070. int buffs = RX_DESCS; /* allocated RX buffers */
  1071. int i;
  1072. ports_open--;
  1073. qmgr_disable_irq(port->plat->rxq);
  1074. napi_disable(&port->napi);
  1075. netif_stop_queue(dev);
  1076. while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0)
  1077. buffs--;
  1078. memset(&msg, 0, sizeof(msg));
  1079. msg.cmd = NPE_SETLOOPBACK_MODE;
  1080. msg.eth_id = port->id;
  1081. msg.byte3 = 1;
  1082. if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
  1083. printk(KERN_CRIT "%s: unable to enable loopback\n", dev->name);
  1084. i = 0;
  1085. do { /* drain RX buffers */
  1086. while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
  1087. buffs--;
  1088. if (!buffs)
  1089. break;
  1090. if (qmgr_stat_empty(TX_QUEUE(port->id))) {
  1091. /* we have to inject some packet */
  1092. struct desc *desc;
  1093. u32 phys;
  1094. int n = queue_get_desc(port->plat->txreadyq, port, 1);
  1095. BUG_ON(n < 0);
  1096. desc = tx_desc_ptr(port, n);
  1097. phys = tx_desc_phys(port, n);
  1098. desc->buf_len = desc->pkt_len = 1;
  1099. wmb();
  1100. queue_put_desc(TX_QUEUE(port->id), phys, desc);
  1101. }
  1102. udelay(1);
  1103. } while (++i < MAX_CLOSE_WAIT);
  1104. if (buffs)
  1105. printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
  1106. " left in NPE\n", dev->name, buffs);
  1107. #if DEBUG_CLOSE
  1108. if (!buffs)
  1109. printk(KERN_DEBUG "Draining RX queue took %i cycles\n", i);
  1110. #endif
  1111. buffs = TX_DESCS;
  1112. while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0)
  1113. buffs--; /* cancel TX */
  1114. i = 0;
  1115. do {
  1116. while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
  1117. buffs--;
  1118. if (!buffs)
  1119. break;
  1120. } while (++i < MAX_CLOSE_WAIT);
  1121. if (buffs)
  1122. printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
  1123. "left in NPE\n", dev->name, buffs);
  1124. #if DEBUG_CLOSE
  1125. if (!buffs)
  1126. printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
  1127. #endif
  1128. msg.byte3 = 0;
  1129. if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
  1130. printk(KERN_CRIT "%s: unable to disable loopback\n",
  1131. dev->name);
  1132. phy_stop(port->phydev);
  1133. if (!ports_open)
  1134. qmgr_disable_irq(TXDONE_QUEUE);
  1135. destroy_queues(port);
  1136. release_queues(port);
  1137. return 0;
  1138. }
  1139. static const struct net_device_ops ixp4xx_netdev_ops = {
  1140. .ndo_open = eth_open,
  1141. .ndo_stop = eth_close,
  1142. .ndo_start_xmit = eth_xmit,
  1143. .ndo_set_rx_mode = eth_set_mcast_list,
  1144. .ndo_do_ioctl = eth_ioctl,
  1145. .ndo_change_mtu = eth_change_mtu,
  1146. .ndo_set_mac_address = eth_mac_addr,
  1147. .ndo_validate_addr = eth_validate_addr,
  1148. };
  1149. static int __devinit eth_init_one(struct platform_device *pdev)
  1150. {
  1151. struct port *port;
  1152. struct net_device *dev;
  1153. struct eth_plat_info *plat = pdev->dev.platform_data;
  1154. u32 regs_phys;
  1155. char phy_id[MII_BUS_ID_SIZE + 3];
  1156. int err;
  1157. if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter))) {
  1158. pr_err("ixp4xx_eth: bad ptp filter\n");
  1159. return -EINVAL;
  1160. }
  1161. if (!(dev = alloc_etherdev(sizeof(struct port))))
  1162. return -ENOMEM;
  1163. SET_NETDEV_DEV(dev, &pdev->dev);
  1164. port = netdev_priv(dev);
  1165. port->netdev = dev;
  1166. port->id = pdev->id;
  1167. switch (port->id) {
  1168. case IXP4XX_ETH_NPEA:
  1169. port->regs = (struct eth_regs __iomem *)IXP4XX_EthA_BASE_VIRT;
  1170. regs_phys = IXP4XX_EthA_BASE_PHYS;
  1171. break;
  1172. case IXP4XX_ETH_NPEB:
  1173. port->regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
  1174. regs_phys = IXP4XX_EthB_BASE_PHYS;
  1175. break;
  1176. case IXP4XX_ETH_NPEC:
  1177. port->regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
  1178. regs_phys = IXP4XX_EthC_BASE_PHYS;
  1179. break;
  1180. default:
  1181. err = -ENODEV;
  1182. goto err_free;
  1183. }
  1184. dev->netdev_ops = &ixp4xx_netdev_ops;
  1185. dev->ethtool_ops = &ixp4xx_ethtool_ops;
  1186. dev->tx_queue_len = 100;
  1187. netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT);
  1188. if (!(port->npe = npe_request(NPE_ID(port->id)))) {
  1189. err = -EIO;
  1190. goto err_free;
  1191. }
  1192. port->mem_res = request_mem_region(regs_phys, REGS_SIZE, dev->name);
  1193. if (!port->mem_res) {
  1194. err = -EBUSY;
  1195. goto err_npe_rel;
  1196. }
  1197. port->plat = plat;
  1198. npe_port_tab[NPE_ID(port->id)] = port;
  1199. memcpy(dev->dev_addr, plat->hwaddr, ETH_ALEN);
  1200. platform_set_drvdata(pdev, dev);
  1201. __raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET,
  1202. &port->regs->core_control);
  1203. udelay(50);
  1204. __raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
  1205. udelay(50);
  1206. snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
  1207. mdio_bus->id, plat->phy);
  1208. port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0,
  1209. PHY_INTERFACE_MODE_MII);
  1210. if (IS_ERR(port->phydev)) {
  1211. err = PTR_ERR(port->phydev);
  1212. goto err_free_mem;
  1213. }
  1214. port->phydev->irq = PHY_POLL;
  1215. if ((err = register_netdev(dev)))
  1216. goto err_phy_dis;
  1217. printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy,
  1218. npe_name(port->npe));
  1219. return 0;
  1220. err_phy_dis:
  1221. phy_disconnect(port->phydev);
  1222. err_free_mem:
  1223. npe_port_tab[NPE_ID(port->id)] = NULL;
  1224. platform_set_drvdata(pdev, NULL);
  1225. release_resource(port->mem_res);
  1226. err_npe_rel:
  1227. npe_release(port->npe);
  1228. err_free:
  1229. free_netdev(dev);
  1230. return err;
  1231. }
  1232. static int __devexit eth_remove_one(struct platform_device *pdev)
  1233. {
  1234. struct net_device *dev = platform_get_drvdata(pdev);
  1235. struct port *port = netdev_priv(dev);
  1236. unregister_netdev(dev);
  1237. phy_disconnect(port->phydev);
  1238. npe_port_tab[NPE_ID(port->id)] = NULL;
  1239. platform_set_drvdata(pdev, NULL);
  1240. npe_release(port->npe);
  1241. release_resource(port->mem_res);
  1242. free_netdev(dev);
  1243. return 0;
  1244. }
  1245. static struct platform_driver ixp4xx_eth_driver = {
  1246. .driver.name = DRV_NAME,
  1247. .probe = eth_init_one,
  1248. .remove = eth_remove_one,
  1249. };
  1250. static int __init eth_init_module(void)
  1251. {
  1252. int err;
  1253. if ((err = ixp4xx_mdio_register()))
  1254. return err;
  1255. return platform_driver_register(&ixp4xx_eth_driver);
  1256. }
  1257. static void __exit eth_cleanup_module(void)
  1258. {
  1259. platform_driver_unregister(&ixp4xx_eth_driver);
  1260. ixp4xx_mdio_remove();
  1261. }
  1262. MODULE_AUTHOR("Krzysztof Halasa");
  1263. MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver");
  1264. MODULE_LICENSE("GPL v2");
  1265. MODULE_ALIAS("platform:ixp4xx_eth");
  1266. module_init(eth_init_module);
  1267. module_exit(eth_cleanup_module);