ixp4xx_eth.c

/*
 * Intel IXP4xx Ethernet driver for Linux
 *
 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * Ethernet port config (0x00 is not present on IXP42X):
 *
 * logical port		0x00		0x10		0x20
 * NPE			0 (NPE-A)	1 (NPE-B)	2 (NPE-C)
 * physical PortId	2		0		1
 * TX queue		23		24		25
 * RX-free queue	26		27		28
 * TX-done queue is always 31, per-port RX and TX-ready queues are configurable
 *
 *
 * Queue entries:
 * bits 0 -> 1	- NPE ID (RX and TX-done)
 * bits 0 -> 2	- priority (TX, per 802.1D)
 * bits 3 -> 4	- port ID (user-set?)
 * bits 5 -> 31	- physical descriptor address
 */
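
/*
 * Illustrative example (not in the original comment): an RX or TX-done
 * queue entry of 0x01A83CE1 decodes to NPE ID 1 (bits 0-1) and descriptor
 * address 0x01A83CE0 (bits 5-31, i.e. the entry with the low five bits
 * masked off, as queue_get_desc() does below).
 */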

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/ptp_classify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <mach/ixp46x_ts.h>
#include <mach/npe.h>
#include <mach/qmgr.h>

#define DEBUG_DESC		0
#define DEBUG_RX		0
#define DEBUG_TX		0
#define DEBUG_PKT_BYTES		0
#define DEBUG_MDIO		0
#define DEBUG_CLOSE		0

#define DRV_NAME		"ixp4xx_eth"

#define MAX_NPES		3

#define RX_DESCS		64	/* also length of all RX queues */
#define TX_DESCS		16	/* also length of all TX queues */
#define TXDONE_QUEUE_LEN	64	/* dwords */

#define POOL_ALLOC_SIZE		(sizeof(struct desc) * (RX_DESCS + TX_DESCS))
#define REGS_SIZE		0x1000
#define MAX_MRU			1536 /* 0x600 */
#define RX_BUFF_SIZE		ALIGN((NET_IP_ALIGN) + MAX_MRU, 4)

#define NAPI_WEIGHT		16
#define MDIO_INTERVAL		(3 * HZ)
#define MAX_MDIO_RETRIES	100 /* microseconds, typically 30 cycles */
#define MAX_CLOSE_WAIT		1000 /* microseconds, typically 2-3 cycles */

#define NPE_ID(port_id)		((port_id) >> 4)
#define PHYSICAL_ID(port_id)	((NPE_ID(port_id) + 2) % 3)
#define TX_QUEUE(port_id)	(NPE_ID(port_id) + 23)
#define RXFREE_QUEUE(port_id)	(NPE_ID(port_id) + 26)
#define TXDONE_QUEUE		31

#define PTP_SLAVE_MODE		1
#define PTP_MASTER_MODE		2
#define PORT2CHANNEL(p)		NPE_ID(p->id)
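
/*
 * Worked mapping per the table above (illustrative): for logical port 0x10,
 * NPE_ID() = 1 (NPE-B), PHYSICAL_ID() = 0, TX_QUEUE() = 24 and
 * RXFREE_QUEUE() = 27.
 */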

/* TX Control Registers */
#define TX_CNTRL0_TX_EN		0x01
#define TX_CNTRL0_HALFDUPLEX	0x02
#define TX_CNTRL0_RETRY		0x04
#define TX_CNTRL0_PAD_EN	0x08
#define TX_CNTRL0_APPEND_FCS	0x10
#define TX_CNTRL0_2DEFER	0x20
#define TX_CNTRL0_RMII		0x40 /* reduced MII */
#define TX_CNTRL1_RETRIES	0x0F /* 4 bits */

/* RX Control Registers */
#define RX_CNTRL0_RX_EN		0x01
#define RX_CNTRL0_PADSTRIP_EN	0x02
#define RX_CNTRL0_SEND_FCS	0x04
#define RX_CNTRL0_PAUSE_EN	0x08
#define RX_CNTRL0_LOOP_EN	0x10
#define RX_CNTRL0_ADDR_FLTR_EN	0x20
#define RX_CNTRL0_RX_RUNT_EN	0x40
#define RX_CNTRL0_BCAST_DIS	0x80
#define RX_CNTRL1_DEFER_EN	0x01

/* Core Control Register */
#define CORE_RESET		0x01
#define CORE_RX_FIFO_FLUSH	0x02
#define CORE_TX_FIFO_FLUSH	0x04
#define CORE_SEND_JAM		0x08
#define CORE_MDC_EN		0x10 /* MDIO using NPE-B ETH-0 only */

#define DEFAULT_TX_CNTRL0	(TX_CNTRL0_TX_EN | TX_CNTRL0_RETRY |	\
				 TX_CNTRL0_PAD_EN | TX_CNTRL0_APPEND_FCS | \
				 TX_CNTRL0_2DEFER)
#define DEFAULT_RX_CNTRL0	RX_CNTRL0_RX_EN
#define DEFAULT_CORE_CNTRL	CORE_MDC_EN

/* NPE message codes */
#define NPE_GETSTATUS			0x00
#define NPE_EDB_SETPORTADDRESS		0x01
#define NPE_EDB_GETMACADDRESSDATABASE	0x02
#define NPE_EDB_SETMACADDRESSSDATABASE	0x03
#define NPE_GETSTATS			0x04
#define NPE_RESETSTATS			0x05
#define NPE_SETMAXFRAMELENGTHS		0x06
#define NPE_VLAN_SETRXTAGMODE		0x07
#define NPE_VLAN_SETDEFAULTRXVID	0x08
#define NPE_VLAN_SETPORTVLANTABLEENTRY	0x09
#define NPE_VLAN_SETPORTVLANTABLERANGE	0x0A
#define NPE_VLAN_SETRXQOSENTRY		0x0B
#define NPE_VLAN_SETPORTIDEXTRACTIONMODE 0x0C
#define NPE_STP_SETBLOCKINGSTATE	0x0D
#define NPE_FW_SETFIREWALLMODE		0x0E
#define NPE_PC_SETFRAMECONTROLDURATIONID 0x0F
#define NPE_PC_SETAPMACTABLE		0x11
#define NPE_SETLOOPBACK_MODE		0x12
#define NPE_PC_SETBSSIDTABLE		0x13
#define NPE_ADDRESS_FILTER_CONFIG	0x14
#define NPE_APPENDFCSCONFIG		0x15
#define NPE_NOTIFY_MAC_RECOVERY_DONE	0x16
#define NPE_MAC_RECOVERY_START		0x17

#ifdef __ARMEB__
typedef struct sk_buff buffer_t;
#define free_buffer dev_kfree_skb
#define free_buffer_irq dev_kfree_skb_irq
#else
typedef void buffer_t;
#define free_buffer kfree
#define free_buffer_irq kfree
#endif
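
/*
 * Note: on big-endian builds RX/TX buffers are sk_buffs handed to the
 * hardware directly.  On little-endian builds every frame is staged
 * through a kmalloc()ed buffer and copied with 32-bit byte swapping
 * (see memcpy_swab32() below), hence the two buffer_t flavours.
 */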

struct eth_regs {
	u32 tx_control[2], __res1[2];		/* 000 */
	u32 rx_control[2], __res2[2];		/* 010 */
	u32 random_seed, __res3[3];		/* 020 */
	u32 partial_empty_threshold, __res4;	/* 030 */
	u32 partial_full_threshold, __res5;	/* 038 */
	u32 tx_start_bytes, __res6[3];		/* 040 */
	u32 tx_deferral, rx_deferral, __res7[2];/* 050 */
	u32 tx_2part_deferral[2], __res8[2];	/* 060 */
	u32 slot_time, __res9[3];		/* 070 */
	u32 mdio_command[4];			/* 080 */
	u32 mdio_status[4];			/* 090 */
	u32 mcast_mask[6], __res10[2];		/* 0A0 */
	u32 mcast_addr[6], __res11[2];		/* 0C0 */
	u32 int_clock_threshold, __res12[3];	/* 0E0 */
	u32 hw_addr[6], __res13[61];		/* 0F0 */
	u32 core_control;			/* 1FC */
};

struct port {
	struct resource *mem_res;
	struct eth_regs __iomem *regs;
	struct npe *npe;
	struct net_device *netdev;
	struct napi_struct napi;
	struct eth_plat_info *plat;
	buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
	struct desc *desc_tab;	/* coherent */
	u32 desc_tab_phys;
	int id;			/* logical port ID */
	int speed, duplex;
	u8 firmware[4];
	int hwts_tx_en;
	int hwts_rx_en;
};

/* NPE message structure */
struct msg {
#ifdef __ARMEB__
	u8 cmd, eth_id, byte2, byte3;
	u8 byte4, byte5, byte6, byte7;
#else
	u8 byte3, byte2, eth_id, cmd;
	u8 byte7, byte6, byte5, byte4;
#endif
};

/* Ethernet packet descriptor */
struct desc {
	u32 next;		/* pointer to next buffer, unused */

#ifdef __ARMEB__
	u16 buf_len;		/* buffer length */
	u16 pkt_len;		/* packet length */
	u32 data;		/* pointer to data buffer in RAM */
	u8 dest_id;
	u8 src_id;
	u16 flags;
	u8 qos;
	u8 padlen;
	u16 vlan_tci;
#else
	u16 pkt_len;		/* packet length */
	u16 buf_len;		/* buffer length */
	u32 data;		/* pointer to data buffer in RAM */
	u16 flags;
	u8 src_id;
	u8 dest_id;
	u16 vlan_tci;
	u8 padlen;
	u8 qos;
#endif

#ifdef __ARMEB__
	u8 dst_mac_0, dst_mac_1, dst_mac_2, dst_mac_3;
	u8 dst_mac_4, dst_mac_5, src_mac_0, src_mac_1;
	u8 src_mac_2, src_mac_3, src_mac_4, src_mac_5;
#else
	u8 dst_mac_3, dst_mac_2, dst_mac_1, dst_mac_0;
	u8 src_mac_1, src_mac_0, dst_mac_5, dst_mac_4;
	u8 src_mac_5, src_mac_4, src_mac_3, src_mac_2;
#endif
};

#define rx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 (n) * sizeof(struct desc))
#define rx_desc_ptr(port, n)	(&(port)->desc_tab[n])

#define tx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 ((n) + RX_DESCS) * sizeof(struct desc))
#define tx_desc_ptr(port, n)	(&(port)->desc_tab[(n) + RX_DESCS])
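
/*
 * Each port's descriptors live in one coherent block: RX_DESCS RX
 * descriptors followed by TX_DESCS TX descriptors (POOL_ALLOC_SIZE), so
 * the helpers above convert between queue-entry physical addresses and
 * table indices.
 */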

#ifndef __ARMEB__
static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
{
	int i;
	for (i = 0; i < cnt; i++)
		dest[i] = swab32(src[i]);
}
#endif

static spinlock_t mdio_lock;
static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
static struct mii_bus *mdio_bus;
static int ports_open;
static struct port *npe_port_tab[MAX_NPES];
static struct dma_pool *dma_pool;

static int ixp_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
{
	u8 *data = skb->data;
	unsigned int offset;
	u16 *hi, *id;
	u32 lo;

	if (ptp_classify_raw(skb) != PTP_CLASS_V1_IPV4)
		return 0;

	offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;

	if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(seqid))
		return 0;

	hi = (u16 *)(data + offset + OFF_PTP_SOURCE_UUID);
	id = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);

	memcpy(&lo, &hi[1], sizeof(lo));

	return (uid_hi == ntohs(*hi) &&
		uid_lo == ntohl(lo) &&
		seqid == ntohs(*id));
}

static void ixp_rx_timestamp(struct port *port, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamps;
	struct ixp46x_ts_regs *regs;
	u64 ns;
	u32 ch, hi, lo, val;
	u16 uid, seq;

	if (!port->hwts_rx_en)
		return;

	ch = PORT2CHANNEL(port);

	regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;

	val = __raw_readl(&regs->channel[ch].ch_event);

	if (!(val & RX_SNAPSHOT_LOCKED))
		return;

	lo = __raw_readl(&regs->channel[ch].src_uuid_lo);
	hi = __raw_readl(&regs->channel[ch].src_uuid_hi);

	uid = hi & 0xffff;
	seq = (hi >> 16) & 0xffff;

	if (!ixp_ptp_match(skb, htons(uid), htonl(lo), htons(seq)))
		goto out;

	lo = __raw_readl(&regs->channel[ch].rx_snap_lo);
	hi = __raw_readl(&regs->channel[ch].rx_snap_hi);
	ns = ((u64) hi) << 32;
	ns |= lo;
	ns <<= TICKS_NS_SHIFT;

	shhwtstamps = skb_hwtstamps(skb);
	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
out:
	__raw_writel(RX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}

static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamps;
	struct ixp46x_ts_regs *regs;
	struct skb_shared_info *shtx;
	u64 ns;
	u32 ch, cnt, hi, lo, val;

	shtx = skb_shinfo(skb);
	if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && port->hwts_tx_en))
		shtx->tx_flags |= SKBTX_IN_PROGRESS;
	else
		return;

	ch = PORT2CHANNEL(port);

	regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;

	/*
	 * This really stinks, but we have to poll for the Tx time stamp.
	 * Usually, the time stamp is ready after 4 to 6 microseconds.
	 */
	for (cnt = 0; cnt < 100; cnt++) {
		val = __raw_readl(&regs->channel[ch].ch_event);
		if (val & TX_SNAPSHOT_LOCKED)
			break;
		udelay(1);
	}
	if (!(val & TX_SNAPSHOT_LOCKED)) {
		shtx->tx_flags &= ~SKBTX_IN_PROGRESS;
		return;
	}

	lo = __raw_readl(&regs->channel[ch].tx_snap_lo);
	hi = __raw_readl(&regs->channel[ch].tx_snap_hi);
	ns = ((u64) hi) << 32;
	ns |= lo;
	ns <<= TICKS_NS_SHIFT;

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &shhwtstamps);

	__raw_writel(TX_SNAPSHOT_LOCKED, &regs->channel[ch].ch_event);
}

static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	struct ixp46x_ts_regs *regs;
	struct port *port = netdev_priv(netdev);
	int ch;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags) /* reserved for future extensions */
		return -EINVAL;

	ch = PORT2CHANNEL(port);
	regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT;

	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		port->hwts_rx_en = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
		port->hwts_rx_en = PTP_SLAVE_MODE;
		__raw_writel(0, &regs->channel[ch].ch_control);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		port->hwts_rx_en = PTP_MASTER_MODE;
		__raw_writel(MASTER_MODE, &regs->channel[ch].ch_control);
		break;
	default:
		return -ERANGE;
	}

	port->hwts_tx_en = cfg.tx_type == HWTSTAMP_TX_ON;

	/* Clear out any old time stamps. */
	__raw_writel(TX_SNAPSHOT_LOCKED | RX_SNAPSHOT_LOCKED,
		     &regs->channel[ch].ch_event);

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static int hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	struct port *port = netdev_priv(netdev);

	cfg.flags = 0;
	cfg.tx_type = port->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;

	switch (port->hwts_rx_en) {
	case 0:
		cfg.rx_filter = HWTSTAMP_FILTER_NONE;
		break;
	case PTP_SLAVE_MODE:
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
		break;
	case PTP_MASTER_MODE:
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
		break;
	default:
		WARN_ON_ONCE(1);
		return -ERANGE;
	}

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
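
/*
 * The MDIO command and status words are spread across four byte-wide
 * registers; bit 7 of mdio_command[3] doubles as the GO/busy flag that
 * ixp4xx_mdio_cmd() polls below.
 */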

static int ixp4xx_mdio_cmd(struct mii_bus *bus, int phy_id, int location,
			   int write, u16 cmd)
{
	int cycles = 0;

	if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
		printk(KERN_ERR "%s: MII not ready to transmit\n", bus->name);
		return -1;
	}

	if (write) {
		__raw_writel(cmd & 0xFF, &mdio_regs->mdio_command[0]);
		__raw_writel(cmd >> 8, &mdio_regs->mdio_command[1]);
	}
	__raw_writel(((phy_id << 5) | location) & 0xFF,
		     &mdio_regs->mdio_command[2]);
	__raw_writel((phy_id >> 3) | (write << 2) | 0x80 /* GO */,
		     &mdio_regs->mdio_command[3]);

	while ((cycles < MAX_MDIO_RETRIES) &&
	       (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80)) {
		udelay(1);
		cycles++;
	}

	if (cycles == MAX_MDIO_RETRIES) {
		printk(KERN_ERR "%s #%i: MII write failed\n", bus->name,
		       phy_id);
		return -1;
	}

#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: mdio_%s() took %i cycles\n", bus->name,
	       phy_id, write ? "write" : "read", cycles);
#endif

	if (write)
		return 0;

	if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
#if DEBUG_MDIO
		printk(KERN_DEBUG "%s #%i: MII read failed\n", bus->name,
		       phy_id);
#endif
		return 0xFFFF; /* don't return error */
	}
	return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
		((__raw_readl(&mdio_regs->mdio_status[1]) & 0xFF) << 8);
}

static int ixp4xx_mdio_read(struct mii_bus *bus, int phy_id, int location)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdio_lock, flags);
	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 0, 0);
	spin_unlock_irqrestore(&mdio_lock, flags);
#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: MII read [%i] -> 0x%X\n", bus->name,
	       phy_id, location, ret);
#endif
	return ret;
}

static int ixp4xx_mdio_write(struct mii_bus *bus, int phy_id, int location,
			     u16 val)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mdio_lock, flags);
	ret = ixp4xx_mdio_cmd(bus, phy_id, location, 1, val);
	spin_unlock_irqrestore(&mdio_lock, flags);
#if DEBUG_MDIO
	printk(KERN_DEBUG "%s #%i: MII write [%i] <- 0x%X, err = %i\n",
	       bus->name, phy_id, location, val, ret);
#endif
	return ret;
}

static int ixp4xx_mdio_register(void)
{
	int err;

	if (!(mdio_bus = mdiobus_alloc()))
		return -ENOMEM;

	if (cpu_is_ixp43x()) {
		/* IXP43x lacks NPE-B and uses NPE-C for MII PHY access */
		if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEC_ETH))
			return -ENODEV;
		mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
	} else {
		/* All MII PHY accesses use NPE-B Ethernet registers */
		if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
			return -ENODEV;
		mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
	}

	__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
	spin_lock_init(&mdio_lock);
	mdio_bus->name = "IXP4xx MII Bus";
	mdio_bus->read = &ixp4xx_mdio_read;
	mdio_bus->write = &ixp4xx_mdio_write;
	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "ixp4xx-eth-0");

	if ((err = mdiobus_register(mdio_bus)))
		mdiobus_free(mdio_bus);
	return err;
}

static void ixp4xx_mdio_remove(void)
{
	mdiobus_unregister(mdio_bus);
	mdiobus_free(mdio_bus);
}

static void ixp4xx_adjust_link(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;

	if (!phydev->link) {
		if (port->speed) {
			port->speed = 0;
			printk(KERN_INFO "%s: link down\n", dev->name);
		}
		return;
	}

	if (port->speed == phydev->speed && port->duplex == phydev->duplex)
		return;

	port->speed = phydev->speed;
	port->duplex = phydev->duplex;

	if (port->duplex)
		__raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
			     &port->regs->tx_control[0]);
	else
		__raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
			     &port->regs->tx_control[0]);

	printk(KERN_INFO "%s: link up, speed %u Mb/s, %s duplex\n",
	       dev->name, port->speed, port->duplex ? "full" : "half");
}

static inline void debug_pkt(struct net_device *dev, const char *func,
			     u8 *data, int len)
{
#if DEBUG_PKT_BYTES
	int i;

	printk(KERN_DEBUG "%s: %s(%i) ", dev->name, func, len);
	for (i = 0; i < len; i++) {
		if (i >= DEBUG_PKT_BYTES)
			break;
		printk("%s%02X",
		       ((i == 6) || (i == 12) || (i >= 14)) ? " " : "",
		       data[i]);
	}
	printk("\n");
#endif
}

static inline void debug_desc(u32 phys, struct desc *desc)
{
#if DEBUG_DESC
	printk(KERN_DEBUG "%X: %X %3X %3X %08X %2X < %2X %4X %X"
	       " %X %X %02X%02X%02X%02X%02X%02X < %02X%02X%02X%02X%02X%02X\n",
	       phys, desc->next, desc->buf_len, desc->pkt_len,
	       desc->data, desc->dest_id, desc->src_id, desc->flags,
	       desc->qos, desc->padlen, desc->vlan_tci,
	       desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2,
	       desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5,
	       desc->src_mac_0, desc->src_mac_1, desc->src_mac_2,
	       desc->src_mac_3, desc->src_mac_4, desc->src_mac_5);
#endif
}

static inline int queue_get_desc(unsigned int queue, struct port *port,
				 int is_tx)
{
	u32 phys, tab_phys, n_desc;
	struct desc *tab;

	if (!(phys = qmgr_get_entry(queue)))
		return -1;

	phys &= ~0x1F; /* mask out non-address bits */
	tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
	tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
	n_desc = (phys - tab_phys) / sizeof(struct desc);
	BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
	debug_desc(phys, &tab[n_desc]);
	BUG_ON(tab[n_desc].next);
	return n_desc;
}

static inline void queue_put_desc(unsigned int queue, u32 phys,
				  struct desc *desc)
{
	debug_desc(phys, desc);
	BUG_ON(phys & 0x1F);
	qmgr_put_entry(queue, phys);
	/* Don't check for queue overflow here, we've allocated sufficient
	   length and queues >= 32 don't support this check anyway. */
}
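
/* On little-endian builds the TX data was mapped in eth_xmit() from a
   32-bit aligned bounce buffer with a length rounded up to 4 bytes, so
   undo the mapping with the same rounded address and length. */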
static inline void dma_unmap_tx(struct port *port, struct desc *desc)
{
#ifdef __ARMEB__
	dma_unmap_single(&port->netdev->dev, desc->data,
			 desc->buf_len, DMA_TO_DEVICE);
#else
	dma_unmap_single(&port->netdev->dev, desc->data & ~3,
			 ALIGN((desc->data & 3) + desc->buf_len, 4),
			 DMA_TO_DEVICE);
#endif
}

static void eth_rx_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = netdev_priv(dev);

#if DEBUG_RX
	printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
#endif
	qmgr_disable_irq(port->plat->rxq);
	napi_schedule(&port->napi);
}

static int eth_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
	int received = 0;

#if DEBUG_RX
	printk(KERN_DEBUG "%s: eth_poll\n", dev->name);
#endif

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
#if DEBUG_RX
			printk(KERN_DEBUG "%s: eth_poll napi_complete\n",
			       dev->name);
#endif
			napi_complete(napi);
			qmgr_enable_irq(rxq);
			if (!qmgr_stat_below_low_watermark(rxq) &&
			    napi_reschedule(napi)) { /* not empty again */
#if DEBUG_RX
				printk(KERN_DEBUG "%s: eth_poll napi_reschedule succeeded\n",
				       dev->name);
#endif
				qmgr_disable_irq(rxq);
				continue;
			}
#if DEBUG_RX
			printk(KERN_DEBUG "%s: eth_poll all done\n",
			       dev->name);
#endif
			return received; /* all work done */
		}

		desc = rx_desc_ptr(port, n);

#ifdef __ARMEB__
		if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
			phys = dma_map_single(&dev->dev, skb->data,
					      RX_BUFF_SIZE, DMA_FROM_DEVICE);
			if (dma_mapping_error(&dev->dev, phys)) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
#else
		skb = netdev_alloc_skb(dev,
				       ALIGN(NET_IP_ALIGN + desc->pkt_len, 4));
#endif

		if (!skb) {
			dev->stats.rx_dropped++;
			/* put the desc back on the RX-free queue */
			desc->buf_len = MAX_MRU;
			desc->pkt_len = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
				 RX_BUFF_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN,
					RX_BUFF_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
#endif
		skb_reserve(skb, NET_IP_ALIGN);
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "eth_poll", skb->data, skb->len);

		ixp_rx_timestamp(port, skb);
		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys + NET_IP_ALIGN;
#endif
		desc->buf_len = MAX_MRU;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}

#if DEBUG_RX
	printk(KERN_DEBUG "eth_poll(): end, not all work done\n");
#endif
	return received; /* not all work done */
}

static void eth_txdone_irq(void *unused)
{
	u32 phys;

#if DEBUG_TX
	printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n");
#endif
	while ((phys = qmgr_get_entry(TXDONE_QUEUE)) != 0) {
		u32 npe_id, n_desc;
		struct port *port;
		struct desc *desc;
		int start;

		npe_id = phys & 3;
		BUG_ON(npe_id >= MAX_NPES);
		port = npe_port_tab[npe_id];
		BUG_ON(!port);
		phys &= ~0x1F; /* mask out non-address bits */
		n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
		BUG_ON(n_desc >= TX_DESCS);
		desc = tx_desc_ptr(port, n_desc);
		debug_desc(phys, desc);

		if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
			port->netdev->stats.tx_packets++;
			port->netdev->stats.tx_bytes += desc->pkt_len;

			dma_unmap_tx(port, desc);
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq free %p\n",
			       port->netdev->name, port->tx_buff_tab[n_desc]);
#endif
			free_buffer_irq(port->tx_buff_tab[n_desc]);
			port->tx_buff_tab[n_desc] = NULL;
		}

		start = qmgr_stat_below_low_watermark(port->plat->txreadyq);
		queue_put_desc(port->plat->txreadyq, phys, desc);
		if (start) { /* TX-ready queue was empty */
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n",
			       port->netdev->name);
#endif
			netif_wake_queue(port->netdev);
		}
	}
}

static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	unsigned int txreadyq = port->plat->txreadyq;
	int len, offset, bytes, n;
	void *mem;
	u32 phys;
	struct desc *desc;

#if DEBUG_TX
	printk(KERN_DEBUG "%s: eth_xmit\n", dev->name);
#endif

	if (unlikely(skb->len > MAX_MRU)) {
		dev_kfree_skb(skb);
		dev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}

	debug_pkt(dev, "eth_xmit", skb->data, skb->len);

	len = skb->len;
#ifdef __ARMEB__
	offset = 0; /* no need to keep alignment */
	bytes = len;
	mem = skb->data;
#else
	offset = (int)skb->data & 3; /* keep 32-bit alignment */
	bytes = ALIGN(offset + len, 4);
	if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
#endif

	phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(&dev->dev, phys)) {
		dev_kfree_skb(skb);
#ifndef __ARMEB__
		kfree(mem);
#endif
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	n = queue_get_desc(txreadyq, port, 1);
	BUG_ON(n < 0);
	desc = tx_desc_ptr(port, n);

#ifdef __ARMEB__
	port->tx_buff_tab[n] = skb;
#else
	port->tx_buff_tab[n] = mem;
#endif
	desc->data = phys + offset;
	desc->buf_len = desc->pkt_len = len;

	/* NPE firmware pads short frames with zeros internally */
	wmb();
	queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);

	if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
		printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name);
#endif
		netif_stop_queue(dev);
		/* we could miss TX ready interrupt */
		/* really empty in fact */
		if (!qmgr_stat_below_low_watermark(txreadyq)) {
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_xmit ready again\n",
			       dev->name);
#endif
			netif_wake_queue(dev);
		}
	}

#if DEBUG_TX
	printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name);
#endif

	ixp_tx_timestamp(port, skb);
	skb_tx_timestamp(skb);

#ifndef __ARMEB__
	dev_kfree_skb(skb);
#endif
	return NETDEV_TX_OK;
}

static void eth_set_mcast_list(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u8 diffs[ETH_ALEN], *addr;
	int i;
	static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };

	if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) {
		for (i = 0; i < ETH_ALEN; i++) {
			__raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
			__raw_writel(allmulti[i], &port->regs->mcast_mask[i]);
		}
		__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
			     &port->regs->rx_control[0]);
		return;
	}

	if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) {
		__raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
			     &port->regs->rx_control[0]);
		return;
	}

	eth_zero_addr(diffs);
	addr = NULL;
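
	/* Fold the list into one address/mask pair: any bit that differs
	   between two listed addresses is cleared from the mask, so the
	   filter accepts (at least) every address on the list. */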
	netdev_for_each_mc_addr(ha, dev) {
		if (!addr)
			addr = ha->addr; /* first MAC address */
		for (i = 0; i < ETH_ALEN; i++)
			diffs[i] |= addr[i] ^ ha->addr[i];
	}

	for (i = 0; i < ETH_ALEN; i++) {
		__raw_writel(addr[i], &port->regs->mcast_addr[i]);
		__raw_writel(~diffs[i], &port->regs->mcast_mask[i]);
	}

	__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
		     &port->regs->rx_control[0]);
}

static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	if (!netif_running(dev))
		return -EINVAL;

	if (cpu_is_ixp46x()) {
		if (cmd == SIOCSHWTSTAMP)
			return hwtstamp_set(dev, req);
		if (cmd == SIOCGHWTSTAMP)
			return hwtstamp_get(dev, req);
	}

	return phy_mii_ioctl(dev->phydev, req, cmd);
}

/* ethtool support */

static void ixp4xx_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	struct port *port = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	snprintf(info->fw_version, sizeof(info->fw_version), "%u:%u:%u:%u",
		 port->firmware[0], port->firmware[1],
		 port->firmware[2], port->firmware[3]);
	strlcpy(info->bus_info, "internal", sizeof(info->bus_info));
}

static int ixp4xx_nway_reset(struct net_device *dev)
{
	return phy_start_aneg(dev->phydev);
}

int ixp46x_phc_index = -1;
EXPORT_SYMBOL_GPL(ixp46x_phc_index);

static int ixp4xx_get_ts_info(struct net_device *dev,
			      struct ethtool_ts_info *info)
{
	if (!cpu_is_ixp46x()) {
		info->so_timestamping =
			SOF_TIMESTAMPING_TX_SOFTWARE |
			SOF_TIMESTAMPING_RX_SOFTWARE |
			SOF_TIMESTAMPING_SOFTWARE;
		info->phc_index = -1;
		return 0;
	}
	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = ixp46x_phc_index;
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ);
	return 0;
}

static const struct ethtool_ops ixp4xx_ethtool_ops = {
	.get_drvinfo = ixp4xx_get_drvinfo,
	.nway_reset = ixp4xx_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_ts_info = ixp4xx_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static int request_queues(struct port *port)
{
	int err;

	err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0,
				 "%s:RX-free", port->netdev->name);
	if (err)
		return err;

	err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0,
				 "%s:RX", port->netdev->name);
	if (err)
		goto rel_rxfree;

	err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0,
				 "%s:TX", port->netdev->name);
	if (err)
		goto rel_rx;

	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0,
				 "%s:TX-ready", port->netdev->name);
	if (err)
		goto rel_tx;

	/* TX-done queue handles skbs sent out by the NPEs */
	if (!ports_open) {
		err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0,
					 "%s:TX-done", DRV_NAME);
		if (err)
			goto rel_txready;
	}
	return 0;

rel_txready:
	qmgr_release_queue(port->plat->txreadyq);
rel_tx:
	qmgr_release_queue(TX_QUEUE(port->id));
rel_rx:
	qmgr_release_queue(port->plat->rxq);
rel_rxfree:
	qmgr_release_queue(RXFREE_QUEUE(port->id));
	printk(KERN_DEBUG "%s: unable to request hardware queues\n",
	       port->netdev->name);
	return err;
}

static void release_queues(struct port *port)
{
	qmgr_release_queue(RXFREE_QUEUE(port->id));
	qmgr_release_queue(port->plat->rxq);
	qmgr_release_queue(TX_QUEUE(port->id));
	qmgr_release_queue(port->plat->txreadyq);

	if (!ports_open)
		qmgr_release_queue(TXDONE_QUEUE);
}

static int init_queues(struct port *port)
{
	int i;

	if (!ports_open) {
		dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
					   POOL_ALLOC_SIZE, 32, 0);
		if (!dma_pool)
			return -ENOMEM;
	}

	if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
					      &port->desc_tab_phys)))
		return -ENOMEM;
	memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
	memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
	memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));

	/* Setup RX buffers */
	for (i = 0; i < RX_DESCS; i++) {
		struct desc *desc = rx_desc_ptr(port, i);
		buffer_t *buff; /* skb or kmalloc()ated memory */
		void *data;
#ifdef __ARMEB__
		if (!(buff = netdev_alloc_skb(port->netdev, RX_BUFF_SIZE)))
			return -ENOMEM;
		data = buff->data;
#else
		if (!(buff = kmalloc(RX_BUFF_SIZE, GFP_KERNEL)))
			return -ENOMEM;
		data = buff;
#endif
		desc->buf_len = MAX_MRU;
		desc->data = dma_map_single(&port->netdev->dev, data,
					    RX_BUFF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&port->netdev->dev, desc->data)) {
			free_buffer(buff);
			return -EIO;
		}
		desc->data += NET_IP_ALIGN;
		port->rx_buff_tab[i] = buff;
	}

	return 0;
}

static void destroy_queues(struct port *port)
{
	int i;

	if (port->desc_tab) {
		for (i = 0; i < RX_DESCS; i++) {
			struct desc *desc = rx_desc_ptr(port, i);
			buffer_t *buff = port->rx_buff_tab[i];
			if (buff) {
				dma_unmap_single(&port->netdev->dev,
						 desc->data - NET_IP_ALIGN,
						 RX_BUFF_SIZE, DMA_FROM_DEVICE);
				free_buffer(buff);
			}
		}
		for (i = 0; i < TX_DESCS; i++) {
			struct desc *desc = tx_desc_ptr(port, i);
			buffer_t *buff = port->tx_buff_tab[i];
			if (buff) {
				dma_unmap_tx(port, desc);
				free_buffer(buff);
			}
		}
		dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
		port->desc_tab = NULL;
	}

	if (!ports_open && dma_pool) {
		dma_pool_destroy(dma_pool);
		dma_pool = NULL;
	}
}

static int eth_open(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct npe *npe = port->npe;
	struct msg msg;
	int i, err;

	if (!npe_running(npe)) {
		err = npe_load_firmware(npe, npe_name(npe), &dev->dev);
		if (err)
			return err;

		if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) {
			printk(KERN_ERR "%s: %s not responding\n", dev->name,
			       npe_name(npe));
			return -EIO;
		}
		port->firmware[0] = msg.byte4;
		port->firmware[1] = msg.byte5;
		port->firmware[2] = msg.byte6;
		port->firmware[3] = msg.byte7;
	}
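
	/* Map all eight 802.1p priority classes to this port's RX queue */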
	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_VLAN_SETRXQOSENTRY;
	msg.eth_id = port->id;
	msg.byte5 = port->plat->rxq | 0x80;
	msg.byte7 = port->plat->rxq << 4;
	for (i = 0; i < 8; i++) {
		msg.byte3 = i;
		if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ"))
			return -EIO;
	}

	msg.cmd = NPE_EDB_SETPORTADDRESS;
	msg.eth_id = PHYSICAL_ID(port->id);
	msg.byte2 = dev->dev_addr[0];
	msg.byte3 = dev->dev_addr[1];
	msg.byte4 = dev->dev_addr[2];
	msg.byte5 = dev->dev_addr[3];
	msg.byte6 = dev->dev_addr[4];
	msg.byte7 = dev->dev_addr[5];
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC"))
		return -EIO;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_FW_SETFIREWALLMODE;
	msg.eth_id = port->id;
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE"))
		return -EIO;

	if ((err = request_queues(port)) != 0)
		return err;

	if ((err = init_queues(port)) != 0) {
		destroy_queues(port);
		release_queues(port);
		return err;
	}

	port->speed = 0;	/* force "link up" message */
	phy_start(dev->phydev);

	for (i = 0; i < ETH_ALEN; i++)
		__raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
	__raw_writel(0x08, &port->regs->random_seed);
	__raw_writel(0x12, &port->regs->partial_empty_threshold);
	__raw_writel(0x30, &port->regs->partial_full_threshold);
	__raw_writel(0x08, &port->regs->tx_start_bytes);
	__raw_writel(0x15, &port->regs->tx_deferral);
	__raw_writel(0x08, &port->regs->tx_2part_deferral[0]);
	__raw_writel(0x07, &port->regs->tx_2part_deferral[1]);
	__raw_writel(0x80, &port->regs->slot_time);
	__raw_writel(0x01, &port->regs->int_clock_threshold);

	/* Populate queues with buffers, no failure after this point */
	for (i = 0; i < TX_DESCS; i++)
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, i), tx_desc_ptr(port, i));

	for (i = 0; i < RX_DESCS; i++)
		queue_put_desc(RXFREE_QUEUE(port->id),
			       rx_desc_phys(port, i), rx_desc_ptr(port, i));

	__raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]);
	__raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]);
	__raw_writel(0, &port->regs->rx_control[1]);
	__raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);

	napi_enable(&port->napi);
	eth_set_mcast_list(dev);
	netif_start_queue(dev);

	qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
		     eth_rx_irq, dev);
	if (!ports_open) {
		qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY,
			     eth_txdone_irq, NULL);
		qmgr_enable_irq(TXDONE_QUEUE);
	}
	ports_open++;
	/* we may already have RX data, enables IRQ */
	napi_schedule(&port->napi);
	return 0;
}

static int eth_close(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct msg msg;
	int buffs = RX_DESCS; /* allocated RX buffers */
	int i;

	ports_open--;
	qmgr_disable_irq(port->plat->rxq);
	napi_disable(&port->napi);
	netif_stop_queue(dev);

	while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0)
		buffs--;
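
	/* Buffers still held by the NPE can only come back through the RX
	   queue, so switch the port to internal loopback and, if necessary,
	   inject dummy frames below until every RX buffer is accounted for. */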
	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_SETLOOPBACK_MODE;
	msg.eth_id = port->id;
	msg.byte3 = 1;
	if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
		printk(KERN_CRIT "%s: unable to enable loopback\n", dev->name);

	i = 0;
	do {			/* drain RX buffers */
		while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
			buffs--;
		if (!buffs)
			break;
		if (qmgr_stat_empty(TX_QUEUE(port->id))) {
			/* we have to inject some packet */
			struct desc *desc;
			u32 phys;
			int n = queue_get_desc(port->plat->txreadyq, port, 1);
			BUG_ON(n < 0);
			desc = tx_desc_ptr(port, n);
			phys = tx_desc_phys(port, n);
			desc->buf_len = desc->pkt_len = 1;
			wmb();
			queue_put_desc(TX_QUEUE(port->id), phys, desc);
		}
		udelay(1);
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
		       " left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining RX queue took %i cycles\n", i);
#endif

	buffs = TX_DESCS;
	while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0)
		buffs--; /* cancel TX */

	i = 0;
	do {
		while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
			buffs--;
		if (!buffs)
			break;
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
		       "left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
#endif

	msg.byte3 = 0;
	if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
		printk(KERN_CRIT "%s: unable to disable loopback\n",
		       dev->name);

	phy_stop(dev->phydev);

	if (!ports_open)
		qmgr_disable_irq(TXDONE_QUEUE);
	destroy_queues(port);
	release_queues(port);
	return 0;
}

static const struct net_device_ops ixp4xx_netdev_ops = {
	.ndo_open = eth_open,
	.ndo_stop = eth_close,
	.ndo_start_xmit = eth_xmit,
	.ndo_set_rx_mode = eth_set_mcast_list,
	.ndo_do_ioctl = eth_ioctl,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

static int eth_init_one(struct platform_device *pdev)
{
	struct port *port;
	struct net_device *dev;
	struct eth_plat_info *plat = dev_get_platdata(&pdev->dev);
	struct phy_device *phydev = NULL;
	u32 regs_phys;
	char phy_id[MII_BUS_ID_SIZE + 3];
	int err;

	if (!(dev = alloc_etherdev(sizeof(struct port))))
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);
	port = netdev_priv(dev);
	port->netdev = dev;
	port->id = pdev->id;

	switch (port->id) {
	case IXP4XX_ETH_NPEA:
		port->regs = (struct eth_regs __iomem *)IXP4XX_EthA_BASE_VIRT;
		regs_phys = IXP4XX_EthA_BASE_PHYS;
		break;
	case IXP4XX_ETH_NPEB:
		port->regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
		regs_phys = IXP4XX_EthB_BASE_PHYS;
		break;
	case IXP4XX_ETH_NPEC:
		port->regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
		regs_phys = IXP4XX_EthC_BASE_PHYS;
		break;
	default:
		err = -ENODEV;
		goto err_free;
	}

	dev->netdev_ops = &ixp4xx_netdev_ops;
	dev->ethtool_ops = &ixp4xx_ethtool_ops;
	dev->tx_queue_len = 100;

	netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT);

	if (!(port->npe = npe_request(NPE_ID(port->id)))) {
		err = -EIO;
		goto err_free;
	}

	port->mem_res = request_mem_region(regs_phys, REGS_SIZE, dev->name);
	if (!port->mem_res) {
		err = -EBUSY;
		goto err_npe_rel;
	}

	port->plat = plat;
	npe_port_tab[NPE_ID(port->id)] = port;
	memcpy(dev->dev_addr, plat->hwaddr, ETH_ALEN);

	platform_set_drvdata(pdev, dev);

	__raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET,
		     &port->regs->core_control);
	udelay(50);
	__raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
	udelay(50);

	snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
		 mdio_bus->id, plat->phy);
	phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link,
			     PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev)) {
		err = PTR_ERR(phydev);
		goto err_free_mem;
	}

	phydev->irq = PHY_POLL;

	if ((err = register_netdev(dev)))
		goto err_phy_dis;

	printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy,
	       npe_name(port->npe));

	return 0;

err_phy_dis:
	phy_disconnect(phydev);
err_free_mem:
	npe_port_tab[NPE_ID(port->id)] = NULL;
	release_resource(port->mem_res);
err_npe_rel:
	npe_release(port->npe);
err_free:
	free_netdev(dev);
	return err;
}

static int eth_remove_one(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct phy_device *phydev = dev->phydev;
	struct port *port = netdev_priv(dev);

	unregister_netdev(dev);
	phy_disconnect(phydev);
	npe_port_tab[NPE_ID(port->id)] = NULL;
	npe_release(port->npe);
	release_resource(port->mem_res);
	free_netdev(dev);
	return 0;
}

static struct platform_driver ixp4xx_eth_driver = {
	.driver.name	= DRV_NAME,
	.probe		= eth_init_one,
	.remove		= eth_remove_one,
};

static int __init eth_init_module(void)
{
	int err;

	if ((err = ixp4xx_mdio_register()))
		return err;
	return platform_driver_register(&ixp4xx_eth_driver);
}

static void __exit eth_cleanup_module(void)
{
	platform_driver_unregister(&ixp4xx_eth_driver);
	ixp4xx_mdio_remove();
}

MODULE_AUTHOR("Krzysztof Halasa");
MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ixp4xx_eth");
module_init(eth_init_module);
module_exit(eth_cleanup_module);