/***************************************************************************************
 * am_net8218.c - Amlogic Ethernet driver
 * Added by zhouzhi, 2008-08-18
 ***************************************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/mii.h>
#include <asm/delay.h>
#include <mach/pinmux.h>
#include <linux/sched.h>
#include <linux/pm.h>
#include <linux/crc32.h>
#include <linux/platform_device.h>
#include <plat/eth.h>
#include <mach/reg_addr.h>
#include <plat/regops.h>
#include "am_net8218.h"
// debug levels:
// >0: basic init and remove debugging
// >1: further setup debugging
// >2: tx path
// >3: rx path
#ifdef CONFIG_AM_ETHERNET_DEBUG_LEVEL
static int debug = CONFIG_AM_ETHERNET_DEBUG_LEVEL;
#else
static int debug = 1;
#endif

//#define LOOP_BACK_TEST
//#define MAC_LOOPBACK_TEST
//#define PHY_LOOPBACK_TEST

void start_test(struct net_device *dev);
static int running = 0;
static struct net_device *my_ndev = NULL;
static unsigned int g_mac_setup = 0;
static char DEFMAC[] = "\x00\x01\x23\xcd\xee\xaf";

#define MODULE_NAME "ethernet"
#define DRIVER_NAME "ethernet"
#define DRV_NAME DRIVER_NAME
#define DRV_VERSION "v2.0.0"
#define DRV_RELDATE "2008-8-28"

MODULE_AUTHOR("rising_o<zhi_zhou@amlogic.com>");
MODULE_DESCRIPTION("Amlogic Ethernet Driver");
MODULE_LICENSE("Amlogic");
MODULE_VERSION(DRV_VERSION);
module_param_named(amlog_level, debug, int, 0664);
MODULE_PARM_DESC(amlog_level, "ethernet debug level");

#define PERIPHS_SET_BITS(reg, mask) \
    aml_set_reg32_mask(reg, mask)
#define PERIPHS_CLEAR_BITS(reg, mask) \
    aml_clr_reg32_mask(reg, mask)

static void write_mac_addr(struct net_device *dev, char *macaddr);
static int ethernet_reset(struct net_device *dev);
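
/*
 * MDIO access helpers.
 *
 * The bit layout below is inferred from how reg4 is assembled in this
 * file (not from a datasheet): GMII Addr register bits [15:11] = PHY
 * address, [10:6] = register number, [4:2] = MDC clock divider, bit 1 =
 * write flag, bit 0 = busy. A transaction is started by writing
 * ETH_MAC_4_GMII_Addr with the busy bit set, then polling until the
 * hardware clears it; data moves through ETH_MAC_5_GMII_Data.
 */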
static int mdio_read(struct net_device *dev, int phyid, int reg)
{
#define WR (1<<1)
#define MDCCLK (0x1 << 2)	//for our 130 MHz
#define BUSY 0x1
    struct am_net_private *priv = netdev_priv(dev);
    unsigned long busy = 0;
    unsigned long reg4;
    unsigned long val = 0;

    reg4 = phyid << 11 | reg << 6 | MDCCLK | BUSY;
    /*
    do { //wait until the MDIO interface is idle ...
        busy = IO_READ32(priv->base_addr + ETH_MAC_4_GMII_Addr);
    } while (busy & 0x1);
    */
    IO_WRITE32(reg4, priv->base_addr + ETH_MAC_4_GMII_Addr);
    do {	//wait until the read transaction completes ...
        busy = IO_READ32(priv->base_addr + ETH_MAC_4_GMII_Addr);
    } while (busy & 0x1);
    val = IO_READ32(priv->base_addr + ETH_MAC_5_GMII_Data) & 0xffff;
    return val;
}

static void mdio_write(struct net_device *dev, int phyid, int reg, int val)
{
    struct am_net_private *priv = netdev_priv(dev);
    unsigned long busy = 0;
    unsigned long reg4;

    reg4 = phyid << 11 | reg << 6 | MDCCLK | WR | BUSY;
    IO_WRITE32(val, priv->base_addr + ETH_MAC_5_GMII_Data);
    do {	//wait until the MDIO interface is idle ...
        busy = IO_READ32(priv->base_addr + ETH_MAC_4_GMII_Addr);
    } while (busy & 0x1);
    IO_WRITE32(reg4, priv->base_addr + ETH_MAC_4_GMII_Addr);
    do {	//wait until the write transaction completes ...
        busy = IO_READ32(priv->base_addr + ETH_MAC_4_GMII_Addr);
    } while (busy & 0x1);
}
static void dump(unsigned char *p, int len)
{
    int i, j;
    char s[20];

    for (i = 0; i < len; i += 16) {
        printk("%08x:", (unsigned int)p);
        for (j = 0; j < 16 && i + j < len; j++) {
            s[j] = (p[j] > 15 && p[j] < 128) ? p[j] : '.';
            printk(" %02x", p[j]);
        }
        s[j] = 0;
        printk(" |%s|\n", s);
        p = p + 16;
    }
}
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
    struct mii_ioctl_data *data = if_mii(rq);
    struct am_net_private *np = netdev_priv(dev);
    char addr[MAX_ADDR_LEN];

    if (debug > 0)
        printk("Ethernet Driver ioctl (%x)\n", cmd);
    switch (cmd) {
    case SIOCGMIIPHY:	/* Get address of MII PHY in use. */
        data->phy_id =
            ((struct am_net_private *)netdev_priv(dev))->phys[0] & 0x1f;
        /* Fall Through */
    case SIOCGMIIREG:	/* Read MII PHY register. */
        spin_lock_irq(&np->lock);
        data->val_out =
            mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
        spin_unlock_irq(&np->lock);
        return 0;
    case SIOCSMIIREG:	/* Write MII PHY register. */
        if (!capable(CAP_NET_ADMIN))
            return -EPERM;
        spin_lock_irq(&np->lock);
        mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f,
                   data->val_in);
        spin_unlock_irq(&np->lock);
        return 0;
    case SIOCSIFHWADDR:
        if (copy_from_user(&addr,
                           (void __user *)rq->ifr_hwaddr.sa_data,
                           MAX_ADDR_LEN)) {
            return -EFAULT;
        }
        if (debug > 0)
            printk("set mac addr to %02x:%02x:%02x:%02x:%02x:%02x\n",
                   addr[0], addr[1], addr[2], addr[3], addr[4],
                   addr[5]);
        spin_lock_irq(&np->lock);
        memcpy(dev->dev_addr, &addr, MAX_ADDR_LEN);
        write_mac_addr(dev, addr);
        spin_unlock_irq(&np->lock);
        return 0;
    default:
        if (debug > 0)
            printk("Ethernet Driver unknown ioctl (%x)\n", cmd);
        return -EOPNOTSUPP;
    }
    return 0;
}
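
/*
 * Descriptor ring setup. Both rings are built in "chained" mode: every
 * descriptor's count field carries DescChain and its next/next_dma
 * pointers link to the following descriptor, with the last entry wrapping
 * back to entry 0. RX descriptors are handed to the hardware up front by
 * setting DescOwnByDma in their status; TX descriptors stay owned by the
 * CPU until start_tx() fills them. (Summary of the code below, not a
 * datasheet quote.)
 */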
int init_rxtx_rings(struct net_device *dev)
{
    struct am_net_private *np = netdev_priv(dev);
    int i;
#ifndef DMA_USE_SKB_BUF
    unsigned long tx = 0, rx = 0;
#endif
#ifdef DMA_USE_MALLOC_ADDR
    rx = (unsigned long)kmalloc((RX_RING_SIZE) * np->rx_buf_sz, GFP_KERNEL);
    if (rx == 0) {
        printk("error to alloc Rx ring buf\n");
        return -1;
    }
    tx = (unsigned long)kmalloc((TX_RING_SIZE) * np->rx_buf_sz, GFP_KERNEL);
    if (tx == 0) {
        kfree((void *)rx);
        printk("error to alloc Tx ring buf\n");
        return -1;
    }
#elif defined(DMA_USE_SKB_BUF)
    //not needed
#else
    tx = TX_BUF_ADDR;
    rx = RX_BUF_ADDR;
#endif
    /* Fill in the Rx buffers. Handle allocation failure gracefully. */
    for (i = 0; i < RX_RING_SIZE; i++) {
#ifdef DMA_USE_SKB_BUF
        struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
        np->rx_ring[i].skb = skb;
        if (skb == NULL)
            break;
        skb_reserve(skb, 2);	/* 16-byte align the IP header */
        skb->dev = dev;	/* Mark as being used by this device. */
        np->rx_ring[i].buf = (unsigned long)skb->data;
#else
        np->rx_ring[i].skb = NULL;
        np->rx_ring[i].buf = (rx + i * np->rx_buf_sz);
#endif
        np->rx_ring[i].buf_dma =
            dma_map_single(&dev->dev, (void *)np->rx_ring[i].buf,
                           np->rx_buf_sz, DMA_FROM_DEVICE);
        np->rx_ring[i].count =
            (DescChain) | (np->rx_buf_sz & DescSize1Mask);
        np->rx_ring[i].status = (DescOwnByDma);
        np->rx_ring[i].next_dma = &np->rx_ring_dma[i + 1];
        np->rx_ring[i].next = &np->rx_ring[i + 1];
    }
    np->rx_ring[RX_RING_SIZE - 1].next_dma = &np->rx_ring_dma[0];
    np->rx_ring[RX_RING_SIZE - 1].next = &np->rx_ring[0];
    /* Initialize the Tx descriptors */
    for (i = 0; i < TX_RING_SIZE; i++) {
#ifdef DMA_USE_SKB_BUF
        np->tx_ring[i].buf = 0;
#else
        np->tx_ring[i].buf = (tx + i * np->rx_buf_sz);
#endif
        np->tx_ring[i].status = 0;
        np->tx_ring[i].count =
            (DescChain) | (np->rx_buf_sz & DescSize1Mask);
        np->tx_ring[i].next_dma = &np->tx_ring_dma[i + 1];
        np->tx_ring[i].next = &np->tx_ring[i + 1];
        np->tx_ring[i].skb = NULL;
    }
    np->tx_ring[TX_RING_SIZE - 1].next_dma = &np->tx_ring_dma[0];
    np->tx_ring[TX_RING_SIZE - 1].next = &np->tx_ring[0];
    np->start_tx = &np->tx_ring[0];
    np->last_tx = NULL;
    np->last_rx = &np->rx_ring[RX_RING_SIZE - 1];
    //make sure all descriptor writes reach memory
    CACHE_WSYNC(np->tx_ring, sizeof(struct _tx_desc) * TX_RING_SIZE);
    CACHE_WSYNC(np->rx_ring, sizeof(struct _rx_desc) * RX_RING_SIZE);
    return 0;
}
EXPORT_SYMBOL(init_rxtx_rings);
static int alloc_ringdesc(struct net_device *dev)
{
    struct am_net_private *np = netdev_priv(dev);

    np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
#ifdef USE_COHERENT_MEMORY
    np->rx_ring = dma_alloc_coherent(&dev->dev,
                                     sizeof(struct _rx_desc) * RX_RING_SIZE,
                                     (dma_addr_t *)&np->rx_ring_dma,
                                     GFP_KERNEL);
#else
    np->rx_ring =
        kmalloc(sizeof(struct _rx_desc) * RX_RING_SIZE, GFP_KERNEL);
    np->rx_ring_dma = (void *)virt_to_phys(np->rx_ring);
#endif
    if (!np->rx_ring)
        return -ENOMEM;
    if (!IS_CACHE_ALIGNED(np->rx_ring)) {
        printk("Error: the allocated memory is not cache aligned (%p)\n",
               np->rx_ring);
    }
    printk("NET DMA descriptor start addr=%p\n", np->rx_ring);
#ifdef USE_COHERENT_MEMORY
    np->tx_ring = dma_alloc_coherent(&dev->dev,
                                     sizeof(struct _tx_desc) * TX_RING_SIZE,
                                     (dma_addr_t *)&np->tx_ring_dma,
                                     GFP_KERNEL);
#else
    np->tx_ring =
        kmalloc(sizeof(struct _tx_desc) * TX_RING_SIZE, GFP_KERNEL);
    np->tx_ring_dma = (void *)virt_to_phys(np->tx_ring);
#endif
    if (init_rxtx_rings(dev)) {
        printk("init rx tx ring failed!!\n");
        return -1;
    }
    //make sure all the data are written to memory
    return 0;
}
static int free_ringdesc(struct net_device *dev)
{
    struct am_net_private *np = netdev_priv(dev);
    int i;

    for (i = 0; i < RX_RING_SIZE; i++) {
        if (np->rx_ring[i].skb) {
            dev_kfree_skb_any(np->rx_ring[i].skb);
            if (np->rx_ring[i].buf_dma != 0)
                dma_unmap_single(&dev->dev,
                                 np->rx_ring[i].buf_dma,
                                 np->rx_buf_sz,
                                 DMA_FROM_DEVICE);
        }
        np->rx_ring[i].skb = NULL;
    }
    for (i = 0; i < TX_RING_SIZE; i++) {
        if (np->tx_ring[i].skb != NULL) {
            dev_kfree_skb_any(np->tx_ring[i].skb);
            if (np->tx_ring[i].buf_dma != 0)
                dma_unmap_single(&dev->dev,
                                 np->tx_ring[i].buf_dma,
                                 np->rx_buf_sz, DMA_TO_DEVICE);
        }
        np->tx_ring[i].skb = NULL;
    }
    if (np->rx_ring) {
#ifdef USE_COHERENT_MEMORY
        dma_free_coherent(&dev->dev, sizeof(struct _rx_desc) * RX_RING_SIZE,
                          np->rx_ring, (dma_addr_t)np->rx_ring_dma);	// for apollo
#else
        kfree(np->rx_ring);
#endif
    }
    np->rx_ring = NULL;
    if (np->tx_ring) {
#ifdef USE_COHERENT_MEMORY
        dma_free_coherent(&dev->dev, sizeof(struct _tx_desc) * TX_RING_SIZE,
                          np->tx_ring, (dma_addr_t)np->tx_ring_dma);	// for apollo
#else
        kfree(np->tx_ring);
#endif
    }
    np->tx_ring = NULL;
    return 0;
}
static int phy_linked(struct am_net_private *np)
{
    unsigned int val;

    switch (np->phy_Identifier) {
    case PHY_ATHEROS_8032:
        val = mdio_read(np->dev, np->phys[0], 17);
        val = (val & (1 << 10));
        break;
    case PHY_SMSC_8700:
    case PHY_SMSC_8720:
    default:
        val = mdio_read(np->dev, np->phys[0], 1);
        val = (val & (1 << 2));
    }
    return val;
}
static int mac_PLL_changed(struct am_net_private *np, int clk_mhz)
{
    unsigned long tmp;

    switch (clk_mhz) {
    case 0:	//disable clock
        PERIPHS_CLEAR_BITS(P_PREG_ETHERNET_ADDR0, 1);	//disable clk
        PERIPHS_CLEAR_BITS(P_PREG_ETHERNET_ADDR0,
                           (1 << 0 | 1 << 2 | 1 << 3));
        break;
    case 10:
        if (debug > 0)
            printk("10m\n");
        //(*ETH_MAC_0_Configuration) &= ~(1<<14); // program mac
        tmp = IO_READ32(np->base_addr + ETH_MAC_0_Configuration);
        tmp &= ~(1 << 14);
        IO_WRITE32(tmp, np->base_addr + ETH_MAC_0_Configuration);
        PERIPHS_CLEAR_BITS(P_PREG_ETHERNET_ADDR0, 1);
        PERIPHS_CLEAR_BITS(P_PREG_ETHERNET_ADDR0, (1 << 1));
        PERIPHS_SET_BITS(P_PREG_ETHERNET_ADDR0, 1);
        break;
    case 100:
    default:
        if (debug > 0)
            printk("100m\n");
        //(*ETH_MAC_0_Configuration) |= 1<<14; // program mac
        tmp = IO_READ32(np->base_addr + ETH_MAC_0_Configuration);
        tmp |= 1 << 14;
        IO_WRITE32(tmp, np->base_addr + ETH_MAC_0_Configuration);
        PERIPHS_CLEAR_BITS(P_PREG_ETHERNET_ADDR0, 1);
        PERIPHS_SET_BITS(P_PREG_ETHERNET_ADDR0, (1 << 1));
        PERIPHS_SET_BITS(P_PREG_ETHERNET_ADDR0, 1);
    }
    udelay(10);
    return 0;
}
static void phy_auto_negotiation_set(struct am_net_private *np)
{
    unsigned int rint;
    int s100, full, tmp;

    switch (np->phy_Identifier) {
    case PHY_ATHEROS_8032:
        rint = mdio_read(np->dev, np->phys[0], 0x11);
        s100 = rint & (1 << 14);
        full = ((rint) & (1 << 13));
        break;
    case PHY_SMSC_8700:
    case PHY_SMSC_8720:
    default:
        rint = mdio_read(np->dev, np->phys[0], 31);
        s100 = rint & (1 << 3);
        full = ((rint >> 4) & 1);
        break;
    }
    if (full) {
        if (debug > 0)
            printk("full duplex\n");
        //(*ETH_MAC_0_Configuration) |= 1<<11; // program mac
        tmp = IO_READ32(np->base_addr + ETH_MAC_0_Configuration);
        tmp |= 1 << 11;
        IO_WRITE32(tmp, np->base_addr + ETH_MAC_0_Configuration);
    } else {
        if (debug > 0)
            printk("half duplex\n");
        //(*ETH_MAC_0_Configuration) &= ~(1<<11); // program mac
        tmp = IO_READ32(np->base_addr + ETH_MAC_0_Configuration);
        tmp &= ~(1 << 11);
        IO_WRITE32(tmp, np->base_addr + ETH_MAC_0_Configuration);
    }
    mac_PLL_changed(np, s100 ? 100 : 10);
    return;
}
static void netdev_timer(unsigned long data)
{
    struct net_device *dev = (struct net_device *)data;
    struct am_net_private *np = netdev_priv(dev);
    unsigned long ioaddr = np->base_addr;
    static int error_num = 0;
    int val;

    if (debug > 2)
        printk(KERN_DEBUG
               "%s: Media selection timer tick, mac status %8.8x\n",
               dev->name, ioread32(ioaddr + ETH_DMA_5_Status));
    if (!phy_linked(np)) {	//link is down .....
        error_num++;
        if (error_num > 30) {
            error_num = 0;
            spin_lock_irq(&np->lock);
            val = (1 << 14) | (7 << 5) | np->phys[0];
            mdio_write(dev, np->phys[0], 18, val);
            // Auto negotiation restart
            val = mdio_read(dev, np->phys[0], MII_BMCR);
#ifdef PHY_LOOPBACK_TEST
            val = 1 << 14 | 1 << 8 | 1 << 13;	//loopback, 100M, full duplex
#else
            val |= BMCR_ANENABLE | BMCR_ANRESTART;
#endif
            mdio_write(dev, np->phys[0], MII_BMCR, val);
            spin_unlock_irq(&np->lock);
        }
        np->timer.expires = jiffies + 1 * HZ;
        netif_stop_queue(dev);
        netif_carrier_off(dev);
        np->phy_set[0] = 0;
    } else {	//link is up
        val = mdio_read(dev, np->phys[0], 1);
        if (np->phy_set[0] != val) {
            np->phy_set[0] = val;
            phy_auto_negotiation_set(np);
        }
        error_num = 0;
        netif_carrier_on(dev);
        netif_start_queue(dev);
        np->timer.expires = jiffies + 1 * HZ;
    }
    add_timer(&np->timer);
}
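
/*
 * Interrupt status dispatch. update_status() acknowledges each pending
 * cause by writing it back to ETH_DMA_5_Status and returns a small
 * bitmask for the tasklet: bit 0 means the TX ring needs reaping, bit 1
 * means the RX ring has work. Fatal bus errors trigger a full
 * free_ringdesc()/ethernet_reset(), and RX stalls are kicked by writing
 * the receive poll-demand register. (Summary of the function below.)
 */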
static inline int update_status(struct net_device *dev, unsigned long status,
                                unsigned long mask)
{
    struct am_net_private *np = netdev_priv(dev);
    int need_reset = 0;
    int need_rx_restart = 0;
    int res = 0;

    if (status & NOR_INTR_EN) {	//Normal Interrupts Process
        if (status & TX_INTR_EN) {	//Transmit Interrupt Process
            IO_WRITE32((1 << 0 | 1 << 16),
                       np->base_addr + ETH_DMA_5_Status);
            res |= 1;
        }
        if (status & RX_INTR_EN) {	//Receive Interrupt Process
            IO_WRITE32((1 << 6 | 1 << 16),
                       np->base_addr + ETH_DMA_5_Status);
            res |= 2;
        }
        if (status & EARLY_RX_INTR_EN) {
            IO_WRITE32((EARLY_RX_INTR_EN | NOR_INTR_EN),
                       np->base_addr + ETH_DMA_5_Status);
        }
        if (status & TX_BUF_UN_EN) {
            IO_WRITE32((1 << 2 | 1 << 16),
                       np->base_addr + ETH_DMA_5_Status);
            res |= 1;
            //this error will be cleared in start_tx...
            if (debug > 1)
                printk(KERN_WARNING "[" DRV_NAME "]"
                       "Tx buffer unavailable\n");
        }
    } else if (status & ANOR_INTR_EN) {	//Abnormal Interrupts Process
        if (status & RX_BUF_UN) {
            IO_WRITE32((RX_BUF_UN | ANOR_INTR_EN),
                       np->base_addr + ETH_DMA_5_Status);
            np->stats.rx_over_errors++;
            need_rx_restart++;
            res |= 2;
            //printk(KERN_WARNING DRV_NAME "Receive Buffer Unavailable\n");
            if (debug > 1)
                printk(KERN_WARNING "[" DRV_NAME "]"
                       "Rx buffer unavailable\n");
        }
        if (status & RX_STOP_EN) {
            IO_WRITE32((RX_STOP_EN | ANOR_INTR_EN),
                       np->base_addr + ETH_DMA_5_Status);
            need_rx_restart++;
            res |= 2;
        }
        if (status & RX_WATCH_TIMEOUT) {
            IO_WRITE32((RX_WATCH_TIMEOUT | ANOR_INTR_EN),
                       np->base_addr + ETH_DMA_5_Status);
            need_rx_restart++;
        }
        if (status & FATAL_BUS_ERROR) {
            IO_WRITE32((FATAL_BUS_ERROR | ANOR_INTR_EN),
                       np->base_addr + ETH_DMA_5_Status);
            need_reset++;
            printk(KERN_WARNING "[" DRV_NAME "]"
                   "fatal bus error\n");
        }
        if (status & EARLY_TX_INTR_EN) {
            IO_WRITE32((EARLY_TX_INTR_EN | ANOR_INTR_EN),
                       np->base_addr + ETH_DMA_5_Status);
        }
        if (status & TX_STOP_EN) {
            IO_WRITE32((TX_STOP_EN | ANOR_INTR_EN),
                       np->base_addr + ETH_DMA_5_Status);
            res |= 1;
        }
        if (status & TX_JABBER_TIMEOUT) {
            IO_WRITE32((TX_JABBER_TIMEOUT | ANOR_INTR_EN),
                       np->base_addr + ETH_DMA_5_Status);
            printk(KERN_WARNING "[" DRV_NAME "]"
                   "tx jabber timeout\n");
            np->first_tx = 1;
        }
        if (status & RX_FIFO_OVER) {
            IO_WRITE32((RX_FIFO_OVER | ANOR_INTR_EN),
                       np->base_addr + ETH_DMA_5_Status);
            np->stats.rx_fifo_errors++;
            need_rx_restart++;
            res |= 2;
            printk(KERN_WARNING "[" DRV_NAME "]" "Rx fifo over\n");
        }
        if (status & TX_UNDERFLOW) {
            IO_WRITE32((TX_UNDERFLOW | ANOR_INTR_EN),
                       np->base_addr + ETH_DMA_5_Status);
            printk(KERN_WARNING "[" DRV_NAME "]" "Tx underflow\n");
            np->first_tx = 1;
            res |= 1;
        }
    }
    if (need_reset) {
        printk(KERN_WARNING DRV_NAME "system reset\n");
        free_ringdesc(dev);
        ethernet_reset(dev);
    } else if (need_rx_restart) {
        IO_WRITE32(1, np->base_addr + ETH_DMA_2_Re_Poll_Demand);
    }
    return res;
}
static inline void print_rx_error_log(unsigned long status)
{
    if (status & DescRxTruncated) {
        printk(KERN_WARNING "Descriptor Error rx desc-mask[%d]\n",
               DescRxTruncated);
    }
    if (status & DescSAFilterFail) {
        printk(KERN_WARNING
               "Source Address Filter Fail rx desc-mask[%d]\n",
               DescSAFilterFail);
    }
    if (status & DescRxLengthError) {
        printk(KERN_WARNING "Length Error rx desc-mask[%d]\n",
               DescRxLengthError);
    }
    if (status & DescRxIPChecksumErr) {
        printk(KERN_WARNING "IP checksum Error rx desc-mask[%d]\n",
               DescRxIPChecksumErr);
    }
    if (status & DescRxTCPChecksumErr) {
        printk(KERN_WARNING "TCP checksum Error rx desc-mask[%d]\n",
               DescRxTCPChecksumErr);
    }
    if (status & DescRxDamaged) {
        printk(KERN_WARNING "Overflow Error rx desc-mask[%d]\n",
               DescRxDamaged);
    }
    if (status & DescRxMiiError) {
        printk(KERN_WARNING "Receive Error rx desc-mask[%d]\n",
               DescRxMiiError);
    }
    if (status & DescRxDribbling) {
        printk(KERN_WARNING "Dribble Bit Error rx desc-mask[%d]\n",
               DescRxDribbling);
    }
    if (status & DescRxCrc) {
        printk(KERN_WARNING "CE: CRC Error rx desc-mask[%d]\n",
               DescRxCrc);
    }
}
/**********************
 basic rx/tx processing, deferred from the irq handler;
 FIXME: revisit locking on SMP systems..
 ************************/
void net_tasklet(unsigned long dev_instance)
{
    struct net_device *dev = (struct net_device *)dev_instance;
    struct am_net_private *np = netdev_priv(dev);
    int len;
    int result;
    unsigned long flags;
#ifndef DMA_USE_SKB_BUF
    struct sk_buff *skb = NULL;
#endif

    spin_lock_irqsave(&np->lock, flags);
    result = np->int_rx_tx;
    np->int_rx_tx = 0;
    spin_unlock_irqrestore(&np->lock, flags);
    if (!running)
        goto release;
    if (result & 1) {	//reap finished tx descriptors
        struct _tx_desc *c_tx, *tx = NULL;

        c_tx = (void *)IO_READ32(np->base_addr +
                                 ETH_DMA_18_Curr_Host_Tr_Descriptor);
        c_tx = np->tx_ring + (c_tx - np->tx_ring_dma);
        tx = np->start_tx;
        CACHE_RSYNC(tx, sizeof(struct _tx_desc));
        while (tx != NULL && tx != c_tx && !(tx->status & DescOwnByDma)) {
#ifdef DMA_USE_SKB_BUF
            spin_lock_irqsave(&np->lock, flags);
            if (tx->skb != NULL) {
                //clear to next send;
                if (np->tx_full) {
                    netif_wake_queue(dev);
                    np->tx_full = 0;
                }
                if (debug > 2)
                    printk("send data ok len=%d\n",
                           tx->skb->len);
                dev_kfree_skb_any(tx->skb);
                if (tx->buf_dma != 0)
                    dma_unmap_single(&dev->dev, tx->buf_dma,
                                     np->rx_buf_sz,
                                     DMA_TO_DEVICE);
                tx->skb = NULL;
                tx->buf = 0;
                tx->buf_dma = 0;
                tx->status = 0;
            } else {
                spin_unlock_irqrestore(&np->lock, flags);
                break;
            }
            spin_unlock_irqrestore(&np->lock, flags);
#else
            tx->status = 0;
            CACHE_WSYNC(tx, sizeof(struct _tx_desc));
            if (np->tx_full) {
                netif_wake_queue(dev);
                np->tx_full = 0;
            }
#endif
            tx = tx->next;
            CACHE_RSYNC(tx, sizeof(struct _tx_desc));
        }
        np->start_tx = tx;
        //data tx end... todo
    }
    if (result & 2) {	//data rx;
        struct _rx_desc *c_rx, *rx = NULL;

        c_rx = (void *)IO_READ32(np->base_addr +
                                 ETH_DMA_19_Curr_Host_Re_Descriptor);
        c_rx = np->rx_ring + (c_rx - np->rx_ring_dma);
        rx = np->last_rx->next;
        while (rx != NULL) {
            //if (rx->status != IO_READ32(&rx->status))
            //	printk("error of D-cache!\n");
            CACHE_RSYNC(rx, sizeof(struct _rx_desc));
            if (!(rx->status & (DescOwnByDma))) {
                int ip_summed = CHECKSUM_UNNECESSARY;

                len = (rx->status & DescFrameLengthMask) >>
                      DescFrameLengthShift;
                if (unlikely(len < 18 || len > np->rx_buf_sz)) {
                    //fatal error; we drop the frame
                    np->stats.rx_dropped++;
                    np->stats.rx_errors++;
                    goto to_next;
                }
                if (unlikely(rx->status & (DescError))) {
                    //this does not occur often
                    print_rx_error_log(rx->status);
                    //rx->status = DescOwnByDma;
                    if ((rx->status & DescRxIPChecksumErr) ||
                        (rx->status & DescRxTCPChecksumErr)) {
                        //maybe the checksum engine's problem;
                        //set CHECKSUM_NONE so the stack checks ip/tcp again
                        ip_summed = CHECKSUM_NONE;
                    } else {
                        np->stats.rx_dropped++;
                        np->stats.rx_errors++;
                        goto to_next;
                    }
                }
                len = len - 4;	//strip the crc
#ifdef DMA_USE_SKB_BUF
                if (rx->skb == NULL) {
                    printk("NET skb pointer error!!!\n");
                    break;
                }
                if (rx->skb->len > 0) {
                    printk("skb has data before, skb=%p, len=%d\n",
                           rx->skb, rx->skb->len);
                    rx->skb = NULL;
                    goto to_next;
                }
                skb_put(rx->skb, len);
                rx->skb->dev = dev;
                rx->skb->protocol = eth_type_trans(rx->skb, dev);
                /* checksum was already verified in hardware;
                   no need to check it again */
                rx->skb->ip_summed = ip_summed;
                if (rx->buf_dma != 0)
                    dma_unmap_single(&dev->dev, rx->buf_dma,
                                     np->rx_buf_sz,
                                     DMA_FROM_DEVICE);
                rx->buf_dma = 0;
                netif_rx(rx->skb);
                if (debug > 3)
                    printk("receive skb=%p\n", rx->skb);
                rx->skb = NULL;
#else
                skb = dev_alloc_skb(len + 4);
                if (skb == NULL) {
                    np->stats.rx_dropped++;
                    printk("error to alloc skb\n");
                    break;
                }
                skb_reserve(skb, 2);
                skb_put(skb, len);
                if (rx->buf_dma != 0)
                    dma_unmap_single(&dev->dev,
                                     rx->buf_dma,
                                     np->rx_buf_sz,
                                     DMA_FROM_DEVICE);
                memcpy(skb->data, (void *)rx->buf, len);
                skb->dev = dev;
                skb->protocol = eth_type_trans(skb, dev);
                skb->ip_summed = ip_summed;
                netif_rx(skb);
#endif
                dev->last_rx = jiffies;
                np->stats.rx_packets++;
                np->stats.rx_bytes += len;
                if (debug > 3)
                    printk("receive data len=%d\n", len);
                //dump((unsigned char *)rx->buf, len);
                //reset the rx descriptor for the next receive
to_next:
#ifdef DMA_USE_SKB_BUF
                if (rx->skb)
                    dev_kfree_skb_any(rx->skb);
                rx->skb = dev_alloc_skb(np->rx_buf_sz + 4);
                if (rx->skb == NULL) {
                    printk(KERN_ERR
                           "error to alloc the skb\n");
                    rx->buf = 0;
                    rx->buf_dma = 0;
                    rx->status = 0;
                    rx->count = 0;
                    np->last_rx = rx;
                    CACHE_WSYNC(rx, sizeof(struct _rx_desc));
                    break;
                }
                if (debug > 3)
                    printk("new malloc skb=%p\n", rx->skb);
                skb_reserve(rx->skb, 2);
                rx->buf = (unsigned long)rx->skb->data;
#endif
                rx->buf_dma = dma_map_single(&dev->dev, (void *)rx->buf,
                                             (unsigned long)np->rx_buf_sz,
                                             DMA_FROM_DEVICE);	//invalidate for next dma in;
                rx->count = (DescChain) | (np->rx_buf_sz & DescSize1Mask);
                rx->status = DescOwnByDma;
                CACHE_WSYNC(rx, sizeof(struct _rx_desc));
                np->last_rx = rx;
                rx = rx->next;
            } else {
                break;
            }
        }
    }
release:
    IO_WRITE32(np->irq_mask, (np->base_addr + ETH_DMA_7_Interrupt_Enable));
}
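
/*
 * Top-half interrupt handler: mask all DMA interrupts, record the pending
 * work via update_status(), and let net_tasklet() do the actual ring
 * processing. The tasklet re-enables the interrupt mask when it is done.
 */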
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
    struct net_device *dev = (struct net_device *)dev_instance;
    struct am_net_private *np = netdev_priv(dev);
    unsigned long status, mask = 0;

    IO_WRITE32(0, (np->base_addr + ETH_DMA_7_Interrupt_Enable));	//disable irq
    tasklet_schedule(&np->rx_tasklet);
    status = IO_READ32(np->base_addr + ETH_DMA_5_Status);
    mask = IO_READ32(np->base_addr + ETH_MAC_Interrupt_Mask);
    np->int_rx_tx |= update_status(dev, status, mask);
    return IRQ_HANDLED;
}
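
/*
 * Bring-up sequence used by phy_reset(): soft-reset the MAC DMA (bit 0 of
 * the bus-mode register), put the PHY into RMII mode via its register 18,
 * issue a BMCR reset, restart auto-negotiation, then program the MAC
 * address, frame filter, descriptor list base addresses and the DMA
 * operation mode. The register-18 magic values appear to be PHY specific
 * (the SMSC/Atheros parts probed below); treat them as board conventions,
 * not generic MII.
 */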
static int phy_reset(struct net_device *ndev)
{
    struct am_net_private *np = netdev_priv(ndev);
    unsigned long val;
    int k;

    //mac reset ...
    IO_WRITE32(1, np->base_addr + ETH_DMA_0_Bus_Mode);
    //wait for the mac reset to complete...
    for (k = 0;
         (IO_READ32(np->base_addr + ETH_DMA_0_Bus_Mode) & 1) && k < 1000;
         k++)
        udelay(1);
    if (k >= 1000) {
        printk("error to reset mac!\n");
        goto error_reset;
    }
    //set for RMII mode;
    val = (1 << 14) | (7 << 5) | np->phys[0];
    mdio_write(ndev, np->phys[0], 18, val);
    val = BMCR_RESET;
    mdio_write(ndev, np->phys[0], MII_BMCR, val);
    //wait for the phy reset to complete....
    for (k = 0; (mdio_read(ndev, np->phys[0], MII_BMCR)) & (BMCR_RESET)
         && k < 1000; k++) {
        udelay(1);
    }
    if (k >= 1000) {
        printk("error to reset phy!\n");
        goto error_reset;
    }
    // mode = 111; turn on auto-neg mode (previously was power-saving)
    val = (1 << 14) | (7 << 5) | np->phys[0];
    mdio_write(ndev, np->phys[0], 18, val);
    // Auto negotiation restart
    val = BMCR_ANENABLE | BMCR_ANRESTART;
    mdio_write(ndev, np->phys[0], MII_BMCR, val);
    if (debug > 1)
        printk("starting auto negotiation!\n");
    //(*ETH_DMA_0_Bus_Mode) = 0x00100800;
    IO_WRITE32(0x00100800, np->base_addr + ETH_DMA_0_Bus_Mode);
    /*
    val = *((unsigned short *)&ndev->dev_addr[4]);
    IO_WRITE32(val, np->base_addr + ETH_MAC_Addr0_High);
    val = *((unsigned long *)ndev->dev_addr);
    IO_WRITE32(val, np->base_addr + ETH_MAC_Addr0_Low);
    */
    write_mac_addr(ndev, ndev->dev_addr);
    val = 0xc80c |	//8<<8 | 8<<17; //tx and rx all 8bit mode;
          1 << 10;	//checksum offload enabled
#ifdef MAC_LOOPBACK_TEST
    val |= 1 << 12;	//mac loop back
#endif
    IO_WRITE32(val, np->base_addr + ETH_MAC_0_Configuration);
    val = 1 << 4;	/* receive all multicast */
    //| 1 << 31;	//receive all the data
    IO_WRITE32(val, np->base_addr + ETH_MAC_1_Frame_Filter);
#ifdef PHY_LOOPBACK_TEST
    /* phy loop back */
    val = mdio_read(ndev, np->phys[0], MII_BMCR);
    //val = 1 << 14 | 1 << 8 | 1 << 13;	//loopback, 100M, full duplex
    val = 1 << 14 | 1 << 8;	//loopback, 10M, full duplex
    mdio_write(ndev, np->phys[0], MII_BMCR, val);
#endif
    IO_WRITE32((unsigned long)&np->rx_ring_dma[0],
               (np->base_addr + ETH_DMA_3_Re_Descriptor_List_Addr));
    IO_WRITE32((unsigned long)&np->tx_ring_dma[0],
               (np->base_addr + ETH_DMA_4_Tr_Descriptor_List_Addr));
    IO_WRITE32(np->irq_mask, (np->base_addr + ETH_DMA_7_Interrupt_Enable));
    IO_WRITE32((0), (np->base_addr + ETH_MAC_Interrupt_Mask));
    printk("Current DMA mode=%x\n",
           IO_READ32(np->base_addr + ETH_DMA_6_Operation_Mode));
    val = (7 << 14 | 1 << 25 | 1 << 8 | 1 << 26 | 1 << 21);	/* don't start receive here */
    //1<<21 is Transmit Store and Forward, used for tcp/ip checksum insert
    IO_WRITE32(val, (np->base_addr + ETH_DMA_6_Operation_Mode));
    np->phy_set[0] = 0;	//make sure the phy speed gets reprogrammed
    return 0;
error_reset:
    return -1;
}
static int ethernet_reset(struct net_device *dev)
{
    struct am_net_private *np = netdev_priv(dev);
    int res;
    unsigned long flags;

    spin_lock_irqsave(&np->lock, flags);
    res = alloc_ringdesc(dev);
    spin_unlock_irqrestore(&np->lock, flags);
    if (res != 0) {
        printk(KERN_INFO "can't alloc ring desc! err=%d\n", res);
        goto out_err;
    }
    res = phy_reset(dev);
    if (res != 0) {
        printk(KERN_INFO "can't reset ethernet phy! err=%d\n", res);
        goto out_err;
    }
    np->first_tx = 1;
out_err:
    return res;
}
static int netdev_open(struct net_device *dev)
{
    struct am_net_private *np = netdev_priv(dev);
    unsigned long val;
    int res;

    if (running)
        return 0;
    printk(KERN_INFO "netdev_open\n");
    res = ethernet_reset(dev);
    if (res != 0) {
        printk(KERN_INFO "ethernet_reset err=%d\n", res);
        goto out_err;
    }
    //netif_device_detach(dev);
    res = request_irq(dev->irq, &intr_handler, IRQF_SHARED, dev->name, dev);
    if (res) {
        printk(KERN_ERR "%s: request_irq error on irq %d, err=%d\n",
               dev->name, dev->irq, res);
        goto out_err;
    }
    if (debug > 0)
        printk("%s: opened (irq %d).\n", dev->name, dev->irq);
    //enable_irq(dev->irq);
    /* Set the timer to check for link beat. */
    init_timer(&np->timer);
    np->timer.expires = jiffies + 1;
    np->timer.data = (unsigned long)dev;
    np->timer.function = &netdev_timer;	/* timer handler */
    add_timer(&np->timer);
    val = IO_READ32((np->base_addr + ETH_DMA_6_Operation_Mode));
    val |= (1 << 1);	/* start receive */
    IO_WRITE32(val, (np->base_addr + ETH_DMA_6_Operation_Mode));
    running = 1;
#ifdef LOOP_BACK_TEST
    start_test(np->dev);
#endif
    return 0;
out_err:
    running = 0;
    return -EIO;
}
static int netdev_close(struct net_device *dev)
{
    struct am_net_private *np = netdev_priv(dev);
    unsigned long val;

    if (!running)
        return 0;
    running = 0;
    IO_WRITE32(0, (np->base_addr + ETH_DMA_6_Operation_Mode));
    IO_WRITE32(0, np->base_addr + ETH_DMA_7_Interrupt_Enable);
    val = IO_READ32((np->base_addr + ETH_DMA_5_Status));
    while ((val & (7 << 17)) || (val & (5 << 20))) {	/* DMA not finished? */
        printk(KERN_ERR "ERROR! DMA is not stopped, val=%lx!\n", val);
        msleep(1);	//wait until all dma is finished!!
        val = IO_READ32((np->base_addr + ETH_DMA_5_Status));
    }
    if (debug > 0)
        printk(KERN_INFO "NET DMA is stopped, val=%lx!\n", val);
    disable_irq(dev->irq);
    netif_carrier_off(dev);
    netif_stop_queue(dev);
    free_ringdesc(dev);
    free_irq(dev->irq, dev);
    del_timer_sync(&np->timer);
    // free_rxtx_rings(np);
    // free_ringdesc(np);
    // PERIPHS_CLEAR_BITS(P_ETH_PLL_CNTL, 1);	//disable clk
    if (debug > 0)
        printk(KERN_DEBUG "%s: closed\n", dev->name);
    return 0;
}
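
/*
 * TX path: pick the descriptor after last_tx, bail out (and stop the
 * queue) if the DMA still owns it, otherwise attach the skb, map it for
 * DMA, mark first/last/interrupt-on-completion in the count field, and
 * finally flip DescOwnByDma. The first frame after a reset (re)starts the
 * transmitter via the operation-mode register; after that, a write to the
 * transmit poll-demand register is enough to kick the DMA.
 */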
static int start_tx(struct sk_buff *skb, struct net_device *dev)
{
    struct am_net_private *np = netdev_priv(dev);
    int tmp;
    struct _tx_desc *tx;
    unsigned long flags;

    dev->trans_start = jiffies;
    if (!running)
        return -1;
    if (debug > 2) {
        printk(KERN_DEBUG "%s: Transmit frame queued\n", dev->name);
    }
    spin_lock_irqsave(&np->lock, flags);
    if (np->last_tx != NULL)
        tx = np->last_tx->next;
    else
        tx = &np->tx_ring[0];
    CACHE_RSYNC(tx, sizeof(*tx));
    if (tx->status & DescOwnByDma) {
        spin_unlock_irqrestore(&np->lock, flags);
        if (debug > 2)
            printk("tx queue is full\n");
        goto err;
    }
#ifdef DMA_USE_SKB_BUF
    if (tx->skb != NULL) {
        dev_kfree_skb_any(tx->skb);
        if (tx->buf_dma != 0)
            dma_unmap_single(&dev->dev, tx->buf_dma, np->rx_buf_sz,
                             DMA_TO_DEVICE);
    }
    tx->skb = skb;
    tx->buf = (unsigned long)skb->data;
#else
    memcpy((void *)tx->buf, skb->data, skb->len);
#endif
    tx->buf_dma = dma_map_single(&dev->dev, (void *)tx->buf,
                                 (unsigned long)(skb->len), DMA_TO_DEVICE);
    tx->count = ((skb->len << DescSize1Shift) & DescSize1Mask) |
                DescTxFirst | DescTxLast | DescTxIntEnable | DescChain;	//|2<<27; (1<<25, ring end)
    if (skb->ip_summed == CHECKSUM_PARTIAL) {
        tx->count |= 0x3 << 27;	//add hw checksum;
    }
    tx->status = DescOwnByDma;
    np->last_tx = tx;
    np->stats.tx_packets++;
    np->stats.tx_bytes += skb->len;
    CACHE_WSYNC(tx, sizeof(*tx));
#ifndef DMA_USE_SKB_BUF
    dev_kfree_skb_any(skb);
#endif
    if (np->first_tx) {
        np->first_tx = 0;
        tmp = IO_READ32(np->base_addr + ETH_DMA_6_Operation_Mode);
        tmp |= (7 << 14) | (1 << 13);
        IO_WRITE32(tmp, np->base_addr + ETH_DMA_6_Operation_Mode);
    } else {
        //ETH_DMA_1_Tr_Poll_Demand
        IO_WRITE32(1, np->base_addr + ETH_DMA_1_Tr_Poll_Demand);
    }
    spin_unlock_irqrestore(&np->lock, flags);
    return 0;
err:
    np->tx_full = 1;
    np->stats.tx_dropped++;
    netif_stop_queue(dev);
    return -1;
}
#ifdef LOOP_BACK_TEST
void test_loop_back(struct net_device *dev)
{
    int i = 0;
    char header[64] = "";
    struct am_net_private *np = netdev_priv(dev);

    printk("start testing!!\n");
    memcpy(header, dev->dev_addr, 6);
    memcpy(header + 8, dev->dev_addr, 6);
    header[12] = 0x80;
    header[13] = 0;
    while (1) {
        struct sk_buff *skb = dev_alloc_skb(1600);
        while (!running) {
            i = 0;
            msleep(10);
        }
        skb_put(skb, 1400);
        memset(skb->data, 0x55, skb->len);
        memcpy(skb->data, header, 16);
        if (start_tx(skb, dev) != 0) {
            /* tx list is full */
            msleep(1);
            dev_kfree_skb(skb);
        } else
            i++;
        if (i % 2000 == 0) {
            msleep(1);
            printk("send pkts=%ld, receive pkts=%ld\n",
                   np->stats.tx_packets, np->stats.rx_packets);
        }
    }
}

void start_test(struct net_device *dev)
{
    static int test_running = 0;
    struct am_net_private *np = netdev_priv(dev);

    if (test_running)
        return;
    phy_auto_negotiation_set(np);
    kernel_thread((void *)test_loop_back, (void *)dev,
                  CLONE_FS | CLONE_SIGHAND);
    test_running++;
}
#endif
static struct net_device_stats *get_stats(struct net_device *dev)
{
    struct am_net_private *np = netdev_priv(dev);
    return &np->stats;
}
static void tx_timeout(struct net_device *dev)
{
    struct am_net_private *np = netdev_priv(dev);
    int val;

    //FIXME
    spin_lock_irq(&np->lock);
    val = mdio_read(dev, np->phys[0], MII_BMSR);
    spin_unlock_irq(&np->lock);
    if (!(val & (BMSR_LSTATUS))) {	//link is down .....
        netif_stop_queue(dev);
        netif_carrier_off(dev);
    } else {
        netif_carrier_on(dev);
        netif_wake_queue(dev);
        dev->trans_start = jiffies;
        np->stats.tx_errors++;
    }
    return;
}
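
/*
 * The MAC address registers are split: the low 32 bits of the station
 * address go to ETH_MAC_Addr0_Low and the remaining 16 bits to
 * ETH_MAC_Addr0_High (the same layout the commented-out block in
 * phy_reset() follows).
 */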
static void write_mac_addr(struct net_device *dev, char *macaddr)
{
    struct am_net_private *np = netdev_priv(dev);
    unsigned int val;

    val = *((unsigned short *)&macaddr[4]);
    IO_WRITE32(val, np->base_addr + ETH_MAC_Addr0_High);
    val = *((unsigned long *)macaddr);
    IO_WRITE32(val, np->base_addr + ETH_MAC_Addr0_Low);
    printk("write mac addr to:");
    dump(macaddr, 6);
}
static inline unsigned char chartonum(char c)
{
    if (c >= '0' && c <= '9')
        return c - '0';
    if (c >= 'A' && c <= 'F')
        return (c - 'A') + 10;
    if (c >= 'a' && c <= 'f')
        return (c - 'a') + 10;
    return 0;
}
static void config_mac_addr(struct net_device *dev, void *mac)
{
    if (g_mac_setup == 0) {
        unsigned long mac_fir = 0;
        unsigned char mac_add[6] = {};

        printk("*****WARNING: MAC address has not been set up! Using a random MAC address.\n");
        mac_fir = READ_MPEG_REG(RAND64_ADDR1);
        mac_add[1] = mac_fir & 0xFF;
        mac_add[2] = (mac_fir >> 16) & 0xFF;
        mac_add[3] = (mac_fir >> 8) & 0xFF;
        mac_add[4] = (mac_fir >> 24) & 0xFF;
        mac_add[5] = (mac_add[1] << 4) | (mac_add[4] >> 4);
        memcpy(mac, mac_add, 6);
        printk("mac-addr: %x:%x:%x:%x:%x:%x\n",
               mac_add[0], mac_add[1], mac_add[2],
               mac_add[3], mac_add[4], mac_add[5]);
    }
    memcpy(dev->dev_addr, mac, 6);
    write_mac_addr(dev, dev->dev_addr);
}
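
/*
 * "mac=" kernel command line parameter, expected in the usual colon form
 * (e.g. mac=00:01:23:cd:ee:af): the parser below reads two hex digits per
 * byte and skips one separator character (line += 3), so any single-char
 * separator works.
 */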
static int __init mac_addr_set(char *line)
{
    unsigned char mac[6];
    int i = 0;

    for (i = 0; i < 6 && line[0] != '\0' && line[1] != '\0'; i++) {
        mac[i] = chartonum(line[0]) << 4 | chartonum(line[1]);
        line += 3;
    }
    memcpy(DEFMAC, mac, 6);
    printk("******** uboot setup mac-addr: %x:%x:%x:%x:%x:%x\n",
           DEFMAC[0], DEFMAC[1], DEFMAC[2], DEFMAC[3], DEFMAC[4], DEFMAC[5]);
    g_mac_setup++;
    return 1;
}
__setup("mac=", mac_addr_set);
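
/*
 * Multicast hash filtering: the hash index is the top 6 bits of the
 * bit-reversed CRC32 of the station address, selecting one bit in the
 * 64-bit hash table split across the Hash_Table_High/Low registers
 * (index >= 32 lands in the high register). This matches what
 * set_multicast_list() programs below.
 */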
static inline int phy_mc_hash(__u8 *addr)
{
    return (bitrev32(~crc32_le(~0, addr, ETH_ALEN)) >> 26);
}
static void set_multicast_list(struct net_device *dev)
{
    struct am_net_private *np = netdev_priv(dev);
    u32 tmp;

    if ((dev->flags & IFF_PROMISC)) {
        tmp = IO_READ32(np->base_addr + ETH_MAC_1_Frame_Filter);
        tmp |= 1;
        IO_WRITE32(tmp, np->base_addr + ETH_MAC_1_Frame_Filter);	//enter promiscuous mode
        printk("ether enter promiscuous mode\n");
    } else {
        tmp = IO_READ32(np->base_addr + ETH_MAC_1_Frame_Filter);
        tmp &= ~1;
        IO_WRITE32(tmp, np->base_addr + ETH_MAC_1_Frame_Filter);	//leave promiscuous mode
        //printk("ether leave promiscuous mode\n");
    }
    if ((dev->flags & IFF_ALLMULTI)) {
        tmp = IO_READ32(np->base_addr + ETH_MAC_1_Frame_Filter);
        tmp |= (1 << 4);
        IO_WRITE32(tmp, np->base_addr + ETH_MAC_1_Frame_Filter);	//all multicast
        printk("ether enter all multicast mode\n");
    } else {
        tmp = IO_READ32(np->base_addr + ETH_MAC_1_Frame_Filter);
        tmp &= ~(1 << 4);
        IO_WRITE32(tmp, np->base_addr + ETH_MAC_1_Frame_Filter);	//leave all multicast
        //printk("ether leave all multicast mode\n");
    }
    if (netdev_mc_count(dev) > 0) {
        u32 hash[2];
        struct netdev_hw_addr *ha;
        u32 hash_id;
        char *addr;

        hash[0] = 0;
        hash[1] = 0;
        printk("changed the Multicast, mcount=%d\n",
               netdev_mc_count(dev));
        netdev_for_each_mc_addr(ha, dev) {
            addr = ha->addr;
            hash_id = phy_mc_hash(addr);
            printk("add mac address:%02x:%02x:%02x:%02x:%02x:%02x, bit=%d\n",
                   addr[0], addr[1], addr[2], addr[3], addr[4],
                   addr[5], hash_id);
            //set_bit(hash_id, hash);
            if (hash_id > 31)
                hash[1] |= 1 << (hash_id - 32);
            else
                hash[0] |= 1 << hash_id;
        }
        printk("set hash low=%x, high=%x\n", hash[0], hash[1]);
        IO_WRITE32(hash[1], np->base_addr + ETH_MAC_2_Hash_Table_High);
        IO_WRITE32(hash[0], np->base_addr + ETH_MAC_3_Hash_Table_Low);
        tmp = IO_READ32(np->base_addr + ETH_MAC_1_Frame_Filter);
        tmp = (1 << 2) |	//hash filter
              0;
        printk("changed the filter setting to: %x\n", tmp);
        IO_WRITE32(tmp, np->base_addr + ETH_MAC_1_Frame_Filter);	//hash multicast
    }
}
static const struct net_device_ops am_netdev_ops = {
    .ndo_open = netdev_open,
    .ndo_stop = netdev_close,
    .ndo_start_xmit = start_tx,
    .ndo_tx_timeout = tx_timeout,
    .ndo_set_multicast_list = set_multicast_list,
    .ndo_do_ioctl = netdev_ioctl,
    .ndo_get_stats = get_stats,
    .ndo_change_mtu = eth_change_mtu,
    .ndo_set_mac_address = eth_mac_addr,
    .ndo_validate_addr = eth_validate_addr,
};
static int setup_net_device(struct net_device *dev)
{
    struct am_net_private *np = netdev_priv(dev);
    int res = 0;

    dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
    dev->features = NETIF_F_GEN_CSUM;
    dev->netdev_ops = &am_netdev_ops;
    dev->ethtool_ops = NULL;	// &netdev_ethtool_ops;
    dev->watchdog_timeo = TX_TIMEOUT;
    np->irq_mask = (1 << 16) |	//NIE: Normal Interrupt Summary Enable
                   (1 << 15) |	//AIE: Abnormal Interrupt Summary Enable
                   (1 << 6) |	//RIE: Receive Interrupt Enable
                   (1 << 2) |	//TUE: Transmit Buffer Unavailable Enable
                   (1 << 3) |	//TJT: Transmit Jabber Timeout
                   (1 << 4) |	//OVF: Receive Overflow
                   (1 << 5) |	//UNF: Transmit Underflow
                   (1 << 7) |	//RU: Receive Buffer Unavailable
                   (1 << 8) |	//RPS: Receive Process Stopped
                   (1 << 13) |	//FBI: Fatal Bus Error Interrupt
                   (1) |	//TIE: Transmit Interrupt Enable
                   0;
    config_mac_addr(dev, DEFMAC);
    dev_alloc_name(dev, "eth%d");
    memset(&np->stats, 0, sizeof(np->stats));
    return res;
}
static int probe_init(struct net_device *ndev)
{
    int phy = 0;
    int phy_idx = 0;
    int found = 0;
    int res = 0;
    unsigned int val;
    int k, kk;
    struct am_net_private *priv = netdev_priv(ndev);

    priv->dev = ndev;
    //ndev->base_addr = (unsigned long)ioremap(ETHBASE, 0x2000);
    ndev->base_addr = (unsigned long)(ETHBASE);
    ndev->irq = ETH_INTERRUPT;
    spin_lock_init(&priv->lock);
    priv->mii_if.dev = ndev;
    priv->mii_if.mdio_read = mdio_read;
    priv->mii_if.mdio_write = mdio_write;
    priv->base_addr = ndev->base_addr;
    if (debug > 0)
        printk("addr is %x\n", (unsigned int)ndev->base_addr);
    //bank_io_init(ndev);
    for (k = 0; k < 100 && !found; k++) {
        //mac reset ...
        IO_WRITE32(1, priv->base_addr + ETH_DMA_0_Bus_Mode);
        //wait for the mac reset to complete...
        for (kk = 0;
             (IO_READ32(priv->base_addr + ETH_DMA_0_Bus_Mode) & 1)
             && kk < 1000; kk++)
            udelay(1);
        if (kk >= 1000) {
            printk("error to reset mac at probe!\n");
            goto error0;
        }
        for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
            int mii_status = mdio_read(ndev, phy, MII_BMSR);
            if (mii_status != 0xffff && mii_status != 0x0000) {
                priv->phys[phy_idx++] = phy;
                priv->mii_if.advertising =
                    mdio_read(ndev, phy, MII_ADVERTISE);
                priv->mii =
                    (mdio_read(ndev, phy, MII_PHYSID1) << 16) +
                    mdio_read(ndev, phy, MII_PHYSID2);
                if (debug > 0)
                    printk(KERN_INFO
                           "%s: MII PHY %8.8xh found at address %d, status "
                           "0x%4.4x advertising %4.4x.\n",
                           DRV_NAME, priv->mii, phy,
                           mii_status,
                           priv->mii_if.advertising);
                found++;
            }
        }
    }
    if (!found) {
        printk("can't find any mii phy device!\n");
        res = -EIO;
        goto error0;
    }
    mdio_write(ndev, priv->phys[0], 18, priv->phys[0] | (1 << 14 | 7 << 5));
    val = mdio_read(ndev, priv->phys[0], 2);	//phy_rw(0, phyad, 2, &val);
    priv->phy_Identifier = val << 16;
    val = mdio_read(ndev, priv->phys[0], 3);	//phy_rw(0, phyad, 3, &val);
    priv->phy_Identifier |= val;
    printk("found phy, phy_Identifier=%x\n", priv->phy_Identifier);
    res = setup_net_device(ndev);
    if (res != 0) {
        printk("setup net device error!\n");
        res = -EIO;
        goto error0;
    }
    res = register_netdev(ndev);
    if (res != 0) {
        printk("can't register net device!\n");
        res = -EBUSY;
        goto error0;
    }
    tasklet_init(&priv->rx_tasklet, net_tasklet, (unsigned long)ndev);
    return 0;
//error1:
//    unregister_netdev(ndev);
error0:
    return res;
}
static int has_ethernet_pm = 0;
static struct aml_eth_platdata *eth_pdata;

static int ethernet_probe(struct platform_device *pdev)
{
    printk("ethernet_driver probe!\n");
    eth_pdata = (struct aml_eth_platdata *)pdev->dev.platform_data;
    if (!eth_pdata) {
        printk("\nethernet pm ops resource undefined.\n");
        return -EFAULT;
    }
    if (eth_pdata->clock_enable)
        eth_pdata->clock_enable();
    if (eth_pdata->pinmux_setup)
        eth_pdata->pinmux_setup();
    return 0;
}
static int ethernet_remove(struct platform_device *pdev)
{
    printk("ethernet_driver remove!\n");
    return 0;
}

static int ethernet_suspend(struct platform_device *dev, pm_message_t event)
{
    netdev_close(my_ndev);
    ///unregister_netdev(my_ndev);
    eth_pdata->clock_disable();
    //eth_clk_set(ETH_CLKSRC_APLL_CLK, 400*CLK_1M, 0);
    printk("ethernet_suspend()\n");
    return 0;
}

static int ethernet_resume(struct platform_device *dev)
{
    int res = 0;

    if (eth_pdata->clock_enable)
        eth_pdata->clock_enable();
    //eth_clk_set(ETH_CLKSRC_APLL_CLK, 400*CLK_1M, 50*CLK_1M);
    eth_pdata->reset();
    printk("ethernet_resume()\n");
    //res = probe_init(my_ndev);
    res = netdev_open(my_ndev);
    if (res != 0)
        printk("error: netdev_open failed on resume!\n");
    printk("ethernet_resume!\n");
    return 0;
}
static struct platform_driver ethernet_driver = {
    .probe = ethernet_probe,
    .remove = ethernet_remove,
    .suspend = ethernet_suspend,
    .resume = ethernet_resume,
    .driver = {
        .name = "meson-eth",
    }
};
static int __init am_net_init(void)
{
    int res;

    printk(DRV_NAME " init(dbg[%p]:%d)\n", (&debug), debug);
    my_ndev = alloc_etherdev(sizeof(struct am_net_private));
    if (my_ndev == NULL) {
        printk(DRV_NAME " ndev alloc failed!!\n");
        return -ENOMEM;
    }
    res = probe_init(my_ndev);
    if (res != 0)
        free_netdev(my_ndev);
    else {
        printk("ethernet_driver init\n");
        if (platform_driver_register(&ethernet_driver)) {
            printk("failed to register ethernet_pm driver\n");
            has_ethernet_pm = 0;
        } else
            has_ethernet_pm = 1;
    }
    return res;
}

static void am_net_free(struct net_device *ndev)
{
    //struct am_net_private *np = netdev_priv(ndev);
    netdev_close(ndev);
    unregister_netdev(ndev);
}

static void __exit am_net_exit(void)
{
    printk(DRV_NAME " exit\n");
    am_net_free(my_ndev);
    free_netdev(my_ndev);
    if (has_ethernet_pm == 1) {
        printk("ethernet_pm driver remove.\n");
        platform_driver_unregister(&ethernet_driver);
        has_ethernet_pm = 0;
    }
    return;
}

module_init(am_net_init);
module_exit(am_net_exit);