b44.c 58 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
0552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185218621872188218921902191219221932194219521962197219821992200220122022203220422052206220722082209221022112212221322142215221622172218221922202221222222232224222522262227222822292230223122322233223422352236223722382239224022412242224322442245224622472248224922502251225222532254225522562257225822592260226122622263226422652266226722682269227022712272227322742275227622772278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380
  1. /* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
  2. *
  3. * Copyright (C) 2002 David S. Miller (davem@redhat.com)
  4. * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
  5. * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
  6. * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
  7. * Copyright (C) 2006 Broadcom Corporation.
  8. * Copyright (C) 2007 Michael Buesch <mb@bu3sch.de>
  9. *
  10. * Distribute under GPL.
  11. */
  12. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  13. #include <linux/kernel.h>
  14. #include <linux/module.h>
  15. #include <linux/moduleparam.h>
  16. #include <linux/types.h>
  17. #include <linux/netdevice.h>
  18. #include <linux/ethtool.h>
  19. #include <linux/mii.h>
  20. #include <linux/if_ether.h>
  21. #include <linux/if_vlan.h>
  22. #include <linux/etherdevice.h>
  23. #include <linux/pci.h>
  24. #include <linux/delay.h>
  25. #include <linux/init.h>
  26. #include <linux/dma-mapping.h>
  27. #include <linux/ssb/ssb.h>
  28. #include <linux/slab.h>
  29. #include <asm/uaccess.h>
  30. #include <asm/io.h>
  31. #include <asm/irq.h>
  32. #include "b44.h"
  33. #define DRV_MODULE_NAME "b44"
  34. #define DRV_MODULE_VERSION "2.0"
  35. #define B44_DEF_MSG_ENABLE \
  36. (NETIF_MSG_DRV | \
  37. NETIF_MSG_PROBE | \
  38. NETIF_MSG_LINK | \
  39. NETIF_MSG_TIMER | \
  40. NETIF_MSG_IFDOWN | \
  41. NETIF_MSG_IFUP | \
  42. NETIF_MSG_RX_ERR | \
  43. NETIF_MSG_TX_ERR)
  44. /* length of time before we decide the hardware is borked,
  45. * and dev->tx_timeout() should be called to fix the problem
  46. */
  47. #define B44_TX_TIMEOUT (5 * HZ)
  48. /* hardware minimum and maximum for a single frame's data payload */
  49. #define B44_MIN_MTU 60
  50. #define B44_MAX_MTU 1500
  51. #define B44_RX_RING_SIZE 512
  52. #define B44_DEF_RX_RING_PENDING 200
  53. #define B44_RX_RING_BYTES (sizeof(struct dma_desc) * \
  54. B44_RX_RING_SIZE)
  55. #define B44_TX_RING_SIZE 512
  56. #define B44_DEF_TX_RING_PENDING (B44_TX_RING_SIZE - 1)
  57. #define B44_TX_RING_BYTES (sizeof(struct dma_desc) * \
  58. B44_TX_RING_SIZE)
  59. #define TX_RING_GAP(BP) \
  60. (B44_TX_RING_SIZE - (BP)->tx_pending)
  61. #define TX_BUFFS_AVAIL(BP) \
  62. (((BP)->tx_cons <= (BP)->tx_prod) ? \
  63. (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod : \
  64. (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
  65. #define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
  66. #define RX_PKT_OFFSET (RX_HEADER_LEN + 2)
  67. #define RX_PKT_BUF_SZ (1536 + RX_PKT_OFFSET)
  68. /* minimum number of free TX descriptors required to wake up TX process */
  69. #define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
  70. /* b44 internal pattern match filter info */
  71. #define B44_PATTERN_BASE 0x400
  72. #define B44_PATTERN_SIZE 0x80
  73. #define B44_PMASK_BASE 0x600
  74. #define B44_PMASK_SIZE 0x10
  75. #define B44_MAX_PATTERNS 16
  76. #define B44_ETHIPV6UDP_HLEN 62
  77. #define B44_ETHIPV4UDP_HLEN 42
  78. static char version[] __devinitdata =
  79. DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION "\n";
  80. MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
  81. MODULE_DESCRIPTION("Broadcom 44xx/47xx 10/100 PCI ethernet driver");
  82. MODULE_LICENSE("GPL");
  83. MODULE_VERSION(DRV_MODULE_VERSION);
  84. static int b44_debug = -1; /* -1 == use B44_DEF_MSG_ENABLE as value */
  85. module_param(b44_debug, int, 0);
  86. MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
  87. #ifdef CONFIG_B44_PCI
  88. static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = {
  89. { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
  90. { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
  91. { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
  92. { 0 } /* terminate list with empty entry */
  93. };
  94. MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
  95. static struct pci_driver b44_pci_driver = {
  96. .name = DRV_MODULE_NAME,
  97. .id_table = b44_pci_tbl,
  98. };
  99. #endif /* CONFIG_B44_PCI */
  100. static const struct ssb_device_id b44_ssb_tbl[] = {
  101. SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
  102. SSB_DEVTABLE_END
  103. };
  104. MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);
  105. static void b44_halt(struct b44 *);
  106. static void b44_init_rings(struct b44 *);
  107. #define B44_FULL_RESET 1
  108. #define B44_FULL_RESET_SKIP_PHY 2
  109. #define B44_PARTIAL_RESET 3
  110. #define B44_CHIP_RESET_FULL 4
  111. #define B44_CHIP_RESET_PARTIAL 5
  112. static void b44_init_hw(struct b44 *, int);
  113. static int dma_desc_sync_size;
  114. static int instance;
  115. static const char b44_gstrings[][ETH_GSTRING_LEN] = {
  116. #define _B44(x...) # x,
  117. B44_STAT_REG_DECLARE
  118. #undef _B44
  119. };
  120. static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
  121. dma_addr_t dma_base,
  122. unsigned long offset,
  123. enum dma_data_direction dir)
  124. {
  125. dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
  126. dma_desc_sync_size, dir);
  127. }
  128. static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
  129. dma_addr_t dma_base,
  130. unsigned long offset,
  131. enum dma_data_direction dir)
  132. {
  133. dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
  134. dma_desc_sync_size, dir);
  135. }
  136. static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
  137. {
  138. return ssb_read32(bp->sdev, reg);
  139. }
  140. static inline void bw32(const struct b44 *bp,
  141. unsigned long reg, unsigned long val)
  142. {
  143. ssb_write32(bp->sdev, reg, val);
  144. }
  145. static int b44_wait_bit(struct b44 *bp, unsigned long reg,
  146. u32 bit, unsigned long timeout, const int clear)
  147. {
  148. unsigned long i;
  149. for (i = 0; i < timeout; i++) {
  150. u32 val = br32(bp, reg);
  151. if (clear && !(val & bit))
  152. break;
  153. if (!clear && (val & bit))
  154. break;
  155. udelay(10);
  156. }
  157. if (i == timeout) {
  158. if (net_ratelimit())
  159. netdev_err(bp->dev, "BUG! Timeout waiting for bit %08x of register %lx to %s\n",
  160. bit, reg, clear ? "clear" : "set");
  161. return -ENODEV;
  162. }
  163. return 0;
  164. }
  165. static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
  166. {
  167. u32 val;
  168. bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
  169. (index << CAM_CTRL_INDEX_SHIFT)));
  170. b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
  171. val = br32(bp, B44_CAM_DATA_LO);
  172. data[2] = (val >> 24) & 0xFF;
  173. data[3] = (val >> 16) & 0xFF;
  174. data[4] = (val >> 8) & 0xFF;
  175. data[5] = (val >> 0) & 0xFF;
  176. val = br32(bp, B44_CAM_DATA_HI);
  177. data[0] = (val >> 8) & 0xFF;
  178. data[1] = (val >> 0) & 0xFF;
  179. }
  180. static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
  181. {
  182. u32 val;
  183. val = ((u32) data[2]) << 24;
  184. val |= ((u32) data[3]) << 16;
  185. val |= ((u32) data[4]) << 8;
  186. val |= ((u32) data[5]) << 0;
  187. bw32(bp, B44_CAM_DATA_LO, val);
  188. val = (CAM_DATA_HI_VALID |
  189. (((u32) data[0]) << 8) |
  190. (((u32) data[1]) << 0));
  191. bw32(bp, B44_CAM_DATA_HI, val);
  192. bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
  193. (index << CAM_CTRL_INDEX_SHIFT)));
  194. b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
  195. }
  196. static inline void __b44_disable_ints(struct b44 *bp)
  197. {
  198. bw32(bp, B44_IMASK, 0);
  199. }
  200. static void b44_disable_ints(struct b44 *bp)
  201. {
  202. __b44_disable_ints(bp);
  203. /* Flush posted writes. */
  204. br32(bp, B44_IMASK);
  205. }
  206. static void b44_enable_ints(struct b44 *bp)
  207. {
  208. bw32(bp, B44_IMASK, bp->imask);
  209. }
  210. static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
  211. {
  212. int err;
  213. bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
  214. bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
  215. (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
  216. (phy_addr << MDIO_DATA_PMD_SHIFT) |
  217. (reg << MDIO_DATA_RA_SHIFT) |
  218. (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
  219. err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
  220. *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
  221. return err;
  222. }
  223. static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
  224. {
  225. bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
  226. bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
  227. (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
  228. (phy_addr << MDIO_DATA_PMD_SHIFT) |
  229. (reg << MDIO_DATA_RA_SHIFT) |
  230. (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
  231. (val & MDIO_DATA_DATA)));
  232. return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
  233. }
  234. static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
  235. {
  236. if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
  237. return 0;
  238. return __b44_readphy(bp, bp->phy_addr, reg, val);
  239. }
  240. static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
  241. {
  242. if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
  243. return 0;
  244. return __b44_writephy(bp, bp->phy_addr, reg, val);
  245. }
  246. /* miilib interface */
  247. static int b44_mii_read(struct net_device *dev, int phy_id, int location)
  248. {
  249. u32 val;
  250. struct b44 *bp = netdev_priv(dev);
  251. int rc = __b44_readphy(bp, phy_id, location, &val);
  252. if (rc)
  253. return 0xffffffff;
  254. return val;
  255. }
  256. static void b44_mii_write(struct net_device *dev, int phy_id, int location,
  257. int val)
  258. {
  259. struct b44 *bp = netdev_priv(dev);
  260. __b44_writephy(bp, phy_id, location, val);
  261. }
  262. static int b44_phy_reset(struct b44 *bp)
  263. {
  264. u32 val;
  265. int err;
  266. if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
  267. return 0;
  268. err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
  269. if (err)
  270. return err;
  271. udelay(100);
  272. err = b44_readphy(bp, MII_BMCR, &val);
  273. if (!err) {
  274. if (val & BMCR_RESET) {
  275. netdev_err(bp->dev, "PHY Reset would not complete\n");
  276. err = -ENODEV;
  277. }
  278. }
  279. return err;
  280. }
  281. static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
  282. {
  283. u32 val;
  284. bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
  285. bp->flags |= pause_flags;
  286. val = br32(bp, B44_RXCONFIG);
  287. if (pause_flags & B44_FLAG_RX_PAUSE)
  288. val |= RXCONFIG_FLOW;
  289. else
  290. val &= ~RXCONFIG_FLOW;
  291. bw32(bp, B44_RXCONFIG, val);
  292. val = br32(bp, B44_MAC_FLOW);
  293. if (pause_flags & B44_FLAG_TX_PAUSE)
  294. val |= (MAC_FLOW_PAUSE_ENAB |
  295. (0xc0 & MAC_FLOW_RX_HI_WATER));
  296. else
  297. val &= ~MAC_FLOW_PAUSE_ENAB;
  298. bw32(bp, B44_MAC_FLOW, val);
  299. }
  300. static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
  301. {
  302. u32 pause_enab = 0;
  303. /* The driver supports only rx pause by default because
  304. the b44 mac tx pause mechanism generates excessive
  305. pause frames.
  306. Use ethtool to turn on b44 tx pause if necessary.
  307. */
  308. if ((local & ADVERTISE_PAUSE_CAP) &&
  309. (local & ADVERTISE_PAUSE_ASYM)){
  310. if ((remote & LPA_PAUSE_ASYM) &&
  311. !(remote & LPA_PAUSE_CAP))
  312. pause_enab |= B44_FLAG_RX_PAUSE;
  313. }
  314. __b44_set_flow_ctrl(bp, pause_enab);
  315. }
  316. #ifdef CONFIG_BCM47XX
  317. #include <asm/mach-bcm47xx/nvram.h>
  318. static void b44_wap54g10_workaround(struct b44 *bp)
  319. {
  320. char buf[20];
  321. u32 val;
  322. int err;
  323. /*
  324. * workaround for bad hardware design in Linksys WAP54G v1.0
  325. * see https://dev.openwrt.org/ticket/146
  326. * check and reset bit "isolate"
  327. */
  328. if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
  329. return;
  330. if (simple_strtoul(buf, NULL, 0) == 2) {
  331. err = __b44_readphy(bp, 0, MII_BMCR, &val);
  332. if (err)
  333. goto error;
  334. if (!(val & BMCR_ISOLATE))
  335. return;
  336. val &= ~BMCR_ISOLATE;
  337. err = __b44_writephy(bp, 0, MII_BMCR, val);
  338. if (err)
  339. goto error;
  340. }
  341. return;
  342. error:
  343. pr_warning("PHY: cannot reset MII transceiver isolate bit\n");
  344. }
  345. #else
  346. static inline void b44_wap54g10_workaround(struct b44 *bp)
  347. {
  348. }
  349. #endif
  350. static int b44_setup_phy(struct b44 *bp)
  351. {
  352. u32 val;
  353. int err;
  354. b44_wap54g10_workaround(bp);
  355. if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
  356. return 0;
  357. if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
  358. goto out;
  359. if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
  360. val & MII_ALEDCTRL_ALLMSK)) != 0)
  361. goto out;
  362. if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
  363. goto out;
  364. if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
  365. val | MII_TLEDCTRL_ENABLE)) != 0)
  366. goto out;
  367. if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
  368. u32 adv = ADVERTISE_CSMA;
  369. if (bp->flags & B44_FLAG_ADV_10HALF)
  370. adv |= ADVERTISE_10HALF;
  371. if (bp->flags & B44_FLAG_ADV_10FULL)
  372. adv |= ADVERTISE_10FULL;
  373. if (bp->flags & B44_FLAG_ADV_100HALF)
  374. adv |= ADVERTISE_100HALF;
  375. if (bp->flags & B44_FLAG_ADV_100FULL)
  376. adv |= ADVERTISE_100FULL;
  377. if (bp->flags & B44_FLAG_PAUSE_AUTO)
  378. adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
  379. if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
  380. goto out;
  381. if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
  382. BMCR_ANRESTART))) != 0)
  383. goto out;
  384. } else {
  385. u32 bmcr;
  386. if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
  387. goto out;
  388. bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
  389. if (bp->flags & B44_FLAG_100_BASE_T)
  390. bmcr |= BMCR_SPEED100;
  391. if (bp->flags & B44_FLAG_FULL_DUPLEX)
  392. bmcr |= BMCR_FULLDPLX;
  393. if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
  394. goto out;
  395. /* Since we will not be negotiating there is no safe way
  396. * to determine if the link partner supports flow control
  397. * or not. So just disable it completely in this case.
  398. */
  399. b44_set_flow_ctrl(bp, 0, 0);
  400. }
  401. out:
  402. return err;
  403. }
  404. static void b44_stats_update(struct b44 *bp)
  405. {
  406. unsigned long reg;
  407. u32 *val;
  408. val = &bp->hw_stats.tx_good_octets;
  409. for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
  410. *val++ += br32(bp, reg);
  411. }
  412. /* Pad */
  413. reg += 8*4UL;
  414. for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
  415. *val++ += br32(bp, reg);
  416. }
  417. }
  418. static void b44_link_report(struct b44 *bp)
  419. {
  420. if (!netif_carrier_ok(bp->dev)) {
  421. netdev_info(bp->dev, "Link is down\n");
  422. } else {
  423. netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
  424. (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
  425. (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
  426. netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
  427. (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
  428. (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
  429. }
  430. }
  431. static void b44_check_phy(struct b44 *bp)
  432. {
  433. u32 bmsr, aux;
  434. if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
  435. bp->flags |= B44_FLAG_100_BASE_T;
  436. bp->flags |= B44_FLAG_FULL_DUPLEX;
  437. if (!netif_carrier_ok(bp->dev)) {
  438. u32 val = br32(bp, B44_TX_CTRL);
  439. val |= TX_CTRL_DUPLEX;
  440. bw32(bp, B44_TX_CTRL, val);
  441. netif_carrier_on(bp->dev);
  442. b44_link_report(bp);
  443. }
  444. return;
  445. }
  446. if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
  447. !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
  448. (bmsr != 0xffff)) {
  449. if (aux & MII_AUXCTRL_SPEED)
  450. bp->flags |= B44_FLAG_100_BASE_T;
  451. else
  452. bp->flags &= ~B44_FLAG_100_BASE_T;
  453. if (aux & MII_AUXCTRL_DUPLEX)
  454. bp->flags |= B44_FLAG_FULL_DUPLEX;
  455. else
  456. bp->flags &= ~B44_FLAG_FULL_DUPLEX;
  457. if (!netif_carrier_ok(bp->dev) &&
  458. (bmsr & BMSR_LSTATUS)) {
  459. u32 val = br32(bp, B44_TX_CTRL);
  460. u32 local_adv, remote_adv;
  461. if (bp->flags & B44_FLAG_FULL_DUPLEX)
  462. val |= TX_CTRL_DUPLEX;
  463. else
  464. val &= ~TX_CTRL_DUPLEX;
  465. bw32(bp, B44_TX_CTRL, val);
  466. if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
  467. !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
  468. !b44_readphy(bp, MII_LPA, &remote_adv))
  469. b44_set_flow_ctrl(bp, local_adv, remote_adv);
  470. /* Link now up */
  471. netif_carrier_on(bp->dev);
  472. b44_link_report(bp);
  473. } else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
  474. /* Link now down */
  475. netif_carrier_off(bp->dev);
  476. b44_link_report(bp);
  477. }
  478. if (bmsr & BMSR_RFAULT)
  479. netdev_warn(bp->dev, "Remote fault detected in PHY\n");
  480. if (bmsr & BMSR_JCD)
  481. netdev_warn(bp->dev, "Jabber detected in PHY\n");
  482. }
  483. }
  484. static void b44_timer(unsigned long __opaque)
  485. {
  486. struct b44 *bp = (struct b44 *) __opaque;
  487. spin_lock_irq(&bp->lock);
  488. b44_check_phy(bp);
  489. b44_stats_update(bp);
  490. spin_unlock_irq(&bp->lock);
  491. mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
  492. }
  493. static void b44_tx(struct b44 *bp)
  494. {
  495. u32 cur, cons;
  496. cur = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
  497. cur /= sizeof(struct dma_desc);
  498. /* XXX needs updating when NETIF_F_SG is supported */
  499. for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
  500. struct ring_info *rp = &bp->tx_buffers[cons];
  501. struct sk_buff *skb = rp->skb;
  502. BUG_ON(skb == NULL);
  503. dma_unmap_single(bp->sdev->dma_dev,
  504. rp->mapping,
  505. skb->len,
  506. DMA_TO_DEVICE);
  507. rp->skb = NULL;
  508. dev_kfree_skb_irq(skb);
  509. }
  510. bp->tx_cons = cons;
  511. if (netif_queue_stopped(bp->dev) &&
  512. TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
  513. netif_wake_queue(bp->dev);
  514. bw32(bp, B44_GPTIMER, 0);
  515. }
  516. /* Works like this. This chip writes a 'struct rx_header" 30 bytes
  517. * before the DMA address you give it. So we allocate 30 more bytes
  518. * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
  519. * point the chip at 30 bytes past where the rx_header will go.
  520. */
  521. static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
  522. {
  523. struct dma_desc *dp;
  524. struct ring_info *src_map, *map;
  525. struct rx_header *rh;
  526. struct sk_buff *skb;
  527. dma_addr_t mapping;
  528. int dest_idx;
  529. u32 ctrl;
  530. src_map = NULL;
  531. if (src_idx >= 0)
  532. src_map = &bp->rx_buffers[src_idx];
  533. dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
  534. map = &bp->rx_buffers[dest_idx];
  535. skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
  536. if (skb == NULL)
  537. return -ENOMEM;
  538. mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
  539. RX_PKT_BUF_SZ,
  540. DMA_FROM_DEVICE);
  541. /* Hardware bug work-around, the chip is unable to do PCI DMA
  542. to/from anything above 1GB :-( */
  543. if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
  544. mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
  545. /* Sigh... */
  546. if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
  547. dma_unmap_single(bp->sdev->dma_dev, mapping,
  548. RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
  549. dev_kfree_skb_any(skb);
  550. skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
  551. if (skb == NULL)
  552. return -ENOMEM;
  553. mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
  554. RX_PKT_BUF_SZ,
  555. DMA_FROM_DEVICE);
  556. if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
  557. mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
  558. if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
  559. dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
  560. dev_kfree_skb_any(skb);
  561. return -ENOMEM;
  562. }
  563. bp->force_copybreak = 1;
  564. }
  565. rh = (struct rx_header *) skb->data;
  566. rh->len = 0;
  567. rh->flags = 0;
  568. map->skb = skb;
  569. map->mapping = mapping;
  570. if (src_map != NULL)
  571. src_map->skb = NULL;
  572. ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
  573. if (dest_idx == (B44_RX_RING_SIZE - 1))
  574. ctrl |= DESC_CTRL_EOT;
  575. dp = &bp->rx_ring[dest_idx];
  576. dp->ctrl = cpu_to_le32(ctrl);
  577. dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);
  578. if (bp->flags & B44_FLAG_RX_RING_HACK)
  579. b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
  580. dest_idx * sizeof(*dp),
  581. DMA_BIDIRECTIONAL);
  582. return RX_PKT_BUF_SZ;
  583. }
  584. static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
  585. {
  586. struct dma_desc *src_desc, *dest_desc;
  587. struct ring_info *src_map, *dest_map;
  588. struct rx_header *rh;
  589. int dest_idx;
  590. __le32 ctrl;
  591. dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
  592. dest_desc = &bp->rx_ring[dest_idx];
  593. dest_map = &bp->rx_buffers[dest_idx];
  594. src_desc = &bp->rx_ring[src_idx];
  595. src_map = &bp->rx_buffers[src_idx];
  596. dest_map->skb = src_map->skb;
  597. rh = (struct rx_header *) src_map->skb->data;
  598. rh->len = 0;
  599. rh->flags = 0;
  600. dest_map->mapping = src_map->mapping;
  601. if (bp->flags & B44_FLAG_RX_RING_HACK)
  602. b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
  603. src_idx * sizeof(*src_desc),
  604. DMA_BIDIRECTIONAL);
  605. ctrl = src_desc->ctrl;
  606. if (dest_idx == (B44_RX_RING_SIZE - 1))
  607. ctrl |= cpu_to_le32(DESC_CTRL_EOT);
  608. else
  609. ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
  610. dest_desc->ctrl = ctrl;
  611. dest_desc->addr = src_desc->addr;
  612. src_map->skb = NULL;
  613. if (bp->flags & B44_FLAG_RX_RING_HACK)
  614. b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
  615. dest_idx * sizeof(*dest_desc),
  616. DMA_BIDIRECTIONAL);
  617. dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
  618. RX_PKT_BUF_SZ,
  619. DMA_FROM_DEVICE);
  620. }
  621. static int b44_rx(struct b44 *bp, int budget)
  622. {
  623. int received;
  624. u32 cons, prod;
  625. received = 0;
  626. prod = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
  627. prod /= sizeof(struct dma_desc);
  628. cons = bp->rx_cons;
  629. while (cons != prod && budget > 0) {
  630. struct ring_info *rp = &bp->rx_buffers[cons];
  631. struct sk_buff *skb = rp->skb;
  632. dma_addr_t map = rp->mapping;
  633. struct rx_header *rh;
  634. u16 len;
  635. dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
  636. RX_PKT_BUF_SZ,
  637. DMA_FROM_DEVICE);
  638. rh = (struct rx_header *) skb->data;
  639. len = le16_to_cpu(rh->len);
  640. if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
  641. (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
  642. drop_it:
  643. b44_recycle_rx(bp, cons, bp->rx_prod);
  644. drop_it_no_recycle:
  645. bp->dev->stats.rx_dropped++;
  646. goto next_pkt;
  647. }
  648. if (len == 0) {
  649. int i = 0;
  650. do {
  651. udelay(2);
  652. barrier();
  653. len = le16_to_cpu(rh->len);
  654. } while (len == 0 && i++ < 5);
  655. if (len == 0)
  656. goto drop_it;
  657. }
  658. /* Omit CRC. */
  659. len -= 4;
  660. if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
  661. int skb_size;
  662. skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
  663. if (skb_size < 0)
  664. goto drop_it;
  665. dma_unmap_single(bp->sdev->dma_dev, map,
  666. skb_size, DMA_FROM_DEVICE);
  667. /* Leave out rx_header */
  668. skb_put(skb, len + RX_PKT_OFFSET);
  669. skb_pull(skb, RX_PKT_OFFSET);
  670. } else {
  671. struct sk_buff *copy_skb;
  672. b44_recycle_rx(bp, cons, bp->rx_prod);
  673. copy_skb = netdev_alloc_skb(bp->dev, len + 2);
  674. if (copy_skb == NULL)
  675. goto drop_it_no_recycle;
  676. skb_reserve(copy_skb, 2);
  677. skb_put(copy_skb, len);
  678. /* DMA sync done above, copy just the actual packet */
  679. skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
  680. copy_skb->data, len);
  681. skb = copy_skb;
  682. }
  683. skb_checksum_none_assert(skb);
  684. skb->protocol = eth_type_trans(skb, bp->dev);
  685. netif_receive_skb(skb);
  686. received++;
  687. budget--;
  688. next_pkt:
  689. bp->rx_prod = (bp->rx_prod + 1) &
  690. (B44_RX_RING_SIZE - 1);
  691. cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
  692. }
  693. bp->rx_cons = cons;
  694. bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
  695. return received;
  696. }
  697. static int b44_poll(struct napi_struct *napi, int budget)
  698. {
  699. struct b44 *bp = container_of(napi, struct b44, napi);
  700. int work_done;
  701. unsigned long flags;
  702. spin_lock_irqsave(&bp->lock, flags);
  703. if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
  704. /* spin_lock(&bp->tx_lock); */
  705. b44_tx(bp);
  706. /* spin_unlock(&bp->tx_lock); */
  707. }
  708. if (bp->istat & ISTAT_RFO) { /* fast recovery, in ~20msec */
  709. bp->istat &= ~ISTAT_RFO;
  710. b44_disable_ints(bp);
  711. ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
  712. b44_init_rings(bp);
  713. b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
  714. netif_wake_queue(bp->dev);
  715. }
  716. spin_unlock_irqrestore(&bp->lock, flags);
  717. work_done = 0;
  718. if (bp->istat & ISTAT_RX)
  719. work_done += b44_rx(bp, budget);
  720. if (bp->istat & ISTAT_ERRORS) {
  721. spin_lock_irqsave(&bp->lock, flags);
  722. b44_halt(bp);
  723. b44_init_rings(bp);
  724. b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
  725. netif_wake_queue(bp->dev);
  726. spin_unlock_irqrestore(&bp->lock, flags);
  727. work_done = 0;
  728. }
  729. if (work_done < budget) {
  730. napi_complete(napi);
  731. b44_enable_ints(bp);
  732. }
  733. return work_done;
  734. }
  735. static irqreturn_t b44_interrupt(int irq, void *dev_id)
  736. {
  737. struct net_device *dev = dev_id;
  738. struct b44 *bp = netdev_priv(dev);
  739. u32 istat, imask;
  740. int handled = 0;
  741. spin_lock(&bp->lock);
  742. istat = br32(bp, B44_ISTAT);
  743. imask = br32(bp, B44_IMASK);
  744. /* The interrupt mask register controls which interrupt bits
  745. * will actually raise an interrupt to the CPU when set by hw/firmware,
  746. * but doesn't mask off the bits.
  747. */
  748. istat &= imask;
  749. if (istat) {
  750. handled = 1;
  751. if (unlikely(!netif_running(dev))) {
  752. netdev_info(dev, "late interrupt\n");
  753. goto irq_ack;
  754. }
  755. if (napi_schedule_prep(&bp->napi)) {
  756. /* NOTE: These writes are posted by the readback of
  757. * the ISTAT register below.
  758. */
  759. bp->istat = istat;
  760. __b44_disable_ints(bp);
  761. __napi_schedule(&bp->napi);
  762. }
  763. irq_ack:
  764. bw32(bp, B44_ISTAT, istat);
  765. br32(bp, B44_ISTAT);
  766. }
  767. spin_unlock(&bp->lock);
  768. return IRQ_RETVAL(handled);
  769. }
  770. static void b44_tx_timeout(struct net_device *dev)
  771. {
  772. struct b44 *bp = netdev_priv(dev);
  773. netdev_err(dev, "transmit timed out, resetting\n");
  774. spin_lock_irq(&bp->lock);
  775. b44_halt(bp);
  776. b44_init_rings(bp);
  777. b44_init_hw(bp, B44_FULL_RESET);
  778. spin_unlock_irq(&bp->lock);
  779. b44_enable_ints(bp);
  780. netif_wake_queue(dev);
  781. }
  782. static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
  783. {
  784. struct b44 *bp = netdev_priv(dev);
  785. int rc = NETDEV_TX_OK;
  786. dma_addr_t mapping;
  787. u32 len, entry, ctrl;
  788. unsigned long flags;
  789. len = skb->len;
  790. spin_lock_irqsave(&bp->lock, flags);
  791. /* This is a hard error, log it. */
  792. if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
  793. netif_stop_queue(dev);
  794. netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
  795. goto err_out;
  796. }
  797. mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
  798. if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
  799. struct sk_buff *bounce_skb;
  800. /* Chip can't handle DMA to/from >1GB, use bounce buffer */
  801. if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
  802. dma_unmap_single(bp->sdev->dma_dev, mapping, len,
  803. DMA_TO_DEVICE);
  804. bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
  805. if (!bounce_skb)
  806. goto err_out;
  807. mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
  808. len, DMA_TO_DEVICE);
  809. if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
  810. if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
  811. dma_unmap_single(bp->sdev->dma_dev, mapping,
  812. len, DMA_TO_DEVICE);
  813. dev_kfree_skb_any(bounce_skb);
  814. goto err_out;
  815. }
  816. skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
  817. dev_kfree_skb_any(skb);
  818. skb = bounce_skb;
  819. }
  820. entry = bp->tx_prod;
  821. bp->tx_buffers[entry].skb = skb;
  822. bp->tx_buffers[entry].mapping = mapping;
  823. ctrl = (len & DESC_CTRL_LEN);
  824. ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
  825. if (entry == (B44_TX_RING_SIZE - 1))
  826. ctrl |= DESC_CTRL_EOT;
  827. bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
  828. bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
  829. if (bp->flags & B44_FLAG_TX_RING_HACK)
  830. b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
  831. entry * sizeof(bp->tx_ring[0]),
  832. DMA_TO_DEVICE);
  833. entry = NEXT_TX(entry);
  834. bp->tx_prod = entry;
  835. wmb();
  836. bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
  837. if (bp->flags & B44_FLAG_BUGGY_TXPTR)
  838. bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
  839. if (bp->flags & B44_FLAG_REORDER_BUG)
  840. br32(bp, B44_DMATX_PTR);
  841. if (TX_BUFFS_AVAIL(bp) < 1)
  842. netif_stop_queue(dev);
  843. out_unlock:
  844. spin_unlock_irqrestore(&bp->lock, flags);
  845. return rc;
  846. err_out:
  847. rc = NETDEV_TX_BUSY;
  848. goto out_unlock;
  849. }
  850. static int b44_change_mtu(struct net_device *dev, int new_mtu)
  851. {
  852. struct b44 *bp = netdev_priv(dev);
  853. if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
  854. return -EINVAL;
  855. if (!netif_running(dev)) {
  856. /* We'll just catch it later when the
  857. * device is up'd.
  858. */
  859. dev->mtu = new_mtu;
  860. return 0;
  861. }
  862. spin_lock_irq(&bp->lock);
  863. b44_halt(bp);
  864. dev->mtu = new_mtu;
  865. b44_init_rings(bp);
  866. b44_init_hw(bp, B44_FULL_RESET);
  867. spin_unlock_irq(&bp->lock);
  868. b44_enable_ints(bp);
  869. return 0;
  870. }
  871. /* Free up pending packets in all rx/tx rings.
  872. *
  873. * The chip has been shut down and the driver detached from
  874. * the networking, so no interrupts or new tx packets will
  875. * end up in the driver. bp->lock is not held and we are not
  876. * in an interrupt context and thus may sleep.
  877. */
  878. static void b44_free_rings(struct b44 *bp)
  879. {
  880. struct ring_info *rp;
  881. int i;
  882. for (i = 0; i < B44_RX_RING_SIZE; i++) {
  883. rp = &bp->rx_buffers[i];
  884. if (rp->skb == NULL)
  885. continue;
  886. dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
  887. DMA_FROM_DEVICE);
  888. dev_kfree_skb_any(rp->skb);
  889. rp->skb = NULL;
  890. }
  891. /* XXX needs changes once NETIF_F_SG is set... */
  892. for (i = 0; i < B44_TX_RING_SIZE; i++) {
  893. rp = &bp->tx_buffers[i];
  894. if (rp->skb == NULL)
  895. continue;
  896. dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
  897. DMA_TO_DEVICE);
  898. dev_kfree_skb_any(rp->skb);
  899. rp->skb = NULL;
  900. }
  901. }
  902. /* Initialize tx/rx rings for packet processing.
  903. *
  904. * The chip has been shut down and the driver detached from
  905. * the networking, so no interrupts or new tx packets will
  906. * end up in the driver.
  907. */
  908. static void b44_init_rings(struct b44 *bp)
  909. {
  910. int i;
  911. b44_free_rings(bp);
  912. memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
  913. memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
  914. if (bp->flags & B44_FLAG_RX_RING_HACK)
  915. dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
  916. DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
  917. if (bp->flags & B44_FLAG_TX_RING_HACK)
  918. dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
  919. DMA_TABLE_BYTES, DMA_TO_DEVICE);
  920. for (i = 0; i < bp->rx_pending; i++) {
  921. if (b44_alloc_rx_skb(bp, -1, i) < 0)
  922. break;
  923. }
  924. }
  925. /*
  926. * Must not be invoked with interrupt sources disabled and
  927. * the hardware shutdown down.
  928. */
  929. static void b44_free_consistent(struct b44 *bp)
  930. {
  931. kfree(bp->rx_buffers);
  932. bp->rx_buffers = NULL;
  933. kfree(bp->tx_buffers);
  934. bp->tx_buffers = NULL;
  935. if (bp->rx_ring) {
  936. if (bp->flags & B44_FLAG_RX_RING_HACK) {
  937. dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
  938. DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
  939. kfree(bp->rx_ring);
  940. } else
  941. dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
  942. bp->rx_ring, bp->rx_ring_dma);
  943. bp->rx_ring = NULL;
  944. bp->flags &= ~B44_FLAG_RX_RING_HACK;
  945. }
  946. if (bp->tx_ring) {
  947. if (bp->flags & B44_FLAG_TX_RING_HACK) {
  948. dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
  949. DMA_TABLE_BYTES, DMA_TO_DEVICE);
  950. kfree(bp->tx_ring);
  951. } else
  952. dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
  953. bp->tx_ring, bp->tx_ring_dma);
  954. bp->tx_ring = NULL;
  955. bp->flags &= ~B44_FLAG_TX_RING_HACK;
  956. }
  957. }
  958. /*
  959. * Must not be invoked with interrupt sources disabled and
  960. * the hardware shutdown down. Can sleep.
  961. */
  962. static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
  963. {
  964. int size;
  965. size = B44_RX_RING_SIZE * sizeof(struct ring_info);
  966. bp->rx_buffers = kzalloc(size, gfp);
  967. if (!bp->rx_buffers)
  968. goto out_err;
  969. size = B44_TX_RING_SIZE * sizeof(struct ring_info);
  970. bp->tx_buffers = kzalloc(size, gfp);
  971. if (!bp->tx_buffers)
  972. goto out_err;
  973. size = DMA_TABLE_BYTES;
  974. bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
  975. &bp->rx_ring_dma, gfp);
  976. if (!bp->rx_ring) {
  977. /* Allocation may have failed due to pci_alloc_consistent
  978. insisting on use of GFP_DMA, which is more restrictive
  979. than necessary... */
  980. struct dma_desc *rx_ring;
  981. dma_addr_t rx_ring_dma;
  982. rx_ring = kzalloc(size, gfp);
  983. if (!rx_ring)
  984. goto out_err;
  985. rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
  986. DMA_TABLE_BYTES,
  987. DMA_BIDIRECTIONAL);
  988. if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
  989. rx_ring_dma + size > DMA_BIT_MASK(30)) {
  990. kfree(rx_ring);
  991. goto out_err;
  992. }
  993. bp->rx_ring = rx_ring;
  994. bp->rx_ring_dma = rx_ring_dma;
  995. bp->flags |= B44_FLAG_RX_RING_HACK;
  996. }
  997. bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
  998. &bp->tx_ring_dma, gfp);
  999. if (!bp->tx_ring) {
  1000. /* Allocation may have failed due to ssb_dma_alloc_consistent
  1001. insisting on use of GFP_DMA, which is more restrictive
  1002. than necessary... */
  1003. struct dma_desc *tx_ring;
  1004. dma_addr_t tx_ring_dma;
  1005. tx_ring = kzalloc(size, gfp);
  1006. if (!tx_ring)
  1007. goto out_err;
  1008. tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
  1009. DMA_TABLE_BYTES,
  1010. DMA_TO_DEVICE);
  1011. if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
  1012. tx_ring_dma + size > DMA_BIT_MASK(30)) {
  1013. kfree(tx_ring);
  1014. goto out_err;
  1015. }
  1016. bp->tx_ring = tx_ring;
  1017. bp->tx_ring_dma = tx_ring_dma;
  1018. bp->flags |= B44_FLAG_TX_RING_HACK;
  1019. }
  1020. return 0;
  1021. out_err:
  1022. b44_free_consistent(bp);
  1023. return -ENOMEM;
  1024. }
  1025. /* bp->lock is held. */
  1026. static void b44_clear_stats(struct b44 *bp)
  1027. {
  1028. unsigned long reg;
  1029. bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
  1030. for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
  1031. br32(bp, reg);
  1032. for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
  1033. br32(bp, reg);
  1034. }
  1035. /* bp->lock is held. */
  1036. static void b44_chip_reset(struct b44 *bp, int reset_kind)
  1037. {
  1038. struct ssb_device *sdev = bp->sdev;
  1039. bool was_enabled;
  1040. was_enabled = ssb_device_is_enabled(bp->sdev);
  1041. ssb_device_enable(bp->sdev, 0);
  1042. ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);
  1043. if (was_enabled) {
  1044. bw32(bp, B44_RCV_LAZY, 0);
  1045. bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
  1046. b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
  1047. bw32(bp, B44_DMATX_CTRL, 0);
  1048. bp->tx_prod = bp->tx_cons = 0;
  1049. if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
  1050. b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
  1051. 100, 0);
  1052. }
  1053. bw32(bp, B44_DMARX_CTRL, 0);
  1054. bp->rx_prod = bp->rx_cons = 0;
  1055. }
  1056. b44_clear_stats(bp);
  1057. /*
  1058. * Don't enable PHY if we are doing a partial reset
  1059. * we are probably going to power down
  1060. */
  1061. if (reset_kind == B44_CHIP_RESET_PARTIAL)
  1062. return;
  1063. switch (sdev->bus->bustype) {
  1064. case SSB_BUSTYPE_SSB:
  1065. bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
  1066. (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
  1067. B44_MDC_RATIO)
  1068. & MDIO_CTRL_MAXF_MASK)));
  1069. break;
  1070. case SSB_BUSTYPE_PCI:
  1071. bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
  1072. (0x0d & MDIO_CTRL_MAXF_MASK)));
  1073. break;
  1074. case SSB_BUSTYPE_PCMCIA:
  1075. case SSB_BUSTYPE_SDIO:
  1076. WARN_ON(1); /* A device with this bus does not exist. */
  1077. break;
  1078. }
  1079. br32(bp, B44_MDIO_CTRL);
  1080. if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
  1081. bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
  1082. br32(bp, B44_ENET_CTRL);
  1083. bp->flags &= ~B44_FLAG_INTERNAL_PHY;
  1084. } else {
  1085. u32 val = br32(bp, B44_DEVCTRL);
  1086. if (val & DEVCTRL_EPR) {
  1087. bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
  1088. br32(bp, B44_DEVCTRL);
  1089. udelay(100);
  1090. }
  1091. bp->flags |= B44_FLAG_INTERNAL_PHY;
  1092. }
  1093. }
  1094. /* bp->lock is held. */
  1095. static void b44_halt(struct b44 *bp)
  1096. {
  1097. b44_disable_ints(bp);
  1098. /* reset PHY */
  1099. b44_phy_reset(bp);
  1100. /* power down PHY */
  1101. netdev_info(bp->dev, "powering down PHY\n");
  1102. bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
  1103. /* now reset the chip, but without enabling the MAC&PHY
  1104. * part of it. This has to be done _after_ we shut down the PHY */
  1105. b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
  1106. }
  1107. /* bp->lock is held. */
  1108. static void __b44_set_mac_addr(struct b44 *bp)
  1109. {
  1110. bw32(bp, B44_CAM_CTRL, 0);
  1111. if (!(bp->dev->flags & IFF_PROMISC)) {
  1112. u32 val;
  1113. __b44_cam_write(bp, bp->dev->dev_addr, 0);
  1114. val = br32(bp, B44_CAM_CTRL);
  1115. bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
  1116. }
  1117. }
  1118. static int b44_set_mac_addr(struct net_device *dev, void *p)
  1119. {
  1120. struct b44 *bp = netdev_priv(dev);
  1121. struct sockaddr *addr = p;
  1122. u32 val;
  1123. if (netif_running(dev))
  1124. return -EBUSY;
  1125. if (!is_valid_ether_addr(addr->sa_data))
  1126. return -EINVAL;
  1127. memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
  1128. spin_lock_irq(&bp->lock);
  1129. val = br32(bp, B44_RXCONFIG);
  1130. if (!(val & RXCONFIG_CAM_ABSENT))
  1131. __b44_set_mac_addr(bp);
  1132. spin_unlock_irq(&bp->lock);
  1133. return 0;
  1134. }
  1135. /* Called at device open time to get the chip ready for
  1136. * packet processing. Invoked with bp->lock held.
  1137. */
  1138. static void __b44_set_rx_mode(struct net_device *);
  1139. static void b44_init_hw(struct b44 *bp, int reset_kind)
  1140. {
  1141. u32 val;
  1142. b44_chip_reset(bp, B44_CHIP_RESET_FULL);
  1143. if (reset_kind == B44_FULL_RESET) {
  1144. b44_phy_reset(bp);
  1145. b44_setup_phy(bp);
  1146. }
  1147. /* Enable CRC32, set proper LED modes and power on PHY */
  1148. bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
  1149. bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
  1150. /* This sets the MAC address too. */
  1151. __b44_set_rx_mode(bp->dev);
  1152. /* MTU + eth header + possible VLAN tag + struct rx_header */
  1153. bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
  1154. bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
  1155. bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
  1156. if (reset_kind == B44_PARTIAL_RESET) {
  1157. bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
  1158. (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
  1159. } else {
  1160. bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
  1161. bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
  1162. bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
  1163. (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
  1164. bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
  1165. bw32(bp, B44_DMARX_PTR, bp->rx_pending);
  1166. bp->rx_prod = bp->rx_pending;
  1167. bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
  1168. }
  1169. val = br32(bp, B44_ENET_CTRL);
  1170. bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
  1171. }
  1172. static int b44_open(struct net_device *dev)
  1173. {
  1174. struct b44 *bp = netdev_priv(dev);
  1175. int err;
  1176. err = b44_alloc_consistent(bp, GFP_KERNEL);
  1177. if (err)
  1178. goto out;
  1179. napi_enable(&bp->napi);
  1180. b44_init_rings(bp);
  1181. b44_init_hw(bp, B44_FULL_RESET);
  1182. b44_check_phy(bp);
  1183. err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
  1184. if (unlikely(err < 0)) {
  1185. napi_disable(&bp->napi);
  1186. b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
  1187. b44_free_rings(bp);
  1188. b44_free_consistent(bp);
  1189. goto out;
  1190. }
  1191. init_timer(&bp->timer);
  1192. bp->timer.expires = jiffies + HZ;
  1193. bp->timer.data = (unsigned long) bp;
  1194. bp->timer.function = b44_timer;
  1195. add_timer(&bp->timer);
  1196. b44_enable_ints(bp);
  1197. netif_start_queue(dev);
  1198. out:
  1199. return err;
  1200. }
  1201. #ifdef CONFIG_NET_POLL_CONTROLLER
  1202. /*
  1203. * Polling receive - used by netconsole and other diagnostic tools
  1204. * to allow network i/o with interrupts disabled.
  1205. */
  1206. static void b44_poll_controller(struct net_device *dev)
  1207. {
  1208. disable_irq(dev->irq);
  1209. b44_interrupt(dev->irq, dev);
  1210. enable_irq(dev->irq);
  1211. }
  1212. #endif
  1213. static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
  1214. {
  1215. u32 i;
  1216. u32 *pattern = (u32 *) pp;
  1217. for (i = 0; i < bytes; i += sizeof(u32)) {
  1218. bw32(bp, B44_FILT_ADDR, table_offset + i);
  1219. bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
  1220. }
  1221. }
  1222. static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
  1223. {
  1224. int magicsync = 6;
  1225. int k, j, len = offset;
  1226. int ethaddr_bytes = ETH_ALEN;
  1227. memset(ppattern + offset, 0xff, magicsync);
  1228. for (j = 0; j < magicsync; j++)
  1229. set_bit(len++, (unsigned long *) pmask);
  1230. for (j = 0; j < B44_MAX_PATTERNS; j++) {
  1231. if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
  1232. ethaddr_bytes = ETH_ALEN;
  1233. else
  1234. ethaddr_bytes = B44_PATTERN_SIZE - len;
  1235. if (ethaddr_bytes <=0)
  1236. break;
  1237. for (k = 0; k< ethaddr_bytes; k++) {
  1238. ppattern[offset + magicsync +
  1239. (j * ETH_ALEN) + k] = macaddr[k];
  1240. set_bit(len++, (unsigned long *) pmask);
  1241. }
  1242. }
  1243. return len - 1;
  1244. }
  1245. /* Setup magic packet patterns in the b44 WOL
  1246. * pattern matching filter.
  1247. */
  1248. static void b44_setup_pseudo_magicp(struct b44 *bp)
  1249. {
  1250. u32 val;
  1251. int plen0, plen1, plen2;
  1252. u8 *pwol_pattern;
  1253. u8 pwol_mask[B44_PMASK_SIZE];
  1254. pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
  1255. if (!pwol_pattern) {
  1256. pr_err("Memory not available for WOL\n");
  1257. return;
  1258. }
  1259. /* Ipv4 magic packet pattern - pattern 0.*/
  1260. memset(pwol_mask, 0, B44_PMASK_SIZE);
  1261. plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
  1262. B44_ETHIPV4UDP_HLEN);
  1263. bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
  1264. bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
  1265. /* Raw ethernet II magic packet pattern - pattern 1 */
  1266. memset(pwol_pattern, 0, B44_PATTERN_SIZE);
  1267. memset(pwol_mask, 0, B44_PMASK_SIZE);
  1268. plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
  1269. ETH_HLEN);
  1270. bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
  1271. B44_PATTERN_BASE + B44_PATTERN_SIZE);
  1272. bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
  1273. B44_PMASK_BASE + B44_PMASK_SIZE);
  1274. /* Ipv6 magic packet pattern - pattern 2 */
  1275. memset(pwol_pattern, 0, B44_PATTERN_SIZE);
  1276. memset(pwol_mask, 0, B44_PMASK_SIZE);
  1277. plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
  1278. B44_ETHIPV6UDP_HLEN);
  1279. bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
  1280. B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
  1281. bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
  1282. B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
  1283. kfree(pwol_pattern);
  1284. /* set these pattern's lengths: one less than each real length */
  1285. val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
  1286. bw32(bp, B44_WKUP_LEN, val);
  1287. /* enable wakeup pattern matching */
  1288. val = br32(bp, B44_DEVCTRL);
  1289. bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
  1290. }
  1291. #ifdef CONFIG_B44_PCI
  1292. static void b44_setup_wol_pci(struct b44 *bp)
  1293. {
  1294. u16 val;
  1295. if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
  1296. bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
  1297. pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
  1298. pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
  1299. }
  1300. }
  1301. #else
  1302. static inline void b44_setup_wol_pci(struct b44 *bp) { }
  1303. #endif /* CONFIG_B44_PCI */
  1304. static void b44_setup_wol(struct b44 *bp)
  1305. {
  1306. u32 val;
  1307. bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
  1308. if (bp->flags & B44_FLAG_B0_ANDLATER) {
  1309. bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
  1310. val = bp->dev->dev_addr[2] << 24 |
  1311. bp->dev->dev_addr[3] << 16 |
  1312. bp->dev->dev_addr[4] << 8 |
  1313. bp->dev->dev_addr[5];
  1314. bw32(bp, B44_ADDR_LO, val);
  1315. val = bp->dev->dev_addr[0] << 8 |
  1316. bp->dev->dev_addr[1];
  1317. bw32(bp, B44_ADDR_HI, val);
  1318. val = br32(bp, B44_DEVCTRL);
  1319. bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
  1320. } else {
  1321. b44_setup_pseudo_magicp(bp);
  1322. }
  1323. b44_setup_wol_pci(bp);
  1324. }
  1325. static int b44_close(struct net_device *dev)
  1326. {
  1327. struct b44 *bp = netdev_priv(dev);
  1328. netif_stop_queue(dev);
  1329. napi_disable(&bp->napi);
  1330. del_timer_sync(&bp->timer);
  1331. spin_lock_irq(&bp->lock);
  1332. b44_halt(bp);
  1333. b44_free_rings(bp);
  1334. netif_carrier_off(dev);
  1335. spin_unlock_irq(&bp->lock);
  1336. free_irq(dev->irq, dev);
  1337. if (bp->flags & B44_FLAG_WOL_ENABLE) {
  1338. b44_init_hw(bp, B44_PARTIAL_RESET);
  1339. b44_setup_wol(bp);
  1340. }
  1341. b44_free_consistent(bp);
  1342. return 0;
  1343. }
static struct net_device_stats *b44_get_stats(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &dev->stats;
	struct b44_hw_stats *hwstat = &bp->hw_stats;

	/* Convert HW stats into netdevice stats. */
	nstat->rx_packets = hwstat->rx_pkts;
	nstat->tx_packets = hwstat->tx_pkts;
	nstat->rx_bytes   = hwstat->rx_octets;
	nstat->tx_bytes   = hwstat->tx_octets;
	nstat->tx_errors  = (hwstat->tx_jabber_pkts +
			     hwstat->tx_oversize_pkts +
			     hwstat->tx_underruns +
			     hwstat->tx_excessive_cols +
			     hwstat->tx_late_cols);
	nstat->multicast  = hwstat->tx_multicast_pkts;
	nstat->collisions = hwstat->tx_total_cols;

	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
				   hwstat->rx_undersize);
	nstat->rx_over_errors   = hwstat->rx_missed_pkts;
	nstat->rx_frame_errors  = hwstat->rx_align_errs;
	nstat->rx_crc_errors    = hwstat->rx_crc_errs;
	nstat->rx_errors        = (hwstat->rx_jabber_pkts +
				   hwstat->rx_oversize_pkts +
				   hwstat->rx_missed_pkts +
				   hwstat->rx_crc_align_errs +
				   hwstat->rx_undersize +
				   hwstat->rx_crc_errs +
				   hwstat->rx_align_errs +
				   hwstat->rx_symbol_errs);

	nstat->tx_aborted_errors = hwstat->tx_underruns;
#if 0
	/* Carrier lost counter seems to be broken for some devices */
	nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
#endif

	return nstat;
}

static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	int i, num_ents;

	num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
	i = 0;
	netdev_for_each_mc_addr(ha, dev) {
		if (i == num_ents)
			break;
		__b44_cam_write(bp, ha->addr, i++ + 1);
	}
	return i+1;
}

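/*
 * Program the receive filter: promiscuous mode, all-multicast, or the
 * per-address CAM entries.  This is the unlocked helper; b44_set_rx_mode()
 * below takes bp->lock and calls it.
 */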
static void __b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 val;

	val = br32(bp, B44_RXCONFIG);
	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
	if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
		val |= RXCONFIG_PROMISC;
		bw32(bp, B44_RXCONFIG, val);
	} else {
		unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
		int i = 1;

		__b44_set_mac_addr(bp);

		if ((dev->flags & IFF_ALLMULTI) ||
		    (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
			val |= RXCONFIG_ALLMULTI;
		else
			i = __b44_load_mcast(bp, dev);

		for (; i < 64; i++)
			__b44_cam_write(bp, zero, i);

		bw32(bp, B44_RXCONFIG, val);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}

static void b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	__b44_set_rx_mode(dev);
	spin_unlock_irq(&bp->lock);
}

static u32 b44_get_msglevel(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	return bp->msg_enable;
}

static void b44_set_msglevel(struct net_device *dev, u32 value)
{
	struct b44 *bp = netdev_priv(dev);
	bp->msg_enable = value;
}

static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct b44 *bp = netdev_priv(dev);
	struct ssb_bus *bus = bp->sdev->bus;

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	switch (bus->bustype) {
	case SSB_BUSTYPE_PCI:
		strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
		break;
	case SSB_BUSTYPE_SSB:
		strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
		break;
	case SSB_BUSTYPE_PCMCIA:
	case SSB_BUSTYPE_SDIO:
		WARN_ON(1); /* A device with this bus does not exist. */
		break;
	}
}

static int b44_nway_reset(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 bmcr;
	int r;

	spin_lock_irq(&bp->lock);
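	/* BMCR is read back twice; the first read presumably flushes a
	 * stale/latched value before BMCR_ANENABLE is tested. */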
	b44_readphy(bp, MII_BMCR, &bmcr);
	b44_readphy(bp, MII_BMCR, &bmcr);
	r = -EINVAL;
	if (bmcr & BMCR_ANENABLE) {
		b44_writephy(bp, MII_BMCR,
			     bmcr | BMCR_ANRESTART);
		r = 0;
	}
	spin_unlock_irq(&bp->lock);

	return r;
}

static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);

	cmd->supported = (SUPPORTED_Autoneg);
	cmd->supported |= (SUPPORTED_100baseT_Half |
			   SUPPORTED_100baseT_Full |
			   SUPPORTED_10baseT_Half |
			   SUPPORTED_10baseT_Full |
			   SUPPORTED_MII);

	cmd->advertising = 0;
	if (bp->flags & B44_FLAG_ADV_10HALF)
		cmd->advertising |= ADVERTISED_10baseT_Half;
	if (bp->flags & B44_FLAG_ADV_10FULL)
		cmd->advertising |= ADVERTISED_10baseT_Full;
	if (bp->flags & B44_FLAG_ADV_100HALF)
		cmd->advertising |= ADVERTISED_100baseT_Half;
	if (bp->flags & B44_FLAG_ADV_100FULL)
		cmd->advertising |= ADVERTISED_100baseT_Full;
	cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
	ethtool_cmd_speed_set(cmd, ((bp->flags & B44_FLAG_100_BASE_T) ?
				    SPEED_100 : SPEED_10));
	cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
		DUPLEX_FULL : DUPLEX_HALF;
	cmd->port = 0;
	cmd->phy_address = bp->phy_addr;
	cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
		XCVR_INTERNAL : XCVR_EXTERNAL;
	cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
		AUTONEG_DISABLE : AUTONEG_ENABLE;
	if (cmd->autoneg == AUTONEG_ENABLE)
		cmd->advertising |= ADVERTISED_Autoneg;
	if (!netif_running(dev)){
		ethtool_cmd_speed_set(cmd, 0);
		cmd->duplex = 0xff;
	}
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct b44 *bp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	/* We do not support gigabit. */
	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (cmd->advertising &
		    (ADVERTISED_1000baseT_Half |
		     ADVERTISED_1000baseT_Full))
			return -EINVAL;
	} else if ((speed != SPEED_100 &&
		    speed != SPEED_10) ||
		   (cmd->duplex != DUPLEX_HALF &&
		    cmd->duplex != DUPLEX_FULL)) {
		return -EINVAL;
	}

	spin_lock_irq(&bp->lock);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		bp->flags &= ~(B44_FLAG_FORCE_LINK |
			       B44_FLAG_100_BASE_T |
			       B44_FLAG_FULL_DUPLEX |
			       B44_FLAG_ADV_10HALF |
			       B44_FLAG_ADV_10FULL |
			       B44_FLAG_ADV_100HALF |
			       B44_FLAG_ADV_100FULL);
		if (cmd->advertising == 0) {
			bp->flags |= (B44_FLAG_ADV_10HALF |
				      B44_FLAG_ADV_10FULL |
				      B44_FLAG_ADV_100HALF |
				      B44_FLAG_ADV_100FULL);
		} else {
			if (cmd->advertising & ADVERTISED_10baseT_Half)
				bp->flags |= B44_FLAG_ADV_10HALF;
			if (cmd->advertising & ADVERTISED_10baseT_Full)
				bp->flags |= B44_FLAG_ADV_10FULL;
			if (cmd->advertising & ADVERTISED_100baseT_Half)
				bp->flags |= B44_FLAG_ADV_100HALF;
			if (cmd->advertising & ADVERTISED_100baseT_Full)
				bp->flags |= B44_FLAG_ADV_100FULL;
		}
	} else {
		bp->flags |= B44_FLAG_FORCE_LINK;
		bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
		if (speed == SPEED_100)
			bp->flags |= B44_FLAG_100_BASE_T;
		if (cmd->duplex == DUPLEX_FULL)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
	}

	if (netif_running(dev))
		b44_setup_phy(bp);

	spin_unlock_irq(&bp->lock);

	return 0;
}

static void b44_get_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	ering->rx_max_pending = B44_RX_RING_SIZE - 1;
	ering->rx_pending = bp->rx_pending;

	/* XXX ethtool lacks a tx_max_pending, oops... */
}

static int b44_set_ringparam(struct net_device *dev,
			     struct ethtool_ringparam *ering)
{
	struct b44 *bp = netdev_priv(dev);

	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
	    (ering->rx_mini_pending != 0) ||
	    (ering->rx_jumbo_pending != 0) ||
	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
		return -EINVAL;

	spin_lock_irq(&bp->lock);

	bp->rx_pending = ering->rx_pending;
	bp->tx_pending = ering->tx_pending;

	b44_halt(bp);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	netif_wake_queue(bp->dev);
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

static void b44_get_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	epause->autoneg =
		(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
	epause->rx_pause =
		(bp->flags & B44_FLAG_RX_PAUSE) != 0;
	epause->tx_pause =
		(bp->flags & B44_FLAG_TX_PAUSE) != 0;
}

static int b44_set_pauseparam(struct net_device *dev,
			      struct ethtool_pauseparam *epause)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (epause->autoneg)
		bp->flags |= B44_FLAG_PAUSE_AUTO;
	else
		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
	if (epause->rx_pause)
		bp->flags |= B44_FLAG_RX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_RX_PAUSE;
	if (epause->tx_pause)
		bp->flags |= B44_FLAG_TX_PAUSE;
	else
		bp->flags &= ~B44_FLAG_TX_PAUSE;
	if (bp->flags & B44_FLAG_PAUSE_AUTO) {
		b44_halt(bp);
		b44_init_rings(bp);
		b44_init_hw(bp, B44_FULL_RESET);
	} else {
		__b44_set_flow_ctrl(bp, bp->flags);
	}
	spin_unlock_irq(&bp->lock);

	b44_enable_ints(bp);

	return 0;
}

static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	switch(stringset) {
	case ETH_SS_STATS:
		memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
		break;
	}
}

static int b44_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(b44_gstrings);
	default:
		return -EOPNOTSUPP;
	}
}

static void b44_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct b44 *bp = netdev_priv(dev);
	u32 *val = &bp->hw_stats.tx_good_octets;
	u32 i;

	spin_lock_irq(&bp->lock);

	b44_stats_update(bp);

	for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
		*data++ = *val++;

	spin_unlock_irq(&bp->lock);
}

static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	wol->supported = WAKE_MAGIC;
	if (bp->flags & B44_FLAG_WOL_ENABLE)
		wol->wolopts = WAKE_MAGIC;
	else
		wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct b44 *bp = netdev_priv(dev);

	spin_lock_irq(&bp->lock);
	if (wol->wolopts & WAKE_MAGIC)
		bp->flags |= B44_FLAG_WOL_ENABLE;
	else
		bp->flags &= ~B44_FLAG_WOL_ENABLE;
	spin_unlock_irq(&bp->lock);

	return 0;
}

static const struct ethtool_ops b44_ethtool_ops = {
	.get_drvinfo		= b44_get_drvinfo,
	.get_settings		= b44_get_settings,
	.set_settings		= b44_set_settings,
	.nway_reset		= b44_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_wol		= b44_get_wol,
	.set_wol		= b44_set_wol,
	.get_ringparam		= b44_get_ringparam,
	.set_ringparam		= b44_set_ringparam,
	.get_pauseparam		= b44_get_pauseparam,
	.set_pauseparam		= b44_set_pauseparam,
	.get_msglevel		= b44_get_msglevel,
	.set_msglevel		= b44_set_msglevel,
	.get_strings		= b44_get_strings,
	.get_sset_count		= b44_get_sset_count,
	.get_ethtool_stats	= b44_get_ethtool_stats,
};

static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct b44 *bp = netdev_priv(dev);
	int err = -EINVAL;

	if (!netif_running(dev))
		goto out;

	spin_lock_irq(&bp->lock);
	err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
	spin_unlock_irq(&bp->lock);
out:
	return err;
}

static int __devinit b44_get_invariants(struct b44 *bp)
{
	struct ssb_device *sdev = bp->sdev;
	int err = 0;
	u8 *addr;

	bp->dma_offset = ssb_dma_translation(sdev);

	if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
	    instance > 1) {
		addr = sdev->bus->sprom.et1mac;
		bp->phy_addr = sdev->bus->sprom.et1phyaddr;
	} else {
		addr = sdev->bus->sprom.et0mac;
		bp->phy_addr = sdev->bus->sprom.et0phyaddr;
	}
	/* Some ROMs have buggy PHY addresses with the high
	 * bits set (sign extension?). Truncate them to a
	 * valid PHY address. */
	bp->phy_addr &= 0x1F;

	memcpy(bp->dev->dev_addr, addr, 6);

	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
		pr_err("Invalid MAC address found in EEPROM\n");
		return -EINVAL;
	}

	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);

	bp->imask = IMASK_DEF;

	/* XXX - really required?
	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
	*/

	if (bp->sdev->id.revision >= 7)
		bp->flags |= B44_FLAG_B0_ANDLATER;

	return err;
}

static const struct net_device_ops b44_netdev_ops = {
	.ndo_open		= b44_open,
	.ndo_stop		= b44_close,
	.ndo_start_xmit		= b44_start_xmit,
	.ndo_get_stats		= b44_get_stats,
	.ndo_set_multicast_list	= b44_set_rx_mode,
	.ndo_set_mac_address	= b44_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= b44_ioctl,
	.ndo_tx_timeout		= b44_tx_timeout,
	.ndo_change_mtu		= b44_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= b44_poll_controller,
#endif
};

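/*
 * Probe one SSB core: allocate the netdevice, power up the bus, set the
 * 30-bit DMA mask these cores require, fetch the MAC and PHY addresses
 * from the SPROM (b44_get_invariants), register the interface and reset
 * the chip and PHY.
 */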
static int __devinit b44_init_one(struct ssb_device *sdev,
				  const struct ssb_device_id *ent)
{
	static int b44_version_printed = 0;
	struct net_device *dev;
	struct b44 *bp;
	int err;

	instance++;

	if (b44_version_printed++ == 0)
		pr_info("%s", version);

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev) {
		dev_err(sdev->dev, "Etherdev alloc failed, aborting\n");
		err = -ENOMEM;
		goto out;
	}

	SET_NETDEV_DEV(dev, sdev->dev);

	/* No interesting netdevice features in this card... */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->sdev = sdev;
	bp->dev = dev;
	bp->force_copybreak = 0;

	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);

	spin_lock_init(&bp->lock);

	bp->rx_pending = B44_DEF_RX_RING_PENDING;
	bp->tx_pending = B44_DEF_TX_RING_PENDING;

	dev->netdev_ops = &b44_netdev_ops;
	netif_napi_add(dev, &bp->napi, b44_poll, 64);
	dev->watchdog_timeo = B44_TX_TIMEOUT;
	dev->irq = sdev->irq;
	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);

	err = ssb_bus_powerup(sdev->bus, 0);
	if (err) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		goto err_out_free_dev;
	}

	if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ||
	    dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
		dev_err(sdev->dev,
			"Required 30BIT DMA mask unsupported by the system\n");
		goto err_out_powerdown;
	}

	err = b44_get_invariants(bp);
	if (err) {
		dev_err(sdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_powerdown;
	}

	bp->mii_if.dev = dev;
	bp->mii_if.mdio_read = b44_mii_read;
	bp->mii_if.mdio_write = b44_mii_write;
	bp->mii_if.phy_id = bp->phy_addr;
	bp->mii_if.phy_id_mask = 0x1f;
	bp->mii_if.reg_num_mask = 0x1f;

	/* By default, advertise all speed/duplex settings. */
	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);

	/* By default, auto-negotiate PAUSE. */
	bp->flags |= B44_FLAG_PAUSE_AUTO;

	err = register_netdev(dev);
	if (err) {
		dev_err(sdev->dev, "Cannot register net device, aborting\n");
		goto err_out_powerdown;
	}

	netif_carrier_off(dev);

	ssb_set_drvdata(sdev, dev);

	/* Chip reset provides power to the b44 MAC & PCI cores, which
	 * is necessary for MAC register access.
	 */
	b44_chip_reset(bp, B44_CHIP_RESET_FULL);

	/* do a phy reset to test if there is an active phy */
	if (b44_phy_reset(bp) < 0)
		bp->phy_addr = B44_PHY_ADDR_NO_PHY;

	netdev_info(dev, "Broadcom 44xx/47xx 10/100BaseT Ethernet %pM\n",
		    dev->dev_addr);

	return 0;

err_out_powerdown:
	ssb_bus_may_powerdown(sdev->bus);

err_out_free_dev:
	free_netdev(dev);

out:
	return err;
}

static void __devexit b44_remove_one(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);

	unregister_netdev(dev);
	ssb_device_disable(sdev, 0);
	ssb_bus_may_powerdown(sdev->bus);
	free_netdev(dev);
	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	ssb_set_drvdata(sdev, NULL);
}

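/*
 * Suspend: stop the timer, halt the MAC, free the rings and release the
 * IRQ.  If wake-on-LAN was enabled via ethtool, partially re-initialise
 * the hardware and arm the wakeup logic before entering D3hot.
 */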
static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	del_timer_sync(&bp->timer);

	spin_lock_irq(&bp->lock);

	b44_halt(bp);
	netif_carrier_off(bp->dev);
	netif_device_detach(bp->dev);
	b44_free_rings(bp);

	spin_unlock_irq(&bp->lock);

	free_irq(dev->irq, dev);
	if (bp->flags & B44_FLAG_WOL_ENABLE) {
		b44_init_hw(bp, B44_PARTIAL_RESET);
		b44_setup_wol(bp);
	}

	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
	return 0;
}

static int b44_resume(struct ssb_device *sdev)
{
	struct net_device *dev = ssb_get_drvdata(sdev);
	struct b44 *bp = netdev_priv(dev);
	int rc = 0;

	rc = ssb_bus_powerup(sdev->bus, 0);
	if (rc) {
		dev_err(sdev->dev,
			"Failed to powerup the bus\n");
		return rc;
	}

	if (!netif_running(dev))
		return 0;

	spin_lock_irq(&bp->lock);
	b44_init_rings(bp);
	b44_init_hw(bp, B44_FULL_RESET);
	spin_unlock_irq(&bp->lock);

	/*
	 * As a shared interrupt, the handler can be called immediately. To be
	 * able to check the interrupt status the hardware must already be
	 * powered back on (b44_init_hw).
	 */
	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		netdev_err(dev, "request_irq failed\n");
		spin_lock_irq(&bp->lock);
		b44_halt(bp);
		b44_free_rings(bp);
		spin_unlock_irq(&bp->lock);
		return rc;
	}

	netif_device_attach(bp->dev);

	b44_enable_ints(bp);
	netif_wake_queue(dev);

	mod_timer(&bp->timer, jiffies + 1);

	return 0;
}

static struct ssb_driver b44_ssb_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= b44_ssb_tbl,
	.probe		= b44_init_one,
	.remove		= __devexit_p(b44_remove_one),
	.suspend	= b44_suspend,
	.resume		= b44_resume,
};

static inline int b44_pci_init(void)
{
	int err = 0;
#ifdef CONFIG_B44_PCI
	err = ssb_pcihost_register(&b44_pci_driver);
#endif
	return err;
}

static inline void b44_pci_exit(void)
{
#ifdef CONFIG_B44_PCI
	ssb_pcihost_unregister(&b44_pci_driver);
#endif
}

static int __init b44_init(void)
{
	unsigned int dma_desc_align_size = dma_get_cache_alignment();
	int err;

	/* Setup parameters for syncing RX/TX DMA descriptors */
	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));

	err = b44_pci_init();
	if (err)
		return err;
	err = ssb_driver_register(&b44_ssb_driver);
	if (err)
		b44_pci_exit();
	return err;
}

static void __exit b44_cleanup(void)
{
	ssb_driver_unregister(&b44_ssb_driver);
	b44_pci_exit();
}

module_init(b44_init);
module_exit(b44_cleanup);