phy.c

/* Framework for configuring and reading PHY devices
 * Based on code in sungem_phy.c and gianfar_phy.c
 *
 * Author: Andy Fleming
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc.
 * Copyright (c) 2006, 2007 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mdio.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>

#include <asm/irq.h>

static const char *phy_speed_to_str(int speed)
{
	switch (speed) {
	case SPEED_10:
		return "10Mbps";
	case SPEED_100:
		return "100Mbps";
	case SPEED_1000:
		return "1Gbps";
	case SPEED_2500:
		return "2.5Gbps";
	case SPEED_10000:
		return "10Gbps";
	case SPEED_UNKNOWN:
		return "Unknown";
	default:
		return "Unsupported (update phy.c)";
	}
}

#define PHY_STATE_STR(_state)			\
	case PHY_##_state:			\
		return __stringify(_state);	\

static const char *phy_state_to_str(enum phy_state st)
{
	switch (st) {
	PHY_STATE_STR(DOWN)
	PHY_STATE_STR(STARTING)
	PHY_STATE_STR(READY)
	PHY_STATE_STR(PENDING)
	PHY_STATE_STR(UP)
	PHY_STATE_STR(AN)
	PHY_STATE_STR(RUNNING)
	PHY_STATE_STR(NOLINK)
	PHY_STATE_STR(FORCING)
	PHY_STATE_STR(CHANGELINK)
	PHY_STATE_STR(HALTED)
	PHY_STATE_STR(RESUMING)
	}

	return NULL;
}

/**
 * phy_print_status - Convenience function to print out the current phy status
 * @phydev: the phy_device struct
 */
void phy_print_status(struct phy_device *phydev)
{
	if (phydev->link) {
		netdev_info(phydev->attached_dev,
			    "Link is Up - %s/%s - flow control %s\n",
			    phy_speed_to_str(phydev->speed),
			    DUPLEX_FULL == phydev->duplex ? "Full" : "Half",
			    phydev->pause ? "rx/tx" : "off");
	} else {
		netdev_info(phydev->attached_dev, "Link is Down\n");
	}
}
EXPORT_SYMBOL(phy_print_status);
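
/* Example (illustrative sketch, not part of the original file): a MAC
 * driver's adjust_link callback typically calls phy_print_status() after
 * reprogramming its MAC for the new link parameters. The foo_* names
 * below are hypothetical.
 *
 *	static void foo_adjust_link(struct net_device *ndev)
 *	{
 *		struct phy_device *phydev = ndev->phydev;
 *
 *		if (phydev->link) {
 *			// reprogram MAC speed/duplex/pause settings here
 *		}
 *		phy_print_status(phydev);
 *	}
 */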

/**
 * phy_clear_interrupt - Ack the phy device's interrupt
 * @phydev: the phy_device struct
 *
 * If the @phydev driver has an ack_interrupt function, call it to
 * ack and clear the phy device's interrupt.
 *
 * Returns 0 on success or < 0 on error.
 */
static int phy_clear_interrupt(struct phy_device *phydev)
{
	if (phydev->drv->ack_interrupt)
		return phydev->drv->ack_interrupt(phydev);

	return 0;
}

/**
 * phy_config_interrupt - configure the PHY device for the requested interrupts
 * @phydev: the phy_device struct
 * @interrupts: interrupt flags to configure for this @phydev
 *
 * Returns 0 on success or < 0 on error.
 */
static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
	phydev->interrupts = interrupts;
	if (phydev->drv->config_intr)
		return phydev->drv->config_intr(phydev);

	return 0;
}

/**
 * phy_aneg_done - return auto-negotiation status
 * @phydev: target phy_device struct
 *
 * Description: Return the auto-negotiation status from this @phydev
 * Returns > 0 on success or < 0 on error. 0 means that auto-negotiation
 * is still pending.
 */
static inline int phy_aneg_done(struct phy_device *phydev)
{
	if (phydev->drv->aneg_done)
		return phydev->drv->aneg_done(phydev);

	/* Avoid genphy_aneg_done() if the Clause 45 PHY does not
	 * implement Clause 22 registers
	 */
	if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
		return -EINVAL;

	return genphy_aneg_done(phydev);
}

/* A structure for mapping a particular speed and duplex
 * combination to a particular SUPPORTED and ADVERTISED value
 */
struct phy_setting {
	int speed;
	int duplex;
	u32 setting;
};

/* A mapping of all SUPPORTED settings to speed/duplex */
static const struct phy_setting settings[] = {
	{
		.speed = SPEED_10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseKR_Full,
	},
	{
		.speed = SPEED_10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseKX4_Full,
	},
	{
		.speed = SPEED_10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseT_Full,
	},
	{
		.speed = SPEED_2500,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_2500baseX_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_1000baseKX_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_1000baseT_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_1000baseT_Half,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_100baseT_Full,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_100baseT_Half,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10baseT_Full,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_10baseT_Half,
	},
};

#define MAX_NUM_SETTINGS ARRAY_SIZE(settings)

/**
 * phy_find_setting - find a PHY settings array entry that matches speed & duplex
 * @speed: speed to match
 * @duplex: duplex to match
 *
 * Description: Searches the settings array for the setting which
 *   matches the desired speed and duplex, and returns the index
 *   of that setting.  Returns the index of the last setting if
 *   none of the others match.
 */
static inline unsigned int phy_find_setting(int speed, int duplex)
{
	unsigned int idx = 0;

	while (idx < ARRAY_SIZE(settings) &&
	       (settings[idx].speed != speed || settings[idx].duplex != duplex))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/**
 * phy_find_valid - find a PHY setting that matches the requested features mask
 * @idx: The first index in settings[] to search
 * @features: A mask of the valid settings
 *
 * Description: Returns the index of the first valid setting less
 *   than or equal to the one pointed to by idx, as determined by
 *   the mask in features.  Returns the index of the last setting
 *   if nothing else matches.
 */
static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
{
	while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/**
 * phy_check_valid - check if there is a valid PHY setting which matches
 *		     speed, duplex, and feature mask
 * @speed: speed to match
 * @duplex: duplex to match
 * @features: A mask of the valid settings
 *
 * Description: Returns true if there is a valid setting, false otherwise.
 */
static inline bool phy_check_valid(int speed, int duplex, u32 features)
{
	unsigned int idx;

	idx = phy_find_valid(phy_find_setting(speed, duplex), features);

	return settings[idx].speed == speed && settings[idx].duplex == duplex &&
		(settings[idx].setting & features);
}
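
/* Worked example (illustrative, not part of the original file): with
 * features = SUPPORTED_100baseT_Full | SUPPORTED_10baseT_Full, the call
 * phy_check_valid(SPEED_1000, DUPLEX_FULL, features) first maps 1000/FULL
 * to the 1000baseKX_Full entry, then phy_find_valid() walks down the table
 * until it hits 100baseT_Full. That entry's speed no longer matches the
 * requested 1000, so the check returns false; an exact 100/FULL request
 * against the same mask would return true.
 */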

/**
 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
 * @phydev: the target phy_device struct
 *
 * Description: Make sure the PHY is set to supported speeds and
 *   duplexes.  Drop down by one in this order:  1000/FULL,
 *   1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
 */
static void phy_sanitize_settings(struct phy_device *phydev)
{
	u32 features = phydev->supported;
	unsigned int idx;

	/* Sanitize settings based on PHY capabilities */
	if ((features & SUPPORTED_Autoneg) == 0)
		phydev->autoneg = AUTONEG_DISABLE;

	idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex),
			     features);

	phydev->speed = settings[idx].speed;
	phydev->duplex = settings[idx].duplex;
}

/**
 * phy_ethtool_sset - generic ethtool sset function, handles all the details
 * @phydev: target phy_device struct
 * @cmd: ethtool_cmd
 *
 * A few notes about parameter checking:
 *
 * - We don't set port or transceiver, so we don't care what they
 *   were set to.
 * - phy_start_aneg() will make sure forced settings are sane, and
 *   choose the next best ones from the ones selected, so we don't
 *   care if ethtool tries to give us bad values.
 */
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	u32 speed = ethtool_cmd_speed(cmd);

	if (cmd->phy_address != phydev->mdio.addr)
		return -EINVAL;

	/* We make sure that we don't pass unsupported values in to the PHY */
	cmd->advertising &= phydev->supported;

	/* Verify the settings we care about. */
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    ((speed != SPEED_1000 &&
	      speed != SPEED_100 &&
	      speed != SPEED_10) ||
	     (cmd->duplex != DUPLEX_HALF &&
	      cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	phydev->autoneg = cmd->autoneg;

	phydev->speed = speed;

	phydev->advertising = cmd->advertising;

	if (AUTONEG_ENABLE == cmd->autoneg)
		phydev->advertising |= ADVERTISED_Autoneg;
	else
		phydev->advertising &= ~ADVERTISED_Autoneg;

	phydev->duplex = cmd->duplex;

	phydev->mdix = cmd->eth_tp_mdix_ctrl;

	/* Restart the PHY */
	phy_start_aneg(phydev);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_sset);

int phy_ethtool_ksettings_set(struct phy_device *phydev,
			      const struct ethtool_link_ksettings *cmd)
{
	u8 autoneg = cmd->base.autoneg;
	u8 duplex = cmd->base.duplex;
	u32 speed = cmd->base.speed;
	u32 advertising;

	if (cmd->base.phy_address != phydev->mdio.addr)
		return -EINVAL;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	/* We make sure that we don't pass unsupported values in to the PHY */
	advertising &= phydev->supported;

	/* Verify the settings we care about. */
	if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (autoneg == AUTONEG_ENABLE && advertising == 0)
		return -EINVAL;

	if (autoneg == AUTONEG_DISABLE &&
	    ((speed != SPEED_1000 &&
	      speed != SPEED_100 &&
	      speed != SPEED_10) ||
	     (duplex != DUPLEX_HALF &&
	      duplex != DUPLEX_FULL)))
		return -EINVAL;

	phydev->autoneg = autoneg;

	phydev->speed = speed;

	phydev->advertising = advertising;

	if (autoneg == AUTONEG_ENABLE)
		phydev->advertising |= ADVERTISED_Autoneg;
	else
		phydev->advertising &= ~ADVERTISED_Autoneg;

	phydev->duplex = duplex;

	phydev->mdix = cmd->base.eth_tp_mdix_ctrl;

	/* Restart the PHY */
	phy_start_aneg(phydev);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_ksettings_set);
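
/* Example (illustrative sketch, not part of the original file): forcing
 * 100/Full through the ksettings interface from MAC driver code. Only
 * base.phy_address, base.autoneg, base.speed and base.duplex matter for a
 * forced configuration; the advertising bitmap is only validated when
 * autonegotiation is enabled.
 *
 *	struct ethtool_link_ksettings ks = { };
 *
 *	ks.base.phy_address = phydev->mdio.addr;
 *	ks.base.autoneg = AUTONEG_DISABLE;
 *	ks.base.speed = SPEED_100;
 *	ks.base.duplex = DUPLEX_FULL;
 *	err = phy_ethtool_ksettings_set(phydev, &ks);
 */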

int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	cmd->supported = phydev->supported;

	cmd->advertising = phydev->advertising;
	cmd->lp_advertising = phydev->lp_advertising;

	ethtool_cmd_speed_set(cmd, phydev->speed);
	cmd->duplex = phydev->duplex;
	if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
		cmd->port = PORT_BNC;
	else
		cmd->port = PORT_MII;
	cmd->phy_address = phydev->mdio.addr;
	cmd->transceiver = phy_is_internal(phydev) ?
		XCVR_INTERNAL : XCVR_EXTERNAL;
	cmd->autoneg = phydev->autoneg;
	cmd->eth_tp_mdix_ctrl = phydev->mdix;

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_gset);

int phy_ethtool_ksettings_get(struct phy_device *phydev,
			      struct ethtool_link_ksettings *cmd)
{
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						phydev->supported);

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						phydev->advertising);

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
						phydev->lp_advertising);

	cmd->base.speed = phydev->speed;
	cmd->base.duplex = phydev->duplex;
	if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
		cmd->base.port = PORT_BNC;
	else
		cmd->base.port = PORT_MII;

	cmd->base.phy_address = phydev->mdio.addr;
	cmd->base.autoneg = phydev->autoneg;
	cmd->base.eth_tp_mdix_ctrl = phydev->mdix;

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_ksettings_get);

/**
 * phy_mii_ioctl - generic PHY MII ioctl interface
 * @phydev: the phy_device struct
 * @ifr: &struct ifreq for socket ioctl's
 * @cmd: ioctl cmd to execute
 *
 * Note that this function is currently incompatible with the
 * PHYCONTROL layer.  It changes registers without regard to
 * current state.  Use at own risk.
 */
int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *mii_data = if_mii(ifr);
	u16 val = mii_data->val_in;
	bool change_autoneg = false;

	switch (cmd) {
	case SIOCGMIIPHY:
		mii_data->phy_id = phydev->mdio.addr;
		/* fall through */

	case SIOCGMIIREG:
		mii_data->val_out = mdiobus_read(phydev->mdio.bus,
						 mii_data->phy_id,
						 mii_data->reg_num);
		return 0;

	case SIOCSMIIREG:
		if (mii_data->phy_id == phydev->mdio.addr) {
			switch (mii_data->reg_num) {
			case MII_BMCR:
				if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) {
					if (phydev->autoneg == AUTONEG_ENABLE)
						change_autoneg = true;
					phydev->autoneg = AUTONEG_DISABLE;
					if (val & BMCR_FULLDPLX)
						phydev->duplex = DUPLEX_FULL;
					else
						phydev->duplex = DUPLEX_HALF;
					if (val & BMCR_SPEED1000)
						phydev->speed = SPEED_1000;
					else if (val & BMCR_SPEED100)
						phydev->speed = SPEED_100;
					else
						phydev->speed = SPEED_10;
				} else {
					if (phydev->autoneg == AUTONEG_DISABLE)
						change_autoneg = true;
					phydev->autoneg = AUTONEG_ENABLE;
				}
				break;
			case MII_ADVERTISE:
				phydev->advertising = mii_adv_to_ethtool_adv_t(val);
				change_autoneg = true;
				break;
			default:
				/* do nothing */
				break;
			}
		}

		mdiobus_write(phydev->mdio.bus, mii_data->phy_id,
			      mii_data->reg_num, val);

		if (mii_data->phy_id == phydev->mdio.addr &&
		    mii_data->reg_num == MII_BMCR &&
		    val & BMCR_RESET)
			return phy_init_hw(phydev);

		if (change_autoneg)
			return phy_start_aneg(phydev);

		return 0;

	case SIOCSHWTSTAMP:
		if (phydev->drv->hwtstamp)
			return phydev->drv->hwtstamp(phydev, ifr);
		/* fall through */

	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(phy_mii_ioctl);
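
/* Example (illustrative sketch, not part of the original file): a MAC
 * driver usually forwards the MII ioctls straight to phylib from its
 * ndo_do_ioctl hook. The foo_ioctl name is hypothetical.
 *
 *	static int foo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
 *	{
 *		if (!ndev->phydev)
 *			return -ENODEV;
 *
 *		return phy_mii_ioctl(ndev->phydev, ifr, cmd);
 *	}
 */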

/**
 * phy_start_aneg_priv - start auto-negotiation for this PHY device
 * @phydev: the phy_device struct
 * @sync: indicate whether we should wait for the workqueue cancellation
 *
 * Description: Sanitizes the settings (if we're not autonegotiating
 *   them), and then calls the driver's config_aneg function.
 *   If the PHYCONTROL Layer is operating, we change the state to
 *   reflect the beginning of Auto-negotiation or forcing.
 */
static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
{
	bool trigger = false;
	int err;

	mutex_lock(&phydev->lock);

	if (AUTONEG_DISABLE == phydev->autoneg)
		phy_sanitize_settings(phydev);

	/* Invalidate LP advertising flags */
	phydev->lp_advertising = 0;

	err = phydev->drv->config_aneg(phydev);
	if (err < 0)
		goto out_unlock;

	if (phydev->state != PHY_HALTED) {
		if (AUTONEG_ENABLE == phydev->autoneg) {
			phydev->state = PHY_AN;
			phydev->link_timeout = PHY_AN_TIMEOUT;
		} else {
			phydev->state = PHY_FORCING;
			phydev->link_timeout = PHY_FORCE_TIMEOUT;
		}
	}

	/* Re-schedule a PHY state machine to check PHY status because
	 * negotiation may already be done and aneg interrupt may not be
	 * generated.
	 */
	if (phydev->irq != PHY_POLL && phydev->state == PHY_AN) {
		err = phy_aneg_done(phydev);
		if (err > 0) {
			trigger = true;
			err = 0;
		}
	}

out_unlock:
	mutex_unlock(&phydev->lock);

	if (trigger)
		phy_trigger_machine(phydev, sync);

	return err;
}

/**
 * phy_start_aneg - start auto-negotiation for this PHY device
 * @phydev: the phy_device struct
 *
 * Description: Sanitizes the settings (if we're not autonegotiating
 *   them), and then calls the driver's config_aneg function.
 *   If the PHYCONTROL Layer is operating, we change the state to
 *   reflect the beginning of Auto-negotiation or forcing.
 */
int phy_start_aneg(struct phy_device *phydev)
{
	return phy_start_aneg_priv(phydev, true);
}
EXPORT_SYMBOL(phy_start_aneg);

/**
 * phy_start_machine - start PHY state machine tracking
 * @phydev: the phy_device struct
 *
 * Description: The PHY infrastructure can run a state machine
 *   which tracks whether the PHY is starting up, negotiating,
 *   etc.  This function starts the timer which tracks the state
 *   of the PHY.  If you want to maintain your own state machine,
 *   do not call this function.
 */
void phy_start_machine(struct phy_device *phydev)
{
	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
}
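
/* Usage note (illustrative, not part of the original file): most MAC
 * drivers never call phy_start_machine() directly; phy_connect() is
 * normally expected to start the state machine as part of attaching the
 * PHY. A driver that uses phy_attach_direct() and still wants phylib's
 * state tracking would start it explicitly, for example:
 *
 *	err = phy_attach_direct(ndev, phydev, 0, PHY_INTERFACE_MODE_RGMII);
 *	if (!err)
 *		phy_start_machine(phydev);
 */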

/**
 * phy_trigger_machine - trigger the state machine to run
 *
 * @phydev: the phy_device struct
 * @sync: indicate whether we should wait for the workqueue cancellation
 *
 * Description: There has been a change in state which requires that the
 *   state machine runs.
 */
void phy_trigger_machine(struct phy_device *phydev, bool sync)
{
	if (sync)
		cancel_delayed_work_sync(&phydev->state_queue);
	else
		cancel_delayed_work(&phydev->state_queue);

	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
}

/**
 * phy_stop_machine - stop the PHY state machine tracking
 * @phydev: target phy_device struct
 *
 * Description: Stops the state machine timer, sets the state to UP
 *   (unless it wasn't up yet). This function must be called BEFORE
 *   phy_detach.
 */
void phy_stop_machine(struct phy_device *phydev)
{
	cancel_delayed_work_sync(&phydev->state_queue);

	mutex_lock(&phydev->lock);
	if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
		phydev->state = PHY_UP;
	mutex_unlock(&phydev->lock);
}

/**
 * phy_error - enter HALTED state for this PHY device
 * @phydev: target phy_device struct
 *
 * Moves the PHY to the HALTED state in response to a read
 * or write error, and tells the controller the link is down.
 * Must not be called from interrupt context, or while the
 * phydev->lock is held.
 */
static void phy_error(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);
	phydev->state = PHY_HALTED;
	mutex_unlock(&phydev->lock);

	phy_trigger_machine(phydev, false);
}

/**
 * phy_interrupt - PHY interrupt handler
 * @irq: interrupt line
 * @phy_dat: phy_device pointer
 *
 * Description: When a PHY interrupt occurs, the handler disables
 * interrupts, and schedules a work task to clear the interrupt.
 */
static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
	struct phy_device *phydev = phy_dat;

	if (PHY_HALTED == phydev->state)
		return IRQ_NONE;		/* It can't be ours.  */

	/* The MDIO bus is not allowed to be written in interrupt
	 * context, so we need to disable the irq here.  A work
	 * queue will write the PHY to disable and clear the
	 * interrupt, and then reenable the irq line.
	 */
	disable_irq_nosync(irq);
	atomic_inc(&phydev->irq_disable);

	queue_work(system_power_efficient_wq, &phydev->phy_queue);

	return IRQ_HANDLED;
}

/**
 * phy_enable_interrupts - Enable the interrupts from the PHY side
 * @phydev: target phy_device struct
 */
static int phy_enable_interrupts(struct phy_device *phydev)
{
	int err = phy_clear_interrupt(phydev);

	if (err < 0)
		return err;

	return phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
}

/**
 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
 * @phydev: target phy_device struct
 */
static int phy_disable_interrupts(struct phy_device *phydev)
{
	int err;

	/* Disable PHY interrupts */
	err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
	if (err)
		goto phy_err;

	/* Clear the interrupt */
	err = phy_clear_interrupt(phydev);
	if (err)
		goto phy_err;

	return 0;

phy_err:
	phy_error(phydev);

	return err;
}

/**
 * phy_start_interrupts - request and enable interrupts for a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Request the interrupt for the given PHY.
 *   If this fails, then we set irq to PHY_POLL.
 *   Otherwise, we enable the interrupts in the PHY.
 *   This should only be called with a valid IRQ number.
 *   Returns 0 on success or < 0 on error.
 */
int phy_start_interrupts(struct phy_device *phydev)
{
	atomic_set(&phydev->irq_disable, 0);
	if (request_irq(phydev->irq, phy_interrupt,
			IRQF_SHARED,
			"phy_interrupt",
			phydev) < 0) {
		pr_warn("%s: Can't get IRQ %d (PHY)\n",
			phydev->mdio.bus->name, phydev->irq);
		phydev->irq = PHY_POLL;
		return 0;
	}

	return phy_enable_interrupts(phydev);
}
EXPORT_SYMBOL(phy_start_interrupts);
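
/* Example (illustrative sketch, not part of the original file): a MAC
 * driver that knows the board-level PHY interrupt line sets phydev->irq
 * and then requests it; on request_irq() failure the helper itself falls
 * back to PHY_POLL. The phy_irq variable is hypothetical.
 *
 *	phydev->irq = phy_irq;
 *	if (phy_interrupt_is_valid(phydev))
 *		phy_start_interrupts(phydev);
 */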

/**
 * phy_stop_interrupts - disable interrupts from a PHY device
 * @phydev: target phy_device struct
 */
int phy_stop_interrupts(struct phy_device *phydev)
{
	int err = phy_disable_interrupts(phydev);

	if (err)
		phy_error(phydev);

	free_irq(phydev->irq, phydev);

	/* Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but we do not really care about what would
	 * be done, except from enable_irq(), so cancel any work
	 * possibly pending and take care of the matter below.
	 */
	cancel_work_sync(&phydev->phy_queue);
	/* If work indeed has been cancelled, disable_irq() will have
	 * been left unbalanced from phy_interrupt() and enable_irq()
	 * has to be called so that other devices on the line work.
	 */
	while (atomic_dec_return(&phydev->irq_disable) >= 0)
		enable_irq(phydev->irq);

	return err;
}
EXPORT_SYMBOL(phy_stop_interrupts);

/**
 * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes
 * @work: work_struct that describes the work to be done
 */
void phy_change(struct work_struct *work)
{
	struct phy_device *phydev =
		container_of(work, struct phy_device, phy_queue);

	if (phy_interrupt_is_valid(phydev)) {
		if (phydev->drv->did_interrupt &&
		    !phydev->drv->did_interrupt(phydev))
			goto ignore;

		if (phy_disable_interrupts(phydev))
			goto phy_err;
	}

	mutex_lock(&phydev->lock);
	if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
		phydev->state = PHY_CHANGELINK;
	mutex_unlock(&phydev->lock);

	if (phy_interrupt_is_valid(phydev)) {
		atomic_dec(&phydev->irq_disable);
		enable_irq(phydev->irq);

		/* Reenable interrupts */
		if (PHY_HALTED != phydev->state &&
		    phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
			goto irq_enable_err;
	}

	/* reschedule state queue work to run as soon as possible */
	phy_trigger_machine(phydev, true);
	return;

ignore:
	atomic_dec(&phydev->irq_disable);
	enable_irq(phydev->irq);
	return;

irq_enable_err:
	disable_irq(phydev->irq);
	atomic_inc(&phydev->irq_disable);
phy_err:
	phy_error(phydev);
}

/**
 * phy_stop - Bring down the PHY link, and stop checking the status
 * @phydev: target phy_device struct
 */
void phy_stop(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);

	if (PHY_HALTED == phydev->state)
		goto out_unlock;

	if (phy_interrupt_is_valid(phydev)) {
		/* Disable PHY Interrupts */
		phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

		/* Clear any pending interrupts */
		phy_clear_interrupt(phydev);
	}

	phydev->state = PHY_HALTED;

out_unlock:
	mutex_unlock(&phydev->lock);

	/* Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
	 * will not reenable interrupts.
	 */
}
EXPORT_SYMBOL(phy_stop);

/**
 * phy_start - start or restart a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Indicates the attached device's readiness to
 *   handle PHY-related work.  Used during startup to start the
 *   PHY, and after a call to phy_stop() to resume operation.
 *   Also used to indicate the MDIO bus has cleared an error
 *   condition.
 */
void phy_start(struct phy_device *phydev)
{
	bool do_resume = false;
	int err = 0;

	mutex_lock(&phydev->lock);

	switch (phydev->state) {
	case PHY_STARTING:
		phydev->state = PHY_PENDING;
		break;
	case PHY_READY:
		phydev->state = PHY_UP;
		break;
	case PHY_HALTED:
		/* make sure interrupts are re-enabled for the PHY */
		if (phy_interrupt_is_valid(phydev)) {
			err = phy_enable_interrupts(phydev);
			if (err < 0)
				break;
		}

		phydev->state = PHY_RESUMING;
		do_resume = true;
		break;
	default:
		break;
	}
	mutex_unlock(&phydev->lock);

	/* if phy was suspended, bring the physical link up again */
	if (do_resume)
		phy_resume(phydev);

	phy_trigger_machine(phydev, true);
}
EXPORT_SYMBOL(phy_start);
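
/* Example (illustrative sketch, not part of the original file): the usual
 * lifecycle from a MAC driver's ndo_open/ndo_stop paths. The foo_* names
 * are hypothetical.
 *
 *	// ndo_open
 *	phydev = phy_connect(ndev, phy_name, foo_adjust_link,
 *			     PHY_INTERFACE_MODE_RGMII);
 *	if (IS_ERR(phydev))
 *		return PTR_ERR(phydev);
 *	phy_start(phydev);
 *
 *	// ndo_stop
 *	phy_stop(ndev->phydev);
 *	phy_disconnect(ndev->phydev);
 */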

/**
 * phy_state_machine - Handle the state machine
 * @work: work_struct that describes the work to be done
 */
void phy_state_machine(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct phy_device *phydev =
			container_of(dwork, struct phy_device, state_queue);
	bool needs_aneg = false, do_suspend = false;
	enum phy_state old_state;
	int err = 0;
	int old_link;

	mutex_lock(&phydev->lock);

	old_state = phydev->state;

	if (phydev->drv->link_change_notify)
		phydev->drv->link_change_notify(phydev);

	switch (phydev->state) {
	case PHY_DOWN:
	case PHY_STARTING:
	case PHY_READY:
	case PHY_PENDING:
		break;
	case PHY_UP:
		needs_aneg = true;

		phydev->link_timeout = PHY_AN_TIMEOUT;

		break;
	case PHY_AN:
		err = phy_read_status(phydev);
		if (err < 0)
			break;

		/* If the link is down, give up on negotiation for now */
		if (!phydev->link) {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
			break;
		}

		/* Check if negotiation is done.  Break if there's an error */
		err = phy_aneg_done(phydev);
		if (err < 0)
			break;

		/* If AN is done, we're running */
		if (err > 0) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
		} else if (0 == phydev->link_timeout--)
			needs_aneg = true;
		break;
	case PHY_NOLINK:
		if (phy_interrupt_is_valid(phydev))
			break;

		err = phy_read_status(phydev);
		if (err)
			break;

		if (phydev->link) {
			if (AUTONEG_ENABLE == phydev->autoneg) {
				err = phy_aneg_done(phydev);
				if (err < 0)
					break;

				if (!err) {
					phydev->state = PHY_AN;
					phydev->link_timeout = PHY_AN_TIMEOUT;
					break;
				}
			}
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
		}
		break;
	case PHY_FORCING:
		err = genphy_update_link(phydev);
		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			if (0 == phydev->link_timeout--)
				needs_aneg = true;
		}

		phydev->adjust_link(phydev->attached_dev);
		break;
	case PHY_RUNNING:
		/* Only register a CHANGE if we are polling and link changed
		 * since latest checking.
		 */
		if (phydev->irq == PHY_POLL) {
			old_link = phydev->link;
			err = phy_read_status(phydev);
			if (err)
				break;

			if (old_link != phydev->link)
				phydev->state = PHY_CHANGELINK;
		}
		/*
		 * Failsafe: check that nobody set phydev->link=0 between two
		 * poll cycles, otherwise we won't leave RUNNING state as long
		 * as link remains down.
		 */
		if (!phydev->link && phydev->state == PHY_RUNNING) {
			phydev->state = PHY_CHANGELINK;
			phydev_err(phydev, "no link in PHY_RUNNING\n");
		}
		break;
	case PHY_CHANGELINK:
		err = phy_read_status(phydev);
		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
		}

		phydev->adjust_link(phydev->attached_dev);

		if (phy_interrupt_is_valid(phydev))
			err = phy_config_interrupt(phydev,
						   PHY_INTERRUPT_ENABLED);
		break;
	case PHY_HALTED:
		if (phydev->link) {
			phydev->link = 0;
			netif_carrier_off(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
			do_suspend = true;
		}
		break;
	case PHY_RESUMING:
		if (AUTONEG_ENABLE == phydev->autoneg) {
			err = phy_aneg_done(phydev);
			if (err < 0)
				break;

			/* err > 0 if AN is done.
			 * Otherwise, it's 0, and we're still waiting for AN
			 */
			if (err > 0) {
				err = phy_read_status(phydev);
				if (err)
					break;

				if (phydev->link) {
					phydev->state = PHY_RUNNING;
					netif_carrier_on(phydev->attached_dev);
				} else {
					phydev->state = PHY_NOLINK;
				}
				phydev->adjust_link(phydev->attached_dev);
			} else {
				phydev->state = PHY_AN;
				phydev->link_timeout = PHY_AN_TIMEOUT;
			}
		} else {
			err = phy_read_status(phydev);
			if (err)
				break;

			if (phydev->link) {
				phydev->state = PHY_RUNNING;
				netif_carrier_on(phydev->attached_dev);
			} else {
				phydev->state = PHY_NOLINK;
			}
			phydev->adjust_link(phydev->attached_dev);
		}
		break;
	}

	mutex_unlock(&phydev->lock);

	if (needs_aneg)
		err = phy_start_aneg_priv(phydev, false);
	else if (do_suspend)
		phy_suspend(phydev);

	if (err < 0)
		phy_error(phydev);

	phydev_dbg(phydev, "PHY state change %s -> %s\n",
		   phy_state_to_str(old_state),
		   phy_state_to_str(phydev->state));

	/* Only re-schedule a PHY state machine change if we are polling the
	 * PHY, if PHY_IGNORE_INTERRUPT is set, then we will be moving
	 * between states from phy_mac_interrupt()
	 */
	if (phydev->irq == PHY_POLL)
		queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
				   PHY_STATE_TIME * HZ);
}

void phy_mac_interrupt(struct phy_device *phydev, int new_link)
{
	phydev->link = new_link;

	/* Trigger a state machine change */
	queue_work(system_power_efficient_wq, &phydev->phy_queue);
}
EXPORT_SYMBOL(phy_mac_interrupt);

static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
				    int addr)
{
	/* Write the desired MMD Devad */
	bus->write(bus, addr, MII_MMD_CTRL, devad);

	/* Write the desired MMD register address */
	bus->write(bus, addr, MII_MMD_DATA, prtad);

	/* Select the Function : DATA with no post increment */
	bus->write(bus, addr, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
}

/**
 * phy_read_mmd_indirect - reads data from the MMD registers
 * @phydev: The PHY device
 * @prtad: MMD register address
 * @devad: MMD device address (DEVAD)
 *
 * Description: Reads data from the MMD registers (clause 22 to access
 * clause 45) of the specified PHY address.
 * To read these registers:
 * 1) Write reg 13 // DEVAD
 * 2) Write reg 14 // MMD Address
 * 3) Write reg 13 // MMD Data Command for MMD DEVAD
 * 4) Read reg 14  // Read MMD data
 */
int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, int devad)
{
	struct phy_driver *phydrv = phydev->drv;
	int addr = phydev->mdio.addr;
	int value = -1;

	if (!phydrv->read_mmd_indirect) {
		struct mii_bus *bus = phydev->mdio.bus;

		mutex_lock(&bus->mdio_lock);
		mmd_phy_indirect(bus, prtad, devad, addr);

		/* Read the content of the MMD's selected register */
		value = bus->read(bus, addr, MII_MMD_DATA);
		mutex_unlock(&bus->mdio_lock);
	} else {
		value = phydrv->read_mmd_indirect(phydev, prtad, devad, addr);
	}
	return value;
}
EXPORT_SYMBOL(phy_read_mmd_indirect);
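
/* Example (illustrative sketch, not part of the original file): reading
 * the EEE ability register (MMD 3, register 3.20) through the indirect
 * clause 22 access helper.
 *
 *	int eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
 *					    MDIO_MMD_PCS);
 *	if (eee_cap < 0)
 *		return eee_cap;	// MDIO read error
 */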

/**
 * phy_write_mmd_indirect - writes data to the MMD registers
 * @phydev: The PHY device
 * @prtad: MMD register address
 * @devad: MMD device address (DEVAD)
 * @data: data to write in the MMD register
 *
 * Description: Writes data to the MMD registers (clause 22 to access
 * clause 45) of the specified PHY address.
 * To write these registers:
 * 1) Write reg 13 // DEVAD
 * 2) Write reg 14 // MMD Address
 * 3) Write reg 13 // MMD Data Command for MMD DEVAD
 * 4) Write reg 14 // Write MMD data
 */
void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
			    int devad, u32 data)
{
	struct phy_driver *phydrv = phydev->drv;
	int addr = phydev->mdio.addr;

	if (!phydrv->write_mmd_indirect) {
		struct mii_bus *bus = phydev->mdio.bus;

		mutex_lock(&bus->mdio_lock);
		mmd_phy_indirect(bus, prtad, devad, addr);

		/* Write the data into MMD's selected register */
		bus->write(bus, addr, MII_MMD_DATA, data);
		mutex_unlock(&bus->mdio_lock);
	} else {
		phydrv->write_mmd_indirect(phydev, prtad, devad, addr, data);
	}
}
EXPORT_SYMBOL(phy_write_mmd_indirect);

/**
 * phy_init_eee - init and check the EEE feature
 * @phydev: target phy_device struct
 * @clk_stop_enable: PHY may stop the clock during LPI
 *
 * Description: Checks whether Energy-Efficient Ethernet (EEE) is supported
 * by looking at the MMD registers 3.20 and 7.60/61, and programs the MMD
 * register 3.0, setting the "Clock stop enable" bit if required.
 */
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
{
	/* According to 802.3az, EEE is supported only in full duplex mode.
	 * Also, the EEE feature is active when the core is operating with MII,
	 * GMII or RGMII (all kinds). Internal PHYs are also allowed to proceed
	 * and should return an error if they do not support EEE.
	 */
	if ((phydev->duplex == DUPLEX_FULL) &&
	    ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
	     (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
	     phy_interface_is_rgmii(phydev) ||
	     phy_is_internal(phydev))) {
		int eee_lp, eee_cap, eee_adv;
		u32 lp, cap, adv;
		int status;

		/* Read phy status to properly get the right settings */
		status = phy_read_status(phydev);
		if (status)
			return status;

		/* First check if the EEE ability is supported */
		eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
						MDIO_MMD_PCS);
		if (eee_cap <= 0)
			goto eee_exit_err;

		cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
		if (!cap)
			goto eee_exit_err;

		/* Check which link settings negotiated and verify it in
		 * the EEE advertising registers.
		 */
		eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE,
					       MDIO_MMD_AN);
		if (eee_lp <= 0)
			goto eee_exit_err;

		eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
						MDIO_MMD_AN);
		if (eee_adv <= 0)
			goto eee_exit_err;

		adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
		lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
		if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
			goto eee_exit_err;

		if (clk_stop_enable) {
			/* Configure the PHY to stop receiving xMII
			 * clock while it is signaling LPI.
			 */
			int val = phy_read_mmd_indirect(phydev, MDIO_CTRL1,
							MDIO_MMD_PCS);
			if (val < 0)
				return val;

			val |= MDIO_PCS_CTRL1_CLKSTOP_EN;
			phy_write_mmd_indirect(phydev, MDIO_CTRL1,
					       MDIO_MMD_PCS, val);
		}

		return 0; /* EEE supported */
	}
eee_exit_err:
	return -EPROTONOSUPPORT;
}
EXPORT_SYMBOL(phy_init_eee);
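
/* Example (illustrative sketch, not part of the original file): a MAC
 * driver that can gate the xMII clock during LPI would typically enable
 * EEE after the link comes up, e.g. from its adjust_link callback.
 *
 *	if (phydev->link && phy_init_eee(phydev, true) >= 0) {
 *		// EEE is usable at the negotiated speed/duplex;
 *		// enable the MAC's LPI/EEE logic here.
 *	}
 */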

/**
 * phy_get_eee_err - report the EEE wake error count
 * @phydev: target phy_device struct
 *
 * Description: Reports the number of times the PHY failed to complete
 * its normal wake sequence.
 */
int phy_get_eee_err(struct phy_device *phydev)
{
	return phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_WK_ERR, MDIO_MMD_PCS);
}
EXPORT_SYMBOL(phy_get_eee_err);

/**
 * phy_ethtool_get_eee - get EEE supported and status
 * @phydev: target phy_device struct
 * @data: ethtool_eee data
 *
 * Description: Reports the Supported/Advertised/LP Advertised EEE
 * capabilities.
 */
int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
	int val;

	/* Get Supported EEE */
	val = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE, MDIO_MMD_PCS);
	if (val < 0)
		return val;
	data->supported = mmd_eee_cap_to_ethtool_sup_t(val);

	/* Get advertisement EEE */
	val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN);
	if (val < 0)
		return val;
	data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Get LP advertisement EEE */
	val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE, MDIO_MMD_AN);
	if (val < 0)
		return val;
	data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_get_eee);

/**
 * phy_ethtool_set_eee - set EEE supported and status
 * @phydev: target phy_device struct
 * @data: ethtool_eee data
 *
 * Description: Programs the EEE advertisement register.
 */
int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
	int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);

	/* Mask prohibited EEE modes */
	val &= ~phydev->eee_broken_modes;

	phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN, val);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_set_eee);

int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
	if (phydev->drv->set_wol)
		return phydev->drv->set_wol(phydev, wol);

	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(phy_ethtool_set_wol);

void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
	if (phydev->drv->get_wol)
		phydev->drv->get_wol(phydev, wol);
}
EXPORT_SYMBOL(phy_ethtool_get_wol);

int phy_ethtool_get_link_ksettings(struct net_device *ndev,
				   struct ethtool_link_ksettings *cmd)
{
	struct phy_device *phydev = ndev->phydev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_get(phydev, cmd);
}
EXPORT_SYMBOL(phy_ethtool_get_link_ksettings);

int phy_ethtool_set_link_ksettings(struct net_device *ndev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct phy_device *phydev = ndev->phydev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_set(phydev, cmd);
}
EXPORT_SYMBOL(phy_ethtool_set_link_ksettings);
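
/* Example (illustrative sketch, not part of the original file): a MAC
 * driver can plug the two helpers above directly into its ethtool_ops so
 * that "ethtool" and "ethtool -s" on the interface go through phylib.
 *
 *	static const struct ethtool_ops foo_ethtool_ops = {
 *		.get_link		= ethtool_op_get_link,
 *		.get_link_ksettings	= phy_ethtool_get_link_ksettings,
 *		.set_link_ksettings	= phy_ethtool_set_link_ksettings,
 *	};
 */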