/* e1000_i210.c */
/* Intel(R) Gigabit Ethernet Linux driver
 * Copyright(c) 2007-2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

/* e1000_i210
 * e1000_i211
 */
  26. #include <linux/types.h>
  27. #include <linux/if_ether.h>
  28. #include "e1000_hw.h"
  29. #include "e1000_i210.h"
  30. static s32 igb_update_flash_i210(struct e1000_hw *hw);
  31. /**
  32. * igb_get_hw_semaphore_i210 - Acquire hardware semaphore
  33. * @hw: pointer to the HW structure
  34. *
  35. * Acquire the HW semaphore to access the PHY or NVM
  36. */
  37. static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
  38. {
  39. u32 swsm;
  40. s32 timeout = hw->nvm.word_size + 1;
  41. s32 i = 0;
  42. /* Get the SW semaphore */
  43. while (i < timeout) {
  44. swsm = rd32(E1000_SWSM);
  45. if (!(swsm & E1000_SWSM_SMBI))
  46. break;
  47. udelay(50);
  48. i++;
  49. }
  50. if (i == timeout) {
  51. /* In rare circumstances, the SW semaphore may already be held
  52. * unintentionally. Clear the semaphore once before giving up.
  53. */
  54. if (hw->dev_spec._82575.clear_semaphore_once) {
  55. hw->dev_spec._82575.clear_semaphore_once = false;
  56. igb_put_hw_semaphore(hw);
  57. for (i = 0; i < timeout; i++) {
  58. swsm = rd32(E1000_SWSM);
  59. if (!(swsm & E1000_SWSM_SMBI))
  60. break;
  61. udelay(50);
  62. }
  63. }
  64. /* If we do not have the semaphore here, we have to give up. */
  65. if (i == timeout) {
  66. hw_dbg("Driver can't access device - SMBI bit is set.\n");
  67. return -E1000_ERR_NVM;
  68. }
  69. }
  70. /* Get the FW semaphore. */
  71. for (i = 0; i < timeout; i++) {
  72. swsm = rd32(E1000_SWSM);
  73. wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
  74. /* Semaphore acquired if bit latched */
  75. if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
  76. break;
  77. udelay(50);
  78. }
  79. if (i == timeout) {
  80. /* Release semaphores */
  81. igb_put_hw_semaphore(hw);
  82. hw_dbg("Driver can't access the NVM\n");
  83. return -E1000_ERR_NVM;
  84. }
  85. return 0;
  86. }
  87. /**
  88. * igb_acquire_nvm_i210 - Request for access to EEPROM
  89. * @hw: pointer to the HW structure
  90. *
  91. * Acquire the necessary semaphores for exclusive access to the EEPROM.
  92. * Set the EEPROM access request bit and wait for EEPROM access grant bit.
  93. * Return successful if access grant bit set, else clear the request for
  94. * EEPROM access and return -E1000_ERR_NVM (-1).
  95. **/
  96. static s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
  97. {
  98. return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
  99. }
  100. /**
  101. * igb_release_nvm_i210 - Release exclusive access to EEPROM
  102. * @hw: pointer to the HW structure
  103. *
  104. * Stop any current commands to the EEPROM and clear the EEPROM request bit,
  105. * then release the semaphores acquired.
  106. **/
  107. static void igb_release_nvm_i210(struct e1000_hw *hw)
  108. {
  109. igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
  110. }
  111. /**
  112. * igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
  113. * @hw: pointer to the HW structure
  114. * @mask: specifies which semaphore to acquire
  115. *
  116. * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
  117. * will also specify which port we're acquiring the lock for.
  118. **/
  119. s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
  120. {
  121. u32 swfw_sync;
  122. u32 swmask = mask;
  123. u32 fwmask = mask << 16;
  124. s32 ret_val = 0;
  125. s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
  126. while (i < timeout) {
  127. if (igb_get_hw_semaphore_i210(hw)) {
  128. ret_val = -E1000_ERR_SWFW_SYNC;
  129. goto out;
  130. }
  131. swfw_sync = rd32(E1000_SW_FW_SYNC);
  132. if (!(swfw_sync & (fwmask | swmask)))
  133. break;
  134. /* Firmware currently using resource (fwmask) */
  135. igb_put_hw_semaphore(hw);
  136. mdelay(5);
  137. i++;
  138. }
  139. if (i == timeout) {
  140. hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
  141. ret_val = -E1000_ERR_SWFW_SYNC;
  142. goto out;
  143. }
  144. swfw_sync |= swmask;
  145. wr32(E1000_SW_FW_SYNC, swfw_sync);
  146. igb_put_hw_semaphore(hw);
  147. out:
  148. return ret_val;
  149. }
  150. /**
  151. * igb_release_swfw_sync_i210 - Release SW/FW semaphore
  152. * @hw: pointer to the HW structure
  153. * @mask: specifies which semaphore to acquire
  154. *
  155. * Release the SW/FW semaphore used to access the PHY or NVM. The mask
  156. * will also specify which port we're releasing the lock for.
  157. **/
  158. void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
  159. {
  160. u32 swfw_sync;
  161. while (igb_get_hw_semaphore_i210(hw))
  162. ; /* Empty */
  163. swfw_sync = rd32(E1000_SW_FW_SYNC);
  164. swfw_sync &= ~mask;
  165. wr32(E1000_SW_FW_SYNC, swfw_sync);
  166. igb_put_hw_semaphore(hw);
  167. }
  168. /**
  169. * igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
  170. * @hw: pointer to the HW structure
  171. * @offset: offset of word in the Shadow Ram to read
  172. * @words: number of words to read
  173. * @data: word read from the Shadow Ram
  174. *
  175. * Reads a 16 bit word from the Shadow Ram using the EERD register.
  176. * Uses necessary synchronization semaphores.
  177. **/
  178. static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
  179. u16 *data)
  180. {
  181. s32 status = 0;
  182. u16 i, count;
  183. /* We cannot hold synchronization semaphores for too long,
  184. * because of forceful takeover procedure. However it is more efficient
  185. * to read in bursts than synchronizing access for each word.
  186. */
  187. for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
  188. count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
  189. E1000_EERD_EEWR_MAX_COUNT : (words - i);
  190. if (!(hw->nvm.ops.acquire(hw))) {
  191. status = igb_read_nvm_eerd(hw, offset, count,
  192. data + i);
  193. hw->nvm.ops.release(hw);
  194. } else {
  195. status = E1000_ERR_SWFW_SYNC;
  196. }
  197. if (status)
  198. break;
  199. }
  200. return status;
  201. }
  202. /**
  203. * igb_write_nvm_srwr - Write to Shadow Ram using EEWR
  204. * @hw: pointer to the HW structure
  205. * @offset: offset within the Shadow Ram to be written to
  206. * @words: number of words to write
  207. * @data: 16 bit word(s) to be written to the Shadow Ram
  208. *
  209. * Writes data to Shadow Ram at offset using EEWR register.
  210. *
  211. * If igb_update_nvm_checksum is not called after this function , the
  212. * Shadow Ram will most likely contain an invalid checksum.
  213. **/
  214. static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
  215. u16 *data)
  216. {
  217. struct e1000_nvm_info *nvm = &hw->nvm;
  218. u32 i, k, eewr = 0;
  219. u32 attempts = 100000;
  220. s32 ret_val = 0;
  221. /* A check for invalid values: offset too large, too many words,
  222. * too many words for the offset, and not enough words.
  223. */
  224. if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
  225. (words == 0)) {
  226. hw_dbg("nvm parameter(s) out of bounds\n");
  227. ret_val = -E1000_ERR_NVM;
  228. goto out;
  229. }
  230. for (i = 0; i < words; i++) {
  231. eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
  232. (data[i] << E1000_NVM_RW_REG_DATA) |
  233. E1000_NVM_RW_REG_START;
  234. wr32(E1000_SRWR, eewr);
  235. for (k = 0; k < attempts; k++) {
  236. if (E1000_NVM_RW_REG_DONE &
  237. rd32(E1000_SRWR)) {
  238. ret_val = 0;
  239. break;
  240. }
  241. udelay(5);
  242. }
  243. if (ret_val) {
  244. hw_dbg("Shadow RAM write EEWR timed out\n");
  245. break;
  246. }
  247. }
  248. out:
  249. return ret_val;
  250. }
  251. /**
  252. * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
  253. * @hw: pointer to the HW structure
  254. * @offset: offset within the Shadow RAM to be written to
  255. * @words: number of words to write
  256. * @data: 16 bit word(s) to be written to the Shadow RAM
  257. *
  258. * Writes data to Shadow RAM at offset using EEWR register.
  259. *
  260. * If e1000_update_nvm_checksum is not called after this function , the
  261. * data will not be committed to FLASH and also Shadow RAM will most likely
  262. * contain an invalid checksum.
  263. *
  264. * If error code is returned, data and Shadow RAM may be inconsistent - buffer
  265. * partially written.
  266. **/
  267. static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
  268. u16 *data)
  269. {
  270. s32 status = 0;
  271. u16 i, count;
  272. /* We cannot hold synchronization semaphores for too long,
  273. * because of forceful takeover procedure. However it is more efficient
  274. * to write in bursts than synchronizing access for each word.
  275. */
  276. for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
  277. count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
  278. E1000_EERD_EEWR_MAX_COUNT : (words - i);
  279. if (!(hw->nvm.ops.acquire(hw))) {
  280. status = igb_write_nvm_srwr(hw, offset, count,
  281. data + i);
  282. hw->nvm.ops.release(hw);
  283. } else {
  284. status = E1000_ERR_SWFW_SYNC;
  285. }
  286. if (status)
  287. break;
  288. }
  289. return status;
  290. }
  291. /**
  292. * igb_read_invm_word_i210 - Reads OTP
  293. * @hw: pointer to the HW structure
  294. * @address: the word address (aka eeprom offset) to read
  295. * @data: pointer to the data read
  296. *
  297. * Reads 16-bit words from the OTP. Return error when the word is not
  298. * stored in OTP.
  299. **/
  300. static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
  301. {
  302. s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
  303. u32 invm_dword;
  304. u16 i;
  305. u8 record_type, word_address;
  306. for (i = 0; i < E1000_INVM_SIZE; i++) {
  307. invm_dword = rd32(E1000_INVM_DATA_REG(i));
  308. /* Get record type */
  309. record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
  310. if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
  311. break;
  312. if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
  313. i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
  314. if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
  315. i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
  316. if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
  317. word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
  318. if (word_address == address) {
  319. *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
  320. hw_dbg("Read INVM Word 0x%02x = %x\n",
  321. address, *data);
  322. status = 0;
  323. break;
  324. }
  325. }
  326. }
  327. if (status)
  328. hw_dbg("Requested word 0x%02x not found in OTP\n", address);
  329. return status;
  330. }
  331. /**
  332. * igb_read_invm_i210 - Read invm wrapper function for I210/I211
  333. * @hw: pointer to the HW structure
  334. * @words: number of words to read
  335. * @data: pointer to the data read
  336. *
  337. * Wrapper function to return data formerly found in the NVM.
  338. **/
  339. static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
  340. u16 words __always_unused, u16 *data)
  341. {
  342. s32 ret_val = 0;
  343. /* Only the MAC addr is required to be present in the iNVM */
  344. switch (offset) {
  345. case NVM_MAC_ADDR:
  346. ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]);
  347. ret_val |= igb_read_invm_word_i210(hw, (u8)offset+1,
  348. &data[1]);
  349. ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2,
  350. &data[2]);
  351. if (ret_val)
  352. hw_dbg("MAC Addr not found in iNVM\n");
  353. break;
  354. case NVM_INIT_CTRL_2:
  355. ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
  356. if (ret_val) {
  357. *data = NVM_INIT_CTRL_2_DEFAULT_I211;
  358. ret_val = 0;
  359. }
  360. break;
  361. case NVM_INIT_CTRL_4:
  362. ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
  363. if (ret_val) {
  364. *data = NVM_INIT_CTRL_4_DEFAULT_I211;
  365. ret_val = 0;
  366. }
  367. break;
  368. case NVM_LED_1_CFG:
  369. ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
  370. if (ret_val) {
  371. *data = NVM_LED_1_CFG_DEFAULT_I211;
  372. ret_val = 0;
  373. }
  374. break;
  375. case NVM_LED_0_2_CFG:
  376. ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
  377. if (ret_val) {
  378. *data = NVM_LED_0_2_CFG_DEFAULT_I211;
  379. ret_val = 0;
  380. }
  381. break;
  382. case NVM_ID_LED_SETTINGS:
  383. ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
  384. if (ret_val) {
  385. *data = ID_LED_RESERVED_FFFF;
  386. ret_val = 0;
  387. }
  388. break;
  389. case NVM_SUB_DEV_ID:
  390. *data = hw->subsystem_device_id;
  391. break;
  392. case NVM_SUB_VEN_ID:
  393. *data = hw->subsystem_vendor_id;
  394. break;
  395. case NVM_DEV_ID:
  396. *data = hw->device_id;
  397. break;
  398. case NVM_VEN_ID:
  399. *data = hw->vendor_id;
  400. break;
  401. default:
  402. hw_dbg("NVM word 0x%02x is not mapped.\n", offset);
  403. *data = NVM_RESERVED_WORD;
  404. break;
  405. }
  406. return ret_val;
  407. }
  408. /**
  409. * igb_read_invm_version - Reads iNVM version and image type
  410. * @hw: pointer to the HW structure
  411. * @invm_ver: version structure for the version read
  412. *
  413. * Reads iNVM version and image type.
  414. **/
  415. s32 igb_read_invm_version(struct e1000_hw *hw,
  416. struct e1000_fw_version *invm_ver) {
  417. u32 *record = NULL;
  418. u32 *next_record = NULL;
  419. u32 i = 0;
  420. u32 invm_dword = 0;
  421. u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
  422. E1000_INVM_RECORD_SIZE_IN_BYTES);
  423. u32 buffer[E1000_INVM_SIZE];
  424. s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
  425. u16 version = 0;
  426. /* Read iNVM memory */
  427. for (i = 0; i < E1000_INVM_SIZE; i++) {
  428. invm_dword = rd32(E1000_INVM_DATA_REG(i));
  429. buffer[i] = invm_dword;
  430. }
  431. /* Read version number */
  432. for (i = 1; i < invm_blocks; i++) {
  433. record = &buffer[invm_blocks - i];
  434. next_record = &buffer[invm_blocks - i + 1];
  435. /* Check if we have first version location used */
  436. if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
  437. version = 0;
  438. status = 0;
  439. break;
  440. }
  441. /* Check if we have second version location used */
  442. else if ((i == 1) &&
  443. ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
  444. version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
  445. status = 0;
  446. break;
  447. }
  448. /* Check if we have odd version location
  449. * used and it is the last one used
  450. */
  451. else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
  452. ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
  453. (i != 1))) {
  454. version = (*next_record & E1000_INVM_VER_FIELD_TWO)
  455. >> 13;
  456. status = 0;
  457. break;
  458. }
  459. /* Check if we have even version location
  460. * used and it is the last one used
  461. */
  462. else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
  463. ((*record & 0x3) == 0)) {
  464. version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
  465. status = 0;
  466. break;
  467. }
  468. }
  469. if (!status) {
  470. invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
  471. >> E1000_INVM_MAJOR_SHIFT;
  472. invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
  473. }
  474. /* Read Image Type */
  475. for (i = 1; i < invm_blocks; i++) {
  476. record = &buffer[invm_blocks - i];
  477. next_record = &buffer[invm_blocks - i + 1];
  478. /* Check if we have image type in first location used */
  479. if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
  480. invm_ver->invm_img_type = 0;
  481. status = 0;
  482. break;
  483. }
  484. /* Check if we have image type in first location used */
  485. else if ((((*record & 0x3) == 0) &&
  486. ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
  487. ((((*record & 0x3) != 0) && (i != 1)))) {
  488. invm_ver->invm_img_type =
  489. (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
  490. status = 0;
  491. break;
  492. }
  493. }
  494. return status;
  495. }
  496. /**
  497. * igb_validate_nvm_checksum_i210 - Validate EEPROM checksum
  498. * @hw: pointer to the HW structure
  499. *
  500. * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
  501. * and then verifies that the sum of the EEPROM is equal to 0xBABA.
  502. **/
  503. static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
  504. {
  505. s32 status = 0;
  506. s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
  507. if (!(hw->nvm.ops.acquire(hw))) {
  508. /* Replace the read function with semaphore grabbing with
  509. * the one that skips this for a while.
  510. * We have semaphore taken already here.
  511. */
  512. read_op_ptr = hw->nvm.ops.read;
  513. hw->nvm.ops.read = igb_read_nvm_eerd;
  514. status = igb_validate_nvm_checksum(hw);
  515. /* Revert original read operation. */
  516. hw->nvm.ops.read = read_op_ptr;
  517. hw->nvm.ops.release(hw);
  518. } else {
  519. status = E1000_ERR_SWFW_SYNC;
  520. }
  521. return status;
  522. }
  523. /**
  524. * igb_update_nvm_checksum_i210 - Update EEPROM checksum
  525. * @hw: pointer to the HW structure
  526. *
  527. * Updates the EEPROM checksum by reading/adding each word of the EEPROM
  528. * up to the checksum. Then calculates the EEPROM checksum and writes the
  529. * value to the EEPROM. Next commit EEPROM data onto the Flash.
  530. **/
  531. static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
  532. {
  533. s32 ret_val = 0;
  534. u16 checksum = 0;
  535. u16 i, nvm_data;
  536. /* Read the first word from the EEPROM. If this times out or fails, do
  537. * not continue or we could be in for a very long wait while every
  538. * EEPROM read fails
  539. */
  540. ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data);
  541. if (ret_val) {
  542. hw_dbg("EEPROM read failed\n");
  543. goto out;
  544. }
  545. if (!(hw->nvm.ops.acquire(hw))) {
  546. /* Do not use hw->nvm.ops.write, hw->nvm.ops.read
  547. * because we do not want to take the synchronization
  548. * semaphores twice here.
  549. */
  550. for (i = 0; i < NVM_CHECKSUM_REG; i++) {
  551. ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data);
  552. if (ret_val) {
  553. hw->nvm.ops.release(hw);
  554. hw_dbg("NVM Read Error while updating checksum.\n");
  555. goto out;
  556. }
  557. checksum += nvm_data;
  558. }
  559. checksum = (u16) NVM_SUM - checksum;
  560. ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
  561. &checksum);
  562. if (ret_val) {
  563. hw->nvm.ops.release(hw);
  564. hw_dbg("NVM Write Error while updating checksum.\n");
  565. goto out;
  566. }
  567. hw->nvm.ops.release(hw);
  568. ret_val = igb_update_flash_i210(hw);
  569. } else {
  570. ret_val = -E1000_ERR_SWFW_SYNC;
  571. }
  572. out:
  573. return ret_val;
  574. }
  575. /**
  576. * igb_pool_flash_update_done_i210 - Pool FLUDONE status.
  577. * @hw: pointer to the HW structure
  578. *
  579. **/
  580. static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
  581. {
  582. s32 ret_val = -E1000_ERR_NVM;
  583. u32 i, reg;
  584. for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
  585. reg = rd32(E1000_EECD);
  586. if (reg & E1000_EECD_FLUDONE_I210) {
  587. ret_val = 0;
  588. break;
  589. }
  590. udelay(5);
  591. }
  592. return ret_val;
  593. }
  594. /**
  595. * igb_get_flash_presence_i210 - Check if flash device is detected.
  596. * @hw: pointer to the HW structure
  597. *
  598. **/
  599. bool igb_get_flash_presence_i210(struct e1000_hw *hw)
  600. {
  601. u32 eec = 0;
  602. bool ret_val = false;
  603. eec = rd32(E1000_EECD);
  604. if (eec & E1000_EECD_FLASH_DETECTED_I210)
  605. ret_val = true;
  606. return ret_val;
  607. }
  608. /**
  609. * igb_update_flash_i210 - Commit EEPROM to the flash
  610. * @hw: pointer to the HW structure
  611. *
  612. **/
  613. static s32 igb_update_flash_i210(struct e1000_hw *hw)
  614. {
  615. s32 ret_val = 0;
  616. u32 flup;
  617. ret_val = igb_pool_flash_update_done_i210(hw);
  618. if (ret_val == -E1000_ERR_NVM) {
  619. hw_dbg("Flash update time out\n");
  620. goto out;
  621. }
  622. flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210;
  623. wr32(E1000_EECD, flup);
  624. ret_val = igb_pool_flash_update_done_i210(hw);
  625. if (ret_val)
  626. hw_dbg("Flash update time out\n");
  627. else
  628. hw_dbg("Flash update complete\n");
  629. out:
  630. return ret_val;
  631. }
  632. /**
  633. * igb_valid_led_default_i210 - Verify a valid default LED config
  634. * @hw: pointer to the HW structure
  635. * @data: pointer to the NVM (EEPROM)
  636. *
  637. * Read the EEPROM for the current default LED configuration. If the
  638. * LED configuration is not valid, set to a valid LED configuration.
  639. **/
  640. s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
  641. {
  642. s32 ret_val;
  643. ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
  644. if (ret_val) {
  645. hw_dbg("NVM Read Error\n");
  646. goto out;
  647. }
  648. if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
  649. switch (hw->phy.media_type) {
  650. case e1000_media_type_internal_serdes:
  651. *data = ID_LED_DEFAULT_I210_SERDES;
  652. break;
  653. case e1000_media_type_copper:
  654. default:
  655. *data = ID_LED_DEFAULT_I210;
  656. break;
  657. }
  658. }
  659. out:
  660. return ret_val;
  661. }
  662. /**
  663. * __igb_access_xmdio_reg - Read/write XMDIO register
  664. * @hw: pointer to the HW structure
  665. * @address: XMDIO address to program
  666. * @dev_addr: device address to program
  667. * @data: pointer to value to read/write from/to the XMDIO address
  668. * @read: boolean flag to indicate read or write
  669. **/
  670. static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address,
  671. u8 dev_addr, u16 *data, bool read)
  672. {
  673. s32 ret_val = 0;
  674. ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
  675. if (ret_val)
  676. return ret_val;
  677. ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
  678. if (ret_val)
  679. return ret_val;
  680. ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
  681. dev_addr);
  682. if (ret_val)
  683. return ret_val;
  684. if (read)
  685. ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
  686. else
  687. ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
  688. if (ret_val)
  689. return ret_val;
  690. /* Recalibrate the device back to 0 */
  691. ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
  692. if (ret_val)
  693. return ret_val;
  694. return ret_val;
  695. }
  696. /**
  697. * igb_read_xmdio_reg - Read XMDIO register
  698. * @hw: pointer to the HW structure
  699. * @addr: XMDIO address to program
  700. * @dev_addr: device address to program
  701. * @data: value to be read from the EMI address
  702. **/
  703. s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
  704. {
  705. return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true);
  706. }
  707. /**
  708. * igb_write_xmdio_reg - Write XMDIO register
  709. * @hw: pointer to the HW structure
  710. * @addr: XMDIO address to program
  711. * @dev_addr: device address to program
  712. * @data: value to be written to the XMDIO address
  713. **/
  714. s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
  715. {
  716. return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false);
  717. }
  718. /**
  719. * igb_init_nvm_params_i210 - Init NVM func ptrs.
  720. * @hw: pointer to the HW structure
  721. **/
  722. s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
  723. {
  724. s32 ret_val = 0;
  725. struct e1000_nvm_info *nvm = &hw->nvm;
  726. nvm->ops.acquire = igb_acquire_nvm_i210;
  727. nvm->ops.release = igb_release_nvm_i210;
  728. nvm->ops.valid_led_default = igb_valid_led_default_i210;
  729. /* NVM Function Pointers */
  730. if (igb_get_flash_presence_i210(hw)) {
  731. hw->nvm.type = e1000_nvm_flash_hw;
  732. nvm->ops.read = igb_read_nvm_srrd_i210;
  733. nvm->ops.write = igb_write_nvm_srwr_i210;
  734. nvm->ops.validate = igb_validate_nvm_checksum_i210;
  735. nvm->ops.update = igb_update_nvm_checksum_i210;
  736. } else {
  737. hw->nvm.type = e1000_nvm_invm;
  738. nvm->ops.read = igb_read_invm_i210;
  739. nvm->ops.write = NULL;
  740. nvm->ops.validate = NULL;
  741. nvm->ops.update = NULL;
  742. }
  743. return ret_val;
  744. }
  745. /**
  746. * igb_pll_workaround_i210
  747. * @hw: pointer to the HW structure
  748. *
  749. * Works around an errata in the PLL circuit where it occasionally
  750. * provides the wrong clock frequency after power up.
  751. **/
  752. s32 igb_pll_workaround_i210(struct e1000_hw *hw)
  753. {
  754. s32 ret_val;
  755. u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
  756. u16 nvm_word, phy_word, pci_word, tmp_nvm;
  757. int i;
  758. /* Get and set needed register values */
  759. wuc = rd32(E1000_WUC);
  760. mdicnfg = rd32(E1000_MDICNFG);
  761. reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
  762. wr32(E1000_MDICNFG, reg_val);
  763. /* Get data from NVM, or set default */
  764. ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
  765. &nvm_word);
  766. if (ret_val)
  767. nvm_word = E1000_INVM_DEFAULT_AL;
  768. tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
  769. igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, E1000_PHY_PLL_FREQ_PAGE);
  770. for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
  771. /* check current state directly from internal PHY */
  772. igb_read_phy_reg_82580(hw, E1000_PHY_PLL_FREQ_REG, &phy_word);
  773. if ((phy_word & E1000_PHY_PLL_UNCONF)
  774. != E1000_PHY_PLL_UNCONF) {
  775. ret_val = 0;
  776. break;
  777. } else {
  778. ret_val = -E1000_ERR_PHY;
  779. }
  780. /* directly reset the internal PHY */
  781. ctrl = rd32(E1000_CTRL);
  782. wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);
  783. ctrl_ext = rd32(E1000_CTRL_EXT);
  784. ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
  785. wr32(E1000_CTRL_EXT, ctrl_ext);
  786. wr32(E1000_WUC, 0);
  787. reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
  788. wr32(E1000_EEARBC_I210, reg_val);
  789. igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
  790. pci_word |= E1000_PCI_PMCSR_D3;
  791. igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
  792. usleep_range(1000, 2000);
  793. pci_word &= ~E1000_PCI_PMCSR_D3;
  794. igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
  795. reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
  796. wr32(E1000_EEARBC_I210, reg_val);
  797. /* restore WUC register */
  798. wr32(E1000_WUC, wuc);
  799. }
  800. igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0);
  801. /* restore MDICNFG setting */
  802. wr32(E1000_MDICNFG, mdicnfg);
  803. return ret_val;
  804. }
  805. /**
  806. * igb_get_cfg_done_i210 - Read config done bit
  807. * @hw: pointer to the HW structure
  808. *
  809. * Read the management control register for the config done bit for
  810. * completion status. NOTE: silicon which is EEPROM-less will fail trying
  811. * to read the config done bit, so an error is *ONLY* logged and returns
  812. * 0. If we were to return with error, EEPROM-less silicon
  813. * would not be able to be reset or change link.
  814. **/
  815. s32 igb_get_cfg_done_i210(struct e1000_hw *hw)
  816. {
  817. s32 timeout = PHY_CFG_TIMEOUT;
  818. u32 mask = E1000_NVM_CFG_DONE_PORT_0;
  819. while (timeout) {
  820. if (rd32(E1000_EEMNGCTL_I210) & mask)
  821. break;
  822. usleep_range(1000, 2000);
  823. timeout--;
  824. }
  825. if (!timeout)
  826. hw_dbg("MNG configuration cycle has not completed.\n");
  827. return 0;
  828. }