lib/librte_pmd_e1000/e1000/e1000_i210.c
/*******************************************************************************

Copyright (c) 2001-2012, Intel Corporation
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Redistributions in binary form must reproduce the above copyright
    notice, this list of conditions and the following disclaimer in the
    documentation and/or other materials provided with the distribution.

 3. Neither the name of the Intel Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include "e1000_api.h"


STATIC s32 e1000_acquire_nvm_i210(struct e1000_hw *hw);
STATIC void e1000_release_nvm_i210(struct e1000_hw *hw);
STATIC s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw);
STATIC void e1000_put_hw_semaphore_i210(struct e1000_hw *hw);
STATIC s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
                                u16 *data);
STATIC s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw);
STATIC s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
STATIC s32 e1000_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
                               u16 *data);

/**
 *  e1000_acquire_nvm_i210 - Request for access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Acquire the necessary semaphores for exclusive access to the EEPROM.
 *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
 *  Return successful if access grant bit set, else clear the request for
 *  EEPROM access and return -E1000_ERR_NVM (-1).
 **/
STATIC s32 e1000_acquire_nvm_i210(struct e1000_hw *hw)
{
        s32 ret_val;

        DEBUGFUNC("e1000_acquire_nvm_i210");

        ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);

        return ret_val;
}

/**
 *  e1000_release_nvm_i210 - Release exclusive access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
 *  then release the semaphores acquired.
 **/
STATIC void e1000_release_nvm_i210(struct e1000_hw *hw)
{
        DEBUGFUNC("e1000_release_nvm_i210");

        e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}

/**
 *  e1000_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to acquire
 *
 *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
 *  will also specify which port we're acquiring the lock for.
 **/
s32 e1000_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
        u32 swfw_sync;
        u32 swmask = mask;
        u32 fwmask = mask << 16;
        s32 ret_val = E1000_SUCCESS;
        s32 i = 0, timeout = 200; /* FIXME: find real value to use here */

        DEBUGFUNC("e1000_acquire_swfw_sync_i210");

        while (i < timeout) {
                if (e1000_get_hw_semaphore_i210(hw)) {
                        ret_val = -E1000_ERR_SWFW_SYNC;
                        goto out;
                }

                swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
                if (!(swfw_sync & fwmask))
                        break;

                /*
                 * Firmware currently using resource (fwmask)
                 */
                e1000_put_hw_semaphore_i210(hw);
                msec_delay_irq(5);
                i++;
        }

        if (i == timeout) {
                DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
                ret_val = -E1000_ERR_SWFW_SYNC;
                goto out;
        }

        swfw_sync |= swmask;
        E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);

        e1000_put_hw_semaphore_i210(hw);

out:
        return ret_val;
}

/**
 *  e1000_release_swfw_sync_i210 - Release SW/FW semaphore
 *  @hw: pointer to the HW structure
 *  @mask: specifies which semaphore to release
 *
 *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
 *  will also specify which port we're releasing the lock for.
 **/
void e1000_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
        u32 swfw_sync;

        DEBUGFUNC("e1000_release_swfw_sync_i210");

        while (e1000_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
                ; /* Empty */

        swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
        swfw_sync &= ~mask;
        E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);

        e1000_put_hw_semaphore_i210(hw);
}

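/*
 * Illustrative usage sketch, not part of the original driver: how a caller
 * is expected to bracket an NVM access with the SW/FW semaphore helpers
 * above.  The guard macro and the helper name are hypothetical and exist
 * only for illustration.
 */
#ifdef E1000_I210_USAGE_EXAMPLES
static s32 example_swfw_guarded_access(struct e1000_hw *hw)
{
        s32 ret_val;

        /* Ask firmware for exclusive use of the EEPROM resource. */
        ret_val = e1000_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
        if (ret_val != E1000_SUCCESS)
                return ret_val;

        /* ... access the NVM or PHY registers here ... */

        /* Hand the resource back so firmware can take it again. */
        e1000_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);

        return E1000_SUCCESS;
}
#endif /* E1000_I210_USAGE_EXAMPLES */
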
/**
 *  e1000_get_hw_semaphore_i210 - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the HW semaphore to access the PHY or NVM
 **/
STATIC s32 e1000_get_hw_semaphore_i210(struct e1000_hw *hw)
{
        u32 swsm;
        s32 ret_val = E1000_SUCCESS;
        s32 timeout = hw->nvm.word_size + 1;
        s32 i = 0;

        DEBUGFUNC("e1000_get_hw_semaphore_i210");

        /* Get the FW semaphore. */
        for (i = 0; i < timeout; i++) {
                swsm = E1000_READ_REG(hw, E1000_SWSM);
                E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

                /* Semaphore acquired if bit latched */
                if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
                        break;

                usec_delay(50);
        }

        if (i == timeout) {
                /* Release semaphores */
                e1000_put_hw_semaphore_generic(hw);
                DEBUGOUT("Driver can't access the NVM\n");
                ret_val = -E1000_ERR_NVM;
                goto out;
        }

out:
        return ret_val;
}

/**
 *  e1000_put_hw_semaphore_i210 - Release hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Release hardware semaphore used to access the PHY or NVM
 **/
STATIC void e1000_put_hw_semaphore_i210(struct e1000_hw *hw)
{
        u32 swsm;

        DEBUGFUNC("e1000_put_hw_semaphore_i210");

        swsm = E1000_READ_REG(hw, E1000_SWSM);

        swsm &= ~E1000_SWSM_SWESMBI;

        E1000_WRITE_REG(hw, E1000_SWSM, swsm);
}

/**
 *  e1000_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
 *  @hw: pointer to the HW structure
 *  @offset: offset of word in the Shadow Ram to read
 *  @words: number of words to read
 *  @data: words read from the Shadow Ram
 *
 *  Reads 16-bit words from the Shadow Ram using the EERD register.
 *  Uses necessary synchronization semaphores.
 **/
s32 e1000_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
                             u16 *data)
{
        s32 status = E1000_SUCCESS;
        u16 i, count;

        DEBUGFUNC("e1000_read_nvm_srrd_i210");

        /* We cannot hold synchronization semaphores for too long,
         * because of forceful takeover procedure. However it is more efficient
         * to read in bursts than synchronizing access for each word. */
        for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
                count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
                        E1000_EERD_EEWR_MAX_COUNT : (words - i);
                if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
                        status = e1000_read_nvm_eerd(hw, offset + i, count,
                                                     data + i);
                        hw->nvm.ops.release(hw);
                } else {
                        status = E1000_ERR_SWFW_SYNC;
                }

                if (status != E1000_SUCCESS)
                        break;
        }

        return status;
}

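/*
 * Illustrative sketch, not part of the original driver, compiled only when
 * the hypothetical E1000_I210_USAGE_EXAMPLES macro is defined: reading a
 * small block of Shadow RAM words through e1000_read_nvm_srrd_i210(), which
 * takes and drops the synchronization semaphore internally for each burst.
 */
#ifdef E1000_I210_USAGE_EXAMPLES
static s32 example_read_shadow_ram_block(struct e1000_hw *hw)
{
        u16 buf[8];
        s32 ret_val;

        /* Read eight 16-bit words starting at word offset 0. */
        ret_val = e1000_read_nvm_srrd_i210(hw, 0, 8, buf);
        if (ret_val != E1000_SUCCESS)
                DEBUGOUT("Shadow RAM example read failed\n");

        return ret_val;
}
#endif /* E1000_I210_USAGE_EXAMPLES */
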
/**
 *  e1000_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
 *  @hw: pointer to the HW structure
 *  @offset: offset within the Shadow RAM to be written to
 *  @words: number of words to write
 *  @data: 16 bit word(s) to be written to the Shadow RAM
 *
 *  Writes data to Shadow RAM at offset using EEWR register.
 *
 *  If e1000_update_nvm_checksum is not called after this function, the
 *  data will not be committed to FLASH and also Shadow RAM will most likely
 *  contain an invalid checksum.
 *
 *  If error code is returned, data and Shadow RAM may be inconsistent - buffer
 *  partially written.
 **/
s32 e1000_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
                              u16 *data)
{
        s32 status = E1000_SUCCESS;
        u16 i, count;

        DEBUGFUNC("e1000_write_nvm_srwr_i210");

        /* We cannot hold synchronization semaphores for too long,
         * because of forceful takeover procedure. However it is more efficient
         * to write in bursts than synchronizing access for each word. */
        for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
                count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
                        E1000_EERD_EEWR_MAX_COUNT : (words - i);
                if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
                        status = e1000_write_nvm_srwr(hw, offset + i, count,
                                                      data + i);
                        hw->nvm.ops.release(hw);
                } else {
                        status = E1000_ERR_SWFW_SYNC;
                }

                if (status != E1000_SUCCESS)
                        break;
        }

        return status;
}

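/*
 * Illustrative sketch, not part of the original driver (hypothetical guard
 * macro and helper name): writing Shadow RAM words and then committing
 * them.  As the comment above notes, the write only lands in Shadow RAM, so
 * e1000_update_nvm_checksum_i210() must follow to fix the checksum and
 * flush the data to flash.
 */
#ifdef E1000_I210_USAGE_EXAMPLES
static s32 example_write_and_commit(struct e1000_hw *hw, u16 offset,
                                    u16 *data, u16 words)
{
        s32 ret_val;

        ret_val = e1000_write_nvm_srwr_i210(hw, offset, words, data);
        if (ret_val != E1000_SUCCESS)
                return ret_val;

        /* Recompute the checksum word and commit Shadow RAM to flash. */
        return e1000_update_nvm_checksum_i210(hw);
}
#endif /* E1000_I210_USAGE_EXAMPLES */
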
/**
 *  e1000_write_nvm_srwr - Write to Shadow Ram using EEWR
 *  @hw: pointer to the HW structure
 *  @offset: offset within the Shadow Ram to be written to
 *  @words: number of words to write
 *  @data: 16 bit word(s) to be written to the Shadow Ram
 *
 *  Writes data to Shadow Ram at offset using EEWR register.
 *
 *  If e1000_update_nvm_checksum is not called after this function, the
 *  Shadow Ram will most likely contain an invalid checksum.
 **/
STATIC s32 e1000_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
                                u16 *data)
{
        struct e1000_nvm_info *nvm = &hw->nvm;
        u32 i, k, eewr = 0;
        u32 attempts = 100000;
        s32 ret_val = E1000_SUCCESS;

        DEBUGFUNC("e1000_write_nvm_srwr");

        /*
         * A check for invalid values:  offset too large, too many words,
         * too many words for the offset, and not enough words.
         */
        if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
            (words == 0)) {
                DEBUGOUT("nvm parameter(s) out of bounds\n");
                ret_val = -E1000_ERR_NVM;
                goto out;
        }

        for (i = 0; i < words; i++) {
                eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
                        (data[i] << E1000_NVM_RW_REG_DATA) |
                        E1000_NVM_RW_REG_START;

                E1000_WRITE_REG(hw, E1000_SRWR, eewr);

                for (k = 0; k < attempts; k++) {
                        if (E1000_NVM_RW_REG_DONE &
                            E1000_READ_REG(hw, E1000_SRWR)) {
                                ret_val = E1000_SUCCESS;
                                break;
                        }
                        usec_delay(5);
                }

                if (ret_val != E1000_SUCCESS) {
                        DEBUGOUT("Shadow RAM write EEWR timed out\n");
                        break;
                }
        }

out:
        return ret_val;
}

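/*
 * Illustrative sketch (hypothetical guard macro and helper name): composing
 * a single SRWR command word the same way e1000_write_nvm_srwr() does above
 * -- the word address shifted by E1000_NVM_RW_ADDR_SHIFT, the data shifted
 * by E1000_NVM_RW_REG_DATA, and the START bit set.  Hardware reports
 * completion by setting E1000_NVM_RW_REG_DONE in the same register.
 */
#ifdef E1000_I210_USAGE_EXAMPLES
static u32 example_build_srwr_command(u16 offset, u16 data)
{
        return ((u32)offset << E1000_NVM_RW_ADDR_SHIFT) |
               ((u32)data << E1000_NVM_RW_REG_DATA) |
               E1000_NVM_RW_REG_START;
}
#endif /* E1000_I210_USAGE_EXAMPLES */
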
/**
 *  e1000_read_nvm_i211 - Read NVM wrapper function for I211
 *  @hw: pointer to the HW structure
 *  @offset: the word address (aka eeprom offset) to read
 *  @words: number of 16-bit words to read
 *  @data: pointer to the data read
 *
 *  Wrapper function to return data formerly found in the NVM.
 **/
STATIC s32 e1000_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
                               u16 *data)
{
        s32 ret_val = E1000_SUCCESS;

        DEBUGFUNC("e1000_read_nvm_i211");

        /* Only the MAC addr is required to be present in the iNVM */
        switch (offset) {
        case NVM_MAC_ADDR:
                ret_val = e1000_read_invm_i211(hw, (u8)offset, &data[0]);
                ret_val |= e1000_read_invm_i211(hw, (u8)offset+1, &data[1]);
                ret_val |= e1000_read_invm_i211(hw, (u8)offset+2, &data[2]);
                if (ret_val != E1000_SUCCESS)
                        DEBUGOUT("MAC Addr not found in iNVM\n");
                break;
        case NVM_INIT_CTRL_2:
                ret_val = e1000_read_invm_i211(hw, (u8)offset, data);
                if (ret_val != E1000_SUCCESS) {
                        *data = NVM_INIT_CTRL_2_DEFAULT_I211;
                        ret_val = E1000_SUCCESS;
                }
                break;
        case NVM_INIT_CTRL_4:
                ret_val = e1000_read_invm_i211(hw, (u8)offset, data);
                if (ret_val != E1000_SUCCESS) {
                        *data = NVM_INIT_CTRL_4_DEFAULT_I211;
                        ret_val = E1000_SUCCESS;
                }
                break;
        case NVM_LED_1_CFG:
                ret_val = e1000_read_invm_i211(hw, (u8)offset, data);
                if (ret_val != E1000_SUCCESS) {
                        *data = NVM_LED_1_CFG_DEFAULT_I211;
                        ret_val = E1000_SUCCESS;
                }
                break;
        case NVM_LED_0_2_CFG:
                ret_val = e1000_read_invm_i211(hw, (u8)offset, data);
                if (ret_val != E1000_SUCCESS) {
                        *data = NVM_LED_0_2_CFG_DEFAULT_I211;
                        ret_val = E1000_SUCCESS;
                }
                break;
        case NVM_ID_LED_SETTINGS:
                ret_val = e1000_read_invm_i211(hw, (u8)offset, data);
                if (ret_val != E1000_SUCCESS) {
                        *data = ID_LED_RESERVED_FFFF;
                        ret_val = E1000_SUCCESS;
                }
                break;
        case NVM_SUB_DEV_ID:
                *data = hw->subsystem_device_id;
                break;
        case NVM_SUB_VEN_ID:
                *data = hw->subsystem_vendor_id;
                break;
        case NVM_DEV_ID:
                *data = hw->device_id;
                break;
        case NVM_VEN_ID:
                *data = hw->vendor_id;
                break;
        default:
                DEBUGOUT1("NVM word 0x%02x is not mapped.\n", offset);
                *data = NVM_RESERVED_WORD;
                break;
        }
        return ret_val;
}

/**
 *  e1000_read_invm_i211 - Reads OTP
 *  @hw: pointer to the HW structure
 *  @address: the word address (aka eeprom offset) to read
 *  @data: pointer to the data read
 *
 *  Reads 16-bit words from the OTP.  Returns an error when the word is not
 *  stored in OTP.
 **/
s32 e1000_read_invm_i211(struct e1000_hw *hw, u8 address, u16 *data)
{
        s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
        u32 invm_dword;
        u16 i;
        u8 record_type, word_address;

        DEBUGFUNC("e1000_read_invm_i211");

        for (i = 0; i < E1000_INVM_SIZE; i++) {
                invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
                /* Get record type */
                record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
                if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
                        break;
                if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
                        i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
                if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
                        i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
                if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
                        word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
                        if (word_address == address) {
                                *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
                                DEBUGOUT2("Read INVM Word 0x%02x = %x",
                                          address, *data);
                                status = E1000_SUCCESS;
                                break;
                        }
                }
        }
        if (status != E1000_SUCCESS)
                DEBUGOUT1("Requested word 0x%02x not found in OTP\n", address);
        return status;
}

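/*
 * Illustrative sketch (hypothetical guard macro and helper name): pulling a
 * single word out of the i211 OTP with e1000_read_invm_i211() and falling
 * back to a default when that word was never programmed, mirroring what
 * e1000_read_nvm_i211() does for the LED configuration words.
 */
#ifdef E1000_I210_USAGE_EXAMPLES
static u16 example_read_led1_cfg(struct e1000_hw *hw)
{
        u16 led_cfg;

        if (e1000_read_invm_i211(hw, (u8)NVM_LED_1_CFG, &led_cfg) !=
            E1000_SUCCESS)
                led_cfg = NVM_LED_1_CFG_DEFAULT_I211;

        return led_cfg;
}
#endif /* E1000_I210_USAGE_EXAMPLES */
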
/**
 *  e1000_read_invm_version - Reads iNVM version and image type
 *  @hw: pointer to the HW structure
 *  @invm_ver: version structure for the version read
 *
 *  Reads iNVM version and image type.
 **/
s32 e1000_read_invm_version(struct e1000_hw *hw,
                            struct e1000_fw_version *invm_ver)
{
        u32 *record = NULL;
        u32 *next_record = NULL;
        u32 i = 0;
        u32 invm_dword = 0;
        u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
                                             E1000_INVM_RECORD_SIZE_IN_BYTES);
        u32 buffer[E1000_INVM_SIZE];
        s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
        u16 version = 0;

        DEBUGFUNC("e1000_read_invm_version");

        /* Read iNVM memory */
        for (i = 0; i < E1000_INVM_SIZE; i++) {
                invm_dword = E1000_READ_REG(hw, E1000_INVM_DATA_REG(i));
                buffer[i] = invm_dword;
        }

        /* Read version number */
        for (i = 1; i < invm_blocks; i++) {
                record = &buffer[invm_blocks - i];
                next_record = &buffer[invm_blocks - i + 1];

                /* Check if we have first version location used */
                if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
                        version = 0;
                        status = E1000_SUCCESS;
                        break;
                }
                /* Check if we have second version location used */
                else if ((i == 1) &&
                         ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
                        version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
                        status = E1000_SUCCESS;
                        break;
                }
                /*
                 * Check if we have odd version location
                 * used and it is the last one used
                 */
                else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
                         ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
                         (i != 1))) {
                        version = (*next_record & E1000_INVM_VER_FIELD_TWO)
                                  >> 13;
                        status = E1000_SUCCESS;
                        break;
                }
                /*
                 * Check if we have even version location
                 * used and it is the last one used
                 */
                else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
                         ((*record & 0x3) == 0)) {
                        version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
                        status = E1000_SUCCESS;
                        break;
                }
        }

        if (status == E1000_SUCCESS) {
                invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
                                        >> E1000_INVM_MAJOR_SHIFT;
                invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
        }
        /* Read Image Type */
        for (i = 1; i < invm_blocks; i++) {
                record = &buffer[invm_blocks - i];
                next_record = &buffer[invm_blocks - i + 1];

                /* Check if we have image type in first location used */
                if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
                        invm_ver->invm_img_type = 0;
                        status = E1000_SUCCESS;
                        break;
                }
                /* Check if we have image type in the last location used */
                else if ((((*record & 0x3) == 0) &&
                         ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
                         ((((*record & 0x3) != 0) && (i != 1)))) {
                        invm_ver->invm_img_type =
                                (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
                        status = E1000_SUCCESS;
                        break;
                }
        }
        return status;
}

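/*
 * Illustrative sketch (hypothetical guard macro and helper name):
 * retrieving the iNVM version fields through e1000_read_invm_version().
 * struct e1000_fw_version comes from the shared-code headers already
 * included here.
 */
#ifdef E1000_I210_USAGE_EXAMPLES
static void example_report_invm_version(struct e1000_hw *hw)
{
        struct e1000_fw_version fw_ver = {0};

        if (e1000_read_invm_version(hw, &fw_ver) == E1000_SUCCESS)
                DEBUGOUT2("iNVM image version %d.%d\n",
                          fw_ver.invm_major, fw_ver.invm_minor);
}
#endif /* E1000_I210_USAGE_EXAMPLES */
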
/**
 *  e1000_validate_nvm_checksum_i210 - Validate EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
 **/
s32 e1000_validate_nvm_checksum_i210(struct e1000_hw *hw)
{
        s32 status = E1000_SUCCESS;
        s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);

        DEBUGFUNC("e1000_validate_nvm_checksum_i210");

        if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {

                /*
                 * Temporarily replace the semaphore-grabbing read function
                 * with one that skips the semaphore, since it is already
                 * held here.
                 */
                read_op_ptr = hw->nvm.ops.read;
                hw->nvm.ops.read = e1000_read_nvm_eerd;

                status = e1000_validate_nvm_checksum_generic(hw);

                /* Revert original read operation. */
                hw->nvm.ops.read = read_op_ptr;

                hw->nvm.ops.release(hw);
        } else {
                status = E1000_ERR_SWFW_SYNC;
        }

        return status;
}


/**
 *  e1000_update_nvm_checksum_i210 - Update EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
 *  up to the checksum.  Then calculates the EEPROM checksum and writes the
 *  value to the EEPROM.  Next, the EEPROM data is committed to flash.
 **/
s32 e1000_update_nvm_checksum_i210(struct e1000_hw *hw)
{
        s32 ret_val = E1000_SUCCESS;
        u16 checksum = 0;
        u16 i, nvm_data;

        DEBUGFUNC("e1000_update_nvm_checksum_i210");

        /*
         * Read the first word from the EEPROM. If this times out or fails, do
         * not continue or we could be in for a very long wait while every
         * EEPROM read fails
         */
        ret_val = e1000_read_nvm_eerd(hw, 0, 1, &nvm_data);
        if (ret_val != E1000_SUCCESS) {
                DEBUGOUT("EEPROM read failed\n");
                goto out;
        }

        if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
                /*
                 * Do not use hw->nvm.ops.write, hw->nvm.ops.read
                 * because we do not want to take the synchronization
                 * semaphores twice here.
                 */

                for (i = 0; i < NVM_CHECKSUM_REG; i++) {
                        ret_val = e1000_read_nvm_eerd(hw, i, 1, &nvm_data);
                        if (ret_val) {
                                hw->nvm.ops.release(hw);
                                DEBUGOUT("NVM Read Error while updating checksum.\n");
                                goto out;
                        }
                        checksum += nvm_data;
                }
                checksum = (u16) NVM_SUM - checksum;
                ret_val = e1000_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
                                                &checksum);
                if (ret_val != E1000_SUCCESS) {
                        hw->nvm.ops.release(hw);
                        DEBUGOUT("NVM Write Error while updating checksum.\n");
                        goto out;
                }

                hw->nvm.ops.release(hw);

                ret_val = e1000_update_flash_i210(hw);
        } else {
                ret_val = E1000_ERR_SWFW_SYNC;
        }
out:
        return ret_val;
}

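/*
 * Illustrative sketch (hypothetical guard macro and helper name): the
 * update/validate sequence a caller might run after changing Shadow RAM
 * contents -- recompute and commit the checksum, then confirm the image
 * again sums to the expected 0xBABA.
 */
#ifdef E1000_I210_USAGE_EXAMPLES
static s32 example_commit_and_validate(struct e1000_hw *hw)
{
        s32 ret_val;

        ret_val = e1000_update_nvm_checksum_i210(hw);
        if (ret_val != E1000_SUCCESS)
                return ret_val;

        return e1000_validate_nvm_checksum_i210(hw);
}
#endif /* E1000_I210_USAGE_EXAMPLES */
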
/**
 *  e1000_update_flash_i210 - Commit EEPROM to the flash
 *  @hw: pointer to the HW structure
 *
 *  Sets the FLUPD bit in EECD to commit the Shadow RAM contents to the
 *  device flash, then polls until the update completes.
 **/
s32 e1000_update_flash_i210(struct e1000_hw *hw)
{
        s32 ret_val = E1000_SUCCESS;
        u32 flup;

        DEBUGFUNC("e1000_update_flash_i210");

        ret_val = e1000_pool_flash_update_done_i210(hw);
        if (ret_val == -E1000_ERR_NVM) {
                DEBUGOUT("Flash update time out\n");
                goto out;
        }

        flup = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD_I210;
        E1000_WRITE_REG(hw, E1000_EECD, flup);

        ret_val = e1000_pool_flash_update_done_i210(hw);
        if (ret_val == E1000_SUCCESS)
                DEBUGOUT("Flash update complete\n");
        else
                DEBUGOUT("Flash update time out\n");

out:
        return ret_val;
}

/**
 *  e1000_pool_flash_update_done_i210 - Poll FLUDONE status
 *  @hw: pointer to the HW structure
 *
 *  Polls the EECD register until the FLUDONE bit is set or the attempt
 *  limit is reached.
 **/
STATIC s32 e1000_pool_flash_update_done_i210(struct e1000_hw *hw)
{
        s32 ret_val = -E1000_ERR_NVM;
        u32 i, reg;

        DEBUGFUNC("e1000_pool_flash_update_done_i210");

        for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
                reg = E1000_READ_REG(hw, E1000_EECD);
                if (reg & E1000_EECD_FLUDONE_I210) {
                        ret_val = E1000_SUCCESS;
                        break;
                }
                usec_delay(5);
        }

        return ret_val;
}

/**
 *  e1000_init_nvm_params_i210 - Initialize i210 NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize the i210 NVM parameters and function pointers.
 **/
STATIC s32 e1000_init_nvm_params_i210(struct e1000_hw *hw)
{
        s32 ret_val = E1000_SUCCESS;
        struct e1000_nvm_info *nvm = &hw->nvm;

        DEBUGFUNC("e1000_init_nvm_params_i210");

        ret_val = e1000_init_nvm_params_82575(hw);

        nvm->ops.acquire = e1000_acquire_nvm_i210;
        nvm->ops.release = e1000_release_nvm_i210;
        nvm->ops.read    = e1000_read_nvm_srrd_i210;
        nvm->ops.write   = e1000_write_nvm_srwr_i210;
        nvm->ops.valid_led_default = e1000_valid_led_default_i210;
        nvm->ops.validate = e1000_validate_nvm_checksum_i210;
        nvm->ops.update   = e1000_update_nvm_checksum_i210;

        return ret_val;
}

/**
 *  e1000_init_nvm_params_i211 - Initialize i211 NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize the NVM parameters and function pointers for i211.
 **/
STATIC s32 e1000_init_nvm_params_i211(struct e1000_hw *hw)
{
        struct e1000_nvm_info *nvm = &hw->nvm;

        DEBUGFUNC("e1000_init_nvm_params_i211");

        nvm->ops.acquire  = e1000_acquire_nvm_i210;
        nvm->ops.release  = e1000_release_nvm_i210;
        nvm->ops.read     = e1000_read_nvm_i211;
        nvm->ops.valid_led_default = e1000_valid_led_default_i210;
        nvm->ops.write    = e1000_null_write_nvm;
        nvm->ops.validate = e1000_null_ops_generic;
        nvm->ops.update   = e1000_null_ops_generic;

        return E1000_SUCCESS;
}

/**
 *  e1000_init_function_pointers_i210 - Init func ptrs.
 *  @hw: pointer to the HW structure
 *
 *  Called to initialize all function pointers and parameters.
 **/
void e1000_init_function_pointers_i210(struct e1000_hw *hw)
{
        e1000_init_function_pointers_82575(hw);

        switch (hw->mac.type) {
        case e1000_i210:
                hw->nvm.ops.init_params = e1000_init_nvm_params_i210;
                break;
        case e1000_i211:
                hw->nvm.ops.init_params = e1000_init_nvm_params_i211;
                break;
        default:
                break;
        }
        return;
}

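/*
 * Illustrative sketch (hypothetical guard macro and helper name): how a
 * caller wires up the i210/i211 NVM ops -- install the init_params hook
 * with e1000_init_function_pointers_i210(), then invoke it so hw->nvm.ops
 * points at the accessors defined above before any NVM traffic is issued.
 */
#ifdef E1000_I210_USAGE_EXAMPLES
static s32 example_setup_nvm_ops(struct e1000_hw *hw)
{
        e1000_init_function_pointers_i210(hw);

        if (hw->nvm.ops.init_params == NULL)
                return -E1000_ERR_CONFIG;

        return hw->nvm.ops.init_params(hw);
}
#endif /* E1000_I210_USAGE_EXAMPLES */
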
/**
 *  e1000_valid_led_default_i210 - Verify a valid default LED config
 *  @hw: pointer to the HW structure
 *  @data: pointer to the NVM (EEPROM)
 *
 *  Read the EEPROM for the current default LED configuration.  If the
 *  LED configuration is not valid, set to a valid LED configuration.
 **/
STATIC s32 e1000_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
{
        s32 ret_val;

        DEBUGFUNC("e1000_valid_led_default_i210");

        ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
        if (ret_val) {
                DEBUGOUT("NVM Read Error\n");
                goto out;
        }

        if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
                switch (hw->phy.media_type) {
                case e1000_media_type_internal_serdes:
                        *data = ID_LED_DEFAULT_I210_SERDES;
                        break;
                case e1000_media_type_copper:
                default:
                        *data = ID_LED_DEFAULT_I210;
                        break;
                }
        }
out:
        return ret_val;
}