1 /*******************************************************************************
3 Copyright (c) 2013 - 2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
34 #include "i40e_prototype.h"
36 enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
38 enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
40 enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
41 u16 *words, u16 *data);
42 enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
43 u16 *words, u16 *data);
44 enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
45 u32 offset, u16 words, void *data,
49 * i40e_init_nvm_ops - Initialize NVM function pointers
50 * @hw: pointer to the HW structure
52 * Setup the function pointers and the NVM info structure. Should be called
53 * once per NVM initialization, e.g. inside the i40e_init_shared_code().
54 * Please notice that the NVM term is used here (& in all methods covered
55 * in this file) as an equivalent of the FLASH part mapped into the SR.
56 * We are accessing FLASH always thru the Shadow RAM.
58 enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
60 struct i40e_nvm_info *nvm = &hw->nvm;
61 enum i40e_status_code ret_code = I40E_SUCCESS;
65 DEBUGFUNC("i40e_init_nvm");
67 /* The SR size is stored regardless of the nvm programming mode
68 * as the blank mode may be used in the factory line.
70 gens = rd32(hw, I40E_GLNVM_GENS);
71 sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
72 I40E_GLNVM_GENS_SR_SIZE_SHIFT);
73 /* Switching to words (sr_size contains power of 2KB) */
74 nvm->sr_size = (1 << sr_size) * I40E_SR_WORDS_IN_1KB;
76 /* Check if we are in the normal or blank NVM programming mode */
77 fla = rd32(hw, I40E_GLNVM_FLA);
78 if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
80 nvm->timeout = I40E_MAX_NVM_TIMEOUT;
81 nvm->blank_nvm_mode = false;
82 } else { /* Blank programming mode */
83 nvm->blank_nvm_mode = true;
84 ret_code = I40E_ERR_NVM_BLANK_MODE;
85 i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
92 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
93 * @hw: pointer to the HW structure
94 * @access: NVM access type (read or write)
96 * This function will request NVM ownership for reading
97 * via the proper Admin Command.
99 enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
100 enum i40e_aq_resource_access_type access)
102 enum i40e_status_code ret_code = I40E_SUCCESS;
106 DEBUGFUNC("i40e_acquire_nvm");
108 if (hw->nvm.blank_nvm_mode)
109 goto i40e_i40e_acquire_nvm_exit;
111 ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
112 0, &time_left, NULL);
113 /* Reading the Global Device Timer */
114 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
116 /* Store the timeout */
117 hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
120 i40e_debug(hw, I40E_DEBUG_NVM,
121 "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
122 access, time_left, ret_code, hw->aq.asq_last_status);
124 if (ret_code && time_left) {
125 /* Poll until the current NVM owner timeouts */
126 timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
127 while ((gtime < timeout) && time_left) {
129 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
130 ret_code = i40e_aq_request_resource(hw,
131 I40E_NVM_RESOURCE_ID,
132 access, 0, &time_left,
134 if (ret_code == I40E_SUCCESS) {
135 hw->nvm.hw_semaphore_timeout =
136 I40E_MS_TO_GTIME(time_left) + gtime;
140 if (ret_code != I40E_SUCCESS) {
141 hw->nvm.hw_semaphore_timeout = 0;
142 i40e_debug(hw, I40E_DEBUG_NVM,
143 "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
144 time_left, ret_code, hw->aq.asq_last_status);
148 i40e_i40e_acquire_nvm_exit:
153 * i40e_release_nvm - Generic request for releasing the NVM ownership
154 * @hw: pointer to the HW structure
156 * This function will release NVM resource via the proper Admin Command.
158 void i40e_release_nvm(struct i40e_hw *hw)
160 DEBUGFUNC("i40e_release_nvm");
162 if (!hw->nvm.blank_nvm_mode)
163 i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
167 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
168 * @hw: pointer to the HW structure
170 * Polls the SRCTL Shadow RAM register done bit.
172 static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
174 enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
177 DEBUGFUNC("i40e_poll_sr_srctl_done_bit");
179 /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
180 for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
181 srctl = rd32(hw, I40E_GLNVM_SRCTL);
182 if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
183 ret_code = I40E_SUCCESS;
188 if (ret_code == I40E_ERR_TIMEOUT)
189 i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
194 * i40e_read_nvm_word - Reads Shadow RAM
195 * @hw: pointer to the HW structure
196 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
197 * @data: word read from the Shadow RAM
199 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
201 enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
204 return i40e_read_nvm_word_srctl(hw, offset, data);
208 * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
209 * @hw: pointer to the HW structure
210 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
211 * @data: word read from the Shadow RAM
213 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
215 enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
218 enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
221 DEBUGFUNC("i40e_read_nvm_word_srctl");
223 if (offset >= hw->nvm.sr_size) {
224 i40e_debug(hw, I40E_DEBUG_NVM,
225 "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
226 offset, hw->nvm.sr_size);
227 ret_code = I40E_ERR_PARAM;
231 /* Poll the done bit first */
232 ret_code = i40e_poll_sr_srctl_done_bit(hw);
233 if (ret_code == I40E_SUCCESS) {
234 /* Write the address and start reading */
235 sr_reg = (u32)(offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
236 (1 << I40E_GLNVM_SRCTL_START_SHIFT);
237 wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
239 /* Poll I40E_GLNVM_SRCTL until the done bit is set */
240 ret_code = i40e_poll_sr_srctl_done_bit(hw);
241 if (ret_code == I40E_SUCCESS) {
242 sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
243 *data = (u16)((sr_reg &
244 I40E_GLNVM_SRDATA_RDDATA_MASK)
245 >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
248 if (ret_code != I40E_SUCCESS)
249 i40e_debug(hw, I40E_DEBUG_NVM,
250 "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
258 * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
259 * @hw: pointer to the HW structure
260 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
261 * @data: word read from the Shadow RAM
263 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
265 enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
268 enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
270 DEBUGFUNC("i40e_read_nvm_word_aq");
272 ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
273 *data = LE16_TO_CPU(*(__le16 *)data);
279 * i40e_read_nvm_buffer - Reads Shadow RAM buffer
280 * @hw: pointer to the HW structure
281 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
282 * @words: (in) number of words to read; (out) number of words actually read
283 * @data: words read from the Shadow RAM
285 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
286 * method. The buffer read is preceded by the NVM ownership take
287 * and followed by the release.
289 enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
290 u16 *words, u16 *data)
292 return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
296 * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
297 * @hw: pointer to the HW structure
298 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
299 * @words: (in) number of words to read; (out) number of words actually read
300 * @data: words read from the Shadow RAM
302 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
303 * method. The buffer read is preceded by the NVM ownership take
304 * and followed by the release.
306 enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
307 u16 *words, u16 *data)
309 enum i40e_status_code ret_code = I40E_SUCCESS;
312 DEBUGFUNC("i40e_read_nvm_buffer_srctl");
314 /* Loop thru the selected region */
315 for (word = 0; word < *words; word++) {
316 index = offset + word;
317 ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
318 if (ret_code != I40E_SUCCESS)
322 /* Update the number of words read from the Shadow RAM */
329 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
330 * @hw: pointer to the HW structure
331 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
332 * @words: (in) number of words to read; (out) number of words actually read
333 * @data: words read from the Shadow RAM
335 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
336 * method. The buffer read is preceded by the NVM ownership take
337 * and followed by the release.
339 enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
340 u16 *words, u16 *data)
342 enum i40e_status_code ret_code;
343 u16 read_size = *words;
344 bool last_cmd = false;
348 DEBUGFUNC("i40e_read_nvm_buffer_aq");
351 /* Calculate number of bytes we should read in this step.
352 * FVL AQ do not allow to read more than one page at a time or
353 * to cross page boundaries.
355 if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
356 read_size = min(*words,
357 (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
358 (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
360 read_size = min((*words - words_read),
361 I40E_SR_SECTOR_SIZE_IN_WORDS);
363 /* Check if this is last command, if so set proper flag */
364 if ((words_read + read_size) >= *words)
367 ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
368 data + words_read, last_cmd);
369 if (ret_code != I40E_SUCCESS)
370 goto read_nvm_buffer_aq_exit;
372 /* Increment counter for words already read and move offset to
375 words_read += read_size;
377 } while (words_read < *words);
379 for (i = 0; i < *words; i++)
380 data[i] = LE16_TO_CPU(((__le16 *)data)[i]);
382 read_nvm_buffer_aq_exit:
388 * i40e_read_nvm_aq - Read Shadow RAM.
389 * @hw: pointer to the HW structure.
390 * @module_pointer: module pointer location in words from the NVM beginning
391 * @offset: offset in words from module start
392 * @words: number of words to write
393 * @data: buffer with words to write to the Shadow RAM
394 * @last_command: tells the AdminQ that this is the last command
396 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
398 enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
399 u32 offset, u16 words, void *data,
402 enum i40e_status_code ret_code = I40E_ERR_NVM;
403 struct i40e_asq_cmd_details cmd_details;
405 DEBUGFUNC("i40e_read_nvm_aq");
407 memset(&cmd_details, 0, sizeof(cmd_details));
408 cmd_details.wb_desc = &hw->nvm_wb_desc;
410 /* Here we are checking the SR limit only for the flat memory model.
411 * We cannot do it for the module-based model, as we did not acquire
412 * the NVM resource yet (we cannot get the module pointer value).
413 * Firmware will check the module-based model.
415 if ((offset + words) > hw->nvm.sr_size)
416 i40e_debug(hw, I40E_DEBUG_NVM,
417 "NVM write error: offset %d beyond Shadow RAM limit %d\n",
418 (offset + words), hw->nvm.sr_size);
419 else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
420 /* We can write only up to 4KB (one sector), in one AQ write */
421 i40e_debug(hw, I40E_DEBUG_NVM,
422 "NVM write fail error: tried to write %d words, limit is %d.\n",
423 words, I40E_SR_SECTOR_SIZE_IN_WORDS);
424 else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
425 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
426 /* A single write cannot spread over two sectors */
427 i40e_debug(hw, I40E_DEBUG_NVM,
428 "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
431 ret_code = i40e_aq_read_nvm(hw, module_pointer,
432 2 * offset, /*bytes*/
434 data, last_command, &cmd_details);
440 * i40e_write_nvm_aq - Writes Shadow RAM.
441 * @hw: pointer to the HW structure.
442 * @module_pointer: module pointer location in words from the NVM beginning
443 * @offset: offset in words from module start
444 * @words: number of words to write
445 * @data: buffer with words to write to the Shadow RAM
446 * @last_command: tells the AdminQ that this is the last command
448 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
450 enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
451 u32 offset, u16 words, void *data,
454 enum i40e_status_code ret_code = I40E_ERR_NVM;
455 struct i40e_asq_cmd_details cmd_details;
457 DEBUGFUNC("i40e_write_nvm_aq");
459 memset(&cmd_details, 0, sizeof(cmd_details));
460 cmd_details.wb_desc = &hw->nvm_wb_desc;
462 /* Here we are checking the SR limit only for the flat memory model.
463 * We cannot do it for the module-based model, as we did not acquire
464 * the NVM resource yet (we cannot get the module pointer value).
465 * Firmware will check the module-based model.
467 if ((offset + words) > hw->nvm.sr_size)
468 DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n");
469 else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
470 /* We can write only up to 4KB (one sector), in one AQ write */
471 DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n");
472 else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
473 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
474 /* A single write cannot spread over two sectors */
475 DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n");
477 ret_code = i40e_aq_update_nvm(hw, module_pointer,
478 2 * offset, /*bytes*/
480 data, last_command, &cmd_details);
486 * i40e_write_nvm_word - Writes Shadow RAM word
487 * @hw: pointer to the HW structure
488 * @offset: offset of the Shadow RAM word to write
489 * @data: word to write to the Shadow RAM
491 * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
492 * NVM ownership have to be acquired and released (on ARQ completion event
493 * reception) by caller. To commit SR to NVM update checksum function
496 enum i40e_status_code i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
499 DEBUGFUNC("i40e_write_nvm_word");
501 *((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));
503 /* Value 0x00 below means that we treat SR as a flat mem */
504 return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, false);
508 * i40e_write_nvm_buffer - Writes Shadow RAM buffer
509 * @hw: pointer to the HW structure
510 * @module_pointer: module pointer location in words from the NVM beginning
511 * @offset: offset of the Shadow RAM buffer to write
512 * @words: number of words to write
513 * @data: words to write to the Shadow RAM
515 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
516 * NVM ownership must be acquired before calling this function and released
517 * on ARQ completion event reception by caller. To commit SR to NVM update
518 * checksum function should be called.
520 enum i40e_status_code i40e_write_nvm_buffer(struct i40e_hw *hw,
521 u8 module_pointer, u32 offset,
522 u16 words, void *data)
524 __le16 *le_word_ptr = (__le16 *)data;
525 u16 *word_ptr = (u16 *)data;
528 DEBUGFUNC("i40e_write_nvm_buffer");
530 for (i = 0; i < words; i++)
531 le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);
533 /* Here we will only write one buffer as the size of the modules
534 * mirrored in the Shadow RAM is always less than 4K.
536 return i40e_write_nvm_aq(hw, module_pointer, offset, words,
541 * i40e_calc_nvm_checksum - Calculates and returns the checksum
542 * @hw: pointer to hardware structure
543 * @checksum: pointer to the checksum
545 * This function calculates SW Checksum that covers the whole 64kB shadow RAM
546 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
547 * is customer specific and unknown. Therefore, this function skips all maximum
548 * possible size of VPD (1kB).
550 enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
552 enum i40e_status_code ret_code = I40E_SUCCESS;
553 struct i40e_virt_mem vmem;
554 u16 pcie_alt_module = 0;
555 u16 checksum_local = 0;
560 DEBUGFUNC("i40e_calc_nvm_checksum");
562 ret_code = i40e_allocate_virt_mem(hw, &vmem,
563 I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
565 goto i40e_calc_nvm_checksum_exit;
566 data = (u16 *)vmem.va;
568 /* read pointer to VPD area */
569 ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
570 if (ret_code != I40E_SUCCESS) {
571 ret_code = I40E_ERR_NVM_CHECKSUM;
572 goto i40e_calc_nvm_checksum_exit;
575 /* read pointer to PCIe Alt Auto-load module */
576 ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
578 if (ret_code != I40E_SUCCESS) {
579 ret_code = I40E_ERR_NVM_CHECKSUM;
580 goto i40e_calc_nvm_checksum_exit;
583 /* Calculate SW checksum that covers the whole 64kB shadow RAM
584 * except the VPD and PCIe ALT Auto-load modules
586 for (i = 0; i < hw->nvm.sr_size; i++) {
588 if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
589 u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
590 ret_code = i40e_read_nvm_buffer(hw, i, &words, data);
591 if (ret_code != I40E_SUCCESS) {
592 ret_code = I40E_ERR_NVM_CHECKSUM;
593 goto i40e_calc_nvm_checksum_exit;
597 /* Skip Checksum word */
598 if (i == I40E_SR_SW_CHECKSUM_WORD)
600 /* Skip VPD module (convert byte size to word count) */
601 if ((i >= (u32)vpd_module) &&
602 (i < ((u32)vpd_module +
603 (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
606 /* Skip PCIe ALT module (convert byte size to word count) */
607 if ((i >= (u32)pcie_alt_module) &&
608 (i < ((u32)pcie_alt_module +
609 (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
613 checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
616 *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
618 i40e_calc_nvm_checksum_exit:
619 i40e_free_virt_mem(hw, &vmem);
624 * i40e_update_nvm_checksum - Updates the NVM checksum
625 * @hw: pointer to hardware structure
627 * NVM ownership must be acquired before calling this function and released
628 * on ARQ completion event reception by caller.
629 * This function will commit SR to NVM.
631 enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
633 enum i40e_status_code ret_code = I40E_SUCCESS;
636 DEBUGFUNC("i40e_update_nvm_checksum");
638 ret_code = i40e_calc_nvm_checksum(hw, &checksum);
639 if (ret_code == I40E_SUCCESS)
640 ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
647 * i40e_validate_nvm_checksum - Validate EEPROM checksum
648 * @hw: pointer to hardware structure
649 * @checksum: calculated checksum
651 * Performs checksum calculation and validates the NVM SW checksum. If the
652 * caller does not need checksum, the value can be NULL.
654 enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
657 enum i40e_status_code ret_code = I40E_SUCCESS;
659 u16 checksum_local = 0;
661 DEBUGFUNC("i40e_validate_nvm_checksum");
663 ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
664 if (ret_code != I40E_SUCCESS)
665 goto i40e_validate_nvm_checksum_exit;
667 /* Do not use i40e_read_nvm_word() because we do not want to take
668 * the synchronization semaphores twice here.
670 i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
672 /* Verify read checksum from EEPROM is the same as
673 * calculated checksum
675 if (checksum_local != checksum_sr)
676 ret_code = I40E_ERR_NVM_CHECKSUM;
678 /* If the user cares, return the calculated checksum */
680 *checksum = checksum_local;
682 i40e_validate_nvm_checksum_exit:
686 STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
687 struct i40e_nvm_access *cmd,
688 u8 *bytes, int *perrno);
689 STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
690 struct i40e_nvm_access *cmd,
691 u8 *bytes, int *perrno);
692 STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
693 struct i40e_nvm_access *cmd,
694 u8 *bytes, int *perrno);
695 STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
696 struct i40e_nvm_access *cmd,
698 STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
699 struct i40e_nvm_access *cmd,
701 STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
702 struct i40e_nvm_access *cmd,
703 u8 *bytes, int *perrno);
704 STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
705 struct i40e_nvm_access *cmd,
706 u8 *bytes, int *perrno);
707 STATIC inline u8 i40e_nvmupd_get_module(u32 val)
709 return (u8)(val & I40E_NVM_MOD_PNT_MASK);
711 STATIC inline u8 i40e_nvmupd_get_transaction(u32 val)
713 return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
716 STATIC const char *i40e_nvm_update_state_str[] = {
717 "I40E_NVMUPD_INVALID",
718 "I40E_NVMUPD_READ_CON",
719 "I40E_NVMUPD_READ_SNT",
720 "I40E_NVMUPD_READ_LCB",
721 "I40E_NVMUPD_READ_SA",
722 "I40E_NVMUPD_WRITE_ERA",
723 "I40E_NVMUPD_WRITE_CON",
724 "I40E_NVMUPD_WRITE_SNT",
725 "I40E_NVMUPD_WRITE_LCB",
726 "I40E_NVMUPD_WRITE_SA",
727 "I40E_NVMUPD_CSUM_CON",
728 "I40E_NVMUPD_CSUM_SA",
729 "I40E_NVMUPD_CSUM_LCB",
733 * i40e_nvmupd_command - Process an NVM update command
734 * @hw: pointer to hardware structure
735 * @cmd: pointer to nvm update command
736 * @bytes: pointer to the data buffer
737 * @perrno: pointer to return error code
739 * Dispatches command depending on what update state is current
741 enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
742 struct i40e_nvm_access *cmd,
743 u8 *bytes, int *perrno)
745 enum i40e_status_code status;
747 DEBUGFUNC("i40e_nvmupd_command");
752 switch (hw->nvmupd_state) {
753 case I40E_NVMUPD_STATE_INIT:
754 status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
757 case I40E_NVMUPD_STATE_READING:
758 status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
761 case I40E_NVMUPD_STATE_WRITING:
762 status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
765 case I40E_NVMUPD_STATE_INIT_WAIT:
766 case I40E_NVMUPD_STATE_WRITE_WAIT:
767 status = I40E_ERR_NOT_READY;
772 /* invalid state, should never happen */
773 i40e_debug(hw, I40E_DEBUG_NVM,
774 "NVMUPD: no such state %d\n", hw->nvmupd_state);
775 status = I40E_NOT_SUPPORTED;
783 * i40e_nvmupd_state_init - Handle NVM update state Init
784 * @hw: pointer to hardware structure
785 * @cmd: pointer to nvm update command buffer
786 * @bytes: pointer to the data buffer
787 * @perrno: pointer to return error code
789 * Process legitimate commands of the Init state and conditionally set next
790 * state. Reject all other commands.
792 STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
793 struct i40e_nvm_access *cmd,
794 u8 *bytes, int *perrno)
796 enum i40e_status_code status = I40E_SUCCESS;
797 enum i40e_nvmupd_cmd upd_cmd;
799 DEBUGFUNC("i40e_nvmupd_state_init");
801 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
804 case I40E_NVMUPD_READ_SA:
805 status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
807 *perrno = i40e_aq_rc_to_posix(status,
808 hw->aq.asq_last_status);
810 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
811 i40e_release_nvm(hw);
815 case I40E_NVMUPD_READ_SNT:
816 status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
818 *perrno = i40e_aq_rc_to_posix(status,
819 hw->aq.asq_last_status);
821 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
823 i40e_release_nvm(hw);
825 hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
829 case I40E_NVMUPD_WRITE_ERA:
830 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
832 *perrno = i40e_aq_rc_to_posix(status,
833 hw->aq.asq_last_status);
835 status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
837 i40e_release_nvm(hw);
839 hw->aq.nvm_release_on_done = true;
840 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
845 case I40E_NVMUPD_WRITE_SA:
846 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
848 *perrno = i40e_aq_rc_to_posix(status,
849 hw->aq.asq_last_status);
851 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
853 i40e_release_nvm(hw);
855 hw->aq.nvm_release_on_done = true;
856 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
861 case I40E_NVMUPD_WRITE_SNT:
862 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
864 *perrno = i40e_aq_rc_to_posix(status,
865 hw->aq.asq_last_status);
867 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
869 i40e_release_nvm(hw);
871 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
875 case I40E_NVMUPD_CSUM_SA:
876 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
878 *perrno = i40e_aq_rc_to_posix(status,
879 hw->aq.asq_last_status);
881 status = i40e_update_nvm_checksum(hw);
883 *perrno = hw->aq.asq_last_status ?
884 i40e_aq_rc_to_posix(status,
885 hw->aq.asq_last_status) :
887 i40e_release_nvm(hw);
889 hw->aq.nvm_release_on_done = true;
890 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
896 i40e_debug(hw, I40E_DEBUG_NVM,
897 "NVMUPD: bad cmd %s in init state\n",
898 i40e_nvm_update_state_str[upd_cmd]);
899 status = I40E_ERR_NVM;
907 * i40e_nvmupd_state_reading - Handle NVM update state Reading
908 * @hw: pointer to hardware structure
909 * @cmd: pointer to nvm update command buffer
910 * @bytes: pointer to the data buffer
911 * @perrno: pointer to return error code
913 * NVM ownership is already held. Process legitimate commands and set any
914 * change in state; reject all other commands.
916 STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
917 struct i40e_nvm_access *cmd,
918 u8 *bytes, int *perrno)
920 enum i40e_status_code status = I40E_SUCCESS;
921 enum i40e_nvmupd_cmd upd_cmd;
923 DEBUGFUNC("i40e_nvmupd_state_reading");
925 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
928 case I40E_NVMUPD_READ_SA:
929 case I40E_NVMUPD_READ_CON:
930 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
933 case I40E_NVMUPD_READ_LCB:
934 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
935 i40e_release_nvm(hw);
936 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
940 i40e_debug(hw, I40E_DEBUG_NVM,
941 "NVMUPD: bad cmd %s in reading state.\n",
942 i40e_nvm_update_state_str[upd_cmd]);
943 status = I40E_NOT_SUPPORTED;
951 * i40e_nvmupd_state_writing - Handle NVM update state Writing
952 * @hw: pointer to hardware structure
953 * @cmd: pointer to nvm update command buffer
954 * @bytes: pointer to the data buffer
955 * @perrno: pointer to return error code
957 * NVM ownership is already held. Process legitimate commands and set any
958 * change in state; reject all other commands
960 STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
961 struct i40e_nvm_access *cmd,
962 u8 *bytes, int *perrno)
964 enum i40e_status_code status = I40E_SUCCESS;
965 enum i40e_nvmupd_cmd upd_cmd;
966 bool retry_attempt = false;
968 DEBUGFUNC("i40e_nvmupd_state_writing");
970 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
974 case I40E_NVMUPD_WRITE_CON:
975 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
977 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
980 case I40E_NVMUPD_WRITE_LCB:
981 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
983 *perrno = hw->aq.asq_last_status ?
984 i40e_aq_rc_to_posix(status,
985 hw->aq.asq_last_status) :
987 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
989 hw->aq.nvm_release_on_done = true;
990 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
994 case I40E_NVMUPD_CSUM_CON:
995 status = i40e_update_nvm_checksum(hw);
997 *perrno = hw->aq.asq_last_status ?
998 i40e_aq_rc_to_posix(status,
999 hw->aq.asq_last_status) :
1001 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1003 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1007 case I40E_NVMUPD_CSUM_LCB:
1008 status = i40e_update_nvm_checksum(hw);
1010 *perrno = hw->aq.asq_last_status ?
1011 i40e_aq_rc_to_posix(status,
1012 hw->aq.asq_last_status) :
1014 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1016 hw->aq.nvm_release_on_done = true;
1017 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1022 i40e_debug(hw, I40E_DEBUG_NVM,
1023 "NVMUPD: bad cmd %s in writing state.\n",
1024 i40e_nvm_update_state_str[upd_cmd]);
1025 status = I40E_NOT_SUPPORTED;
1030 /* In some circumstances, a multi-write transaction takes longer
1031 * than the default 3 minute timeout on the write semaphore. If
1032 * the write failed with an EBUSY status, this is likely the problem,
1033 * so here we try to reacquire the semaphore then retry the write.
1034 * We only do one retry, then give up.
1036 if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
1038 enum i40e_status_code old_status = status;
1039 u32 old_asq_status = hw->aq.asq_last_status;
1042 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
1043 if (gtime >= hw->nvm.hw_semaphore_timeout) {
1044 i40e_debug(hw, I40E_DEBUG_ALL,
1045 "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
1046 gtime, hw->nvm.hw_semaphore_timeout);
1047 i40e_release_nvm(hw);
1048 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1050 i40e_debug(hw, I40E_DEBUG_ALL,
1051 "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
1052 hw->aq.asq_last_status);
1053 status = old_status;
1054 hw->aq.asq_last_status = old_asq_status;
1056 retry_attempt = true;
1066  * i40e_nvmupd_validate_command - Validate given command
1067  * @hw: pointer to hardware structure
1068  * @cmd: pointer to nvm update command buffer
1069  * @perrno: pointer to return error code
1071  * Return one of the valid command types or I40E_NVMUPD_INVALID
/* NOTE(review): in this extract the original file's line numbers are fused
 * into every code line, and several lines (closing braces, the READ case
 * labels, break statements, the *perrno assignments and the final return)
 * are elided — the gaps in the fused numbering mark where. The comments
 * below describe only what the visible lines establish. */
1073 STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
1074 struct i40e_nvm_access *cmd,
1077 enum i40e_nvmupd_cmd upd_cmd;
1078 u8 transaction, module;
1080 DEBUGFUNC("i40e_nvmupd_validate_command\n");
1082 /* anything that doesn't match a recognized case is an error */
1083 upd_cmd = I40E_NVMUPD_INVALID;
/* Split the caller-supplied config word into its transaction-type and
 * module-id fields. */
1085 transaction = i40e_nvmupd_get_transaction(cmd->config);
1086 module = i40e_nvmupd_get_module(cmd->config);
1088 /* limits on data size */
/* Reject empty or oversized data buffers before classifying the command. */
1089 if ((cmd->data_size < 1) ||
1090 (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
1091 i40e_debug(hw, I40E_DEBUG_NVM,
1092 "i40e_nvmupd_validate_command data_size %d\n",
1095 return I40E_NVMUPD_INVALID;
/* Map (command, transaction) onto the internal update-command enum.
 * NOTE(review): SNT/CON/LCB appear to mean start / continue / last-command
 * of a multi-part sequence, with SA a single-shot access (cf. how 'last' is
 * derived in i40e_nvmupd_nvm_read below) — confirm against i40e_type.h. */
1098 switch (cmd->command) {
1100 switch (transaction) {
1102 upd_cmd = I40E_NVMUPD_READ_CON;
1105 upd_cmd = I40E_NVMUPD_READ_SNT;
1108 upd_cmd = I40E_NVMUPD_READ_LCB;
1111 upd_cmd = I40E_NVMUPD_READ_SA;
1116 case I40E_NVM_WRITE:
1117 switch (transaction) {
1119 upd_cmd = I40E_NVMUPD_WRITE_CON;
1122 upd_cmd = I40E_NVMUPD_WRITE_SNT;
1125 upd_cmd = I40E_NVMUPD_WRITE_LCB;
1128 upd_cmd = I40E_NVMUPD_WRITE_SA;
1131 upd_cmd = I40E_NVMUPD_WRITE_ERA;
1134 upd_cmd = I40E_NVMUPD_CSUM_CON;
/* Checksum requests arrive as CSUM OR'd with a transaction qualifier. */
1136 case (I40E_NVM_CSUM|I40E_NVM_SA):
1137 upd_cmd = I40E_NVMUPD_CSUM_SA;
1139 case (I40E_NVM_CSUM|I40E_NVM_LCB):
1140 upd_cmd = I40E_NVMUPD_CSUM_LCB;
/* Trace the classification result along with the AQ release-on-done flag. */
1145 i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n",
1146 i40e_nvm_update_state_str[upd_cmd],
1148 hw->aq.nvm_release_on_done);
1150 if (upd_cmd == I40E_NVMUPD_INVALID) {
1152 i40e_debug(hw, I40E_DEBUG_NVM,
1153 "i40e_nvmupd_validate_command returns %d perrno %d\n",
1160  * i40e_nvmupd_nvm_read - Read NVM
1161  * @hw: pointer to hardware structure
1162  * @cmd: pointer to nvm update command buffer
1163  * @bytes: pointer to the data buffer
1164  * @perrno: pointer to return error code
1166  * cmd structure contains identifiers and data buffer
/* NOTE(review): extraction artifact — original line numbers are fused into
 * the code lines, and some lines are elided (e.g. internal line 1175, which
 * presumably declares 'last', and the closing return/brace after 1193). */
1168 STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
1169 struct i40e_nvm_access *cmd,
1170 u8 *bytes, int *perrno)
1172 struct i40e_asq_cmd_details cmd_details;
1173 enum i40e_status_code status;
1174 u8 module, transaction;
1177 transaction = i40e_nvmupd_get_transaction(cmd->config);
1178 module = i40e_nvmupd_get_module(cmd->config);
/* Flag the AQ read as the last of a sequence when the transaction is LCB
 * (last command block) or SA (presumably a single-shot access — confirm). */
1179 last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
/* Capture the AQ write-back descriptor into hw->nvm_wb_desc. */
1181 memset(&cmd_details, 0, sizeof(cmd_details));
1182 cmd_details.wb_desc = &hw->nvm_wb_desc;
1184 status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1185 bytes, last, &cmd_details);
1187 i40e_debug(hw, I40E_DEBUG_NVM,
1188 "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
1189 module, cmd->offset, cmd->data_size);
1190 i40e_debug(hw, I40E_DEBUG_NVM,
1191 "i40e_nvmupd_nvm_read status %d aq %d\n",
1192 status, hw->aq.asq_last_status);
/* Convert the (status, AQ return code) pair into a POSIX errno for the
 * caller. */
1193 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1200  * i40e_nvmupd_nvm_erase - Erase an NVM module
1201  * @hw: pointer to hardware structure
1202  * @cmd: pointer to nvm update command buffer
1203  * @perrno: pointer to return error code
1205  * module, offset, data_size and data are in cmd structure
/* NOTE(review): extraction artifact — original line numbers are fused into
 * the code lines, and some lines are elided (the declaration of 'last', the
 * opening brace, and the closing return/brace after 1232). */
1207 STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
1208 struct i40e_nvm_access *cmd,
1211 enum i40e_status_code status = I40E_SUCCESS;
1212 struct i40e_asq_cmd_details cmd_details;
1213 u8 module, transaction;
1216 transaction = i40e_nvmupd_get_transaction(cmd->config);
1217 module = i40e_nvmupd_get_module(cmd->config);
/* Unlike the read path, 'last' is derived here with a bitwise AND on the
 * LCB bit: nonzero when LCB is set in the transaction field. */
1218 last = (transaction & I40E_NVM_LCB);
/* Capture the AQ write-back descriptor into hw->nvm_wb_desc. */
1220 memset(&cmd_details, 0, sizeof(cmd_details));
1221 cmd_details.wb_desc = &hw->nvm_wb_desc;
1223 status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1224 last, &cmd_details);
1226 i40e_debug(hw, I40E_DEBUG_NVM,
1227 "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
1228 module, cmd->offset, cmd->data_size);
1229 i40e_debug(hw, I40E_DEBUG_NVM,
1230 "i40e_nvmupd_nvm_erase status %d aq %d\n",
1231 status, hw->aq.asq_last_status);
/* Convert the (status, AQ return code) pair into a POSIX errno for the
 * caller. */
1232 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1239 * i40e_nvmupd_nvm_write - Write NVM
1240 * @hw: pointer to hardware structure
1241 * @cmd: pointer to nvm update command buffer
1242 * @bytes: pointer to the data buffer
1243 * @perrno: pointer to return error code
1245 * module, offset, data_size and data are in cmd structure
1247 STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
1248 struct i40e_nvm_access *cmd,
1249 u8 *bytes, int *perrno)
1251 enum i40e_status_code status = I40E_SUCCESS;
1252 struct i40e_asq_cmd_details cmd_details;
1253 u8 module, transaction;
1256 transaction = i40e_nvmupd_get_transaction(cmd->config);
1257 module = i40e_nvmupd_get_module(cmd->config);
1258 last = (transaction & I40E_NVM_LCB);
1260 memset(&cmd_details, 0, sizeof(cmd_details));
1261 cmd_details.wb_desc = &hw->nvm_wb_desc;
1263 status = i40e_aq_update_nvm(hw, module, cmd->offset,
1264 (u16)cmd->data_size, bytes, last,
1267 i40e_debug(hw, I40E_DEBUG_NVM,
1268 "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
1269 module, cmd->offset, cmd->data_size);
1270 i40e_debug(hw, I40E_DEBUG_NVM,
1271 "i40e_nvmupd_nvm_write status %d aq %d\n",
1272 status, hw->aq.asq_last_status);
1273 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);