1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
7 #include "i40e_prototype.h"
10 * i40e_init_nvm - Initialize NVM function pointers
11 * @hw: pointer to the HW structure
13 * Setup the function pointers and the NVM info structure. Should be called
14 * once per NVM initialization, e.g. inside the i40e_init_shared_code().
15 * Please notice that the NVM term is used here (& in all methods covered
16 * in this file) as an equivalent of the FLASH part mapped into the SR.
17 * We are accessing FLASH always through the Shadow RAM.
19 enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
21 struct i40e_nvm_info *nvm = &hw->nvm;
22 enum i40e_status_code ret_code = I40E_SUCCESS;
26 DEBUGFUNC("i40e_init_nvm");
28 /* The SR size is stored regardless of the nvm programming mode
29 * as the blank mode may be used in the factory line.
/* Shadow RAM size is encoded in GLNVM_GENS as a power-of-two multiple of 1KB */
31 gens = rd32(hw, I40E_GLNVM_GENS);
32 sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
33 I40E_GLNVM_GENS_SR_SIZE_SHIFT);
34 /* Switching to words (sr_size contains power of 2KB) */
35 nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
37 /* Check if we are in the normal or blank NVM programming mode */
38 fla = rd32(hw, I40E_GLNVM_FLA)
39 if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
41 nvm->timeout = I40E_MAX_NVM_TIMEOUT;
42 nvm->blank_nvm_mode = false;
43 } else { /* Blank programming mode */
/* blank_nvm_mode makes i40e_acquire_nvm/i40e_release_nvm early-exit, and the
 * error return tells the caller that normal NVM operations are unsupported.
 */
44 nvm->blank_nvm_mode = true;
45 ret_code = I40E_ERR_NVM_BLANK_MODE;
46 i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
53 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
54 * @hw: pointer to the HW structure
55 * @access: NVM access type (read or write)
57 * This function will request NVM ownership for reading or writing
58 * via the proper Admin Command. On success it records the time (in GTIME
59 * units) at which the ownership will expire in hw->nvm.hw_semaphore_timeout.
60 enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
61 enum i40e_aq_resource_access_type access)
63 enum i40e_status_code ret_code = I40E_SUCCESS;
67 DEBUGFUNC("i40e_acquire_nvm");
/* In blank (factory) programming mode there is no resource to arbitrate */
69 if (hw->nvm.blank_nvm_mode)
70 goto i40e_i40e_acquire_nvm_exit;
72 ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
74 /* Reading the Global Device Timer */
75 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
77 /* Store the timeout */
78 hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
81 i40e_debug(hw, I40E_DEBUG_NVM,
82 "NVM acquire type %d failed time_left=%" PRIu64 " ret=%d aq_err=%d\n",
83 access, time_left, ret_code, hw->aq.asq_last_status);
/* Another function owns the resource: retry until the owner's hold
 * expires (time_left reported by firmware) or our own poll window,
 * bounded by I40E_MAX_NVM_TIMEOUT, runs out.
 */
85 if (ret_code && time_left) {
86 /* Poll until the current NVM owner timeouts */
87 timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
88 while ((gtime < timeout) && time_left) {
90 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
91 ret_code = i40e_aq_request_resource(hw,
93 access, 0, &time_left,
95 if (ret_code == I40E_SUCCESS) {
96 hw->nvm.hw_semaphore_timeout =
97 I40E_MS_TO_GTIME(time_left) + gtime;
101 if (ret_code != I40E_SUCCESS) {
102 hw->nvm.hw_semaphore_timeout = 0;
103 i40e_debug(hw, I40E_DEBUG_NVM,
104 "NVM acquire timed out, wait %" PRIu64 " ms before trying again. status=%d aq_err=%d\n",
105 time_left, ret_code, hw->aq.asq_last_status);
109 i40e_i40e_acquire_nvm_exit:
114 * i40e_release_nvm - Generic request for releasing the NVM ownership
115 * @hw: pointer to the HW structure
117 * This function will release NVM resource via the proper Admin Command.
118 * Returns nothing; a failed release is retried (see below) and otherwise
119 void i40e_release_nvm(struct i40e_hw *hw)
121 enum i40e_status_code ret_code = I40E_SUCCESS;
124 DEBUGFUNC("i40e_release_nvm");
/* Nothing to release in blank programming mode */
126 if (hw->nvm.blank_nvm_mode)
129 ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
131 /* there are some rare cases when trying to release the resource
132 * results in an admin Q timeout, so handle them correctly
/* Retry the release while the AQ keeps timing out, bounded by the AQ
 * command timeout so we cannot spin forever.
 */
134 while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
135 (total_delay < hw->aq.asq_cmd_timeout)) {
137 ret_code = i40e_aq_release_resource(hw,
138 I40E_NVM_RESOURCE_ID, 0, NULL);
144 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
145 * @hw: pointer to the HW structure
147 * Polls the SRCTL Shadow RAM register done bit.
148 * Returns I40E_SUCCESS once DONE is observed, I40E_ERR_TIMEOUT otherwise.
149 static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
151 enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
154 DEBUGFUNC("i40e_poll_sr_srctl_done_bit");
156 /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
/* Bounded busy-poll: at most I40E_SRRD_SRCTL_ATTEMPTS register reads */
157 for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
158 srctl = rd32(hw, I40E_GLNVM_SRCTL);
159 if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
160 ret_code = I40E_SUCCESS;
165 if (ret_code == I40E_ERR_TIMEOUT)
166 i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
171 * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
172 * @hw: pointer to the HW structure
173 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
174 * @data: word read from the Shadow RAM
176 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
177 * Returns I40E_ERR_PARAM for out-of-range offsets, I40E_ERR_TIMEOUT if the
178 STATIC enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw,
182 enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
185 DEBUGFUNC("i40e_read_nvm_word_srctl");
/* Reject offsets beyond the Shadow RAM size discovered in i40e_init_nvm() */
187 if (offset >= hw->nvm.sr_size) {
188 i40e_debug(hw, I40E_DEBUG_NVM,
189 "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
190 offset, hw->nvm.sr_size);
191 ret_code = I40E_ERR_PARAM;
195 /* Poll the done bit first */
196 ret_code = i40e_poll_sr_srctl_done_bit(hw);
197 if (ret_code == I40E_SUCCESS) {
198 /* Write the address and start reading */
199 sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
200 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
201 wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
203 /* Poll I40E_GLNVM_SRCTL until the done bit is set */
204 ret_code = i40e_poll_sr_srctl_done_bit(hw);
205 if (ret_code == I40E_SUCCESS) {
/* Extract the 16-bit read-data field from GLNVM_SRDATA */
206 sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
207 *data = (u16)((sr_reg &
208 I40E_GLNVM_SRDATA_RDDATA_MASK)
209 >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
212 if (ret_code != I40E_SUCCESS)
213 i40e_debug(hw, I40E_DEBUG_NVM,
214 "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
222 * i40e_read_nvm_aq - Read Shadow RAM.
223 * @hw: pointer to the HW structure.
224 * @module_pointer: module pointer location in words from the NVM beginning
225 * @offset: offset in words from module start
226 * @words: number of words to write
227 * @data: buffer with words to write to the Shadow RAM
228 * @last_command: tells the AdminQ that this is the last command
230 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
232 STATIC enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw,
233 u8 module_pointer, u32 offset,
234 u16 words, void *data,
237 enum i40e_status_code ret_code = I40E_ERR_NVM;
238 struct i40e_asq_cmd_details cmd_details;
240 DEBUGFUNC("i40e_read_nvm_aq");
242 memset(&cmd_details, 0, sizeof(cmd_details));
243 cmd_details.wb_desc = &hw->nvm_wb_desc;
245 /* Here we are checking the SR limit only for the flat memory model.
246 * We cannot do it for the module-based model, as we did not acquire
247 * the NVM resource yet (we cannot get the module pointer value).
248 * Firmware will check the module-based model.
250 if ((offset + words) > hw->nvm.sr_size)
251 i40e_debug(hw, I40E_DEBUG_NVM,
252 "NVM write error: offset %d beyond Shadow RAM limit %d\n",
253 (offset + words), hw->nvm.sr_size);
254 else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
255 /* We can write only up to 4KB (one sector), in one AQ write */
256 i40e_debug(hw, I40E_DEBUG_NVM,
257 "NVM write fail error: tried to write %d words, limit is %d.\n",
258 words, I40E_SR_SECTOR_SIZE_IN_WORDS);
259 else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
260 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
261 /* A single write cannot spread over two sectors */
262 i40e_debug(hw, I40E_DEBUG_NVM,
263 "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
266 ret_code = i40e_aq_read_nvm(hw, module_pointer,
267 2 * offset, /*bytes*/
269 data, last_command, &cmd_details);
275 * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
276 * @hw: pointer to the HW structure
277 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
278 * @data: word read from the Shadow RAM
280 * Reads one 16 bit word from the Shadow RAM using the AdminQ
281 * and converts it from little-endian wire order to CPU order in place.
282 STATIC enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
285 enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
287 DEBUGFUNC("i40e_read_nvm_word_aq");
/* Flat-memory read (module pointer 0) of a single word, marked last_command */
289 ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
/* NOTE(review): in-place LE->CPU fixup via pointer cast — relies on the
 * driver's __le16 conventions; confirm alignment/aliasing is safe here.
 */
290 *data = LE16_TO_CPU(*(__le16 *)data);
296 * __i40e_read_nvm_word - Reads NVM word, assumes caller does the locking
297 * @hw: pointer to the HW structure
298 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
299 * @data: word read from the Shadow RAM
301 * Reads one 16 bit word from the Shadow RAM.
303 * Do not use this function except in cases where the nvm lock is already
304 * taken via i40e_acquire_nvm().
306 enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
311 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
312 return i40e_read_nvm_word_aq(hw, offset, data);
314 return i40e_read_nvm_word_srctl(hw, offset, data);
318 * i40e_read_nvm_word - Reads NVM word, acquires lock if necessary
319 * @hw: pointer to the HW structure
320 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
321 * @data: word read from the Shadow RAM
323 * Reads one 16 bit word from the Shadow RAM.
324 * Acquires and releases the NVM resource around the read only when the
325 enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
328 enum i40e_status_code ret_code = I40E_SUCCESS;
/* Some devices require the NVM semaphore even for reads */
330 if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
331 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
335 ret_code = __i40e_read_nvm_word(hw, offset, data);
337 if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
338 i40e_release_nvm(hw);
343 * i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location
344 * @hw: Pointer to the HW structure
345 * @module_ptr: Pointer to module in words with respect to NVM beginning
346 * @module_offset: Offset in words from module start
347 * @data_offset: Offset in words from reading data area start
348 * @words_data_size: Words to read from NVM
349 * @data_ptr: Pointer to memory location where resulting buffer will be stored
350 * Resolves the module pointer chain in the Shadow RAM and then bulk-reads
351 enum i40e_status_code
352 i40e_read_nvm_module_data(struct i40e_hw *hw, u8 module_ptr, u16 module_offset,
353 u16 data_offset, u16 words_data_size, u16 *data_ptr)
355 enum i40e_status_code status;
356 u16 specific_ptr = 0;
/* Step 1: resolve the module pointer word (skipped when module_ptr == 0) */
360 if (module_ptr != 0) {
361 status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
362 if (status != I40E_SUCCESS) {
363 i40e_debug(hw, I40E_DEBUG_ALL,
364 "Reading nvm word failed.Error code: %d.\n",
369 #define I40E_NVM_INVALID_PTR_VAL 0x7FFF
370 #define I40E_NVM_INVALID_VAL 0xFFFF
372 /* Pointer not initialized */
373 if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
374 ptr_value == I40E_NVM_INVALID_VAL) {
375 i40e_debug(hw, I40E_DEBUG_ALL, "Pointer not initialized.\n");
376 return I40E_ERR_BAD_PTR;
379 /* Check whether the module is in SR mapped area or outside */
380 if (ptr_value & I40E_PTR_TYPE) {
381 /* Pointer points outside of the Shared RAM mapped area */
382 i40e_debug(hw, I40E_DEBUG_ALL,
383 "Reading nvm data failed. Pointer points outside of the Shared RAM mapped area.\n")
385 return I40E_ERR_PARAM;
387 /* Read from the Shadow RAM */
/* Step 2: read the module-internal pointer at module_offset
 * (presumably stored into specific_ptr — the assignment target line is
 * not visible in this view; confirm against the full source).
 */
389 status = i40e_read_nvm_word(hw, ptr_value + module_offset,
391 if (status != I40E_SUCCESS) {
392 i40e_debug(hw, I40E_DEBUG_ALL,
393 "Reading nvm word failed.Error code: %d.\n",
/* Step 3: compute the final word offset and bulk-read into data_ptr */
398 offset = ptr_value + module_offset + specific_ptr +
401 status = i40e_read_nvm_buffer(hw, offset, &words_data_size,
403 if (status != I40E_SUCCESS) {
404 i40e_debug(hw, I40E_DEBUG_ALL,
405 "Reading nvm buffer failed.Error code: %d.\n",
414 * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
415 * @hw: pointer to the HW structure
416 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
417 * @words: (in) number of words to read; (out) number of words actually read
418 * @data: words read from the Shadow RAM
420 * Reads 16 bit words (data buffer) from the SR one word at a time using
421 * i40e_read_nvm_word_srctl(). This path does NOT take or release NVM
422 * ownership itself — that is the caller's responsibility.
424 STATIC enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
425 u16 *words, u16 *data)
427 enum i40e_status_code ret_code = I40E_SUCCESS;
430 DEBUGFUNC("i40e_read_nvm_buffer_srctl");
432 /* Loop through the selected region */
/* Stops at the first failing word; *words is then updated below so the
 * caller learns how many words were actually transferred.
 */
433 for (word = 0; word < *words; word++) {
434 index = offset + word;
435 ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
436 if (ret_code != I40E_SUCCESS)
440 /* Update the number of words read from the Shadow RAM */
447 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
448 * @hw: pointer to the HW structure
449 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
450 * @words: (in) number of words to read; (out) number of words actually read
451 * @data: words read from the Shadow RAM
453 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
454 * method, splitting the request into sector-sized, sector-aligned chunks.
455 * NVM ownership is the caller's responsibility on this path.
457 STATIC enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
458 u16 *words, u16 *data)
460 enum i40e_status_code ret_code;
461 u16 read_size = *words;
462 bool last_cmd = false;
466 DEBUGFUNC("i40e_read_nvm_buffer_aq");
469 /* Calculate number of bytes we should read in this step.
470 * FVL AQ do not allow to read more than one page at a time or
471 * to cross page boundaries.
/* First chunk may be short so subsequent chunks start sector-aligned */
473 if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
474 read_size = min(*words,
475 (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
476 (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
478 read_size = min((*words - words_read),
479 I40E_SR_SECTOR_SIZE_IN_WORDS);
481 /* Check if this is last command, if so set proper flag */
482 if ((words_read + read_size) >= *words)
485 ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
486 data + words_read, last_cmd);
487 if (ret_code != I40E_SUCCESS)
488 goto read_nvm_buffer_aq_exit;
490 /* Increment counter for words already read and move offset to
493 words_read += read_size;
495 } while (words_read < *words);
/* Convert the whole buffer from little-endian wire order to CPU order */
497 for (i = 0; i < *words; i++)
498 data[i] = LE16_TO_CPU(((__le16 *)data)[i]);
500 read_nvm_buffer_aq_exit:
506 * __i40e_read_nvm_buffer - Reads NVM buffer, caller must acquire lock
507 * @hw: pointer to the HW structure
508 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
509 * @words: (in) number of words to read; (out) number of words actually read
510 * @data: words read from the Shadow RAM
512 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
515 enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
517 u16 *words, u16 *data)
519 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
520 return i40e_read_nvm_buffer_aq(hw, offset, words, data);
522 return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
526 * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquire lock if necessary
527 * @hw: pointer to the HW structure
528 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
529 * @words: (in) number of words to read; (out) number of words actually read
530 * @data: words read from the Shadow RAM
532 * Reads 16 bit words (data buffer) from the SR. On the AQ path the buffer
533 * read is preceded by the NVM ownership take and followed by the release;
534 * the SRCTL path needs no such arbitration.
536 enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
537 u16 *words, u16 *data)
539 enum i40e_status_code ret_code = I40E_SUCCESS;
/* AQ reads go through shared firmware state, so take the NVM semaphore */
541 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
542 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
544 ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
546 i40e_release_nvm(hw);
549 ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
556 * i40e_write_nvm_aq - Writes Shadow RAM.
557 * @hw: pointer to the HW structure.
558 * @module_pointer: module pointer location in words from the NVM beginning
559 * @offset: offset in words from module start
560 * @words: number of words to write
561 * @data: buffer with words to write to the Shadow RAM
562 * @last_command: tells the AdminQ that this is the last command
564 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
565 * Returns I40E_ERR_NVM when a limit check fails, else the AQ update status.
566 enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
567 u32 offset, u16 words, void *data,
570 enum i40e_status_code ret_code = I40E_ERR_NVM;
571 struct i40e_asq_cmd_details cmd_details;
573 DEBUGFUNC("i40e_write_nvm_aq");
575 memset(&cmd_details, 0, sizeof(cmd_details));
576 cmd_details.wb_desc = &hw->nvm_wb_desc;
578 /* Here we are checking the SR limit only for the flat memory model.
579 * We cannot do it for the module-based model, as we did not acquire
580 * the NVM resource yet (we cannot get the module pointer value).
581 * Firmware will check the module-based model.
583 if ((offset + words) > hw->nvm.sr_size)
584 DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n");
585 else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
586 /* We can write only up to 4KB (one sector), in one AQ write */
587 DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n");
588 else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
589 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
590 /* A single write cannot spread over two sectors */
591 DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n");
/* AQ command operates in bytes, hence the 2x word offset below */
593 ret_code = i40e_aq_update_nvm(hw, module_pointer,
594 2 * offset, /*bytes*/
596 data, last_command, 0,
603 * __i40e_write_nvm_word - Writes Shadow RAM word
604 * @hw: pointer to the HW structure
605 * @offset: offset of the Shadow RAM word to write
606 * @data: word to write to the Shadow RAM (converted to LE in place)
608 * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
609 * NVM ownership have to be acquired and released (on ARQ completion event
610 * reception) by caller. To commit SR to NVM update checksum function
613 enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
616 DEBUGFUNC("i40e_write_nvm_word");
/* Convert caller's CPU-order word to little-endian wire order in place */
618 *((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));
620 /* Value 0x00 below means that we treat SR as a flat mem */
621 return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, false);
625 * __i40e_write_nvm_buffer - Writes Shadow RAM buffer
626 * @hw: pointer to the HW structure
627 * @module_pointer: module pointer location in words from the NVM beginning
628 * @offset: offset of the Shadow RAM buffer to write
629 * @words: number of words to write
630 * @data: words to write to the Shadow RAM (converted to LE in place)
632 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
633 * NVM ownership must be acquired before calling this function and released
634 * on ARQ completion event reception by caller. To commit SR to NVM update
635 * checksum function should be called.
637 enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw,
638 u8 module_pointer, u32 offset,
639 u16 words, void *data)
641 __le16 *le_word_ptr = (__le16 *)data;
642 u16 *word_ptr = (u16 *)data;
645 DEBUGFUNC("i40e_write_nvm_buffer");
/* In-place CPU-order -> little-endian conversion of the caller's buffer */
647 for (i = 0; i < words; i++)
648 le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);
650 /* Here we will only write one buffer as the size of the modules
651 * mirrored in the Shadow RAM is always less than 4K.
653 return i40e_write_nvm_aq(hw, module_pointer, offset, words,
658 * i40e_calc_nvm_checksum - Calculates and returns the checksum
659 * @hw: pointer to hardware structure
660 * @checksum: pointer to the checksum
662 * This function calculates SW Checksum that covers the whole 64kB shadow RAM
663 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
664 * is customer specific and unknown. Therefore, this function skips all maximum
665 * possible size of VPD (1kB).
666 * Caller must already hold the NVM lock (this uses __i40e_read_nvm_*).
667 enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
669 enum i40e_status_code ret_code = I40E_SUCCESS;
670 struct i40e_virt_mem vmem;
671 u16 pcie_alt_module = 0;
672 u16 checksum_local = 0;
677 DEBUGFUNC("i40e_calc_nvm_checksum");
/* One sector-sized bounce buffer is reused for every sector read below */
679 ret_code = i40e_allocate_virt_mem(hw, &vmem,
680 I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
682 goto i40e_calc_nvm_checksum_exit;
683 data = (u16 *)vmem.va;
685 /* read pointer to VPD area */
686 ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
687 if (ret_code != I40E_SUCCESS) {
688 ret_code = I40E_ERR_NVM_CHECKSUM;
689 goto i40e_calc_nvm_checksum_exit;
692 /* read pointer to PCIe Alt Auto-load module */
693 ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
695 if (ret_code != I40E_SUCCESS) {
696 ret_code = I40E_ERR_NVM_CHECKSUM;
697 goto i40e_calc_nvm_checksum_exit;
700 /* Calculate SW checksum that covers the whole 64kB shadow RAM
701 * except the VPD and PCIe ALT Auto-load modules
703 for (i = 0; i < hw->nvm.sr_size; i++) {
/* Refill the bounce buffer at every sector boundary */
705 if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
706 u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
708 ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
709 if (ret_code != I40E_SUCCESS) {
710 ret_code = I40E_ERR_NVM_CHECKSUM;
711 goto i40e_calc_nvm_checksum_exit;
715 /* Skip Checksum word */
716 if (i == I40E_SR_SW_CHECKSUM_WORD)
718 /* Skip VPD module (convert byte size to word count) */
719 if ((i >= (u32)vpd_module) &&
720 (i < ((u32)vpd_module +
721 (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
724 /* Skip PCIe ALT module (convert byte size to word count) */
725 if ((i >= (u32)pcie_alt_module) &&
726 (i < ((u32)pcie_alt_module +
727 (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
/* Accumulate; the checksum is the base value minus this running sum */
731 checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
734 *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
736 i40e_calc_nvm_checksum_exit:
737 i40e_free_virt_mem(hw, &vmem);
742 * i40e_update_nvm_checksum - Updates the NVM checksum
743 * @hw: pointer to hardware structure
745 * NVM ownership must be acquired before calling this function and released
746 * on ARQ completion event reception by caller.
747 * This function will commit SR to NVM.
748 * Recomputes the SW checksum and writes it at I40E_SR_SW_CHECKSUM_WORD.
749 enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
751 enum i40e_status_code ret_code = I40E_SUCCESS;
755 DEBUGFUNC("i40e_update_nvm_checksum");
757 ret_code = i40e_calc_nvm_checksum(hw, &checksum);
758 if (ret_code == I40E_SUCCESS) {
/* Checksum word is stored little-endian in the Shadow RAM */
759 le_sum = CPU_TO_LE16(checksum);
760 ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
768 * i40e_validate_nvm_checksum - Validate EEPROM checksum
769 * @hw: pointer to hardware structure
770 * @checksum: calculated checksum
772 * Performs checksum calculation and validates the NVM SW checksum. If the
773 * caller does not need checksum, the value can be NULL.
774 * Returns I40E_ERR_NVM_CHECKSUM when the stored and computed values differ.
775 enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
778 enum i40e_status_code ret_code = I40E_SUCCESS;
780 u16 checksum_local = 0;
782 DEBUGFUNC("i40e_validate_nvm_checksum");
784 /* We must acquire the NVM lock in order to correctly synchronize the
785 * NVM accesses across multiple PFs. Without doing so it is possible
786 * for one of the PFs to read invalid data potentially indicating that
787 * the checksum is invalid.
789 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
792 ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
/* NOTE(review): the stored-checksum read result is not checked here;
 * a failed read would leave checksum_sr stale — confirm intended.
 */
793 __i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
794 i40e_release_nvm(hw);
798 /* Verify read checksum from EEPROM is the same as
799 * calculated checksum
801 if (checksum_local != checksum_sr)
802 ret_code = I40E_ERR_NVM_CHECKSUM;
804 /* If the user cares, return the calculated checksum */
806 *checksum = checksum_local;
811 STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
812 struct i40e_nvm_access *cmd,
813 u8 *bytes, int *perrno);
814 STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
815 struct i40e_nvm_access *cmd,
816 u8 *bytes, int *perrno);
817 STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
818 struct i40e_nvm_access *cmd,
819 u8 *bytes, int *perrno);
820 STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
821 struct i40e_nvm_access *cmd,
823 STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
824 struct i40e_nvm_access *cmd,
826 STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
827 struct i40e_nvm_access *cmd,
828 u8 *bytes, int *perrno);
829 STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
830 struct i40e_nvm_access *cmd,
831 u8 *bytes, int *perrno);
832 STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
833 struct i40e_nvm_access *cmd,
834 u8 *bytes, int *perrno);
835 STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
836 struct i40e_nvm_access *cmd,
837 u8 *bytes, int *perrno);
838 STATIC enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
839 struct i40e_nvm_access *cmd,
840 u8 *bytes, int *perrno);
841 STATIC INLINE u8 i40e_nvmupd_get_module(u32 val)
843 return (u8)(val & I40E_NVM_MOD_PNT_MASK);
845 STATIC INLINE u8 i40e_nvmupd_get_transaction(u32 val)
847 return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
850 STATIC INLINE u8 i40e_nvmupd_get_preservation_flags(u32 val)
852 return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
853 I40E_NVM_PRESERVATION_FLAGS_SHIFT);
/* Human-readable names for nvmupd commands, indexed by enum value in
 * i40e_nvmupd_command(). NOTE(review): entry order presumably must match
 * enum i40e_nvmupd_cmd exactly — confirm against the header when editing.
 */
856 STATIC const char *i40e_nvm_update_state_str[] = {
857 "I40E_NVMUPD_INVALID",
858 "I40E_NVMUPD_READ_CON",
859 "I40E_NVMUPD_READ_SNT",
860 "I40E_NVMUPD_READ_LCB",
861 "I40E_NVMUPD_READ_SA",
862 "I40E_NVMUPD_WRITE_ERA",
863 "I40E_NVMUPD_WRITE_CON",
864 "I40E_NVMUPD_WRITE_SNT",
865 "I40E_NVMUPD_WRITE_LCB",
866 "I40E_NVMUPD_WRITE_SA",
867 "I40E_NVMUPD_CSUM_CON",
868 "I40E_NVMUPD_CSUM_SA",
869 "I40E_NVMUPD_CSUM_LCB",
870 "I40E_NVMUPD_STATUS",
871 "I40E_NVMUPD_EXEC_AQ",
872 "I40E_NVMUPD_GET_AQ_RESULT",
873 "I40E_NVMUPD_GET_AQ_EVENT",
874 "I40E_NVMUPD_GET_FEATURES",
878 * i40e_nvmupd_command - Process an NVM update command
879 * @hw: pointer to hardware structure
880 * @cmd: pointer to nvm update command
881 * @bytes: pointer to the data buffer
882 * @perrno: pointer to return error code
884 * Dispatches command depending on what update state is current
885 * (STATUS and GET_FEATURES are answered immediately, everything else
886 enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
887 struct i40e_nvm_access *cmd,
888 u8 *bytes, int *perrno)
890 enum i40e_status_code status;
891 enum i40e_nvmupd_cmd upd_cmd;
893 DEBUGFUNC("i40e_nvmupd_command");
898 /* early check for status command and debug msgs */
899 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
901 i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
902 i40e_nvm_update_state_str[upd_cmd],
904 hw->nvm_release_on_done, hw->nvm_wait_opcode,
905 cmd->command, cmd->config, cmd->offset, cmd->data_size);
907 if (upd_cmd == I40E_NVMUPD_INVALID) {
909 i40e_debug(hw, I40E_DEBUG_NVM,
910 "i40e_nvmupd_validate_command returns %d errno %d\n",
914 /* a status request returns immediately rather than
915 * going into the state machine
917 if (upd_cmd == I40E_NVMUPD_STATUS) {
918 if (!cmd->data_size) {
920 return I40E_ERR_BUF_TOO_SHORT;
/* Reply layout: byte 0 = state machine state, bytes 2-3 = wait opcode */
923 bytes[0] = hw->nvmupd_state;
925 if (cmd->data_size >= 4) {
927 *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
930 /* Clear error status on read */
931 if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
932 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
938 * A supported features request returns immediately
939 * rather than going into state machine
941 if (upd_cmd == I40E_NVMUPD_FEATURES) {
942 if (cmd->data_size < hw->nvmupd_features.size) {
944 return I40E_ERR_BUF_TOO_SHORT;
948 * If buffer is bigger than i40e_nvmupd_features structure,
949 * make sure the trailing bytes are set to 0x0.
951 if (cmd->data_size > hw->nvmupd_features.size)
952 i40e_memset(bytes + hw->nvmupd_features.size, 0x0,
953 cmd->data_size - hw->nvmupd_features.size,
956 i40e_memcpy(bytes, &hw->nvmupd_features,
957 hw->nvmupd_features.size, I40E_NONDMA_MEM);
962 /* Clear status even it is not read and log */
963 if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
964 i40e_debug(hw, I40E_DEBUG_NVM,
965 "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
966 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
969 /* Acquire lock to prevent race condition where adminq_task
970 * can execute after i40e_nvmupd_nvm_read/write but before state
971 * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
973 * During NVMUpdate, it is observed that lock could be held for
974 * ~5ms for most commands. However lock is held for ~60ms for
975 * NVMUPD_CSUM_LCB command.
977 i40e_acquire_spinlock(&hw->aq.arq_spinlock);
/* Dispatch into the per-state handler of the NVM update state machine */
978 switch (hw->nvmupd_state) {
979 case I40E_NVMUPD_STATE_INIT:
980 status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
983 case I40E_NVMUPD_STATE_READING:
984 status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
987 case I40E_NVMUPD_STATE_WRITING:
988 status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
991 case I40E_NVMUPD_STATE_INIT_WAIT:
992 case I40E_NVMUPD_STATE_WRITE_WAIT:
993 /* if we need to stop waiting for an event, clear
994 * the wait info and return before doing anything else
996 if (cmd->offset == 0xffff) {
997 i40e_nvmupd_clear_wait_state(hw);
998 status = I40E_SUCCESS;
1002 status = I40E_ERR_NOT_READY;
1007 /* invalid state, should never happen */
1008 i40e_debug(hw, I40E_DEBUG_NVM,
1009 "NVMUPD: no such state %d\n", hw->nvmupd_state);
1010 status = I40E_NOT_SUPPORTED;
1015 i40e_release_spinlock(&hw->aq.arq_spinlock);
1020 * i40e_nvmupd_state_init - Handle NVM update state Init
1021 * @hw: pointer to hardware structure
1022 * @cmd: pointer to nvm update command buffer
1023 * @bytes: pointer to the data buffer
1024 * @perrno: pointer to return error code
1026 * Process legitimate commands of the Init state and conditionally set next
1027 * state. Reject all other commands.
1028 * Pattern per case: acquire NVM (read or write), run the operation, then
1029 STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
1030 struct i40e_nvm_access *cmd,
1031 u8 *bytes, int *perrno)
1033 enum i40e_status_code status = I40E_SUCCESS;
1034 enum i40e_nvmupd_cmd upd_cmd;
1036 DEBUGFUNC("i40e_nvmupd_state_init");
1038 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
/* Stand-alone read: acquire, read, release — state stays INIT */
1041 case I40E_NVMUPD_READ_SA:
1042 status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
1044 *perrno = i40e_aq_rc_to_posix(status,
1045 hw->aq.asq_last_status);
1047 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1048 i40e_release_nvm(hw);
/* Start of a multi-part read: hold the lock, move to READING state */
1052 case I40E_NVMUPD_READ_SNT:
1053 status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
1055 *perrno = i40e_aq_rc_to_posix(status,
1056 hw->aq.asq_last_status);
1058 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1060 i40e_release_nvm(hw);
1062 hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
/* Erase: release deferred until the erase AQ completion event arrives */
1066 case I40E_NVMUPD_WRITE_ERA:
1067 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1069 *perrno = i40e_aq_rc_to_posix(status,
1070 hw->aq.asq_last_status);
1072 status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
1074 i40e_release_nvm(hw);
1076 hw->nvm_release_on_done = true;
1077 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
1078 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
/* Stand-alone write: release deferred until the update completion event */
1083 case I40E_NVMUPD_WRITE_SA:
1084 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1086 *perrno = i40e_aq_rc_to_posix(status,
1087 hw->aq.asq_last_status);
1089 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1091 i40e_release_nvm(hw);
1093 hw->nvm_release_on_done = true;
1094 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1095 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
/* Start of a multi-part write: keep the lock, wait for completion event */
1100 case I40E_NVMUPD_WRITE_SNT:
1101 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1103 *perrno = i40e_aq_rc_to_posix(status,
1104 hw->aq.asq_last_status);
1106 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1108 i40e_release_nvm(hw);
1110 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1111 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
/* Stand-alone checksum commit: writes checksum word, defers release */
1116 case I40E_NVMUPD_CSUM_SA:
1117 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1119 *perrno = i40e_aq_rc_to_posix(status,
1120 hw->aq.asq_last_status);
1122 status = i40e_update_nvm_checksum(hw);
1124 *perrno = hw->aq.asq_last_status ?
1125 i40e_aq_rc_to_posix(status,
1126 hw->aq.asq_last_status) :
1128 i40e_release_nvm(hw);
1130 hw->nvm_release_on_done = true;
1131 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1132 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
/* Pass-through AQ commands and result/event queries need no NVM lock here */
1137 case I40E_NVMUPD_EXEC_AQ:
1138 status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
1141 case I40E_NVMUPD_GET_AQ_RESULT:
1142 status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
1145 case I40E_NVMUPD_GET_AQ_EVENT:
1146 status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
1150 i40e_debug(hw, I40E_DEBUG_NVM,
1151 "NVMUPD: bad cmd %s in init state\n",
1152 i40e_nvm_update_state_str[upd_cmd]);
1153 status = I40E_ERR_NVM;
1161 * i40e_nvmupd_state_reading - Handle NVM update state Reading
1162 * @hw: pointer to hardware structure
1163 * @cmd: pointer to nvm update command buffer
1164 * @bytes: pointer to the data buffer
1165 * @perrno: pointer to return error code
1167 * NVM ownership is already held. Process legitimate commands and set any
1168 * change in state; reject all other commands.
1170 STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
1171 struct i40e_nvm_access *cmd,
1172 u8 *bytes, int *perrno)
1174 enum i40e_status_code status = I40E_SUCCESS;
1175 enum i40e_nvmupd_cmd upd_cmd;
1177 DEBUGFUNC("i40e_nvmupd_state_reading");
1179 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1182 case I40E_NVMUPD_READ_SA:
1183 case I40E_NVMUPD_READ_CON:
1184 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1187 case I40E_NVMUPD_READ_LCB:
1188 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1189 i40e_release_nvm(hw);
1190 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1194 i40e_debug(hw, I40E_DEBUG_NVM,
1195 "NVMUPD: bad cmd %s in reading state.\n",
1196 i40e_nvm_update_state_str[upd_cmd]);
1197 status = I40E_NOT_SUPPORTED;
1205 * i40e_nvmupd_state_writing - Handle NVM update state Writing
1206 * @hw: pointer to hardware structure
1207 * @cmd: pointer to nvm update command buffer
1208 * @bytes: pointer to the data buffer
1209 * @perrno: pointer to return error code
1211 * NVM ownership is already held. Process legitimate commands and set any
1212 * change in state; reject all other commands
1214 STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
1215 struct i40e_nvm_access *cmd,
1216 u8 *bytes, int *perrno)
1218 enum i40e_status_code status = I40E_SUCCESS;
1219 enum i40e_nvmupd_cmd upd_cmd;
1220 bool retry_attempt = false;
1222 DEBUGFUNC("i40e_nvmupd_state_writing");
1224 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1228 case I40E_NVMUPD_WRITE_CON:
1229 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1231 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1232 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1236 case I40E_NVMUPD_WRITE_LCB:
1237 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1239 *perrno = hw->aq.asq_last_status ?
1240 i40e_aq_rc_to_posix(status,
1241 hw->aq.asq_last_status) :
1243 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1245 hw->nvm_release_on_done = true;
1246 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1247 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1251 case I40E_NVMUPD_CSUM_CON:
1252 /* Assumes the caller has acquired the nvm */
1253 status = i40e_update_nvm_checksum(hw);
1255 *perrno = hw->aq.asq_last_status ?
1256 i40e_aq_rc_to_posix(status,
1257 hw->aq.asq_last_status) :
1259 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1261 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1262 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1266 case I40E_NVMUPD_CSUM_LCB:
1267 /* Assumes the caller has acquired the nvm */
1268 status = i40e_update_nvm_checksum(hw);
1270 *perrno = hw->aq.asq_last_status ?
1271 i40e_aq_rc_to_posix(status,
1272 hw->aq.asq_last_status) :
1274 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1276 hw->nvm_release_on_done = true;
1277 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1278 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1283 i40e_debug(hw, I40E_DEBUG_NVM,
1284 "NVMUPD: bad cmd %s in writing state.\n",
1285 i40e_nvm_update_state_str[upd_cmd]);
1286 status = I40E_NOT_SUPPORTED;
1291 /* In some circumstances, a multi-write transaction takes longer
1292 * than the default 3 minute timeout on the write semaphore. If
1293 * the write failed with an EBUSY status, this is likely the problem,
1294 * so here we try to reacquire the semaphore then retry the write.
1295 * We only do one retry, then give up.
1297 if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
1299 enum i40e_status_code old_status = status;
1300 u32 old_asq_status = hw->aq.asq_last_status;
1303 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
1304 if (gtime >= hw->nvm.hw_semaphore_timeout) {
1305 i40e_debug(hw, I40E_DEBUG_ALL,
1306 "NVMUPD: write semaphore expired (%d >= %" PRIu64 "), retrying\n",
1307 gtime, hw->nvm.hw_semaphore_timeout);
1308 i40e_release_nvm(hw);
1309 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1311 i40e_debug(hw, I40E_DEBUG_ALL,
1312 "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
1313 hw->aq.asq_last_status);
1314 status = old_status;
1315 hw->aq.asq_last_status = old_asq_status;
1317 retry_attempt = true;
1327 * i40e_nvmupd_clear_wait_state - clear wait state on hw
1328 * @hw: pointer to the hardware structure
1330 void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
1332 i40e_debug(hw, I40E_DEBUG_NVM,
1333 "NVMUPD: clearing wait on opcode 0x%04x\n",
1334 hw->nvm_wait_opcode);
1336 if (hw->nvm_release_on_done) {
1337 i40e_release_nvm(hw);
1338 hw->nvm_release_on_done = false;
1340 hw->nvm_wait_opcode = 0;
1342 if (hw->aq.arq_last_status) {
1343 hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
1347 switch (hw->nvmupd_state) {
1348 case I40E_NVMUPD_STATE_INIT_WAIT:
1349 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1352 case I40E_NVMUPD_STATE_WRITE_WAIT:
1353 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
1362 * i40e_nvmupd_check_wait_event - handle NVM update operation events
1363 * @hw: pointer to the hardware structure
1364 * @opcode: the event that just happened
1365 * @desc: AdminQ descriptor
1367 void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
1368 struct i40e_aq_desc *desc)
1370 u32 aq_desc_len = sizeof(struct i40e_aq_desc);
1372 if (opcode == hw->nvm_wait_opcode) {
1373 i40e_memcpy(&hw->nvm_aq_event_desc, desc,
1374 aq_desc_len, I40E_NONDMA_TO_NONDMA);
1375 i40e_nvmupd_clear_wait_state(hw);
1380 * i40e_nvmupd_validate_command - Validate given command
1381 * @hw: pointer to hardware structure
1382 * @cmd: pointer to nvm update command buffer
1383 * @perrno: pointer to return error code
1385 * Return one of the valid command types or I40E_NVMUPD_INVALID
1387 STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
1388 struct i40e_nvm_access *cmd,
1391 enum i40e_nvmupd_cmd upd_cmd;
1392 u8 module, transaction;
1394 DEBUGFUNC("i40e_nvmupd_validate_command\n");
1396 /* anything that doesn't match a recognized case is an error */
1397 upd_cmd = I40E_NVMUPD_INVALID;
1399 transaction = i40e_nvmupd_get_transaction(cmd->config);
1400 module = i40e_nvmupd_get_module(cmd->config);
1402 /* limits on data size */
1403 if ((cmd->data_size < 1) ||
1404 (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
1405 i40e_debug(hw, I40E_DEBUG_NVM,
1406 "i40e_nvmupd_validate_command data_size %d\n",
1409 return I40E_NVMUPD_INVALID;
1412 switch (cmd->command) {
1414 switch (transaction) {
1416 upd_cmd = I40E_NVMUPD_READ_CON;
1419 upd_cmd = I40E_NVMUPD_READ_SNT;
1422 upd_cmd = I40E_NVMUPD_READ_LCB;
1425 upd_cmd = I40E_NVMUPD_READ_SA;
1429 case I40E_NVM_EXEC_GET_AQ_RESULT:
1430 upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
1432 case I40E_NVM_EXEC_FEATURES:
1433 upd_cmd = I40E_NVMUPD_FEATURES;
1435 case I40E_NVM_EXEC_STATUS:
1436 upd_cmd = I40E_NVMUPD_STATUS;
1440 return I40E_NVMUPD_INVALID;
1444 upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
1449 case I40E_NVM_WRITE:
1450 switch (transaction) {
1452 upd_cmd = I40E_NVMUPD_WRITE_CON;
1455 upd_cmd = I40E_NVMUPD_WRITE_SNT;
1458 upd_cmd = I40E_NVMUPD_WRITE_LCB;
1461 upd_cmd = I40E_NVMUPD_WRITE_SA;
1464 upd_cmd = I40E_NVMUPD_WRITE_ERA;
1467 upd_cmd = I40E_NVMUPD_CSUM_CON;
1469 case (I40E_NVM_CSUM|I40E_NVM_SA):
1470 upd_cmd = I40E_NVMUPD_CSUM_SA;
1472 case (I40E_NVM_CSUM|I40E_NVM_LCB):
1473 upd_cmd = I40E_NVMUPD_CSUM_LCB;
1477 upd_cmd = I40E_NVMUPD_EXEC_AQ;
1487 * i40e_nvmupd_exec_aq - Run an AQ command
1488 * @hw: pointer to hardware structure
1489 * @cmd: pointer to nvm update command buffer
1490 * @bytes: pointer to the data buffer
1491 * @perrno: pointer to return error code
1493 * cmd structure contains identifiers and data buffer
1495 STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
1496 struct i40e_nvm_access *cmd,
1497 u8 *bytes, int *perrno)
1499 struct i40e_asq_cmd_details cmd_details;
1500 enum i40e_status_code status;
1501 struct i40e_aq_desc *aq_desc;
1507 i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1508 if (cmd->offset == 0xffff)
1509 return I40E_SUCCESS;
1511 memset(&cmd_details, 0, sizeof(cmd_details));
1512 cmd_details.wb_desc = &hw->nvm_wb_desc;
1514 aq_desc_len = sizeof(struct i40e_aq_desc);
1515 memset(&hw->nvm_wb_desc, 0, aq_desc_len);
1517 /* get the aq descriptor */
1518 if (cmd->data_size < aq_desc_len) {
1519 i40e_debug(hw, I40E_DEBUG_NVM,
1520 "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
1521 cmd->data_size, aq_desc_len);
1523 return I40E_ERR_PARAM;
1525 aq_desc = (struct i40e_aq_desc *)bytes;
1527 /* if data buffer needed, make sure it's ready */
1528 aq_data_len = cmd->data_size - aq_desc_len;
1529 buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen));
1531 if (!hw->nvm_buff.va) {
1532 status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
1533 hw->aq.asq_buf_size);
1535 i40e_debug(hw, I40E_DEBUG_NVM,
1536 "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
1540 if (hw->nvm_buff.va) {
1541 buff = hw->nvm_buff.va;
1542 i40e_memcpy(buff, &bytes[aq_desc_len], aq_data_len,
1543 I40E_NONDMA_TO_NONDMA);
1548 memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);
1550 /* and away we go! */
1551 status = i40e_asq_send_command(hw, aq_desc, buff,
1552 buff_size, &cmd_details);
1554 i40e_debug(hw, I40E_DEBUG_NVM,
1555 "i40e_nvmupd_exec_aq err %s aq_err %s\n",
1556 i40e_stat_str(hw, status),
1557 i40e_aq_str(hw, hw->aq.asq_last_status));
1558 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1562 /* should we wait for a followup event? */
1564 hw->nvm_wait_opcode = cmd->offset;
1565 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1572 * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
1573 * @hw: pointer to hardware structure
1574 * @cmd: pointer to nvm update command buffer
1575 * @bytes: pointer to the data buffer
1576 * @perrno: pointer to return error code
1578 * cmd structure contains identifiers and data buffer
1580 STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
1581 struct i40e_nvm_access *cmd,
1582 u8 *bytes, int *perrno)
1589 i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1591 aq_desc_len = sizeof(struct i40e_aq_desc);
1592 aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen);
1594 /* check offset range */
1595 if (cmd->offset > aq_total_len) {
1596 i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
1597 __func__, cmd->offset, aq_total_len);
1599 return I40E_ERR_PARAM;
1602 /* check copylength range */
1603 if (cmd->data_size > (aq_total_len - cmd->offset)) {
1604 int new_len = aq_total_len - cmd->offset;
1606 i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
1607 __func__, cmd->data_size, new_len);
1608 cmd->data_size = new_len;
1611 remainder = cmd->data_size;
1612 if (cmd->offset < aq_desc_len) {
1613 u32 len = aq_desc_len - cmd->offset;
1615 len = min(len, cmd->data_size);
1616 i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
1617 __func__, cmd->offset, cmd->offset + len);
1619 buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
1620 i40e_memcpy(bytes, buff, len, I40E_NONDMA_TO_NONDMA);
1624 buff = hw->nvm_buff.va;
1626 buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len);
1629 if (remainder > 0) {
1630 int start_byte = buff - (u8 *)hw->nvm_buff.va;
1632 i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
1633 __func__, start_byte, start_byte + remainder);
1634 i40e_memcpy(bytes, buff, remainder, I40E_NONDMA_TO_NONDMA);
1637 return I40E_SUCCESS;
1641 * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
1642 * @hw: pointer to hardware structure
1643 * @cmd: pointer to nvm update command buffer
1644 * @bytes: pointer to the data buffer
1645 * @perrno: pointer to return error code
1647 * cmd structure contains identifiers and data buffer
1649 STATIC enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
1650 struct i40e_nvm_access *cmd,
1651 u8 *bytes, int *perrno)
1656 i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1658 aq_desc_len = sizeof(struct i40e_aq_desc);
1659 aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_aq_event_desc.datalen);
1661 /* check copylength range */
1662 if (cmd->data_size > aq_total_len) {
1663 i40e_debug(hw, I40E_DEBUG_NVM,
1664 "%s: copy length %d too big, trimming to %d\n",
1665 __func__, cmd->data_size, aq_total_len);
1666 cmd->data_size = aq_total_len;
1669 i40e_memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size,
1670 I40E_NONDMA_TO_NONDMA);
1672 return I40E_SUCCESS;
1676 * i40e_nvmupd_nvm_read - Read NVM
1677 * @hw: pointer to hardware structure
1678 * @cmd: pointer to nvm update command buffer
1679 * @bytes: pointer to the data buffer
1680 * @perrno: pointer to return error code
1682 * cmd structure contains identifiers and data buffer
1684 STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
1685 struct i40e_nvm_access *cmd,
1686 u8 *bytes, int *perrno)
1688 struct i40e_asq_cmd_details cmd_details;
1689 enum i40e_status_code status;
1690 u8 module, transaction;
1693 transaction = i40e_nvmupd_get_transaction(cmd->config);
1694 module = i40e_nvmupd_get_module(cmd->config);
1695 last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
1697 memset(&cmd_details, 0, sizeof(cmd_details));
1698 cmd_details.wb_desc = &hw->nvm_wb_desc;
1700 status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1701 bytes, last, &cmd_details);
1703 i40e_debug(hw, I40E_DEBUG_NVM,
1704 "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
1705 module, cmd->offset, cmd->data_size);
1706 i40e_debug(hw, I40E_DEBUG_NVM,
1707 "i40e_nvmupd_nvm_read status %d aq %d\n",
1708 status, hw->aq.asq_last_status);
1709 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1716 * i40e_nvmupd_nvm_erase - Erase an NVM module
1717 * @hw: pointer to hardware structure
1718 * @cmd: pointer to nvm update command buffer
1719 * @perrno: pointer to return error code
1721 * module, offset, data_size and data are in cmd structure
1723 STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
1724 struct i40e_nvm_access *cmd,
1727 enum i40e_status_code status = I40E_SUCCESS;
1728 struct i40e_asq_cmd_details cmd_details;
1729 u8 module, transaction;
1732 transaction = i40e_nvmupd_get_transaction(cmd->config);
1733 module = i40e_nvmupd_get_module(cmd->config);
1734 last = (transaction & I40E_NVM_LCB);
1736 memset(&cmd_details, 0, sizeof(cmd_details));
1737 cmd_details.wb_desc = &hw->nvm_wb_desc;
1739 status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1740 last, &cmd_details);
1742 i40e_debug(hw, I40E_DEBUG_NVM,
1743 "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
1744 module, cmd->offset, cmd->data_size);
1745 i40e_debug(hw, I40E_DEBUG_NVM,
1746 "i40e_nvmupd_nvm_erase status %d aq %d\n",
1747 status, hw->aq.asq_last_status);
1748 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1755 * i40e_nvmupd_nvm_write - Write NVM
1756 * @hw: pointer to hardware structure
1757 * @cmd: pointer to nvm update command buffer
1758 * @bytes: pointer to the data buffer
1759 * @perrno: pointer to return error code
1761 * module, offset, data_size and data are in cmd structure
1763 STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
1764 struct i40e_nvm_access *cmd,
1765 u8 *bytes, int *perrno)
1767 enum i40e_status_code status = I40E_SUCCESS;
1768 struct i40e_asq_cmd_details cmd_details;
1769 u8 module, transaction;
1770 u8 preservation_flags;
1773 transaction = i40e_nvmupd_get_transaction(cmd->config);
1774 module = i40e_nvmupd_get_module(cmd->config);
1775 last = (transaction & I40E_NVM_LCB);
1776 preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
1778 memset(&cmd_details, 0, sizeof(cmd_details));
1779 cmd_details.wb_desc = &hw->nvm_wb_desc;
1781 status = i40e_aq_update_nvm(hw, module, cmd->offset,
1782 (u16)cmd->data_size, bytes, last,
1783 preservation_flags, &cmd_details);
1785 i40e_debug(hw, I40E_DEBUG_NVM,
1786 "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
1787 module, cmd->offset, cmd->data_size);
1788 i40e_debug(hw, I40E_DEBUG_NVM,
1789 "i40e_nvmupd_nvm_write status %d aq %d\n",
1790 status, hw->aq.asq_last_status);
1791 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);