1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2020 Intel Corporation
7 #include "i40e_prototype.h"
10 * i40e_init_nvm_ops - Initialize NVM function pointers
11 * @hw: pointer to the HW structure
13 * Setup the function pointers and the NVM info structure. Should be called
14 * once per NVM initialization, e.g. inside the i40e_init_shared_code().
15 * Please notice that the NVM term is used here (& in all methods covered
16 * in this file) as an equivalent of the FLASH part mapped into the SR.
17 * We are accessing FLASH always through the Shadow RAM.
19 enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
21 struct i40e_nvm_info *nvm = &hw->nvm;
22 enum i40e_status_code ret_code = I40E_SUCCESS;
26 DEBUGFUNC("i40e_init_nvm");
28 /* The SR size is stored regardless of the nvm programming mode
29 * as the blank mode may be used in the factory line.
31 gens = rd32(hw, I40E_GLNVM_GENS);
32 sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
33 I40E_GLNVM_GENS_SR_SIZE_SHIFT);
34 /* Switching to words (sr_size contains power of 2KB) */
35 nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
37 /* Check if we are in the normal or blank NVM programming mode */
38 fla = rd32(hw, I40E_GLNVM_FLA);
39 if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
41 nvm->timeout = I40E_MAX_NVM_TIMEOUT;
42 nvm->blank_nvm_mode = false;
43 } else { /* Blank programming mode */
44 nvm->blank_nvm_mode = true;
45 ret_code = I40E_ERR_NVM_BLANK_MODE;
46 i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
53 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
54 * @hw: pointer to the HW structure
55 * @access: NVM access type (read or write)
57 * This function will request NVM ownership for reading
58 * via the proper Admin Command.
60 enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
61 enum i40e_aq_resource_access_type access)
63 enum i40e_status_code ret_code = I40E_SUCCESS;
67 DEBUGFUNC("i40e_acquire_nvm");
69 if (hw->nvm.blank_nvm_mode)
70 goto i40e_i40e_acquire_nvm_exit;
72 ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
74 /* Reading the Global Device Timer */
75 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
77 /* Store the timeout */
78 hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
81 i40e_debug(hw, I40E_DEBUG_NVM,
82 "NVM acquire type %d failed time_left=%" PRIu64 " ret=%d aq_err=%d\n",
83 access, time_left, ret_code, hw->aq.asq_last_status);
85 if (ret_code && time_left) {
86 /* Poll until the current NVM owner timeouts */
87 timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
88 while ((gtime < timeout) && time_left) {
90 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
91 ret_code = i40e_aq_request_resource(hw,
93 access, 0, &time_left,
95 if (ret_code == I40E_SUCCESS) {
96 hw->nvm.hw_semaphore_timeout =
97 I40E_MS_TO_GTIME(time_left) + gtime;
101 if (ret_code != I40E_SUCCESS) {
102 hw->nvm.hw_semaphore_timeout = 0;
103 i40e_debug(hw, I40E_DEBUG_NVM,
104 "NVM acquire timed out, wait %" PRIu64 " ms before trying again. status=%d aq_err=%d\n",
105 time_left, ret_code, hw->aq.asq_last_status);
109 i40e_i40e_acquire_nvm_exit:
114 * i40e_release_nvm - Generic request for releasing the NVM ownership
115 * @hw: pointer to the HW structure
117 * This function will release NVM resource via the proper Admin Command.
119 void i40e_release_nvm(struct i40e_hw *hw)
121 enum i40e_status_code ret_code = I40E_SUCCESS;
124 DEBUGFUNC("i40e_release_nvm");
126 if (hw->nvm.blank_nvm_mode)
129 ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
131 /* there are some rare cases when trying to release the resource
132 * results in an admin Q timeout, so handle them correctly
134 while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
135 (total_delay < hw->aq.asq_cmd_timeout)) {
137 ret_code = i40e_aq_release_resource(hw,
138 I40E_NVM_RESOURCE_ID, 0, NULL);
144 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
145 * @hw: pointer to the HW structure
147 * Polls the SRCTL Shadow RAM register done bit.
149 static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
151 enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
154 DEBUGFUNC("i40e_poll_sr_srctl_done_bit");
156 /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
157 for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
158 srctl = rd32(hw, I40E_GLNVM_SRCTL);
159 if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
160 ret_code = I40E_SUCCESS;
165 if (ret_code == I40E_ERR_TIMEOUT)
166 i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
171 * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
172 * @hw: pointer to the HW structure
173 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
174 * @data: word read from the Shadow RAM
176 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
178 STATIC enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw,
182 enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
185 DEBUGFUNC("i40e_read_nvm_word_srctl");
187 if (offset >= hw->nvm.sr_size) {
188 i40e_debug(hw, I40E_DEBUG_NVM,
189 "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
190 offset, hw->nvm.sr_size);
191 ret_code = I40E_ERR_PARAM;
195 /* Poll the done bit first */
196 ret_code = i40e_poll_sr_srctl_done_bit(hw);
197 if (ret_code == I40E_SUCCESS) {
198 /* Write the address and start reading */
199 sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
200 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
201 wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
203 /* Poll I40E_GLNVM_SRCTL until the done bit is set */
204 ret_code = i40e_poll_sr_srctl_done_bit(hw);
205 if (ret_code == I40E_SUCCESS) {
206 sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
207 *data = (u16)((sr_reg &
208 I40E_GLNVM_SRDATA_RDDATA_MASK)
209 >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
212 if (ret_code != I40E_SUCCESS)
213 i40e_debug(hw, I40E_DEBUG_NVM,
214 "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
222 * i40e_read_nvm_aq - Read Shadow RAM.
223 * @hw: pointer to the HW structure.
224 * @module_pointer: module pointer location in words from the NVM beginning
225 * @offset: offset in words from module start
226 * @words: number of words to write
227 * @data: buffer with words to write to the Shadow RAM
228 * @last_command: tells the AdminQ that this is the last command
230 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
232 STATIC enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw,
233 u8 module_pointer, u32 offset,
234 u16 words, void *data,
237 enum i40e_status_code ret_code = I40E_ERR_NVM;
238 struct i40e_asq_cmd_details cmd_details;
240 DEBUGFUNC("i40e_read_nvm_aq");
242 memset(&cmd_details, 0, sizeof(cmd_details));
243 cmd_details.wb_desc = &hw->nvm_wb_desc;
245 /* Here we are checking the SR limit only for the flat memory model.
246 * We cannot do it for the module-based model, as we did not acquire
247 * the NVM resource yet (we cannot get the module pointer value).
248 * Firmware will check the module-based model.
250 if ((offset + words) > hw->nvm.sr_size)
251 i40e_debug(hw, I40E_DEBUG_NVM,
252 "NVM write error: offset %d beyond Shadow RAM limit %d\n",
253 (offset + words), hw->nvm.sr_size);
254 else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
255 /* We can write only up to 4KB (one sector), in one AQ write */
256 i40e_debug(hw, I40E_DEBUG_NVM,
257 "NVM write fail error: tried to write %d words, limit is %d.\n",
258 words, I40E_SR_SECTOR_SIZE_IN_WORDS);
259 else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
260 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
261 /* A single write cannot spread over two sectors */
262 i40e_debug(hw, I40E_DEBUG_NVM,
263 "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
266 ret_code = i40e_aq_read_nvm(hw, module_pointer,
267 2 * offset, /*bytes*/
269 data, last_command, &cmd_details);
275 * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
276 * @hw: pointer to the HW structure
277 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
278 * @data: word read from the Shadow RAM
280 * Reads one 16 bit word from the Shadow RAM using the AdminQ
282 STATIC enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
285 enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
287 DEBUGFUNC("i40e_read_nvm_word_aq");
289 ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
290 *data = LE16_TO_CPU(*(__le16 *)data);
296 * __i40e_read_nvm_word - Reads NVM word, assumes caller does the locking
297 * @hw: pointer to the HW structure
298 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
299 * @data: word read from the Shadow RAM
301 * Reads one 16 bit word from the Shadow RAM.
303 * Do not use this function except in cases where the nvm lock is already
304 * taken via i40e_acquire_nvm().
306 enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
311 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
312 return i40e_read_nvm_word_aq(hw, offset, data);
314 return i40e_read_nvm_word_srctl(hw, offset, data);
318 * i40e_read_nvm_word - Reads NVM word, acquires lock if necessary
319 * @hw: pointer to the HW structure
320 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
321 * @data: word read from the Shadow RAM
323 * Reads one 16 bit word from the Shadow RAM.
325 enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
328 enum i40e_status_code ret_code = I40E_SUCCESS;
330 if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
331 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
335 ret_code = __i40e_read_nvm_word(hw, offset, data);
337 if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
338 i40e_release_nvm(hw);
343 * i40e_read_nvm_module_data - Reads NVM Buffer to specified memory location
344 * @hw: Pointer to the HW structure
345 * @module_ptr: Pointer to module in words with respect to NVM beginning
346 * @module_offset: Offset in words from module start
347 * @data_offset: Offset in words from reading data area start
348 * @words_data_size: Words to read from NVM
349 * @data_ptr: Pointer to memory location where resulting buffer will be stored
351 enum i40e_status_code
352 i40e_read_nvm_module_data(struct i40e_hw *hw, u8 module_ptr, u16 module_offset,
353 u16 data_offset, u16 words_data_size, u16 *data_ptr)
355 enum i40e_status_code status;
356 u16 specific_ptr = 0;
360 if (module_ptr != 0) {
361 status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
362 if (status != I40E_SUCCESS) {
363 i40e_debug(hw, I40E_DEBUG_ALL,
364 "Reading nvm word failed.Error code: %d.\n",
369 #define I40E_NVM_INVALID_PTR_VAL 0x7FFF
370 #define I40E_NVM_INVALID_VAL 0xFFFF
372 /* Pointer not initialized */
373 if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
374 ptr_value == I40E_NVM_INVALID_VAL) {
375 i40e_debug(hw, I40E_DEBUG_ALL, "Pointer not initialized.\n");
376 return I40E_ERR_BAD_PTR;
379 /* Check whether the module is in SR mapped area or outside */
380 if (ptr_value & I40E_PTR_TYPE) {
381 /* Pointer points outside of the Shared RAM mapped area */
382 i40e_debug(hw, I40E_DEBUG_ALL,
383 "Reading nvm data failed. Pointer points outside of the Shared RAM mapped area.\n");
385 return I40E_ERR_PARAM;
387 /* Read from the Shadow RAM */
389 status = i40e_read_nvm_word(hw, ptr_value + module_offset,
391 if (status != I40E_SUCCESS) {
392 i40e_debug(hw, I40E_DEBUG_ALL,
393 "Reading nvm word failed.Error code: %d.\n",
398 offset = ptr_value + module_offset + specific_ptr +
401 status = i40e_read_nvm_buffer(hw, offset, &words_data_size,
403 if (status != I40E_SUCCESS) {
404 i40e_debug(hw, I40E_DEBUG_ALL,
405 "Reading nvm buffer failed.Error code: %d.\n",
414 * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
415 * @hw: pointer to the HW structure
416 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
417 * @words: (in) number of words to read; (out) number of words actually read
418 * @data: words read from the Shadow RAM
420 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
421 * method. The buffer read is preceded by the NVM ownership take
422 * and followed by the release.
424 STATIC enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
425 u16 *words, u16 *data)
427 enum i40e_status_code ret_code = I40E_SUCCESS;
430 DEBUGFUNC("i40e_read_nvm_buffer_srctl");
432 /* Loop through the selected region */
433 for (word = 0; word < *words; word++) {
434 index = offset + word;
435 ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
436 if (ret_code != I40E_SUCCESS)
440 /* Update the number of words read from the Shadow RAM */
447 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
448 * @hw: pointer to the HW structure
449 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
450 * @words: (in) number of words to read; (out) number of words actually read
451 * @data: words read from the Shadow RAM
453 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
454 * method. The buffer read is preceded by the NVM ownership take
455 * and followed by the release.
457 STATIC enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
458 u16 *words, u16 *data)
460 enum i40e_status_code ret_code;
461 u16 read_size = *words;
462 bool last_cmd = false;
466 DEBUGFUNC("i40e_read_nvm_buffer_aq");
469 /* Calculate number of bytes we should read in this step.
470 * FVL AQ do not allow to read more than one page at a time or
471 * to cross page boundaries.
473 if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
474 read_size = min(*words,
475 (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
476 (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
478 read_size = min((*words - words_read),
479 I40E_SR_SECTOR_SIZE_IN_WORDS);
481 /* Check if this is last command, if so set proper flag */
482 if ((words_read + read_size) >= *words)
485 ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
486 data + words_read, last_cmd);
487 if (ret_code != I40E_SUCCESS)
488 goto read_nvm_buffer_aq_exit;
490 /* Increment counter for words already read and move offset to
493 words_read += read_size;
495 } while (words_read < *words);
497 for (i = 0; i < *words; i++)
498 data[i] = LE16_TO_CPU(((__le16 *)data)[i]);
500 read_nvm_buffer_aq_exit:
506 * __i40e_read_nvm_buffer - Reads NVM buffer, caller must acquire lock
507 * @hw: pointer to the HW structure
508 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
509 * @words: (in) number of words to read; (out) number of words actually read
510 * @data: words read from the Shadow RAM
512 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
515 enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
517 u16 *words, u16 *data)
519 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
520 return i40e_read_nvm_buffer_aq(hw, offset, words, data);
522 return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
526 * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquire lock if necessary
527 * @hw: pointer to the HW structure
528 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
529 * @words: (in) number of words to read; (out) number of words actually read
530 * @data: words read from the Shadow RAM
532 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
533 * method. The buffer read is preceded by the NVM ownership take
534 * and followed by the release.
536 enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
537 u16 *words, u16 *data)
539 enum i40e_status_code ret_code = I40E_SUCCESS;
541 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
542 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
544 ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
546 i40e_release_nvm(hw);
549 ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
556 * i40e_write_nvm_aq - Writes Shadow RAM.
557 * @hw: pointer to the HW structure.
558 * @module_pointer: module pointer location in words from the NVM beginning
559 * @offset: offset in words from module start
560 * @words: number of words to write
561 * @data: buffer with words to write to the Shadow RAM
562 * @last_command: tells the AdminQ that this is the last command
564 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
566 enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
567 u32 offset, u16 words, void *data,
570 enum i40e_status_code ret_code = I40E_ERR_NVM;
571 struct i40e_asq_cmd_details cmd_details;
573 DEBUGFUNC("i40e_write_nvm_aq");
575 memset(&cmd_details, 0, sizeof(cmd_details));
576 cmd_details.wb_desc = &hw->nvm_wb_desc;
578 /* Here we are checking the SR limit only for the flat memory model.
579 * We cannot do it for the module-based model, as we did not acquire
580 * the NVM resource yet (we cannot get the module pointer value).
581 * Firmware will check the module-based model.
583 if ((offset + words) > hw->nvm.sr_size)
584 DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n");
585 else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
586 /* We can write only up to 4KB (one sector), in one AQ write */
587 DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n");
588 else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
589 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
590 /* A single write cannot spread over two sectors */
591 DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n");
593 ret_code = i40e_aq_update_nvm(hw, module_pointer,
594 2 * offset, /*bytes*/
596 data, last_command, 0,
603 * __i40e_write_nvm_word - Writes Shadow RAM word
604 * @hw: pointer to the HW structure
605 * @offset: offset of the Shadow RAM word to write
606 * @data: word to write to the Shadow RAM
608 * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
609 * NVM ownership have to be acquired and released (on ARQ completion event
610 * reception) by caller. To commit SR to NVM update checksum function
613 enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
616 DEBUGFUNC("i40e_write_nvm_word");
618 *((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));
620 /* Value 0x00 below means that we treat SR as a flat mem */
621 return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, false);
625 * __i40e_write_nvm_buffer - Writes Shadow RAM buffer
626 * @hw: pointer to the HW structure
627 * @module_pointer: module pointer location in words from the NVM beginning
628 * @offset: offset of the Shadow RAM buffer to write
629 * @words: number of words to write
630 * @data: words to write to the Shadow RAM
632 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
633 * NVM ownership must be acquired before calling this function and released
634 * on ARQ completion event reception by caller. To commit SR to NVM update
635 * checksum function should be called.
637 enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw,
638 u8 module_pointer, u32 offset,
639 u16 words, void *data)
641 __le16 *le_word_ptr = (__le16 *)data;
642 u16 *word_ptr = (u16 *)data;
645 DEBUGFUNC("i40e_write_nvm_buffer");
647 for (i = 0; i < words; i++)
648 le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);
650 /* Here we will only write one buffer as the size of the modules
651 * mirrored in the Shadow RAM is always less than 4K.
653 return i40e_write_nvm_aq(hw, module_pointer, offset, words,
658 * i40e_calc_nvm_checksum - Calculates and returns the checksum
659 * @hw: pointer to hardware structure
660 * @checksum: pointer to the checksum
662 * This function calculates SW Checksum that covers the whole 64kB shadow RAM
663 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
664 * is customer specific and unknown. Therefore, this function skips all maximum
665 * possible size of VPD (1kB).
667 enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
669 enum i40e_status_code ret_code = I40E_SUCCESS;
670 struct i40e_virt_mem vmem;
671 u16 pcie_alt_module = 0;
672 u16 checksum_local = 0;
677 DEBUGFUNC("i40e_calc_nvm_checksum");
679 ret_code = i40e_allocate_virt_mem(hw, &vmem,
680 I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
682 goto i40e_calc_nvm_checksum_exit;
683 data = (u16 *)vmem.va;
685 /* read pointer to VPD area */
686 ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
687 if (ret_code != I40E_SUCCESS) {
688 ret_code = I40E_ERR_NVM_CHECKSUM;
689 goto i40e_calc_nvm_checksum_exit;
692 /* read pointer to PCIe Alt Auto-load module */
693 ret_code = __i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
695 if (ret_code != I40E_SUCCESS) {
696 ret_code = I40E_ERR_NVM_CHECKSUM;
697 goto i40e_calc_nvm_checksum_exit;
700 /* Calculate SW checksum that covers the whole 64kB shadow RAM
701 * except the VPD and PCIe ALT Auto-load modules
703 for (i = 0; i < hw->nvm.sr_size; i++) {
705 if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
706 u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
708 ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
709 if (ret_code != I40E_SUCCESS) {
710 ret_code = I40E_ERR_NVM_CHECKSUM;
711 goto i40e_calc_nvm_checksum_exit;
715 /* Skip Checksum word */
716 if (i == I40E_SR_SW_CHECKSUM_WORD)
718 /* Skip VPD module (convert byte size to word count) */
719 if ((i >= (u32)vpd_module) &&
720 (i < ((u32)vpd_module +
721 (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
724 /* Skip PCIe ALT module (convert byte size to word count) */
725 if ((i >= (u32)pcie_alt_module) &&
726 (i < ((u32)pcie_alt_module +
727 (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
731 checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
734 *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
736 i40e_calc_nvm_checksum_exit:
737 i40e_free_virt_mem(hw, &vmem);
742 * i40e_update_nvm_checksum - Updates the NVM checksum
743 * @hw: pointer to hardware structure
745 * NVM ownership must be acquired before calling this function and released
746 * on ARQ completion event reception by caller.
747 * This function will commit SR to NVM.
749 enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
751 enum i40e_status_code ret_code = I40E_SUCCESS;
755 DEBUGFUNC("i40e_update_nvm_checksum");
757 ret_code = i40e_calc_nvm_checksum(hw, &checksum);
758 le_sum = CPU_TO_LE16(checksum);
759 if (ret_code == I40E_SUCCESS)
760 ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
767 * i40e_validate_nvm_checksum - Validate EEPROM checksum
768 * @hw: pointer to hardware structure
769 * @checksum: calculated checksum
771 * Performs checksum calculation and validates the NVM SW checksum. If the
772 * caller does not need checksum, the value can be NULL.
774 enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
777 enum i40e_status_code ret_code = I40E_SUCCESS;
779 u16 checksum_local = 0;
781 DEBUGFUNC("i40e_validate_nvm_checksum");
783 /* We must acquire the NVM lock in order to correctly synchronize the
784 * NVM accesses across multiple PFs. Without doing so it is possible
785 * for one of the PFs to read invalid data potentially indicating that
786 * the checksum is invalid.
788 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
791 ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
792 __i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
793 i40e_release_nvm(hw);
797 /* Verify read checksum from EEPROM is the same as
798 * calculated checksum
800 if (checksum_local != checksum_sr)
801 ret_code = I40E_ERR_NVM_CHECKSUM;
803 /* If the user cares, return the calculated checksum */
805 *checksum = checksum_local;
810 STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
811 struct i40e_nvm_access *cmd,
812 u8 *bytes, int *perrno);
813 STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
814 struct i40e_nvm_access *cmd,
815 u8 *bytes, int *perrno);
816 STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
817 struct i40e_nvm_access *cmd,
818 u8 *bytes, int *perrno);
819 STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
820 struct i40e_nvm_access *cmd,
822 STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
823 struct i40e_nvm_access *cmd,
825 STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
826 struct i40e_nvm_access *cmd,
827 u8 *bytes, int *perrno);
828 STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
829 struct i40e_nvm_access *cmd,
830 u8 *bytes, int *perrno);
831 STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
832 struct i40e_nvm_access *cmd,
833 u8 *bytes, int *perrno);
834 STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
835 struct i40e_nvm_access *cmd,
836 u8 *bytes, int *perrno);
837 STATIC enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
838 struct i40e_nvm_access *cmd,
839 u8 *bytes, int *perrno);
840 STATIC INLINE u8 i40e_nvmupd_get_module(u32 val)
842 return (u8)(val & I40E_NVM_MOD_PNT_MASK);
844 STATIC INLINE u8 i40e_nvmupd_get_transaction(u32 val)
846 return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
849 STATIC INLINE u8 i40e_nvmupd_get_preservation_flags(u32 val)
851 return (u8)((val & I40E_NVM_PRESERVATION_FLAGS_MASK) >>
852 I40E_NVM_PRESERVATION_FLAGS_SHIFT);
855 STATIC const char *i40e_nvm_update_state_str[] = {
856 "I40E_NVMUPD_INVALID",
857 "I40E_NVMUPD_READ_CON",
858 "I40E_NVMUPD_READ_SNT",
859 "I40E_NVMUPD_READ_LCB",
860 "I40E_NVMUPD_READ_SA",
861 "I40E_NVMUPD_WRITE_ERA",
862 "I40E_NVMUPD_WRITE_CON",
863 "I40E_NVMUPD_WRITE_SNT",
864 "I40E_NVMUPD_WRITE_LCB",
865 "I40E_NVMUPD_WRITE_SA",
866 "I40E_NVMUPD_CSUM_CON",
867 "I40E_NVMUPD_CSUM_SA",
868 "I40E_NVMUPD_CSUM_LCB",
869 "I40E_NVMUPD_STATUS",
870 "I40E_NVMUPD_EXEC_AQ",
871 "I40E_NVMUPD_GET_AQ_RESULT",
872 "I40E_NVMUPD_GET_AQ_EVENT",
873 "I40E_NVMUPD_GET_FEATURES",
877 * i40e_nvmupd_command - Process an NVM update command
878 * @hw: pointer to hardware structure
879 * @cmd: pointer to nvm update command
880 * @bytes: pointer to the data buffer
881 * @perrno: pointer to return error code
883 * Dispatches command depending on what update state is current
885 enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
886 struct i40e_nvm_access *cmd,
887 u8 *bytes, int *perrno)
889 enum i40e_status_code status;
890 enum i40e_nvmupd_cmd upd_cmd;
892 DEBUGFUNC("i40e_nvmupd_command");
897 /* early check for status command and debug msgs */
898 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
900 i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
901 i40e_nvm_update_state_str[upd_cmd],
903 hw->nvm_release_on_done, hw->nvm_wait_opcode,
904 cmd->command, cmd->config, cmd->offset, cmd->data_size);
906 if (upd_cmd == I40E_NVMUPD_INVALID) {
908 i40e_debug(hw, I40E_DEBUG_NVM,
909 "i40e_nvmupd_validate_command returns %d errno %d\n",
913 /* a status request returns immediately rather than
914 * going into the state machine
916 if (upd_cmd == I40E_NVMUPD_STATUS) {
917 if (!cmd->data_size) {
919 return I40E_ERR_BUF_TOO_SHORT;
922 bytes[0] = hw->nvmupd_state;
924 if (cmd->data_size >= 4) {
926 *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
929 /* Clear error status on read */
930 if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
931 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
937 * A supported features request returns immediately
938 * rather than going into state machine
940 if (upd_cmd == I40E_NVMUPD_FEATURES) {
941 if (cmd->data_size < hw->nvmupd_features.size) {
943 return I40E_ERR_BUF_TOO_SHORT;
947 * If buffer is bigger than i40e_nvmupd_features structure,
948 * make sure the trailing bytes are set to 0x0.
950 if (cmd->data_size > hw->nvmupd_features.size)
951 i40e_memset(bytes + hw->nvmupd_features.size, 0x0,
952 cmd->data_size - hw->nvmupd_features.size,
955 i40e_memcpy(bytes, &hw->nvmupd_features,
956 hw->nvmupd_features.size, I40E_NONDMA_MEM);
961 /* Clear status even it is not read and log */
962 if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
963 i40e_debug(hw, I40E_DEBUG_NVM,
964 "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
965 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
968 /* Acquire lock to prevent race condition where adminq_task
969 * can execute after i40e_nvmupd_nvm_read/write but before state
970 * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
972 * During NVMUpdate, it is observed that lock could be held for
973 * ~5ms for most commands. However lock is held for ~60ms for
974 * NVMUPD_CSUM_LCB command.
976 i40e_acquire_spinlock(&hw->aq.arq_spinlock);
977 switch (hw->nvmupd_state) {
978 case I40E_NVMUPD_STATE_INIT:
979 status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
982 case I40E_NVMUPD_STATE_READING:
983 status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
986 case I40E_NVMUPD_STATE_WRITING:
987 status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
990 case I40E_NVMUPD_STATE_INIT_WAIT:
991 case I40E_NVMUPD_STATE_WRITE_WAIT:
992 /* if we need to stop waiting for an event, clear
993 * the wait info and return before doing anything else
995 if (cmd->offset == 0xffff) {
996 i40e_nvmupd_clear_wait_state(hw);
997 status = I40E_SUCCESS;
1001 status = I40E_ERR_NOT_READY;
1006 /* invalid state, should never happen */
1007 i40e_debug(hw, I40E_DEBUG_NVM,
1008 "NVMUPD: no such state %d\n", hw->nvmupd_state);
1009 status = I40E_NOT_SUPPORTED;
1014 i40e_release_spinlock(&hw->aq.arq_spinlock);
1019 * i40e_nvmupd_state_init - Handle NVM update state Init
1020 * @hw: pointer to hardware structure
1021 * @cmd: pointer to nvm update command buffer
1022 * @bytes: pointer to the data buffer
1023 * @perrno: pointer to return error code
1025 * Process legitimate commands of the Init state and conditionally set next
1026 * state. Reject all other commands.
/**
 * i40e_nvmupd_state_init - Handle NVM update state Init
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Dispatch on the validated command type: acquire NVM ownership where
 * needed, run the matching read/write/erase/checksum helper, and advance
 * hw->nvmupd_state for multi-step (SNT) or asynchronous (WAIT) sequences.
 * On acquire failure the AQ error is translated to a POSIX errno for the
 * caller via *perrno.
 */
1028 STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
1029 struct i40e_nvm_access *cmd,
1030 u8 *bytes, int *perrno)
1032 enum i40e_status_code status = I40E_SUCCESS;
1033 enum i40e_nvmupd_cmd upd_cmd;
1035 DEBUGFUNC("i40e_nvmupd_state_init");
1037 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
/* One-shot read: take read ownership, read, release immediately */
1040 case I40E_NVMUPD_READ_SA:
1041 status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
1043 *perrno = i40e_aq_rc_to_posix(status,
1044 hw->aq.asq_last_status);
1046 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1047 i40e_release_nvm(hw);
/* Start of a multi-part read: keep ownership, move to READING state */
1051 case I40E_NVMUPD_READ_SNT:
1052 status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
1054 *perrno = i40e_aq_rc_to_posix(status,
1055 hw->aq.asq_last_status);
1057 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1059 i40e_release_nvm(hw);
1061 hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
/* Erase: completes asynchronously; release ownership when the
 * nvm_erase AQ completion event arrives (see clear_wait_state).
 */
1065 case I40E_NVMUPD_WRITE_ERA:
1066 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1068 *perrno = i40e_aq_rc_to_posix(status,
1069 hw->aq.asq_last_status);
1071 status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
1073 i40e_release_nvm(hw);
1075 hw->nvm_release_on_done = true;
1076 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
1077 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
/* One-shot write: async completion releases ownership */
1082 case I40E_NVMUPD_WRITE_SA:
1083 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1085 *perrno = i40e_aq_rc_to_posix(status,
1086 hw->aq.asq_last_status);
1088 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1090 i40e_release_nvm(hw);
1092 hw->nvm_release_on_done = true;
1093 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1094 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
/* Start of a multi-part write: hold ownership across the sequence */
1099 case I40E_NVMUPD_WRITE_SNT:
1100 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1102 *perrno = i40e_aq_rc_to_posix(status,
1103 hw->aq.asq_last_status);
1105 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1107 i40e_release_nvm(hw);
1109 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1110 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
/* Standalone checksum update over the whole NVM */
1115 case I40E_NVMUPD_CSUM_SA:
1116 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1118 *perrno = i40e_aq_rc_to_posix(status,
1119 hw->aq.asq_last_status);
1121 status = i40e_update_nvm_checksum(hw);
/* Prefer the AQ error translation when the AQ reported one */
1123 *perrno = hw->aq.asq_last_status ?
1124 i40e_aq_rc_to_posix(status,
1125 hw->aq.asq_last_status) :
1127 i40e_release_nvm(hw);
1129 hw->nvm_release_on_done = true;
1130 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1131 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
/* Pass-through AdminQ execution and result/event retrieval */
1136 case I40E_NVMUPD_EXEC_AQ:
1137 status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
1140 case I40E_NVMUPD_GET_AQ_RESULT:
1141 status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
1144 case I40E_NVMUPD_GET_AQ_EVENT:
1145 status = i40e_nvmupd_get_aq_event(hw, cmd, bytes, perrno);
/* Any other command is invalid while in the Init state */
1149 i40e_debug(hw, I40E_DEBUG_NVM,
1150 "NVMUPD: bad cmd %s in init state\n",
1151 i40e_nvm_update_state_str[upd_cmd]);
1152 status = I40E_ERR_NVM;
1160 * i40e_nvmupd_state_reading - Handle NVM update state Reading
1161 * @hw: pointer to hardware structure
1162 * @cmd: pointer to nvm update command buffer
1163 * @bytes: pointer to the data buffer
1164 * @perrno: pointer to return error code
1166 * NVM ownership is already held. Process legitimate commands and set any
1167 * change in state; reject all other commands.
1169 STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
1170 struct i40e_nvm_access *cmd,
1171 u8 *bytes, int *perrno)
1173 enum i40e_status_code status = I40E_SUCCESS;
1174 enum i40e_nvmupd_cmd upd_cmd;
1176 DEBUGFUNC("i40e_nvmupd_state_reading");
1178 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
/* Continue the in-progress read; ownership stays held */
1181 case I40E_NVMUPD_READ_SA:
1182 case I40E_NVMUPD_READ_CON:
1183 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
/* Last chunk: read, then release ownership and return to Init */
1186 case I40E_NVMUPD_READ_LCB:
1187 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1188 i40e_release_nvm(hw);
1189 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
/* Only reads are legal while a multi-part read is in flight */
1193 i40e_debug(hw, I40E_DEBUG_NVM,
1194 "NVMUPD: bad cmd %s in reading state.\n",
1195 i40e_nvm_update_state_str[upd_cmd]);
1196 status = I40E_NOT_SUPPORTED;
1204 * i40e_nvmupd_state_writing - Handle NVM update state Writing
1205 * @hw: pointer to hardware structure
1206 * @cmd: pointer to nvm update command buffer
1207 * @bytes: pointer to the data buffer
1208 * @perrno: pointer to return error code
1210 * NVM ownership is already held. Process legitimate commands and set any
1211 * change in state; reject all other commands
1213 STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
1214 struct i40e_nvm_access *cmd,
1215 u8 *bytes, int *perrno)
1217 enum i40e_status_code status = I40E_SUCCESS;
1218 enum i40e_nvmupd_cmd upd_cmd;
1219 bool retry_attempt = false;
1221 DEBUGFUNC("i40e_nvmupd_state_writing");
1223 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
/* Continue the multi-part write and wait for the AQ completion */
1227 case I40E_NVMUPD_WRITE_CON:
1228 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1230 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1231 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
/* Last chunk of the write sequence */
1235 case I40E_NVMUPD_WRITE_LCB:
1236 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
/* Prefer the AQ error translation when the AQ reported one */
1238 *perrno = hw->aq.asq_last_status ?
1239 i40e_aq_rc_to_posix(status,
1240 hw->aq.asq_last_status) :
1242 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
/* Ownership is released when the completion event arrives */
1244 hw->nvm_release_on_done = true;
1245 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1246 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
/* Mid-sequence checksum update; sequence continues afterwards */
1250 case I40E_NVMUPD_CSUM_CON:
1251 /* Assumes the caller has acquired the nvm */
1252 status = i40e_update_nvm_checksum(hw);
1254 *perrno = hw->aq.asq_last_status ?
1255 i40e_aq_rc_to_posix(status,
1256 hw->aq.asq_last_status) :
1258 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1260 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1261 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
/* Checksum update as the last command of the sequence */
1265 case I40E_NVMUPD_CSUM_LCB:
1266 /* Assumes the caller has acquired the nvm */
1267 status = i40e_update_nvm_checksum(hw);
1269 *perrno = hw->aq.asq_last_status ?
1270 i40e_aq_rc_to_posix(status,
1271 hw->aq.asq_last_status) :
1273 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1275 hw->nvm_release_on_done = true;
1276 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1277 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
/* Only writes/checksums are legal while a write is in flight */
1282 i40e_debug(hw, I40E_DEBUG_NVM,
1283 "NVMUPD: bad cmd %s in writing state.\n",
1284 i40e_nvm_update_state_str[upd_cmd]);
1285 status = I40E_NOT_SUPPORTED;
1290 /* In some circumstances, a multi-write transaction takes longer
1291 * than the default 3 minute timeout on the write semaphore. If
1292 * the write failed with an EBUSY status, this is likely the problem,
1293 * so here we try to reacquire the semaphore then retry the write.
1294 * We only do one retry, then give up.
1296 if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
1298 enum i40e_status_code old_status = status;
1299 u32 old_asq_status = hw->aq.asq_last_status;
/* Only retry once the hardware semaphore has actually expired */
1302 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
1303 if (gtime >= hw->nvm.hw_semaphore_timeout) {
1304 i40e_debug(hw, I40E_DEBUG_ALL,
1305 "NVMUPD: write semaphore expired (%d >= %" PRIu64 "), retrying\n",
1306 gtime, hw->nvm.hw_semaphore_timeout);
1307 i40e_release_nvm(hw);
1308 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
/* Reacquire failed: restore the original error for the caller */
1310 i40e_debug(hw, I40E_DEBUG_ALL,
1311 "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
1312 hw->aq.asq_last_status);
1313 status = old_status;
1314 hw->aq.asq_last_status = old_asq_status;
1316 retry_attempt = true;
1326 * i40e_nvmupd_clear_wait_state - clear wait state on hw
1327 * @hw: pointer to the hardware structure
1329 void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
1331 i40e_debug(hw, I40E_DEBUG_NVM,
1332 "NVMUPD: clearing wait on opcode 0x%04x\n",
1333 hw->nvm_wait_opcode);
/* Release NVM ownership if a prior step deferred the release */
1335 if (hw->nvm_release_on_done) {
1336 i40e_release_nvm(hw);
1337 hw->nvm_release_on_done = false;
1339 hw->nvm_wait_opcode = 0;
/* An ARQ error during the wait aborts the state machine */
1341 if (hw->aq.arq_last_status) {
1342 hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
/* Otherwise advance from the WAIT state to its follow-on state */
1346 switch (hw->nvmupd_state) {
1347 case I40E_NVMUPD_STATE_INIT_WAIT:
1348 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1351 case I40E_NVMUPD_STATE_WRITE_WAIT:
1352 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
1361 * i40e_nvmupd_check_wait_event - handle NVM update operation events
1362 * @hw: pointer to the hardware structure
1363 * @opcode: the event that just happened
1364 * @desc: AdminQ descriptor
1366 void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
1367 struct i40e_aq_desc *desc)
1369 u32 aq_desc_len = sizeof(struct i40e_aq_desc);
/* Only the opcode we are waiting on completes the wait state; the
 * event descriptor is saved for a later GET_AQ_EVENT request.
 */
1371 if (opcode == hw->nvm_wait_opcode) {
1372 i40e_memcpy(&hw->nvm_aq_event_desc, desc,
1373 aq_desc_len, I40E_NONDMA_TO_NONDMA);
1374 i40e_nvmupd_clear_wait_state(hw);
1379 * i40e_nvmupd_validate_command - Validate given command
1380 * @hw: pointer to hardware structure
1381 * @cmd: pointer to nvm update command buffer
1382 * @perrno: pointer to return error code
1384 * Return one of the valid command types or I40E_NVMUPD_INVALID
1386 STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
1387 struct i40e_nvm_access *cmd,
1390 enum i40e_nvmupd_cmd upd_cmd;
1391 u8 module, transaction;
1393 DEBUGFUNC("i40e_nvmupd_validate_command\n");
1395 /* anything that doesn't match a recognized case is an error */
1396 upd_cmd = I40E_NVMUPD_INVALID;
/* Transaction type and module id are packed into cmd->config */
1398 transaction = i40e_nvmupd_get_transaction(cmd->config);
1399 module = i40e_nvmupd_get_module(cmd->config);
1401 /* limits on data size */
1402 if ((cmd->data_size < 1) ||
1403 (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
1404 i40e_debug(hw, I40E_DEBUG_NVM,
1405 "i40e_nvmupd_validate_command data_size %d\n",
1408 return I40E_NVMUPD_INVALID;
/* Map (command, transaction) pairs onto the internal command enum */
1411 switch (cmd->command) {
1413 switch (transaction) {
1415 upd_cmd = I40E_NVMUPD_READ_CON;
1418 upd_cmd = I40E_NVMUPD_READ_SNT;
1421 upd_cmd = I40E_NVMUPD_READ_LCB;
1424 upd_cmd = I40E_NVMUPD_READ_SA;
/* EXEC-style reads select sub-operations by module id */
1428 case I40E_NVM_EXEC_GET_AQ_RESULT:
1429 upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
1431 case I40E_NVM_EXEC_FEATURES:
1432 upd_cmd = I40E_NVMUPD_FEATURES;
1434 case I40E_NVM_EXEC_STATUS:
1435 upd_cmd = I40E_NVMUPD_STATUS;
1439 return I40E_NVMUPD_INVALID;
1443 upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
1448 case I40E_NVM_WRITE:
1449 switch (transaction) {
1451 upd_cmd = I40E_NVMUPD_WRITE_CON;
1454 upd_cmd = I40E_NVMUPD_WRITE_SNT;
1457 upd_cmd = I40E_NVMUPD_WRITE_LCB;
1460 upd_cmd = I40E_NVMUPD_WRITE_SA;
1463 upd_cmd = I40E_NVMUPD_WRITE_ERA;
1466 upd_cmd = I40E_NVMUPD_CSUM_CON;
1468 case (I40E_NVM_CSUM|I40E_NVM_SA):
1469 upd_cmd = I40E_NVMUPD_CSUM_SA;
1471 case (I40E_NVM_CSUM|I40E_NVM_LCB):
1472 upd_cmd = I40E_NVMUPD_CSUM_LCB;
1476 upd_cmd = I40E_NVMUPD_EXEC_AQ;
1486 * i40e_nvmupd_exec_aq - Run an AQ command
1487 * @hw: pointer to hardware structure
1488 * @cmd: pointer to nvm update command buffer
1489 * @bytes: pointer to the data buffer
1490 * @perrno: pointer to return error code
1492 * cmd structure contains identifiers and data buffer
1494 STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
1495 struct i40e_nvm_access *cmd,
1496 u8 *bytes, int *perrno)
1498 struct i40e_asq_cmd_details cmd_details;
1499 enum i40e_status_code status;
1500 struct i40e_aq_desc *aq_desc;
1506 i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
/* offset 0xffff appears to be a no-op sentinel — confirm with caller */
1507 if (cmd->offset == 0xffff)
1508 return I40E_SUCCESS;
1510 memset(&cmd_details, 0, sizeof(cmd_details));
1511 cmd_details.wb_desc = &hw->nvm_wb_desc;
1513 aq_desc_len = sizeof(struct i40e_aq_desc);
1514 memset(&hw->nvm_wb_desc, 0, aq_desc_len);
1516 /* get the aq descriptor */
/* The caller's buffer must contain at least a full AQ descriptor */
1517 if (cmd->data_size < aq_desc_len) {
1518 i40e_debug(hw, I40E_DEBUG_NVM,
1519 "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
1520 cmd->data_size, aq_desc_len);
1522 return I40E_ERR_PARAM;
1524 aq_desc = (struct i40e_aq_desc *)bytes;
1526 /* if data buffer needed, make sure it's ready */
1527 aq_data_len = cmd->data_size - aq_desc_len;
1528 buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen));
/* Lazily allocate a reusable bounce buffer for the AQ payload */
1530 if (!hw->nvm_buff.va) {
1531 status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
1532 hw->aq.asq_buf_size);
1534 i40e_debug(hw, I40E_DEBUG_NVM,
1535 "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
/* Copy the payload that follows the descriptor in the user buffer */
1539 if (hw->nvm_buff.va) {
1540 buff = hw->nvm_buff.va;
1541 i40e_memcpy(buff, &bytes[aq_desc_len], aq_data_len,
1542 I40E_NONDMA_TO_NONDMA);
/* Clear any stale event descriptor before issuing a new command */
1547 memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);
1549 /* and away we go! */
1550 status = i40e_asq_send_command(hw, aq_desc, buff,
1551 buff_size, &cmd_details);
1553 i40e_debug(hw, I40E_DEBUG_NVM,
1554 "i40e_nvmupd_exec_aq err %s aq_err %s\n",
1555 i40e_stat_str(hw, status),
1556 i40e_aq_str(hw, hw->aq.asq_last_status));
1557 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1561 /* should we wait for a followup event? */
/* cmd->offset doubles as the opcode of the expected follow-up event */
1563 hw->nvm_wait_opcode = cmd->offset;
1564 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1571 * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
1572 * @hw: pointer to hardware structure
1573 * @cmd: pointer to nvm update command buffer
1574 * @bytes: pointer to the data buffer
1575 * @perrno: pointer to return error code
1577 * cmd structure contains identifiers and data buffer
1579 STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
1580 struct i40e_nvm_access *cmd,
1581 u8 *bytes, int *perrno)
1588 i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
/* The result is the writeback descriptor followed by its data buffer */
1590 aq_desc_len = sizeof(struct i40e_aq_desc);
1591 aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen);
1593 /* check offset range */
1594 if (cmd->offset > aq_total_len) {
1595 i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
1596 __func__, cmd->offset, aq_total_len);
1598 return I40E_ERR_PARAM;
1601 /* check copylength range */
/* Trim rather than fail when the caller asks for too much */
1602 if (cmd->data_size > (aq_total_len - cmd->offset)) {
1603 int new_len = aq_total_len - cmd->offset;
1605 i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
1606 __func__, cmd->data_size, new_len);
1607 cmd->data_size = new_len;
/* First serve any requested bytes from the descriptor itself */
1610 remainder = cmd->data_size;
1611 if (cmd->offset < aq_desc_len) {
1612 u32 len = aq_desc_len - cmd->offset;
1614 len = min(len, cmd->data_size);
1615 i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
1616 __func__, cmd->offset, cmd->offset + len);
1618 buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
1619 i40e_memcpy(bytes, buff, len, I40E_NONDMA_TO_NONDMA);
/* Then position into the data buffer for the remainder */
1623 buff = hw->nvm_buff.va;
1625 buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len);
1628 if (remainder > 0) {
1629 int start_byte = buff - (u8 *)hw->nvm_buff.va;
1631 i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
1632 __func__, start_byte, start_byte + remainder);
1633 i40e_memcpy(bytes, buff, remainder, I40E_NONDMA_TO_NONDMA);
1636 return I40E_SUCCESS;
1640 * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
1641 * @hw: pointer to hardware structure
1642 * @cmd: pointer to nvm update command buffer
1643 * @bytes: pointer to the data buffer
1644 * @perrno: pointer to return error code
1646 * cmd structure contains identifiers and data buffer
1648 STATIC enum i40e_status_code i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
1649 struct i40e_nvm_access *cmd,
1650 u8 *bytes, int *perrno)
1655 i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
/* Total available = saved event descriptor plus its reported datalen */
1657 aq_desc_len = sizeof(struct i40e_aq_desc);
1658 aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_aq_event_desc.datalen);
1660 /* check copylength range */
/* Trim rather than fail when the caller asks for too much */
1661 if (cmd->data_size > aq_total_len) {
1662 i40e_debug(hw, I40E_DEBUG_NVM,
1663 "%s: copy length %d too big, trimming to %d\n",
1664 __func__, cmd->data_size, aq_total_len);
1665 cmd->data_size = aq_total_len;
1668 i40e_memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size,
1669 I40E_NONDMA_TO_NONDMA);
1671 return I40E_SUCCESS;
1675 * i40e_nvmupd_nvm_read - Read NVM
1676 * @hw: pointer to hardware structure
1677 * @cmd: pointer to nvm update command buffer
1678 * @bytes: pointer to the data buffer
1679 * @perrno: pointer to return error code
1681 * cmd structure contains identifiers and data buffer
1683 STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
1684 struct i40e_nvm_access *cmd,
1685 u8 *bytes, int *perrno)
1687 struct i40e_asq_cmd_details cmd_details;
1688 enum i40e_status_code status;
1689 u8 module, transaction;
1692 transaction = i40e_nvmupd_get_transaction(cmd->config);
1693 module = i40e_nvmupd_get_module(cmd->config);
/* LCB or SA transactions mark the last command of the sequence */
1694 last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
1696 memset(&cmd_details, 0, sizeof(cmd_details));
1697 cmd_details.wb_desc = &hw->nvm_wb_desc;
1699 status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1700 bytes, last, &cmd_details);
/* On failure, log the request details and translate the AQ error */
1702 i40e_debug(hw, I40E_DEBUG_NVM,
1703 "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
1704 module, cmd->offset, cmd->data_size);
1705 i40e_debug(hw, I40E_DEBUG_NVM,
1706 "i40e_nvmupd_nvm_read status %d aq %d\n",
1707 status, hw->aq.asq_last_status);
1708 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1715 * i40e_nvmupd_nvm_erase - Erase an NVM module
1716 * @hw: pointer to hardware structure
1717 * @cmd: pointer to nvm update command buffer
1718 * @perrno: pointer to return error code
1720 * module, offset, data_size and data are in cmd structure
1722 STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
1723 struct i40e_nvm_access *cmd,
1726 enum i40e_status_code status = I40E_SUCCESS;
1727 struct i40e_asq_cmd_details cmd_details;
1728 u8 module, transaction;
1731 transaction = i40e_nvmupd_get_transaction(cmd->config);
1732 module = i40e_nvmupd_get_module(cmd->config);
/* LCB flag marks the last command of the sequence */
1733 last = (transaction & I40E_NVM_LCB);
1735 memset(&cmd_details, 0, sizeof(cmd_details));
1736 cmd_details.wb_desc = &hw->nvm_wb_desc;
1738 status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1739 last, &cmd_details);
/* On failure, log the request details and translate the AQ error */
1741 i40e_debug(hw, I40E_DEBUG_NVM,
1742 "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
1743 module, cmd->offset, cmd->data_size);
1744 i40e_debug(hw, I40E_DEBUG_NVM,
1745 "i40e_nvmupd_nvm_erase status %d aq %d\n",
1746 status, hw->aq.asq_last_status);
1747 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1754 * i40e_nvmupd_nvm_write - Write NVM
1755 * @hw: pointer to hardware structure
1756 * @cmd: pointer to nvm update command buffer
1757 * @bytes: pointer to the data buffer
1758 * @perrno: pointer to return error code
1760 * module, offset, data_size and data are in cmd structure
1762 STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
1763 struct i40e_nvm_access *cmd,
1764 u8 *bytes, int *perrno)
1766 enum i40e_status_code status = I40E_SUCCESS;
1767 struct i40e_asq_cmd_details cmd_details;
1768 u8 module, transaction;
1769 u8 preservation_flags;
1772 transaction = i40e_nvmupd_get_transaction(cmd->config);
1773 module = i40e_nvmupd_get_module(cmd->config);
1774 last = (transaction & I40E_NVM_LCB);
1775 preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
1777 memset(&cmd_details, 0, sizeof(cmd_details));
1778 cmd_details.wb_desc = &hw->nvm_wb_desc;
1780 status = i40e_aq_update_nvm(hw, module, cmd->offset,
1781 (u16)cmd->data_size, bytes, last,
1782 preservation_flags, &cmd_details);
1784 i40e_debug(hw, I40E_DEBUG_NVM,
1785 "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
1786 module, cmd->offset, cmd->data_size);
1787 i40e_debug(hw, I40E_DEBUG_NVM,
1788 "i40e_nvmupd_nvm_write status %d aq %d\n",
1789 status, hw->aq.asq_last_status);
1790 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);