1 /*******************************************************************************
3 Copyright (c) 2013 - 2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
34 #include "i40e_prototype.h"
/* Forward declarations of the file-local Shadow RAM read helpers so the
 * public i40e_read_nvm_word()/i40e_read_nvm_buffer() dispatchers below can
 * select between the register (SRCTL) and AdminQ access methods.
 */
36 enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
38 enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
40 enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
41 u16 *words, u16 *data);
42 enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
43 u16 *words, u16 *data);
44 enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
45 u32 offset, u16 words, void *data,
49 * i40e_init_nvm_ops - Initialize NVM function pointers
50 * @hw: pointer to the HW structure
52 * Setup the function pointers and the NVM info structure. Should be called
53 * once per NVM initialization, e.g. inside the i40e_init_shared_code().
54 * Please notice that the NVM term is used here (& in all methods covered
55 * in this file) as an equivalent of the FLASH part mapped into the SR.
56 * We are accessing FLASH always thru the Shadow RAM.
58 enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
60 struct i40e_nvm_info *nvm = &hw->nvm;
61 enum i40e_status_code ret_code = I40E_SUCCESS;
65 DEBUGFUNC("i40e_init_nvm");
67 /* The SR size is stored regardless of the nvm programming mode
68 * as the blank mode may be used in the factory line.
70 gens = rd32(hw, I40E_GLNVM_GENS);
71 sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
72 I40E_GLNVM_GENS_SR_SIZE_SHIFT);
73 /* Switching to words (sr_size contains power of 2KB) */
74 nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
76 /* Check if we are in the normal or blank NVM programming mode */
77 fla = rd32(hw, I40E_GLNVM_FLA);
78 if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
80 nvm->timeout = I40E_MAX_NVM_TIMEOUT;
81 nvm->blank_nvm_mode = false;
82 } else { /* Blank programming mode */
83 nvm->blank_nvm_mode = true;
84 ret_code = I40E_ERR_NVM_BLANK_MODE;
85 i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
92 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
93 * @hw: pointer to the HW structure
94 * @access: NVM access type (read or write)
96 * This function will request NVM ownership for reading
97 * via the proper Admin Command.
99 enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
100 enum i40e_aq_resource_access_type access)
102 enum i40e_status_code ret_code = I40E_SUCCESS;
106 DEBUGFUNC("i40e_acquire_nvm");
/* Blank-programming mode has no ownership semaphore; nothing to acquire. */
108 if (hw->nvm.blank_nvm_mode)
109 goto i40e_i40e_acquire_nvm_exit;
111 ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
112 0, &time_left, NULL);
113 /* Reading the Global Device Timer */
114 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
116 /* Store the timeout */
117 hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
120 i40e_debug(hw, I40E_DEBUG_NVM,
121 "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
122 access, time_left, ret_code, hw->aq.asq_last_status);
/* Another function currently owns the resource: re-request in a loop
 * until the current owner's window (time_left) elapses or we time out.
 */
124 if (ret_code && time_left) {
125 /* Poll until the current NVM owner timeouts */
126 timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
127 while ((gtime < timeout) && time_left) {
129 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
130 ret_code = i40e_aq_request_resource(hw,
131 I40E_NVM_RESOURCE_ID,
132 access, 0, &time_left,
134 if (ret_code == I40E_SUCCESS) {
135 hw->nvm.hw_semaphore_timeout =
136 I40E_MS_TO_GTIME(time_left) + gtime;
140 if (ret_code != I40E_SUCCESS) {
141 hw->nvm.hw_semaphore_timeout = 0;
142 i40e_debug(hw, I40E_DEBUG_NVM,
143 "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
144 time_left, ret_code, hw->aq.asq_last_status);
148 i40e_i40e_acquire_nvm_exit:
153 * i40e_release_nvm - Generic request for releasing the NVM ownership
154 * @hw: pointer to the HW structure
156 * This function will release NVM resource via the proper Admin Command.
158 void i40e_release_nvm(struct i40e_hw *hw)
160 enum i40e_status_code ret_code = I40E_SUCCESS;
163 DEBUGFUNC("i40e_release_nvm");
/* No semaphore was taken in blank mode, so nothing to release. */
165 if (hw->nvm.blank_nvm_mode)
168 ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
170 /* there are some rare cases when trying to release the resource
171 * results in an admin Q timeout, so handle them correctly
/* Retry the release (bounded by asq_cmd_timeout) on AQ timeout. */
173 while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
174 (total_delay < hw->aq.asq_cmd_timeout)) {
176 ret_code = i40e_aq_release_resource(hw,
177 I40E_NVM_RESOURCE_ID, 0, NULL);
183 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
184 * @hw: pointer to the HW structure
186 * Polls the SRCTL Shadow RAM register done bit.
* Returns I40E_SUCCESS as soon as DONE is observed, or I40E_ERR_TIMEOUT
* if the bit is never set within I40E_SRRD_SRCTL_ATTEMPTS polls.
188 static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
190 enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
193 DEBUGFUNC("i40e_poll_sr_srctl_done_bit");
195 /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
196 for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
197 srctl = rd32(hw, I40E_GLNVM_SRCTL);
198 if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
199 ret_code = I40E_SUCCESS;
204 if (ret_code == I40E_ERR_TIMEOUT)
205 i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
210 * i40e_read_nvm_word - Reads Shadow RAM
211 * @hw: pointer to the HW structure
212 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
213 * @data: word read from the Shadow RAM
215 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
* Dispatcher: when I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE is set the word is
* read via AdminQ (wrapped in acquire/release of NVM ownership); otherwise
* it falls back to the direct GLNVM_SRCTL register path.
217 enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
220 enum i40e_status_code ret_code = I40E_SUCCESS;
223 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
224 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
226 ret_code = i40e_read_nvm_word_aq(hw, offset, data);
227 i40e_release_nvm(hw);
230 ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
233 ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
239 * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
240 * @hw: pointer to the HW structure
241 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
242 * @data: word read from the Shadow RAM
244 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
246 enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
249 enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
252 DEBUGFUNC("i40e_read_nvm_word_srctl");
/* Reject offsets past the Shadow RAM size discovered in i40e_init_nvm(). */
254 if (offset >= hw->nvm.sr_size) {
255 i40e_debug(hw, I40E_DEBUG_NVM,
256 "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
257 offset, hw->nvm.sr_size);
258 ret_code = I40E_ERR_PARAM;
262 /* Poll the done bit first */
263 ret_code = i40e_poll_sr_srctl_done_bit(hw);
264 if (ret_code == I40E_SUCCESS) {
265 /* Write the address and start reading */
266 sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
267 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
268 wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
270 /* Poll I40E_GLNVM_SRCTL until the done bit is set */
271 ret_code = i40e_poll_sr_srctl_done_bit(hw);
272 if (ret_code == I40E_SUCCESS) {
/* Extract the 16-bit read-data field from GLNVM_SRDATA. */
273 sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
274 *data = (u16)((sr_reg &
275 I40E_GLNVM_SRDATA_RDDATA_MASK)
276 >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
279 if (ret_code != I40E_SUCCESS)
280 i40e_debug(hw, I40E_DEBUG_NVM,
281 "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
289 * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
290 * @hw: pointer to the HW structure
291 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
292 * @data: word read from the Shadow RAM
294 * Reads one 16 bit word from the Shadow RAM using the AdminQ
* (i40e_read_nvm_aq) and converts it from little-endian to CPU order.
296 enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
299 enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
301 DEBUGFUNC("i40e_read_nvm_word_aq");
/* Module pointer 0x0 = flat Shadow RAM addressing; last_command = true. */
303 ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
304 *data = LE16_TO_CPU(*(__le16 *)data);
310 * i40e_read_nvm_buffer - Reads Shadow RAM buffer
311 * @hw: pointer to the HW structure
312 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
313 * @words: (in) number of words to read; (out) number of words actually read
314 * @data: words read from the Shadow RAM
316 * Reads 16 bit words (data buffer) from the SR, dispatching to the AdminQ
317 * or SRCTL access method. On the AdminQ path the buffer read is preceded
318 * by the NVM ownership take and followed by the release.
320 enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
321 u16 *words, u16 *data)
323 enum i40e_status_code ret_code = I40E_SUCCESS;
326 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
327 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
329 ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
331 i40e_release_nvm(hw);
334 ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
337 ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
343 * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
344 * @hw: pointer to the HW structure
345 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
346 * @words: (in) number of words to read; (out) number of words actually read
347 * @data: words read from the Shadow RAM
349 * Reads 16 bit words (data buffer) from the SR one word at a time via
350 * i40e_read_nvm_word_srctl(). This helper does not itself take or release
351 * NVM ownership; see the i40e_read_nvm_buffer() dispatcher.
353 enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
354 u16 *words, u16 *data)
356 enum i40e_status_code ret_code = I40E_SUCCESS;
359 DEBUGFUNC("i40e_read_nvm_buffer_srctl");
361 /* Loop thru the selected region */
362 for (word = 0; word < *words; word++) {
363 index = offset + word;
364 ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
/* Stop at the first failing word; *words reports the partial count. */
365 if (ret_code != I40E_SUCCESS)
369 /* Update the number of words read from the Shadow RAM */
376 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
377 * @hw: pointer to the HW structure
378 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
379 * @words: (in) number of words to read; (out) number of words actually read
380 * @data: words read from the Shadow RAM
382 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
383 * method, in chunks that never cross a sector (page) boundary. Ownership
384 * take/release is handled by the caller (i40e_read_nvm_buffer()).
386 enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
387 u16 *words, u16 *data)
389 enum i40e_status_code ret_code;
390 u16 read_size = *words;
391 bool last_cmd = false;
395 DEBUGFUNC("i40e_read_nvm_buffer_aq");
398 /* Calculate number of bytes we should read in this step.
399 * FVL AQ do not allow to read more than one page at a time or
400 * to cross page boundaries.
/* First chunk: if offset is unaligned, read only up to the sector end. */
402 if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
403 read_size = min(*words,
404 (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
405 (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
407 read_size = min((*words - words_read),
408 I40E_SR_SECTOR_SIZE_IN_WORDS);
410 /* Check if this is last command, if so set proper flag */
411 if ((words_read + read_size) >= *words)
414 ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
415 data + words_read, last_cmd);
416 if (ret_code != I40E_SUCCESS)
417 goto read_nvm_buffer_aq_exit;
419 /* Increment counter for words already read and move offset to
422 words_read += read_size;
424 } while (words_read < *words);
/* AQ returns little-endian words; convert the whole buffer in place. */
426 for (i = 0; i < *words; i++)
427 data[i] = LE16_TO_CPU(((__le16 *)data)[i]);
429 read_nvm_buffer_aq_exit:
435 * i40e_read_nvm_aq - Read Shadow RAM.
436 * @hw: pointer to the HW structure.
437 * @module_pointer: module pointer location in words from the NVM beginning
438 * @offset: offset in words from module start
439 * @words: number of words to read
440 * @data: buffer that receives the words read from the Shadow RAM
441 * @last_command: tells the AdminQ that this is the last command
443 * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
445 enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
446 u32 offset, u16 words, void *data,
449 enum i40e_status_code ret_code = I40E_ERR_NVM;
450 struct i40e_asq_cmd_details cmd_details;
452 DEBUGFUNC("i40e_read_nvm_aq");
454 memset(&cmd_details, 0, sizeof(cmd_details));
455 cmd_details.wb_desc = &hw->nvm_wb_desc;
457 /* Here we are checking the SR limit only for the flat memory model.
458 * We cannot do it for the module-based model, as we did not acquire
459 * the NVM resource yet (we cannot get the module pointer value).
460 * Firmware will check the module-based model.
/* NOTE(review): the debug strings below say "write" although this is the
 * read path — runtime text left untouched, cosmetic mismatch only.
 */
462 if ((offset + words) > hw->nvm.sr_size)
463 i40e_debug(hw, I40E_DEBUG_NVM,
464 "NVM write error: offset %d beyond Shadow RAM limit %d\n",
465 (offset + words), hw->nvm.sr_size)
466 else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
467 /* We can read only up to 4KB (one sector) in one AQ command */
468 i40e_debug(hw, I40E_DEBUG_NVM,
469 "NVM write fail error: tried to write %d words, limit is %d.\n",
470 words, I40E_SR_SECTOR_SIZE_IN_WORDS);
471 else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
472 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
473 /* A single command cannot spread over two sectors */
474 i40e_debug(hw, I40E_DEBUG_NVM,
475 "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
478 ret_code = i40e_aq_read_nvm(hw, module_pointer,
479 2 * offset, /*bytes*/
481 data, last_command, &cmd_details);
487 * i40e_write_nvm_aq - Writes Shadow RAM.
488 * @hw: pointer to the HW structure.
489 * @module_pointer: module pointer location in words from the NVM beginning
490 * @offset: offset in words from module start
491 * @words: number of words to write
492 * @data: buffer with words to write to the Shadow RAM
493 * @last_command: tells the AdminQ that this is the last command
495 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
497 enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
498 u32 offset, u16 words, void *data,
501 enum i40e_status_code ret_code = I40E_ERR_NVM;
502 struct i40e_asq_cmd_details cmd_details;
504 DEBUGFUNC("i40e_write_nvm_aq");
506 memset(&cmd_details, 0, sizeof(cmd_details));
507 cmd_details.wb_desc = &hw->nvm_wb_desc;
509 /* Here we are checking the SR limit only for the flat memory model.
510 * We cannot do it for the module-based model, as we did not acquire
511 * the NVM resource yet (we cannot get the module pointer value).
512 * Firmware will check the module-based model.
514 if ((offset + words) > hw->nvm.sr_size)
515 DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n");
516 else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
517 /* We can write only up to 4KB (one sector), in one AQ write */
518 DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n");
519 else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
520 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
521 /* A single write cannot spread over two sectors */
522 DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n");
/* All validations passed: issue the AQ update with a byte offset. */
524 ret_code = i40e_aq_update_nvm(hw, module_pointer,
525 2 * offset, /*bytes*/
527 data, last_command, &cmd_details);
533 * i40e_write_nvm_word - Writes Shadow RAM word
534 * @hw: pointer to the HW structure
535 * @offset: offset of the Shadow RAM word to write
536 * @data: word to write to the Shadow RAM
538 * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
539 * NVM ownership have to be acquired and released (on ARQ completion event
540 * reception) by caller. To commit SR to NVM update checksum function
543 enum i40e_status_code i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
546 DEBUGFUNC("i40e_write_nvm_word");
/* Convert the caller's word to little-endian in place before the AQ write. */
548 *((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));
550 /* Value 0x00 below means that we treat SR as a flat mem */
551 return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, false);
555 * i40e_write_nvm_buffer - Writes Shadow RAM buffer
556 * @hw: pointer to the HW structure
557 * @module_pointer: module pointer location in words from the NVM beginning
558 * @offset: offset of the Shadow RAM buffer to write
559 * @words: number of words to write
560 * @data: words to write to the Shadow RAM (converted to LE in place —
561 * the caller's buffer is modified)
562 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
563 * NVM ownership must be acquired before calling this function and released
564 * on ARQ completion event reception by caller. To commit SR to NVM update
565 * checksum function should be called.
567 enum i40e_status_code i40e_write_nvm_buffer(struct i40e_hw *hw,
568 u8 module_pointer, u32 offset,
569 u16 words, void *data)
571 __le16 *le_word_ptr = (__le16 *)data;
572 u16 *word_ptr = (u16 *)data;
575 DEBUGFUNC("i40e_write_nvm_buffer");
/* In-place CPU-to-LE conversion; le_word_ptr and word_ptr alias data. */
577 for (i = 0; i < words; i++)
578 le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);
580 /* Here we will only write one buffer as the size of the modules
581 * mirrored in the Shadow RAM is always less than 4K.
583 return i40e_write_nvm_aq(hw, module_pointer, offset, words,
588 * i40e_calc_nvm_checksum - Calculates and returns the checksum
589 * @hw: pointer to hardware structure
590 * @checksum: pointer to the checksum
592 * This function calculates SW Checksum that covers the whole 64kB shadow RAM
593 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
594 * is customer specific and unknown. Therefore, this function skips all maximum
595 * possible size of VPD (1kB).
597 enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
599 enum i40e_status_code ret_code = I40E_SUCCESS;
600 struct i40e_virt_mem vmem;
601 u16 pcie_alt_module = 0;
602 u16 checksum_local = 0;
607 DEBUGFUNC("i40e_calc_nvm_checksum");
/* One sector's worth of scratch memory for buffered SR reads. */
609 ret_code = i40e_allocate_virt_mem(hw, &vmem,
610 I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
612 goto i40e_calc_nvm_checksum_exit;
613 data = (u16 *)vmem.va;
615 /* read pointer to VPD area */
616 ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
617 if (ret_code != I40E_SUCCESS) {
618 ret_code = I40E_ERR_NVM_CHECKSUM;
619 goto i40e_calc_nvm_checksum_exit;
622 /* read pointer to PCIe Alt Auto-load module */
623 ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
625 if (ret_code != I40E_SUCCESS) {
626 ret_code = I40E_ERR_NVM_CHECKSUM;
627 goto i40e_calc_nvm_checksum_exit;
630 /* Calculate SW checksum that covers the whole 64kB shadow RAM
631 * except the VPD and PCIe ALT Auto-load modules
633 for (i = 0; i < hw->nvm.sr_size; i++) {
/* Refill the sector buffer at each sector boundary. */
635 if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
636 u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
638 ret_code = i40e_read_nvm_buffer(hw, i, &words, data);
639 if (ret_code != I40E_SUCCESS) {
640 ret_code = I40E_ERR_NVM_CHECKSUM;
641 goto i40e_calc_nvm_checksum_exit;
645 /* Skip Checksum word */
646 if (i == I40E_SR_SW_CHECKSUM_WORD)
648 /* Skip VPD module (convert byte size to word count) */
649 if ((i >= (u32)vpd_module) &&
650 (i < ((u32)vpd_module +
651 (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
654 /* Skip PCIe ALT module (convert byte size to word count) */
655 if ((i >= (u32)pcie_alt_module) &&
656 (i < ((u32)pcie_alt_module +
657 (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
661 checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
/* SW checksum = base constant minus the running sum of included words. */
664 *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
666 i40e_calc_nvm_checksum_exit:
667 i40e_free_virt_mem(hw, &vmem);
672 * i40e_update_nvm_checksum - Updates the NVM checksum
673 * @hw: pointer to hardware structure
675 * NVM ownership must be acquired before calling this function and released
676 * on ARQ completion event reception by caller.
677 * This function will commit SR to NVM.
679 enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
681 enum i40e_status_code ret_code = I40E_SUCCESS;
685 DEBUGFUNC("i40e_update_nvm_checksum");
687 ret_code = i40e_calc_nvm_checksum(hw, &checksum);
688 le_sum = CPU_TO_LE16(checksum);
/* Write the freshly computed checksum into its dedicated SR word. */
689 if (ret_code == I40E_SUCCESS)
690 ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
697 * i40e_validate_nvm_checksum - Validate EEPROM checksum
698 * @hw: pointer to hardware structure
699 * @checksum: calculated checksum
701 * Performs checksum calculation and validates the NVM SW checksum. If the
702 * caller does not need checksum, the value can be NULL.
704 enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
707 enum i40e_status_code ret_code = I40E_SUCCESS;
709 u16 checksum_local = 0;
711 DEBUGFUNC("i40e_validate_nvm_checksum");
713 ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
714 if (ret_code != I40E_SUCCESS)
715 goto i40e_validate_nvm_checksum_exit;
717 /* Do not use i40e_read_nvm_word() because we do not want to take
718 * the synchronization semaphores twice here.
720 i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
722 /* Verify read checksum from EEPROM is the same as
723 * calculated checksum
725 if (checksum_local != checksum_sr)
726 ret_code = I40E_ERR_NVM_CHECKSUM;
728 /* If the user cares, return the calculated checksum */
730 *checksum = checksum_local;
732 i40e_validate_nvm_checksum_exit:
/* Forward declarations for the NVM-update state machine: per-state handlers,
 * command validation, and the erase/write/read/exec-AQ work functions used
 * by i40e_nvmupd_command() below.
 */
736 STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
737 struct i40e_nvm_access *cmd,
738 u8 *bytes, int *perrno);
739 STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
740 struct i40e_nvm_access *cmd,
741 u8 *bytes, int *perrno);
742 STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
743 struct i40e_nvm_access *cmd,
744 u8 *bytes, int *perrno);
745 STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
746 struct i40e_nvm_access *cmd,
748 STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
749 struct i40e_nvm_access *cmd,
751 STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
752 struct i40e_nvm_access *cmd,
753 u8 *bytes, int *perrno);
754 STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
755 struct i40e_nvm_access *cmd,
756 u8 *bytes, int *perrno);
757 STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
758 struct i40e_nvm_access *cmd,
759 u8 *bytes, int *perrno);
760 STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
761 struct i40e_nvm_access *cmd,
762 u8 *bytes, int *perrno);
/* Extract the module-pointer field from an nvmupd config word. */
763 STATIC INLINE u8 i40e_nvmupd_get_module(u32 val)
765 return (u8)(val & I40E_NVM_MOD_PNT_MASK);
/* Extract the transaction-type field from an nvmupd config word. */
767 STATIC INLINE u8 i40e_nvmupd_get_transaction(u32 val)
769 return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
/* Human-readable names for debug logging, indexed by enum i40e_nvmupd_cmd —
 * the order here must stay in sync with that enum.
 */
772 STATIC const char *i40e_nvm_update_state_str[] = {
773 "I40E_NVMUPD_INVALID",
774 "I40E_NVMUPD_READ_CON",
775 "I40E_NVMUPD_READ_SNT",
776 "I40E_NVMUPD_READ_LCB",
777 "I40E_NVMUPD_READ_SA",
778 "I40E_NVMUPD_WRITE_ERA",
779 "I40E_NVMUPD_WRITE_CON",
780 "I40E_NVMUPD_WRITE_SNT",
781 "I40E_NVMUPD_WRITE_LCB",
782 "I40E_NVMUPD_WRITE_SA",
783 "I40E_NVMUPD_CSUM_CON",
784 "I40E_NVMUPD_CSUM_SA",
785 "I40E_NVMUPD_CSUM_LCB",
786 "I40E_NVMUPD_STATUS",
787 "I40E_NVMUPD_EXEC_AQ",
788 "I40E_NVMUPD_GET_AQ_RESULT",
792 * i40e_nvmupd_command - Process an NVM update command
793 * @hw: pointer to hardware structure
794 * @cmd: pointer to nvm update command
795 * @bytes: pointer to the data buffer
796 * @perrno: pointer to return error code
798 * Dispatches command depending on what update state is current
800 enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
801 struct i40e_nvm_access *cmd,
802 u8 *bytes, int *perrno)
804 enum i40e_status_code status;
805 enum i40e_nvmupd_cmd upd_cmd;
807 DEBUGFUNC("i40e_nvmupd_command");
812 /* early check for status command and debug msgs */
813 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
815 i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n",
816 i40e_nvm_update_state_str[upd_cmd],
818 hw->aq.nvm_release_on_done);
820 if (upd_cmd == I40E_NVMUPD_INVALID) {
822 i40e_debug(hw, I40E_DEBUG_NVM,
823 "i40e_nvmupd_validate_command returns %d errno %d\n",
827 /* a status request returns immediately rather than
828 * going into the state machine
830 if (upd_cmd == I40E_NVMUPD_STATUS) {
831 bytes[0] = hw->nvmupd_state;
/* Dispatch on the current state; each handler may advance the state. */
835 switch (hw->nvmupd_state) {
836 case I40E_NVMUPD_STATE_INIT:
837 status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
840 case I40E_NVMUPD_STATE_READING:
841 status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
844 case I40E_NVMUPD_STATE_WRITING:
845 status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
848 case I40E_NVMUPD_STATE_INIT_WAIT:
849 case I40E_NVMUPD_STATE_WRITE_WAIT:
/* Waiting on an ARQ completion event; caller must retry later. */
850 status = I40E_ERR_NOT_READY;
855 /* invalid state, should never happen */
856 i40e_debug(hw, I40E_DEBUG_NVM,
857 "NVMUPD: no such state %d\n", hw->nvmupd_state);
858 status = I40E_NOT_SUPPORTED;
866 * i40e_nvmupd_state_init - Handle NVM update state Init
867 * @hw: pointer to hardware structure
868 * @cmd: pointer to nvm update command buffer
869 * @bytes: pointer to the data buffer
870 * @perrno: pointer to return error code
872 * Process legitimate commands of the Init state and conditionally set next
873 * state. Reject all other commands.
* Pattern for each case: acquire the NVM semaphore (read or write), run the
* operation, then either release immediately, defer the release to the ARQ
* completion (nvm_release_on_done + *_WAIT state), or keep ownership and
* transition into READING/WRITING for multi-step transactions.
875 STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
876 struct i40e_nvm_access *cmd,
877 u8 *bytes, int *perrno)
879 enum i40e_status_code status = I40E_SUCCESS;
880 enum i40e_nvmupd_cmd upd_cmd;
882 DEBUGFUNC("i40e_nvmupd_state_init");
884 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
887 case I40E_NVMUPD_READ_SA:
888 status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
890 *perrno = i40e_aq_rc_to_posix(status,
891 hw->aq.asq_last_status);
893 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
894 i40e_release_nvm(hw);
898 case I40E_NVMUPD_READ_SNT:
899 status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
901 *perrno = i40e_aq_rc_to_posix(status,
902 hw->aq.asq_last_status);
904 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
906 i40e_release_nvm(hw);
908 hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
912 case I40E_NVMUPD_WRITE_ERA:
913 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
915 *perrno = i40e_aq_rc_to_posix(status,
916 hw->aq.asq_last_status);
918 status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
920 i40e_release_nvm(hw);
922 hw->aq.nvm_release_on_done = true;
923 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
928 case I40E_NVMUPD_WRITE_SA:
929 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
931 *perrno = i40e_aq_rc_to_posix(status,
932 hw->aq.asq_last_status);
934 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
936 i40e_release_nvm(hw);
938 hw->aq.nvm_release_on_done = true;
939 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
944 case I40E_NVMUPD_WRITE_SNT:
945 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
947 *perrno = i40e_aq_rc_to_posix(status,
948 hw->aq.asq_last_status);
950 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
952 i40e_release_nvm(hw);
954 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
958 case I40E_NVMUPD_CSUM_SA:
959 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
961 *perrno = i40e_aq_rc_to_posix(status,
962 hw->aq.asq_last_status);
964 status = i40e_update_nvm_checksum(hw);
966 *perrno = hw->aq.asq_last_status ?
967 i40e_aq_rc_to_posix(status,
968 hw->aq.asq_last_status) :
970 i40e_release_nvm(hw);
972 hw->aq.nvm_release_on_done = true;
973 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
/* EXEC_AQ / GET_AQ_RESULT pass straight through; no semaphore needed. */
978 case I40E_NVMUPD_EXEC_AQ:
979 status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
982 case I40E_NVMUPD_GET_AQ_RESULT:
983 status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
987 i40e_debug(hw, I40E_DEBUG_NVM,
988 "NVMUPD: bad cmd %s in init state\n",
989 i40e_nvm_update_state_str[upd_cmd]);
990 status = I40E_ERR_NVM;
998 * i40e_nvmupd_state_reading - Handle NVM update state Reading
999 * @hw: pointer to hardware structure
1000 * @cmd: pointer to nvm update command buffer
1001 * @bytes: pointer to the data buffer
1002 * @perrno: pointer to return error code
1004 * NVM ownership is already held. Process legitimate commands and set any
1005 * change in state; reject all other commands.
1007 STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
1008 struct i40e_nvm_access *cmd,
1009 u8 *bytes, int *perrno)
1011 enum i40e_status_code status = I40E_SUCCESS;
1012 enum i40e_nvmupd_cmd upd_cmd;
1014 DEBUGFUNC("i40e_nvmupd_state_reading");
1016 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
/* Continue (CON) and standalone (SA) reads keep the semaphore held. */
1019 case I40E_NVMUPD_READ_SA:
1020 case I40E_NVMUPD_READ_CON:
1021 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
/* Last-command-in-batch (LCB): read, drop ownership, back to INIT. */
1024 case I40E_NVMUPD_READ_LCB:
1025 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1026 i40e_release_nvm(hw);
1027 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1031 i40e_debug(hw, I40E_DEBUG_NVM,
1032 "NVMUPD: bad cmd %s in reading state.\n",
1033 i40e_nvm_update_state_str[upd_cmd]);
1034 status = I40E_NOT_SUPPORTED;
1042 * i40e_nvmupd_state_writing - Handle NVM update state Writing
1043 * @hw: pointer to hardware structure
1044 * @cmd: pointer to nvm update command buffer
1045 * @bytes: pointer to the data buffer
1046 * @perrno: pointer to return error code
1048 * NVM ownership is already held. Process legitimate commands and set any
1049 * change in state; reject all other commands
1051 STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
1052 struct i40e_nvm_access *cmd,
1053 u8 *bytes, int *perrno)
1055 enum i40e_status_code status = I40E_SUCCESS;
1056 enum i40e_nvmupd_cmd upd_cmd;
1057 bool retry_attempt = false;
1059 DEBUGFUNC("i40e_nvmupd_state_writing");
1061 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1065 case I40E_NVMUPD_WRITE_CON:
1066 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1068 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
/* LCB write: on success defer semaphore release to the ARQ completion. */
1071 case I40E_NVMUPD_WRITE_LCB:
1072 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1074 *perrno = hw->aq.asq_last_status ?
1075 i40e_aq_rc_to_posix(status,
1076 hw->aq.asq_last_status) :
1078 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1080 hw->aq.nvm_release_on_done = true;
1081 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1085 case I40E_NVMUPD_CSUM_CON:
1086 status = i40e_update_nvm_checksum(hw);
1088 *perrno = hw->aq.asq_last_status ?
1089 i40e_aq_rc_to_posix(status,
1090 hw->aq.asq_last_status) :
1092 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1094 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1098 case I40E_NVMUPD_CSUM_LCB:
1099 status = i40e_update_nvm_checksum(hw);
1101 *perrno = hw->aq.asq_last_status ?
1102 i40e_aq_rc_to_posix(status,
1103 hw->aq.asq_last_status) :
1105 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1107 hw->aq.nvm_release_on_done = true;
1108 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1113 i40e_debug(hw, I40E_DEBUG_NVM,
1114 "NVMUPD: bad cmd %s in writing state.\n",
1115 i40e_nvm_update_state_str[upd_cmd]);
1116 status = I40E_NOT_SUPPORTED;
1121 /* In some circumstances, a multi-write transaction takes longer
1122 * than the default 3 minute timeout on the write semaphore. If
1123 * the write failed with an EBUSY status, this is likely the problem,
1124 * so here we try to reacquire the semaphore then retry the write.
1125 * We only do one retry, then give up.
1127 if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
1129 enum i40e_status_code old_status = status;
1130 u32 old_asq_status = hw->aq.asq_last_status;
1133 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
1134 if (gtime >= hw->nvm.hw_semaphore_timeout) {
1135 i40e_debug(hw, I40E_DEBUG_ALL,
1136 "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
1137 gtime, hw->nvm.hw_semaphore_timeout);
1138 i40e_release_nvm(hw);
1139 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
/* Reacquire failed: restore the original error for the caller. */
1141 i40e_debug(hw, I40E_DEBUG_ALL,
1142 "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
1143 hw->aq.asq_last_status);
1144 status = old_status;
1145 hw->aq.asq_last_status = old_asq_status;
1147 retry_attempt = true;
1157 * i40e_nvmupd_validate_command - Validate given command
1158 * @hw: pointer to hardware structure
1159 * @cmd: pointer to nvm update command buffer
1160 * @perrno: pointer to return error code
1162 * Return one of the valid command types or I40E_NVMUPD_INVALID
1164 STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
1165 struct i40e_nvm_access *cmd,
1168 enum i40e_nvmupd_cmd upd_cmd;
1169 u8 module, transaction;
1171 DEBUGFUNC("i40e_nvmupd_validate_command\n");
1173 /* anything that doesn't match a recognized case is an error */
1174 upd_cmd = I40E_NVMUPD_INVALID;
/* transaction type and module id are both packed into cmd->config */
1176 transaction = i40e_nvmupd_get_transaction(cmd->config);
1177 module = i40e_nvmupd_get_module(cmd->config);
1179 /* limits on data size */
1180 if ((cmd->data_size < 1) ||
1181 (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
1182 i40e_debug(hw, I40E_DEBUG_NVM,
1183 "i40e_nvmupd_validate_command data_size %d\n",
1186 return I40E_NVMUPD_INVALID;
/* map (command, transaction) onto the driver's internal nvmupd opcode */
1189 switch (cmd->command) {
1191 switch (transaction) {
1193 upd_cmd = I40E_NVMUPD_READ_CON;
1196 upd_cmd = I40E_NVMUPD_READ_SNT;
1199 upd_cmd = I40E_NVMUPD_READ_LCB;
1202 upd_cmd = I40E_NVMUPD_READ_SA;
/* special module ids select pseudo-reads: driver status, or the saved
 * result of a previous EXEC_AQ when module == 0
 * NOTE(review): the guarding if-condition sits on a line not shown here
 */
1206 upd_cmd = I40E_NVMUPD_STATUS;
1207 else if (module == 0)
1208 upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
1213 case I40E_NVM_WRITE:
1214 switch (transaction) {
1216 upd_cmd = I40E_NVMUPD_WRITE_CON;
1219 upd_cmd = I40E_NVMUPD_WRITE_SNT;
1222 upd_cmd = I40E_NVMUPD_WRITE_LCB;
1225 upd_cmd = I40E_NVMUPD_WRITE_SA;
1228 upd_cmd = I40E_NVMUPD_WRITE_ERA;
/* checksum requests combine the CSUM bit with SA/LCB qualifiers */
1231 upd_cmd = I40E_NVMUPD_CSUM_CON;
1233 case (I40E_NVM_CSUM|I40E_NVM_SA):
1234 upd_cmd = I40E_NVMUPD_CSUM_SA;
1236 case (I40E_NVM_CSUM|I40E_NVM_LCB):
1237 upd_cmd = I40E_NVMUPD_CSUM_LCB;
/* pass-through of a raw AdminQ command (handled by nvmupd_exec_aq) */
1241 upd_cmd = I40E_NVMUPD_EXEC_AQ;
1251 * i40e_nvmupd_exec_aq - Run an AQ command
1252 * @hw: pointer to hardware structure
1253 * @cmd: pointer to nvm update command buffer
1254 * @bytes: pointer to the data buffer
1255 * @perrno: pointer to return error code
1257 * cmd structure contains identifiers and data buffer
1259 STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
1260 struct i40e_nvm_access *cmd,
1261 u8 *bytes, int *perrno)
1263 struct i40e_asq_cmd_details cmd_details;
1264 enum i40e_status_code status;
1265 struct i40e_aq_desc *aq_desc;
1271 i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
/* route the AQ writeback descriptor into hw->nvm_wb_desc so that a
 * later I40E_NVMUPD_GET_AQ_RESULT request can retrieve it
 */
1272 memset(&cmd_details, 0, sizeof(cmd_details));
1273 cmd_details.wb_desc = &hw->nvm_wb_desc;
1275 aq_desc_len = sizeof(struct i40e_aq_desc);
1276 memset(&hw->nvm_wb_desc, 0, aq_desc_len);
1278 /* get the aq descriptor */
1279 if (cmd->data_size < aq_desc_len) {
1280 i40e_debug(hw, I40E_DEBUG_NVM,
1281 "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
1282 cmd->data_size, aq_desc_len);
1284 return I40E_ERR_PARAM;
/* the caller's buffer begins with the raw AQ descriptor itself */
1286 aq_desc = (struct i40e_aq_desc *)bytes;
1288 /* if data buffer needed, make sure it's ready */
1289 aq_data_len = cmd->data_size - aq_desc_len;
/* size must cover both the payload supplied and whatever length the
 * descriptor itself declares in its datalen field
 */
1290 buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen));
/* lazily allocate the shared NVM scratch buffer on first use */
1292 if (!hw->nvm_buff.va) {
1293 status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
1294 hw->aq.asq_buf_size);
1296 i40e_debug(hw, I40E_DEBUG_NVM,
1297 "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
/* copy the payload that follows the descriptor into the scratch buffer */
1301 if (hw->nvm_buff.va) {
1302 buff = hw->nvm_buff.va;
1303 memcpy(buff, &bytes[aq_desc_len], aq_data_len);
1307 /* and away we go! */
1308 status = i40e_asq_send_command(hw, aq_desc, buff,
1309 buff_size, &cmd_details);
/* log any failure and convert it to a POSIX errno for the caller */
1311 i40e_debug(hw, I40E_DEBUG_NVM,
1312 "i40e_nvmupd_exec_aq err %s aq_err %s\n",
1313 i40e_stat_str(hw, status),
1314 i40e_aq_str(hw, hw->aq.asq_last_status));
1315 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1322 * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
1323 * @hw: pointer to hardware structure
1324 * @cmd: pointer to nvm update command buffer
1325 * @bytes: pointer to the data buffer
1326 * @perrno: pointer to return error code
1328 * cmd structure contains identifiers and data buffer
1330 STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
1331 struct i40e_nvm_access *cmd,
1332 u8 *bytes, int *perrno)
1339 i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
/* total readable bytes = writeback descriptor + the data length it
 * reported in its datalen field
 */
1341 aq_desc_len = sizeof(struct i40e_aq_desc);
1342 aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen);
1344 /* check offset range */
1345 if (cmd->offset > aq_total_len) {
1346 i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
1347 __func__, cmd->offset, aq_total_len);
1349 return I40E_ERR_PARAM;
1352 /* check copylength range */
1353 if (cmd->data_size > (aq_total_len - cmd->offset)) {
1354 int new_len = aq_total_len - cmd->offset;
1356 i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
1357 __func__, cmd->data_size, new_len);
/* silently trim rather than fail; caller sees the adjusted size */
1358 cmd->data_size = new_len;
/* first serve any requested bytes that fall inside the descriptor */
1361 remainder = cmd->data_size;
1362 if (cmd->offset < aq_desc_len) {
1363 u32 len = aq_desc_len - cmd->offset;
1365 len = min(len, cmd->data_size);
1366 i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
1367 __func__, cmd->offset, cmd->offset + len);
1369 buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
1370 memcpy(bytes, buff, len);
/* then continue at the start of the data buffer ... */
1374 buff = hw->nvm_buff.va;
/* ... or, when the offset skipped the descriptor, partway into it */
1376 buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len);
/* copy whatever portion of the request falls in the data buffer */
1379 if (remainder > 0) {
1380 int start_byte = buff - (u8 *)hw->nvm_buff.va;
1382 i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
1383 __func__, start_byte, start_byte + remainder);
1384 memcpy(bytes, buff, remainder);
1387 return I40E_SUCCESS;
1391 * i40e_nvmupd_nvm_read - Read NVM
1392 * @hw: pointer to hardware structure
1393 * @cmd: pointer to nvm update command buffer
1394 * @bytes: pointer to the data buffer
1395 * @perrno: pointer to return error code
1397 * cmd structure contains identifiers and data buffer
1399 STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
1400 struct i40e_nvm_access *cmd,
1401 u8 *bytes, int *perrno)
1403 struct i40e_asq_cmd_details cmd_details;
1404 enum i40e_status_code status;
1405 u8 module, transaction;
/* transaction type and module id are both packed into cmd->config */
1408 transaction = i40e_nvmupd_get_transaction(cmd->config);
1409 module = i40e_nvmupd_get_module(cmd->config);
/* flag the AQ request as final for last-in-block or stand-alone reads */
1410 last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
/* capture the writeback descriptor so results can be queried later */
1412 memset(&cmd_details, 0, sizeof(cmd_details));
1413 cmd_details.wb_desc = &hw->nvm_wb_desc;
1415 status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1416 bytes, last, &cmd_details);
/* log any failure and convert it to a POSIX errno for the caller */
1418 i40e_debug(hw, I40E_DEBUG_NVM,
1419 "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
1420 module, cmd->offset, cmd->data_size);
1421 i40e_debug(hw, I40E_DEBUG_NVM,
1422 "i40e_nvmupd_nvm_read status %d aq %d\n",
1423 status, hw->aq.asq_last_status);
1424 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1431 * i40e_nvmupd_nvm_erase - Erase an NVM module
1432 * @hw: pointer to hardware structure
1433 * @cmd: pointer to nvm update command buffer
1434 * @perrno: pointer to return error code
1436 * module, offset, data_size and data are in cmd structure
1438 STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
1439 struct i40e_nvm_access *cmd,
1442 enum i40e_status_code status = I40E_SUCCESS;
1443 struct i40e_asq_cmd_details cmd_details;
1444 u8 module, transaction;
/* transaction type and module id are both packed into cmd->config */
1447 transaction = i40e_nvmupd_get_transaction(cmd->config);
1448 module = i40e_nvmupd_get_module(cmd->config);
/* LCB bit set in the transaction marks the last request of a sequence */
1449 last = (transaction & I40E_NVM_LCB);
/* capture the writeback descriptor so results can be queried later */
1451 memset(&cmd_details, 0, sizeof(cmd_details));
1452 cmd_details.wb_desc = &hw->nvm_wb_desc;
1454 status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1455 last, &cmd_details);
/* log any failure and convert it to a POSIX errno for the caller */
1457 i40e_debug(hw, I40E_DEBUG_NVM,
1458 "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
1459 module, cmd->offset, cmd->data_size);
1460 i40e_debug(hw, I40E_DEBUG_NVM,
1461 "i40e_nvmupd_nvm_erase status %d aq %d\n",
1462 status, hw->aq.asq_last_status);
1463 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1470 * i40e_nvmupd_nvm_write - Write NVM
1471 * @hw: pointer to hardware structure
1472 * @cmd: pointer to nvm update command buffer
1473 * @bytes: pointer to the data buffer
1474 * @perrno: pointer to return error code
1476 * module, offset, data_size and data are in cmd structure
1478 STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
1479 struct i40e_nvm_access *cmd,
1480 u8 *bytes, int *perrno)
1482 enum i40e_status_code status = I40E_SUCCESS;
1483 struct i40e_asq_cmd_details cmd_details;
1484 u8 module, transaction;
1487 transaction = i40e_nvmupd_get_transaction(cmd->config);
1488 module = i40e_nvmupd_get_module(cmd->config);
1489 last = (transaction & I40E_NVM_LCB);
1491 memset(&cmd_details, 0, sizeof(cmd_details));
1492 cmd_details.wb_desc = &hw->nvm_wb_desc;
1494 status = i40e_aq_update_nvm(hw, module, cmd->offset,
1495 (u16)cmd->data_size, bytes, last,
1498 i40e_debug(hw, I40E_DEBUG_NVM,
1499 "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
1500 module, cmd->offset, cmd->data_size);
1501 i40e_debug(hw, I40E_DEBUG_NVM,
1502 "i40e_nvmupd_nvm_write status %d aq %d\n",
1503 status, hw->aq.asq_last_status);
1504 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);