1 /*******************************************************************************
3 Copyright (c) 2013 - 2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
34 #include "i40e_prototype.h"
36 enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
38 enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
40 enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
41 u16 *words, u16 *data);
42 enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
43 u16 *words, u16 *data);
44 enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
45 u32 offset, u16 words, void *data,
49 * i40e_init_nvm - Initialize NVM info structure
50 * @hw: pointer to the HW structure
52 * Setup the function pointers and the NVM info structure. Should be called
53 * once per NVM initialization, e.g. inside the i40e_init_shared_code().
54 * Please notice that the NVM term is used here (& in all methods covered
55 * in this file) as an equivalent of the FLASH part mapped into the SR.
56 * We are accessing FLASH always thru the Shadow RAM.
/* Initialize hw->nvm: derive Shadow RAM size from GLNVM_GENS and detect
 * normal vs. blank (factory) programming mode from GLNVM_FLA.
 * NOTE(review): this listing is missing interior lines (local declarations
 * of gens/fla/sr_size, braces, final return) — annotate only what is visible.
 */
58 enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
60 struct i40e_nvm_info *nvm = &hw->nvm;
61 enum i40e_status_code ret_code = I40E_SUCCESS;
65 DEBUGFUNC("i40e_init_nvm");
67 /* The SR size is stored regardless of the nvm programming mode
68 * as the blank mode may be used in the factory line.
70 gens = rd32(hw, I40E_GLNVM_GENS);
71 sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
72 I40E_GLNVM_GENS_SR_SIZE_SHIFT);
73 /* Switching to words (sr_size contains power of 2KB) */
74 nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
76 /* Check if we are in the normal or blank NVM programming mode */
77 fla = rd32(hw, I40E_GLNVM_FLA);
78 if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
/* Max time the driver waits for NVM semaphore ownership (ms). */
80 nvm->timeout = I40E_MAX_NVM_TIMEOUT;
81 nvm->blank_nvm_mode = false;
82 } else { /* Blank programming mode */
/* Blank mode: NVM access via this driver is unsupported; report it. */
83 nvm->blank_nvm_mode = true;
84 ret_code = I40E_ERR_NVM_BLANK_MODE;
85 i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
92 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
93 * @hw: pointer to the HW structure
94 * @access: NVM access type (read or write)
96 * This function will request NVM ownership for reading
97 * via the proper Admin Command.
/* Acquire NVM resource ownership via the AdminQ request-resource command.
 * On contention, polls until the current owner's time_left expires or the
 * overall timeout is reached. On success, records the semaphore expiry time
 * (in GTIME units) in hw->nvm.hw_semaphore_timeout.
 * NOTE(review): listing is missing interior lines (declarations of
 * time_left/gtime/timeout, braces, delays, final return).
 * NOTE(review): the exit label is "i40e_i40e_acquire_nvm_exit" — doubled
 * prefix looks like a typo, but goto and label match, so it is harmless.
 */
99 enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
100 enum i40e_aq_resource_access_type access)
102 enum i40e_status_code ret_code = I40E_SUCCESS;
106 DEBUGFUNC("i40e_acquire_nvm");
/* In blank mode there is no NVM resource to arbitrate — succeed trivially. */
108 if (hw->nvm.blank_nvm_mode)
109 goto i40e_i40e_acquire_nvm_exit;
111 ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
112 0, &time_left, NULL);
113 /* Reading the Global Device Timer */
114 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
116 /* Store the timeout */
117 hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
120 i40e_debug(hw, I40E_DEBUG_NVM,
121 "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
122 access, time_left, ret_code, hw->aq.asq_last_status);
/* Another function owner holds the resource: retry until its lease expires. */
124 if (ret_code && time_left) {
125 /* Poll until the current NVM owner timeouts */
126 timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
127 while ((gtime < timeout) && time_left) {
129 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
130 ret_code = i40e_aq_request_resource(hw,
131 I40E_NVM_RESOURCE_ID,
132 access, 0, &time_left,
134 if (ret_code == I40E_SUCCESS) {
135 hw->nvm.hw_semaphore_timeout =
136 I40E_MS_TO_GTIME(time_left) + gtime;
/* Clear the stale expiry so later callers do not trust it. */
140 if (ret_code != I40E_SUCCESS) {
141 hw->nvm.hw_semaphore_timeout = 0;
142 i40e_debug(hw, I40E_DEBUG_NVM,
143 "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
144 time_left, ret_code, hw->aq.asq_last_status);
148 i40e_i40e_acquire_nvm_exit:
153 * i40e_release_nvm - Generic request for releasing the NVM ownership
154 * @hw: pointer to the HW structure
156 * This function will release NVM resource via the proper Admin Command.
/* Release NVM resource ownership via the AdminQ release-resource command.
 * Retries on AdminQ timeout up to hw->aq.asq_cmd_timeout total delay.
 * NOTE(review): listing is missing interior lines (blank-mode early return,
 * total_delay declaration/increment, per-iteration delay, braces).
 */
158 void i40e_release_nvm(struct i40e_hw *hw)
160 enum i40e_status_code ret_code = I40E_SUCCESS;
163 DEBUGFUNC("i40e_release_nvm");
/* Nothing was acquired in blank mode, so there is nothing to release. */
165 if (hw->nvm.blank_nvm_mode)
168 ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
170 /* there are some rare cases when trying to release the resource
171 * results in an admin Q timeout, so handle them correctly
173 while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
174 (total_delay < hw->aq.asq_cmd_timeout)) {
176 ret_code = i40e_aq_release_resource(hw,
177 I40E_NVM_RESOURCE_ID, 0, NULL);
183 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
184 * @hw: pointer to the HW structure
186 * Polls the SRCTL Shadow RAM register done bit.
/* Poll GLNVM_SRCTL until its DONE bit is set, up to
 * I40E_SRRD_SRCTL_ATTEMPTS reads; returns I40E_ERR_TIMEOUT otherwise.
 * NOTE(review): listing is missing interior lines (srctl/wait_cnt
 * declarations, loop break, per-attempt delay, return).
 */
188 static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
190 enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
193 DEBUGFUNC("i40e_poll_sr_srctl_done_bit");
195 /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
196 for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
197 srctl = rd32(hw, I40E_GLNVM_SRCTL);
198 if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
199 ret_code = I40E_SUCCESS;
204 if (ret_code == I40E_ERR_TIMEOUT)
205 i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
210 * i40e_read_nvm_word - Reads Shadow RAM
211 * @hw: pointer to the HW structure
212 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
213 * @data: word read from the Shadow RAM
215 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
/* Dispatcher: read one SR word. X722 MACs are routed to the AdminQ-based
 * read; all other MACs use the GLNVM_SRCTL register path.
 */
217 enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
221 if (hw->mac.type == I40E_MAC_X722)
222 return i40e_read_nvm_word_aq(hw, offset, data);
224 return i40e_read_nvm_word_srctl(hw, offset, data);
228 * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
229 * @hw: pointer to the HW structure
230 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
231 * @data: word read from the Shadow RAM
233 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
/* Read one 16-bit SR word through GLNVM_SRCTL/GLNVM_SRDATA:
 * wait for DONE, write address + START, wait for DONE again, then
 * extract the data field from GLNVM_SRDATA.
 * NOTE(review): listing is missing interior lines (sr_reg declaration,
 * goto to the exit label, closing braces, return).
 */
235 enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
238 enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
241 DEBUGFUNC("i40e_read_nvm_word_srctl");
/* Reject offsets past the SR size discovered in i40e_init_nvm(). */
243 if (offset >= hw->nvm.sr_size) {
244 i40e_debug(hw, I40E_DEBUG_NVM,
245 "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
246 offset, hw->nvm.sr_size);
247 ret_code = I40E_ERR_PARAM;
251 /* Poll the done bit first */
252 ret_code = i40e_poll_sr_srctl_done_bit(hw);
253 if (ret_code == I40E_SUCCESS) {
254 /* Write the address and start reading */
255 sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
256 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
257 wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
259 /* Poll I40E_GLNVM_SRCTL until the done bit is set */
260 ret_code = i40e_poll_sr_srctl_done_bit(hw);
261 if (ret_code == I40E_SUCCESS) {
262 sr_reg = rd32(hw, I40E_GLNVM_SRDATA)
/* Mask and shift out the 16-bit read-data field. */;
263 *data = (u16)((sr_reg &
264 I40E_GLNVM_SRDATA_RDDATA_MASK)
265 >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
268 if (ret_code != I40E_SUCCESS)
269 i40e_debug(hw, I40E_DEBUG_NVM,
270 "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
278 * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
279 * @hw: pointer to the HW structure
280 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
281 * @data: word read from the Shadow RAM
283 * Reads one 16 bit word from the Shadow RAM using the AdminQ read command.
/* Read one 16-bit SR word via the AdminQ (i40e_read_nvm_aq with
 * module_pointer 0 = flat SR model, one word, last_command = true),
 * then convert the little-endian wire value to CPU order in place.
 */
285 enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
288 enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
290 DEBUGFUNC("i40e_read_nvm_word_aq");
292 ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
293 *data = LE16_TO_CPU(*(__le16 *)data);
299 * i40e_read_nvm_buffer - Reads Shadow RAM buffer
300 * @hw: pointer to the HW structure
301 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
302 * @words: (in) number of words to read; (out) number of words actually read
303 * @data: words read from the Shadow RAM
305 * Reads 16 bit words (data buffer) from the SR using either the SRCTL
306 * register or the AdminQ. The buffer read is preceded by the NVM ownership take
307 * and followed by the release.
/* Dispatcher: read a buffer of SR words. X722 MACs use the AdminQ path;
 * all other MACs use the GLNVM_SRCTL word-by-word path.
 */
309 enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
310 u16 *words, u16 *data)
313 if (hw->mac.type == I40E_MAC_X722)
314 return i40e_read_nvm_buffer_aq(hw, offset, words, data);
316 return i40e_read_nvm_buffer_srctl(hw, offset, words, data);
320 * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
321 * @hw: pointer to the HW structure
322 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
323 * @words: (in) number of words to read; (out) number of words actually read
324 * @data: words read from the Shadow RAM
326 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_word_srctl()
327 * method. The buffer read is preceded by the NVM ownership take
328 * and followed by the release.
/* Read *words SR words starting at offset, one SRCTL word-read at a time,
 * stopping at the first failure. *words is updated to the count actually
 * read (per the header comment; the update itself is in a missing line).
 * NOTE(review): listing is missing interior lines (word/index declarations,
 * loop break, the *words update, return).
 */
330 enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
331 u16 *words, u16 *data)
333 enum i40e_status_code ret_code = I40E_SUCCESS;
336 DEBUGFUNC("i40e_read_nvm_buffer_srctl");
338 /* Loop thru the selected region */
339 for (word = 0; word < *words; word++) {
340 index = offset + word;
341 ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
342 if (ret_code != I40E_SUCCESS)
346 /* Update the number of words read from the Shadow RAM */
353 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
354 * @hw: pointer to the HW structure
355 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
356 * @words: (in) number of words to read; (out) number of words actually read
357 * @data: words read from the Shadow RAM
359 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
360 * method. The buffer read is preceded by the NVM ownership take
361 * and followed by the release.
/* Read *words SR words via the AdminQ in sector-sized chunks
 * (I40E_SR_SECTOR_SIZE_IN_WORDS): an AQ read may not cross a sector
 * boundary, so the first chunk is trimmed to the sector edge when offset
 * is unaligned. After all chunks, the buffer is byte-swapped from
 * little-endian to CPU order in place.
 * NOTE(review): listing is missing interior lines (words_read/i
 * declarations, do { opener, last_cmd assignment, offset advance, return).
 */
363 enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
364 u16 *words, u16 *data)
366 enum i40e_status_code ret_code;
367 u16 read_size = *words;
368 bool last_cmd = false;
372 DEBUGFUNC("i40e_read_nvm_buffer_aq");
375 /* Calculate number of bytes we should read in this step.
376 * FVL AQ do not allow to read more than one page at a time or
377 * to cross page boundaries.
379 if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
/* Unaligned start: read only up to the end of the current sector. */
380 read_size = min(*words,
381 (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
382 (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
384 read_size = min((*words - words_read),
385 I40E_SR_SECTOR_SIZE_IN_WORDS);
387 /* Check if this is last command, if so set proper flag */
388 if ((words_read + read_size) >= *words)
391 ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
392 data + words_read, last_cmd);
393 if (ret_code != I40E_SUCCESS)
394 goto read_nvm_buffer_aq_exit;
396 /* Increment counter for words already read and move offset to
399 words_read += read_size;
401 } while (words_read < *words);
/* Convert each word from NVM little-endian to host order. */
403 for (i = 0; i < *words; i++)
404 data[i] = LE16_TO_CPU(((__le16 *)data)[i]);
406 read_nvm_buffer_aq_exit:
412 * i40e_read_nvm_aq - Read Shadow RAM.
413 * @hw: pointer to the HW structure.
414 * @module_pointer: module pointer location in words from the NVM beginning
415 * @offset: offset in words from module start
416 * @words: number of words to read
417 * @data: buffer to store the words read from the Shadow RAM
418 * @last_command: tells the AdminQ that this is the last command
420 * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
/* Read a buffer of SR words via the i40e_aq_read_nvm admin command, after
 * validating flat-model bounds, per-command size (one sector max), and
 * that the access does not straddle a sector boundary.
 * NOTE(review): the debug strings say "NVM write error" in this READ path —
 * they look copy-pasted from i40e_write_nvm_aq; worth fixing upstream.
 * NOTE(review): listing is missing interior lines (last_command parameter,
 * braces, the 2*words byte-count argument, return).
 */
422 enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
423 u32 offset, u16 words, void *data,
426 enum i40e_status_code ret_code = I40E_ERR_NVM;
427 struct i40e_asq_cmd_details cmd_details;
429 DEBUGFUNC("i40e_read_nvm_aq");
431 memset(&cmd_details, 0, sizeof(cmd_details));
/* Route the AQ write-back descriptor so callers can inspect completion. */
432 cmd_details.wb_desc = &hw->nvm_wb_desc;
434 /* Here we are checking the SR limit only for the flat memory model.
435 * We cannot do it for the module-based model, as we did not acquire
436 * the NVM resource yet (we cannot get the module pointer value).
437 * Firmware will check the module-based model.
439 if ((offset + words) > hw->nvm.sr_size)
440 i40e_debug(hw, I40E_DEBUG_NVM,
441 "NVM write error: offset %d beyond Shadow RAM limit %d\n",
442 (offset + words), hw->nvm.sr_size);
443 else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
444 /* We can write only up to 4KB (one sector), in one AQ write */
445 i40e_debug(hw, I40E_DEBUG_NVM,
446 "NVM write fail error: tried to write %d words, limit is %d.\n",
447 words, I40E_SR_SECTOR_SIZE_IN_WORDS)
/* Sector index of the last word must equal that of the first word. */;
448 else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
449 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
450 /* A single write cannot spread over two sectors */
451 i40e_debug(hw, I40E_DEBUG_NVM,
452 "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
455 ret_code = i40e_aq_read_nvm(hw, module_pointer,
456 2 * offset, /*bytes*/
458 data, last_command, &cmd_details);
464 * i40e_write_nvm_aq - Writes Shadow RAM.
465 * @hw: pointer to the HW structure.
466 * @module_pointer: module pointer location in words from the NVM beginning
467 * @offset: offset in words from module start
468 * @words: number of words to write
469 * @data: buffer with words to write to the Shadow RAM
470 * @last_command: tells the AdminQ that this is the last command
472 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
/* Write a buffer of SR words via the i40e_aq_update_nvm admin command,
 * with the same flat-model bounds / one-sector / no-straddle validation
 * as i40e_read_nvm_aq. Caller must already hold the NVM resource.
 * NOTE(review): listing is missing interior lines (last_command parameter,
 * braces, the 2*words byte-count argument, return).
 */
474 enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
475 u32 offset, u16 words, void *data,
478 enum i40e_status_code ret_code = I40E_ERR_NVM;
479 struct i40e_asq_cmd_details cmd_details;
481 DEBUGFUNC("i40e_write_nvm_aq");
483 memset(&cmd_details, 0, sizeof(cmd_details));
/* Route the AQ write-back descriptor so callers can inspect completion. */
484 cmd_details.wb_desc = &hw->nvm_wb_desc;
486 /* Here we are checking the SR limit only for the flat memory model.
487 * We cannot do it for the module-based model, as we did not acquire
488 * the NVM resource yet (we cannot get the module pointer value).
489 * Firmware will check the module-based model.
491 if ((offset + words) > hw->nvm.sr_size)
492 DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n");
493 else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
494 /* We can write only up to 4KB (one sector), in one AQ write */
495 DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n");
496 else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
497 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
498 /* A single write cannot spread over two sectors */
499 DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n");
501 ret_code = i40e_aq_update_nvm(hw, module_pointer,
502 2 * offset, /*bytes*/
504 data, last_command, &cmd_details);
510 * i40e_write_nvm_word - Writes Shadow RAM word
511 * @hw: pointer to the HW structure
512 * @offset: offset of the Shadow RAM word to write
513 * @data: word to write to the Shadow RAM
515 * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
516 * NVM ownership have to be acquired and released (on ARQ completion event
517 * reception) by caller. To commit SR to NVM update checksum function
/* Write one SR word: convert the caller's CPU-order word to little-endian
 * in place, then issue a single-word flat-model AQ write (module 0x00,
 * last_command = false). Caller must hold NVM ownership.
 */
520 enum i40e_status_code i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
523 DEBUGFUNC("i40e_write_nvm_word");
525 *((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));
527 /* Value 0x00 below means that we treat SR as a flat mem */
528 return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, false);
532 * i40e_write_nvm_buffer - Writes Shadow RAM buffer
533 * @hw: pointer to the HW structure
534 * @module_pointer: module pointer location in words from the NVM beginning
535 * @offset: offset of the Shadow RAM buffer to write
536 * @words: number of words to write
537 * @data: words to write to the Shadow RAM
539 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
540 * NVM ownership must be acquired before calling this function and released
541 * on ARQ completion event reception by caller. To commit SR to NVM update
542 * checksum function should be called.
/* Write a buffer of SR words: convert each word to little-endian in place,
 * then issue one AQ write (modules mirrored in the SR fit in a single
 * sub-4K sector). Caller must hold NVM ownership.
 * NOTE(review): listing is missing interior lines (i declaration, the
 * final false last_command argument on the return, braces).
 */
544 enum i40e_status_code i40e_write_nvm_buffer(struct i40e_hw *hw,
545 u8 module_pointer, u32 offset,
546 u16 words, void *data)
548 __le16 *le_word_ptr = (__le16 *)data;
549 u16 *word_ptr = (u16 *)data;
552 DEBUGFUNC("i40e_write_nvm_buffer");
/* In-place CPU-order -> little-endian conversion of the caller's buffer. */
554 for (i = 0; i < words; i++)
555 le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);
557 /* Here we will only write one buffer as the size of the modules
558 * mirrored in the Shadow RAM is always less than 4K.
560 return i40e_write_nvm_aq(hw, module_pointer, offset, words,
565 * i40e_calc_nvm_checksum - Calculates and returns the checksum
566 * @hw: pointer to hardware structure
567 * @checksum: pointer to the checksum
569 * This function calculates SW Checksum that covers the whole 64kB shadow RAM
570 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
571 * is customer specific and unknown. Therefore, this function skips all maximum
572 * possible size of VPD (1kB).
/* Compute the SW checksum over the whole SR, reading one sector at a time
 * into a scratch buffer, skipping the checksum word itself plus the VPD
 * and PCIe-ALT modules (whose byte sizes are halved to word counts).
 * The result is I40E_SR_SW_CHECKSUM_BASE minus the word sum.
 * NOTE(review): listing is missing interior lines (i/vpd_module/data
 * declarations, ret_code check after allocation, continue statements,
 * closing braces, final return).
 */
574 enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
576 enum i40e_status_code ret_code = I40E_SUCCESS;
577 struct i40e_virt_mem vmem;
578 u16 pcie_alt_module = 0;
579 u16 checksum_local = 0;
584 DEBUGFUNC("i40e_calc_nvm_checksum");
/* One-sector scratch buffer reused for every sector of the SR. */
586 ret_code = i40e_allocate_virt_mem(hw, &vmem,
587 I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
589 goto i40e_calc_nvm_checksum_exit;
590 data = (u16 *)vmem.va;
592 /* read pointer to VPD area */
593 ret_code = i40e_read_nvm_word(hw, I40E_SR_VPD_PTR, &vpd_module);
594 if (ret_code != I40E_SUCCESS) {
595 ret_code = I40E_ERR_NVM_CHECKSUM;
596 goto i40e_calc_nvm_checksum_exit;
599 /* read pointer to PCIe Alt Auto-load module */
600 ret_code = i40e_read_nvm_word(hw, I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
602 if (ret_code != I40E_SUCCESS) {
603 ret_code = I40E_ERR_NVM_CHECKSUM;
604 goto i40e_calc_nvm_checksum_exit;
607 /* Calculate SW checksum that covers the whole 64kB shadow RAM
608 * except the VPD and PCIe ALT Auto-load modules
610 for (i = 0; i < hw->nvm.sr_size; i++) {
/* Refill the scratch buffer at each sector boundary. */
612 if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
613 u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
615 ret_code = i40e_read_nvm_buffer(hw, i, &words, data);
616 if (ret_code != I40E_SUCCESS) {
617 ret_code = I40E_ERR_NVM_CHECKSUM;
618 goto i40e_calc_nvm_checksum_exit;
622 /* Skip Checksum word */
623 if (i == I40E_SR_SW_CHECKSUM_WORD)
625 /* Skip VPD module (convert byte size to word count) */
626 if ((i >= (u32)vpd_module) &&
627 (i < ((u32)vpd_module +
628 (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
631 /* Skip PCIe ALT module (convert byte size to word count) */
632 if ((i >= (u32)pcie_alt_module) &&
633 (i < ((u32)pcie_alt_module +
634 (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
638 checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
641 *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
643 i40e_calc_nvm_checksum_exit:
644 i40e_free_virt_mem(hw, &vmem);
649 * i40e_update_nvm_checksum - Updates the NVM checksum
650 * @hw: pointer to hardware structure
652 * NVM ownership must be acquired before calling this function and released
653 * on ARQ completion event reception by caller.
654 * This function will commit SR to NVM.
/* Recompute the SW checksum and, on success, write it (little-endian) to
 * the SR checksum word via the AQ, which commits the SR to NVM.
 * Caller must hold NVM write ownership.
 * NOTE(review): listing is missing interior lines (checksum/le_sum
 * declarations, the write arguments using le_sum, return).
 */
656 enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
658 enum i40e_status_code ret_code = I40E_SUCCESS;
662 DEBUGFUNC("i40e_update_nvm_checksum");
664 ret_code = i40e_calc_nvm_checksum(hw, &checksum);
665 le_sum = CPU_TO_LE16(checksum);
666 if (ret_code == I40E_SUCCESS)
667 ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
674 * i40e_validate_nvm_checksum - Validate EEPROM checksum
675 * @hw: pointer to hardware structure
676 * @checksum: calculated checksum
678 * Performs checksum calculation and validates the NVM SW checksum. If the
679 * caller does not need checksum, the value can be NULL.
/* Recompute the SW checksum and compare it with the one stored in the SR.
 * Returns I40E_ERR_NVM_CHECKSUM on mismatch; optionally reports the
 * computed value through *checksum (caller may pass NULL).
 * NOTE(review): listing is missing interior lines (checksum_sr declaration,
 * the NULL guard before *checksum, return).
 */
681 enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
684 enum i40e_status_code ret_code = I40E_SUCCESS;
686 u16 checksum_local = 0;
688 DEBUGFUNC("i40e_validate_nvm_checksum");
690 ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
691 if (ret_code != I40E_SUCCESS)
692 goto i40e_validate_nvm_checksum_exit;
694 /* Do not use i40e_read_nvm_word() because we do not want to take
695 * the synchronization semaphores twice here.
697 i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
699 /* Verify read checksum from EEPROM is the same as
700 * calculated checksum
702 if (checksum_local != checksum_sr)
703 ret_code = I40E_ERR_NVM_CHECKSUM;
705 /* If the user cares, return the calculated checksum */
707 *checksum = checksum_local;
709 i40e_validate_nvm_checksum_exit:
713 STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
714 struct i40e_nvm_access *cmd,
715 u8 *bytes, int *perrno);
716 STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
717 struct i40e_nvm_access *cmd,
718 u8 *bytes, int *perrno);
719 STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
720 struct i40e_nvm_access *cmd,
721 u8 *bytes, int *perrno);
722 STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
723 struct i40e_nvm_access *cmd,
725 STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
726 struct i40e_nvm_access *cmd,
728 STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
729 struct i40e_nvm_access *cmd,
730 u8 *bytes, int *perrno);
731 STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
732 struct i40e_nvm_access *cmd,
733 u8 *bytes, int *perrno);
734 STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
735 struct i40e_nvm_access *cmd,
736 u8 *bytes, int *perrno);
737 STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
738 struct i40e_nvm_access *cmd,
739 u8 *bytes, int *perrno);
/* Extract the module-pointer field from an nvmupd config word. */
740 STATIC INLINE u8 i40e_nvmupd_get_module(u32 val)
742 return (u8)(val & I40E_NVM_MOD_PNT_MASK);
/* Extract the transaction-type field from an nvmupd config word. */
744 STATIC INLINE u8 i40e_nvmupd_get_transaction(u32 val)
746 return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
/* Debug names for nvmupd commands, indexed by upd_cmd in the state-machine
 * debug prints below. NOTE(review): ordering presumably mirrors
 * enum i40e_nvmupd_cmd — keep both in sync (verify against the header).
 * NOTE(review): the closing "};" is missing from this listing.
 */
749 STATIC const char *i40e_nvm_update_state_str[] = {
750 "I40E_NVMUPD_INVALID",
751 "I40E_NVMUPD_READ_CON",
752 "I40E_NVMUPD_READ_SNT",
753 "I40E_NVMUPD_READ_LCB",
754 "I40E_NVMUPD_READ_SA",
755 "I40E_NVMUPD_WRITE_ERA",
756 "I40E_NVMUPD_WRITE_CON",
757 "I40E_NVMUPD_WRITE_SNT",
758 "I40E_NVMUPD_WRITE_LCB",
759 "I40E_NVMUPD_WRITE_SA",
760 "I40E_NVMUPD_CSUM_CON",
761 "I40E_NVMUPD_CSUM_SA",
762 "I40E_NVMUPD_CSUM_LCB",
763 "I40E_NVMUPD_STATUS",
764 "I40E_NVMUPD_EXEC_AQ",
765 "I40E_NVMUPD_GET_AQ_RESULT",
769 * i40e_nvmupd_command - Process an NVM update command
770 * @hw: pointer to hardware structure
771 * @cmd: pointer to nvm update command
772 * @bytes: pointer to the data buffer
773 * @perrno: pointer to return error code
775 * Dispatches command depending on what update state is current
/* Top-level dispatcher for NVM update requests: validates the command,
 * answers STATUS requests immediately, then hands off to the handler for
 * the current hw->nvmupd_state (INIT / READING / WRITING); WAIT states
 * report not-ready, anything else is an internal error.
 * NOTE(review): listing is missing interior lines (NULL-argument guards,
 * several returns/breaks, closing braces).
 */
777 enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
778 struct i40e_nvm_access *cmd,
779 u8 *bytes, int *perrno)
781 enum i40e_status_code status;
782 enum i40e_nvmupd_cmd upd_cmd;
784 DEBUGFUNC("i40e_nvmupd_command");
789 /* early check for status command and debug msgs */
790 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
792 i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n",
793 i40e_nvm_update_state_str[upd_cmd],
795 hw->aq.nvm_release_on_done);
797 if (upd_cmd == I40E_NVMUPD_INVALID) {
799 i40e_debug(hw, I40E_DEBUG_NVM,
800 "i40e_nvmupd_validate_command returns %d errno %d\n",
804 /* a status request returns immediately rather than
805 * going into the state machine
807 if (upd_cmd == I40E_NVMUPD_STATUS) {
/* Report the raw state machine value back to the caller's buffer. */
808 bytes[0] = hw->nvmupd_state;
812 switch (hw->nvmupd_state) {
813 case I40E_NVMUPD_STATE_INIT:
814 status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
817 case I40E_NVMUPD_STATE_READING:
818 status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
821 case I40E_NVMUPD_STATE_WRITING:
822 status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
/* Waiting on an ARQ completion event — caller should retry later. */
825 case I40E_NVMUPD_STATE_INIT_WAIT:
826 case I40E_NVMUPD_STATE_WRITE_WAIT:
827 status = I40E_ERR_NOT_READY;
832 /* invalid state, should never happen */
833 i40e_debug(hw, I40E_DEBUG_NVM,
834 "NVMUPD: no such state %d\n", hw->nvmupd_state);
835 status = I40E_NOT_SUPPORTED;
843 * i40e_nvmupd_state_init - Handle NVM update state Init
844 * @hw: pointer to hardware structure
845 * @cmd: pointer to nvm update command buffer
846 * @bytes: pointer to the data buffer
847 * @perrno: pointer to return error code
849 * Process legitimate commands of the Init state and conditionally set next
850 * state. Reject all other commands.
/* INIT-state handler. For single-action (SA) commands: acquire the NVM,
 * do the operation, release (or defer release to ARQ completion via
 * nvm_release_on_done and move to a WAIT state). For start-of-transaction
 * (SNT) commands: acquire and transition to READING/WRITE_WAIT.
 * EXEC_AQ/GET_AQ_RESULT pass straight through. Anything else is rejected.
 * NOTE(review): listing is missing interior lines (status checks between
 * acquire and operate, else branches, breaks, closing braces, return).
 */
852 STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
853 struct i40e_nvm_access *cmd,
854 u8 *bytes, int *perrno)
856 enum i40e_status_code status = I40E_SUCCESS;
857 enum i40e_nvmupd_cmd upd_cmd;
859 DEBUGFUNC("i40e_nvmupd_state_init");
861 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
/* Read single-action: acquire, read, release — no state change. */
864 case I40E_NVMUPD_READ_SA:
865 status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
867 *perrno = i40e_aq_rc_to_posix(status,
868 hw->aq.asq_last_status);
870 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
871 i40e_release_nvm(hw);
/* Read start-of-transaction: hold the NVM and enter READING state. */
875 case I40E_NVMUPD_READ_SNT:
876 status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
878 *perrno = i40e_aq_rc_to_posix(status,
879 hw->aq.asq_last_status);
881 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
883 i40e_release_nvm(hw);
885 hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
/* Erase: on success, defer the release to the ARQ completion event. */
889 case I40E_NVMUPD_WRITE_ERA:
890 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
892 *perrno = i40e_aq_rc_to_posix(status,
893 hw->aq.asq_last_status);
895 status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
897 i40e_release_nvm(hw);
899 hw->aq.nvm_release_on_done = true;
900 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
/* Write single-action: like erase, release deferred to completion. */
905 case I40E_NVMUPD_WRITE_SA:
906 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
908 *perrno = i40e_aq_rc_to_posix(status,
909 hw->aq.asq_last_status);
911 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
913 i40e_release_nvm(hw);
915 hw->aq.nvm_release_on_done = true;
916 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
/* Write start-of-transaction: keep ownership, wait for next write. */
921 case I40E_NVMUPD_WRITE_SNT:
922 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
924 *perrno = i40e_aq_rc_to_posix(status,
925 hw->aq.asq_last_status);
927 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
929 i40e_release_nvm(hw);
931 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
/* Checksum single-action: update + commit, release deferred. */
935 case I40E_NVMUPD_CSUM_SA:
936 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
938 *perrno = i40e_aq_rc_to_posix(status,
939 hw->aq.asq_last_status);
941 status = i40e_update_nvm_checksum(hw);
943 *perrno = hw->aq.asq_last_status ?
944 i40e_aq_rc_to_posix(status,
945 hw->aq.asq_last_status) :
947 i40e_release_nvm(hw);
949 hw->aq.nvm_release_on_done = true;
950 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
/* Pass-through AQ command execution / result fetch — no NVM ownership. */
955 case I40E_NVMUPD_EXEC_AQ:
956 status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
959 case I40E_NVMUPD_GET_AQ_RESULT:
960 status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
964 i40e_debug(hw, I40E_DEBUG_NVM,
965 "NVMUPD: bad cmd %s in init state\n",
966 i40e_nvm_update_state_str[upd_cmd]);
967 status = I40E_ERR_NVM;
975 * i40e_nvmupd_state_reading - Handle NVM update state Reading
976 * @hw: pointer to hardware structure
977 * @cmd: pointer to nvm update command buffer
978 * @bytes: pointer to the data buffer
979 * @perrno: pointer to return error code
981 * NVM ownership is already held. Process legitimate commands and set any
982 * change in state; reject all other commands.
/* READING-state handler (NVM ownership already held): SA/CON reads
 * continue the transaction; an LCB (last-command-in-block) read finishes
 * it — release the NVM and return to INIT. Other commands are rejected.
 * NOTE(review): listing is missing interior lines (switch opener, breaks,
 * closing braces, return).
 */
984 STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
985 struct i40e_nvm_access *cmd,
986 u8 *bytes, int *perrno)
988 enum i40e_status_code status = I40E_SUCCESS;
989 enum i40e_nvmupd_cmd upd_cmd;
991 DEBUGFUNC("i40e_nvmupd_state_reading");
993 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
996 case I40E_NVMUPD_READ_SA:
997 case I40E_NVMUPD_READ_CON:
998 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
/* Last command in block: read, then end the read transaction. */
1001 case I40E_NVMUPD_READ_LCB:
1002 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1003 i40e_release_nvm(hw);
1004 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1008 i40e_debug(hw, I40E_DEBUG_NVM,
1009 "NVMUPD: bad cmd %s in reading state.\n",
1010 i40e_nvm_update_state_str[upd_cmd]);
1011 status = I40E_NOT_SUPPORTED;
1019 * i40e_nvmupd_state_writing - Handle NVM update state Writing
1020 * @hw: pointer to hardware structure
1021 * @cmd: pointer to nvm update command buffer
1022 * @bytes: pointer to the data buffer
1023 * @perrno: pointer to return error code
1025 * NVM ownership is already held. Process legitimate commands and set any
1026 * change in state; reject all other commands
/* WRITING-state handler (NVM ownership already held): CON writes stay in
 * WRITE_WAIT; LCB writes and CSUM commands end or continue the
 * transaction, deferring the NVM release to the ARQ completion event via
 * nvm_release_on_done. On an EBUSY failure after the write semaphore has
 * expired, reacquires the semaphore and retries the command once.
 * NOTE(review): listing is missing interior lines (switch opener, success
 * checks, else branches, breaks, the retry goto, closing braces, return).
 */
1028 STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
1029 struct i40e_nvm_access *cmd,
1030 u8 *bytes, int *perrno)
1032 enum i40e_status_code status = I40E_SUCCESS;
1033 enum i40e_nvmupd_cmd upd_cmd;
1034 bool retry_attempt = false;
1036 DEBUGFUNC("i40e_nvmupd_state_writing");
1038 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
/* Continue-write: remain in the transaction, wait for next command. */
1042 case I40E_NVMUPD_WRITE_CON:
1043 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1045 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
/* Last write in block: on failure drop to INIT, on success defer
 * the release to the ARQ completion and wait.
 */
1048 case I40E_NVMUPD_WRITE_LCB:
1049 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1051 *perrno = hw->aq.asq_last_status ?
1052 i40e_aq_rc_to_posix(status,
1053 hw->aq.asq_last_status) :
1055 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1057 hw->aq.nvm_release_on_done = true;
1058 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
/* Checksum mid-transaction: commit SR, stay writing on success. */
1062 case I40E_NVMUPD_CSUM_CON:
1063 status = i40e_update_nvm_checksum(hw);
1065 *perrno = hw->aq.asq_last_status ?
1066 i40e_aq_rc_to_posix(status,
1067 hw->aq.asq_last_status) :
1069 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1071 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
/* Checksum last-command: commit SR and end the write transaction. */
1075 case I40E_NVMUPD_CSUM_LCB:
1076 status = i40e_update_nvm_checksum(hw);
1078 *perrno = hw->aq.asq_last_status ?
1079 i40e_aq_rc_to_posix(status,
1080 hw->aq.asq_last_status) :
1082 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1084 hw->aq.nvm_release_on_done = true;
1085 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1090 i40e_debug(hw, I40E_DEBUG_NVM,
1091 "NVMUPD: bad cmd %s in writing state.\n",
1092 i40e_nvm_update_state_str[upd_cmd]);
1093 status = I40E_NOT_SUPPORTED;
1098 /* In some circumstances, a multi-write transaction takes longer
1099 * than the default 3 minute timeout on the write semaphore. If
1100 * the write failed with an EBUSY status, this is likely the problem,
1101 * so here we try to reacquire the semaphore then retry the write.
1102 * We only do one retry, then give up.
1104 if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
1106 enum i40e_status_code old_status = status;
1107 u32 old_asq_status = hw->aq.asq_last_status;
1110 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
1111 if (gtime >= hw->nvm.hw_semaphore_timeout) {
1112 i40e_debug(hw, I40E_DEBUG_ALL,
1113 "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
1114 gtime, hw->nvm.hw_semaphore_timeout);
1115 i40e_release_nvm(hw);
1116 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
/* Reacquire failed: restore the original error for the caller. */
1118 i40e_debug(hw, I40E_DEBUG_ALL,
1119 "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
1120 hw->aq.asq_last_status);
1121 status = old_status;
1122 hw->aq.asq_last_status = old_asq_status;
1124 retry_attempt = true;
1134 * i40e_nvmupd_validate_command - Validate given command
1135 * @hw: pointer to hardware structure
1136 * @cmd: pointer to nvm update command buffer
1137 * @perrno: pointer to return error code
1139 * Return one of the valid command types or I40E_NVMUPD_INVALID
1141 STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
1142 struct i40e_nvm_access *cmd,
1145 enum i40e_nvmupd_cmd upd_cmd;
1146 u8 module, transaction;
1148 DEBUGFUNC("i40e_nvmupd_validate_command\n");
1150 /* anything that doesn't match a recognized case is an error */
1151 upd_cmd = I40E_NVMUPD_INVALID;
1153 transaction = i40e_nvmupd_get_transaction(cmd->config);
1154 module = i40e_nvmupd_get_module(cmd->config);
1156 /* limits on data size */
1157 if ((cmd->data_size < 1) ||
1158 (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
1159 i40e_debug(hw, I40E_DEBUG_NVM,
1160 "i40e_nvmupd_validate_command data_size %d\n",
1163 return I40E_NVMUPD_INVALID;
1166 switch (cmd->command) {
1168 switch (transaction) {
1170 upd_cmd = I40E_NVMUPD_READ_CON;
1173 upd_cmd = I40E_NVMUPD_READ_SNT;
1176 upd_cmd = I40E_NVMUPD_READ_LCB;
1179 upd_cmd = I40E_NVMUPD_READ_SA;
1183 upd_cmd = I40E_NVMUPD_STATUS;
1184 else if (module == 0)
1185 upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
1190 case I40E_NVM_WRITE:
1191 switch (transaction) {
1193 upd_cmd = I40E_NVMUPD_WRITE_CON;
1196 upd_cmd = I40E_NVMUPD_WRITE_SNT;
1199 upd_cmd = I40E_NVMUPD_WRITE_LCB;
1202 upd_cmd = I40E_NVMUPD_WRITE_SA;
1205 upd_cmd = I40E_NVMUPD_WRITE_ERA;
1208 upd_cmd = I40E_NVMUPD_CSUM_CON;
1210 case (I40E_NVM_CSUM|I40E_NVM_SA):
1211 upd_cmd = I40E_NVMUPD_CSUM_SA;
1213 case (I40E_NVM_CSUM|I40E_NVM_LCB):
1214 upd_cmd = I40E_NVMUPD_CSUM_LCB;
1218 upd_cmd = I40E_NVMUPD_EXEC_AQ;
1228 * i40e_nvmupd_exec_aq - Run an AQ command
1229 * @hw: pointer to hardware structure
1230 * @cmd: pointer to nvm update command buffer
1231 * @bytes: pointer to the data buffer
1232 * @perrno: pointer to return error code
1234 * cmd structure contains identifiers and data buffer
1236 STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
1237 struct i40e_nvm_access *cmd,
1238 u8 *bytes, int *perrno)
1240 struct i40e_asq_cmd_details cmd_details;
1241 enum i40e_status_code status;
1242 struct i40e_aq_desc *aq_desc;
1248 i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1249 memset(&cmd_details, 0, sizeof(cmd_details));
1250 cmd_details.wb_desc = &hw->nvm_wb_desc;
1252 aq_desc_len = sizeof(struct i40e_aq_desc);
1253 memset(&hw->nvm_wb_desc, 0, aq_desc_len);
1255 /* get the aq descriptor */
1256 if (cmd->data_size < aq_desc_len) {
1257 i40e_debug(hw, I40E_DEBUG_NVM,
1258 "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
1259 cmd->data_size, aq_desc_len);
1261 return I40E_ERR_PARAM;
1263 aq_desc = (struct i40e_aq_desc *)bytes;
1265 /* if data buffer needed, make sure it's ready */
1266 aq_data_len = cmd->data_size - aq_desc_len;
1267 buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen));
1269 if (!hw->nvm_buff.va) {
1270 status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
1271 hw->aq.asq_buf_size);
1273 i40e_debug(hw, I40E_DEBUG_NVM,
1274 "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
1278 if (hw->nvm_buff.va) {
1279 buff = hw->nvm_buff.va;
1280 memcpy(buff, &bytes[aq_desc_len], aq_data_len);
1284 /* and away we go! */
1285 status = i40e_asq_send_command(hw, aq_desc, buff,
1286 buff_size, &cmd_details);
1288 i40e_debug(hw, I40E_DEBUG_NVM,
1289 "i40e_nvmupd_exec_aq err %s aq_err %s\n",
1290 i40e_stat_str(hw, status),
1291 i40e_aq_str(hw, hw->aq.asq_last_status));
1292 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1299 * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
1300 * @hw: pointer to hardware structure
1301 * @cmd: pointer to nvm update command buffer
1302 * @bytes: pointer to the data buffer
1303 * @perrno: pointer to return error code
1305 * cmd structure contains identifiers and data buffer
1307 STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
1308 struct i40e_nvm_access *cmd,
1309 u8 *bytes, int *perrno)
1316 i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1318 aq_desc_len = sizeof(struct i40e_aq_desc);
1319 aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen);
1321 /* check offset range */
1322 if (cmd->offset > aq_total_len) {
1323 i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
1324 __func__, cmd->offset, aq_total_len);
1326 return I40E_ERR_PARAM;
1329 /* check copylength range */
1330 if (cmd->data_size > (aq_total_len - cmd->offset)) {
1331 int new_len = aq_total_len - cmd->offset;
1333 i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
1334 __func__, cmd->data_size, new_len);
1335 cmd->data_size = new_len;
1338 remainder = cmd->data_size;
1339 if (cmd->offset < aq_desc_len) {
1340 u32 len = aq_desc_len - cmd->offset;
1342 len = min(len, cmd->data_size);
1343 i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
1344 __func__, cmd->offset, cmd->offset + len);
1346 buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
1347 memcpy(bytes, buff, len);
1351 buff = hw->nvm_buff.va;
1353 buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len);
1356 if (remainder > 0) {
1357 int start_byte = buff - (u8 *)hw->nvm_buff.va;
1359 i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
1360 __func__, start_byte, start_byte + remainder);
1361 memcpy(bytes, buff, remainder);
1364 return I40E_SUCCESS;
1368 * i40e_nvmupd_nvm_read - Read NVM
1369 * @hw: pointer to hardware structure
1370 * @cmd: pointer to nvm update command buffer
1371 * @bytes: pointer to the data buffer
1372 * @perrno: pointer to return error code
1374 * cmd structure contains identifiers and data buffer
1376 STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
1377 struct i40e_nvm_access *cmd,
1378 u8 *bytes, int *perrno)
1380 struct i40e_asq_cmd_details cmd_details;
1381 enum i40e_status_code status;
1382 u8 module, transaction;
1385 transaction = i40e_nvmupd_get_transaction(cmd->config);
1386 module = i40e_nvmupd_get_module(cmd->config);
1387 last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
1389 memset(&cmd_details, 0, sizeof(cmd_details));
1390 cmd_details.wb_desc = &hw->nvm_wb_desc;
1392 status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1393 bytes, last, &cmd_details);
1395 i40e_debug(hw, I40E_DEBUG_NVM,
1396 "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
1397 module, cmd->offset, cmd->data_size);
1398 i40e_debug(hw, I40E_DEBUG_NVM,
1399 "i40e_nvmupd_nvm_read status %d aq %d\n",
1400 status, hw->aq.asq_last_status);
1401 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1408 * i40e_nvmupd_nvm_erase - Erase an NVM module
1409 * @hw: pointer to hardware structure
1410 * @cmd: pointer to nvm update command buffer
1411 * @perrno: pointer to return error code
1413 * module, offset, data_size and data are in cmd structure
1415 STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
1416 struct i40e_nvm_access *cmd,
1419 enum i40e_status_code status = I40E_SUCCESS;
1420 struct i40e_asq_cmd_details cmd_details;
1421 u8 module, transaction;
1424 transaction = i40e_nvmupd_get_transaction(cmd->config);
1425 module = i40e_nvmupd_get_module(cmd->config);
1426 last = (transaction & I40E_NVM_LCB);
1428 memset(&cmd_details, 0, sizeof(cmd_details));
1429 cmd_details.wb_desc = &hw->nvm_wb_desc;
1431 status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1432 last, &cmd_details);
1434 i40e_debug(hw, I40E_DEBUG_NVM,
1435 "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
1436 module, cmd->offset, cmd->data_size);
1437 i40e_debug(hw, I40E_DEBUG_NVM,
1438 "i40e_nvmupd_nvm_erase status %d aq %d\n",
1439 status, hw->aq.asq_last_status);
1440 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1447 * i40e_nvmupd_nvm_write - Write NVM
1448 * @hw: pointer to hardware structure
1449 * @cmd: pointer to nvm update command buffer
1450 * @bytes: pointer to the data buffer
1451 * @perrno: pointer to return error code
1453 * module, offset, data_size and data are in cmd structure
1455 STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
1456 struct i40e_nvm_access *cmd,
1457 u8 *bytes, int *perrno)
1459 enum i40e_status_code status = I40E_SUCCESS;
1460 struct i40e_asq_cmd_details cmd_details;
1461 u8 module, transaction;
1464 transaction = i40e_nvmupd_get_transaction(cmd->config);
1465 module = i40e_nvmupd_get_module(cmd->config);
1466 last = (transaction & I40E_NVM_LCB);
1468 memset(&cmd_details, 0, sizeof(cmd_details));
1469 cmd_details.wb_desc = &hw->nvm_wb_desc;
1471 status = i40e_aq_update_nvm(hw, module, cmd->offset,
1472 (u16)cmd->data_size, bytes, last,
1475 i40e_debug(hw, I40E_DEBUG_NVM,
1476 "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
1477 module, cmd->offset, cmd->data_size);
1478 i40e_debug(hw, I40E_DEBUG_NVM,
1479 "i40e_nvmupd_nvm_write status %d aq %d\n",
1480 status, hw->aq.asq_last_status);
1481 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);