1 /*******************************************************************************
3 Copyright (c) 2013 - 2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
34 #include "i40e_prototype.h"
36 enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
38 enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
40 enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
41 u16 *words, u16 *data);
42 enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
43 u16 *words, u16 *data);
44 enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
45 u32 offset, u16 words, void *data,
49 * i40e_init_nvm_ops - Initialize NVM function pointers
50 * @hw: pointer to the HW structure
52 * Setup the function pointers and the NVM info structure. Should be called
53 * once per NVM initialization, e.g. inside the i40e_init_shared_code().
54 * Please notice that the NVM term is used here (& in all methods covered
55 * in this file) as an equivalent of the FLASH part mapped into the SR.
56 * We are accessing FLASH always through the Shadow RAM.
58 enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
60 struct i40e_nvm_info *nvm = &hw->nvm;
61 enum i40e_status_code ret_code = I40E_SUCCESS;
65 DEBUGFUNC("i40e_init_nvm");
67 /* The SR size is stored regardless of the nvm programming mode
68 * as the blank mode may be used in the factory line.
70 gens = rd32(hw, I40E_GLNVM_GENS);
71 sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
72 I40E_GLNVM_GENS_SR_SIZE_SHIFT);
73 /* Switching to words (sr_size contains power of 2KB) */
74 nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
76 /* Check if we are in the normal or blank NVM programming mode */
77 fla = rd32(hw, I40E_GLNVM_FLA);
78 if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
80 nvm->timeout = I40E_MAX_NVM_TIMEOUT;
81 nvm->blank_nvm_mode = false;
82 } else { /* Blank programming mode */
83 nvm->blank_nvm_mode = true;
84 ret_code = I40E_ERR_NVM_BLANK_MODE;
85 i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
92 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
93 * @hw: pointer to the HW structure
94 * @access: NVM access type (read or write)
96 * This function will request NVM ownership for reading
97 * via the proper Admin Command.
99 enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
100 enum i40e_aq_resource_access_type access)
102 enum i40e_status_code ret_code = I40E_SUCCESS;
106 DEBUGFUNC("i40e_acquire_nvm");
108 if (hw->nvm.blank_nvm_mode)
109 goto i40e_i40e_acquire_nvm_exit;
111 ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
112 0, &time_left, NULL);
113 /* Reading the Global Device Timer */
114 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
116 /* Store the timeout */
117 hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
120 i40e_debug(hw, I40E_DEBUG_NVM,
121 "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
122 access, time_left, ret_code, hw->aq.asq_last_status);
124 if (ret_code && time_left) {
125 /* Poll until the current NVM owner timeouts */
126 timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
127 while ((gtime < timeout) && time_left) {
129 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
130 ret_code = i40e_aq_request_resource(hw,
131 I40E_NVM_RESOURCE_ID,
132 access, 0, &time_left,
134 if (ret_code == I40E_SUCCESS) {
135 hw->nvm.hw_semaphore_timeout =
136 I40E_MS_TO_GTIME(time_left) + gtime;
140 if (ret_code != I40E_SUCCESS) {
141 hw->nvm.hw_semaphore_timeout = 0;
142 i40e_debug(hw, I40E_DEBUG_NVM,
143 "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
144 time_left, ret_code, hw->aq.asq_last_status);
148 i40e_i40e_acquire_nvm_exit:
153 * i40e_release_nvm - Generic request for releasing the NVM ownership
154 * @hw: pointer to the HW structure
156 * This function will release NVM resource via the proper Admin Command.
158 void i40e_release_nvm(struct i40e_hw *hw)
160 enum i40e_status_code ret_code = I40E_SUCCESS;
163 DEBUGFUNC("i40e_release_nvm");
165 if (hw->nvm.blank_nvm_mode)
168 ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
170 /* there are some rare cases when trying to release the resource
171 * results in an admin Q timeout, so handle them correctly
173 while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
174 (total_delay < hw->aq.asq_cmd_timeout)) {
176 ret_code = i40e_aq_release_resource(hw,
177 I40E_NVM_RESOURCE_ID, 0, NULL);
183 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
184 * @hw: pointer to the HW structure
186 * Polls the SRCTL Shadow RAM register done bit.
188 static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
190 enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
193 DEBUGFUNC("i40e_poll_sr_srctl_done_bit");
195 /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
196 for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
197 srctl = rd32(hw, I40E_GLNVM_SRCTL);
198 if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
199 ret_code = I40E_SUCCESS;
204 if (ret_code == I40E_ERR_TIMEOUT)
205 i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
210 * i40e_read_nvm_word - Reads nvm word and acquire lock if necessary
211 * @hw: pointer to the HW structure
212 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
213 * @data: word read from the Shadow RAM
215 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
217 enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
220 enum i40e_status_code ret_code = I40E_SUCCESS;
222 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
224 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
225 ret_code = i40e_read_nvm_word_aq(hw, offset, data);
227 ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
229 i40e_release_nvm(hw);
235 * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
236 * @hw: pointer to the HW structure
237 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
238 * @data: word read from the Shadow RAM
240 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
242 enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
246 enum i40e_status_code ret_code = I40E_SUCCESS;
248 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
249 ret_code = i40e_read_nvm_word_aq(hw, offset, data);
251 ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
256 * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
257 * @hw: pointer to the HW structure
258 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
259 * @data: word read from the Shadow RAM
261 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
263 enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
266 enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
269 DEBUGFUNC("i40e_read_nvm_word_srctl");
271 if (offset >= hw->nvm.sr_size) {
272 i40e_debug(hw, I40E_DEBUG_NVM,
273 "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
274 offset, hw->nvm.sr_size);
275 ret_code = I40E_ERR_PARAM;
279 /* Poll the done bit first */
280 ret_code = i40e_poll_sr_srctl_done_bit(hw);
281 if (ret_code == I40E_SUCCESS) {
282 /* Write the address and start reading */
283 sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
284 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
285 wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
287 /* Poll I40E_GLNVM_SRCTL until the done bit is set */
288 ret_code = i40e_poll_sr_srctl_done_bit(hw);
289 if (ret_code == I40E_SUCCESS) {
290 sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
291 *data = (u16)((sr_reg &
292 I40E_GLNVM_SRDATA_RDDATA_MASK)
293 >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
296 if (ret_code != I40E_SUCCESS)
297 i40e_debug(hw, I40E_DEBUG_NVM,
298 "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
306 * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
307 * @hw: pointer to the HW structure
308 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
309 * @data: word read from the Shadow RAM
311 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
313 enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
316 enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
318 DEBUGFUNC("i40e_read_nvm_word_aq");
320 ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
321 *data = LE16_TO_CPU(*(__le16 *)data);
327 * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock
328 * @hw: pointer to the HW structure
329 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
330 * @words: (in) number of words to read; (out) number of words actually read
331 * @data: words read from the Shadow RAM
333 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
334 * method. The buffer read is preceded by the NVM ownership take
335 * and followed by the release.
337 enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
339 u16 *words, u16 *data)
341 enum i40e_status_code ret_code = I40E_SUCCESS;
343 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
344 ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data);
346 ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
351 * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acuire lock if necessary
352 * @hw: pointer to the HW structure
353 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
354 * @words: (in) number of words to read; (out) number of words actually read
355 * @data: words read from the Shadow RAM
357 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
358 * method. The buffer read is preceded by the NVM ownership take
359 * and followed by the release.
361 enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
362 u16 *words, u16 *data)
364 enum i40e_status_code ret_code = I40E_SUCCESS;
366 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
367 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
369 ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
371 i40e_release_nvm(hw);
374 ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
380 * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
381 * @hw: pointer to the HW structure
382 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
383 * @words: (in) number of words to read; (out) number of words actually read
384 * @data: words read from the Shadow RAM
386 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
387 * method. The buffer read is preceded by the NVM ownership take
388 * and followed by the release.
390 enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
391 u16 *words, u16 *data)
393 enum i40e_status_code ret_code = I40E_SUCCESS;
396 DEBUGFUNC("i40e_read_nvm_buffer_srctl");
398 /* Loop through the selected region */
399 for (word = 0; word < *words; word++) {
400 index = offset + word;
401 ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
402 if (ret_code != I40E_SUCCESS)
406 /* Update the number of words read from the Shadow RAM */
413 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
414 * @hw: pointer to the HW structure
415 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
416 * @words: (in) number of words to read; (out) number of words actually read
417 * @data: words read from the Shadow RAM
419 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
420 * method. The buffer read is preceded by the NVM ownership take
421 * and followed by the release.
423 enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
424 u16 *words, u16 *data)
426 enum i40e_status_code ret_code;
427 u16 read_size = *words;
428 bool last_cmd = false;
432 DEBUGFUNC("i40e_read_nvm_buffer_aq");
435 /* Calculate number of bytes we should read in this step.
436 * FVL AQ do not allow to read more than one page at a time or
437 * to cross page boundaries.
439 if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
440 read_size = min(*words,
441 (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
442 (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
444 read_size = min((*words - words_read),
445 I40E_SR_SECTOR_SIZE_IN_WORDS);
447 /* Check if this is last command, if so set proper flag */
448 if ((words_read + read_size) >= *words)
451 ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
452 data + words_read, last_cmd);
453 if (ret_code != I40E_SUCCESS)
454 goto read_nvm_buffer_aq_exit;
456 /* Increment counter for words already read and move offset to
459 words_read += read_size;
461 } while (words_read < *words);
463 for (i = 0; i < *words; i++)
464 data[i] = LE16_TO_CPU(((__le16 *)data)[i]);
466 read_nvm_buffer_aq_exit:
472 * i40e_read_nvm_aq - Read Shadow RAM.
473 * @hw: pointer to the HW structure.
474 * @module_pointer: module pointer location in words from the NVM beginning
475 * @offset: offset in words from module start
476 * @words: number of words to write
477 * @data: buffer with words to write to the Shadow RAM
478 * @last_command: tells the AdminQ that this is the last command
480 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
482 enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
483 u32 offset, u16 words, void *data,
486 enum i40e_status_code ret_code = I40E_ERR_NVM;
487 struct i40e_asq_cmd_details cmd_details;
489 DEBUGFUNC("i40e_read_nvm_aq");
491 memset(&cmd_details, 0, sizeof(cmd_details));
492 cmd_details.wb_desc = &hw->nvm_wb_desc;
494 /* Here we are checking the SR limit only for the flat memory model.
495 * We cannot do it for the module-based model, as we did not acquire
496 * the NVM resource yet (we cannot get the module pointer value).
497 * Firmware will check the module-based model.
499 if ((offset + words) > hw->nvm.sr_size)
500 i40e_debug(hw, I40E_DEBUG_NVM,
501 "NVM write error: offset %d beyond Shadow RAM limit %d\n",
502 (offset + words), hw->nvm.sr_size);
503 else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
504 /* We can write only up to 4KB (one sector), in one AQ write */
505 i40e_debug(hw, I40E_DEBUG_NVM,
506 "NVM write fail error: tried to write %d words, limit is %d.\n",
507 words, I40E_SR_SECTOR_SIZE_IN_WORDS);
508 else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
509 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
510 /* A single write cannot spread over two sectors */
511 i40e_debug(hw, I40E_DEBUG_NVM,
512 "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
515 ret_code = i40e_aq_read_nvm(hw, module_pointer,
516 2 * offset, /*bytes*/
518 data, last_command, &cmd_details);
524 * i40e_write_nvm_aq - Writes Shadow RAM.
525 * @hw: pointer to the HW structure.
526 * @module_pointer: module pointer location in words from the NVM beginning
527 * @offset: offset in words from module start
528 * @words: number of words to write
529 * @data: buffer with words to write to the Shadow RAM
530 * @last_command: tells the AdminQ that this is the last command
532 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
534 enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
535 u32 offset, u16 words, void *data,
538 enum i40e_status_code ret_code = I40E_ERR_NVM;
539 struct i40e_asq_cmd_details cmd_details;
541 DEBUGFUNC("i40e_write_nvm_aq");
543 memset(&cmd_details, 0, sizeof(cmd_details));
544 cmd_details.wb_desc = &hw->nvm_wb_desc;
546 /* Here we are checking the SR limit only for the flat memory model.
547 * We cannot do it for the module-based model, as we did not acquire
548 * the NVM resource yet (we cannot get the module pointer value).
549 * Firmware will check the module-based model.
551 if ((offset + words) > hw->nvm.sr_size)
552 DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n");
553 else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
554 /* We can write only up to 4KB (one sector), in one AQ write */
555 DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n");
556 else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
557 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
558 /* A single write cannot spread over two sectors */
559 DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n");
561 ret_code = i40e_aq_update_nvm(hw, module_pointer,
562 2 * offset, /*bytes*/
564 data, last_command, &cmd_details);
570 * __i40e_write_nvm_word - Writes Shadow RAM word
571 * @hw: pointer to the HW structure
572 * @offset: offset of the Shadow RAM word to write
573 * @data: word to write to the Shadow RAM
575 * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
576 * NVM ownership have to be acquired and released (on ARQ completion event
577 * reception) by caller. To commit SR to NVM update checksum function
580 enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
583 DEBUGFUNC("i40e_write_nvm_word");
585 *((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));
587 /* Value 0x00 below means that we treat SR as a flat mem */
588 return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, false);
592 * __i40e_write_nvm_buffer - Writes Shadow RAM buffer
593 * @hw: pointer to the HW structure
594 * @module_pointer: module pointer location in words from the NVM beginning
595 * @offset: offset of the Shadow RAM buffer to write
596 * @words: number of words to write
597 * @data: words to write to the Shadow RAM
599 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
600 * NVM ownership must be acquired before calling this function and released
601 * on ARQ completion event reception by caller. To commit SR to NVM update
602 * checksum function should be called.
604 enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw,
605 u8 module_pointer, u32 offset,
606 u16 words, void *data)
608 __le16 *le_word_ptr = (__le16 *)data;
609 u16 *word_ptr = (u16 *)data;
612 DEBUGFUNC("i40e_write_nvm_buffer");
614 for (i = 0; i < words; i++)
615 le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);
617 /* Here we will only write one buffer as the size of the modules
618 * mirrored in the Shadow RAM is always less than 4K.
620 return i40e_write_nvm_aq(hw, module_pointer, offset, words,
625 * i40e_calc_nvm_checksum - Calculates and returns the checksum
626 * @hw: pointer to hardware structure
627 * @checksum: pointer to the checksum
629 * This function calculates SW Checksum that covers the whole 64kB shadow RAM
630 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
631 * is customer specific and unknown. Therefore, this function skips all maximum
632 * possible size of VPD (1kB).
634 enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
636 enum i40e_status_code ret_code = I40E_SUCCESS;
637 struct i40e_virt_mem vmem;
638 u16 pcie_alt_module = 0;
639 u16 checksum_local = 0;
644 DEBUGFUNC("i40e_calc_nvm_checksum");
646 ret_code = i40e_allocate_virt_mem(hw, &vmem,
647 I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
649 goto i40e_calc_nvm_checksum_exit;
650 data = (u16 *)vmem.va;
652 /* read pointer to VPD area */
653 ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR,
655 if (ret_code != I40E_SUCCESS) {
656 ret_code = I40E_ERR_NVM_CHECKSUM;
657 goto i40e_calc_nvm_checksum_exit;
660 /* read pointer to PCIe Alt Auto-load module */
661 ret_code = __i40e_read_nvm_word(hw,
662 I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
664 if (ret_code != I40E_SUCCESS) {
665 ret_code = I40E_ERR_NVM_CHECKSUM;
666 goto i40e_calc_nvm_checksum_exit;
669 /* Calculate SW checksum that covers the whole 64kB shadow RAM
670 * except the VPD and PCIe ALT Auto-load modules
672 for (i = 0; i < hw->nvm.sr_size; i++) {
674 if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
675 u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
677 ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
678 if (ret_code != I40E_SUCCESS) {
679 ret_code = I40E_ERR_NVM_CHECKSUM;
680 goto i40e_calc_nvm_checksum_exit;
684 /* Skip Checksum word */
685 if (i == I40E_SR_SW_CHECKSUM_WORD)
687 /* Skip VPD module (convert byte size to word count) */
688 if ((i >= (u32)vpd_module) &&
689 (i < ((u32)vpd_module +
690 (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
693 /* Skip PCIe ALT module (convert byte size to word count) */
694 if ((i >= (u32)pcie_alt_module) &&
695 (i < ((u32)pcie_alt_module +
696 (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
700 checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
703 *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
705 i40e_calc_nvm_checksum_exit:
706 i40e_free_virt_mem(hw, &vmem);
711 * i40e_update_nvm_checksum - Updates the NVM checksum
712 * @hw: pointer to hardware structure
714 * NVM ownership must be acquired before calling this function and released
715 * on ARQ completion event reception by caller.
716 * This function will commit SR to NVM.
718 enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
720 enum i40e_status_code ret_code = I40E_SUCCESS;
724 DEBUGFUNC("i40e_update_nvm_checksum");
726 ret_code = i40e_calc_nvm_checksum(hw, &checksum);
727 le_sum = CPU_TO_LE16(checksum);
728 if (ret_code == I40E_SUCCESS)
729 ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
736 * i40e_validate_nvm_checksum - Validate EEPROM checksum
737 * @hw: pointer to hardware structure
738 * @checksum: calculated checksum
740 * Performs checksum calculation and validates the NVM SW checksum. If the
741 * caller does not need checksum, the value can be NULL.
743 enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
746 enum i40e_status_code ret_code = I40E_SUCCESS;
748 u16 checksum_local = 0;
750 DEBUGFUNC("i40e_validate_nvm_checksum");
752 /* acquire_nvm provides exclusive NVM lock to synchronize access across
753 * PFs. X710 uses i40e_read_nvm_word_srctl which polls for done bit
754 * twice (first time to be able to write address to I40E_GLNVM_SRCTL
755 * register, second to read data from I40E_GLNVM_SRDATA. One PF can see
756 * done bit and try to write address, while another one will interpret
757 * it as a good time to read data. It will cause invalid data to be
760 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
762 ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
763 i40e_release_nvm(hw);
764 if (ret_code != I40E_SUCCESS)
765 goto i40e_validate_nvm_checksum_exit;
767 goto i40e_validate_nvm_checksum_exit;
770 i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
772 /* Verify read checksum from EEPROM is the same as
773 * calculated checksum
775 if (checksum_local != checksum_sr)
776 ret_code = I40E_ERR_NVM_CHECKSUM;
778 /* If the user cares, return the calculated checksum */
780 *checksum = checksum_local;
782 i40e_validate_nvm_checksum_exit:
786 STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
787 struct i40e_nvm_access *cmd,
788 u8 *bytes, int *perrno);
789 STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
790 struct i40e_nvm_access *cmd,
791 u8 *bytes, int *perrno);
792 STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
793 struct i40e_nvm_access *cmd,
794 u8 *bytes, int *perrno);
795 STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
796 struct i40e_nvm_access *cmd,
798 STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
799 struct i40e_nvm_access *cmd,
801 STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
802 struct i40e_nvm_access *cmd,
803 u8 *bytes, int *perrno);
804 STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
805 struct i40e_nvm_access *cmd,
806 u8 *bytes, int *perrno);
807 STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
808 struct i40e_nvm_access *cmd,
809 u8 *bytes, int *perrno);
810 STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
811 struct i40e_nvm_access *cmd,
812 u8 *bytes, int *perrno);
813 STATIC INLINE u8 i40e_nvmupd_get_module(u32 val)
815 return (u8)(val & I40E_NVM_MOD_PNT_MASK);
817 STATIC INLINE u8 i40e_nvmupd_get_transaction(u32 val)
819 return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
822 STATIC const char *i40e_nvm_update_state_str[] = {
823 "I40E_NVMUPD_INVALID",
824 "I40E_NVMUPD_READ_CON",
825 "I40E_NVMUPD_READ_SNT",
826 "I40E_NVMUPD_READ_LCB",
827 "I40E_NVMUPD_READ_SA",
828 "I40E_NVMUPD_WRITE_ERA",
829 "I40E_NVMUPD_WRITE_CON",
830 "I40E_NVMUPD_WRITE_SNT",
831 "I40E_NVMUPD_WRITE_LCB",
832 "I40E_NVMUPD_WRITE_SA",
833 "I40E_NVMUPD_CSUM_CON",
834 "I40E_NVMUPD_CSUM_SA",
835 "I40E_NVMUPD_CSUM_LCB",
836 "I40E_NVMUPD_STATUS",
837 "I40E_NVMUPD_EXEC_AQ",
838 "I40E_NVMUPD_GET_AQ_RESULT",
842 * i40e_nvmupd_command - Process an NVM update command
843 * @hw: pointer to hardware structure
844 * @cmd: pointer to nvm update command
845 * @bytes: pointer to the data buffer
846 * @perrno: pointer to return error code
848 * Dispatches command depending on what update state is current
850 enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
851 struct i40e_nvm_access *cmd,
852 u8 *bytes, int *perrno)
854 enum i40e_status_code status;
855 enum i40e_nvmupd_cmd upd_cmd;
857 DEBUGFUNC("i40e_nvmupd_command");
862 /* early check for status command and debug msgs */
863 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
865 i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
866 i40e_nvm_update_state_str[upd_cmd],
868 hw->nvm_release_on_done, hw->nvm_wait_opcode,
869 cmd->command, cmd->config, cmd->offset, cmd->data_size);
871 if (upd_cmd == I40E_NVMUPD_INVALID) {
873 i40e_debug(hw, I40E_DEBUG_NVM,
874 "i40e_nvmupd_validate_command returns %d errno %d\n",
878 /* a status request returns immediately rather than
879 * going into the state machine
881 if (upd_cmd == I40E_NVMUPD_STATUS) {
882 if (!cmd->data_size) {
884 return I40E_ERR_BUF_TOO_SHORT;
887 bytes[0] = hw->nvmupd_state;
889 if (cmd->data_size >= 4) {
891 *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
894 /* Clear error status on read */
895 if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
896 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
901 /* Clear status even it is not read and log */
902 if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
903 i40e_debug(hw, I40E_DEBUG_NVM,
904 "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
905 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
908 /* Acquire lock to prevent race condition where adminq_task
909 * can execute after i40e_nvmupd_nvm_read/write but before state
910 * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
912 * During NVMUpdate, it is observed that lock could be held for
913 * ~5ms for most commands. However lock is held for ~60ms for
914 * NVMUPD_CSUM_LCB command.
916 i40e_acquire_spinlock(&hw->aq.arq_spinlock);
917 switch (hw->nvmupd_state) {
918 case I40E_NVMUPD_STATE_INIT:
919 status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
922 case I40E_NVMUPD_STATE_READING:
923 status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
926 case I40E_NVMUPD_STATE_WRITING:
927 status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
930 case I40E_NVMUPD_STATE_INIT_WAIT:
931 case I40E_NVMUPD_STATE_WRITE_WAIT:
932 /* if we need to stop waiting for an event, clear
933 * the wait info and return before doing anything else
935 if (cmd->offset == 0xffff) {
936 i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
937 status = I40E_SUCCESS;
941 status = I40E_ERR_NOT_READY;
946 /* invalid state, should never happen */
947 i40e_debug(hw, I40E_DEBUG_NVM,
948 "NVMUPD: no such state %d\n", hw->nvmupd_state);
949 status = I40E_NOT_SUPPORTED;
954 i40e_release_spinlock(&hw->aq.arq_spinlock);
959 * i40e_nvmupd_state_init - Handle NVM update state Init
960 * @hw: pointer to hardware structure
961 * @cmd: pointer to nvm update command buffer
962 * @bytes: pointer to the data buffer
963 * @perrno: pointer to return error code
965 * Process legitimate commands of the Init state and conditionally set next
966 * state. Reject all other commands.
968 STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
969 struct i40e_nvm_access *cmd,
970 u8 *bytes, int *perrno)
972 enum i40e_status_code status = I40E_SUCCESS;
973 enum i40e_nvmupd_cmd upd_cmd;
975 DEBUGFUNC("i40e_nvmupd_state_init");
977 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
980 case I40E_NVMUPD_READ_SA:
981 status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
983 *perrno = i40e_aq_rc_to_posix(status,
984 hw->aq.asq_last_status);
986 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
987 i40e_release_nvm(hw);
991 case I40E_NVMUPD_READ_SNT:
992 status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
994 *perrno = i40e_aq_rc_to_posix(status,
995 hw->aq.asq_last_status);
997 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
999 i40e_release_nvm(hw);
1001 hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
1005 case I40E_NVMUPD_WRITE_ERA:
1006 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1008 *perrno = i40e_aq_rc_to_posix(status,
1009 hw->aq.asq_last_status);
1011 status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
1013 i40e_release_nvm(hw);
1015 hw->nvm_release_on_done = true;
1016 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
1017 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1022 case I40E_NVMUPD_WRITE_SA:
1023 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1025 *perrno = i40e_aq_rc_to_posix(status,
1026 hw->aq.asq_last_status);
1028 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1030 i40e_release_nvm(hw);
1032 hw->nvm_release_on_done = true;
1033 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1034 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1039 case I40E_NVMUPD_WRITE_SNT:
1040 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1042 *perrno = i40e_aq_rc_to_posix(status,
1043 hw->aq.asq_last_status);
1045 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1047 i40e_release_nvm(hw);
1049 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1050 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1055 case I40E_NVMUPD_CSUM_SA:
1056 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1058 *perrno = i40e_aq_rc_to_posix(status,
1059 hw->aq.asq_last_status);
1061 status = i40e_update_nvm_checksum(hw);
1063 *perrno = hw->aq.asq_last_status ?
1064 i40e_aq_rc_to_posix(status,
1065 hw->aq.asq_last_status) :
1067 i40e_release_nvm(hw);
1069 hw->nvm_release_on_done = true;
1070 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1071 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1076 case I40E_NVMUPD_EXEC_AQ:
1077 status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
1080 case I40E_NVMUPD_GET_AQ_RESULT:
1081 status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
1085 i40e_debug(hw, I40E_DEBUG_NVM,
1086 "NVMUPD: bad cmd %s in init state\n",
1087 i40e_nvm_update_state_str[upd_cmd]);
1088 status = I40E_ERR_NVM;
1096 * i40e_nvmupd_state_reading - Handle NVM update state Reading
1097 * @hw: pointer to hardware structure
1098 * @cmd: pointer to nvm update command buffer
1099 * @bytes: pointer to the data buffer
1100 * @perrno: pointer to return error code
1102 * NVM ownership is already held. Process legitimate commands and set any
1103 * change in state; reject all other commands.
1105 STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
1106 struct i40e_nvm_access *cmd,
1107 u8 *bytes, int *perrno)
1109 enum i40e_status_code status = I40E_SUCCESS;
1110 enum i40e_nvmupd_cmd upd_cmd;
1112 DEBUGFUNC("i40e_nvmupd_state_reading");
1114 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
/* SA (standalone) and CON (continue) reads leave the state machine in
 * READING; the NVM semaphore stays held for the rest of the batch.
 */
1117 case I40E_NVMUPD_READ_SA:
1118 case I40E_NVMUPD_READ_CON:
1119 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
/* LCB (last command in batch): perform the final read, then drop NVM
 * ownership and return the state machine to INIT.
 */
1122 case I40E_NVMUPD_READ_LCB:
1123 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1124 i40e_release_nvm(hw);
1125 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
/* any non-read command is illegal while a read batch is open */
1129 i40e_debug(hw, I40E_DEBUG_NVM,
1130 "NVMUPD: bad cmd %s in reading state.\n",
1131 i40e_nvm_update_state_str[upd_cmd]);
1132 status = I40E_NOT_SUPPORTED;
1140 * i40e_nvmupd_state_writing - Handle NVM update state Writing
1141 * @hw: pointer to hardware structure
1142 * @cmd: pointer to nvm update command buffer
1143 * @bytes: pointer to the data buffer
1144 * @perrno: pointer to return error code
1146 * NVM ownership is already held. Process legitimate commands and set any
1147 * change in state; reject all other commands
1149 STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
1150 struct i40e_nvm_access *cmd,
1151 u8 *bytes, int *perrno)
1153 enum i40e_status_code status = I40E_SUCCESS;
1154 enum i40e_nvmupd_cmd upd_cmd;
/* set once the EBUSY recovery below has run, so it is attempted only once */
1155 bool retry_attempt = false;
1157 DEBUGFUNC("i40e_nvmupd_state_writing");
1159 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
/* WRITE_CON: mid-batch write; on the success path we park in WRITE_WAIT
 * until the nvm_update completion event arrives (see
 * i40e_nvmupd_check_wait_event).
 */
1163 case I40E_NVMUPD_WRITE_CON:
1164 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1166 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1167 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
/* WRITE_LCB: last write of the batch.  On failure, translate the AQ
 * error to a POSIX code and fall back to INIT; on success, arm the
 * release-on-done flag so the semaphore is dropped when the
 * completion event fires, and wait in INIT_WAIT.
 */
1171 case I40E_NVMUPD_WRITE_LCB:
1172 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1174 *perrno = hw->aq.asq_last_status ?
1175 i40e_aq_rc_to_posix(status,
1176 hw->aq.asq_last_status) :
1178 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1180 hw->nvm_release_on_done = true;
1181 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1182 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
/* CSUM_CON: recompute/commit the NVM checksum mid-batch; semaphore is
 * kept, and on success we wait for the update completion in WRITE_WAIT.
 */
1186 case I40E_NVMUPD_CSUM_CON:
1187 /* Assumes the caller has acquired the nvm */
1188 status = i40e_update_nvm_checksum(hw);
1190 *perrno = hw->aq.asq_last_status ?
1191 i40e_aq_rc_to_posix(status,
1192 hw->aq.asq_last_status) :
1194 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1196 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1197 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
/* CSUM_LCB: checksum as the last command in the batch — like CSUM_CON
 * but releases the NVM on completion (nvm_release_on_done) and parks
 * in INIT_WAIT.
 */
1201 case I40E_NVMUPD_CSUM_LCB:
1202 /* Assumes the caller has acquired the nvm */
1203 status = i40e_update_nvm_checksum(hw);
1205 *perrno = hw->aq.asq_last_status ?
1206 i40e_aq_rc_to_posix(status,
1207 hw->aq.asq_last_status) :
1209 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1211 hw->nvm_release_on_done = true;
1212 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1213 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1218 i40e_debug(hw, I40E_DEBUG_NVM,
1219 "NVMUPD: bad cmd %s in writing state.\n",
1220 i40e_nvm_update_state_str[upd_cmd]);
1221 status = I40E_NOT_SUPPORTED;
1226 /* In some circumstances, a multi-write transaction takes longer
1227 * than the default 3 minute timeout on the write semaphore. If
1228 * the write failed with an EBUSY status, this is likely the problem,
1229 * so here we try to reacquire the semaphore then retry the write.
1230 * We only do one retry, then give up.
1232 if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
/* snapshot status/asq_last_status so they can be restored if the
 * semaphore reacquire fails and the original error must be reported
 */
1234 enum i40e_status_code old_status = status;
1235 u32 old_asq_status = hw->aq.asq_last_status;
1238 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
/* only retry if the HW timer shows the semaphore has actually timed out */
1239 if (gtime >= hw->nvm.hw_semaphore_timeout) {
1240 i40e_debug(hw, I40E_DEBUG_ALL,
1241 "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
1242 gtime, hw->nvm.hw_semaphore_timeout);
1243 i40e_release_nvm(hw);
1244 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1246 i40e_debug(hw, I40E_DEBUG_ALL,
1247 "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
1248 hw->aq.asq_last_status);
/* reacquire failed: surface the original write error, not the
 * acquire error
 */
1249 status = old_status;
1250 hw->aq.asq_last_status = old_asq_status;
1252 retry_attempt = true;
1262 * i40e_nvmupd_check_wait_event - handle NVM update operation events
1263 * @hw: pointer to the hardware structure
1264 * @opcode: the event that just happened
1266 void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
/* only act when this completion is the one the state machine armed */
1268 if (opcode == hw->nvm_wait_opcode) {
1270 i40e_debug(hw, I40E_DEBUG_NVM,
1271 "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
/* drop the NVM semaphore if the *_LCB/ERA/SA path asked us to */
1272 if (hw->nvm_release_on_done) {
1273 i40e_release_nvm(hw);
1274 hw->nvm_release_on_done = false;
1276 hw->nvm_wait_opcode = 0;
/* a failed completion (ARQ error) aborts the whole update */
1278 if (hw->aq.arq_last_status) {
1279 hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
/* otherwise advance the state machine out of its wait state */
1283 switch (hw->nvmupd_state) {
1284 case I40E_NVMUPD_STATE_INIT_WAIT:
1285 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1288 case I40E_NVMUPD_STATE_WRITE_WAIT:
1289 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
1299 * i40e_nvmupd_validate_command - Validate given command
1300 * @hw: pointer to hardware structure
1301 * @cmd: pointer to nvm update command buffer
1302 * @perrno: pointer to return error code
1304 * Return one of the valid command types or I40E_NVMUPD_INVALID
1306 STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
1307 struct i40e_nvm_access *cmd,
1310 enum i40e_nvmupd_cmd upd_cmd;
1311 u8 module, transaction;
/* NOTE(review): the trailing "\n" inside this DEBUGFUNC string is
 * inconsistent with every other DEBUGFUNC call site in this file —
 * confirm and drop it in a follow-up code change.
 */
1313 DEBUGFUNC("i40e_nvmupd_validate_command\n");
1315 /* anything that doesn't match a recognized case is an error */
1316 upd_cmd = I40E_NVMUPD_INVALID;
/* transaction type and target module are packed into cmd->config */
1318 transaction = i40e_nvmupd_get_transaction(cmd->config);
1319 module = i40e_nvmupd_get_module(cmd->config);
1321 /* limits on data size */
1322 if ((cmd->data_size < 1) ||
1323 (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
1324 i40e_debug(hw, I40E_DEBUG_NVM,
1325 "i40e_nvmupd_validate_command data_size %d\n",
1328 return I40E_NVMUPD_INVALID;
1331 switch (cmd->command) {
/* read commands: map transaction flag to the matching READ_* type */
1333 switch (transaction) {
1335 upd_cmd = I40E_NVMUPD_READ_CON;
1338 upd_cmd = I40E_NVMUPD_READ_SNT;
1341 upd_cmd = I40E_NVMUPD_READ_LCB;
1344 upd_cmd = I40E_NVMUPD_READ_SA;
/* special read targets distinguished by module number */
1348 upd_cmd = I40E_NVMUPD_STATUS;
1349 else if (module == 0)
1350 upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
/* write commands: map transaction flag(s) to the WRITE_*/CSUM_* type */
1355 case I40E_NVM_WRITE:
1356 switch (transaction) {
1358 upd_cmd = I40E_NVMUPD_WRITE_CON;
1361 upd_cmd = I40E_NVMUPD_WRITE_SNT;
1364 upd_cmd = I40E_NVMUPD_WRITE_LCB;
1367 upd_cmd = I40E_NVMUPD_WRITE_SA;
1370 upd_cmd = I40E_NVMUPD_WRITE_ERA;
1373 upd_cmd = I40E_NVMUPD_CSUM_CON;
1375 case (I40E_NVM_CSUM|I40E_NVM_SA):
1376 upd_cmd = I40E_NVMUPD_CSUM_SA;
1378 case (I40E_NVM_CSUM|I40E_NVM_LCB):
1379 upd_cmd = I40E_NVMUPD_CSUM_LCB;
1383 upd_cmd = I40E_NVMUPD_EXEC_AQ;
1393 * i40e_nvmupd_exec_aq - Run an AQ command
1394 * @hw: pointer to hardware structure
1395 * @cmd: pointer to nvm update command buffer
1396 * @bytes: pointer to the data buffer
1397 * @perrno: pointer to return error code
1399 * cmd structure contains identifiers and data buffer
1401 STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
1402 struct i40e_nvm_access *cmd,
1403 u8 *bytes, int *perrno)
1405 struct i40e_asq_cmd_details cmd_details;
1406 enum i40e_status_code status;
1407 struct i40e_aq_desc *aq_desc;
1413 i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
/* route the writeback descriptor into hw->nvm_wb_desc so a later
 * GET_AQ_RESULT command can retrieve the response
 */
1414 memset(&cmd_details, 0, sizeof(cmd_details));
1415 cmd_details.wb_desc = &hw->nvm_wb_desc;
1417 aq_desc_len = sizeof(struct i40e_aq_desc);
1418 memset(&hw->nvm_wb_desc, 0, aq_desc_len);
1420 /* get the aq descriptor */
/* the caller's buffer must at least contain a full AQ descriptor */
1421 if (cmd->data_size < aq_desc_len) {
1422 i40e_debug(hw, I40E_DEBUG_NVM,
1423 "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
1424 cmd->data_size, aq_desc_len);
1426 return I40E_ERR_PARAM;
/* the descriptor itself is the first aq_desc_len bytes of the buffer */
1428 aq_desc = (struct i40e_aq_desc *)bytes;
1430 /* if data buffer needed, make sure it's ready */
/* payload follows the descriptor; size it by the larger of what the
 * caller supplied and what the descriptor's datalen field claims
 */
1431 aq_data_len = cmd->data_size - aq_desc_len;
1432 buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen));
/* lazily allocate the persistent NVM scratch buffer on first use */
1434 if (!hw->nvm_buff.va) {
1435 status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
1436 hw->aq.asq_buf_size);
1438 i40e_debug(hw, I40E_DEBUG_NVM,
1439 "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
/* copy the payload (bytes after the descriptor) into the scratch buffer */
1443 if (hw->nvm_buff.va) {
1444 buff = hw->nvm_buff.va;
1445 i40e_memcpy(buff, &bytes[aq_desc_len], aq_data_len,
1446 I40E_NONDMA_TO_NONDMA);
1450 /* and away we go! */
1451 status = i40e_asq_send_command(hw, aq_desc, buff,
1452 buff_size, &cmd_details);
1454 i40e_debug(hw, I40E_DEBUG_NVM,
1455 "i40e_nvmupd_exec_aq err %s aq_err %s\n",
1456 i40e_stat_str(hw, status),
1457 i40e_aq_str(hw, hw->aq.asq_last_status));
1458 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1461 /* should we wait for a followup event? */
/* the opcode of the completion event to wait for is carried in
 * cmd->offset by the caller
 */
1463 hw->nvm_wait_opcode = cmd->offset;
1464 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1471 * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
1472 * @hw: pointer to hardware structure
1473 * @cmd: pointer to nvm update command buffer
1474 * @bytes: pointer to the data buffer
1475 * @perrno: pointer to return error code
1477 * cmd structure contains identifiers and data buffer
1479 STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
1480 struct i40e_nvm_access *cmd,
1481 u8 *bytes, int *perrno)
1488 i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
/* total result = writeback descriptor + the data length it reports */
1490 aq_desc_len = sizeof(struct i40e_aq_desc);
1491 aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen);
1493 /* check offset range */
1494 if (cmd->offset > aq_total_len) {
1495 i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
1496 __func__, cmd->offset, aq_total_len);
1498 return I40E_ERR_PARAM;
1501 /* check copylength range */
/* clamp the copy length to what is actually available past offset */
1502 if (cmd->data_size > (aq_total_len - cmd->offset)) {
1503 int new_len = aq_total_len - cmd->offset;
1505 i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
1506 __func__, cmd->data_size, new_len);
1507 cmd->data_size = new_len;
/* the copy may span two regions: the descriptor first, then the data
 * buffer; remainder tracks bytes still owed after the descriptor part
 */
1510 remainder = cmd->data_size;
1511 if (cmd->offset < aq_desc_len) {
1512 u32 len = aq_desc_len - cmd->offset;
1514 len = min(len, cmd->data_size);
1515 i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
1516 __func__, cmd->offset, cmd->offset + len);
1518 buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
1519 i40e_memcpy(bytes, buff, len, I40E_NONDMA_TO_NONDMA);
/* offset landed inside the descriptor: data copy starts at buffer head */
1523 buff = hw->nvm_buff.va;
/* offset landed past the descriptor: index into the data buffer */
1525 buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len);
1528 if (remainder > 0) {
1529 int start_byte = buff - (u8 *)hw->nvm_buff.va;
1531 i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
1532 __func__, start_byte, start_byte + remainder);
1533 i40e_memcpy(bytes, buff, remainder, I40E_NONDMA_TO_NONDMA);
1536 return I40E_SUCCESS;
1540 * i40e_nvmupd_nvm_read - Read NVM
1541 * @hw: pointer to hardware structure
1542 * @cmd: pointer to nvm update command buffer
1543 * @bytes: pointer to the data buffer
1544 * @perrno: pointer to return error code
1546 * cmd structure contains identifiers and data buffer
1548 STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
1549 struct i40e_nvm_access *cmd,
1550 u8 *bytes, int *perrno)
1552 struct i40e_asq_cmd_details cmd_details;
1553 enum i40e_status_code status;
1554 u8 module, transaction;
1557 transaction = i40e_nvmupd_get_transaction(cmd->config);
1558 module = i40e_nvmupd_get_module(cmd->config);
/* "last command" flag: set for LCB (last in batch) or SA (standalone) */
1559 last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
1561 memset(&cmd_details, 0, sizeof(cmd_details));
1562 cmd_details.wb_desc = &hw->nvm_wb_desc;
1564 status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1565 bytes, last, &cmd_details);
/* log the request parameters and the AQ outcome for debugging */
1567 i40e_debug(hw, I40E_DEBUG_NVM,
1568 "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
1569 module, cmd->offset, cmd->data_size);
1570 i40e_debug(hw, I40E_DEBUG_NVM,
1571 "i40e_nvmupd_nvm_read status %d aq %d\n",
1572 status, hw->aq.asq_last_status);
1573 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1580 * i40e_nvmupd_nvm_erase - Erase an NVM module
1581 * @hw: pointer to hardware structure
1582 * @cmd: pointer to nvm update command buffer
1583 * @perrno: pointer to return error code
1585 * module, offset, data_size and data are in cmd structure
1587 STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
1588 struct i40e_nvm_access *cmd,
1591 enum i40e_status_code status = I40E_SUCCESS;
1592 struct i40e_asq_cmd_details cmd_details;
1593 u8 module, transaction;
1596 transaction = i40e_nvmupd_get_transaction(cmd->config);
1597 module = i40e_nvmupd_get_module(cmd->config);
/* "last command" flag tested via bitwise AND with the LCB bit here,
 * unlike the equality tests in i40e_nvmupd_nvm_read
 */
1598 last = (transaction & I40E_NVM_LCB);
1600 memset(&cmd_details, 0, sizeof(cmd_details));
1601 cmd_details.wb_desc = &hw->nvm_wb_desc;
1603 status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1604 last, &cmd_details);
/* log the request parameters and the AQ outcome for debugging */
1606 i40e_debug(hw, I40E_DEBUG_NVM,
1607 "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
1608 module, cmd->offset, cmd->data_size);
1609 i40e_debug(hw, I40E_DEBUG_NVM,
1610 "i40e_nvmupd_nvm_erase status %d aq %d\n",
1611 status, hw->aq.asq_last_status);
1612 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1619 * i40e_nvmupd_nvm_write - Write NVM
1620 * @hw: pointer to hardware structure
1621 * @cmd: pointer to nvm update command buffer
1622 * @bytes: pointer to the data buffer
1623 * @perrno: pointer to return error code
1625 * module, offset, data_size and data are in cmd structure
1627 STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
1628 struct i40e_nvm_access *cmd,
1629 u8 *bytes, int *perrno)
1631 enum i40e_status_code status = I40E_SUCCESS;
1632 struct i40e_asq_cmd_details cmd_details;
1633 u8 module, transaction;
1636 transaction = i40e_nvmupd_get_transaction(cmd->config);
1637 module = i40e_nvmupd_get_module(cmd->config);
1638 last = (transaction & I40E_NVM_LCB);
1640 memset(&cmd_details, 0, sizeof(cmd_details));
1641 cmd_details.wb_desc = &hw->nvm_wb_desc;
1643 status = i40e_aq_update_nvm(hw, module, cmd->offset,
1644 (u16)cmd->data_size, bytes, last,
1647 i40e_debug(hw, I40E_DEBUG_NVM,
1648 "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
1649 module, cmd->offset, cmd->data_size);
1650 i40e_debug(hw, I40E_DEBUG_NVM,
1651 "i40e_nvmupd_nvm_write status %d aq %d\n",
1652 status, hw->aq.asq_last_status);
1653 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);