/*******************************************************************************

  Copyright (c) 2013 - 2015, Intel Corporation

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include "i40e_prototype.h"

enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
					       u16 *data);
enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
					    u16 *data);
enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
						 u16 *words, u16 *data);
enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
					      u16 *words, u16 *data);
enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
				       u32 offset, u16 words, void *data,
				       bool last_command);

/**
 * i40e_init_nvm - Initialize NVM function pointers
 * @hw: pointer to the HW structure
 *
 * Setup the function pointers and the NVM info structure. Should be called
 * once per NVM initialization, e.g. inside the i40e_init_shared_code().
 * Please notice that the NVM term is used here (& in all methods covered
 * in this file) as an equivalent of the FLASH part mapped into the SR.
 * We are accessing FLASH always through the Shadow RAM.
 **/
enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
{
	struct i40e_nvm_info *nvm = &hw->nvm;
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 fla, gens;
	u8 sr_size;

	DEBUGFUNC("i40e_init_nvm");

	/* The SR size is stored regardless of the NVM programming mode
	 * as the blank mode may be used in the factory line.
	 */
	gens = rd32(hw, I40E_GLNVM_GENS);
	sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
		   I40E_GLNVM_GENS_SR_SIZE_SHIFT);
	/* Switching to words (sr_size contains power of 2KB) */
	nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
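	/* For example (illustrative numbers only): an SR_SIZE field value of 5
	 * means 2^5 = 32KB of Shadow RAM, i.e. 32 * 512 = 16384 words.
	 */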

	/* Check if we are in the normal or blank NVM programming mode */
	fla = rd32(hw, I40E_GLNVM_FLA);
	if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
		/* Max NVM timeout */
		nvm->timeout = I40E_MAX_NVM_TIMEOUT;
		nvm->blank_nvm_mode = false;
	} else { /* Blank programming mode */
		nvm->blank_nvm_mode = true;
		ret_code = I40E_ERR_NVM_BLANK_MODE;
		i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
	}

	return ret_code;
}

/**
 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
 * @hw: pointer to the HW structure
 * @access: NVM access type (read or write)
 *
 * This function will request NVM ownership for reading
 * via the proper Admin Command.
 **/
enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
				       enum i40e_aq_resource_access_type access)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u64 gtime, timeout;
	u64 time_left = 0;

	DEBUGFUNC("i40e_acquire_nvm");

	if (hw->nvm.blank_nvm_mode)
		goto i40e_i40e_acquire_nvm_exit;

	ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
					    0, &time_left, NULL);
	/* Reading the Global Device Timer */
	gtime = rd32(hw, I40E_GLVFGEN_TIMER);

	/* Store the timeout */
	hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
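	/* time_left reported by firmware is in milliseconds; converting it to
	 * GTIME ticks and adding the current timer value gives the absolute
	 * time at which the current owner's hold on the resource expires.
	 */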

	if (ret_code)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
			   access, time_left, ret_code, hw->aq.asq_last_status);

	if (ret_code && time_left) {
		/* Poll until the current NVM owner times out */
		timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
		while ((gtime < timeout) && time_left) {
			i40e_msec_delay(10);
			gtime = rd32(hw, I40E_GLVFGEN_TIMER);
			ret_code = i40e_aq_request_resource(hw,
							I40E_NVM_RESOURCE_ID,
							access, 0, &time_left,
							NULL);
			if (ret_code == I40E_SUCCESS) {
				hw->nvm.hw_semaphore_timeout =
					I40E_MS_TO_GTIME(time_left) + gtime;
				break;
			}
		}
		if (ret_code != I40E_SUCCESS) {
			hw->nvm.hw_semaphore_timeout = 0;
			i40e_debug(hw, I40E_DEBUG_NVM,
				   "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
				   time_left, ret_code, hw->aq.asq_last_status);
		}
	}

i40e_i40e_acquire_nvm_exit:
	return ret_code;
}

/**
 * i40e_release_nvm - Generic request for releasing the NVM ownership
 * @hw: pointer to the HW structure
 *
 * This function will release NVM resource via the proper Admin Command.
 **/
void i40e_release_nvm(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u32 total_delay = 0;

	DEBUGFUNC("i40e_release_nvm");

	if (hw->nvm.blank_nvm_mode)
		return;

	ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);

	/* There are some rare cases when trying to release the resource
	 * results in an admin queue timeout, so handle them correctly.
	 */
	while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
	       (total_delay < hw->aq.asq_cmd_timeout)) {
		i40e_msec_delay(1);
		ret_code = i40e_aq_release_resource(hw,
						    I40E_NVM_RESOURCE_ID, 0, NULL);
		total_delay++;
	}
}

/**
 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
 * @hw: pointer to the HW structure
 *
 * Polls the SRCTL Shadow RAM register done bit.
 **/
static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
	u32 srctl, wait_cnt;

	DEBUGFUNC("i40e_poll_sr_srctl_done_bit");

	/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
	for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
		srctl = rd32(hw, I40E_GLNVM_SRCTL);
		if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
			ret_code = I40E_SUCCESS;
			break;
		}
		i40e_usec_delay(5);
	}
	if (ret_code == I40E_ERR_TIMEOUT)
		i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
	return ret_code;
}

/**
 * i40e_read_nvm_word - Reads NVM word and acquires the lock if necessary
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
 **/
enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
					 u16 *data)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

#ifdef X722_SUPPORT
	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (!ret_code) {
			ret_code = i40e_read_nvm_word_aq(hw, offset, data);
			i40e_release_nvm(hw);
		}
	} else {
		ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
	}
#else
	ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
#endif
	return ret_code;
}

/**
 * __i40e_read_nvm_word - Reads NVM word, assumes caller does the locking
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
 **/
enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
					   u16 offset,
					   u16 *data)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

#ifdef X722_SUPPORT
	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
		ret_code = i40e_read_nvm_word_aq(hw, offset, data);
	else
		ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
#else
	ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
#endif
	return ret_code;
}

/**
 * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
 **/
enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
					       u16 *data)
{
	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
	u32 sr_reg;

	DEBUGFUNC("i40e_read_nvm_word_srctl");

	if (offset >= hw->nvm.sr_size) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
			   offset, hw->nvm.sr_size);
		ret_code = I40E_ERR_PARAM;
		goto read_nvm_exit;
	}

	/* Poll the done bit first */
	ret_code = i40e_poll_sr_srctl_done_bit(hw);
	if (ret_code == I40E_SUCCESS) {
		/* Write the address and start reading */
		sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
			 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
		wr32(hw, I40E_GLNVM_SRCTL, sr_reg);

		/* Poll I40E_GLNVM_SRCTL until the done bit is set */
		ret_code = i40e_poll_sr_srctl_done_bit(hw);
		if (ret_code == I40E_SUCCESS) {
			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
			*data = (u16)((sr_reg &
				       I40E_GLNVM_SRDATA_RDDATA_MASK)
				      >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
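			/* The word returned by the hardware sits in the
			 * RDDATA field of GLNVM_SRDATA.
			 */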
		}
	}
	if (ret_code != I40E_SUCCESS)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
			   offset);

read_nvm_exit:
	return ret_code;
}

/**
 * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the AdminQ.
 **/
enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
					    u16 *data)
{
	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;

	DEBUGFUNC("i40e_read_nvm_word_aq");

	ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
	*data = LE16_TO_CPU(*(__le16 *)data);
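	/* AQ reads return the word little-endian, so it is converted to CPU
	 * byte order before being handed back to the caller.
	 */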

	return ret_code;
}

/**
 * __i40e_read_nvm_buffer - Reads NVM buffer, caller must acquire lock
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the Shadow RAM. The caller is
 * responsible for acquiring and releasing the NVM ownership.
 **/
enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
					     u16 offset,
					     u16 *words, u16 *data)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

#ifdef X722_SUPPORT
	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
		ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data);
	else
		ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
#else
	ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
#endif
	return ret_code;
}

/**
 * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquires lock if necessary
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
 * method. The buffer read is preceded by the NVM ownership take
 * and followed by the release.
 **/
enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
					   u16 *words, u16 *data)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;

#ifdef X722_SUPPORT
	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (!ret_code) {
			ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
							   data);
			i40e_release_nvm(hw);
		}
	} else {
		ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
	}
#else
	ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
#endif
	return ret_code;
}

/**
 * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the Shadow RAM word by word using
 * the i40e_read_nvm_word_srctl() method. The caller handles any NVM
 * ownership that may be required.
 **/
enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
						 u16 *words, u16 *data)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 index, word;

	DEBUGFUNC("i40e_read_nvm_buffer_srctl");

	/* Loop through the selected region */
	for (word = 0; word < *words; word++) {
		index = offset + word;
		ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
		if (ret_code != I40E_SUCCESS)
			break;
	}

	/* Update the number of words read from the Shadow RAM */
	*words = word;

	return ret_code;
}

/**
 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
 * method. The buffer read is preceded by the NVM ownership take
 * and followed by the release.
 **/
enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
					      u16 *words, u16 *data)
{
	enum i40e_status_code ret_code;
	u16 read_size = *words;
	bool last_cmd = false;
	u16 words_read = 0;
	u16 i = 0;

	DEBUGFUNC("i40e_read_nvm_buffer_aq");

	do {
		/* Calculate the number of words we should read in this step.
		 * The FVL AQ does not allow reading more than one page at a
		 * time or crossing page boundaries.
		 */
		if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
			read_size = min(*words,
					(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
					(offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
		else
			read_size = min((*words - words_read),
					I40E_SR_SECTOR_SIZE_IN_WORDS);
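		/* For example (illustrative numbers): with a 0x800-word (4KB)
		 * sector and a starting offset of 0x7FE, the first chunk is
		 * capped at two words so it does not cross the sector boundary.
		 */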

		/* Check if this is the last command, if so set the proper flag */
		if ((words_read + read_size) >= *words)
			last_cmd = true;

		ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
					    data + words_read, last_cmd);
		if (ret_code != I40E_SUCCESS)
			goto read_nvm_buffer_aq_exit;

		/* Increment the counter for words already read and move the
		 * offset to the new read location.
		 */
		words_read += read_size;
		offset += read_size;
	} while (words_read < *words);

	for (i = 0; i < *words; i++)
		data[i] = LE16_TO_CPU(((__le16 *)data)[i]);

read_nvm_buffer_aq_exit:
	*words = words_read;
	return ret_code;
}

/**
 * i40e_read_nvm_aq - Read Shadow RAM
 * @hw: pointer to the HW structure
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to read
 * @data: buffer to store the words read from the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
 **/
enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
				       u32 offset, u16 words, void *data,
				       bool last_command)
{
	enum i40e_status_code ret_code = I40E_ERR_NVM;
	struct i40e_asq_cmd_details cmd_details;

	DEBUGFUNC("i40e_read_nvm_aq");

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	if ((offset + words) > hw->nvm.sr_size)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: offset %d beyond Shadow RAM limit %d\n",
			   (offset + words), hw->nvm.sr_size);
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can read only up to 4KB (one sector) in one AQ read */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read fail error: tried to read %d words, limit is %d.\n",
			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single read cannot spread over two sectors */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
			   offset, words);
	else
		ret_code = i40e_aq_read_nvm(hw, module_pointer,
					    2 * offset, /*bytes*/
					    2 * words,  /*bytes*/
					    data, last_command, &cmd_details);

	return ret_code;
}

/**
 * i40e_write_nvm_aq - Writes Shadow RAM
 * @hw: pointer to the HW structure
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to write
 * @data: buffer with words to write to the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
 **/
enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
					u32 offset, u16 words, void *data,
					bool last_command)
{
	enum i40e_status_code ret_code = I40E_ERR_NVM;
	struct i40e_asq_cmd_details cmd_details;

	DEBUGFUNC("i40e_write_nvm_aq");

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	if ((offset + words) > hw->nvm.sr_size)
		DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n");
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can write only up to 4KB (one sector) in one AQ write */
		DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n");
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single write cannot spread over two sectors */
		DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n");
	else
		ret_code = i40e_aq_update_nvm(hw, module_pointer,
					      2 * offset, /*bytes*/
					      2 * words,  /*bytes*/
					      data, last_command, &cmd_details);

	return ret_code;
}

/**
 * __i40e_write_nvm_word - Writes Shadow RAM word
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to write
 * @data: word to write to the Shadow RAM
 *
 * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
 * NVM ownership has to be acquired and released (on ARQ completion event
 * reception) by the caller. To commit the SR to NVM, the update checksum
 * function should be called afterwards.
 **/
enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
					    void *data)
{
	DEBUGFUNC("i40e_write_nvm_word");

	*((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));

	/* Value 0x00 below means that we treat SR as a flat mem */
	return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, false);
}

/**
 * __i40e_write_nvm_buffer - Writes Shadow RAM buffer
 * @hw: pointer to the HW structure
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset of the Shadow RAM buffer to write
 * @words: number of words to write
 * @data: words to write to the Shadow RAM
 *
 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
 * NVM ownership must be acquired before calling this function and released
 * on ARQ completion event reception by the caller. To commit the SR to NVM,
 * the update checksum function should be called afterwards.
 **/
enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw,
					      u8 module_pointer, u32 offset,
					      u16 words, void *data)
{
	__le16 *le_word_ptr = (__le16 *)data;
	u16 *word_ptr = (u16 *)data;
	u16 i = 0;

	DEBUGFUNC("i40e_write_nvm_buffer");

	for (i = 0; i < words; i++)
		le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);
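	/* The Shadow RAM stores words little-endian, so the caller's buffer
	 * is converted in place before it is handed to i40e_write_nvm_aq().
	 */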

	/* Here we will only write one buffer as the size of the modules
	 * mirrored in the Shadow RAM is always less than 4K.
	 */
	return i40e_write_nvm_aq(hw, module_pointer, offset, words,
				 data, false);
}

/**
 * i40e_calc_nvm_checksum - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 * @checksum: pointer to the checksum
 *
 * This function calculates the SW checksum that covers the whole 64kB shadow
 * RAM except the VPD and PCIe ALT Auto-load modules. The structure and size
 * of the VPD module are customer specific and unknown, so this function skips
 * the maximum possible size of the VPD module (1kB).
 **/
enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_virt_mem vmem;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;
	u16 vpd_module = 0;
	u16 *data;
	u16 i = 0;

	DEBUGFUNC("i40e_calc_nvm_checksum");

	ret_code = i40e_allocate_virt_mem(hw, &vmem,
				    I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
	if (ret_code)
		goto i40e_calc_nvm_checksum_exit;
	data = (u16 *)vmem.va;

	/* read pointer to VPD area */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR,
					&vpd_module);
	if (ret_code != I40E_SUCCESS) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* read pointer to PCIe Alt Auto-load module */
	ret_code = __i40e_read_nvm_word(hw,
					I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
					&pcie_alt_module);
	if (ret_code != I40E_SUCCESS) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;
	}

	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->nvm.sr_size; i++) {
		/* Read SR page */
		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;

			ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
			if (ret_code != I40E_SUCCESS) {
				ret_code = I40E_ERR_NVM_CHECKSUM;
				goto i40e_calc_nvm_checksum_exit;
			}
		}

		/* Skip Checksum word */
		if (i == I40E_SR_SW_CHECKSUM_WORD)
			continue;
		/* Skip VPD module (convert byte size to word count) */
		if ((i >= (u32)vpd_module) &&
		    (i < ((u32)vpd_module +
			  (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
			continue;
		}
		/* Skip PCIe ALT module (convert byte size to word count) */
		if ((i >= (u32)pcie_alt_module) &&
		    (i < ((u32)pcie_alt_module +
			  (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
			continue;
		}

		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
	}

	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
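	/* The stored checksum word is defined so that adding it to the sum of
	 * all other covered words yields I40E_SR_SW_CHECKSUM_BASE.
	 */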

i40e_calc_nvm_checksum_exit:
	i40e_free_virt_mem(hw, &vmem);
	return ret_code;
}

/**
 * i40e_update_nvm_checksum - Updates the NVM checksum
 * @hw: pointer to hardware structure
 *
 * NVM ownership must be acquired before calling this function and released
 * on ARQ completion event reception by the caller.
 * This function will commit the SR to NVM.
 **/
enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 checksum;
	__le16 le_sum;

	DEBUGFUNC("i40e_update_nvm_checksum");

	ret_code = i40e_calc_nvm_checksum(hw, &checksum);
	le_sum = CPU_TO_LE16(checksum);
	if (ret_code == I40E_SUCCESS)
		ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
					     1, &le_sum, true);

	return ret_code;
}

/**
 * i40e_validate_nvm_checksum - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum: calculated checksum
 *
 * Performs checksum calculation and validates the NVM SW checksum. If the
 * caller does not need the checksum, the value can be NULL.
 **/
enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
						 u16 *checksum)
{
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 checksum_sr = 0;
	u16 checksum_local = 0;

	DEBUGFUNC("i40e_validate_nvm_checksum");

	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);

	ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
		i40e_release_nvm(hw);
	if (ret_code != I40E_SUCCESS)
		goto i40e_validate_nvm_checksum_exit;

	/* Fetch the checksum word stored in the Shadow RAM */
	i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);

	/* Verify the checksum read from the EEPROM is the same as the
	 * calculated checksum
	 */
	if (checksum_local != checksum_sr)
		ret_code = I40E_ERR_NVM_CHECKSUM;

	/* If the user cares, return the calculated checksum */
	if (checksum)
		*checksum = checksum_local;

i40e_validate_nvm_checksum_exit:
	return ret_code;
}

STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno);
STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno);
STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno);
STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    int *perrno);
STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    int *perrno);
STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno);
STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno);
STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno);
STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno);
STATIC INLINE u8 i40e_nvmupd_get_module(u32 val)
{
	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
}

STATIC INLINE u8 i40e_nvmupd_get_transaction(u32 val)
{
	return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
}
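
/* The nvm_access config word packs the module pointer into its low bits
 * (I40E_NVM_MOD_PNT_MASK) and the transaction type into the I40E_NVM_TRANS
 * field; the two helpers above extract those pieces.
 */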

STATIC const char *i40e_nvm_update_state_str[] = {
	"I40E_NVMUPD_INVALID",
	"I40E_NVMUPD_READ_CON",
	"I40E_NVMUPD_READ_SNT",
	"I40E_NVMUPD_READ_LCB",
	"I40E_NVMUPD_READ_SA",
	"I40E_NVMUPD_WRITE_ERA",
	"I40E_NVMUPD_WRITE_CON",
	"I40E_NVMUPD_WRITE_SNT",
	"I40E_NVMUPD_WRITE_LCB",
	"I40E_NVMUPD_WRITE_SA",
	"I40E_NVMUPD_CSUM_CON",
	"I40E_NVMUPD_CSUM_SA",
	"I40E_NVMUPD_CSUM_LCB",
	"I40E_NVMUPD_STATUS",
	"I40E_NVMUPD_EXEC_AQ",
	"I40E_NVMUPD_GET_AQ_RESULT",
};

/**
 * i40e_nvmupd_command - Process an NVM update command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Dispatches the command depending on what update state is current
 **/
enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
					  struct i40e_nvm_access *cmd,
					  u8 *bytes, int *perrno)
{
	enum i40e_status_code status;
	enum i40e_nvmupd_cmd upd_cmd;

	DEBUGFUNC("i40e_nvmupd_command");

	/* assume success */
	*perrno = 0;

	/* early check for status command and debug msgs */
	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
		   i40e_nvm_update_state_str[upd_cmd],
		   hw->nvmupd_state,
		   hw->nvm_release_on_done, hw->nvm_wait_opcode,
		   cmd->command, cmd->config, cmd->offset, cmd->data_size);

	if (upd_cmd == I40E_NVMUPD_INVALID) {
		*perrno = -EFAULT;
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command returns %d errno %d\n",
			   upd_cmd, *perrno);
	}

	/* a status request returns immediately rather than
	 * going into the state machine
	 */
	if (upd_cmd == I40E_NVMUPD_STATUS) {
		if (!cmd->data_size) {
			*perrno = -EFAULT;
			return I40E_ERR_BUF_TOO_SHORT;
		}

		bytes[0] = hw->nvmupd_state;

		if (cmd->data_size >= 4) {
			bytes[1] = 0;
			*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
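			/* Status reply layout: bytes[0] carries the update
			 * state and bytes[2..3] the AQ opcode, if any, that
			 * the state machine is still waiting on.
			 */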
		}

		return I40E_SUCCESS;
	}

	switch (hw->nvmupd_state) {
	case I40E_NVMUPD_STATE_INIT:
		status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_READING:
		status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_WRITING:
		status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_STATE_INIT_WAIT:
	case I40E_NVMUPD_STATE_WRITE_WAIT:
		/* if we need to stop waiting for an event, clear
		 * the wait info and return before doing anything else
		 */
		if (cmd->offset == 0xffff) {
			i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
			return I40E_SUCCESS;
		}

		status = I40E_ERR_NOT_READY;
		*perrno = -EBUSY;
		break;

	default:
		/* invalid state, should never happen */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: no such state %d\n", hw->nvmupd_state);
		status = I40E_NOT_SUPPORTED;
		*perrno = -ESRCH;
		break;
	}
	return status;
}

/**
 * i40e_nvmupd_state_init - Handle NVM update state Init
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Process legitimate commands of the Init state and conditionally set the
 * next state. Reject all other commands.
 **/
STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno)
{
	enum i40e_status_code status = I40E_SUCCESS;
	enum i40e_nvmupd_cmd upd_cmd;

	DEBUGFUNC("i40e_nvmupd_state_init");

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			i40e_release_nvm(hw);
		}
		break;

	case I40E_NVMUPD_READ_SNT:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			if (status)
				i40e_release_nvm(hw);
			else
				hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
		}
		break;

	case I40E_NVMUPD_WRITE_ERA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_WRITE_SNT:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
			if (status) {
				i40e_release_nvm(hw);
			} else {
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_CSUM_SA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
		if (status) {
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
		} else {
			status = i40e_update_nvm_checksum(hw);
			if (status) {
				*perrno = hw->aq.asq_last_status ?
					  i40e_aq_rc_to_posix(status,
						hw->aq.asq_last_status) :
					  -EIO;
				i40e_release_nvm(hw);
			} else {
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
			}
		}
		break;

	case I40E_NVMUPD_EXEC_AQ:
		status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_GET_AQ_RESULT:
		status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in init state\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_ERR_NVM;
		*perrno = -EOPNOTSUPP;
		break;
	}
	return status;
}

/**
 * i40e_nvmupd_state_reading - Handle NVM update state Reading
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * NVM ownership is already held. Process legitimate commands and set any
 * change in state; reject all other commands.
 **/
STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno)
{
	enum i40e_status_code status = I40E_SUCCESS;
	enum i40e_nvmupd_cmd upd_cmd;

	DEBUGFUNC("i40e_nvmupd_state_reading");

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	switch (upd_cmd) {
	case I40E_NVMUPD_READ_SA:
	case I40E_NVMUPD_READ_CON:
		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
		break;

	case I40E_NVMUPD_READ_LCB:
		status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
		i40e_release_nvm(hw);
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in reading state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_NOT_SUPPORTED;
		*perrno = -EOPNOTSUPP;
		break;
	}
	return status;
}

/**
 * i40e_nvmupd_state_writing - Handle NVM update state Writing
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * NVM ownership is already held. Process legitimate commands and set any
 * change in state; reject all other commands.
 **/
STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno)
{
	enum i40e_status_code status = I40E_SUCCESS;
	enum i40e_nvmupd_cmd upd_cmd;
	bool retry_attempt = false;

	DEBUGFUNC("i40e_nvmupd_state_writing");

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

retry:
	switch (upd_cmd) {
	case I40E_NVMUPD_WRITE_CON:
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (!status) {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_WRITE_LCB:
		status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				  i40e_aq_rc_to_posix(status,
						hw->aq.asq_last_status) :
				  -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_release_on_done = true;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_CON:
		/* Assumes the caller has acquired the nvm */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				  i40e_aq_rc_to_posix(status,
						hw->aq.asq_last_status) :
				  -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
		}
		break;

	case I40E_NVMUPD_CSUM_LCB:
		/* Assumes the caller has acquired the nvm */
		status = i40e_update_nvm_checksum(hw);
		if (status) {
			*perrno = hw->aq.asq_last_status ?
				  i40e_aq_rc_to_posix(status,
						hw->aq.asq_last_status) :
				  -EIO;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
		} else {
			hw->nvm_release_on_done = true;
			hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
		}
		break;

	default:
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in writing state.\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_NOT_SUPPORTED;
		*perrno = -EOPNOTSUPP;
		break;
	}

	/* In some circumstances, a multi-write transaction takes longer
	 * than the default 3 minute timeout on the write semaphore. If
	 * the write failed with an EBUSY status, this is likely the problem,
	 * so here we try to reacquire the semaphore then retry the write.
	 * We only do one retry, then give up.
	 */
	if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
	    !retry_attempt) {
		enum i40e_status_code old_status = status;
		u32 old_asq_status = hw->aq.asq_last_status;
		u32 gtime;

		gtime = rd32(hw, I40E_GLVFGEN_TIMER);
		if (gtime >= hw->nvm.hw_semaphore_timeout) {
			i40e_debug(hw, I40E_DEBUG_ALL,
				   "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
				   gtime, hw->nvm.hw_semaphore_timeout);
			i40e_release_nvm(hw);
			status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
			if (status) {
				i40e_debug(hw, I40E_DEBUG_ALL,
					   "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
					   hw->aq.asq_last_status);
				status = old_status;
				hw->aq.asq_last_status = old_asq_status;
			} else {
				retry_attempt = true;
				goto retry;
			}
		}
	}

	return status;
}

/**
 * i40e_nvmupd_check_wait_event - handle NVM update operation events
 * @hw: pointer to the hardware structure
 * @opcode: the event that just happened
 **/
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
{
	if (opcode == hw->nvm_wait_opcode) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
		if (hw->nvm_release_on_done) {
			i40e_release_nvm(hw);
			hw->nvm_release_on_done = false;
		}
		hw->nvm_wait_opcode = 0;

		switch (hw->nvmupd_state) {
		case I40E_NVMUPD_STATE_INIT_WAIT:
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
			break;

		case I40E_NVMUPD_STATE_WRITE_WAIT:
			hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
			break;

		default:
			break;
		}
	}
}

/**
 * i40e_nvmupd_validate_command - Validate a given command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @perrno: pointer to return error code
 *
 * Return one of the valid command types or I40E_NVMUPD_INVALID
 **/
STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    int *perrno)
{
	enum i40e_nvmupd_cmd upd_cmd;
	u8 module, transaction;

	DEBUGFUNC("i40e_nvmupd_validate_command\n");

	/* anything that doesn't match a recognized case is an error */
	upd_cmd = I40E_NVMUPD_INVALID;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);

	/* limits on data size */
	if ((cmd->data_size < 1) ||
	    (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command data_size %d\n",
			   cmd->data_size);
		*perrno = -EFAULT;
		return I40E_NVMUPD_INVALID;
	}

	switch (cmd->command) {
	case I40E_NVM_READ:
		switch (transaction) {
		case I40E_NVM_CON:
			upd_cmd = I40E_NVMUPD_READ_CON;
			break;
		case I40E_NVM_SNT:
			upd_cmd = I40E_NVMUPD_READ_SNT;
			break;
		case I40E_NVM_LCB:
			upd_cmd = I40E_NVMUPD_READ_LCB;
			break;
		case I40E_NVM_SA:
			upd_cmd = I40E_NVMUPD_READ_SA;
			break;
		case I40E_NVM_EXEC:
			if (module == 0xf)
				upd_cmd = I40E_NVMUPD_STATUS;
			else if (module == 0)
				upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
			break;
		}
		break;

	case I40E_NVM_WRITE:
		switch (transaction) {
		case I40E_NVM_CON:
			upd_cmd = I40E_NVMUPD_WRITE_CON;
			break;
		case I40E_NVM_SNT:
			upd_cmd = I40E_NVMUPD_WRITE_SNT;
			break;
		case I40E_NVM_LCB:
			upd_cmd = I40E_NVMUPD_WRITE_LCB;
			break;
		case I40E_NVM_SA:
			upd_cmd = I40E_NVMUPD_WRITE_SA;
			break;
		case I40E_NVM_ERA:
			upd_cmd = I40E_NVMUPD_WRITE_ERA;
			break;
		case I40E_NVM_CSUM:
			upd_cmd = I40E_NVMUPD_CSUM_CON;
			break;
		case (I40E_NVM_CSUM|I40E_NVM_SA):
			upd_cmd = I40E_NVMUPD_CSUM_SA;
			break;
		case (I40E_NVM_CSUM|I40E_NVM_LCB):
			upd_cmd = I40E_NVMUPD_CSUM_LCB;
			break;
		case I40E_NVM_EXEC:
			if (module == 0)
				upd_cmd = I40E_NVMUPD_EXEC_AQ;
			break;
		}
		break;
	}

	return upd_cmd;
}

/**
 * i40e_nvmupd_exec_aq - Run an AQ command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 **/
STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
						 struct i40e_nvm_access *cmd,
						 u8 *bytes, int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	enum i40e_status_code status;
	struct i40e_aq_desc *aq_desc;
	u32 buff_size = 0;
	u8 *buff = NULL;
	u32 aq_desc_len;
	u32 aq_data_len;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	aq_desc_len = sizeof(struct i40e_aq_desc);
	memset(&hw->nvm_wb_desc, 0, aq_desc_len);

	/* get the aq descriptor */
	if (cmd->data_size < aq_desc_len) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
			   cmd->data_size, aq_desc_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}
	aq_desc = (struct i40e_aq_desc *)bytes;

	/* if data buffer needed, make sure it's ready */
	aq_data_len = cmd->data_size - aq_desc_len;
	buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen));
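	/* The buffer must be large enough both for the data supplied by the
	 * caller and for whatever the firmware may write back, as advertised
	 * by the descriptor's datalen field.
	 */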
	if (buff_size) {
		if (!hw->nvm_buff.va) {
			status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
							hw->aq.asq_buf_size);
			if (status)
				i40e_debug(hw, I40E_DEBUG_NVM,
					   "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
					   status);
		}

		if (hw->nvm_buff.va) {
			buff = hw->nvm_buff.va;
			memcpy(buff, &bytes[aq_desc_len], aq_data_len);
		}
	}

	/* and away we go! */
	status = i40e_asq_send_command(hw, aq_desc, buff,
				       buff_size, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_exec_aq err %s aq_err %s\n",
			   i40e_stat_str(hw, status),
			   i40e_aq_str(hw, hw->aq.asq_last_status));
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	/* should we wait for a followup event? */
	if (cmd->offset) {
		hw->nvm_wait_opcode = cmd->offset;
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
	}

	return status;
}

/**
 * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 **/
STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno)
{
	u32 aq_total_len;
	u32 aq_desc_len;
	int remainder;
	u8 *buff;

	i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);

	aq_desc_len = sizeof(struct i40e_aq_desc);
	aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen);

	/* check offset range */
	if (cmd->offset > aq_total_len) {
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
			   __func__, cmd->offset, aq_total_len);
		*perrno = -EINVAL;
		return I40E_ERR_PARAM;
	}

	/* check copylength range */
	if (cmd->data_size > (aq_total_len - cmd->offset)) {
		int new_len = aq_total_len - cmd->offset;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
			   __func__, cmd->data_size, new_len);
		cmd->data_size = new_len;
	}

	remainder = cmd->data_size;
	if (cmd->offset < aq_desc_len) {
		u32 len = aq_desc_len - cmd->offset;

		len = min(len, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
			   __func__, cmd->offset, cmd->offset + len);

		buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
		memcpy(bytes, buff, len);

		bytes += len;
		remainder -= len;
		buff = hw->nvm_buff.va;
	} else {
		buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len);
	}

	if (remainder > 0) {
		int start_byte = buff - (u8 *)hw->nvm_buff.va;

		i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
			   __func__, start_byte, start_byte + remainder);
		memcpy(bytes, buff, remainder);
	}

	return I40E_SUCCESS;
}

/**
 * i40e_nvmupd_nvm_read - Read NVM
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * cmd structure contains identifiers and data buffer
 **/
STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
						  struct i40e_nvm_access *cmd,
						  u8 *bytes, int *perrno)
{
	struct i40e_asq_cmd_details cmd_details;
	enum i40e_status_code status;
	u8 module, transaction;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
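	/* An SA or LCB transaction is the final read of the session, so the
	 * request is flagged as the last command for the firmware.
	 */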

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
				  bytes, last, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
			   module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_read status %d aq %d\n",
			   status, hw->aq.asq_last_status);
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}

/**
 * i40e_nvmupd_nvm_erase - Erase an NVM module
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @perrno: pointer to return error code
 *
 * module, offset, data_size and data are in cmd structure
 **/
STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
						   struct i40e_nvm_access *cmd,
						   int *perrno)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_asq_cmd_details cmd_details;
	u8 module, transaction;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	last = (transaction & I40E_NVM_LCB);

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
				   last, &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
			   module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_erase status %d aq %d\n",
			   status, hw->aq.asq_last_status);
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}

/**
 * i40e_nvmupd_nvm_write - Write NVM
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * module, offset, data_size and data are in cmd structure
 **/
STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
						   struct i40e_nvm_access *cmd,
						   u8 *bytes, int *perrno)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_asq_cmd_details cmd_details;
	u8 module, transaction;
	bool last;

	transaction = i40e_nvmupd_get_transaction(cmd->config);
	module = i40e_nvmupd_get_module(cmd->config);
	last = (transaction & I40E_NVM_LCB);

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	status = i40e_aq_update_nvm(hw, module, cmd->offset,
				    (u16)cmd->data_size, bytes, last,
				    &cmd_details);
	if (status) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
			   module, cmd->offset, cmd->data_size);
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_nvm_write status %d aq %d\n",
			   status, hw->aq.asq_last_status);
		*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
	}

	return status;
}