1 /*******************************************************************************
3 Copyright (c) 2013 - 2015, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ***************************************************************************/
34 #include "i40e_prototype.h"
/* Forward declarations for the Shadow RAM access helpers defined later in
 * this file.  Two access paths exist: direct register access through the
 * GLNVM_SRCTL/GLNVM_SRDATA registers (*_srctl) and AdminQ commands (*_aq).
 */
enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
                                                 u16 *words, u16 *data);
enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
                                              u16 *words, u16 *data);
enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
                                       u32 offset, u16 words, void *data,
/**
 * i40e_init_nvm - Initialize NVM function pointers
 * @hw: pointer to the HW structure
 *
 * Setup the function pointers and the NVM info structure. Should be called
 * once per NVM initialization, e.g. inside the i40e_init_shared_code().
 * Please notice that the NVM term is used here (& in all methods covered
 * in this file) as an equivalent of the FLASH part mapped into the SR.
 * We are accessing FLASH always through the Shadow RAM.
 *
 * Returns I40E_SUCCESS in normal (locked) programming mode, or
 * I40E_ERR_NVM_BLANK_MODE when the flash part is blank.
 */
enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
	struct i40e_nvm_info *nvm = &hw->nvm;
	enum i40e_status_code ret_code = I40E_SUCCESS;

	DEBUGFUNC("i40e_init_nvm");

	/* The SR size is stored regardless of the nvm programming mode
	 * as the blank mode may be used in the factory line.
	 */
	gens = rd32(hw, I40E_GLNVM_GENS);
	sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
		   I40E_GLNVM_GENS_SR_SIZE_SHIFT);
	/* Switching to words (sr_size contains power of 2KB) */
	nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;

	/* Check if we are in the normal or blank NVM programming mode */
	fla = rd32(hw, I40E_GLNVM_FLA);
	if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
		/* Max time an NVM operation may take, in ms */
		nvm->timeout = I40E_MAX_NVM_TIMEOUT;
		nvm->blank_nvm_mode = false;
	} else { /* Blank programming mode */
		nvm->blank_nvm_mode = true;
		ret_code = I40E_ERR_NVM_BLANK_MODE;
		i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
/**
 * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
 * @hw: pointer to the HW structure
 * @access: NVM access type (read or write)
 *
 * This function will request NVM ownership for reading
 * via the proper Admin Command.
 * On contention it polls the Global Device Timer (GLVFGEN_TIMER) until the
 * current owner's time_left expires, re-requesting the resource each loop.
 */
enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
                                       enum i40e_aq_resource_access_type access)
	enum i40e_status_code ret_code = I40E_SUCCESS;

	DEBUGFUNC("i40e_acquire_nvm");

	/* No HW semaphore exists in blank programming mode */
	if (hw->nvm.blank_nvm_mode)
		goto i40e_i40e_acquire_nvm_exit;

	ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
					    0, &time_left, NULL);
	/* Reading the Global Device Timer */
	gtime = rd32(hw, I40E_GLVFGEN_TIMER);

	/* Store the timeout */
	hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;

	i40e_debug(hw, I40E_DEBUG_NVM,
		   "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
		   access, time_left, ret_code, hw->aq.asq_last_status);

	if (ret_code && time_left) {
		/* Poll until the current NVM owner timeouts */
		timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
		while ((gtime < timeout) && time_left) {
			gtime = rd32(hw, I40E_GLVFGEN_TIMER);
			ret_code = i40e_aq_request_resource(hw,
							I40E_NVM_RESOURCE_ID,
							access, 0, &time_left,
			if (ret_code == I40E_SUCCESS) {
				hw->nvm.hw_semaphore_timeout =
					I40E_MS_TO_GTIME(time_left) + gtime;
		/* Could not acquire within the full timeout window */
		if (ret_code != I40E_SUCCESS) {
			hw->nvm.hw_semaphore_timeout = 0;
			i40e_debug(hw, I40E_DEBUG_NVM,
				   "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
				   time_left, ret_code, hw->aq.asq_last_status);

/* NOTE(review): the doubled "i40e_i40e_" prefix matches the goto above;
 * renaming both together would be a cosmetic cleanup.
 */
i40e_i40e_acquire_nvm_exit:
/**
 * i40e_release_nvm - Generic request for releasing the NVM ownership
 * @hw: pointer to the HW structure
 *
 * This function will release NVM resource via the proper Admin Command.
 * Retries while the AdminQ reports a timeout, bounded by asq_cmd_timeout.
 */
void i40e_release_nvm(struct i40e_hw *hw)
	enum i40e_status_code ret_code = I40E_SUCCESS;

	DEBUGFUNC("i40e_release_nvm");

	/* Nothing to release in blank programming mode */
	if (hw->nvm.blank_nvm_mode)

	ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin Q timeout, so handle them correctly
	 */
	while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
	       (total_delay < hw->aq.asq_cmd_timeout)) {
		ret_code = i40e_aq_release_resource(hw,
						    I40E_NVM_RESOURCE_ID, 0, NULL);
/**
 * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
 * @hw: pointer to the HW structure
 *
 * Polls the SRCTL Shadow RAM register done bit.
 * Returns I40E_SUCCESS as soon as the DONE bit is observed, otherwise
 * I40E_ERR_TIMEOUT after I40E_SRRD_SRCTL_ATTEMPTS reads.
 */
static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;

	DEBUGFUNC("i40e_poll_sr_srctl_done_bit");

	/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
	for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
		srctl = rd32(hw, I40E_GLNVM_SRCTL);
		if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
			ret_code = I40E_SUCCESS;
	/* NOTE(review): debug string below lacks a trailing '\n' */
	if (ret_code == I40E_ERR_TIMEOUT)
		i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set");
/**
 * i40e_read_nvm_word - Reads nvm word and acquire lock if necessary
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM.  Takes the NVM ownership
 * semaphore first when the HW requires it (NVM_READ_REQUIRES_LOCK) and
 * releases it afterwards; dispatches to the AdminQ or SRCTL access method
 * based on hw->flags.
 */
enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);

	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
		ret_code = i40e_read_nvm_word_aq(hw, offset, data);
		ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
	if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
		i40e_release_nvm(hw);
/**
 * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM.  The NVM ownership must
 * already be held by the caller.  Uses the AdminQ method when
 * I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE is set, else the SRCTL registers.
 */
enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
		ret_code = i40e_read_nvm_word_aq(hw, offset, data);
		ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
/**
 * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register:
 * wait for DONE, write address + START, wait for DONE again, read SRDATA.
 */
enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;

	DEBUGFUNC("i40e_read_nvm_word_srctl");

	/* Reject offsets past the end of the Shadow RAM */
	if (offset >= hw->nvm.sr_size) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
			   offset, hw->nvm.sr_size);
		ret_code = I40E_ERR_PARAM;

	/* Poll the done bit first */
	ret_code = i40e_poll_sr_srctl_done_bit(hw);
	if (ret_code == I40E_SUCCESS) {
		/* Write the address and start reading */
		sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
			 BIT(I40E_GLNVM_SRCTL_START_SHIFT);
		wr32(hw, I40E_GLNVM_SRCTL, sr_reg);

		/* Poll I40E_GLNVM_SRCTL until the done bit is set */
		ret_code = i40e_poll_sr_srctl_done_bit(hw);
		if (ret_code == I40E_SUCCESS) {
			/* Extract the 16-bit word from the SRDATA register */
			sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
			*data = (u16)((sr_reg &
				       I40E_GLNVM_SRDATA_RDDATA_MASK)
				      >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
	if (ret_code != I40E_SUCCESS)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
/**
 * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
 * @data: word read from the Shadow RAM
 *
 * Reads one 16 bit word from the Shadow RAM using the AdminQ read command
 * (i40e_read_nvm_aq) and converts it from little-endian to CPU order.
 */
enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
	enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;

	DEBUGFUNC("i40e_read_nvm_word_aq");

	/* Flat addressing (module 0), a single word, last_command = true */
	ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
	*data = LE16_TO_CPU(*(__le16 *)data);
/**
 * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR.  The NVM ownership must
 * already be held by the caller; no acquire/release is done here.
 */
enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
                                             u16 *words, u16 *data)
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
		ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data);
		ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
/**
 * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquire lock if necessary
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR.  For the AdminQ access
 * method the buffer read is preceded by the NVM ownership take and
 * followed by the release; the SRCTL path needs no lock.
 */
enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
                                           u16 *words, u16 *data)
	enum i40e_status_code ret_code = I40E_SUCCESS;

	if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
		ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
		ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
		i40e_release_nvm(hw);
		ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
/**
 * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR one word at a time using
 * i40e_read_nvm_word_srctl(); stops at the first failing word.
 */
enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
                                                 u16 *words, u16 *data)
	enum i40e_status_code ret_code = I40E_SUCCESS;

	DEBUGFUNC("i40e_read_nvm_buffer_srctl");

	/* Loop through the selected region */
	for (word = 0; word < *words; word++) {
		index = offset + word;
		ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
		if (ret_code != I40E_SUCCESS)

	/* Update the number of words read from the Shadow RAM */
/**
 * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
 * @words: (in) number of words to read; (out) number of words actually read
 * @data: words read from the Shadow RAM
 *
 * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
 * method, splitting the request into sector-sized chunks that never cross
 * a page boundary, then converts the buffer to CPU byte order in place.
 */
enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
                                              u16 *words, u16 *data)
	enum i40e_status_code ret_code;
	u16 read_size = *words;
	bool last_cmd = false;

	DEBUGFUNC("i40e_read_nvm_buffer_aq");

	/* Calculate number of bytes we should read in this step.
	 * FVL AQ do not allow to read more than one page at a time or
	 * to cross page boundaries.
	 */
	if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* Unaligned start: read only up to the next sector boundary */
		read_size = min(*words,
				(u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
				      (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
		read_size = min((*words - words_read),
				I40E_SR_SECTOR_SIZE_IN_WORDS);

	/* Check if this is last command, if so set proper flag */
	if ((words_read + read_size) >= *words)

	ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
				    data + words_read, last_cmd);
	if (ret_code != I40E_SUCCESS)
		goto read_nvm_buffer_aq_exit;

	/* Increment counter for words already read and move offset ahead */
	words_read += read_size;
	} while (words_read < *words);

	/* Convert each word from little-endian to CPU order in place */
	for (i = 0; i < *words; i++)
		data[i] = LE16_TO_CPU(((__le16 *)data)[i]);

read_nvm_buffer_aq_exit:
/**
 * i40e_read_nvm_aq - Read Shadow RAM.
 * @hw: pointer to the HW structure.
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to read
 * @data: buffer for the words read from the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
 * NOTE(review): the bounds-check debug strings below still say "write";
 * they were copied from i40e_write_nvm_aq() though this is the read path.
 */
enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
                                       u32 offset, u16 words, void *data,
	enum i40e_status_code ret_code = I40E_ERR_NVM;
	struct i40e_asq_cmd_details cmd_details;

	DEBUGFUNC("i40e_read_nvm_aq");

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	if ((offset + words) > hw->nvm.sr_size)
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write error: offset %d beyond Shadow RAM limit %d\n",
			   (offset + words), hw->nvm.sr_size);
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can access only up to 4KB (one sector) in one AQ command */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write fail error: tried to write %d words, limit is %d.\n",
			   words, I40E_SR_SECTOR_SIZE_IN_WORDS);
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single access cannot spread over two sectors */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
		ret_code = i40e_aq_read_nvm(hw, module_pointer,
					    2 * offset, /*bytes*/
					    data, last_command, &cmd_details);
/**
 * i40e_write_nvm_aq - Writes Shadow RAM.
 * @hw: pointer to the HW structure.
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset in words from module start
 * @words: number of words to write
 * @data: buffer with words to write to the Shadow RAM
 * @last_command: tells the AdminQ that this is the last command
 *
 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
 */
enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
                                        u32 offset, u16 words, void *data,
	enum i40e_status_code ret_code = I40E_ERR_NVM;
	struct i40e_asq_cmd_details cmd_details;

	DEBUGFUNC("i40e_write_nvm_aq");

	memset(&cmd_details, 0, sizeof(cmd_details));
	cmd_details.wb_desc = &hw->nvm_wb_desc;

	/* Here we are checking the SR limit only for the flat memory model.
	 * We cannot do it for the module-based model, as we did not acquire
	 * the NVM resource yet (we cannot get the module pointer value).
	 * Firmware will check the module-based model.
	 */
	if ((offset + words) > hw->nvm.sr_size)
		DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n");
	else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
		/* We can write only up to 4KB (one sector), in one AQ write */
		DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n");
	else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
		 != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
		/* A single write cannot spread over two sectors */
		DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n");
		ret_code = i40e_aq_update_nvm(hw, module_pointer,
					      2 * offset, /*bytes*/
					      data, last_command, &cmd_details);
/**
 * __i40e_write_nvm_word - Writes Shadow RAM word
 * @hw: pointer to the HW structure
 * @offset: offset of the Shadow RAM word to write
 * @data: word to write to the Shadow RAM
 *
 * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
 * NVM ownership have to be acquired and released (on ARQ completion event
 * reception) by caller. To commit SR to NVM update checksum function
 * should be called.  The word is converted to little-endian in place.
 */
enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
	DEBUGFUNC("i40e_write_nvm_word");

	/* Convert to little-endian before handing to the AdminQ */
	*((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));

	/* Value 0x00 below means that we treat SR as a flat mem */
	return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, false);
/**
 * __i40e_write_nvm_buffer - Writes Shadow RAM buffer
 * @hw: pointer to the HW structure
 * @module_pointer: module pointer location in words from the NVM beginning
 * @offset: offset of the Shadow RAM buffer to write
 * @words: number of words to write
 * @data: words to write to the Shadow RAM
 *
 * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
 * NVM ownership must be acquired before calling this function and released
 * on ARQ completion event reception by caller. To commit SR to NVM update
 * checksum function should be called.
 */
enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw,
                                              u8 module_pointer, u32 offset,
                                              u16 words, void *data)
	__le16 *le_word_ptr = (__le16 *)data;
	u16 *word_ptr = (u16 *)data;

	DEBUGFUNC("i40e_write_nvm_buffer");

	/* Convert the buffer to little-endian in place before writing */
	for (i = 0; i < words; i++)
		le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);

	/* Here we will only write one buffer as the size of the modules
	 * mirrored in the Shadow RAM is always less than 4K.
	 */
	return i40e_write_nvm_aq(hw, module_pointer, offset, words,
/**
 * i40e_calc_nvm_checksum - Calculates and returns the checksum
 * @hw: pointer to hardware structure
 * @checksum: pointer to the checksum
 *
 * This function calculates SW Checksum that covers the whole 64kB shadow RAM
 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
 * is customer specific and unknown. Therefore, this function skips all maximum
 * possible size of VPD (1kB).
 * Uses the lock-free __i40e_read_nvm_* helpers, so the caller must hold
 * the NVM ownership.
 */
enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
	enum i40e_status_code ret_code = I40E_SUCCESS;
	struct i40e_virt_mem vmem;
	u16 pcie_alt_module = 0;
	u16 checksum_local = 0;

	DEBUGFUNC("i40e_calc_nvm_checksum");

	/* Sector-sized scratch buffer for the buffered reads below */
	ret_code = i40e_allocate_virt_mem(hw, &vmem,
					  I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
		goto i40e_calc_nvm_checksum_exit;
	data = (u16 *)vmem.va;

	/* read pointer to VPD area */
	ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR,
	if (ret_code != I40E_SUCCESS) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;

	/* read pointer to PCIe Alt Auto-load module */
	ret_code = __i40e_read_nvm_word(hw,
					I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
	if (ret_code != I40E_SUCCESS) {
		ret_code = I40E_ERR_NVM_CHECKSUM;
		goto i40e_calc_nvm_checksum_exit;

	/* Calculate SW checksum that covers the whole 64kB shadow RAM
	 * except the VPD and PCIe ALT Auto-load modules
	 */
	for (i = 0; i < hw->nvm.sr_size; i++) {
		/* Refill the scratch buffer at each sector boundary */
		if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
			u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;

			ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
			if (ret_code != I40E_SUCCESS) {
				ret_code = I40E_ERR_NVM_CHECKSUM;
				goto i40e_calc_nvm_checksum_exit;

		/* Skip Checksum word */
		if (i == I40E_SR_SW_CHECKSUM_WORD)
		/* Skip VPD module (convert byte size to word count) */
		if ((i >= (u32)vpd_module) &&
		    (i < ((u32)vpd_module +
			  (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
		/* Skip PCIe ALT module (convert byte size to word count) */
		if ((i >= (u32)pcie_alt_module) &&
		    (i < ((u32)pcie_alt_module +
			  (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {

		checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];

	*checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;

i40e_calc_nvm_checksum_exit:
	i40e_free_virt_mem(hw, &vmem);
/**
 * i40e_update_nvm_checksum - Updates the NVM checksum
 * @hw: pointer to hardware structure
 *
 * NVM ownership must be acquired before calling this function and released
 * on ARQ completion event reception by caller.
 * This function will commit SR to NVM.
 */
enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
	enum i40e_status_code ret_code = I40E_SUCCESS;

	DEBUGFUNC("i40e_update_nvm_checksum");

	ret_code = i40e_calc_nvm_checksum(hw, &checksum);
	le_sum = CPU_TO_LE16(checksum);
	/* Write the recalculated checksum word only if the calc succeeded */
	if (ret_code == I40E_SUCCESS)
		ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
/**
 * i40e_validate_nvm_checksum - Validate EEPROM checksum
 * @hw: pointer to hardware structure
 * @checksum: calculated checksum
 *
 * Performs checksum calculation and validates the NVM SW checksum. If the
 * caller does not need checksum, the value can be NULL.
 * Returns I40E_ERR_NVM_CHECKSUM when the stored and computed values differ.
 */
enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
	enum i40e_status_code ret_code = I40E_SUCCESS;
	u16 checksum_local = 0;

	DEBUGFUNC("i40e_validate_nvm_checksum");

	/* acquire_nvm provides exclusive NVM lock to synchronize access across
	 * PFs. X710 uses i40e_read_nvm_word_srctl which polls for done bit
	 * twice (first time to be able to write address to I40E_GLNVM_SRCTL
	 * register, second to read data from I40E_GLNVM_SRDATA. One PF can see
	 * done bit and try to write address, while another one will interpret
	 * it as a good time to read data. It will cause invalid data to be
	 * read.
	 */
	ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
	i40e_release_nvm(hw);
	if (ret_code != I40E_SUCCESS)
		goto i40e_validate_nvm_checksum_exit;
	goto i40e_validate_nvm_checksum_exit;

	/* Fetch the checksum word stored in the Shadow RAM */
	i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);

	/* Verify read checksum from EEPROM is the same as
	 * calculated checksum
	 */
	if (checksum_local != checksum_sr)
		ret_code = I40E_ERR_NVM_CHECKSUM;

	/* If the user cares, return the calculated checksum */
		*checksum = checksum_local;

i40e_validate_nvm_checksum_exit:
/* Forward declarations for the NVM-update state machine: one handler per
 * state (init/reading/writing), the command validator, and the per-command
 * workers dispatched from those handlers.
 */
STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno);
STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
						       struct i40e_nvm_access *cmd,
						       u8 *bytes, int *perrno);
STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
						       struct i40e_nvm_access *cmd,
						       u8 *bytes, int *perrno);
STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
							 struct i40e_nvm_access *cmd,
STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
						   struct i40e_nvm_access *cmd,
STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
						   struct i40e_nvm_access *cmd,
						   u8 *bytes, int *perrno);
STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
						  struct i40e_nvm_access *cmd,
						  u8 *bytes, int *perrno);
STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
						 struct i40e_nvm_access *cmd,
						 u8 *bytes, int *perrno);
STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
						       struct i40e_nvm_access *cmd,
						       u8 *bytes, int *perrno);
/* Extract the module-pointer field from a combined NVM update config value */
STATIC INLINE u8 i40e_nvmupd_get_module(u32 val)
	return (u8)(val & I40E_NVM_MOD_PNT_MASK);
/* Extract the transaction-type field from a combined NVM update config value */
STATIC INLINE u8 i40e_nvmupd_get_transaction(u32 val)
	return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
/* Human-readable names for enum i40e_nvmupd_cmd, indexed by command value;
 * used only in i40e_debug() logging below.
 */
STATIC const char *i40e_nvm_update_state_str[] = {
	"I40E_NVMUPD_INVALID",
	"I40E_NVMUPD_READ_CON",
	"I40E_NVMUPD_READ_SNT",
	"I40E_NVMUPD_READ_LCB",
	"I40E_NVMUPD_READ_SA",
	"I40E_NVMUPD_WRITE_ERA",
	"I40E_NVMUPD_WRITE_CON",
	"I40E_NVMUPD_WRITE_SNT",
	"I40E_NVMUPD_WRITE_LCB",
	"I40E_NVMUPD_WRITE_SA",
	"I40E_NVMUPD_CSUM_CON",
	"I40E_NVMUPD_CSUM_SA",
	"I40E_NVMUPD_CSUM_LCB",
	"I40E_NVMUPD_STATUS",
	"I40E_NVMUPD_EXEC_AQ",
	"I40E_NVMUPD_GET_AQ_RESULT",
/**
 * i40e_nvmupd_command - Process an NVM update command
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Dispatches command depending on what update state is current
 */
enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
					  struct i40e_nvm_access *cmd,
					  u8 *bytes, int *perrno)
	enum i40e_status_code status;
	enum i40e_nvmupd_cmd upd_cmd;

	DEBUGFUNC("i40e_nvmupd_command");

	/* early check for status command and debug msgs */
	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
		   i40e_nvm_update_state_str[upd_cmd],
		   hw->nvm_release_on_done, hw->nvm_wait_opcode,
		   cmd->command, cmd->config, cmd->offset, cmd->data_size);

	if (upd_cmd == I40E_NVMUPD_INVALID) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "i40e_nvmupd_validate_command returns %d errno %d\n",

	/* a status request returns immediately rather than
	 * going into the state machine
	 */
	if (upd_cmd == I40E_NVMUPD_STATUS) {
		if (!cmd->data_size) {
			return I40E_ERR_BUF_TOO_SHORT;

		/* byte 0: current state; bytes 2-3: opcode being waited on */
		bytes[0] = hw->nvmupd_state;

		if (cmd->data_size >= 4) {
			*((u16 *)&bytes[2]) = hw->nvm_wait_opcode;

		/* Clear error status on read */
		if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
			hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

	/* Clear status even it is not read and log */
	if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
		hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

	/* Acquire lock to prevent race condition where adminq_task
	 * can execute after i40e_nvmupd_nvm_read/write but before state
	 * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
	 *
	 * During NVMUpdate, it is observed that lock could be held for
	 * ~5ms for most commands. However lock is held for ~60ms for
	 * NVMUPD_CSUM_LCB command.
	 */
	i40e_acquire_spinlock(&hw->aq.arq_spinlock);
	switch (hw->nvmupd_state) {
	case I40E_NVMUPD_STATE_INIT:
		status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);

	case I40E_NVMUPD_STATE_READING:
		status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);

	case I40E_NVMUPD_STATE_WRITING:
		status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);

	case I40E_NVMUPD_STATE_INIT_WAIT:
	case I40E_NVMUPD_STATE_WRITE_WAIT:
		/* if we need to stop waiting for an event, clear
		 * the wait info and return before doing anything else
		 */
		if (cmd->offset == 0xffff) {
			i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
			status = I40E_SUCCESS;

		status = I40E_ERR_NOT_READY;

		/* invalid state, should never happen */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: no such state %d\n", hw->nvmupd_state);
		status = I40E_NOT_SUPPORTED;

	i40e_release_spinlock(&hw->aq.arq_spinlock);
/**
 * i40e_nvmupd_state_init - Handle NVM update state Init
 * @hw: pointer to hardware structure
 * @cmd: pointer to nvm update command buffer
 * @bytes: pointer to the data buffer
 * @perrno: pointer to return error code
 *
 * Process legitimate commands of the Init state and conditionally set next
 * state. Reject all other commands.
 */
STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
						    struct i40e_nvm_access *cmd,
						    u8 *bytes, int *perrno)
	enum i40e_status_code status = I40E_SUCCESS;
	enum i40e_nvmupd_cmd upd_cmd;

	DEBUGFUNC("i40e_nvmupd_state_init");

	upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);

	/* Standalone read: acquire, read, release; state stays Init */
	case I40E_NVMUPD_READ_SA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
			i40e_release_nvm(hw);

	/* Start of a multi-part read: hold the lock, move to Reading */
	case I40E_NVMUPD_READ_SNT:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
			status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
				i40e_release_nvm(hw);
				hw->nvmupd_state = I40E_NVMUPD_STATE_READING;

	/* Erase: release on ARQ completion of the erase opcode */
	case I40E_NVMUPD_WRITE_ERA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
			status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
				i40e_release_nvm(hw);
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;

	/* Standalone write: release on ARQ completion of the update opcode */
	case I40E_NVMUPD_WRITE_SA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
				i40e_release_nvm(hw);
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;

	/* Start of a multi-part write: keep the lock, wait for completion */
	case I40E_NVMUPD_WRITE_SNT:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
			status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
				i40e_release_nvm(hw);
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;

	/* Standalone checksum update (commit SR to NVM) */
	case I40E_NVMUPD_CSUM_SA:
		status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
			*perrno = i40e_aq_rc_to_posix(status,
						      hw->aq.asq_last_status);
			status = i40e_update_nvm_checksum(hw);
				*perrno = hw->aq.asq_last_status ?
					  i40e_aq_rc_to_posix(status,
							      hw->aq.asq_last_status) :
				i40e_release_nvm(hw);
				hw->nvm_release_on_done = true;
				hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
				hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;

	case I40E_NVMUPD_EXEC_AQ:
		status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);

	case I40E_NVMUPD_GET_AQ_RESULT:
		status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);

		/* Anything else is rejected while in the Init state */
		i40e_debug(hw, I40E_DEBUG_NVM,
			   "NVMUPD: bad cmd %s in init state\n",
			   i40e_nvm_update_state_str[upd_cmd]);
		status = I40E_ERR_NVM;
1099 * i40e_nvmupd_state_reading - Handle NVM update state Reading
1100 * @hw: pointer to hardware structure
1101 * @cmd: pointer to nvm update command buffer
1102 * @bytes: pointer to the data buffer
1103 * @perrno: pointer to return error code
1105 * NVM ownership is already held. Process legitimate commands and set any
1106 * change in state; reject all other commands.
1108 STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
1109 struct i40e_nvm_access *cmd,
1110 u8 *bytes, int *perrno)
1112 enum i40e_status_code status = I40E_SUCCESS;
1113 enum i40e_nvmupd_cmd upd_cmd;
1115 DEBUGFUNC("i40e_nvmupd_state_reading");
1117 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1120 case I40E_NVMUPD_READ_SA:
1121 case I40E_NVMUPD_READ_CON:
1122 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1125 case I40E_NVMUPD_READ_LCB:
1126 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1127 i40e_release_nvm(hw);
1128 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1132 i40e_debug(hw, I40E_DEBUG_NVM,
1133 "NVMUPD: bad cmd %s in reading state.\n",
1134 i40e_nvm_update_state_str[upd_cmd]);
1135 status = I40E_NOT_SUPPORTED;
1143 * i40e_nvmupd_state_writing - Handle NVM update state Writing
1144 * @hw: pointer to hardware structure
1145 * @cmd: pointer to nvm update command buffer
1146 * @bytes: pointer to the data buffer
1147 * @perrno: pointer to return error code
1149 * NVM ownership is already held. Process legitimate commands and set any
1150 * change in state; reject all other commands
1152 STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
1153 struct i40e_nvm_access *cmd,
1154 u8 *bytes, int *perrno)
1156 enum i40e_status_code status = I40E_SUCCESS;
1157 enum i40e_nvmupd_cmd upd_cmd;
1158 bool retry_attempt = false;
1160 DEBUGFUNC("i40e_nvmupd_state_writing");
1162 upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1166 case I40E_NVMUPD_WRITE_CON:
1167 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1169 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1170 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1174 case I40E_NVMUPD_WRITE_LCB:
1175 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1177 *perrno = hw->aq.asq_last_status ?
1178 i40e_aq_rc_to_posix(status,
1179 hw->aq.asq_last_status) :
1181 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1183 hw->nvm_release_on_done = true;
1184 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1185 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1189 case I40E_NVMUPD_CSUM_CON:
1190 /* Assumes the caller has acquired the nvm */
1191 status = i40e_update_nvm_checksum(hw);
1193 *perrno = hw->aq.asq_last_status ?
1194 i40e_aq_rc_to_posix(status,
1195 hw->aq.asq_last_status) :
1197 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1199 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1200 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1204 case I40E_NVMUPD_CSUM_LCB:
1205 /* Assumes the caller has acquired the nvm */
1206 status = i40e_update_nvm_checksum(hw);
1208 *perrno = hw->aq.asq_last_status ?
1209 i40e_aq_rc_to_posix(status,
1210 hw->aq.asq_last_status) :
1212 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1214 hw->nvm_release_on_done = true;
1215 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1216 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1221 i40e_debug(hw, I40E_DEBUG_NVM,
1222 "NVMUPD: bad cmd %s in writing state.\n",
1223 i40e_nvm_update_state_str[upd_cmd]);
1224 status = I40E_NOT_SUPPORTED;
1229 /* In some circumstances, a multi-write transaction takes longer
1230 * than the default 3 minute timeout on the write semaphore. If
1231 * the write failed with an EBUSY status, this is likely the problem,
1232 * so here we try to reacquire the semaphore then retry the write.
1233 * We only do one retry, then give up.
1235 if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
1237 enum i40e_status_code old_status = status;
1238 u32 old_asq_status = hw->aq.asq_last_status;
1241 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
1242 if (gtime >= hw->nvm.hw_semaphore_timeout) {
1243 i40e_debug(hw, I40E_DEBUG_ALL,
1244 "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
1245 gtime, hw->nvm.hw_semaphore_timeout);
1246 i40e_release_nvm(hw);
1247 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1249 i40e_debug(hw, I40E_DEBUG_ALL,
1250 "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
1251 hw->aq.asq_last_status);
1252 status = old_status;
1253 hw->aq.asq_last_status = old_asq_status;
1255 retry_attempt = true;
1265 * i40e_nvmupd_check_wait_event - handle NVM update operation events
1266 * @hw: pointer to the hardware structure
1267 * @opcode: the event that just happened
1269 void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
1271 if (opcode == hw->nvm_wait_opcode) {
1273 i40e_debug(hw, I40E_DEBUG_NVM,
1274 "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
1275 if (hw->nvm_release_on_done) {
1276 i40e_release_nvm(hw);
1277 hw->nvm_release_on_done = false;
1279 hw->nvm_wait_opcode = 0;
1281 if (hw->aq.arq_last_status) {
1282 hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
1286 switch (hw->nvmupd_state) {
1287 case I40E_NVMUPD_STATE_INIT_WAIT:
1288 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1291 case I40E_NVMUPD_STATE_WRITE_WAIT:
1292 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
1302 * i40e_nvmupd_validate_command - Validate given command
1303 * @hw: pointer to hardware structure
1304 * @cmd: pointer to nvm update command buffer
1305 * @perrno: pointer to return error code
1307 * Return one of the valid command types or I40E_NVMUPD_INVALID
1309 STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
1310 struct i40e_nvm_access *cmd,
1313 enum i40e_nvmupd_cmd upd_cmd;
1314 u8 module, transaction;
1316 DEBUGFUNC("i40e_nvmupd_validate_command\n");
1318 /* anything that doesn't match a recognized case is an error */
1319 upd_cmd = I40E_NVMUPD_INVALID;
1321 transaction = i40e_nvmupd_get_transaction(cmd->config);
1322 module = i40e_nvmupd_get_module(cmd->config);
1324 /* limits on data size */
1325 if ((cmd->data_size < 1) ||
1326 (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
1327 i40e_debug(hw, I40E_DEBUG_NVM,
1328 "i40e_nvmupd_validate_command data_size %d\n",
1331 return I40E_NVMUPD_INVALID;
1334 switch (cmd->command) {
1336 switch (transaction) {
1338 upd_cmd = I40E_NVMUPD_READ_CON;
1341 upd_cmd = I40E_NVMUPD_READ_SNT;
1344 upd_cmd = I40E_NVMUPD_READ_LCB;
1347 upd_cmd = I40E_NVMUPD_READ_SA;
1351 upd_cmd = I40E_NVMUPD_STATUS;
1352 else if (module == 0)
1353 upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
1358 case I40E_NVM_WRITE:
1359 switch (transaction) {
1361 upd_cmd = I40E_NVMUPD_WRITE_CON;
1364 upd_cmd = I40E_NVMUPD_WRITE_SNT;
1367 upd_cmd = I40E_NVMUPD_WRITE_LCB;
1370 upd_cmd = I40E_NVMUPD_WRITE_SA;
1373 upd_cmd = I40E_NVMUPD_WRITE_ERA;
1376 upd_cmd = I40E_NVMUPD_CSUM_CON;
1378 case (I40E_NVM_CSUM|I40E_NVM_SA):
1379 upd_cmd = I40E_NVMUPD_CSUM_SA;
1381 case (I40E_NVM_CSUM|I40E_NVM_LCB):
1382 upd_cmd = I40E_NVMUPD_CSUM_LCB;
1386 upd_cmd = I40E_NVMUPD_EXEC_AQ;
1396 * i40e_nvmupd_exec_aq - Run an AQ command
1397 * @hw: pointer to hardware structure
1398 * @cmd: pointer to nvm update command buffer
1399 * @bytes: pointer to the data buffer
1400 * @perrno: pointer to return error code
1402 * cmd structure contains identifiers and data buffer
1404 STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
1405 struct i40e_nvm_access *cmd,
1406 u8 *bytes, int *perrno)
1408 struct i40e_asq_cmd_details cmd_details;
1409 enum i40e_status_code status;
1410 struct i40e_aq_desc *aq_desc;
1416 i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1417 memset(&cmd_details, 0, sizeof(cmd_details));
1418 cmd_details.wb_desc = &hw->nvm_wb_desc;
1420 aq_desc_len = sizeof(struct i40e_aq_desc);
1421 memset(&hw->nvm_wb_desc, 0, aq_desc_len);
1423 /* get the aq descriptor */
1424 if (cmd->data_size < aq_desc_len) {
1425 i40e_debug(hw, I40E_DEBUG_NVM,
1426 "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
1427 cmd->data_size, aq_desc_len);
1429 return I40E_ERR_PARAM;
1431 aq_desc = (struct i40e_aq_desc *)bytes;
1433 /* if data buffer needed, make sure it's ready */
1434 aq_data_len = cmd->data_size - aq_desc_len;
1435 buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen));
1437 if (!hw->nvm_buff.va) {
1438 status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
1439 hw->aq.asq_buf_size);
1441 i40e_debug(hw, I40E_DEBUG_NVM,
1442 "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
1446 if (hw->nvm_buff.va) {
1447 buff = hw->nvm_buff.va;
1448 i40e_memcpy(buff, &bytes[aq_desc_len], aq_data_len,
1449 I40E_NONDMA_TO_NONDMA);
1453 /* and away we go! */
1454 status = i40e_asq_send_command(hw, aq_desc, buff,
1455 buff_size, &cmd_details);
1457 i40e_debug(hw, I40E_DEBUG_NVM,
1458 "i40e_nvmupd_exec_aq err %s aq_err %s\n",
1459 i40e_stat_str(hw, status),
1460 i40e_aq_str(hw, hw->aq.asq_last_status));
1461 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1464 /* should we wait for a followup event? */
1466 hw->nvm_wait_opcode = cmd->offset;
1467 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1474 * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
1475 * @hw: pointer to hardware structure
1476 * @cmd: pointer to nvm update command buffer
1477 * @bytes: pointer to the data buffer
1478 * @perrno: pointer to return error code
1480 * cmd structure contains identifiers and data buffer
1482 STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
1483 struct i40e_nvm_access *cmd,
1484 u8 *bytes, int *perrno)
1491 i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1493 aq_desc_len = sizeof(struct i40e_aq_desc);
1494 aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen);
1496 /* check offset range */
1497 if (cmd->offset > aq_total_len) {
1498 i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
1499 __func__, cmd->offset, aq_total_len);
1501 return I40E_ERR_PARAM;
1504 /* check copylength range */
1505 if (cmd->data_size > (aq_total_len - cmd->offset)) {
1506 int new_len = aq_total_len - cmd->offset;
1508 i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
1509 __func__, cmd->data_size, new_len);
1510 cmd->data_size = new_len;
1513 remainder = cmd->data_size;
1514 if (cmd->offset < aq_desc_len) {
1515 u32 len = aq_desc_len - cmd->offset;
1517 len = min(len, cmd->data_size);
1518 i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
1519 __func__, cmd->offset, cmd->offset + len);
1521 buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
1522 i40e_memcpy(bytes, buff, len, I40E_NONDMA_TO_NONDMA);
1526 buff = hw->nvm_buff.va;
1528 buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len);
1531 if (remainder > 0) {
1532 int start_byte = buff - (u8 *)hw->nvm_buff.va;
1534 i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
1535 __func__, start_byte, start_byte + remainder);
1536 i40e_memcpy(bytes, buff, remainder, I40E_NONDMA_TO_NONDMA);
1539 return I40E_SUCCESS;
1543 * i40e_nvmupd_nvm_read - Read NVM
1544 * @hw: pointer to hardware structure
1545 * @cmd: pointer to nvm update command buffer
1546 * @bytes: pointer to the data buffer
1547 * @perrno: pointer to return error code
1549 * cmd structure contains identifiers and data buffer
1551 STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
1552 struct i40e_nvm_access *cmd,
1553 u8 *bytes, int *perrno)
1555 struct i40e_asq_cmd_details cmd_details;
1556 enum i40e_status_code status;
1557 u8 module, transaction;
1560 transaction = i40e_nvmupd_get_transaction(cmd->config);
1561 module = i40e_nvmupd_get_module(cmd->config);
1562 last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
1564 memset(&cmd_details, 0, sizeof(cmd_details));
1565 cmd_details.wb_desc = &hw->nvm_wb_desc;
1567 status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1568 bytes, last, &cmd_details);
1570 i40e_debug(hw, I40E_DEBUG_NVM,
1571 "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
1572 module, cmd->offset, cmd->data_size);
1573 i40e_debug(hw, I40E_DEBUG_NVM,
1574 "i40e_nvmupd_nvm_read status %d aq %d\n",
1575 status, hw->aq.asq_last_status);
1576 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1583 * i40e_nvmupd_nvm_erase - Erase an NVM module
1584 * @hw: pointer to hardware structure
1585 * @cmd: pointer to nvm update command buffer
1586 * @perrno: pointer to return error code
1588 * module, offset, data_size and data are in cmd structure
1590 STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
1591 struct i40e_nvm_access *cmd,
1594 enum i40e_status_code status = I40E_SUCCESS;
1595 struct i40e_asq_cmd_details cmd_details;
1596 u8 module, transaction;
1599 transaction = i40e_nvmupd_get_transaction(cmd->config);
1600 module = i40e_nvmupd_get_module(cmd->config);
1601 last = (transaction & I40E_NVM_LCB);
1603 memset(&cmd_details, 0, sizeof(cmd_details));
1604 cmd_details.wb_desc = &hw->nvm_wb_desc;
1606 status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1607 last, &cmd_details);
1609 i40e_debug(hw, I40E_DEBUG_NVM,
1610 "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
1611 module, cmd->offset, cmd->data_size);
1612 i40e_debug(hw, I40E_DEBUG_NVM,
1613 "i40e_nvmupd_nvm_erase status %d aq %d\n",
1614 status, hw->aq.asq_last_status);
1615 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1622 * i40e_nvmupd_nvm_write - Write NVM
1623 * @hw: pointer to hardware structure
1624 * @cmd: pointer to nvm update command buffer
1625 * @bytes: pointer to the data buffer
1626 * @perrno: pointer to return error code
1628 * module, offset, data_size and data are in cmd structure
1630 STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
1631 struct i40e_nvm_access *cmd,
1632 u8 *bytes, int *perrno)
1634 enum i40e_status_code status = I40E_SUCCESS;
1635 struct i40e_asq_cmd_details cmd_details;
1636 u8 module, transaction;
1639 transaction = i40e_nvmupd_get_transaction(cmd->config);
1640 module = i40e_nvmupd_get_module(cmd->config);
1641 last = (transaction & I40E_NVM_LCB);
1643 memset(&cmd_details, 0, sizeof(cmd_details));
1644 cmd_details.wb_desc = &hw->nvm_wb_desc;
1646 status = i40e_aq_update_nvm(hw, module, cmd->offset,
1647 (u16)cmd->data_size, bytes, last,
1650 i40e_debug(hw, I40E_DEBUG_NVM,
1651 "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
1652 module, cmd->offset, cmd->data_size);
1653 i40e_debug(hw, I40E_DEBUG_NVM,
1654 "i40e_nvmupd_nvm_write status %d aq %d\n",
1655 status, hw->aq.asq_last_status);
1656 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);