net/i40e/base: release spinlock before function returns
[dpdk.git] drivers/net/i40e/base/i40e_nvm.c
1 /*******************************************************************************
2
3 Copyright (c) 2013 - 2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 #include "i40e_prototype.h"
35
36 enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
37                                                u16 *data);
38 enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
39                                             u16 *data);
40 enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
41                                                  u16 *words, u16 *data);
42 enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
43                                               u16 *words, u16 *data);
44 enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
45                                        u32 offset, u16 words, void *data,
46                                        bool last_command);
47
48 /**
49  * i40e_init_nvm - Initialize the NVM info structure
50  * @hw: pointer to the HW structure
51  *
52  * Sets up the NVM info structure (Shadow RAM size and programming mode).
53  * Should be called once per NVM initialization, e.g. inside the
54  * i40e_init_shared_code(). Please note that the NVM term is used here (and
55  * in all methods covered in this file) as an equivalent of the FLASH part
56  * mapped into the SR. We always access the FLASH through the Shadow RAM.
57  **/
58 enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
59 {
60         struct i40e_nvm_info *nvm = &hw->nvm;
61         enum i40e_status_code ret_code = I40E_SUCCESS;
62         u32 fla, gens;
63         u8 sr_size;
64
65         DEBUGFUNC("i40e_init_nvm");
66
67         /* The SR size is stored regardless of the nvm programming mode
68          * as the blank mode may be used in the factory line.
69          */
70         gens = rd32(hw, I40E_GLNVM_GENS);
71         sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
72                            I40E_GLNVM_GENS_SR_SIZE_SHIFT);
73         /* Switch to words (sr_size holds the SR size in KB as a power-of-two exponent) */
74         nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
75
76         /* Check if we are in the normal or blank NVM programming mode */
77         fla = rd32(hw, I40E_GLNVM_FLA);
78         if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
79                 /* Max NVM timeout */
80                 nvm->timeout = I40E_MAX_NVM_TIMEOUT;
81                 nvm->blank_nvm_mode = false;
82         } else { /* Blank programming mode */
83                 nvm->blank_nvm_mode = true;
84                 ret_code = I40E_ERR_NVM_BLANK_MODE;
85                 i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
86         }
87
88         return ret_code;
89 }
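
/*
 * Worked example for the sr_size conversion above (illustrative only):
 * I40E_SR_WORDS_IN_1KB is 512 (1KB = 512 16-bit words), so a GLNVM_GENS
 * SR_SIZE field of 4 yields BIT(4) * 512 = 16 * 512 = 8192 words, i.e. a
 * 16KB Shadow RAM. The offsets used by the read/write helpers below are
 * validated against this word count.
 */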
90
91 /**
92  * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
93  * @hw: pointer to the HW structure
94  * @access: NVM access type (read or write)
95  *
96  * This function will request NVM ownership, for reading or writing,
97  * via the proper Admin Command.
98  **/
99 enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
100                                        enum i40e_aq_resource_access_type access)
101 {
102         enum i40e_status_code ret_code = I40E_SUCCESS;
103         u64 gtime, timeout;
104         u64 time_left = 0;
105
106         DEBUGFUNC("i40e_acquire_nvm");
107
108         if (hw->nvm.blank_nvm_mode)
109                 goto i40e_acquire_nvm_exit;
110
111         ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
112                                             0, &time_left, NULL);
113         /* Reading the Global Device Timer */
114         gtime = rd32(hw, I40E_GLVFGEN_TIMER);
115
116         /* Store the timeout */
117         hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
118
119         if (ret_code)
120                 i40e_debug(hw, I40E_DEBUG_NVM,
121                            "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
122                            access, time_left, ret_code, hw->aq.asq_last_status);
123
124         if (ret_code && time_left) {
125                 /* Poll until the current NVM owner times out */
126                 timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
127                 while ((gtime < timeout) && time_left) {
128                         i40e_msec_delay(10);
129                         gtime = rd32(hw, I40E_GLVFGEN_TIMER);
130                         ret_code = i40e_aq_request_resource(hw,
131                                                         I40E_NVM_RESOURCE_ID,
132                                                         access, 0, &time_left,
133                                                         NULL);
134                         if (ret_code == I40E_SUCCESS) {
135                                 hw->nvm.hw_semaphore_timeout =
136                                             I40E_MS_TO_GTIME(time_left) + gtime;
137                                 break;
138                         }
139                 }
140                 if (ret_code != I40E_SUCCESS) {
141                         hw->nvm.hw_semaphore_timeout = 0;
142                         i40e_debug(hw, I40E_DEBUG_NVM,
143                                    "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
144                                    time_left, ret_code, hw->aq.asq_last_status);
145                 }
146         }
147
148 i40e_acquire_nvm_exit:
149         return ret_code;
150 }
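
/*
 * Typical acquire/release pairing (a minimal sketch mirroring
 * i40e_read_nvm_word() further down in this file):
 *
 *	if (i40e_acquire_nvm(hw, I40E_RESOURCE_READ) == I40E_SUCCESS) {
 *		ret_code = i40e_read_nvm_word_srctl(hw, offset, &word);
 *		i40e_release_nvm(hw);
 *	}
 *
 * Write paths use I40E_RESOURCE_WRITE and, in the NVM update state machine
 * below, may defer the release until the matching ARQ completion event.
 */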
151
152 /**
153  * i40e_release_nvm - Generic request for releasing the NVM ownership
154  * @hw: pointer to the HW structure
155  *
156  * This function will release NVM resource via the proper Admin Command.
157  **/
158 void i40e_release_nvm(struct i40e_hw *hw)
159 {
160         enum i40e_status_code ret_code = I40E_SUCCESS;
161         u32 total_delay = 0;
162
163         DEBUGFUNC("i40e_release_nvm");
164
165         if (hw->nvm.blank_nvm_mode)
166                 return;
167
168         ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
169
170         /* there are some rare cases when trying to release the resource
171          * results in an admin Q timeout, so handle them correctly
172          */
173         while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
174                (total_delay < hw->aq.asq_cmd_timeout)) {
175                 i40e_msec_delay(1);
176                 ret_code = i40e_aq_release_resource(hw,
177                                         I40E_NVM_RESOURCE_ID, 0, NULL);
178                 total_delay++;
179         }
180 }
181
182 /**
183  * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
184  * @hw: pointer to the HW structure
185  *
186  * Polls the SRCTL Shadow RAM register done bit.
187  **/
188 static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
189 {
190         enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
191         u32 srctl, wait_cnt;
192
193         DEBUGFUNC("i40e_poll_sr_srctl_done_bit");
194
195         /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
196         for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
197                 srctl = rd32(hw, I40E_GLNVM_SRCTL);
198                 if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
199                         ret_code = I40E_SUCCESS;
200                         break;
201                 }
202                 i40e_usec_delay(5);
203         }
204         if (ret_code == I40E_ERR_TIMEOUT)
205                 i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set\n");
206         return ret_code;
207 }
208
209 /**
210  * i40e_read_nvm_word - Reads an NVM word, acquiring the lock if necessary
211  * @hw: pointer to the HW structure
212  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
213  * @data: word read from the Shadow RAM
214  *
215  * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
216  **/
217 enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
218                                          u16 *data)
219 {
220         enum i40e_status_code ret_code = I40E_SUCCESS;
221
222         ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
223         if (!ret_code) {
224                 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
225                         ret_code = i40e_read_nvm_word_aq(hw, offset, data);
226                 } else {
227                         ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
228                 }
229                 i40e_release_nvm(hw);
230         }
231         return ret_code;
232 }
233
234 /**
235  * __i40e_read_nvm_word - Reads an NVM word; assumes the caller does the locking
236  * @hw: pointer to the HW structure
237  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
238  * @data: word read from the Shadow RAM
239  *
240  * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
241  **/
242 enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
243                                            u16 offset,
244                                            u16 *data)
245 {
246         enum i40e_status_code ret_code = I40E_SUCCESS;
247
248         if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
249                 ret_code = i40e_read_nvm_word_aq(hw, offset, data);
250         else
251                 ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
252         return ret_code;
253 }
254
255 /**
256  * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
257  * @hw: pointer to the HW structure
258  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
259  * @data: word read from the Shadow RAM
260  *
261  * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
262  **/
263 enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
264                                                u16 *data)
265 {
266         enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
267         u32 sr_reg;
268
269         DEBUGFUNC("i40e_read_nvm_word_srctl");
270
271         if (offset >= hw->nvm.sr_size) {
272                 i40e_debug(hw, I40E_DEBUG_NVM,
273                            "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
274                            offset, hw->nvm.sr_size);
275                 ret_code = I40E_ERR_PARAM;
276                 goto read_nvm_exit;
277         }
278
279         /* Poll the done bit first */
280         ret_code = i40e_poll_sr_srctl_done_bit(hw);
281         if (ret_code == I40E_SUCCESS) {
282                 /* Write the address and start reading */
283                 sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
284                          BIT(I40E_GLNVM_SRCTL_START_SHIFT);
285                 wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
286
287                 /* Poll I40E_GLNVM_SRCTL until the done bit is set */
288                 ret_code = i40e_poll_sr_srctl_done_bit(hw);
289                 if (ret_code == I40E_SUCCESS) {
290                         sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
291                         *data = (u16)((sr_reg &
292                                        I40E_GLNVM_SRDATA_RDDATA_MASK)
293                                     >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
294                 }
295         }
296         if (ret_code != I40E_SUCCESS)
297                 i40e_debug(hw, I40E_DEBUG_NVM,
298                            "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
299                            offset);
300
301 read_nvm_exit:
302         return ret_code;
303 }
304
305 /**
306  * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
307  * @hw: pointer to the HW structure
308  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
309  * @data: word read from the Shadow RAM
310  *
311  * Reads one 16 bit word from the Shadow RAM using the AdminQ.
312  **/
313 enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
314                                             u16 *data)
315 {
316         enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
317
318         DEBUGFUNC("i40e_read_nvm_word_aq");
319
320         ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
321         *data = LE16_TO_CPU(*(__le16 *)data);
322
323         return ret_code;
324 }
325
326 /**
327  * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock
328  * @hw: pointer to the HW structure
329  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
330  * @words: (in) number of words to read; (out) number of words actually read
331  * @data: words read from the Shadow RAM
332  *
333  * Reads 16 bit words (data buffer) from the Shadow RAM. The caller must
334  * have already acquired the NVM ownership; no ownership take or release
335  * is performed here.
336  **/
337 enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
338                                              u16 offset,
339                                              u16 *words, u16 *data)
340 {
341         enum i40e_status_code ret_code = I40E_SUCCESS;
342
343         if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
344                 ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data);
345         else
346                 ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
347         return ret_code;
348 }
349
350 /**
351  * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquires lock if necessary
352  * @hw: pointer to the HW structure
353  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
354  * @words: (in) number of words to read; (out) number of words actually read
355  * @data: words read from the Shadow RAM
356  *
357  * Reads 16 bit words (data buffer) from the SR via the AQ or the SRCTL
358  * register. On the AQ path the buffer read is preceded by the NVM
359  * ownership take and followed by the release.
360  **/
361 enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
362                                            u16 *words, u16 *data)
363 {
364         enum i40e_status_code ret_code = I40E_SUCCESS;
365
366         if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
367                 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
368                 if (!ret_code) {
369                         ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
370                                                          data);
371                         i40e_release_nvm(hw);
372                 }
373         } else {
374                 ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
375         }
376         return ret_code;
377 }
378
379 /**
380  * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
381  * @hw: pointer to the HW structure
382  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
383  * @words: (in) number of words to read; (out) number of words actually read
384  * @data: words read from the Shadow RAM
385  *
386  * Reads 16 bit words (data buffer) from the SR one word at a time using
387  * i40e_read_nvm_word_srctl(). No NVM ownership take or release is done
388  * here; the caller handles any locking that is needed.
389  **/
390 enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
391                                                  u16 *words, u16 *data)
392 {
393         enum i40e_status_code ret_code = I40E_SUCCESS;
394         u16 index, word;
395
396         DEBUGFUNC("i40e_read_nvm_buffer_srctl");
397
398         /* Loop through the selected region */
399         for (word = 0; word < *words; word++) {
400                 index = offset + word;
401                 ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
402                 if (ret_code != I40E_SUCCESS)
403                         break;
404         }
405
406         /* Update the number of words read from the Shadow RAM */
407         *words = word;
408
409         return ret_code;
410 }
411
412 /**
413  * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
414  * @hw: pointer to the HW structure
415  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
416  * @words: (in) number of words to read; (out) number of words actually read
417  * @data: words read from the Shadow RAM
418  *
419  * Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_aq()
420  * method, splitting the request on sector boundaries. The caller is
421  * expected to hold the NVM ownership.
422  **/
423 enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
424                                               u16 *words, u16 *data)
425 {
426         enum i40e_status_code ret_code;
427         u16 read_size = *words;
428         bool last_cmd = false;
429         u16 words_read = 0;
430         u16 i = 0;
431
432         DEBUGFUNC("i40e_read_nvm_buffer_aq");
433
434         do {
435                 /* Calculate the number of words we should read in this step.
436                  * The FVL AQ does not allow reading more than one page at a
437                  * time or crossing page boundaries.
438                  */
439                 if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
440                         read_size = min(*words,
441                                         (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
442                                       (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
443                 else
444                         read_size = min((*words - words_read),
445                                         I40E_SR_SECTOR_SIZE_IN_WORDS);
446
447                 /* Check if this is last command, if so set proper flag */
448                 if ((words_read + read_size) >= *words)
449                         last_cmd = true;
450
451                 ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
452                                             data + words_read, last_cmd);
453                 if (ret_code != I40E_SUCCESS)
454                         goto read_nvm_buffer_aq_exit;
455
456                 /* Increment counter for words already read and move offset to
457                  * new read location
458                  */
459                 words_read += read_size;
460                 offset += read_size;
461         } while (words_read < *words);
462
463         for (i = 0; i < *words; i++)
464                 data[i] = LE16_TO_CPU(((__le16 *)data)[i]);
465
466 read_nvm_buffer_aq_exit:
467         *words = words_read;
468         return ret_code;
469 }
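
/*
 * Worked example for the sector splitting above, assuming
 * I40E_SR_SECTOR_SIZE_IN_WORDS is 2048 (a 4KB sector, as the comments in
 * this file state): a request for 10 words starting at word offset 2046 is
 * issued as two AQ reads, first 2 words (up to the sector boundary at
 * 2048), then the remaining 8 words with last_command set.
 */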
470
471 /**
472  * i40e_read_nvm_aq - Read Shadow RAM.
473  * @hw: pointer to the HW structure.
474  * @module_pointer: module pointer location in words from the NVM beginning
475  * @offset: offset in words from module start
476  * @words: number of words to read
477  * @data: buffer for the words read from the Shadow RAM
478  * @last_command: tells the AdminQ that this is the last command
479  *
480  * Reads a 16 bit word buffer from the Shadow RAM using the admin command.
481  **/
482 enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
483                                        u32 offset, u16 words, void *data,
484                                        bool last_command)
485 {
486         enum i40e_status_code ret_code = I40E_ERR_NVM;
487         struct i40e_asq_cmd_details cmd_details;
488
489         DEBUGFUNC("i40e_read_nvm_aq");
490
491         memset(&cmd_details, 0, sizeof(cmd_details));
492         cmd_details.wb_desc = &hw->nvm_wb_desc;
493
494         /* Here we are checking the SR limit only for the flat memory model.
495          * We cannot do it for the module-based model, as we did not acquire
496          * the NVM resource yet (we cannot get the module pointer value).
497          * Firmware will check the module-based model.
498          */
499         if ((offset + words) > hw->nvm.sr_size)
500                 i40e_debug(hw, I40E_DEBUG_NVM,
501                            "NVM read error: offset %d beyond Shadow RAM limit %d\n",
502                            (offset + words), hw->nvm.sr_size);
503         else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
504                 /* We can read only up to 4KB (one sector) in one AQ read */
505                 i40e_debug(hw, I40E_DEBUG_NVM,
506                            "NVM read fail error: tried to read %d words, limit is %d.\n",
507                            words, I40E_SR_SECTOR_SIZE_IN_WORDS);
508         else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
509                  != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
510                 /* A single read cannot spread over two sectors */
511                 i40e_debug(hw, I40E_DEBUG_NVM,
512                            "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
513                            offset, words);
514         else
515                 ret_code = i40e_aq_read_nvm(hw, module_pointer,
516                                             2 * offset,  /*bytes*/
517                                             2 * words,   /*bytes*/
518                                             data, last_command, &cmd_details);
519
520         return ret_code;
521 }
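
/*
 * Note on units (illustrative): offset and words are counted in 16-bit
 * words while the AQ command takes byte offsets and lengths, hence the
 * 2 * offset and 2 * words conversion above. E.g. reading one word at word
 * offset 0x40 issues an AQ read of 2 bytes at byte offset 0x80.
 */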
522
523 /**
524  * i40e_write_nvm_aq - Writes Shadow RAM.
525  * @hw: pointer to the HW structure.
526  * @module_pointer: module pointer location in words from the NVM beginning
527  * @offset: offset in words from module start
528  * @words: number of words to write
529  * @data: buffer with words to write to the Shadow RAM
530  * @last_command: tells the AdminQ that this is the last command
531  *
532  * Writes a 16 bit word buffer to the Shadow RAM using the admin command.
533  **/
534 enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
535                                         u32 offset, u16 words, void *data,
536                                         bool last_command)
537 {
538         enum i40e_status_code ret_code = I40E_ERR_NVM;
539         struct i40e_asq_cmd_details cmd_details;
540
541         DEBUGFUNC("i40e_write_nvm_aq");
542
543         memset(&cmd_details, 0, sizeof(cmd_details));
544         cmd_details.wb_desc = &hw->nvm_wb_desc;
545
546         /* Here we are checking the SR limit only for the flat memory model.
547          * We cannot do it for the module-based model, as we did not acquire
548          * the NVM resource yet (we cannot get the module pointer value).
549          * Firmware will check the module-based model.
550          */
551         if ((offset + words) > hw->nvm.sr_size)
552                 DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n");
553         else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
554                 /* We can write only up to 4KB (one sector), in one AQ write */
555                 DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n");
556         else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
557                  != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
558                 /* A single write cannot spread over two sectors */
559                 DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n");
560         else
561                 ret_code = i40e_aq_update_nvm(hw, module_pointer,
562                                               2 * offset,  /*bytes*/
563                                               2 * words,   /*bytes*/
564                                               data, last_command, &cmd_details);
565
566         return ret_code;
567 }
568
569 /**
570  * __i40e_write_nvm_word - Writes Shadow RAM word
571  * @hw: pointer to the HW structure
572  * @offset: offset of the Shadow RAM word to write
573  * @data: word to write to the Shadow RAM
574  *
575  * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
576  * NVM ownership has to be acquired and released (on ARQ completion event
577  * reception) by the caller. To commit the SR to the NVM, the update
578  * checksum function should be called.
579  **/
580 enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
581                                             void *data)
582 {
583         DEBUGFUNC("__i40e_write_nvm_word");
584
585         *((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));
586
587         /* Value 0x00 below means that we treat SR as a flat mem */
588         return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, false);
589 }
590
591 /**
592  * __i40e_write_nvm_buffer - Writes Shadow RAM buffer
593  * @hw: pointer to the HW structure
594  * @module_pointer: module pointer location in words from the NVM beginning
595  * @offset: offset of the Shadow RAM buffer to write
596  * @words: number of words to write
597  * @data: words to write to the Shadow RAM
598  *
599  * Writes a 16 bit word buffer to the Shadow RAM using the admin command.
600  * NVM ownership must be acquired before calling this function and released
601  * on ARQ completion event reception by the caller. To commit the SR to the
602  * NVM, the update checksum function should be called.
603  **/
604 enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw,
605                                               u8 module_pointer, u32 offset,
606                                               u16 words, void *data)
607 {
608         __le16 *le_word_ptr = (__le16 *)data;
609         u16 *word_ptr = (u16 *)data;
610         u32 i = 0;
611
612         DEBUGFUNC("__i40e_write_nvm_buffer");
613
614         for (i = 0; i < words; i++)
615                 le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);
616
617         /* Here we will only write one buffer as the size of the modules
618          * mirrored in the Shadow RAM is always less than 4K.
619          */
620         return i40e_write_nvm_aq(hw, module_pointer, offset, words,
621                                  data, false);
622 }
623
624 /**
625  * i40e_calc_nvm_checksum - Calculates and returns the checksum
626  * @hw: pointer to hardware structure
627  * @checksum: pointer to the checksum
628  *
629  * This function calculates a SW checksum that covers the whole 64kB shadow RAM
630  * except the VPD and PCIe ALT Auto-load modules. The structure and size of the
631  * VPD area are customer specific and unknown, so this function skips the
632  * maximum possible size of the VPD area (1kB).
633  **/
634 enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
635 {
636         enum i40e_status_code ret_code = I40E_SUCCESS;
637         struct i40e_virt_mem vmem;
638         u16 pcie_alt_module = 0;
639         u16 checksum_local = 0;
640         u16 vpd_module = 0;
641         u16 *data;
642         u16 i = 0;
643
644         DEBUGFUNC("i40e_calc_nvm_checksum");
645
646         ret_code = i40e_allocate_virt_mem(hw, &vmem,
647                                     I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
648         if (ret_code)
649                 goto i40e_calc_nvm_checksum_exit;
650         data = (u16 *)vmem.va;
651
652         /* read pointer to VPD area */
653         ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR,
654                                         &vpd_module);
655         if (ret_code != I40E_SUCCESS) {
656                 ret_code = I40E_ERR_NVM_CHECKSUM;
657                 goto i40e_calc_nvm_checksum_exit;
658         }
659
660         /* read pointer to PCIe Alt Auto-load module */
661         ret_code = __i40e_read_nvm_word(hw,
662                                         I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
663                                         &pcie_alt_module);
664         if (ret_code != I40E_SUCCESS) {
665                 ret_code = I40E_ERR_NVM_CHECKSUM;
666                 goto i40e_calc_nvm_checksum_exit;
667         }
668
669         /* Calculate SW checksum that covers the whole 64kB shadow RAM
670          * except the VPD and PCIe ALT Auto-load modules
671          */
672         for (i = 0; i < hw->nvm.sr_size; i++) {
673                 /* Read SR page */
674                 if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
675                         u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
676
677                         ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
678                         if (ret_code != I40E_SUCCESS) {
679                                 ret_code = I40E_ERR_NVM_CHECKSUM;
680                                 goto i40e_calc_nvm_checksum_exit;
681                         }
682                 }
683
684                 /* Skip Checksum word */
685                 if (i == I40E_SR_SW_CHECKSUM_WORD)
686                         continue;
687                 /* Skip VPD module (convert byte size to word count) */
688                 if ((i >= (u32)vpd_module) &&
689                     (i < ((u32)vpd_module +
690                      (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
691                         continue;
692                 }
693                 /* Skip PCIe ALT module (convert byte size to word count) */
694                 if ((i >= (u32)pcie_alt_module) &&
695                     (i < ((u32)pcie_alt_module +
696                      (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
697                         continue;
698                 }
699
700                 checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
701         }
702
703         *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
704
705 i40e_calc_nvm_checksum_exit:
706         i40e_free_virt_mem(hw, &vmem);
707         return ret_code;
708 }
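
/*
 * The returned value is I40E_SR_SW_CHECKSUM_BASE minus the 16-bit sum of
 * the covered words, so once it is stored at I40E_SR_SW_CHECKSUM_WORD the
 * following holds (modulo 2^16):
 *
 *	sum(covered words) + stored checksum == I40E_SR_SW_CHECKSUM_BASE
 *
 * i40e_validate_nvm_checksum() below relies on this: it recomputes
 * BASE - sum and compares the result against the word stored in the
 * Shadow RAM.
 */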
709
710 /**
711  * i40e_update_nvm_checksum - Updates the NVM checksum
712  * @hw: pointer to hardware structure
713  *
714  * NVM ownership must be acquired before calling this function and released
715  * on ARQ completion event reception by caller.
716  * This function will commit SR to NVM.
717  **/
718 enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
719 {
720         enum i40e_status_code ret_code = I40E_SUCCESS;
721         u16 checksum;
722         __le16 le_sum;
723
724         DEBUGFUNC("i40e_update_nvm_checksum");
725
726         ret_code = i40e_calc_nvm_checksum(hw, &checksum);
727         le_sum = CPU_TO_LE16(checksum);
728         if (ret_code == I40E_SUCCESS)
729                 ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
730                                              1, &le_sum, true);
731
732         return ret_code;
733 }
734
735 /**
736  * i40e_validate_nvm_checksum - Validate EEPROM checksum
737  * @hw: pointer to hardware structure
738  * @checksum: calculated checksum
739  *
740  * Performs checksum calculation and validates the NVM SW checksum. If the
741  * caller does not need the checksum, the @checksum pointer can be NULL.
742  **/
743 enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
744                                                  u16 *checksum)
745 {
746         enum i40e_status_code ret_code = I40E_SUCCESS;
747         u16 checksum_sr = 0;
748         u16 checksum_local = 0;
749
750         DEBUGFUNC("i40e_validate_nvm_checksum");
751
752         /* acquire_nvm provides an exclusive NVM lock to synchronize access
753          * across PFs. X710 uses i40e_read_nvm_word_srctl, which polls for the
754          * done bit twice (first to be able to write the address to the
755          * I40E_GLNVM_SRCTL register, then to read the data from
756          * I40E_GLNVM_SRDATA). One PF can see the done bit and try to write the
757          * address while another one interprets it as a good time to read data,
758          * which would cause invalid data to be read.
759          */
760         ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
761         if (!ret_code) {
762                 ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
763                 i40e_release_nvm(hw);
764                 if (ret_code != I40E_SUCCESS)
765                         goto i40e_validate_nvm_checksum_exit;
766         } else {
767                 goto i40e_validate_nvm_checksum_exit;
768         }
769
770         i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
771
772         /* Verify read checksum from EEPROM is the same as
773          * calculated checksum
774          */
775         if (checksum_local != checksum_sr)
776                 ret_code = I40E_ERR_NVM_CHECKSUM;
777
778         /* If the user cares, return the calculated checksum */
779         if (checksum)
780                 *checksum = checksum_local;
781
782 i40e_validate_nvm_checksum_exit:
783         return ret_code;
784 }
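
/*
 * Minimal caller sketch (illustrative): passing NULL for @checksum only
 * reports the result, which is I40E_ERR_NVM_CHECKSUM when the stored and
 * calculated values differ.
 *
 *	ret_code = i40e_validate_nvm_checksum(hw, NULL);
 */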
785
786 STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
787                                                     struct i40e_nvm_access *cmd,
788                                                     u8 *bytes, int *perrno);
789 STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
790                                                     struct i40e_nvm_access *cmd,
791                                                     u8 *bytes, int *perrno);
792 STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
793                                                     struct i40e_nvm_access *cmd,
794                                                     u8 *bytes, int *perrno);
795 STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
796                                                     struct i40e_nvm_access *cmd,
797                                                     int *perrno);
798 STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
799                                                    struct i40e_nvm_access *cmd,
800                                                    int *perrno);
801 STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
802                                                    struct i40e_nvm_access *cmd,
803                                                    u8 *bytes, int *perrno);
804 STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
805                                                   struct i40e_nvm_access *cmd,
806                                                   u8 *bytes, int *perrno);
807 STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
808                                                  struct i40e_nvm_access *cmd,
809                                                  u8 *bytes, int *perrno);
810 STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
811                                                     struct i40e_nvm_access *cmd,
812                                                     u8 *bytes, int *perrno);
813 STATIC INLINE u8 i40e_nvmupd_get_module(u32 val)
814 {
815         return (u8)(val & I40E_NVM_MOD_PNT_MASK);
816 }
817 STATIC INLINE u8 i40e_nvmupd_get_transaction(u32 val)
818 {
819         return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
820 }
821
822 STATIC const char *i40e_nvm_update_state_str[] = {
823         "I40E_NVMUPD_INVALID",
824         "I40E_NVMUPD_READ_CON",
825         "I40E_NVMUPD_READ_SNT",
826         "I40E_NVMUPD_READ_LCB",
827         "I40E_NVMUPD_READ_SA",
828         "I40E_NVMUPD_WRITE_ERA",
829         "I40E_NVMUPD_WRITE_CON",
830         "I40E_NVMUPD_WRITE_SNT",
831         "I40E_NVMUPD_WRITE_LCB",
832         "I40E_NVMUPD_WRITE_SA",
833         "I40E_NVMUPD_CSUM_CON",
834         "I40E_NVMUPD_CSUM_SA",
835         "I40E_NVMUPD_CSUM_LCB",
836         "I40E_NVMUPD_STATUS",
837         "I40E_NVMUPD_EXEC_AQ",
838         "I40E_NVMUPD_GET_AQ_RESULT",
839 };
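
/*
 * Overview of the NVM update state machine handled below (derived from the
 * state handlers in this file):
 *
 *   INIT --READ_SNT---------------------------> READING
 *   INIT --WRITE_SNT--------------------------> WRITE_WAIT
 *   INIT --WRITE_SA/WRITE_ERA/CSUM_SA---------> INIT_WAIT
 *   READING --READ_LCB------------------------> INIT (NVM released)
 *   WRITING --WRITE_CON/CSUM_CON--------------> WRITE_WAIT
 *   WRITING --WRITE_LCB/CSUM_LCB--------------> INIT_WAIT
 *   WRITE_WAIT --awaited AQ completion--------> WRITING
 *   INIT_WAIT  --awaited AQ completion--------> INIT (NVM released when
 *                                               nvm_release_on_done is set)
 *
 * An awaited completion that reports an ARQ error moves the machine to the
 * ERROR state instead; that state is cleared by the next STATUS request.
 */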
840
841 /**
842  * i40e_nvmupd_command - Process an NVM update command
843  * @hw: pointer to hardware structure
844  * @cmd: pointer to nvm update command
845  * @bytes: pointer to the data buffer
846  * @perrno: pointer to return error code
847  *
848  * Dispatches command depending on what update state is current
849  **/
850 enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
851                                           struct i40e_nvm_access *cmd,
852                                           u8 *bytes, int *perrno)
853 {
854         enum i40e_status_code status;
855         enum i40e_nvmupd_cmd upd_cmd;
856
857         DEBUGFUNC("i40e_nvmupd_command");
858
859         /* assume success */
860         *perrno = 0;
861
862         /* early check for status command and debug msgs */
863         upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
864
865         i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_done %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
866                    i40e_nvm_update_state_str[upd_cmd],
867                    hw->nvmupd_state,
868                    hw->nvm_release_on_done, hw->nvm_wait_opcode,
869                    cmd->command, cmd->config, cmd->offset, cmd->data_size);
870
871         if (upd_cmd == I40E_NVMUPD_INVALID) {
872                 *perrno = -EFAULT;
873                 i40e_debug(hw, I40E_DEBUG_NVM,
874                            "i40e_nvmupd_validate_command returns %d errno %d\n",
875                            upd_cmd, *perrno);
876         }
877
878         /* a status request returns immediately rather than
879          * going into the state machine
880          */
881         if (upd_cmd == I40E_NVMUPD_STATUS) {
882                 if (!cmd->data_size) {
883                         *perrno = -EFAULT;
884                         return I40E_ERR_BUF_TOO_SHORT;
885                 }
886
887                 bytes[0] = hw->nvmupd_state;
888
889                 if (cmd->data_size >= 4) {
890                         bytes[1] = 0;
891                         *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
892                 }
893
894                 /* Clear error status on read */
895                 if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
896                         hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
897
898                 return I40E_SUCCESS;
899         }
900
901         /* Clear the status even if it was not read, and log it */
902         if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
903                 i40e_debug(hw, I40E_DEBUG_NVM,
904                            "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
905                 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
906         }
907
908         /* Acquire lock to prevent race condition where adminq_task
909          * can execute after i40e_nvmupd_nvm_read/write but before state
910          * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
911          *
912          * During NVMUpdate, it is observed that lock could be held for
913          * ~5ms for most commands. However lock is held for ~60ms for
914          * NVMUPD_CSUM_LCB command.
915          */
916         i40e_acquire_spinlock(&hw->aq.arq_spinlock);
917         switch (hw->nvmupd_state) {
918         case I40E_NVMUPD_STATE_INIT:
919                 status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
920                 break;
921
922         case I40E_NVMUPD_STATE_READING:
923                 status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
924                 break;
925
926         case I40E_NVMUPD_STATE_WRITING:
927                 status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
928                 break;
929
930         case I40E_NVMUPD_STATE_INIT_WAIT:
931         case I40E_NVMUPD_STATE_WRITE_WAIT:
932                 /* if we need to stop waiting for an event, clear
933                  * the wait info and return before doing anything else
934                  */
935                 if (cmd->offset == 0xffff) {
936                         i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
937                         status = I40E_SUCCESS;
938                         goto exit;
939                 }
940
941                 status = I40E_ERR_NOT_READY;
942                 *perrno = -EBUSY;
943                 break;
944
945         default:
946                 /* invalid state, should never happen */
947                 i40e_debug(hw, I40E_DEBUG_NVM,
948                            "NVMUPD: no such state %d\n", hw->nvmupd_state);
949                 status = I40E_NOT_SUPPORTED;
950                 *perrno = -ESRCH;
951                 break;
952         }
953 exit:
954         i40e_release_spinlock(&hw->aq.arq_spinlock);
955         return status;
956 }
957
958 /**
959  * i40e_nvmupd_state_init - Handle NVM update state Init
960  * @hw: pointer to hardware structure
961  * @cmd: pointer to nvm update command buffer
962  * @bytes: pointer to the data buffer
963  * @perrno: pointer to return error code
964  *
965  * Process legitimate commands of the Init state and conditionally set next
966  * state. Reject all other commands.
967  **/
968 STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
969                                                     struct i40e_nvm_access *cmd,
970                                                     u8 *bytes, int *perrno)
971 {
972         enum i40e_status_code status = I40E_SUCCESS;
973         enum i40e_nvmupd_cmd upd_cmd;
974
975         DEBUGFUNC("i40e_nvmupd_state_init");
976
977         upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
978
979         switch (upd_cmd) {
980         case I40E_NVMUPD_READ_SA:
981                 status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
982                 if (status) {
983                         *perrno = i40e_aq_rc_to_posix(status,
984                                                      hw->aq.asq_last_status);
985                 } else {
986                         status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
987                         i40e_release_nvm(hw);
988                 }
989                 break;
990
991         case I40E_NVMUPD_READ_SNT:
992                 status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
993                 if (status) {
994                         *perrno = i40e_aq_rc_to_posix(status,
995                                                      hw->aq.asq_last_status);
996                 } else {
997                         status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
998                         if (status)
999                                 i40e_release_nvm(hw);
1000                         else
1001                                 hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
1002                 }
1003                 break;
1004
1005         case I40E_NVMUPD_WRITE_ERA:
1006                 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1007                 if (status) {
1008                         *perrno = i40e_aq_rc_to_posix(status,
1009                                                      hw->aq.asq_last_status);
1010                 } else {
1011                         status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
1012                         if (status) {
1013                                 i40e_release_nvm(hw);
1014                         } else {
1015                                 hw->nvm_release_on_done = true;
1016                                 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
1017                                 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1018                         }
1019                 }
1020                 break;
1021
1022         case I40E_NVMUPD_WRITE_SA:
1023                 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1024                 if (status) {
1025                         *perrno = i40e_aq_rc_to_posix(status,
1026                                                      hw->aq.asq_last_status);
1027                 } else {
1028                         status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1029                         if (status) {
1030                                 i40e_release_nvm(hw);
1031                         } else {
1032                                 hw->nvm_release_on_done = true;
1033                                 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1034                                 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1035                         }
1036                 }
1037                 break;
1038
1039         case I40E_NVMUPD_WRITE_SNT:
1040                 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1041                 if (status) {
1042                         *perrno = i40e_aq_rc_to_posix(status,
1043                                                      hw->aq.asq_last_status);
1044                 } else {
1045                         status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1046                         if (status) {
1047                                 i40e_release_nvm(hw);
1048                         } else {
1049                                 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1050                                 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1051                         }
1052                 }
1053                 break;
1054
1055         case I40E_NVMUPD_CSUM_SA:
1056                 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1057                 if (status) {
1058                         *perrno = i40e_aq_rc_to_posix(status,
1059                                                      hw->aq.asq_last_status);
1060                 } else {
1061                         status = i40e_update_nvm_checksum(hw);
1062                         if (status) {
1063                                 *perrno = hw->aq.asq_last_status ?
1064                                    i40e_aq_rc_to_posix(status,
1065                                                        hw->aq.asq_last_status) :
1066                                    -EIO;
1067                                 i40e_release_nvm(hw);
1068                         } else {
1069                                 hw->nvm_release_on_done = true;
1070                                 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1071                                 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1072                         }
1073                 }
1074                 break;
1075
1076         case I40E_NVMUPD_EXEC_AQ:
1077                 status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
1078                 break;
1079
1080         case I40E_NVMUPD_GET_AQ_RESULT:
1081                 status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
1082                 break;
1083
1084         default:
1085                 i40e_debug(hw, I40E_DEBUG_NVM,
1086                            "NVMUPD: bad cmd %s in init state\n",
1087                            i40e_nvm_update_state_str[upd_cmd]);
1088                 status = I40E_ERR_NVM;
1089                 *perrno = -ESRCH;
1090                 break;
1091         }
1092         return status;
1093 }
1094
1095 /**
1096  * i40e_nvmupd_state_reading - Handle NVM update state Reading
1097  * @hw: pointer to hardware structure
1098  * @cmd: pointer to nvm update command buffer
1099  * @bytes: pointer to the data buffer
1100  * @perrno: pointer to return error code
1101  *
1102  * NVM ownership is already held.  Process legitimate commands and set any
1103  * change in state; reject all other commands.
1104  **/
1105 STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
1106                                                     struct i40e_nvm_access *cmd,
1107                                                     u8 *bytes, int *perrno)
1108 {
1109         enum i40e_status_code status = I40E_SUCCESS;
1110         enum i40e_nvmupd_cmd upd_cmd;
1111
1112         DEBUGFUNC("i40e_nvmupd_state_reading");
1113
1114         upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1115
1116         switch (upd_cmd) {
1117         case I40E_NVMUPD_READ_SA:
1118         case I40E_NVMUPD_READ_CON:
1119                 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1120                 break;
1121
1122         case I40E_NVMUPD_READ_LCB:
1123                 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1124                 i40e_release_nvm(hw);
1125                 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1126                 break;
1127
1128         default:
1129                 i40e_debug(hw, I40E_DEBUG_NVM,
1130                            "NVMUPD: bad cmd %s in reading state.\n",
1131                            i40e_nvm_update_state_str[upd_cmd]);
1132                 status = I40E_NOT_SUPPORTED;
1133                 *perrno = -ESRCH;
1134                 break;
1135         }
1136         return status;
1137 }
1138
1139 /**
1140  * i40e_nvmupd_state_writing - Handle NVM update state Writing
1141  * @hw: pointer to hardware structure
1142  * @cmd: pointer to nvm update command buffer
1143  * @bytes: pointer to the data buffer
1144  * @perrno: pointer to return error code
1145  *
1146  * NVM ownership is already held.  Process legitimate commands and set any
1147  * change in state; reject all other commands
1148  **/
1149 STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
1150                                                     struct i40e_nvm_access *cmd,
1151                                                     u8 *bytes, int *perrno)
1152 {
1153         enum i40e_status_code status = I40E_SUCCESS;
1154         enum i40e_nvmupd_cmd upd_cmd;
1155         bool retry_attempt = false;
1156
1157         DEBUGFUNC("i40e_nvmupd_state_writing");
1158
1159         upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1160
1161 retry:
1162         switch (upd_cmd) {
1163         case I40E_NVMUPD_WRITE_CON:
1164                 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1165                 if (!status) {
1166                         hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1167                         hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1168                 }
1169                 break;
1170
1171         case I40E_NVMUPD_WRITE_LCB:
1172                 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1173                 if (status) {
1174                         *perrno = hw->aq.asq_last_status ?
1175                                    i40e_aq_rc_to_posix(status,
1176                                                        hw->aq.asq_last_status) :
1177                                    -EIO;
1178                         hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1179                 } else {
1180                         hw->nvm_release_on_done = true;
1181                         hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1182                         hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1183                 }
1184                 break;
1185
1186         case I40E_NVMUPD_CSUM_CON:
1187                 /* Assumes the caller has acquired the nvm */
1188                 status = i40e_update_nvm_checksum(hw);
1189                 if (status) {
1190                         *perrno = hw->aq.asq_last_status ?
1191                                    i40e_aq_rc_to_posix(status,
1192                                                        hw->aq.asq_last_status) :
1193                                    -EIO;
1194                         hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1195                 } else {
1196                         hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1197                         hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1198                 }
1199                 break;
1200
1201         case I40E_NVMUPD_CSUM_LCB:
1202                 /* Assumes the caller has acquired the nvm */
1203                 status = i40e_update_nvm_checksum(hw);
1204                 if (status) {
1205                         *perrno = hw->aq.asq_last_status ?
1206                                    i40e_aq_rc_to_posix(status,
1207                                                        hw->aq.asq_last_status) :
1208                                    -EIO;
1209                         hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1210                 } else {
1211                         hw->nvm_release_on_done = true;
1212                         hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1213                         hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1214                 }
1215                 break;
1216
1217         default:
1218                 i40e_debug(hw, I40E_DEBUG_NVM,
1219                            "NVMUPD: bad cmd %s in writing state.\n",
1220                            i40e_nvm_update_state_str[upd_cmd]);
1221                 status = I40E_NOT_SUPPORTED;
1222                 *perrno = -ESRCH;
1223                 break;
1224         }
1225
1226         /* In some circumstances, a multi-write transaction takes longer
1227          * than the default 3 minute timeout on the write semaphore.  If
1228          * the write failed with an EBUSY status, this is likely the problem,
1229          * so here we try to reacquire the semaphore then retry the write.
1230          * We only do one retry, then give up.
1231          */
1232         if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
1233             !retry_attempt) {
1234                 enum i40e_status_code old_status = status;
1235                 u32 old_asq_status = hw->aq.asq_last_status;
1236                 u32 gtime;
1237
1238                 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
1239                 if (gtime >= hw->nvm.hw_semaphore_timeout) {
1240                         i40e_debug(hw, I40E_DEBUG_ALL,
1241                                    "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
1242                                    gtime, hw->nvm.hw_semaphore_timeout);
1243                         i40e_release_nvm(hw);
1244                         status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1245                         if (status) {
1246                                 i40e_debug(hw, I40E_DEBUG_ALL,
1247                                            "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
1248                                            hw->aq.asq_last_status);
1249                                 status = old_status;
1250                                 hw->aq.asq_last_status = old_asq_status;
1251                         } else {
1252                                 retry_attempt = true;
1253                                 goto retry;
1254                         }
1255                 }
1256         }
1257
1258         return status;
1259 }
1260
1261 /**
1262  * i40e_nvmupd_check_wait_event - handle NVM update operation events
1263  * @hw: pointer to the hardware structure
1264  * @opcode: the event that just happened
1265  **/
1266 void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
1267 {
1268         if (opcode == hw->nvm_wait_opcode) {
1269
1270                 i40e_debug(hw, I40E_DEBUG_NVM,
1271                            "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
1272                 if (hw->nvm_release_on_done) {
1273                         i40e_release_nvm(hw);
1274                         hw->nvm_release_on_done = false;
1275                 }
1276                 hw->nvm_wait_opcode = 0;
1277
1278                 if (hw->aq.arq_last_status) {
1279                         hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
1280                         return;
1281                 }
1282
1283                 switch (hw->nvmupd_state) {
1284                 case I40E_NVMUPD_STATE_INIT_WAIT:
1285                         hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1286                         break;
1287
1288                 case I40E_NVMUPD_STATE_WRITE_WAIT:
1289                         hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
1290                         break;
1291
1292                 default:
1293                         break;
1294                 }
1295         }
1296 }
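
/*
 * Descriptive note (not generated code): the wait-state handling above
 * reduces to the following transitions, taken only when the completed
 * admin queue opcode matches hw->nvm_wait_opcode.  The NVM resource is
 * released first whenever nvm_release_on_done is set.
 *
 *   arq_last_status != 0           -> I40E_NVMUPD_STATE_ERROR
 *   I40E_NVMUPD_STATE_INIT_WAIT    -> I40E_NVMUPD_STATE_INIT
 *   I40E_NVMUPD_STATE_WRITE_WAIT   -> I40E_NVMUPD_STATE_WRITING
 */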
1297
1298 /**
1299  * i40e_nvmupd_validate_command - Validate given command
1300  * @hw: pointer to hardware structure
1301  * @cmd: pointer to nvm update command buffer
1302  * @perrno: pointer to return error code
1303  *
1304  * Return one of the valid command types or I40E_NVMUPD_INVALID
1305  **/
1306 STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
1307                                                     struct i40e_nvm_access *cmd,
1308                                                     int *perrno)
1309 {
1310         enum i40e_nvmupd_cmd upd_cmd;
1311         u8 module, transaction;
1312
1313         DEBUGFUNC("i40e_nvmupd_validate_command");
1314
1315         /* anything that doesn't match a recognized case is an error */
1316         upd_cmd = I40E_NVMUPD_INVALID;
1317
1318         transaction = i40e_nvmupd_get_transaction(cmd->config);
1319         module = i40e_nvmupd_get_module(cmd->config);
1320
1321         /* limits on data size */
1322         if ((cmd->data_size < 1) ||
1323             (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
1324                 i40e_debug(hw, I40E_DEBUG_NVM,
1325                            "i40e_nvmupd_validate_command data_size %d\n",
1326                            cmd->data_size);
1327                 *perrno = -EFAULT;
1328                 return I40E_NVMUPD_INVALID;
1329         }
1330
1331         switch (cmd->command) {
1332         case I40E_NVM_READ:
1333                 switch (transaction) {
1334                 case I40E_NVM_CON:
1335                         upd_cmd = I40E_NVMUPD_READ_CON;
1336                         break;
1337                 case I40E_NVM_SNT:
1338                         upd_cmd = I40E_NVMUPD_READ_SNT;
1339                         break;
1340                 case I40E_NVM_LCB:
1341                         upd_cmd = I40E_NVMUPD_READ_LCB;
1342                         break;
1343                 case I40E_NVM_SA:
1344                         upd_cmd = I40E_NVMUPD_READ_SA;
1345                         break;
1346                 case I40E_NVM_EXEC:
1347                         if (module == 0xf)
1348                                 upd_cmd = I40E_NVMUPD_STATUS;
1349                         else if (module == 0)
1350                                 upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
1351                         break;
1352                 }
1353                 break;
1354
1355         case I40E_NVM_WRITE:
1356                 switch (transaction) {
1357                 case I40E_NVM_CON:
1358                         upd_cmd = I40E_NVMUPD_WRITE_CON;
1359                         break;
1360                 case I40E_NVM_SNT:
1361                         upd_cmd = I40E_NVMUPD_WRITE_SNT;
1362                         break;
1363                 case I40E_NVM_LCB:
1364                         upd_cmd = I40E_NVMUPD_WRITE_LCB;
1365                         break;
1366                 case I40E_NVM_SA:
1367                         upd_cmd = I40E_NVMUPD_WRITE_SA;
1368                         break;
1369                 case I40E_NVM_ERA:
1370                         upd_cmd = I40E_NVMUPD_WRITE_ERA;
1371                         break;
1372                 case I40E_NVM_CSUM:
1373                         upd_cmd = I40E_NVMUPD_CSUM_CON;
1374                         break;
1375                 case (I40E_NVM_CSUM|I40E_NVM_SA):
1376                         upd_cmd = I40E_NVMUPD_CSUM_SA;
1377                         break;
1378                 case (I40E_NVM_CSUM|I40E_NVM_LCB):
1379                         upd_cmd = I40E_NVMUPD_CSUM_LCB;
1380                         break;
1381                 case I40E_NVM_EXEC:
1382                         if (module == 0)
1383                                 upd_cmd = I40E_NVMUPD_EXEC_AQ;
1384                         break;
1385                 }
1386                 break;
1387         }
1388
1389         return upd_cmd;
1390 }
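
/*
 * Illustrative sketch, kept under #if 0 as a documentation-only example:
 * how a caller-side request might be classified by
 * i40e_nvmupd_validate_command().  The config packing assumes the usual
 * i40e_type.h layout (module pointer in the low byte, transaction field at
 * I40E_NVM_TRANS_SHIFT); the helper name and the sample data_size are
 * hypothetical.
 */
#if 0
static enum i40e_nvmupd_cmd i40e_nvmupd_sample_classify(struct i40e_hw *hw,
                                                        int *perrno)
{
        struct i40e_nvm_access cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.command = I40E_NVM_WRITE;
        /* stand-alone checksum update: maps to I40E_NVMUPD_CSUM_SA */
        cmd.config = (I40E_NVM_CSUM | I40E_NVM_SA) << I40E_NVM_TRANS_SHIFT;
        cmd.offset = 0;
        cmd.data_size = 2;      /* must be 1..I40E_NVMUPD_MAX_DATA */

        return i40e_nvmupd_validate_command(hw, &cmd, perrno);
}
#endif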
1391
1392 /**
1393  * i40e_nvmupd_exec_aq - Run an AQ command
1394  * @hw: pointer to hardware structure
1395  * @cmd: pointer to nvm update command buffer
1396  * @bytes: pointer to the data buffer
1397  * @perrno: pointer to return error code
1398  *
1399  * cmd structure contains identifiers and data buffer
1400  **/
1401 STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
1402                                                  struct i40e_nvm_access *cmd,
1403                                                  u8 *bytes, int *perrno)
1404 {
1405         struct i40e_asq_cmd_details cmd_details;
1406         enum i40e_status_code status;
1407         struct i40e_aq_desc *aq_desc;
1408         u32 buff_size = 0;
1409         u8 *buff = NULL;
1410         u32 aq_desc_len;
1411         u32 aq_data_len;
1412
1413         i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1414         memset(&cmd_details, 0, sizeof(cmd_details));
1415         cmd_details.wb_desc = &hw->nvm_wb_desc;
1416
1417         aq_desc_len = sizeof(struct i40e_aq_desc);
1418         memset(&hw->nvm_wb_desc, 0, aq_desc_len);
1419
1420         /* get the aq descriptor */
1421         if (cmd->data_size < aq_desc_len) {
1422                 i40e_debug(hw, I40E_DEBUG_NVM,
1423                            "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
1424                            cmd->data_size, aq_desc_len);
1425                 *perrno = -EINVAL;
1426                 return I40E_ERR_PARAM;
1427         }
1428         aq_desc = (struct i40e_aq_desc *)bytes;
1429
1430         /* if data buffer needed, make sure it's ready */
1431         aq_data_len = cmd->data_size - aq_desc_len;
1432         buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen));
1433         if (buff_size) {
1434                 if (!hw->nvm_buff.va) {
1435                         status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
1436                                                         hw->aq.asq_buf_size);
1437                         if (status)
1438                                 i40e_debug(hw, I40E_DEBUG_NVM,
1439                                            "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
1440                                            status);
1441                 }
1442
1443                 if (hw->nvm_buff.va) {
1444                         buff = hw->nvm_buff.va;
1445                         i40e_memcpy(buff, &bytes[aq_desc_len], aq_data_len,
1446                                 I40E_NONDMA_TO_NONDMA);
1447                 }
1448         }
1449
1450         /* and away we go! */
1451         status = i40e_asq_send_command(hw, aq_desc, buff,
1452                                        buff_size, &cmd_details);
1453         if (status) {
1454                 i40e_debug(hw, I40E_DEBUG_NVM,
1455                            "i40e_nvmupd_exec_aq err %s aq_err %s\n",
1456                            i40e_stat_str(hw, status),
1457                            i40e_aq_str(hw, hw->aq.asq_last_status));
1458                 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1459         }
1460
1461         /* if the caller passed an AQ opcode in offset, wait for that event */
1462         if (cmd->offset) {
1463                 hw->nvm_wait_opcode = cmd->offset;
1464                 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1465         }
1466
1467         return status;
1468 }
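
/*
 * Illustrative sketch, kept under #if 0 as a documentation-only example:
 * the byte buffer handed to i40e_nvmupd_exec_aq() starts with a complete
 * struct i40e_aq_desc and is followed by the indirect data described by its
 * datalen field, so cmd->data_size must be at least the descriptor size.
 * The descriptor below is left zeroed as a placeholder rather than being a
 * real admin queue command; the helper name is hypothetical.
 */
#if 0
static enum i40e_status_code i40e_nvmupd_sample_exec_aq(struct i40e_hw *hw,
                                                        int *perrno)
{
        u8 buf[sizeof(struct i40e_aq_desc) + 16];
        struct i40e_nvm_access cmd;

        memset(buf, 0, sizeof(buf));
        memset(&cmd, 0, sizeof(cmd));

        /* only offset and data_size are consumed by i40e_nvmupd_exec_aq();
         * a non-zero offset would be treated as an AQ opcode to wait for
         */
        cmd.offset = 0;
        cmd.data_size = sizeof(buf);

        return i40e_nvmupd_exec_aq(hw, &cmd, buf, perrno);
}
#endif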
1469
1470 /**
1471  * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
1472  * @hw: pointer to hardware structure
1473  * @cmd: pointer to nvm update command buffer
1474  * @bytes: pointer to the data buffer
1475  * @perrno: pointer to return error code
1476  *
1477  * cmd structure contains identifiers and data buffer
1478  **/
1479 STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
1480                                                     struct i40e_nvm_access *cmd,
1481                                                     u8 *bytes, int *perrno)
1482 {
1483         u32 aq_total_len;
1484         u32 aq_desc_len;
1485         int remainder;
1486         u8 *buff;
1487
1488         i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1489
1490         aq_desc_len = sizeof(struct i40e_aq_desc);
1491         aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen);
1492
1493         /* check offset range */
1494         if (cmd->offset > aq_total_len) {
1495                 i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
1496                            __func__, cmd->offset, aq_total_len);
1497                 *perrno = -EINVAL;
1498                 return I40E_ERR_PARAM;
1499         }
1500
1501         /* check copylength range */
1502         if (cmd->data_size > (aq_total_len - cmd->offset)) {
1503                 int new_len = aq_total_len - cmd->offset;
1504
1505                 i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
1506                            __func__, cmd->data_size, new_len);
1507                 cmd->data_size = new_len;
1508         }
1509
1510         remainder = cmd->data_size;
1511         if (cmd->offset < aq_desc_len) {
1512                 u32 len = aq_desc_len - cmd->offset;
1513
1514                 len = min(len, cmd->data_size);
1515                 i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
1516                            __func__, cmd->offset, cmd->offset + len);
1517
1518                 buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
1519                 i40e_memcpy(bytes, buff, len, I40E_NONDMA_TO_NONDMA);
1520
1521                 bytes += len;
1522                 remainder -= len;
1523                 buff = hw->nvm_buff.va;
1524         } else {
1525                 buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len);
1526         }
1527
1528         if (remainder > 0) {
1529                 int start_byte = buff - (u8 *)hw->nvm_buff.va;
1530
1531                 i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
1532                            __func__, start_byte, start_byte + remainder);
1533                 i40e_memcpy(bytes, buff, remainder, I40E_NONDMA_TO_NONDMA);
1534         }
1535
1536         return I40E_SUCCESS;
1537 }
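
/*
 * Illustrative sketch, kept under #if 0 as a documentation-only example:
 * retrieving the result of a previous EXEC_AQ in two pieces -- first the
 * writeback descriptor kept in hw->nvm_wb_desc, then the indirect data
 * buffered in hw->nvm_buff.  The helper name and out_size handling are
 * hypothetical; i40e_nvmupd_get_aq_result() trims oversized requests itself.
 */
#if 0
static enum i40e_status_code
i40e_nvmupd_sample_get_result(struct i40e_hw *hw, u8 *out, u32 out_size,
                              int *perrno)
{
        u32 desc_len = sizeof(struct i40e_aq_desc);
        struct i40e_nvm_access cmd;
        enum i40e_status_code status;

        if (out_size <= desc_len)
                return I40E_ERR_PARAM;

        memset(&cmd, 0, sizeof(cmd));

        /* piece 1: the admin queue writeback descriptor */
        cmd.offset = 0;
        cmd.data_size = desc_len;
        status = i40e_nvmupd_get_aq_result(hw, &cmd, out, perrno);
        if (status)
                return status;

        /* piece 2: whatever indirect data followed the descriptor */
        cmd.offset = desc_len;
        cmd.data_size = out_size - desc_len;
        return i40e_nvmupd_get_aq_result(hw, &cmd, out + desc_len, perrno);
}
#endif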
1538
1539 /**
1540  * i40e_nvmupd_nvm_read - Read NVM
1541  * @hw: pointer to hardware structure
1542  * @cmd: pointer to nvm update command buffer
1543  * @bytes: pointer to the data buffer
1544  * @perrno: pointer to return error code
1545  *
1546  * cmd structure contains identifiers and data buffer
1547  **/
1548 STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
1549                                                   struct i40e_nvm_access *cmd,
1550                                                   u8 *bytes, int *perrno)
1551 {
1552         struct i40e_asq_cmd_details cmd_details;
1553         enum i40e_status_code status;
1554         u8 module, transaction;
1555         bool last;
1556
1557         transaction = i40e_nvmupd_get_transaction(cmd->config);
1558         module = i40e_nvmupd_get_module(cmd->config);
1559         last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
1560
1561         memset(&cmd_details, 0, sizeof(cmd_details));
1562         cmd_details.wb_desc = &hw->nvm_wb_desc;
1563
1564         status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1565                                   bytes, last, &cmd_details);
1566         if (status) {
1567                 i40e_debug(hw, I40E_DEBUG_NVM,
1568                            "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
1569                            module, cmd->offset, cmd->data_size);
1570                 i40e_debug(hw, I40E_DEBUG_NVM,
1571                            "i40e_nvmupd_nvm_read status %d aq %d\n",
1572                            status, hw->aq.asq_last_status);
1573                 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1574         }
1575
1576         return status;
1577 }
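
/*
 * Illustrative sketch, kept under #if 0 as a documentation-only example:
 * a single stand-alone (SA) Shadow RAM read through the admin queue path
 * above.  The config packing assumes the usual i40e_type.h layout
 * (transaction field at I40E_NVM_TRANS_SHIFT, module pointer in the low
 * byte); module 0 addresses the flat NVM image and the helper name is
 * hypothetical.
 */
#if 0
static enum i40e_status_code
i40e_nvmupd_sample_read(struct i40e_hw *hw, u32 offset, u16 len, u8 *buf,
                        int *perrno)
{
        struct i40e_nvm_access cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.command = I40E_NVM_READ;
        cmd.config = I40E_NVM_SA << I40E_NVM_TRANS_SHIFT;       /* module 0 */
        cmd.offset = offset;
        cmd.data_size = len;

        /* SA marks the transfer as both first and last in its sequence */
        return i40e_nvmupd_nvm_read(hw, &cmd, buf, perrno);
}
#endif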
1578
1579 /**
1580  * i40e_nvmupd_nvm_erase - Erase an NVM module
1581  * @hw: pointer to hardware structure
1582  * @cmd: pointer to nvm update command buffer
1583  * @perrno: pointer to return error code
1584  *
1585  * module, offset, data_size and data are in cmd structure
1586  **/
1587 STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
1588                                                    struct i40e_nvm_access *cmd,
1589                                                    int *perrno)
1590 {
1591         enum i40e_status_code status = I40E_SUCCESS;
1592         struct i40e_asq_cmd_details cmd_details;
1593         u8 module, transaction;
1594         bool last;
1595
1596         transaction = i40e_nvmupd_get_transaction(cmd->config);
1597         module = i40e_nvmupd_get_module(cmd->config);
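        /* I40E_NVM_SA is defined as SNT|LCB in i40e_type.h, so masking with
         * LCB also marks a stand-alone erase as the last command in its
         * sequence (the same applies to the write path below).
         */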
1598         last = (transaction & I40E_NVM_LCB);
1599
1600         memset(&cmd_details, 0, sizeof(cmd_details));
1601         cmd_details.wb_desc = &hw->nvm_wb_desc;
1602
1603         status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1604                                    last, &cmd_details);
1605         if (status) {
1606                 i40e_debug(hw, I40E_DEBUG_NVM,
1607                            "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
1608                            module, cmd->offset, cmd->data_size);
1609                 i40e_debug(hw, I40E_DEBUG_NVM,
1610                            "i40e_nvmupd_nvm_erase status %d aq %d\n",
1611                            status, hw->aq.asq_last_status);
1612                 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1613         }
1614
1615         return status;
1616 }
1617
1618 /**
1619  * i40e_nvmupd_nvm_write - Write NVM
1620  * @hw: pointer to hardware structure
1621  * @cmd: pointer to nvm update command buffer
1622  * @bytes: pointer to the data buffer
1623  * @perrno: pointer to return error code
1624  *
1625  * module, offset, data_size and data are in cmd structure
1626  **/
1627 STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
1628                                                    struct i40e_nvm_access *cmd,
1629                                                    u8 *bytes, int *perrno)
1630 {
1631         enum i40e_status_code status = I40E_SUCCESS;
1632         struct i40e_asq_cmd_details cmd_details;
1633         u8 module, transaction;
1634         bool last;
1635
1636         transaction = i40e_nvmupd_get_transaction(cmd->config);
1637         module = i40e_nvmupd_get_module(cmd->config);
1638         last = (transaction & I40E_NVM_LCB);
1639
1640         memset(&cmd_details, 0, sizeof(cmd_details));
1641         cmd_details.wb_desc = &hw->nvm_wb_desc;
1642
1643         status = i40e_aq_update_nvm(hw, module, cmd->offset,
1644                                     (u16)cmd->data_size, bytes, last,
1645                                     &cmd_details);
1646         if (status) {
1647                 i40e_debug(hw, I40E_DEBUG_NVM,
1648                            "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
1649                            module, cmd->offset, cmd->data_size);
1650                 i40e_debug(hw, I40E_DEBUG_NVM,
1651                            "i40e_nvmupd_nvm_write status %d aq %d\n",
1652                            status, hw->aq.asq_last_status);
1653                 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1654         }
1655
1656         return status;
1657 }
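
/*
 * Illustrative sketch, kept under #if 0 as a documentation-only example:
 * one stand-alone (SA) buffer update through i40e_nvmupd_nvm_write().  As
 * with the read sketch above, the config packing assumes the usual
 * i40e_type.h layout and the helper name is hypothetical; the caller is
 * still responsible for holding the NVM resource and for any follow-up
 * checksum update the image requires.
 */
#if 0
static enum i40e_status_code
i40e_nvmupd_sample_write(struct i40e_hw *hw, u8 module, u32 offset, u16 len,
                         u8 *buf, int *perrno)
{
        struct i40e_nvm_access cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.command = I40E_NVM_WRITE;
        cmd.config = (I40E_NVM_SA << I40E_NVM_TRANS_SHIFT) | module;
        cmd.offset = offset;
        cmd.data_size = len;

        /* SA implies LCB, so this update is flagged as the last command */
        return i40e_nvmupd_nvm_write(hw, &cmd, buf, perrno);
}
#endif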