[dpdk.git] / drivers / net / i40e / base / i40e_nvm.c
1 /*******************************************************************************
2
3 Copyright (c) 2013 - 2015, Intel Corporation
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Redistributions in binary form must reproduce the above copyright
13     notice, this list of conditions and the following disclaimer in the
14     documentation and/or other materials provided with the distribution.
15
16  3. Neither the name of the Intel Corporation nor the names of its
17     contributors may be used to endorse or promote products derived from
18     this software without specific prior written permission.
19
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
31
32 ***************************************************************************/
33
34 #include "i40e_prototype.h"
35
36 enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
37                                                u16 *data);
38 enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
39                                             u16 *data);
40 enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
41                                                  u16 *words, u16 *data);
42 enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
43                                               u16 *words, u16 *data);
44 enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
45                                        u32 offset, u16 words, void *data,
46                                        bool last_command);
47
48 /**
49  * i40e_init_nvm - Initialize NVM parameters
50  * @hw: pointer to the HW structure
51  *
52  * Sets up the NVM info structure. Should be called once per NVM
53  * initialization, e.g. inside i40e_init_shared_code().
54  * Note that the term NVM is used here (and in all functions covered in
55  * this file) as an equivalent of the FLASH part mapped into the SR;
56  * FLASH is always accessed through the Shadow RAM.
57  **/
58 enum i40e_status_code i40e_init_nvm(struct i40e_hw *hw)
59 {
60         struct i40e_nvm_info *nvm = &hw->nvm;
61         enum i40e_status_code ret_code = I40E_SUCCESS;
62         u32 fla, gens;
63         u8 sr_size;
64
65         DEBUGFUNC("i40e_init_nvm");
66
67         /* The SR size is stored regardless of the nvm programming mode
68          * as the blank mode may be used in the factory line.
69          */
70         gens = rd32(hw, I40E_GLNVM_GENS);
71         sr_size = ((gens & I40E_GLNVM_GENS_SR_SIZE_MASK) >>
72                            I40E_GLNVM_GENS_SR_SIZE_SHIFT);
73         /* Convert to words (sr_size holds the SR size as a power of two, in KB) */
74         nvm->sr_size = BIT(sr_size) * I40E_SR_WORDS_IN_1KB;
75
76         /* Check if we are in the normal or blank NVM programming mode */
77         fla = rd32(hw, I40E_GLNVM_FLA);
78         if (fla & I40E_GLNVM_FLA_LOCKED_MASK) { /* Normal programming mode */
79                 /* Max NVM timeout */
80                 nvm->timeout = I40E_MAX_NVM_TIMEOUT;
81                 nvm->blank_nvm_mode = false;
82         } else { /* Blank programming mode */
83                 nvm->blank_nvm_mode = true;
84                 ret_code = I40E_ERR_NVM_BLANK_MODE;
85                 i40e_debug(hw, I40E_DEBUG_NVM, "NVM init error: unsupported blank mode.\n");
86         }
87
88         return ret_code;
89 }
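
/* Illustrative sketch (not part of the i40e base code): how the SR_SIZE
 * exponent read in i40e_init_nvm() above turns into a word count. The
 * helper name is hypothetical; it simply mirrors the BIT(sr_size) math.
 */
static u32 i40e_example_sr_size_in_words(u8 sr_size_exponent)
{
        /* e.g. an exponent of 4 means a 16KB Shadow RAM, i.e. 8192 words */
        return BIT(sr_size_exponent) * I40E_SR_WORDS_IN_1KB;
}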
90
91 /**
92  * i40e_acquire_nvm - Generic request for acquiring the NVM ownership
93  * @hw: pointer to the HW structure
94  * @access: NVM access type (read or write)
95  *
96  * This function will request NVM ownership for reading or writing
97  * via the proper Admin Command.
98  **/
99 enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
100                                        enum i40e_aq_resource_access_type access)
101 {
102         enum i40e_status_code ret_code = I40E_SUCCESS;
103         u64 gtime, timeout;
104         u64 time_left = 0;
105
106         DEBUGFUNC("i40e_acquire_nvm");
107
108         if (hw->nvm.blank_nvm_mode)
109                 goto i40e_acquire_nvm_exit;
110
111         ret_code = i40e_aq_request_resource(hw, I40E_NVM_RESOURCE_ID, access,
112                                             0, &time_left, NULL);
113         /* Reading the Global Device Timer */
114         gtime = rd32(hw, I40E_GLVFGEN_TIMER);
115
116         /* Store the timeout */
117         hw->nvm.hw_semaphore_timeout = I40E_MS_TO_GTIME(time_left) + gtime;
118
119         if (ret_code)
120                 i40e_debug(hw, I40E_DEBUG_NVM,
121                            "NVM acquire type %d failed time_left=%llu ret=%d aq_err=%d\n",
122                            access, time_left, ret_code, hw->aq.asq_last_status);
123
124         if (ret_code && time_left) {
125                 /* Poll until the current NVM owner times out */
126                 timeout = I40E_MS_TO_GTIME(I40E_MAX_NVM_TIMEOUT) + gtime;
127                 while ((gtime < timeout) && time_left) {
128                         i40e_msec_delay(10);
129                         gtime = rd32(hw, I40E_GLVFGEN_TIMER);
130                         ret_code = i40e_aq_request_resource(hw,
131                                                         I40E_NVM_RESOURCE_ID,
132                                                         access, 0, &time_left,
133                                                         NULL);
134                         if (ret_code == I40E_SUCCESS) {
135                                 hw->nvm.hw_semaphore_timeout =
136                                             I40E_MS_TO_GTIME(time_left) + gtime;
137                                 break;
138                         }
139                 }
140                 if (ret_code != I40E_SUCCESS) {
141                         hw->nvm.hw_semaphore_timeout = 0;
142                         i40e_debug(hw, I40E_DEBUG_NVM,
143                                    "NVM acquire timed out, wait %llu ms before trying again. status=%d aq_err=%d\n",
144                                    time_left, ret_code, hw->aq.asq_last_status);
145                 }
146         }
147
148 i40e_acquire_nvm_exit:
149         return ret_code;
150 }
151
152 /**
153  * i40e_release_nvm - Generic request for releasing the NVM ownership
154  * @hw: pointer to the HW structure
155  *
156  * This function will release NVM resource via the proper Admin Command.
157  **/
158 void i40e_release_nvm(struct i40e_hw *hw)
159 {
160         enum i40e_status_code ret_code = I40E_SUCCESS;
161         u32 total_delay = 0;
162
163         DEBUGFUNC("i40e_release_nvm");
164
165         if (hw->nvm.blank_nvm_mode)
166                 return;
167
168         ret_code = i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
169
170         /* There are some rare cases where trying to release the resource
171          * results in an Admin Queue timeout; handle them by retrying.
172          */
173         while ((ret_code == I40E_ERR_ADMIN_QUEUE_TIMEOUT) &&
174                (total_delay < hw->aq.asq_cmd_timeout)) {
175                 i40e_msec_delay(1);
176                 ret_code = i40e_aq_release_resource(hw,
177                                         I40E_NVM_RESOURCE_ID, 0, NULL);
178                 total_delay++;
179         }
180 }
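
/* Illustrative sketch (not part of the i40e base code): the typical
 * acquire/operate/release pattern that i40e_acquire_nvm() and
 * i40e_release_nvm() above are meant to bracket. The function name and
 * the word offset are hypothetical.
 */
enum i40e_status_code i40e_example_locked_word_read(struct i40e_hw *hw,
                                                    u16 *word)
{
        enum i40e_status_code ret_code;

        ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
        if (ret_code)
                return ret_code;

        /* read Shadow RAM word 0x0 while holding the NVM resource */
        ret_code = i40e_read_nvm_word_srctl(hw, 0x0, word);

        i40e_release_nvm(hw);
        return ret_code;
}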
181
182 /**
183  * i40e_poll_sr_srctl_done_bit - Polls the GLNVM_SRCTL done bit
184  * @hw: pointer to the HW structure
185  *
186  * Polls the SRCTL Shadow RAM register done bit.
187  **/
188 static enum i40e_status_code i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
189 {
190         enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
191         u32 srctl, wait_cnt;
192
193         DEBUGFUNC("i40e_poll_sr_srctl_done_bit");
194
195         /* Poll the I40E_GLNVM_SRCTL until the done bit is set */
196         for (wait_cnt = 0; wait_cnt < I40E_SRRD_SRCTL_ATTEMPTS; wait_cnt++) {
197                 srctl = rd32(hw, I40E_GLNVM_SRCTL);
198                 if (srctl & I40E_GLNVM_SRCTL_DONE_MASK) {
199                         ret_code = I40E_SUCCESS;
200                         break;
201                 }
202                 i40e_usec_delay(5);
203         }
204         if (ret_code == I40E_ERR_TIMEOUT)
205                 i40e_debug(hw, I40E_DEBUG_NVM, "Done bit in GLNVM_SRCTL not set\n");
206         return ret_code;
207 }
208
209 /**
210  * i40e_read_nvm_word - Reads an NVM word and acquires the lock if necessary
211  * @hw: pointer to the HW structure
212  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
213  * @data: word read from the Shadow RAM
214  *
215  * Reads one 16 bit word from the Shadow RAM (via the SRCTL register or AdminQ).
216  **/
217 enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
218                                          u16 *data)
219 {
220         enum i40e_status_code ret_code = I40E_SUCCESS;
221
222         ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
223         if (!ret_code) {
224                 if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
225                         ret_code = i40e_read_nvm_word_aq(hw, offset, data);
226                 } else {
227                         ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
228                 }
229                 i40e_release_nvm(hw);
230         }
231         return ret_code;
232 }
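
/* Illustrative sketch (not part of the i40e base code): reading a single
 * Shadow RAM word with the locking handled inside i40e_read_nvm_word()
 * itself, here using the software checksum word offset used later in this
 * file. The function name is hypothetical.
 */
enum i40e_status_code i40e_example_read_sw_checksum(struct i40e_hw *hw,
                                                    u16 *csum)
{
        return i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, csum);
}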
233
234 /**
235  * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking
236  * @hw: pointer to the HW structure
237  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
238  * @data: word read from the Shadow RAM
239  *
240  * Reads one 16 bit word from the Shadow RAM; the caller holds the NVM lock.
241  **/
242 enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
243                                            u16 offset,
244                                            u16 *data)
245 {
246         enum i40e_status_code ret_code = I40E_SUCCESS;
247
248         if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
249                 ret_code = i40e_read_nvm_word_aq(hw, offset, data);
250         else
251                 ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
252         return ret_code;
253 }
254
255 /**
256  * i40e_read_nvm_word_srctl - Reads Shadow RAM via SRCTL register
257  * @hw: pointer to the HW structure
258  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
259  * @data: word read from the Shadow RAM
260  *
261  * Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
262  **/
263 enum i40e_status_code i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
264                                                u16 *data)
265 {
266         enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
267         u32 sr_reg;
268
269         DEBUGFUNC("i40e_read_nvm_word_srctl");
270
271         if (offset >= hw->nvm.sr_size) {
272                 i40e_debug(hw, I40E_DEBUG_NVM,
273                            "NVM read error: Offset %d beyond Shadow RAM limit %d\n",
274                            offset, hw->nvm.sr_size);
275                 ret_code = I40E_ERR_PARAM;
276                 goto read_nvm_exit;
277         }
278
279         /* Poll the done bit first */
280         ret_code = i40e_poll_sr_srctl_done_bit(hw);
281         if (ret_code == I40E_SUCCESS) {
282                 /* Write the address and start reading */
283                 sr_reg = ((u32)offset << I40E_GLNVM_SRCTL_ADDR_SHIFT) |
284                          BIT(I40E_GLNVM_SRCTL_START_SHIFT);
285                 wr32(hw, I40E_GLNVM_SRCTL, sr_reg);
286
287                 /* Poll I40E_GLNVM_SRCTL until the done bit is set */
288                 ret_code = i40e_poll_sr_srctl_done_bit(hw);
289                 if (ret_code == I40E_SUCCESS) {
290                         sr_reg = rd32(hw, I40E_GLNVM_SRDATA);
291                         *data = (u16)((sr_reg &
292                                        I40E_GLNVM_SRDATA_RDDATA_MASK)
293                                     >> I40E_GLNVM_SRDATA_RDDATA_SHIFT);
294                 }
295         }
296         if (ret_code != I40E_SUCCESS)
297                 i40e_debug(hw, I40E_DEBUG_NVM,
298                            "NVM read error: Couldn't access Shadow RAM address: 0x%x\n",
299                            offset);
300
301 read_nvm_exit:
302         return ret_code;
303 }
304
305 /**
306  * i40e_read_nvm_word_aq - Reads Shadow RAM via AQ
307  * @hw: pointer to the HW structure
308  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
309  * @data: word read from the Shadow RAM
310  *
311  * Reads one 16 bit word from the Shadow RAM using the AdminQ.
312  **/
313 enum i40e_status_code i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
314                                             u16 *data)
315 {
316         enum i40e_status_code ret_code = I40E_ERR_TIMEOUT;
317
318         DEBUGFUNC("i40e_read_nvm_word_aq");
319
320         ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
321         *data = LE16_TO_CPU(*(__le16 *)data);
322
323         return ret_code;
324 }
325
326 /**
327  * __i40e_read_nvm_buffer - Reads nvm buffer, caller must acquire lock
328  * @hw: pointer to the HW structure
329  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
330  * @words: (in) number of words to read; (out) number of words actually read
331  * @data: words read from the Shadow RAM
332  *
333  * Reads 16 bit words (data buffer) from the Shadow RAM, via the SRCTL
334  * register or the AdminQ depending on the device. The caller must have
335  * already acquired the NVM resource.
336  **/
337 enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
338                                              u16 offset,
339                                              u16 *words, u16 *data)
340 {
341         enum i40e_status_code ret_code = I40E_SUCCESS;
342
343         if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
344                 ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data);
345         else
346                 ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
347         return ret_code;
348 }
349
350 /**
351  * i40e_read_nvm_buffer - Reads Shadow RAM buffer and acquires lock if necessary
352  * @hw: pointer to the HW structure
353  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
354  * @words: (in) number of words to read; (out) number of words actually read
355  * @data: words read from the Shadow RAM
356  *
357  * Reads 16 bit words (data buffer) from the Shadow RAM. When the AdminQ
358  * method is used, the buffer read is preceded by taking NVM ownership and
359  * followed by its release.
360  **/
361 enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
362                                            u16 *words, u16 *data)
363 {
364         enum i40e_status_code ret_code = I40E_SUCCESS;
365
366         if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
367                 ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
368                 if (!ret_code) {
369                         ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
370                                                          data);
371                         i40e_release_nvm(hw);
372                 }
373         } else {
374                 ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
375         }
376         return ret_code;
377 }
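
/* Illustrative sketch (not part of the i40e base code): the in/out
 * semantics of the words argument of i40e_read_nvm_buffer() above. The
 * offset, length and function name are hypothetical.
 */
enum i40e_status_code i40e_example_read_eight_words(struct i40e_hw *hw,
                                                    u16 *buf)
{
        enum i40e_status_code ret_code;
        u16 words = 8; /* ask for 8 words starting at Shadow RAM offset 0x100 */

        ret_code = i40e_read_nvm_buffer(hw, 0x100, &words, buf);

        /* on return, words holds how many words were actually read */
        return (words == 8) ? ret_code : I40E_ERR_NVM;
}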
378
379 /**
380  * i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
381  * @hw: pointer to the HW structure
382  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
383  * @words: (in) number of words to read; (out) number of words actually read
384  * @data: words read from the Shadow RAM
385  *
386  * Reads 16 bit words (data buffer) from the Shadow RAM one word at a time
387  * via the GLNVM_SRCTL register. The caller is responsible for any NVM
388  * ownership handling that is required.
389  **/
390 enum i40e_status_code i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
391                                                  u16 *words, u16 *data)
392 {
393         enum i40e_status_code ret_code = I40E_SUCCESS;
394         u16 index, word;
395
396         DEBUGFUNC("i40e_read_nvm_buffer_srctl");
397
398         /* Loop through the selected region */
399         for (word = 0; word < *words; word++) {
400                 index = offset + word;
401                 ret_code = i40e_read_nvm_word_srctl(hw, index, &data[word]);
402                 if (ret_code != I40E_SUCCESS)
403                         break;
404         }
405
406         /* Update the number of words read from the Shadow RAM */
407         *words = word;
408
409         return ret_code;
410 }
411
412 /**
413  * i40e_read_nvm_buffer_aq - Reads Shadow RAM buffer via AQ
414  * @hw: pointer to the HW structure
415  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
416  * @words: (in) number of words to read; (out) number of words actually read
417  * @data: words read from the Shadow RAM
418  *
419  * Reads 16 bit words (data buffer) from the Shadow RAM using the
420  * i40e_read_nvm_aq() method, splitting the request so that no single AQ
421  * read crosses a sector boundary. The caller handles NVM ownership.
422  **/
423 enum i40e_status_code i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
424                                               u16 *words, u16 *data)
425 {
426         enum i40e_status_code ret_code;
427         u16 read_size = *words;
428         bool last_cmd = false;
429         u16 words_read = 0;
430         u16 i = 0;
431
432         DEBUGFUNC("i40e_read_nvm_buffer_aq");
433
434         do {
435                 /* Calculate the number of words to read in this step.
436                  * The FVL AQ does not allow reading more than one sector
437                  * (4KB) at a time or crossing sector boundaries.
438                  */
439                 if (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)
440                         read_size = min(*words,
441                                         (u16)(I40E_SR_SECTOR_SIZE_IN_WORDS -
442                                       (offset % I40E_SR_SECTOR_SIZE_IN_WORDS)));
443                 else
444                         read_size = min((*words - words_read),
445                                         I40E_SR_SECTOR_SIZE_IN_WORDS);
446
447                 /* Check if this is the last command; if so, set the proper flag */
448                 if ((words_read + read_size) >= *words)
449                         last_cmd = true;
450
451                 ret_code = i40e_read_nvm_aq(hw, 0x0, offset, read_size,
452                                             data + words_read, last_cmd);
453                 if (ret_code != I40E_SUCCESS)
454                         goto read_nvm_buffer_aq_exit;
455
456                 /* Increment counter for words already read and move offset to
457                  * new read location
458                  */
459                 words_read += read_size;
460                 offset += read_size;
461         } while (words_read < *words);
462
463         for (i = 0; i < *words; i++)
464                 data[i] = LE16_TO_CPU(((__le16 *)data)[i]);
465
466 read_nvm_buffer_aq_exit:
467         *words = words_read;
468         return ret_code;
469 }
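
/* Illustrative sketch (not part of the i40e base code): how the loop above
 * sizes a chunk so that a single AQ read never crosses a 4KB sector
 * boundary. The helper name is hypothetical.
 */
static u16 i40e_example_chunk_to_sector_end(u16 offset, u16 words)
{
        u16 to_end = I40E_SR_SECTOR_SIZE_IN_WORDS -
                     (offset % I40E_SR_SECTOR_SIZE_IN_WORDS);

        /* e.g. offset 0x7FE with 8 words requested => chunk of 2 words */
        return (words < to_end) ? words : to_end;
}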
470
471 /**
472  * i40e_read_nvm_aq - Read Shadow RAM.
473  * @hw: pointer to the HW structure.
474  * @module_pointer: module pointer location in words from the NVM beginning
475  * @offset: offset in words from module start
476  * @words: number of words to read
477  * @data: buffer for the words read from the Shadow RAM
478  * @last_command: tells the AdminQ that this is the last command
479  *
480  * Reads a 16 bit word buffer from the Shadow RAM using the admin command.
481  **/
482 enum i40e_status_code i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
483                                        u32 offset, u16 words, void *data,
484                                        bool last_command)
485 {
486         enum i40e_status_code ret_code = I40E_ERR_NVM;
487         struct i40e_asq_cmd_details cmd_details;
488
489         DEBUGFUNC("i40e_read_nvm_aq");
490
491         memset(&cmd_details, 0, sizeof(cmd_details));
492         cmd_details.wb_desc = &hw->nvm_wb_desc;
493
494         /* Here we are checking the SR limit only for the flat memory model.
495          * We cannot do it for the module-based model, as we did not acquire
496          * the NVM resource yet (we cannot get the module pointer value).
497          * Firmware will check the module-based model.
498          */
499         if ((offset + words) > hw->nvm.sr_size)
500                 i40e_debug(hw, I40E_DEBUG_NVM,
501                            "NVM write error: offset %d beyond Shadow RAM limit %d\n",
502                            (offset + words), hw->nvm.sr_size);
503         else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
504                 /* We can read only up to 4KB (one sector) in one AQ read */
505                 i40e_debug(hw, I40E_DEBUG_NVM,
506                            "NVM write fail error: tried to write %d words, limit is %d.\n",
507                            words, I40E_SR_SECTOR_SIZE_IN_WORDS);
508         else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
509                  != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
510                 /* A single read cannot spread over two sectors */
511                 i40e_debug(hw, I40E_DEBUG_NVM,
512                            "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
513                            offset, words);
514         else
515                 ret_code = i40e_aq_read_nvm(hw, module_pointer,
516                                             2 * offset,  /*bytes*/
517                                             2 * words,   /*bytes*/
518                                             data, last_command, &cmd_details);
519
520         return ret_code;
521 }
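
/* Illustrative sketch (not part of the i40e base code): the word-to-byte
 * conversion done above when the request is handed to i40e_aq_read_nvm();
 * this file works in 16-bit words while the AQ command takes byte offsets
 * and byte lengths. The helper name is hypothetical.
 */
static void i40e_example_words_to_bytes(u32 offset_words, u16 len_words,
                                        u32 *offset_bytes, u16 *len_bytes)
{
        *offset_bytes = 2 * offset_words;
        *len_bytes = 2 * len_words;
}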
522
523 /**
524  * i40e_write_nvm_aq - Writes Shadow RAM.
525  * @hw: pointer to the HW structure.
526  * @module_pointer: module pointer location in words from the NVM beginning
527  * @offset: offset in words from module start
528  * @words: number of words to write
529  * @data: buffer with words to write to the Shadow RAM
530  * @last_command: tells the AdminQ that this is the last command
531  *
532  * Writes a 16 bit word buffer to the Shadow RAM using the admin command.
533  **/
534 enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
535                                         u32 offset, u16 words, void *data,
536                                         bool last_command)
537 {
538         enum i40e_status_code ret_code = I40E_ERR_NVM;
539         struct i40e_asq_cmd_details cmd_details;
540
541         DEBUGFUNC("i40e_write_nvm_aq");
542
543         memset(&cmd_details, 0, sizeof(cmd_details));
544         cmd_details.wb_desc = &hw->nvm_wb_desc;
545
546         /* Here we are checking the SR limit only for the flat memory model.
547          * We cannot do it for the module-based model, as we did not acquire
548          * the NVM resource yet (we cannot get the module pointer value).
549          * Firmware will check the module-based model.
550          */
551         if ((offset + words) > hw->nvm.sr_size)
552                 DEBUGOUT("NVM write error: offset beyond Shadow RAM limit.\n");
553         else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
554                 /* We can write only up to 4KB (one sector) in one AQ write */
555                 DEBUGOUT("NVM write fail error: cannot write more than 4KB in a single write.\n");
556         else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
557                  != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
558                 /* A single write cannot spread over two sectors */
559                 DEBUGOUT("NVM write error: cannot spread over two sectors in a single write.\n");
560         else
561                 ret_code = i40e_aq_update_nvm(hw, module_pointer,
562                                               2 * offset,  /*bytes*/
563                                               2 * words,   /*bytes*/
564                                               data, last_command, &cmd_details);
565
566         return ret_code;
567 }
568
569 /**
570  * __i40e_write_nvm_word - Writes Shadow RAM word
571  * @hw: pointer to the HW structure
572  * @offset: offset of the Shadow RAM word to write
573  * @data: word to write to the Shadow RAM
574  *
575  * Writes a 16 bit word to the SR using the i40e_write_nvm_aq() method.
576  * NVM ownership has to be acquired and released (on ARQ completion event
577  * reception) by the caller. To commit the SR to NVM, the update-checksum
578  * function should be called.
579  **/
580 enum i40e_status_code __i40e_write_nvm_word(struct i40e_hw *hw, u32 offset,
581                                             void *data)
582 {
583         DEBUGFUNC("i40e_write_nvm_word");
584
585         *((__le16 *)data) = CPU_TO_LE16(*((u16 *)data));
586
587         /* Value 0x00 below means that we treat SR as a flat mem */
588         return i40e_write_nvm_aq(hw, 0x00, offset, 1, data, false);
589 }
590
591 /**
592  * __i40e_write_nvm_buffer - Writes Shadow RAM buffer
593  * @hw: pointer to the HW structure
594  * @module_pointer: module pointer location in words from the NVM beginning
595  * @offset: offset of the Shadow RAM buffer to write
596  * @words: number of words to write
597  * @data: words to write to the Shadow RAM
598  *
599  * Writes a 16 bit word buffer to the Shadow RAM using the admin command.
600  * NVM ownership must be acquired before calling this function and released
601  * by the caller on ARQ completion event reception. To commit the SR to NVM,
602  * the update-checksum function should be called.
603  **/
604 enum i40e_status_code __i40e_write_nvm_buffer(struct i40e_hw *hw,
605                                               u8 module_pointer, u32 offset,
606                                               u16 words, void *data)
607 {
608         __le16 *le_word_ptr = (__le16 *)data;
609         u16 *word_ptr = (u16 *)data;
610         u32 i = 0;
611
612         DEBUGFUNC("i40e_write_nvm_buffer");
613
614         for (i = 0; i < words; i++)
615                 le_word_ptr[i] = CPU_TO_LE16(word_ptr[i]);
616
617         /* Here we will only write one buffer as the size of the modules
618          * mirrored in the Shadow RAM is always less than 4K.
619          */
620         return i40e_write_nvm_aq(hw, module_pointer, offset, words,
621                                  data, false);
622 }
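
/* Illustrative sketch (not part of the i40e base code): the caller-side
 * sequence the write helpers above assume, heavily simplified and assuming
 * the usual prototypes from i40e_prototype.h. In the real driver the NVM
 * resource is released only on the ARQ completion event, not synchronously
 * as shown here; the offset and function name are hypothetical.
 */
enum i40e_status_code i40e_example_write_one_word(struct i40e_hw *hw, u16 value)
{
        enum i40e_status_code ret_code;
        u16 word = value;

        ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
        if (ret_code)
                return ret_code;

        ret_code = __i40e_write_nvm_word(hw, 0x200, &word);
        if (!ret_code)
                ret_code = i40e_update_nvm_checksum(hw);

        i40e_release_nvm(hw);
        return ret_code;
}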
623
624 /**
625  * i40e_calc_nvm_checksum - Calculates and returns the checksum
626  * @hw: pointer to hardware structure
627  * @checksum: pointer to the checksum
628  *
629  * This function calculates a SW checksum that covers the whole 64kB shadow RAM
630  * except the VPD and PCIe ALT Auto-load modules. The structure and size of the
631  * VPD area are customer specific and unknown, so this function skips the
632  * maximum possible size of the VPD area (1kB).
633  **/
634 enum i40e_status_code i40e_calc_nvm_checksum(struct i40e_hw *hw, u16 *checksum)
635 {
636         enum i40e_status_code ret_code = I40E_SUCCESS;
637         struct i40e_virt_mem vmem;
638         u16 pcie_alt_module = 0;
639         u16 checksum_local = 0;
640         u16 vpd_module = 0;
641         u16 *data;
642         u16 i = 0;
643
644         DEBUGFUNC("i40e_calc_nvm_checksum");
645
646         ret_code = i40e_allocate_virt_mem(hw, &vmem,
647                                     I40E_SR_SECTOR_SIZE_IN_WORDS * sizeof(u16));
648         if (ret_code)
649                 goto i40e_calc_nvm_checksum_exit;
650         data = (u16 *)vmem.va;
651
652         /* read pointer to VPD area */
653         ret_code = __i40e_read_nvm_word(hw, I40E_SR_VPD_PTR,
654                                         &vpd_module);
655         if (ret_code != I40E_SUCCESS) {
656                 ret_code = I40E_ERR_NVM_CHECKSUM;
657                 goto i40e_calc_nvm_checksum_exit;
658         }
659
660         /* read pointer to PCIe Alt Auto-load module */
661         ret_code = __i40e_read_nvm_word(hw,
662                                         I40E_SR_PCIE_ALT_AUTO_LOAD_PTR,
663                                         &pcie_alt_module);
664         if (ret_code != I40E_SUCCESS) {
665                 ret_code = I40E_ERR_NVM_CHECKSUM;
666                 goto i40e_calc_nvm_checksum_exit;
667         }
668
669         /* Calculate SW checksum that covers the whole 64kB shadow RAM
670          * except the VPD and PCIe ALT Auto-load modules
671          */
672         for (i = 0; i < hw->nvm.sr_size; i++) {
673                 /* Read SR page */
674                 if ((i % I40E_SR_SECTOR_SIZE_IN_WORDS) == 0) {
675                         u16 words = I40E_SR_SECTOR_SIZE_IN_WORDS;
676
677                         ret_code = __i40e_read_nvm_buffer(hw, i, &words, data);
678                         if (ret_code != I40E_SUCCESS) {
679                                 ret_code = I40E_ERR_NVM_CHECKSUM;
680                                 goto i40e_calc_nvm_checksum_exit;
681                         }
682                 }
683
684                 /* Skip Checksum word */
685                 if (i == I40E_SR_SW_CHECKSUM_WORD)
686                         continue;
687                 /* Skip VPD module (convert byte size to word count) */
688                 if ((i >= (u32)vpd_module) &&
689                     (i < ((u32)vpd_module +
690                      (I40E_SR_VPD_MODULE_MAX_SIZE / 2)))) {
691                         continue;
692                 }
693                 /* Skip PCIe ALT module (convert byte size to word count) */
694                 if ((i >= (u32)pcie_alt_module) &&
695                     (i < ((u32)pcie_alt_module +
696                      (I40E_SR_PCIE_ALT_MODULE_MAX_SIZE / 2)))) {
697                         continue;
698                 }
699
700                 checksum_local += data[i % I40E_SR_SECTOR_SIZE_IN_WORDS];
701         }
702
703         *checksum = (u16)I40E_SR_SW_CHECKSUM_BASE - checksum_local;
704
705 i40e_calc_nvm_checksum_exit:
706         i40e_free_virt_mem(hw, &vmem);
707         return ret_code;
708 }
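
/* Illustrative sketch (not part of the i40e base code): the checksum
 * relation used above, applied to a word array from which the checksum word
 * and the skipped modules have already been removed. The helper is
 * hypothetical and ignores the sector-by-sector reading done above.
 */
static u16 i40e_example_sw_checksum(const u16 *words, u32 count)
{
        u16 sum = 0;
        u32 i;

        for (i = 0; i < count; i++)
                sum += words[i];

        return (u16)I40E_SR_SW_CHECKSUM_BASE - sum;
}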
709
710 /**
711  * i40e_update_nvm_checksum - Updates the NVM checksum
712  * @hw: pointer to hardware structure
713  *
714  * NVM ownership must be acquired before calling this function and released
715  * on ARQ completion event reception by caller.
716  * This function will commit SR to NVM.
717  **/
718 enum i40e_status_code i40e_update_nvm_checksum(struct i40e_hw *hw)
719 {
720         enum i40e_status_code ret_code = I40E_SUCCESS;
721         u16 checksum;
722         __le16 le_sum;
723
724         DEBUGFUNC("i40e_update_nvm_checksum");
725
726         ret_code = i40e_calc_nvm_checksum(hw, &checksum);
727         le_sum = CPU_TO_LE16(checksum);
728         if (ret_code == I40E_SUCCESS)
729                 ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
730                                              1, &le_sum, true);
731
732         return ret_code;
733 }
734
735 /**
736  * i40e_validate_nvm_checksum - Validate EEPROM checksum
737  * @hw: pointer to hardware structure
738  * @checksum: calculated checksum
739  *
740  * Performs the checksum calculation and validates the NVM SW checksum. If the
741  * caller does not need the checksum, the pointer can be NULL.
742  **/
743 enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
744                                                  u16 *checksum)
745 {
746         enum i40e_status_code ret_code = I40E_SUCCESS;
747         u16 checksum_sr = 0;
748         u16 checksum_local = 0;
749
750         DEBUGFUNC("i40e_validate_nvm_checksum");
751
752         /* acquire_nvm provides an exclusive NVM lock to synchronize access
753          * across PFs. X710 uses i40e_read_nvm_word_srctl, which polls for the
754          * done bit twice (first to be able to write the address to the
755          * I40E_GLNVM_SRCTL register, second to read the data from
756          * I40E_GLNVM_SRDATA). One PF could see the done bit and try to write
757          * an address while another interprets it as a good time to read data,
758          * causing invalid data to be read.
759          */
760         ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
761         if (!ret_code) {
762                 ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
763                 i40e_release_nvm(hw);
764                 if (ret_code != I40E_SUCCESS)
765                         goto i40e_validate_nvm_checksum_exit;
766         } else {
767                 goto i40e_validate_nvm_checksum_exit;
768         }
769
770         i40e_read_nvm_word(hw, I40E_SR_SW_CHECKSUM_WORD, &checksum_sr);
771
772         /* Verify read checksum from EEPROM is the same as
773          * calculated checksum
774          */
775         if (checksum_local != checksum_sr)
776                 ret_code = I40E_ERR_NVM_CHECKSUM;
777
778         /* If the user cares, return the calculated checksum */
779         if (checksum)
780                 *checksum = checksum_local;
781
782 i40e_validate_nvm_checksum_exit:
783         return ret_code;
784 }
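
/* Illustrative sketch (not part of the i40e base code): a caller that only
 * cares whether the Shadow RAM checksum is valid and therefore passes NULL,
 * as the function comment above allows. The function name is hypothetical.
 */
bool i40e_example_nvm_checksum_ok(struct i40e_hw *hw)
{
        return i40e_validate_nvm_checksum(hw, NULL) == I40E_SUCCESS;
}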
785
786 STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
787                                                     struct i40e_nvm_access *cmd,
788                                                     u8 *bytes, int *perrno);
789 STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
790                                                     struct i40e_nvm_access *cmd,
791                                                     u8 *bytes, int *perrno);
792 STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
793                                                     struct i40e_nvm_access *cmd,
794                                                     u8 *bytes, int *perrno);
795 STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
796                                                     struct i40e_nvm_access *cmd,
797                                                     int *perrno);
798 STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
799                                                    struct i40e_nvm_access *cmd,
800                                                    int *perrno);
801 STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
802                                                    struct i40e_nvm_access *cmd,
803                                                    u8 *bytes, int *perrno);
804 STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
805                                                   struct i40e_nvm_access *cmd,
806                                                   u8 *bytes, int *perrno);
807 STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
808                                                  struct i40e_nvm_access *cmd,
809                                                  u8 *bytes, int *perrno);
810 STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
811                                                     struct i40e_nvm_access *cmd,
812                                                     u8 *bytes, int *perrno);
813 STATIC INLINE u8 i40e_nvmupd_get_module(u32 val)
814 {
815         return (u8)(val & I40E_NVM_MOD_PNT_MASK);
816 }
817 STATIC INLINE u8 i40e_nvmupd_get_transaction(u32 val)
818 {
819         return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
820 }
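
/* Illustrative sketch (not part of the i40e base code): how a config word
 * from an NVM update request splits into a module pointer and a transaction
 * type using the two helpers above. The function name is hypothetical.
 */
static void i40e_example_decode_config(u32 config, u8 *module, u8 *transaction)
{
        *module = i40e_nvmupd_get_module(config);           /* low bits */
        *transaction = i40e_nvmupd_get_transaction(config); /* shifted field */
}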
821
822 STATIC const char *i40e_nvm_update_state_str[] = {
823         "I40E_NVMUPD_INVALID",
824         "I40E_NVMUPD_READ_CON",
825         "I40E_NVMUPD_READ_SNT",
826         "I40E_NVMUPD_READ_LCB",
827         "I40E_NVMUPD_READ_SA",
828         "I40E_NVMUPD_WRITE_ERA",
829         "I40E_NVMUPD_WRITE_CON",
830         "I40E_NVMUPD_WRITE_SNT",
831         "I40E_NVMUPD_WRITE_LCB",
832         "I40E_NVMUPD_WRITE_SA",
833         "I40E_NVMUPD_CSUM_CON",
834         "I40E_NVMUPD_CSUM_SA",
835         "I40E_NVMUPD_CSUM_LCB",
836         "I40E_NVMUPD_STATUS",
837         "I40E_NVMUPD_EXEC_AQ",
838         "I40E_NVMUPD_GET_AQ_RESULT",
839 };
840
841 /**
842  * i40e_nvmupd_command - Process an NVM update command
843  * @hw: pointer to hardware structure
844  * @cmd: pointer to nvm update command
845  * @bytes: pointer to the data buffer
846  * @perrno: pointer to return error code
847  *
848  * Dispatches command depending on what update state is current
849  **/
850 enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
851                                           struct i40e_nvm_access *cmd,
852                                           u8 *bytes, int *perrno)
853 {
854         enum i40e_status_code status;
855         enum i40e_nvmupd_cmd upd_cmd;
856
857         DEBUGFUNC("i40e_nvmupd_command");
858
859         /* assume success */
860         *perrno = 0;
861
862         /* early check for status command and debug msgs */
863         upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
864
865         i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_done %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
866                    i40e_nvm_update_state_str[upd_cmd],
867                    hw->nvmupd_state,
868                    hw->nvm_release_on_done, hw->nvm_wait_opcode,
869                    cmd->command, cmd->config, cmd->offset, cmd->data_size);
870
871         if (upd_cmd == I40E_NVMUPD_INVALID) {
872                 *perrno = -EFAULT;
873                 i40e_debug(hw, I40E_DEBUG_NVM,
874                            "i40e_nvmupd_validate_command returns %d errno %d\n",
875                            upd_cmd, *perrno);
876         }
877
878         /* a status request returns immediately rather than
879          * going into the state machine
880          */
881         if (upd_cmd == I40E_NVMUPD_STATUS) {
882                 if (!cmd->data_size) {
883                         *perrno = -EFAULT;
884                         return I40E_ERR_BUF_TOO_SHORT;
885                 }
886
887                 bytes[0] = hw->nvmupd_state;
888
889                 if (cmd->data_size >= 4) {
890                         bytes[1] = 0;
891                         *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
892                 }
893
894                 /* Clear error status on read */
895                 if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
896                         hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
897
898                 return I40E_SUCCESS;
899         }
900
901         /* Clear the error status even if it has not been read, and log it */
902         if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
903                 i40e_debug(hw, I40E_DEBUG_NVM,
904                            "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
905                 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
906         }
907
908         /* Acquire lock to prevent race condition where adminq_task
909          * can execute after i40e_nvmupd_nvm_read/write but before state
910          * variables (nvm_wait_opcode, nvm_release_on_done) are updated
911          */
912         i40e_acquire_spinlock(&hw->aq.arq_spinlock);
913         switch (hw->nvmupd_state) {
914         case I40E_NVMUPD_STATE_INIT:
915                 status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
916                 break;
917
918         case I40E_NVMUPD_STATE_READING:
919                 status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
920                 break;
921
922         case I40E_NVMUPD_STATE_WRITING:
923                 status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
924                 break;
925
926         case I40E_NVMUPD_STATE_INIT_WAIT:
927         case I40E_NVMUPD_STATE_WRITE_WAIT:
928                 /* if we need to stop waiting for an event, clear the wait
929                  * info and break out so the spinlock is released below */
930                 if (cmd->offset == 0xffff) {
931                         i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
932                         status = I40E_SUCCESS;
933                         break;
934                 }
935
936                 status = I40E_ERR_NOT_READY;
937                 *perrno = -EBUSY;
938                 break;
939
940         default:
941                 /* invalid state, should never happen */
942                 i40e_debug(hw, I40E_DEBUG_NVM,
943                            "NVMUPD: no such state %d\n", hw->nvmupd_state);
944                 status = I40E_NOT_SUPPORTED;
945                 *perrno = -ESRCH;
946                 break;
947         }
948         i40e_release_spinlock(&hw->aq.arq_spinlock);
949         return status;
950 }
951
952 /**
953  * i40e_nvmupd_state_init - Handle NVM update state Init
954  * @hw: pointer to hardware structure
955  * @cmd: pointer to nvm update command buffer
956  * @bytes: pointer to the data buffer
957  * @perrno: pointer to return error code
958  *
959  * Process legitimate commands of the Init state and conditionally set next
960  * state. Reject all other commands.
961  **/
962 STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
963                                                     struct i40e_nvm_access *cmd,
964                                                     u8 *bytes, int *perrno)
965 {
966         enum i40e_status_code status = I40E_SUCCESS;
967         enum i40e_nvmupd_cmd upd_cmd;
968
969         DEBUGFUNC("i40e_nvmupd_state_init");
970
971         upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
972
973         switch (upd_cmd) {
974         case I40E_NVMUPD_READ_SA:
975                 status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
976                 if (status) {
977                         *perrno = i40e_aq_rc_to_posix(status,
978                                                      hw->aq.asq_last_status);
979                 } else {
980                         status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
981                         i40e_release_nvm(hw);
982                 }
983                 break;
984
985         case I40E_NVMUPD_READ_SNT:
986                 status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
987                 if (status) {
988                         *perrno = i40e_aq_rc_to_posix(status,
989                                                      hw->aq.asq_last_status);
990                 } else {
991                         status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
992                         if (status)
993                                 i40e_release_nvm(hw);
994                         else
995                                 hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
996                 }
997                 break;
998
999         case I40E_NVMUPD_WRITE_ERA:
1000                 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1001                 if (status) {
1002                         *perrno = i40e_aq_rc_to_posix(status,
1003                                                      hw->aq.asq_last_status);
1004                 } else {
1005                         status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
1006                         if (status) {
1007                                 i40e_release_nvm(hw);
1008                         } else {
1009                                 hw->nvm_release_on_done = true;
1010                                 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
1011                                 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1012                         }
1013                 }
1014                 break;
1015
1016         case I40E_NVMUPD_WRITE_SA:
1017                 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1018                 if (status) {
1019                         *perrno = i40e_aq_rc_to_posix(status,
1020                                                      hw->aq.asq_last_status);
1021                 } else {
1022                         status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1023                         if (status) {
1024                                 i40e_release_nvm(hw);
1025                         } else {
1026                                 hw->nvm_release_on_done = true;
1027                                 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1028                                 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1029                         }
1030                 }
1031                 break;
1032
1033         case I40E_NVMUPD_WRITE_SNT:
1034                 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1035                 if (status) {
1036                         *perrno = i40e_aq_rc_to_posix(status,
1037                                                      hw->aq.asq_last_status);
1038                 } else {
1039                         status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1040                         if (status) {
1041                                 i40e_release_nvm(hw);
1042                         } else {
1043                                 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1044                                 hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1045                         }
1046                 }
1047                 break;
1048
1049         case I40E_NVMUPD_CSUM_SA:
1050                 status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1051                 if (status) {
1052                         *perrno = i40e_aq_rc_to_posix(status,
1053                                                      hw->aq.asq_last_status);
1054                 } else {
1055                         status = i40e_update_nvm_checksum(hw);
1056                         if (status) {
1057                                 *perrno = hw->aq.asq_last_status ?
1058                                    i40e_aq_rc_to_posix(status,
1059                                                        hw->aq.asq_last_status) :
1060                                    -EIO;
1061                                 i40e_release_nvm(hw);
1062                         } else {
1063                                 hw->nvm_release_on_done = true;
1064                                 hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1065                                 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1066                         }
1067                 }
1068                 break;
1069
1070         case I40E_NVMUPD_EXEC_AQ:
1071                 status = i40e_nvmupd_exec_aq(hw, cmd, bytes, perrno);
1072                 break;
1073
1074         case I40E_NVMUPD_GET_AQ_RESULT:
1075                 status = i40e_nvmupd_get_aq_result(hw, cmd, bytes, perrno);
1076                 break;
1077
1078         default:
1079                 i40e_debug(hw, I40E_DEBUG_NVM,
1080                            "NVMUPD: bad cmd %s in init state\n",
1081                            i40e_nvm_update_state_str[upd_cmd]);
1082                 status = I40E_ERR_NVM;
1083                 *perrno = -ESRCH;
1084                 break;
1085         }
1086         return status;
1087 }
1088
1089 /**
1090  * i40e_nvmupd_state_reading - Handle NVM update state Reading
1091  * @hw: pointer to hardware structure
1092  * @cmd: pointer to nvm update command buffer
1093  * @bytes: pointer to the data buffer
1094  * @perrno: pointer to return error code
1095  *
1096  * NVM ownership is already held.  Process legitimate commands and set any
1097  * change in state; reject all other commands.
1098  **/
1099 STATIC enum i40e_status_code i40e_nvmupd_state_reading(struct i40e_hw *hw,
1100                                                     struct i40e_nvm_access *cmd,
1101                                                     u8 *bytes, int *perrno)
1102 {
1103         enum i40e_status_code status = I40E_SUCCESS;
1104         enum i40e_nvmupd_cmd upd_cmd;
1105
1106         DEBUGFUNC("i40e_nvmupd_state_reading");
1107
1108         upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1109
1110         switch (upd_cmd) {
1111         case I40E_NVMUPD_READ_SA:
1112         case I40E_NVMUPD_READ_CON:
1113                 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1114                 break;
1115
1116         case I40E_NVMUPD_READ_LCB:
1117                 status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
1118                 i40e_release_nvm(hw);
1119                 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1120                 break;
1121
1122         default:
1123                 i40e_debug(hw, I40E_DEBUG_NVM,
1124                            "NVMUPD: bad cmd %s in reading state.\n",
1125                            i40e_nvm_update_state_str[upd_cmd]);
1126                 status = I40E_NOT_SUPPORTED;
1127                 *perrno = -ESRCH;
1128                 break;
1129         }
1130         return status;
1131 }
1132
1133 /**
1134  * i40e_nvmupd_state_writing - Handle NVM update state Writing
1135  * @hw: pointer to hardware structure
1136  * @cmd: pointer to nvm update command buffer
1137  * @bytes: pointer to the data buffer
1138  * @perrno: pointer to return error code
1139  *
1140  * NVM ownership is already held.  Process legitimate commands and set any
1141  * change in state; reject all other commands
1142  **/
1143 STATIC enum i40e_status_code i40e_nvmupd_state_writing(struct i40e_hw *hw,
1144                                                     struct i40e_nvm_access *cmd,
1145                                                     u8 *bytes, int *perrno)
1146 {
1147         enum i40e_status_code status = I40E_SUCCESS;
1148         enum i40e_nvmupd_cmd upd_cmd;
1149         bool retry_attempt = false;
1150
1151         DEBUGFUNC("i40e_nvmupd_state_writing");
1152
1153         upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
1154
1155 retry:
1156         switch (upd_cmd) {
1157         case I40E_NVMUPD_WRITE_CON:
1158                 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1159                 if (!status) {
1160                         hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1161                         hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1162                 }
1163                 break;
1164
1165         case I40E_NVMUPD_WRITE_LCB:
1166                 status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
1167                 if (status) {
1168                         *perrno = hw->aq.asq_last_status ?
1169                                    i40e_aq_rc_to_posix(status,
1170                                                        hw->aq.asq_last_status) :
1171                                    -EIO;
1172                         hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1173                 } else {
1174                         hw->nvm_release_on_done = true;
1175                         hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1176                         hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1177                 }
1178                 break;
1179
1180         case I40E_NVMUPD_CSUM_CON:
1181                 /* Assumes the caller has acquired the nvm */
1182                 status = i40e_update_nvm_checksum(hw);
1183                 if (status) {
1184                         *perrno = hw->aq.asq_last_status ?
1185                                    i40e_aq_rc_to_posix(status,
1186                                                        hw->aq.asq_last_status) :
1187                                    -EIO;
1188                         hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1189                 } else {
1190                         hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1191                         hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
1192                 }
1193                 break;
1194
1195         case I40E_NVMUPD_CSUM_LCB:
1196                 /* Assumes the caller has acquired the nvm */
1197                 status = i40e_update_nvm_checksum(hw);
1198                 if (status) {
1199                         *perrno = hw->aq.asq_last_status ?
1200                                    i40e_aq_rc_to_posix(status,
1201                                                        hw->aq.asq_last_status) :
1202                                    -EIO;
1203                         hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1204                 } else {
1205                         hw->nvm_release_on_done = true;
1206                         hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
1207                         hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1208                 }
1209                 break;
1210
1211         default:
1212                 i40e_debug(hw, I40E_DEBUG_NVM,
1213                            "NVMUPD: bad cmd %s in writing state.\n",
1214                            i40e_nvm_update_state_str[upd_cmd]);
1215                 status = I40E_NOT_SUPPORTED;
1216                 *perrno = -ESRCH;
1217                 break;
1218         }
1219
1220         /* In some circumstances, a multi-write transaction takes longer
1221          * than the default 3 minute timeout on the write semaphore.  If
1222          * the write failed with an EBUSY status, this is likely the problem,
1223          * so here we try to reacquire the semaphore then retry the write.
1224          * We only do one retry, then give up.
1225          */
1226         if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
1227             !retry_attempt) {
1228                 enum i40e_status_code old_status = status;
1229                 u32 old_asq_status = hw->aq.asq_last_status;
1230                 u32 gtime;
1231
1232                 gtime = rd32(hw, I40E_GLVFGEN_TIMER);
1233                 if (gtime >= hw->nvm.hw_semaphore_timeout) {
1234                         i40e_debug(hw, I40E_DEBUG_ALL,
1235                                    "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
1236                                    gtime, hw->nvm.hw_semaphore_timeout);
1237                         i40e_release_nvm(hw);
1238                         status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
1239                         if (status) {
1240                                 i40e_debug(hw, I40E_DEBUG_ALL,
1241                                            "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
1242                                            hw->aq.asq_last_status);
1243                                 status = old_status;
1244                                 hw->aq.asq_last_status = old_asq_status;
1245                         } else {
1246                                 retry_attempt = true;
1247                                 goto retry;
1248                         }
1249                 }
1250         }
1251
1252         return status;
1253 }
1254
1255 /**
1256  * i40e_nvmupd_check_wait_event - handle NVM update operation events
1257  * @hw: pointer to the hardware structure
1258  * @opcode: the event that just happened
1259  **/
1260 void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
1261 {
1262         if (opcode == hw->nvm_wait_opcode) {
1263
1264                 i40e_debug(hw, I40E_DEBUG_NVM,
1265                            "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
1266                 if (hw->nvm_release_on_done) {
1267                         i40e_release_nvm(hw);
1268                         hw->nvm_release_on_done = false;
1269                 }
1270                 hw->nvm_wait_opcode = 0;
1271
1272                 if (hw->aq.arq_last_status) {
1273                         hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
1274                         return;
1275                 }
1276
1277                 switch (hw->nvmupd_state) {
1278                 case I40E_NVMUPD_STATE_INIT_WAIT:
1279                         hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
1280                         break;
1281
1282                 case I40E_NVMUPD_STATE_WRITE_WAIT:
1283                         hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
1284                         break;
1285
1286                 default:
1287                         break;
1288                 }
1289         }
1290 }
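
/*
 * Editor's note: illustrative sketch only, not part of the driver.  A
 * typical caller invokes i40e_nvmupd_check_wait_event() from the admin
 * receive queue clean-up path, passing the opcode of the completion that
 * was just received.  The helper and field names below
 * (i40e_clean_arq_element(), e.desc.opcode) are assumed from the shared
 * code headers; adapt them to the local ARQ handling if they differ:
 *
 *     struct i40e_arq_event_info e;   // msg_buf/buf_len set up elsewhere
 *     u16 pending;
 *
 *     if (i40e_clean_arq_element(hw, &e, &pending) == I40E_SUCCESS)
 *             i40e_nvmupd_check_wait_event(hw,
 *                     LE16_TO_CPU(e.desc.opcode));
 */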
1291
1292 /**
1293  * i40e_nvmupd_validate_command - Validate given command
1294  * @hw: pointer to hardware structure
1295  * @cmd: pointer to nvm update command buffer
1296  * @perrno: pointer to return error code
1297  *
1298  * Return one of the valid command types or I40E_NVMUPD_INVALID
1299  **/
1300 STATIC enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
1301                                                     struct i40e_nvm_access *cmd,
1302                                                     int *perrno)
1303 {
1304         enum i40e_nvmupd_cmd upd_cmd;
1305         u8 module, transaction;
1306
1307         DEBUGFUNC("i40e_nvmupd_validate_command");
1308
1309         /* anything that doesn't match a recognized case is an error */
1310         upd_cmd = I40E_NVMUPD_INVALID;
1311
1312         transaction = i40e_nvmupd_get_transaction(cmd->config);
1313         module = i40e_nvmupd_get_module(cmd->config);
1314
1315         /* limits on data size */
1316         if ((cmd->data_size < 1) ||
1317             (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
1318                 i40e_debug(hw, I40E_DEBUG_NVM,
1319                            "i40e_nvmupd_validate_command data_size %d\n",
1320                            cmd->data_size);
1321                 *perrno = -EFAULT;
1322                 return I40E_NVMUPD_INVALID;
1323         }
1324
1325         switch (cmd->command) {
1326         case I40E_NVM_READ:
1327                 switch (transaction) {
1328                 case I40E_NVM_CON:
1329                         upd_cmd = I40E_NVMUPD_READ_CON;
1330                         break;
1331                 case I40E_NVM_SNT:
1332                         upd_cmd = I40E_NVMUPD_READ_SNT;
1333                         break;
1334                 case I40E_NVM_LCB:
1335                         upd_cmd = I40E_NVMUPD_READ_LCB;
1336                         break;
1337                 case I40E_NVM_SA:
1338                         upd_cmd = I40E_NVMUPD_READ_SA;
1339                         break;
1340                 case I40E_NVM_EXEC:
1341                         if (module == 0xf)
1342                                 upd_cmd = I40E_NVMUPD_STATUS;
1343                         else if (module == 0)
1344                                 upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
1345                         break;
1346                 }
1347                 break;
1348
1349         case I40E_NVM_WRITE:
1350                 switch (transaction) {
1351                 case I40E_NVM_CON:
1352                         upd_cmd = I40E_NVMUPD_WRITE_CON;
1353                         break;
1354                 case I40E_NVM_SNT:
1355                         upd_cmd = I40E_NVMUPD_WRITE_SNT;
1356                         break;
1357                 case I40E_NVM_LCB:
1358                         upd_cmd = I40E_NVMUPD_WRITE_LCB;
1359                         break;
1360                 case I40E_NVM_SA:
1361                         upd_cmd = I40E_NVMUPD_WRITE_SA;
1362                         break;
1363                 case I40E_NVM_ERA:
1364                         upd_cmd = I40E_NVMUPD_WRITE_ERA;
1365                         break;
1366                 case I40E_NVM_CSUM:
1367                         upd_cmd = I40E_NVMUPD_CSUM_CON;
1368                         break;
1369                 case (I40E_NVM_CSUM|I40E_NVM_SA):
1370                         upd_cmd = I40E_NVMUPD_CSUM_SA;
1371                         break;
1372                 case (I40E_NVM_CSUM|I40E_NVM_LCB):
1373                         upd_cmd = I40E_NVMUPD_CSUM_LCB;
1374                         break;
1375                 case I40E_NVM_EXEC:
1376                         if (module == 0)
1377                                 upd_cmd = I40E_NVMUPD_EXEC_AQ;
1378                         break;
1379                 }
1380                 break;
1381         }
1382
1383         return upd_cmd;
1384 }
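
/*
 * Editor's note: illustrative sketch only.  cmd->config carries both the
 * transaction type and the module pointer; the exact bit layout is given
 * by the I40E_NVM_* masks in the adminq command header and is decoded
 * here by i40e_nvmupd_get_transaction() and i40e_nvmupd_get_module().
 * Assuming a hypothetical pack_config() helper that builds that word, a
 * single-shot Shadow RAM read of 'len' bytes at 'off' would be described
 * as:
 *
 *     struct i40e_nvm_access cmd;
 *
 *     cmd.command   = I40E_NVM_READ;
 *     cmd.config    = pack_config(I40E_NVM_SA, 0);   // hypothetical
 *     cmd.offset    = off;
 *     cmd.data_size = len;
 *
 *     // i40e_nvmupd_validate_command(hw, &cmd, &err) then returns
 *     // I40E_NVMUPD_READ_SA for this request.
 */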
1385
1386 /**
1387  * i40e_nvmupd_exec_aq - Run an AQ command
1388  * @hw: pointer to hardware structure
1389  * @cmd: pointer to nvm update command buffer
1390  * @bytes: pointer to the data buffer
1391  * @perrno: pointer to return error code
1392  *
1393  * cmd structure contains identifiers and data buffer
1394  **/
1395 STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
1396                                                  struct i40e_nvm_access *cmd,
1397                                                  u8 *bytes, int *perrno)
1398 {
1399         struct i40e_asq_cmd_details cmd_details;
1400         enum i40e_status_code status;
1401         struct i40e_aq_desc *aq_desc;
1402         u32 buff_size = 0;
1403         u8 *buff = NULL;
1404         u32 aq_desc_len;
1405         u32 aq_data_len;
1406
1407         i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1408         memset(&cmd_details, 0, sizeof(cmd_details));
1409         cmd_details.wb_desc = &hw->nvm_wb_desc;
1410
1411         aq_desc_len = sizeof(struct i40e_aq_desc);
1412         memset(&hw->nvm_wb_desc, 0, aq_desc_len);
1413
1414         /* get the aq descriptor */
1415         if (cmd->data_size < aq_desc_len) {
1416                 i40e_debug(hw, I40E_DEBUG_NVM,
1417                            "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
1418                            cmd->data_size, aq_desc_len);
1419                 *perrno = -EINVAL;
1420                 return I40E_ERR_PARAM;
1421         }
1422         aq_desc = (struct i40e_aq_desc *)bytes;
1423
1424         /* if a data buffer is needed, make sure it's ready */
1425         aq_data_len = cmd->data_size - aq_desc_len;
1426         buff_size = max(aq_data_len, (u32)LE16_TO_CPU(aq_desc->datalen));
1427         if (buff_size) {
1428                 if (!hw->nvm_buff.va) {
1429                         status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
1430                                                         hw->aq.asq_buf_size);
1431                         if (status)
1432                                 i40e_debug(hw, I40E_DEBUG_NVM,
1433                                            "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
1434                                            status);
1435                 }
1436
1437                 if (hw->nvm_buff.va) {
1438                         buff = hw->nvm_buff.va;
1439                         i40e_memcpy(buff, &bytes[aq_desc_len], aq_data_len,
1440                                 I40E_NONDMA_TO_NONDMA);
1441                 }
1442         }
1443
1444         /* and away we go! */
1445         status = i40e_asq_send_command(hw, aq_desc, buff,
1446                                        buff_size, &cmd_details);
1447         if (status) {
1448                 i40e_debug(hw, I40E_DEBUG_NVM,
1449                            "i40e_nvmupd_exec_aq err %s aq_err %s\n",
1450                            i40e_stat_str(hw, status),
1451                            i40e_aq_str(hw, hw->aq.asq_last_status));
1452                 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1453         }
1454
1455         /* should we wait for a follow-up event? */
1456         if (cmd->offset) {
1457                 hw->nvm_wait_opcode = cmd->offset;
1458                 hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
1459         }
1460
1461         return status;
1462 }
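
/*
 * Editor's note: illustrative sketch only.  The byte buffer handed to
 * i40e_nvmupd_exec_aq() is a raw AQ descriptor followed immediately by
 * its indirect data, which is exactly how the code above slices it:
 *
 *     bytes[0 .. sizeof(struct i40e_aq_desc) - 1]              descriptor
 *     bytes[sizeof(struct i40e_aq_desc) .. data_size - 1]      AQ data
 *
 * A caller copies a prepared struct i40e_aq_desc to the front of the
 * buffer, appends any payload, and sets cmd->data_size to the total
 * length.  cmd->offset may optionally name the AQ opcode whose later
 * completion should be waited on (it becomes hw->nvm_wait_opcode).
 */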
1463
1464 /**
1465  * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
1466  * @hw: pointer to hardware structure
1467  * @cmd: pointer to nvm update command buffer
1468  * @bytes: pointer to the data buffer
1469  * @perrno: pointer to return error code
1470  *
1471  * cmd structure contains identifiers and data buffer
1472  **/
1473 STATIC enum i40e_status_code i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
1474                                                     struct i40e_nvm_access *cmd,
1475                                                     u8 *bytes, int *perrno)
1476 {
1477         u32 aq_total_len;
1478         u32 aq_desc_len;
1479         int remainder;
1480         u8 *buff;
1481
1482         i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
1483
1484         aq_desc_len = sizeof(struct i40e_aq_desc);
1485         aq_total_len = aq_desc_len + LE16_TO_CPU(hw->nvm_wb_desc.datalen);
1486
1487         /* check offset range */
1488         if (cmd->offset > aq_total_len) {
1489                 i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
1490                            __func__, cmd->offset, aq_total_len);
1491                 *perrno = -EINVAL;
1492                 return I40E_ERR_PARAM;
1493         }
1494
1495         /* check copy length range */
1496         if (cmd->data_size > (aq_total_len - cmd->offset)) {
1497                 int new_len = aq_total_len - cmd->offset;
1498
1499                 i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
1500                            __func__, cmd->data_size, new_len);
1501                 cmd->data_size = new_len;
1502         }
1503
1504         remainder = cmd->data_size;
1505         if (cmd->offset < aq_desc_len) {
1506                 u32 len = aq_desc_len - cmd->offset;
1507
1508                 len = min(len, cmd->data_size);
1509                 i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
1510                            __func__, cmd->offset, cmd->offset + len);
1511
1512                 buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
1513                 i40e_memcpy(bytes, buff, len, I40E_NONDMA_TO_NONDMA);
1514
1515                 bytes += len;
1516                 remainder -= len;
1517                 buff = hw->nvm_buff.va;
1518         } else {
1519                 buff = (u8 *)hw->nvm_buff.va + (cmd->offset - aq_desc_len);
1520         }
1521
1522         if (remainder > 0) {
1523                 int start_byte = buff - (u8 *)hw->nvm_buff.va;
1524
1525                 i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
1526                            __func__, start_byte, start_byte + remainder);
1527                 i40e_memcpy(bytes, buff, remainder, I40E_NONDMA_TO_NONDMA);
1528         }
1529
1530         return I40E_SUCCESS;
1531 }
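
/*
 * Editor's note: illustrative sketch only.  Offsets into the exec_aq
 * result are linear across the write-back descriptor and the data
 * buffer, so a caller can retrieve both pieces with two reads (field
 * names as used above; the descriptor length comes from the shared
 * code):
 *
 *     cmd.offset    = 0;
 *     cmd.data_size = sizeof(struct i40e_aq_desc);             // descriptor
 *     ...
 *     cmd.offset    = sizeof(struct i40e_aq_desc);
 *     cmd.data_size = LE16_TO_CPU(hw->nvm_wb_desc.datalen);    // payload
 */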
1532
1533 /**
1534  * i40e_nvmupd_nvm_read - Read NVM
1535  * @hw: pointer to hardware structure
1536  * @cmd: pointer to nvm update command buffer
1537  * @bytes: pointer to the data buffer
1538  * @perrno: pointer to return error code
1539  *
1540  * cmd structure contains identifiers and data buffer
1541  **/
1542 STATIC enum i40e_status_code i40e_nvmupd_nvm_read(struct i40e_hw *hw,
1543                                                   struct i40e_nvm_access *cmd,
1544                                                   u8 *bytes, int *perrno)
1545 {
1546         struct i40e_asq_cmd_details cmd_details;
1547         enum i40e_status_code status;
1548         u8 module, transaction;
1549         bool last;
1550
1551         transaction = i40e_nvmupd_get_transaction(cmd->config);
1552         module = i40e_nvmupd_get_module(cmd->config);
1553         last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
1554
1555         memset(&cmd_details, 0, sizeof(cmd_details));
1556         cmd_details.wb_desc = &hw->nvm_wb_desc;
1557
1558         status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1559                                   bytes, last, &cmd_details);
1560         if (status) {
1561                 i40e_debug(hw, I40E_DEBUG_NVM,
1562                            "i40e_nvmupd_nvm_read mod 0x%x  off 0x%x  len 0x%x\n",
1563                            module, cmd->offset, cmd->data_size);
1564                 i40e_debug(hw, I40E_DEBUG_NVM,
1565                            "i40e_nvmupd_nvm_read status %d aq %d\n",
1566                            status, hw->aq.asq_last_status);
1567                 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1568         }
1569
1570         return status;
1571 }
1572
1573 /**
1574  * i40e_nvmupd_nvm_erase - Erase an NVM module
1575  * @hw: pointer to hardware structure
1576  * @cmd: pointer to nvm update command buffer
1577  * @perrno: pointer to return error code
1578  *
1579  * module, offset, data_size and data are in cmd structure
1580  **/
1581 STATIC enum i40e_status_code i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
1582                                                    struct i40e_nvm_access *cmd,
1583                                                    int *perrno)
1584 {
1585         enum i40e_status_code status = I40E_SUCCESS;
1586         struct i40e_asq_cmd_details cmd_details;
1587         u8 module, transaction;
1588         bool last;
1589
1590         transaction = i40e_nvmupd_get_transaction(cmd->config);
1591         module = i40e_nvmupd_get_module(cmd->config);
1592         last = (transaction & I40E_NVM_LCB);
1593
1594         memset(&cmd_details, 0, sizeof(cmd_details));
1595         cmd_details.wb_desc = &hw->nvm_wb_desc;
1596
1597         status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
1598                                    last, &cmd_details);
1599         if (status) {
1600                 i40e_debug(hw, I40E_DEBUG_NVM,
1601                            "i40e_nvmupd_nvm_erase mod 0x%x  off 0x%x len 0x%x\n",
1602                            module, cmd->offset, cmd->data_size);
1603                 i40e_debug(hw, I40E_DEBUG_NVM,
1604                            "i40e_nvmupd_nvm_erase status %d aq %d\n",
1605                            status, hw->aq.asq_last_status);
1606                 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1607         }
1608
1609         return status;
1610 }
1611
1612 /**
1613  * i40e_nvmupd_nvm_write - Write NVM
1614  * @hw: pointer to hardware structure
1615  * @cmd: pointer to nvm update command buffer
1616  * @bytes: pointer to the data buffer
1617  * @perrno: pointer to return error code
1618  *
1619  * module, offset, data_size and data are in cmd structure
1620  **/
1621 STATIC enum i40e_status_code i40e_nvmupd_nvm_write(struct i40e_hw *hw,
1622                                                    struct i40e_nvm_access *cmd,
1623                                                    u8 *bytes, int *perrno)
1624 {
1625         enum i40e_status_code status = I40E_SUCCESS;
1626         struct i40e_asq_cmd_details cmd_details;
1627         u8 module, transaction;
1628         bool last;
1629
1630         transaction = i40e_nvmupd_get_transaction(cmd->config);
1631         module = i40e_nvmupd_get_module(cmd->config);
1632         last = (transaction & I40E_NVM_LCB);
1633
1634         memset(&cmd_details, 0, sizeof(cmd_details));
1635         cmd_details.wb_desc = &hw->nvm_wb_desc;
1636
1637         status = i40e_aq_update_nvm(hw, module, cmd->offset,
1638                                     (u16)cmd->data_size, bytes, last,
1639                                     &cmd_details);
1640         if (status) {
1641                 i40e_debug(hw, I40E_DEBUG_NVM,
1642                            "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
1643                            module, cmd->offset, cmd->data_size);
1644                 i40e_debug(hw, I40E_DEBUG_NVM,
1645                            "i40e_nvmupd_nvm_write status %d aq %d\n",
1646                            status, hw->aq.asq_last_status);
1647                 *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
1648         }
1649
1650         return status;
1651 }
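
/*
 * Editor's note: illustrative sketch only.  A multi-fragment update, as
 * driven by the state machine above, uses the transaction type in each
 * request's config word to mark the fragment position; only the LCB
 * fragment sets last_command and releases the NVM resource once the
 * update completion arrives:
 *
 *     first fragment    -> I40E_NVM_SNT  (I40E_NVMUPD_WRITE_SNT)
 *     middle fragments  -> I40E_NVM_CON  (I40E_NVMUPD_WRITE_CON)
 *     last fragment     -> I40E_NVM_LCB  (I40E_NVMUPD_WRITE_LCB, last = true)
 *
 * Each fragment is a separate i40e_nvm_access request whose config
 * encodes the transaction and module, as validated by
 * i40e_nvmupd_validate_command() earlier in this file.
 */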