net/bnxt: fix an issue with broadcast traffic
[dpdk.git] / drivers / net / bnxt / bnxt_hwrm.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) Broadcom Limited.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Broadcom Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <unistd.h>
35
36 #include <rte_byteorder.h>
37 #include <rte_common.h>
38 #include <rte_cycles.h>
39 #include <rte_malloc.h>
40 #include <rte_memzone.h>
41 #include <rte_version.h>
42
43 #include "bnxt.h"
44 #include "bnxt_cpr.h"
45 #include "bnxt_filter.h"
46 #include "bnxt_hwrm.h"
47 #include "bnxt_rxq.h"
48 #include "bnxt_rxr.h"
49 #include "bnxt_ring.h"
50 #include "bnxt_txq.h"
51 #include "bnxt_txr.h"
52 #include "bnxt_vnic.h"
53 #include "hsi_struct_def_dpdk.h"
54
55 #include <rte_io.h>
56
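/*
 * Maximum number of poll iterations while waiting for an HWRM response;
 * each iteration sleeps 600us in bnxt_hwrm_send_message(), so the worst
 * case wait is roughly 6 seconds.
 */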
57 #define HWRM_CMD_TIMEOUT                10000
58
59 struct bnxt_plcmodes_cfg {
60         uint32_t        flags;
61         uint16_t        jumbo_thresh;
62         uint16_t        hds_offset;
63         uint16_t        hds_threshold;
64 };
65
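/*
 * Return the smallest supported page shift (log2 of the page size) that can
 * hold "size" bytes.  Only discrete sizes are probed (16B, 4KB, 8KB, 64KB,
 * 2MB, 4MB and 1GB); anything larger is reported as an error.
 */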
66 static int page_getenum(size_t size)
67 {
68         if (size <= 1 << 4)
69                 return 4;
70         if (size <= 1 << 12)
71                 return 12;
72         if (size <= 1 << 13)
73                 return 13;
74         if (size <= 1 << 16)
75                 return 16;
76         if (size <= 1 << 21)
77                 return 21;
78         if (size <= 1 << 22)
79                 return 22;
80         if (size <= 1 << 30)
81                 return 30;
82         RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
83         return sizeof(void *) * 8 - 1;
84 }
85
86 static int page_roundup(size_t size)
87 {
88         return 1 << page_getenum(size);
89 }
90
91 /*
92  * HWRM Functions (sent to HWRM)
93  * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
94  * fails (i.e. a timeout), and a positive, non-zero HWRM error code if
95  * the command was rejected by the ChiMP firmware.
96  */
97
98 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
99                                         uint32_t msg_len)
100 {
101         unsigned int i;
102         struct input *req = msg;
103         struct output *resp = bp->hwrm_cmd_resp_addr;
104         uint32_t *data = msg;
105         uint8_t *bar;
106         uint8_t *valid;
107         uint16_t max_req_len = bp->max_req_len;
108         struct hwrm_short_input short_input = { 0 };
109
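        /*
         * Short command mode: stage the full request in a DMA-able buffer
         * and write only a compact hwrm_short_input descriptor (request
         * type, signature, size and the buffer's DMA address) through BAR0.
         */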
110         if (bp->flags & BNXT_FLAG_SHORT_CMD) {
111                 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
112
113                 memset(short_cmd_req, 0, bp->max_req_len);
114                 memcpy(short_cmd_req, req, msg_len);
115
116                 short_input.req_type = rte_cpu_to_le_16(req->req_type);
117                 short_input.signature = rte_cpu_to_le_16(
118                                         HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
119                 short_input.size = rte_cpu_to_le_16(msg_len);
120                 short_input.req_addr =
121                         rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);
122
123                 data = (uint32_t *)&short_input;
124                 msg_len = sizeof(short_input);
125
126                 /* Sync memory write before updating doorbell */
127                 rte_wmb();
128
129                 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
130         }
131
132         /* Write request msg to hwrm channel */
133         for (i = 0; i < msg_len; i += 4) {
134                 bar = (uint8_t *)bp->bar0 + i;
135                 rte_write32(*data, bar);
136                 data++;
137         }
138
139         /* Zero the rest of the request space */
140         for (; i < max_req_len; i += 4) {
141                 bar = (uint8_t *)bp->bar0 + i;
142                 rte_write32(0, bar);
143         }
144
145         /* Ring channel doorbell */
146         bar = (uint8_t *)bp->bar0 + 0x100;
147         rte_write32(1, bar);
148
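        /*
         * The firmware writes HWRM_RESP_VALID_KEY into the last byte of
         * the response only after the rest of the response is in place,
         * so seeing the key means the preceding fields are coherent.
         */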
149         /* Poll for the valid bit */
150         for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
151                 /* Sanity check on the resp->resp_len */
152                 rte_rmb();
153                 if (resp->resp_len && resp->resp_len <=
154                                 bp->max_resp_len) {
155                         /* Last byte of resp contains the valid key */
156                         valid = (uint8_t *)resp + resp->resp_len - 1;
157                         if (*valid == HWRM_RESP_VALID_KEY)
158                                 break;
159                 }
160                 rte_delay_us(600);
161         }
162
163         if (i >= HWRM_CMD_TIMEOUT) {
164                 RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
165                         req->req_type);
166                 goto err_ret;
167         }
168         return 0;
169
170 err_ret:
171         return -1;
172 }
173
174 /*
175  * HWRM_PREP() should be used to prepare *ALL* HWRM commands.  It grabs the
176  * spinlock, and does initial processing.
177  *
178  * HWRM_CHECK_RESULT() checks for send and firmware errors; on failure it
179  * releases the spinlock and returns from the calling function.  If the
180  * calling function does not use plain int return codes, do not use
181  * HWRM_CHECK_RESULT() directly; copy and adapt it to suit the function.
182  *
183  * HWRM_UNLOCK() must be called after all response processing is completed.
184  */
185 #define HWRM_PREP(req, type) do { \
186         rte_spinlock_lock(&bp->hwrm_lock); \
187         memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
188         req.req_type = rte_cpu_to_le_16(HWRM_##type); \
189         req.cmpl_ring = rte_cpu_to_le_16(-1); \
190         req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
191         req.target_id = rte_cpu_to_le_16(0xffff); \
192         req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
193 } while (0)
194
195 #define HWRM_CHECK_RESULT() do {\
196         if (rc) { \
197                 RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
198                         __func__, rc); \
199                 rte_spinlock_unlock(&bp->hwrm_lock); \
200                 return rc; \
201         } \
202         if (resp->error_code) { \
203                 rc = rte_le_to_cpu_16(resp->error_code); \
204                 if (resp->resp_len >= 16) { \
205                         struct hwrm_err_output *tmp_hwrm_err_op = \
206                                                 (void *)resp; \
207                         RTE_LOG(ERR, PMD, \
208                                 "%s error %d:%d:%08x:%04x\n", \
209                                 __func__, \
210                                 rc, tmp_hwrm_err_op->cmd_err, \
211                                 rte_le_to_cpu_32(\
212                                         tmp_hwrm_err_op->opaque_0), \
213                                 rte_le_to_cpu_16(\
214                                         tmp_hwrm_err_op->opaque_1)); \
215                 } \
216                 else { \
217                         RTE_LOG(ERR, PMD, \
218                                 "%s error %d\n", __func__, rc); \
219                 } \
220                 rte_spinlock_unlock(&bp->hwrm_lock); \
221                 return rc; \
222         } \
223 } while (0)
224
225 #define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)
226
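/*
 * Typical call sequence, as used by the bnxt_hwrm_*() functions below:
 *
 *      HWRM_PREP(req, VNIC_FREE);
 *      req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 *      rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *      HWRM_CHECK_RESULT();
 *      HWRM_UNLOCK();
 */
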
227 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
228 {
229         int rc = 0;
230         struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
231         struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
232
233         HWRM_PREP(req, CFA_L2_SET_RX_MASK);
234         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
235         req.mask = 0;
236
237         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
238
239         HWRM_CHECK_RESULT();
240         HWRM_UNLOCK();
241
242         return rc;
243 }
244
245 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
246                                  struct bnxt_vnic_info *vnic,
247                                  uint16_t vlan_count,
248                                  struct bnxt_vlan_table_entry *vlan_table)
249 {
250         int rc = 0;
251         struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
252         struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
253         uint32_t mask = 0;
254
255         HWRM_PREP(req, CFA_L2_SET_RX_MASK);
256         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
257
258         /* FIXME: add the multicast flag once adding multicast addresses
259          * is supported by ethtool.
260          */
261         if (vnic->flags & BNXT_VNIC_INFO_BCAST)
262                 mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
263         if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
264                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
265         if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
266                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
267         if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
268                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
269         if (vnic->flags & BNXT_VNIC_INFO_MCAST)
270                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
271         if (vnic->mc_addr_cnt) {
272                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
273                 req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
274                 req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
275         }
276         if (vlan_table) {
277                 if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
278                         mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
279                 req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
280                          rte_mem_virt2phy(vlan_table));
281                 req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
282         }
283         req.mask = rte_cpu_to_le_32(mask);
284
285         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
286
287         HWRM_CHECK_RESULT();
288         HWRM_UNLOCK();
289
290         return rc;
291 }
292
293 int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
294                         uint16_t vlan_count,
295                         struct bnxt_vlan_antispoof_table_entry *vlan_table)
296 {
297         int rc = 0;
298         struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
299         struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
300                                                 bp->hwrm_cmd_resp_addr;
301
302         /*
303          * Older HWRM versions did not support this command, and the set_rx_mask
304          * list was used for anti-spoof. In 1.8.0, the TX path configuration was
305          * removed from set_rx_mask call, and this command was added.
306          *
307          * This command is also present from 1.7.8.11 and higher,
308          * as well as 1.7.8.0
309          */
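        /*
         * bp->fw_ver packs the firmware version as
         * (maj << 24) | (min << 16) | (bld << 8) | rsvd (see
         * bnxt_hwrm_ver_get()), so 1.8.0 is (1 << 24) | (8 << 16) and
         * 1.7.8.11 is (1 << 24) | (7 << 16) | (8 << 8) | 11.
         */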
310         if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
311                 if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
312                         if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
313                                         (11)))
314                                 return 0;
315                 }
316         }
317         HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
318         req.fid = rte_cpu_to_le_16(fid);
319
320         req.vlan_tag_mask_tbl_addr =
321                 rte_cpu_to_le_64(rte_mem_virt2phy(vlan_table));
322         req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
323
324         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
325
326         HWRM_CHECK_RESULT();
327         HWRM_UNLOCK();
328
329         return rc;
330 }
331
332 int bnxt_hwrm_clear_filter(struct bnxt *bp,
333                            struct bnxt_filter_info *filter)
334 {
335         int rc = 0;
336         struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
337         struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
338
339         if (filter->fw_l2_filter_id == UINT64_MAX)
340                 return 0;
341
342         HWRM_PREP(req, CFA_L2_FILTER_FREE);
343
344         req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
345
346         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
347
348         HWRM_CHECK_RESULT();
349         HWRM_UNLOCK();
350
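        /* Mark the filter as freed; the UINT64_MAX check at the top makes
         * any repeated free a no-op.
         */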
351         filter->fw_l2_filter_id = -1;
352
353         return 0;
354 }
355
356 int bnxt_hwrm_set_filter(struct bnxt *bp,
357                          uint16_t dst_id,
358                          struct bnxt_filter_info *filter)
359 {
360         int rc = 0;
361         struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
362         struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
363         uint32_t enables = 0;
364
365         if (filter->fw_l2_filter_id != UINT64_MAX)
366                 bnxt_hwrm_clear_filter(bp, filter);
367
368         HWRM_PREP(req, CFA_L2_FILTER_ALLOC);
369
370         req.flags = rte_cpu_to_le_32(filter->flags);
371
372         enables = filter->enables |
373               HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
374         req.dst_id = rte_cpu_to_le_16(dst_id);
375
376         if (enables &
377             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
378                 memcpy(req.l2_addr, filter->l2_addr,
379                        ETHER_ADDR_LEN);
380         if (enables &
381             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
382                 memcpy(req.l2_addr_mask, filter->l2_addr_mask,
383                        ETHER_ADDR_LEN);
384         if (enables &
385             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
386                 req.l2_ovlan = filter->l2_ovlan;
387         if (enables &
388             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
389                 req.l2_ovlan_mask = filter->l2_ovlan_mask;
390         if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
391                 req.src_id = rte_cpu_to_le_32(filter->src_id);
392         if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
393                 req.src_type = filter->src_type;
394
395         req.enables = rte_cpu_to_le_32(enables);
396
397         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
398
399         HWRM_CHECK_RESULT();
400
401         filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
402         HWRM_UNLOCK();
403
404         return rc;
405 }
406
407 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
408 {
409         int rc = 0;
410         struct hwrm_func_qcaps_input req = {.req_type = 0 };
411         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
412         uint16_t new_max_vfs;
413         int i;
414
415         HWRM_PREP(req, FUNC_QCAPS);
416
417         req.fid = rte_cpu_to_le_16(0xffff);
418
419         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
420
421         HWRM_CHECK_RESULT();
422
423         bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
424         if (BNXT_PF(bp)) {
425                 bp->pf.port_id = resp->port_id;
426                 bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
427                 new_max_vfs = bp->pdev->max_vfs;
428                 if (new_max_vfs != bp->pf.max_vfs) {
429                         if (bp->pf.vf_info)
430                                 rte_free(bp->pf.vf_info);
431                         bp->pf.vf_info = rte_malloc("bnxt_vf_info",
432                             sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
433                         bp->pf.max_vfs = new_max_vfs;
434                         for (i = 0; i < new_max_vfs; i++) {
435                                 bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
436                                 bp->pf.vf_info[i].vlan_table =
437                                         rte_zmalloc("VF VLAN table",
438                                                     getpagesize(),
439                                                     getpagesize());
440                                 if (bp->pf.vf_info[i].vlan_table == NULL)
441                                         RTE_LOG(ERR, PMD,
442                                         "Failed to alloc VLAN table for VF %d\n",
443                                         i);
444                                 else
445                                         rte_mem_lock_page(
446                                                 bp->pf.vf_info[i].vlan_table);
447                                 bp->pf.vf_info[i].vlan_as_table =
448                                         rte_zmalloc("VF VLAN AS table",
449                                                     getpagesize(),
450                                                     getpagesize());
451                                 if (bp->pf.vf_info[i].vlan_as_table == NULL)
452                                         RTE_LOG(ERR, PMD,
453                                         "Failed to alloc VLAN AS table for VF %d\n",
454                                         i);
455                                 else
456                                         rte_mem_lock_page(
457                                                bp->pf.vf_info[i].vlan_as_table);
458                                 STAILQ_INIT(&bp->pf.vf_info[i].filter);
459                         }
460                 }
461         }
462
463         bp->fw_fid = rte_le_to_cpu_32(resp->fid);
464         memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
465         bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
466         bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
467         bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
468         bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
469         bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
470         /* TODO: For now, do not support VMDq/RFS on VFs. */
471         if (BNXT_PF(bp)) {
472                 if (bp->pf.max_vfs)
473                         bp->max_vnics = 1;
474                 else
475                         bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
476         } else {
477                 bp->max_vnics = 1;
478         }
479         bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
480         if (BNXT_PF(bp))
481                 bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
482         HWRM_UNLOCK();
483
484         return rc;
485 }
486
487 int bnxt_hwrm_func_reset(struct bnxt *bp)
488 {
489         int rc = 0;
490         struct hwrm_func_reset_input req = {.req_type = 0 };
491         struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
492
493         HWRM_PREP(req, FUNC_RESET);
494
495         req.enables = rte_cpu_to_le_32(0);
496
497         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
498
499         HWRM_CHECK_RESULT();
500         HWRM_UNLOCK();
501
502         return rc;
503 }
504
505 int bnxt_hwrm_func_driver_register(struct bnxt *bp)
506 {
507         int rc;
508         struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
509         struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
510
511         if (bp->flags & BNXT_FLAG_REGISTERED)
512                 return 0;
513
514         HWRM_PREP(req, FUNC_DRV_RGTR);
515         req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
516                         HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
517         req.ver_maj = RTE_VER_YEAR;
518         req.ver_min = RTE_VER_MONTH;
519         req.ver_upd = RTE_VER_MINOR;
520
521         if (BNXT_PF(bp)) {
522                 req.enables |= rte_cpu_to_le_32(
523                         HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
524                 memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
525                        RTE_MIN(sizeof(req.vf_req_fwd),
526                                sizeof(bp->pf.vf_req_fwd)));
527         }
528
529         memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
530         req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */
531
532         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
533
534         HWRM_CHECK_RESULT();
535         HWRM_UNLOCK();
536
537         bp->flags |= BNXT_FLAG_REGISTERED;
538
539         return rc;
540 }
541
542 int bnxt_hwrm_ver_get(struct bnxt *bp)
543 {
544         int rc = 0;
545         struct hwrm_ver_get_input req = {.req_type = 0 };
546         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
547         uint32_t my_version;
548         uint32_t fw_version;
549         uint16_t max_resp_len;
550         char type[RTE_MEMZONE_NAMESIZE];
551         uint32_t dev_caps_cfg;
552
553         bp->max_req_len = HWRM_MAX_REQ_LEN;
554         HWRM_PREP(req, VER_GET);
555
556         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
557         req.hwrm_intf_min = HWRM_VERSION_MINOR;
558         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
559
560         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
561
562         HWRM_CHECK_RESULT();
563
564         RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
565                 resp->hwrm_intf_maj, resp->hwrm_intf_min,
566                 resp->hwrm_intf_upd,
567                 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
568         bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
569                         (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
570         RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
571                 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
572
573         my_version = HWRM_VERSION_MAJOR << 16;
574         my_version |= HWRM_VERSION_MINOR << 8;
575         my_version |= HWRM_VERSION_UPDATE;
576
577         fw_version = resp->hwrm_intf_maj << 16;
578         fw_version |= resp->hwrm_intf_min << 8;
579         fw_version |= resp->hwrm_intf_upd;
580
581         if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
582                 RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
583                 rc = -EINVAL;
584                 goto error;
585         }
586
587         if (my_version != fw_version) {
588                 RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
589                 if (my_version < fw_version) {
590                         RTE_LOG(INFO, PMD,
591                                 "Firmware API version is newer than driver.\n");
592                         RTE_LOG(INFO, PMD,
593                                 "The driver may be missing features.\n");
594                 } else {
595                         RTE_LOG(INFO, PMD,
596                                 "Firmware API version is older than driver.\n");
597                         RTE_LOG(INFO, PMD,
598                                 "Not all driver features may be functional.\n");
599                 }
600         }
601
602         if (bp->max_req_len > resp->max_req_win_len) {
603                 RTE_LOG(ERR, PMD, "Unsupported request length\n");
604                 rc = -EINVAL;
605         }
606         bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
607         max_resp_len = resp->max_resp_len;
608         dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);
609
610         sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
611                 bp->pdev->addr.domain, bp->pdev->addr.bus,
612                 bp->pdev->addr.devid, bp->pdev->addr.function);
613
614         if (bp->max_resp_len != max_resp_len) {
615                 rte_free(bp->hwrm_cmd_resp_addr);
616
617                 bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
618                 if (bp->hwrm_cmd_resp_addr == NULL) {
619                         rc = -ENOMEM;
620                         goto error;
621                 }
622                 rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
623                 bp->hwrm_cmd_resp_dma_addr =
624                         rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
625                 if (bp->hwrm_cmd_resp_dma_addr == 0) {
626                         RTE_LOG(ERR, PMD,
627                         "Unable to map response buffer to physical memory.\n");
628                         rc = -ENOMEM;
629                         goto error;
630                 }
631                 bp->max_resp_len = max_resp_len;
632         }
633
634         if ((dev_caps_cfg &
635                 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
636             (dev_caps_cfg &
637              HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
638                 RTE_LOG(DEBUG, PMD, "Short command supported\n");
639
640                 rte_free(bp->hwrm_short_cmd_req_addr);
641
642                 bp->hwrm_short_cmd_req_addr = rte_malloc(type,
643                                                         bp->max_req_len, 0);
644                 if (bp->hwrm_short_cmd_req_addr == NULL) {
645                         rc = -ENOMEM;
646                         goto error;
647                 }
648                 rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
649                 bp->hwrm_short_cmd_req_dma_addr =
650                         rte_mem_virt2phy(bp->hwrm_short_cmd_req_addr);
651                 if (bp->hwrm_short_cmd_req_dma_addr == 0) {
652                         rte_free(bp->hwrm_short_cmd_req_addr);
653                         RTE_LOG(ERR, PMD,
654                                 "Unable to map buffer to physical memory.\n");
655                         rc = -ENOMEM;
656                         goto error;
657                 }
658
659                 bp->flags |= BNXT_FLAG_SHORT_CMD;
660         }
661
662 error:
663         HWRM_UNLOCK();
664         return rc;
665 }
666
667 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
668 {
669         int rc;
670         struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
671         struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
672
673         if (!(bp->flags & BNXT_FLAG_REGISTERED))
674                 return 0;
675
676         HWRM_PREP(req, FUNC_DRV_UNRGTR);
677         req.flags = flags;
678
679         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
680
681         HWRM_CHECK_RESULT();
682         HWRM_UNLOCK();
683
684         bp->flags &= ~BNXT_FLAG_REGISTERED;
685
686         return rc;
687 }
688
689 static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
690 {
691         int rc = 0;
692         struct hwrm_port_phy_cfg_input req = {0};
693         struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
694         uint32_t enables = 0;
695         uint32_t link_speed_mask =
696                 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
697
698         HWRM_PREP(req, PORT_PHY_CFG);
699
700         if (conf->link_up) {
701                 req.flags = rte_cpu_to_le_32(conf->phy_flags);
702                 req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
703                 /*
704                  * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
705                  * any auto mode, even "none".
706                  */
707                 if (!conf->link_speed) {
708                         req.auto_mode = conf->auto_mode;
709                         enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
710                         if (conf->auto_mode ==
711                             HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK) {
712                                 req.auto_link_speed_mask =
713                                         conf->auto_link_speed_mask;
714                                 enables |= link_speed_mask;
715                         }
716                         if (bp->link_info.auto_link_speed) {
717                                 req.auto_link_speed =
718                                         bp->link_info.auto_link_speed;
719                                 enables |=
720                                 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
721                         }
722                 }
723                 req.auto_duplex = conf->duplex;
724                 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
725                 req.auto_pause = conf->auto_pause;
726                 req.force_pause = conf->force_pause;
727                 /* Set force_pause if there is no auto or if there is a force */
728                 if (req.auto_pause && !req.force_pause)
729                         enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
730                 else
731                         enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
732
733                 req.enables = rte_cpu_to_le_32(enables);
734         } else {
735                 req.flags =
736                 rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
737                 RTE_LOG(INFO, PMD, "Force Link Down\n");
738         }
739
740         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
741
742         HWRM_CHECK_RESULT();
743         HWRM_UNLOCK();
744
745         return rc;
746 }
747
748 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
749                                    struct bnxt_link_info *link_info)
750 {
751         int rc = 0;
752         struct hwrm_port_phy_qcfg_input req = {0};
753         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
754
755         HWRM_PREP(req, PORT_PHY_QCFG);
756
757         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
758
759         HWRM_CHECK_RESULT();
760
761         link_info->phy_link_status = resp->link;
762         link_info->link_up =
763                 (link_info->phy_link_status ==
764                  HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
765         link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
766         link_info->duplex = resp->duplex;
767         link_info->pause = resp->pause;
768         link_info->auto_pause = resp->auto_pause;
769         link_info->force_pause = resp->force_pause;
770         link_info->auto_mode = resp->auto_mode;
771
772         link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
773         link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
774         link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
775         link_info->phy_ver[0] = resp->phy_maj;
776         link_info->phy_ver[1] = resp->phy_min;
777         link_info->phy_ver[2] = resp->phy_bld;
778
779         HWRM_UNLOCK();
780
781         return rc;
782 }
783
784 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
785 {
786         int rc = 0;
787         struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
788         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
789
790         HWRM_PREP(req, QUEUE_QPORTCFG);
791
792         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
793
794         HWRM_CHECK_RESULT();
795
796 #define GET_QUEUE_INFO(x) \
797         bp->cos_queue[x].id = resp->queue_id##x; \
798         bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
799
800         GET_QUEUE_INFO(0);
801         GET_QUEUE_INFO(1);
802         GET_QUEUE_INFO(2);
803         GET_QUEUE_INFO(3);
804         GET_QUEUE_INFO(4);
805         GET_QUEUE_INFO(5);
806         GET_QUEUE_INFO(6);
807         GET_QUEUE_INFO(7);
808
809         HWRM_UNLOCK();
810
811         return rc;
812 }
813
814 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
815                          struct bnxt_ring *ring,
816                          uint32_t ring_type, uint32_t map_index,
817                          uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
818 {
819         int rc = 0;
820         uint32_t enables = 0;
821         struct hwrm_ring_alloc_input req = {.req_type = 0 };
822         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
823
824         HWRM_PREP(req, RING_ALLOC);
825
826         req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
827         req.fbo = rte_cpu_to_le_32(0);
828         /* Association of ring index with doorbell index */
829         req.logical_id = rte_cpu_to_le_16(map_index);
830         req.length = rte_cpu_to_le_32(ring->ring_size);
831
832         switch (ring_type) {
833         case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
834                 req.queue_id = bp->cos_queue[0].id;
835                 /* FALLTHROUGH */
836         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
837                 req.ring_type = ring_type;
838                 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
839                 req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
840                 if (stats_ctx_id != INVALID_STATS_CTX_ID)
841                         enables |=
842                         HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
843                 break;
844         case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
845                 req.ring_type = ring_type;
846                 /*
847                  * TODO: Some HWRM versions crash with
848                  * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
849                  */
850                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
851                 break;
852         default:
853                 RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
854                         ring_type);
855                 HWRM_UNLOCK();
856                 return -1;
857         }
858         req.enables = rte_cpu_to_le_32(enables);
859
860         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
861
862         if (rc || resp->error_code) {
863                 if (rc == 0 && resp->error_code)
864                         rc = rte_le_to_cpu_16(resp->error_code);
865                 switch (ring_type) {
866                 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
867                         RTE_LOG(ERR, PMD,
868                                 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
869                         HWRM_UNLOCK();
870                         return rc;
871                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
872                         RTE_LOG(ERR, PMD,
873                                 "hwrm_ring_alloc rx failed. rc:%d\n", rc);
874                         HWRM_UNLOCK();
875                         return rc;
876                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
877                         RTE_LOG(ERR, PMD,
878                                 "hwrm_ring_alloc tx failed. rc:%d\n", rc);
879                         HWRM_UNLOCK();
880                         return rc;
881                 default:
882                         RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
883                         HWRM_UNLOCK();
884                         return rc;
885                 }
886         }
887
888         ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
889         HWRM_UNLOCK();
890         return rc;
891 }
892
893 int bnxt_hwrm_ring_free(struct bnxt *bp,
894                         struct bnxt_ring *ring, uint32_t ring_type)
895 {
896         int rc;
897         struct hwrm_ring_free_input req = {.req_type = 0 };
898         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
899
900         HWRM_PREP(req, RING_FREE);
901
902         req.ring_type = ring_type;
903         req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
904
905         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
906
907         if (rc || resp->error_code) {
908                 if (rc == 0 && resp->error_code)
909                         rc = rte_le_to_cpu_16(resp->error_code);
910                 HWRM_UNLOCK();
911
912                 switch (ring_type) {
913                 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
914                         RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
915                                 rc);
916                         return rc;
917                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
918                         RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
919                                 rc);
920                         return rc;
921                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
922                         RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
923                                 rc);
924                         return rc;
925                 default:
926                         RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
927                         return rc;
928                 }
929         }
930         HWRM_UNLOCK();
931         return 0;
932 }
933
934 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
935 {
936         int rc = 0;
937         struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
938         struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
939
940         HWRM_PREP(req, RING_GRP_ALLOC);
941
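        /* cr/rr/ar/sc: the completion ring, Rx ring, aggregation ring and
         * statistics context that make up this ring group.
         */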
942         req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
943         req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
944         req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
945         req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
946
947         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
948
949         HWRM_CHECK_RESULT();
950
951         bp->grp_info[idx].fw_grp_id =
952             rte_le_to_cpu_16(resp->ring_group_id);
953
954         HWRM_UNLOCK();
955
956         return rc;
957 }
958
959 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
960 {
961         int rc;
962         struct hwrm_ring_grp_free_input req = {.req_type = 0 };
963         struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
964
965         HWRM_PREP(req, RING_GRP_FREE);
966
967         req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
968
969         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
970
971         HWRM_CHECK_RESULT();
972         HWRM_UNLOCK();
973
974         bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
975         return rc;
976 }
977
978 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
979 {
980         int rc = 0;
981         struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
982         struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
983
984         if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
985                 return rc;
986
987         HWRM_PREP(req, STAT_CTX_CLR_STATS);
988
989         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
990
991         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
992
993         HWRM_CHECK_RESULT();
994         HWRM_UNLOCK();
995
996         return rc;
997 }
998
999 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1000                                 unsigned int idx __rte_unused)
1001 {
1002         int rc;
1003         struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1004         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1005
1006         HWRM_PREP(req, STAT_CTX_ALLOC);
1007
1008         req.update_period_ms = rte_cpu_to_le_32(0);
1009
1010         req.stats_dma_addr =
1011             rte_cpu_to_le_64(cpr->hw_stats_map);
1012
1013         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1014
1015         HWRM_CHECK_RESULT();
1016
1017         cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
1018
1019         HWRM_UNLOCK();
1020
1021         return rc;
1022 }
1023
1024 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1025                                 unsigned int idx __rte_unused)
1026 {
1027         int rc;
1028         struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1029         struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1030
1031         HWRM_PREP(req, STAT_CTX_FREE);
1032
1033         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
1034
1035         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1036
1037         HWRM_CHECK_RESULT();
1038         HWRM_UNLOCK();
1039
1040         return rc;
1041 }
1042
1043 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1044 {
1045         int rc = 0, i, j;
1046         struct hwrm_vnic_alloc_input req = { 0 };
1047         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1048
1049         /* map ring groups to this vnic */
1050         RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
1051                 vnic->start_grp_id, vnic->end_grp_id);
1052         for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
1053                 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1054         vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1055         vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1056         vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1057         vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
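        /* MRU = MTU plus L2 overhead: Ethernet header, CRC and one VLAN tag */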
1058         vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1059                                 ETHER_CRC_LEN + VLAN_TAG_SIZE;
1060         HWRM_PREP(req, VNIC_ALLOC);
1061
1062         if (vnic->func_default)
1063                 req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
1064         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1065
1066         HWRM_CHECK_RESULT();
1067
1068         vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1069         HWRM_UNLOCK();
1070         RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
1071         return rc;
1072 }
1073
1074 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1075                                         struct bnxt_vnic_info *vnic,
1076                                         struct bnxt_plcmodes_cfg *pmode)
1077 {
1078         int rc = 0;
1079         struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1080         struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1081
1082         HWRM_PREP(req, VNIC_PLCMODES_QCFG);
1083
1084         req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1085
1086         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1087
1088         HWRM_CHECK_RESULT();
1089
1090         pmode->flags = rte_le_to_cpu_32(resp->flags);
1091         /* dflt_vnic bit doesn't exist in the _cfg command */
1092         pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1093         pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1094         pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1095         pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1096
1097         HWRM_UNLOCK();
1098
1099         return rc;
1100 }
1101
1102 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1103                                        struct bnxt_vnic_info *vnic,
1104                                        struct bnxt_plcmodes_cfg *pmode)
1105 {
1106         int rc = 0;
1107         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1108         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1109
1110         HWRM_PREP(req, VNIC_PLCMODES_CFG);
1111
1112         req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1113         req.flags = rte_cpu_to_le_32(pmode->flags);
1114         req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1115         req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1116         req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1117         req.enables = rte_cpu_to_le_32(
1118             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1119             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1120             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1121         );
1122
1123         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1124
1125         HWRM_CHECK_RESULT();
1126         HWRM_UNLOCK();
1127
1128         return rc;
1129 }
1130
1131 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1132 {
1133         int rc = 0;
1134         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1135         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1136         uint32_t ctx_enable_flag = 0;
1137         struct bnxt_plcmodes_cfg pmodes;
1138
1139         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1140                 RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
1141                 return rc;
1142         }
1143
1144         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1145         if (rc)
1146                 return rc;
1147
1148         HWRM_PREP(req, VNIC_CFG);
1149
1150         /* Only RSS is supported for now; TBD: COS & LB */
1151         req.enables =
1152             rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
1153                              HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
1154         if (vnic->lb_rule != 0xffff)
1155                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1156         if (vnic->cos_rule != 0xffff)
1157                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1158         if (vnic->rss_rule != 0xffff)
1159                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1160         req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
1161         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1162         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1163         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1164         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1165         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1166         req.mru = rte_cpu_to_le_16(vnic->mru);
1167         if (vnic->func_default)
1168                 req.flags |=
1169                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1170         if (vnic->vlan_strip)
1171                 req.flags |=
1172                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1173         if (vnic->bd_stall)
1174                 req.flags |=
1175                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1176         if (vnic->roce_dual)
1177                 req.flags |= rte_cpu_to_le_32(
1178                         HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1179         if (vnic->roce_only)
1180                 req.flags |= rte_cpu_to_le_32(
1181                         HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1182         if (vnic->rss_dflt_cr)
1183                 req.flags |= rte_cpu_to_le_32(
1184                         HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE);
1185
1186         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1187
1188         HWRM_CHECK_RESULT();
1189         HWRM_UNLOCK();
1190
1191         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1192
1193         return rc;
1194 }
1195
1196 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1197                 int16_t fw_vf_id)
1198 {
1199         int rc = 0;
1200         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1201         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1202
1203         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1204                 RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1205                 return rc;
1206         }
1207         HWRM_PREP(req, VNIC_QCFG);
1208
1209         req.enables =
1210                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1211         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1212         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1213
1214         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1215
1216         HWRM_CHECK_RESULT();
1217
1218         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1219         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1220         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1221         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1222         vnic->mru = rte_le_to_cpu_16(resp->mru);
1223         vnic->func_default = rte_le_to_cpu_32(
1224                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1225         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1226                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1227         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1228                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1229         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1230                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1231         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1232                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1233         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1234                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1235
1236         HWRM_UNLOCK();
1237
1238         return rc;
1239 }
1240
1241 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1242 {
1243         int rc = 0;
1244         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1245         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1246                                                 bp->hwrm_cmd_resp_addr;
1247
1248         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);
1249
1250         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1251
1252         HWRM_CHECK_RESULT();
1253
1254         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1255         HWRM_UNLOCK();
1256         RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
1257
1258         return rc;
1259 }
1260
1261 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1262 {
1263         int rc = 0;
1264         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1265         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1266                                                 bp->hwrm_cmd_resp_addr;
1267
1268         if (vnic->rss_rule == 0xffff) {
1269                 RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
1270                 return rc;
1271         }
1272         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
1273
1274         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1275
1276         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1277
1278         HWRM_CHECK_RESULT();
1279         HWRM_UNLOCK();
1280
1281         vnic->rss_rule = INVALID_HW_RING_ID;
1282
1283         return rc;
1284 }
1285
1286 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1287 {
1288         int rc = 0;
1289         struct hwrm_vnic_free_input req = {.req_type = 0 };
1290         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1291
1292         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1293                 RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1294                 return rc;
1295         }
1296
1297         HWRM_PREP(req, VNIC_FREE);
1298
1299         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1300
1301         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1302
1303         HWRM_CHECK_RESULT();
1304         HWRM_UNLOCK();
1305
1306         vnic->fw_vnic_id = INVALID_HW_RING_ID;
1307         return rc;
1308 }
1309
1310 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1311                            struct bnxt_vnic_info *vnic)
1312 {
1313         int rc = 0;
1314         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1315         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1316
1317         HWRM_PREP(req, VNIC_RSS_CFG);
1318
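        /* The RSS indirection table and hash key stay in host memory;
         * only their DMA addresses are handed to the firmware.
         */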
1319         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1320
1321         req.ring_grp_tbl_addr =
1322             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1323         req.hash_key_tbl_addr =
1324             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1325         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1326
1327         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1328
1329         HWRM_CHECK_RESULT();
1330         HWRM_UNLOCK();
1331
1332         return rc;
1333 }
1334
1335 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1336                         struct bnxt_vnic_info *vnic)
1337 {
1338         int rc = 0;
1339         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1340         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1341         uint16_t size;
1342
1343         HWRM_PREP(req, VNIC_PLCMODES_CFG);
1344
1345         req.flags = rte_cpu_to_le_32(
1346                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1347
1348         req.enables = rte_cpu_to_le_32(
1349                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1350
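        /* Use the mbuf data room (minus headroom) as the jumbo threshold,
         * so any packet that cannot fit in a single mbuf takes the jumbo
         * placement path enabled above.
         */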
1351         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1352         size -= RTE_PKTMBUF_HEADROOM;
1353
1354         req.jumbo_thresh = rte_cpu_to_le_16(size);
1355         req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1356
1357         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1358
1359         HWRM_CHECK_RESULT();
1360         HWRM_UNLOCK();
1361
1362         return rc;
1363 }
1364
1365 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1366                         struct bnxt_vnic_info *vnic, bool enable)
1367 {
1368         int rc = 0;
1369         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1370         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1371
1372         HWRM_PREP(req, VNIC_TPA_CFG);
1373
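        /*
         * TPA is the hardware LRO: received TCP segments are coalesced
         * before being handed to the host.  The limits below cap one
         * aggregation at 5 segments, with a 512 byte minimum aggregation
         * length.
         */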
1374         if (enable) {
1375                 req.enables = rte_cpu_to_le_32(
1376                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1377                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1378                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1379                 req.flags = rte_cpu_to_le_32(
1380                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1381                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1382                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1383                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1384                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1385                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1386                 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1387                 req.max_agg_segs = rte_cpu_to_le_16(5);
1388                 req.max_aggs =
1389                         rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1390                 req.min_agg_len = rte_cpu_to_le_32(512);
1391         }
1392
1393         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1394
1395         HWRM_CHECK_RESULT();
1396         HWRM_UNLOCK();
1397
1398         return rc;
1399 }
1400
1401 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1402 {
1403         struct hwrm_func_cfg_input req = {0};
1404         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1405         int rc;
1406
1407         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1408         req.enables = rte_cpu_to_le_32(
1409                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1410         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1411         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1412
1413         HWRM_PREP(req, FUNC_CFG);
1414
1415         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1416         HWRM_CHECK_RESULT();
1417         HWRM_UNLOCK();
1418
1419         bp->pf.vf_info[vf].random_mac = false;
1420
1421         return rc;
1422 }
1423
1424 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1425                                   uint64_t *dropped)
1426 {
1427         int rc = 0;
1428         struct hwrm_func_qstats_input req = {.req_type = 0};
1429         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1430
1431         HWRM_PREP(req, FUNC_QSTATS);
1432
1433         req.fid = rte_cpu_to_le_16(fid);
1434
1435         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1436
1437         HWRM_CHECK_RESULT();
1438
1439         if (dropped)
1440                 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1441
1442         HWRM_UNLOCK();
1443
1444         return rc;
1445 }
1446
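/*
 * Query per-function statistics and fold them into rte_eth_stats:
 * ipackets/opackets sum the unicast, multicast and broadcast packet
 * counters, ibytes/obytes the corresponding byte counters, while
 * rx_err_pkts/tx_err_pkts map to ierrors/oerrors and rx_drop_pkts to
 * imissed.
 */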
1447 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1448                           struct rte_eth_stats *stats)
1449 {
1450         int rc = 0;
1451         struct hwrm_func_qstats_input req = {.req_type = 0};
1452         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1453
1454         HWRM_PREP(req, FUNC_QSTATS);
1455
1456         req.fid = rte_cpu_to_le_16(fid);
1457
1458         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1459
1460         HWRM_CHECK_RESULT();
1461
1462         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1463         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1464         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1465         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1466         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1467         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1468
1469         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1470         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1471         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1472         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1473         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1474         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1475
1476         stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
1477         stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);
1478
1479         stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
1480
1481         HWRM_UNLOCK();
1482
1483         return rc;
1484 }
1485
1486 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1487 {
1488         int rc = 0;
1489         struct hwrm_func_clr_stats_input req = {.req_type = 0};
1490         struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1491
1492         HWRM_PREP(req, FUNC_CLR_STATS);
1493
1494         req.fid = rte_cpu_to_le_16(fid);
1495
1496         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1497
1498         HWRM_CHECK_RESULT();
1499         HWRM_UNLOCK();
1500
1501         return rc;
1502 }
1503
1504 /*
1505  * HWRM utility functions
1506  */
1507
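/*
 * The stat context walkers below iterate over a single flat index space
 * covering both RX and TX completion rings:
 *   i in [0, rx_cp_nr_rings)                   -> rx_queues[i]
 *   i in [rx_cp_nr_rings, rx + tx_cp_nr_rings) -> tx_queues[i - rx_cp_nr_rings]
 */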
1508 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1509 {
1510         unsigned int i;
1511         int rc = 0;
1512
1513         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1514                 struct bnxt_tx_queue *txq;
1515                 struct bnxt_rx_queue *rxq;
1516                 struct bnxt_cp_ring_info *cpr;
1517
1518                 if (i >= bp->rx_cp_nr_rings) {
1519                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1520                         cpr = txq->cp_ring;
1521                 } else {
1522                         rxq = bp->rx_queues[i];
1523                         cpr = rxq->cp_ring;
1524                 }
1525
1526                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1527                 if (rc)
1528                         return rc;
1529         }
1530         return 0;
1531 }
1532
1533 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1534 {
1535         int rc;
1536         unsigned int i;
1537         struct bnxt_cp_ring_info *cpr;
1538
1539         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1540
1541                 if (i >= bp->rx_cp_nr_rings)
1542                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1543                 else
1544                         cpr = bp->rx_queues[i]->cp_ring;
1545                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1546                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1547                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1548                         /*
1549                          * TODO: Need a better way to reset grp_info.stats_ctx
1550                          * for Rx rings only; stats_ctx is not saved for Tx
1551                          * rings in grp_info.
1552                          */
1553                         bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
1554                         if (rc)
1555                                 return rc;
1556                 }
1557         }
1558         return 0;
1559 }
1560
1561 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1562 {
1563         unsigned int i;
1564         int rc = 0;
1565
1566         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1567                 struct bnxt_tx_queue *txq;
1568                 struct bnxt_rx_queue *rxq;
1569                 struct bnxt_cp_ring_info *cpr;
1570
1571                 if (i >= bp->rx_cp_nr_rings) {
1572                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1573                         cpr = txq->cp_ring;
1574                 } else {
1575                         rxq = bp->rx_queues[i];
1576                         cpr = rxq->cp_ring;
1577                 }
1578
1579                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1580
1581                 if (rc)
1582                         return rc;
1583         }
1584         return rc;
1585 }
1586
1587 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1588 {
1589         uint16_t idx;
1590         int rc = 0;
1591
1592         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1593
1594                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
1595                         RTE_LOG(ERR, PMD,
1596                                 "Attempt to free invalid ring group %d\n",
1597                                 idx);
1598                         continue;
1599                 }
1600
1601                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1602
1603                 if (rc)
1604                         return rc;
1605         }
1606         return rc;
1607 }
1608
1609 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1610                                 unsigned int idx)
1611 {
1612         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1613
1614         bnxt_hwrm_ring_free(bp, cp_ring,
1615                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1616         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1617         bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
1618         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1619                         sizeof(*cpr->cp_desc_ring));
1620         cpr->cp_raw_cons = 0;
1621 }
1622
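/*
 * Ring group index convention used below: group 0 is reserved for the
 * default completion ring, RX queue i maps to grp_info[i + 1], and the
 * completion ring of TX queue i uses index rx_cp_nr_rings + i + 1.
 */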
1623 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1624 {
1625         unsigned int i;
1626         int rc = 0;
1627
1628         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1629                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1630                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1631                 struct bnxt_ring *ring = txr->tx_ring_struct;
1632                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1633                 unsigned int idx = bp->rx_cp_nr_rings + i + 1;
1634
1635                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1636                         bnxt_hwrm_ring_free(bp, ring,
1637                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1638                         ring->fw_ring_id = INVALID_HW_RING_ID;
1639                         memset(txr->tx_desc_ring, 0,
1640                                         txr->tx_ring_struct->ring_size *
1641                                         sizeof(*txr->tx_desc_ring));
1642                         memset(txr->tx_buf_ring, 0,
1643                                         txr->tx_ring_struct->ring_size *
1644                                         sizeof(*txr->tx_buf_ring));
1645                         txr->tx_prod = 0;
1646                         txr->tx_cons = 0;
1647                 }
1648                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1649                         bnxt_free_cp_ring(bp, cpr, idx);
1650                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1651                 }
1652         }
1653
1654         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1655                 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1656                 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1657                 struct bnxt_ring *ring = rxr->rx_ring_struct;
1658                 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1659                 unsigned int idx = i + 1;
1660
1661                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1662                         bnxt_hwrm_ring_free(bp, ring,
1663                                         HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1664                         ring->fw_ring_id = INVALID_HW_RING_ID;
1665                         bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
1666                         memset(rxr->rx_desc_ring, 0,
1667                                         rxr->rx_ring_struct->ring_size *
1668                                         sizeof(*rxr->rx_desc_ring));
1669                         memset(rxr->rx_buf_ring, 0,
1670                                         rxr->rx_ring_struct->ring_size *
1671                                         sizeof(*rxr->rx_buf_ring));
1672                         rxr->rx_prod = 0;
1673                         memset(rxr->ag_buf_ring, 0,
1674                                         rxr->ag_ring_struct->ring_size *
1675                                         sizeof(*rxr->ag_buf_ring));
1676                         rxr->ag_prod = 0;
1677                 }
1678                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1679                         bnxt_free_cp_ring(bp, cpr, idx);
1680                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
1681                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1682                 }
1683         }
1684
1685         /* Default completion ring */
1686         {
1687                 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1688
1689                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1690                         bnxt_free_cp_ring(bp, cpr, 0);
1691                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1692                 }
1693         }
1694
1695         return rc;
1696 }
1697
1698 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1699 {
1700         uint16_t i;
1701         int rc = 0;
1702
1703         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1704                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1705                 if (rc)
1706                         return rc;
1707         }
1708         return rc;
1709 }
1710
1711 void bnxt_free_hwrm_resources(struct bnxt *bp)
1712 {
1713         /* Release the HWRM response and short command request buffers */
1714         rte_free(bp->hwrm_cmd_resp_addr);
1715         rte_free(bp->hwrm_short_cmd_req_addr);
1716         bp->hwrm_cmd_resp_addr = NULL;
1717         bp->hwrm_short_cmd_req_addr = NULL;
1718         bp->hwrm_cmd_resp_dma_addr = 0;
1719         bp->hwrm_short_cmd_req_dma_addr = 0;
1720 }
1721
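/*
 * Allocate the DMA-able buffer used for HWRM responses. The buffer is
 * named after the PCI address so multiple ports do not collide, locked
 * into memory, and its physical address is recorded so the firmware can
 * write command responses into it.
 */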
1722 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1723 {
1724         struct rte_pci_device *pdev = bp->pdev;
1725         char type[RTE_MEMZONE_NAMESIZE];
1726
1727         snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
1728                 pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1729         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1730         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1731         if (bp->hwrm_cmd_resp_addr == NULL)
1732                 return -ENOMEM;
1733         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1734         bp->hwrm_cmd_resp_dma_addr =
1735                 rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
1736         if (bp->hwrm_cmd_resp_dma_addr == 0) {
1737                 RTE_LOG(ERR, PMD,
1738                         "unable to map response address to physical memory\n");
1739                 return -ENOMEM;
1740         }
1741         rte_spinlock_init(&bp->hwrm_lock);
1742
1743         return 0;
1744 }
1745
1746 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1747 {
1748         struct bnxt_filter_info *filter;
1749         int rc = 0;
1750
1751         STAILQ_FOREACH(filter, &vnic->filter, next) {
1752                 rc = bnxt_hwrm_clear_filter(bp, filter);
1753                 if (rc)
1754                         break;
1755         }
1756         return rc;
1757 }
1758
1759 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1760 {
1761         struct bnxt_filter_info *filter;
1762         int rc = 0;
1763
1764         STAILQ_FOREACH(filter, &vnic->filter, next) {
1765                 rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
1766                 if (rc)
1767                         break;
1768         }
1769         return rc;
1770 }
1771
1772 void bnxt_free_tunnel_ports(struct bnxt *bp)
1773 {
1774         if (bp->vxlan_port_cnt)
1775                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
1776                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
1777         bp->vxlan_port = 0;
1778         if (bp->geneve_port_cnt)
1779                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
1780                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
1781         bp->geneve_port = 0;
1782 }
1783
1784 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1785 {
1786         struct bnxt_vnic_info *vnic;
1787         unsigned int i;
1788
1789         if (bp->vnic_info == NULL)
1790                 return;
1791
1792         vnic = &bp->vnic_info[0];
1793         if (BNXT_PF(bp))
1794                 bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
1795
1796         /* VNIC resources */
1797         for (i = 0; i < bp->nr_vnics; i++) {
1798                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1799
1800                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1801
1802                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1803
1804                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
1805
1806                 bnxt_hwrm_vnic_free(bp, vnic);
1807         }
1808         /* Ring resources */
1809         bnxt_free_all_hwrm_rings(bp);
1810         bnxt_free_all_hwrm_ring_grps(bp);
1811         bnxt_free_all_hwrm_stat_ctxs(bp);
1812         bnxt_free_tunnel_ports(bp);
1813 }
1814
1815 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1816 {
1817         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1818
1819         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1820                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1821
1822         switch (conf_link_speed) {
1823         case ETH_LINK_SPEED_10M_HD:
1824         case ETH_LINK_SPEED_100M_HD:
1825                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1826         }
1827         return hw_link_duplex;
1828 }
1829
1830 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
1831 {
1832         uint16_t eth_link_speed = 0;
1833
1834         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
1835                 return ETH_LINK_SPEED_AUTONEG;
1836
1837         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
1838         case ETH_LINK_SPEED_100M:
1839         case ETH_LINK_SPEED_100M_HD:
1840                 eth_link_speed =
1841                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
1842                 break;
1843         case ETH_LINK_SPEED_1G:
1844                 eth_link_speed =
1845                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
1846                 break;
1847         case ETH_LINK_SPEED_2_5G:
1848                 eth_link_speed =
1849                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
1850                 break;
1851         case ETH_LINK_SPEED_10G:
1852                 eth_link_speed =
1853                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
1854                 break;
1855         case ETH_LINK_SPEED_20G:
1856                 eth_link_speed =
1857                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
1858                 break;
1859         case ETH_LINK_SPEED_25G:
1860                 eth_link_speed =
1861                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
1862                 break;
1863         case ETH_LINK_SPEED_40G:
1864                 eth_link_speed =
1865                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
1866                 break;
1867         case ETH_LINK_SPEED_50G:
1868                 eth_link_speed =
1869                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
1870                 break;
1871         default:
1872                 RTE_LOG(ERR, PMD,
1873                         "Unsupported link speed %d; default to AUTO\n",
1874                         conf_link_speed);
1875                 break;
1876         }
1877         return eth_link_speed;
1878 }
1879
1880 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
1881                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
1882                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
1883                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
1884
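/*
 * A fixed link speed must select exactly one speed bit.
 * "one_speed & (one_speed - 1)" is the usual single-bit (power-of-two)
 * test: it is non-zero whenever more than one bit is set. For example,
 * ETH_LINK_SPEED_FIXED | ETH_LINK_SPEED_10G passes, while
 * ETH_LINK_SPEED_FIXED | ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G is
 * rejected.
 */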
1885 static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
1886 {
1887         uint32_t one_speed;
1888
1889         if (link_speed == ETH_LINK_SPEED_AUTONEG)
1890                 return 0;
1891
1892         if (link_speed & ETH_LINK_SPEED_FIXED) {
1893                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
1894
1895                 if (one_speed & (one_speed - 1)) {
1896                         RTE_LOG(ERR, PMD,
1897                                 "Invalid advertised speeds (%u) for port %u\n",
1898                                 link_speed, port_id);
1899                         return -EINVAL;
1900                 }
1901                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
1902                         RTE_LOG(ERR, PMD,
1903                                 "Unsupported advertised speed (%u) for port %u\n",
1904                                 link_speed, port_id);
1905                         return -EINVAL;
1906                 }
1907         } else {
1908                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
1909                         RTE_LOG(ERR, PMD,
1910                                 "Unsupported advertised speeds (%u) for port %u\n",
1911                                 link_speed, port_id);
1912                         return -EINVAL;
1913                 }
1914         }
1915         return 0;
1916 }
1917
1918 static uint16_t
1919 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
1920 {
1921         uint16_t ret = 0;
1922
1923         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
1924                 if (bp->link_info.support_speeds)
1925                         return bp->link_info.support_speeds;
1926                 link_speed = BNXT_SUPPORTED_SPEEDS;
1927         }
1928
1929         if (link_speed & ETH_LINK_SPEED_100M)
1930                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1931         if (link_speed & ETH_LINK_SPEED_100M_HD)
1932                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1933         if (link_speed & ETH_LINK_SPEED_1G)
1934                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
1935         if (link_speed & ETH_LINK_SPEED_2_5G)
1936                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
1937         if (link_speed & ETH_LINK_SPEED_10G)
1938                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
1939         if (link_speed & ETH_LINK_SPEED_20G)
1940                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
1941         if (link_speed & ETH_LINK_SPEED_25G)
1942                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
1943         if (link_speed & ETH_LINK_SPEED_40G)
1944                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
1945         if (link_speed & ETH_LINK_SPEED_50G)
1946                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
1947         return ret;
1948 }
1949
1950 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
1951 {
1952         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
1953
1954         switch (hw_link_speed) {
1955         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
1956                 eth_link_speed = ETH_SPEED_NUM_100M;
1957                 break;
1958         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
1959                 eth_link_speed = ETH_SPEED_NUM_1G;
1960                 break;
1961         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
1962                 eth_link_speed = ETH_SPEED_NUM_2_5G;
1963                 break;
1964         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
1965                 eth_link_speed = ETH_SPEED_NUM_10G;
1966                 break;
1967         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
1968                 eth_link_speed = ETH_SPEED_NUM_20G;
1969                 break;
1970         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
1971                 eth_link_speed = ETH_SPEED_NUM_25G;
1972                 break;
1973         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
1974                 eth_link_speed = ETH_SPEED_NUM_40G;
1975                 break;
1976         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
1977                 eth_link_speed = ETH_SPEED_NUM_50G;
1978                 break;
1979         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
1980         default:
1981                 RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
1982                         hw_link_speed);
1983                 break;
1984         }
1985         return eth_link_speed;
1986 }
1987
1988 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
1989 {
1990         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1991
1992         switch (hw_link_duplex) {
1993         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
1994         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
1995                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1996                 break;
1997         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
1998                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
1999                 break;
2000         default:
2001                 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
2002                         hw_link_duplex);
2003                 break;
2004         }
2005         return eth_link_duplex;
2006 }
2007
2008 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2009 {
2010         int rc = 0;
2011         struct bnxt_link_info *link_info = &bp->link_info;
2012
2013         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2014         if (rc) {
2015                 RTE_LOG(ERR, PMD,
2016                         "Get link config failed with rc %d\n", rc);
2017                 goto exit;
2018         }
2019         if (link_info->link_speed)
2020                 link->link_speed =
2021                         bnxt_parse_hw_link_speed(link_info->link_speed);
2022         else
2023                 link->link_speed = ETH_SPEED_NUM_NONE;
2024         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2025         link->link_status = link_info->link_up;
2026         link->link_autoneg = link_info->auto_mode ==
2027                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2028                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2029 exit:
2030         return rc;
2031 }
2032
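/*
 * Apply the link configuration from dev_conf. NPAR PFs and VFs do not
 * own the PHY, so this is a no-op for them. When the parsed speed is 0
 * (autonegotiation), the advertised speed mask is programmed and
 * autoneg is restarted; otherwise a single forced speed is configured.
 */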
2033 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2034 {
2035         int rc = 0;
2036         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2037         struct bnxt_link_info link_req;
2038         uint16_t speed;
2039
2040         if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
2041                 return 0;
2042
2043         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2044                         bp->eth_dev->data->port_id);
2045         if (rc)
2046                 goto error;
2047
2048         memset(&link_req, 0, sizeof(link_req));
2049         link_req.link_up = link_up;
2050         if (!link_up)
2051                 goto port_phy_cfg;
2052
2053         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2054         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2055         if (speed == 0) {
2056                 link_req.phy_flags |=
2057                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2058                 link_req.auto_mode =
2059                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
2060                 link_req.auto_link_speed_mask =
2061                         bnxt_parse_eth_link_speed_mask(bp,
2062                                                        dev_conf->link_speeds);
2063         } else {
2064                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2065                 link_req.link_speed = speed;
2066                 RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
2067         }
2068         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2069         link_req.auto_pause = bp->link_info.auto_pause;
2070         link_req.force_pause = bp->link_info.force_pause;
2071
2072 port_phy_cfg:
2073         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2074         if (rc) {
2075                 RTE_LOG(ERR, PMD,
2076                         "Set link config failed with rc %d\n", rc);
2077         }
2078
2079 error:
2080         return rc;
2081 }
2082
2083 /* JIRA 22088 */
2084 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2085 {
2086         struct hwrm_func_qcfg_input req = {0};
2087         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2088         int rc = 0;
2089
2090         HWRM_PREP(req, FUNC_QCFG);
2091         req.fid = rte_cpu_to_le_16(0xffff);
2092
2093         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2094
2095         HWRM_CHECK_RESULT();
2096
2097         /* Hard-coded 0xfff VLAN ID mask */
2098         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2099
2100         switch (resp->port_partition_type) {
2101         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2102         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2103         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2104                 bp->port_partition_type = resp->port_partition_type;
2105                 break;
2106         default:
2107                 bp->port_partition_type = 0;
2108                 break;
2109         }
2110
2111         HWRM_UNLOCK();
2112
2113         return rc;
2114 }
2115
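/*
 * Fallback used when querying a VF's capabilities fails: treat the
 * resource counts just requested via FUNC_CFG as if the firmware had
 * reported them, so the PF bookkeeping in the caller still subtracts a
 * sensible amount.
 */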
2116 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2117                                    struct hwrm_func_qcaps_output *qcaps)
2118 {
2119         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2120         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2121                sizeof(qcaps->mac_address));
2122         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2123         qcaps->max_rx_rings = fcfg->num_rx_rings;
2124         qcaps->max_tx_rings = fcfg->num_tx_rings;
2125         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2126         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2127         qcaps->max_vfs = 0;
2128         qcaps->first_vf_id = 0;
2129         qcaps->max_vnics = fcfg->num_vnics;
2130         qcaps->max_decap_records = 0;
2131         qcaps->max_encap_records = 0;
2132         qcaps->max_tx_wm_flows = 0;
2133         qcaps->max_tx_em_flows = 0;
2134         qcaps->max_rx_wm_flows = 0;
2135         qcaps->max_rx_em_flows = 0;
2136         qcaps->max_flow_id = 0;
2137         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2138         qcaps->max_sp_tx_rings = 0;
2139         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2140 }
2141
2142 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2143 {
2144         struct hwrm_func_cfg_input req = {0};
2145         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2146         int rc;
2147
2148         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2149                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2150                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2151                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2152                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2153                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2154                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2155                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2156                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2157                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2158         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2159         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2160         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2161                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
2162         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2163         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2164         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2165         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2166         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2167         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2168         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2169         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2170         req.fid = rte_cpu_to_le_16(0xffff);
2171
2172         HWRM_PREP(req, FUNC_CFG);
2173
2174         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2175
2176         HWRM_CHECK_RESULT();
2177         HWRM_UNLOCK();
2178
2179         return rc;
2180 }
2181
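/*
 * Split the PF's resources evenly between the PF itself and its VFs:
 * each function is given max / (num_vfs + 1) of each resource. As an
 * illustration (made-up numbers, not queried from hardware): with
 * max_tx_rings of 128 and 7 VFs, each of the 8 functions gets 16 TX
 * rings. VNICs are the exception and are pinned to 1 per VF, as
 * VMDq/RFS is not supported on VFs.
 */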
2182 static void populate_vf_func_cfg_req(struct bnxt *bp,
2183                                      struct hwrm_func_cfg_input *req,
2184                                      int num_vfs)
2185 {
2186         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2187                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2188                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2189                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2190                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2191                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2192                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2193                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2194                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2195                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2196
2197         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2198                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
2199         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2200                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
2201         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2202                                                 (num_vfs + 1));
2203         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2204         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2205                                                (num_vfs + 1));
2206         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2207         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2208         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2209         /* TODO: For now, do not support VMDq/RFS on VFs. */
2210         req->num_vnics = rte_cpu_to_le_16(1);
2211         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2212                                                  (num_vfs + 1));
2213 }
2214
2215 static void add_random_mac_if_needed(struct bnxt *bp,
2216                                      struct hwrm_func_cfg_input *cfg_req,
2217                                      int vf)
2218 {
2219         struct ether_addr mac;
2220
2221         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2222                 return;
2223
2224         if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2225                 cfg_req->enables |=
2226                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2227                 eth_random_addr(cfg_req->dflt_mac_addr);
2228                 bp->pf.vf_info[vf].random_mac = true;
2229         } else {
2230                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2231         }
2232 }
2233
2234 static void reserve_resources_from_vf(struct bnxt *bp,
2235                                       struct hwrm_func_cfg_input *cfg_req,
2236                                       int vf)
2237 {
2238         struct hwrm_func_qcaps_input req = {0};
2239         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2240         int rc;
2241
2242         /* Get the actual allocated values now */
2243         HWRM_PREP(req, FUNC_QCAPS);
2244         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2245         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2246
2247         if (rc) {
2248                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
2249                 copy_func_cfg_to_qcaps(cfg_req, resp);
2250         } else if (resp->error_code) {
2251                 rc = rte_le_to_cpu_16(resp->error_code);
2252                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
2253                 copy_func_cfg_to_qcaps(cfg_req, resp);
2254         }
2255
2256         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2257         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2258         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2259         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2260         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2261         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2262         /*
2263          * TODO: While VMDq is not supported with VFs, max_vnics is always
2264          * forced to 1 in this case, so there is nothing to subtract:
2265          * bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
2266          */
2267         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2268
2269         HWRM_UNLOCK();
2270 }
2271
2272 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2273 {
2274         struct hwrm_func_qcfg_input req = {0};
2275         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2276         int rc;
2277
2278         /* Query the default VLAN currently configured for this VF */
2279         HWRM_PREP(req, FUNC_QCFG);
2280         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2281         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2282         if (rc) {
2283                 RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
2284                 return -1;
2285         } else if (resp->error_code) {
2286                 rc = rte_le_to_cpu_16(resp->error_code);
2287                 RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
2288                 return -1;
2289         }
2290         rc = rte_le_to_cpu_16(resp->vlan);
2291
2292         HWRM_UNLOCK();
2293
2294         return rc;
2295 }
2296
2297 static int update_pf_resource_max(struct bnxt *bp)
2298 {
2299         struct hwrm_func_qcfg_input req = {0};
2300         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2301         int rc;
2302
2303         /* And copy the allocated numbers into the pf struct */
2304         HWRM_PREP(req, FUNC_QCFG);
2305         req.fid = rte_cpu_to_le_16(0xffff);
2306         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2307         HWRM_CHECK_RESULT();
2308
2309         /* Only TX ring value reflects actual allocation? TODO */
2310         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2311         bp->pf.evb_mode = resp->evb_mode;
2312
2313         HWRM_UNLOCK();
2314
2315         return rc;
2316 }
2317
2318 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2319 {
2320         int rc;
2321
2322         if (!BNXT_PF(bp)) {
2323                 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
2324                 return -1;
2325         }
2326
2327         rc = bnxt_hwrm_func_qcaps(bp);
2328         if (rc)
2329                 return rc;
2330
2331         bp->pf.func_cfg_flags &=
2332                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2333                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2334         bp->pf.func_cfg_flags |=
2335                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2336         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2337         return rc;
2338 }
2339
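/*
 * VF allocation sequence: query PF capabilities, shrink the PF to a
 * single TX ring so enough rings remain for the VFs, allocate and
 * register the buffer that receives forwarded VF requests, configure
 * each VF (assigning a random MAC where the firmware reports an
 * all-zero one) while subtracting its share from the PF maxima, and
 * finally grow the PF back into whatever resources remain.
 */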
2340 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2341 {
2342         struct hwrm_func_cfg_input req = {0};
2343         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2344         int i;
2345         size_t sz;
2346         int rc = 0;
2347         size_t req_buf_sz;
2348
2349         if (!BNXT_PF(bp)) {
2350                 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
2351                 return -1;
2352         }
2353
2354         rc = bnxt_hwrm_func_qcaps(bp);
2355
2356         if (rc)
2357                 return rc;
2358
2359         bp->pf.active_vfs = num_vfs;
2360
2361         /*
2362          * First, configure the PF to only use one TX ring.  This ensures that
2363          * there are enough rings for all VFs.
2364          *
2365          * If we don't do this, when we call func_alloc() later, we will lock
2366          * extra rings to the PF that won't be available during func_cfg() of
2367          * the VFs.
2368          *
2369          * This has been fixed with firmware versions above 20.6.54.
2370          */
2371         bp->pf.func_cfg_flags &=
2372                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2373                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2374         bp->pf.func_cfg_flags |=
2375                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2376         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2377         if (rc)
2378                 return rc;
2379
2380         /*
2381          * Now, create and register a buffer to hold forwarded VF requests
2382          */
2383         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2384         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2385                 page_roundup(req_buf_sz));
2386         if (bp->pf.vf_req_buf == NULL) {
2387                 rc = -ENOMEM;
2388                 goto error_free;
2389         }
2390         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2391                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2392         for (i = 0; i < num_vfs; i++)
2393                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2394                                         (i * HWRM_MAX_REQ_LEN);
2395
2396         rc = bnxt_hwrm_func_buf_rgtr(bp);
2397         if (rc)
2398                 goto error_free;
2399
2400         populate_vf_func_cfg_req(bp, &req, num_vfs);
2401
2402         bp->pf.active_vfs = 0;
2403         for (i = 0; i < num_vfs; i++) {
2404                 add_random_mac_if_needed(bp, &req, i);
2405
2406                 HWRM_PREP(req, FUNC_CFG);
2407                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2408                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2409                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2410
2411                 /* Clear enable flag for next pass */
2412                 req.enables &= ~rte_cpu_to_le_32(
2413                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2414
2415                 if (rc || resp->error_code) {
2416                         RTE_LOG(ERR, PMD,
2417                                 "Failed to initialize VF %d\n", i);
2418                         RTE_LOG(ERR, PMD,
2419                                 "Not all VFs available. (%d, %d)\n",
2420                                 rc, resp->error_code);
2421                         HWRM_UNLOCK();
2422                         break;
2423                 }
2424
2425                 HWRM_UNLOCK();
2426
2427                 reserve_resources_from_vf(bp, &req, i);
2428                 bp->pf.active_vfs++;
2429                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2430         }
2431
2432         /*
2433          * Now configure the PF to use "the rest" of the resources.
2434          * STD_TX_RING_MODE is used here, which limits the number of TX
2435          * rings but allows QoS to function properly; without it, the PF
2436          * rings would break the bandwidth settings.
2437          */
2438         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2439         if (rc)
2440                 goto error_free;
2441
2442         rc = update_pf_resource_max(bp);
2443         if (rc)
2444                 goto error_free;
2445
2446         return rc;
2447
2448 error_free:
2449         bnxt_hwrm_func_buf_unrgtr(bp);
2450         return rc;
2451 }
2452
2453 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2454 {
2455         struct hwrm_func_cfg_input req = {0};
2456         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2457         int rc;
2458
2459         HWRM_PREP(req, FUNC_CFG);
2460
2461         req.fid = rte_cpu_to_le_16(0xffff);
2462         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2463         req.evb_mode = bp->pf.evb_mode;
2464
2465         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2466         HWRM_CHECK_RESULT();
2467         HWRM_UNLOCK();
2468
2469         return rc;
2470 }
2471
2472 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2473                                 uint8_t tunnel_type)
2474 {
2475         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2476         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2477         int rc = 0;
2478
2479         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
2480         req.tunnel_type = tunnel_type;
2481         req.tunnel_dst_port_val = port;
2482         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2483         HWRM_CHECK_RESULT();
2484
2485         switch (tunnel_type) {
2486         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2487                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2488                 bp->vxlan_port = port;
2489                 break;
2490         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2491                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2492                 bp->geneve_port = port;
2493                 break;
2494         default:
2495                 break;
2496         }
2497
2498         HWRM_UNLOCK();
2499
2500         return rc;
2501 }
2502
2503 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2504                                 uint8_t tunnel_type)
2505 {
2506         struct hwrm_tunnel_dst_port_free_input req = {0};
2507         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2508         int rc = 0;
2509
2510         HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
2511
2512         req.tunnel_type = tunnel_type;
2513         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2514         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2515
2516         HWRM_CHECK_RESULT();
2517         HWRM_UNLOCK();
2518
2519         return rc;
2520 }
2521
2522 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2523                                         uint32_t flags)
2524 {
2525         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2526         struct hwrm_func_cfg_input req = {0};
2527         int rc;
2528
2529         HWRM_PREP(req, FUNC_CFG);
2530
2531         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2532         req.flags = rte_cpu_to_le_32(flags);
2533         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2534
2535         HWRM_CHECK_RESULT();
2536         HWRM_UNLOCK();
2537
2538         return rc;
2539 }
2540
2541 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2542 {
2543         uint32_t *flag = flagp;
2544
2545         vnic->flags = *flag;
2546 }
2547
2548 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2549 {
2550         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2551 }
2552
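/*
 * Register the VF request forwarding buffer with the firmware. A single
 * physically contiguous page is used, and its size is communicated as a
 * log2 page size enum via page_getenum(). For example (64 is only an
 * illustrative VF count): 64 active VFs at HWRM_MAX_REQ_LEN (128 bytes,
 * the HWRM maximum command size) need 8192 bytes, which encodes as 13
 * since 2^13 = 8192.
 */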
2553 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2554 {
2555         int rc = 0;
2556         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2557         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2558
2559         HWRM_PREP(req, FUNC_BUF_RGTR);
2560
2561         req.req_buf_num_pages = rte_cpu_to_le_16(1);
2562         req.req_buf_page_size = rte_cpu_to_le_16(
2563                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2564         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2565         req.req_buf_page_addr[0] =
2566                 rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
2567         if (req.req_buf_page_addr[0] == 0) {
2568                 RTE_LOG(ERR, PMD,
2569                         "unable to map buffer address to physical memory\n");
2570                 return -ENOMEM;
2571         }
2572
2573         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2574
2575         HWRM_CHECK_RESULT();
2576         HWRM_UNLOCK();
2577
2578         return rc;
2579 }
2580
2581 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2582 {
2583         int rc = 0;
2584         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2585         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2586
2587         HWRM_PREP(req, FUNC_BUF_UNRGTR);
2588
2589         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2590
2591         HWRM_CHECK_RESULT();
2592         HWRM_UNLOCK();
2593
2594         return rc;
2595 }
2596
2597 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2598 {
2599         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2600         struct hwrm_func_cfg_input req = {0};
2601         int rc;
2602
2603         HWRM_PREP(req, FUNC_CFG);
2604
2605         req.fid = rte_cpu_to_le_16(0xffff);
2606         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2607         req.enables = rte_cpu_to_le_32(
2608                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2609         req.async_event_cr = rte_cpu_to_le_16(
2610                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2611         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2612
2613         HWRM_CHECK_RESULT();
2614         HWRM_UNLOCK();
2615
2616         return rc;
2617 }
2618
2619 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2620 {
2621         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2622         struct hwrm_func_vf_cfg_input req = {0};
2623         int rc;
2624
2625         HWRM_PREP(req, FUNC_VF_CFG);
2626
2627         req.enables = rte_cpu_to_le_32(
2628                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2629         req.async_event_cr = rte_cpu_to_le_16(
2630                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2631         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2632
2633         HWRM_CHECK_RESULT();
2634         HWRM_UNLOCK();
2635
2636         return rc;
2637 }
2638
2639 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2640 {
2641         struct hwrm_func_cfg_input req = {0};
2642         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2643         uint16_t dflt_vlan, fid;
2644         uint32_t func_cfg_flags;
2645         int rc = 0;
2646
2647         HWRM_PREP(req, FUNC_CFG);
2648
2649         if (is_vf) {
2650                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2651                 fid = bp->pf.vf_info[vf].fid;
2652                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2653         } else {
2654                 fid = rte_cpu_to_le_16(0xffff);
2655                 func_cfg_flags = bp->pf.func_cfg_flags;
2656                 dflt_vlan = bp->vlan;
2657         }
2658
2659         req.flags = rte_cpu_to_le_32(func_cfg_flags);
2660         req.fid = rte_cpu_to_le_16(fid);
2661         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2662         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2663
2664         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2665
2666         HWRM_CHECK_RESULT();
2667         HWRM_UNLOCK();
2668
2669         return rc;
2670 }
2671
2672 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2673                         uint16_t max_bw, uint16_t enables)
2674 {
2675         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2676         struct hwrm_func_cfg_input req = {0};
2677         int rc;
2678
2679         HWRM_PREP(req, FUNC_CFG);
2680
2681         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2682         req.enables |= rte_cpu_to_le_32(enables);
2683         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2684         req.max_bw = rte_cpu_to_le_32(max_bw);
2685         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2686
2687         HWRM_CHECK_RESULT();
2688         HWRM_UNLOCK();
2689
2690         return rc;
2691 }
2692
2693 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2694 {
2695         struct hwrm_func_cfg_input req = {0};
2696         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2697         int rc = 0;
2698
2699         HWRM_PREP(req, FUNC_CFG);
2700
2701         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2702         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2703         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2704         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
2705
2706         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2707
2708         HWRM_CHECK_RESULT();
2709         HWRM_UNLOCK();
2710
2711         return rc;
2712 }
2713
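/*
 * bnxt_hwrm_reject_fwd_resp() and bnxt_hwrm_exec_fwd_resp() below are
 * the two possible answers to a VF command that was forwarded to the
 * PF: the former tells the firmware to reject the encapsulated request,
 * the latter to execute it on the VF's behalf. Both return -1 if the
 * encapsulated request does not fit in the encap_request field.
 */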
2714 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2715                               void *encaped, size_t ec_size)
2716 {
2717         int rc = 0;
2718         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2719         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2720
2721         if (ec_size > sizeof(req.encap_request))
2722                 return -1;
2723
2724         HWRM_PREP(req, REJECT_FWD_RESP);
2725
2726         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2727         memcpy(req.encap_request, encaped, ec_size);
2728
2729         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2730
2731         HWRM_CHECK_RESULT();
2732         HWRM_UNLOCK();
2733
2734         return rc;
2735 }
2736
2737 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2738                                        struct ether_addr *mac)
2739 {
2740         struct hwrm_func_qcfg_input req = {0};
2741         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2742         int rc;
2743
2744         HWRM_PREP(req, FUNC_QCFG);
2745
2746         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2747         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2748
2749         HWRM_CHECK_RESULT();
2750
2751         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2752
2753         HWRM_UNLOCK();
2754
2755         return rc;
2756 }
2757
2758 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2759                             void *encaped, size_t ec_size)
2760 {
2761         int rc = 0;
2762         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2763         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2764
2765         if (ec_size > sizeof(req.encap_request))
2766                 return -1;
2767
2768         HWRM_PREP(req, EXEC_FWD_RESP);
2769
2770         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2771         memcpy(req.encap_request, encaped, ec_size);
2772
2773         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2774
2775         HWRM_CHECK_RESULT();
2776         HWRM_UNLOCK();
2777
2778         return rc;
2779 }
2780
2781 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
2782                          struct rte_eth_stats *stats)
2783 {
2784         int rc = 0;
2785         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
2786         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
2787
2788         HWRM_PREP(req, STAT_CTX_QUERY);
2789
2790         req.stat_ctx_id = rte_cpu_to_le_32(cid);
2791
2792         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2793
2794         HWRM_CHECK_RESULT();
2795
2796         stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2797         stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2798         stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2799         stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2800         stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2801         stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2802
2803         stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2804         stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2805         stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2806         stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2807         stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2808         stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2809
2810         stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
2811         stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
2812         stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
2813
2814         HWRM_UNLOCK();
2815
2816         return rc;
2817 }
2818
2819 int bnxt_hwrm_port_qstats(struct bnxt *bp)
2820 {
2821         struct hwrm_port_qstats_input req = {0};
2822         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2823         struct bnxt_pf_info *pf = &bp->pf;
2824         int rc;
2825
2826         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2827                 return 0;
2828
2829         HWRM_PREP(req, PORT_QSTATS);
2830
2831         req.port_id = rte_cpu_to_le_16(pf->port_id);
2832         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
2833         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
2834         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2835
2836         HWRM_CHECK_RESULT();
2837         HWRM_UNLOCK();
2838
2839         return rc;
2840 }
2841
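/* Clear the hardware port statistics. PF with port stats support only. */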
2842 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
2843 {
2844         struct hwrm_port_clr_stats_input req = {0};
2845         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2846         struct bnxt_pf_info *pf = &bp->pf;
2847         int rc;
2848
2849         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2850                 return 0;
2851
2852         HWRM_PREP(req, PORT_CLR_STATS);
2853
2854         req.port_id = rte_cpu_to_le_16(pf->port_id);
2855         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2856
2857         HWRM_CHECK_RESULT();
2858         HWRM_UNLOCK();
2859
2860         return rc;
2861 }
2862
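/*
 * Query the port LED capabilities. The LEDs are recorded for later use only
 * if every reported LED has a group ID and supports alternate blinking;
 * otherwise bp->num_leds stays zero and LED control remains disabled.
 */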
2863 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
2864 {
2865         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2866         struct hwrm_port_led_qcaps_input req = {0};
2867         int rc;
2868
2869         if (BNXT_VF(bp))
2870                 return 0;
2871
2872         HWRM_PREP(req, PORT_LED_QCAPS);
        req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
2874         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2875
2876         HWRM_CHECK_RESULT();
2877
2878         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
2879                 unsigned int i;
2880
2881                 bp->num_leds = resp->num_leds;
2882                 memcpy(bp->leds, &resp->led0_id,
2883                         sizeof(bp->leds[0]) * bp->num_leds);
2884                 for (i = 0; i < bp->num_leds; i++) {
                        struct bnxt_led_info *led = &bp->leds[i];
                        uint16_t caps = led->led_state_caps;

2889                         if (!led->led_group_id ||
2890                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
2891                                 bp->num_leds = 0;
2892                                 break;
2893                         }
2894                 }
2895         }
2896
2897         HWRM_UNLOCK();
2898
2899         return rc;
2900 }
2901
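/*
 * Blink the port identification LEDs (alternate-blink state, 500 ms on/off
 * per the HWRM blink duration fields) or restore their default state.
 */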
2902 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
2903 {
2904         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2905         struct hwrm_port_led_cfg_input req = {0};
2906         struct bnxt_led_cfg *led_cfg;
2907         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
2908         uint16_t duration = 0;
2909         int rc, i;
2910
2911         if (!bp->num_leds || BNXT_VF(bp))
2912                 return -EOPNOTSUPP;
2913
2914         HWRM_PREP(req, PORT_LED_CFG);
2915
2916         if (led_on) {
2917                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
2918                 duration = rte_cpu_to_le_16(500);
2919         }
        req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
2921         req.num_leds = bp->num_leds;
2922         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
2923         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
2924                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
2925                 led_cfg->led_id = bp->leds[i].led_id;
2926                 led_cfg->led_state = led_state;
2927                 led_cfg->led_blink_on = duration;
2928                 led_cfg->led_blink_off = duration;
2929                 led_cfg->led_group_id = bp->leds[i].led_group_id;
2930         }
2931
2932         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2933
2934         HWRM_CHECK_RESULT();
2935         HWRM_UNLOCK();
2936
2937         return rc;
2938 }
2939
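/* Callback for bnxt_vf_vnic_count(): bump the VNIC counter in 'cbdata'. */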
2940 static void
2941 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
2942 {
2943         uint32_t *count = cbdata;
2944
        (*count)++;
2946 }
2947
2948 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
2949                                      struct bnxt_vnic_info *vnic __rte_unused)
2950 {
2951         return 0;
2952 }
2953
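/* Return the number of active VNICs for the given VF. */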
2954 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
2955 {
2956         uint32_t count = 0;
2957
2958         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
2959             &count, bnxt_vnic_count_hwrm_stub);
2960
2961         return count;
2962 }
2963
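/*
 * Fill 'vnic_ids' with the IDs of the VNICs in use by the given VF.
 * The table must hold bp->pf.total_vnics entries and be mappable to a
 * physical address, since the firmware fills it by DMA. Returns the
 * number of IDs written, or a negative value on failure.
 */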
2964 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
2965                                         uint16_t *vnic_ids)
2966 {
2967         struct hwrm_func_vf_vnic_ids_query_input req = {0};
2968         struct hwrm_func_vf_vnic_ids_query_output *resp =
2969                                                 bp->hwrm_cmd_resp_addr;
2970         int rc;
2971
2972         /* First query all VNIC ids */
2973         HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
2974
2975         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
2976         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
2977         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));
2978
2979         if (req.vnic_id_tbl_addr == 0) {
2980                 HWRM_UNLOCK();
                RTE_LOG(ERR, PMD,
                        "unable to map VNIC ID table address to physical memory\n");
2983                 return -ENOMEM;
2984         }
2985         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2986         if (rc) {
2987                 HWRM_UNLOCK();
2988                 RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
2989                 return -1;
2990         } else if (resp->error_code) {
2991                 rc = rte_le_to_cpu_16(resp->error_code);
2992                 HWRM_UNLOCK();
2993                 RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
2994                 return -1;
2995         }
2996         rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
2997
2998         HWRM_UNLOCK();
2999
3000         return rc;
3001 }
3002
3003 /*
3004  * This function queries the VNIC IDs  for a specified VF. It then calls
3005  * the vnic_cb to update the necessary field in vnic_info with cbdata.
3006  * Then it calls the hwrm_cb function to program this new vnic configuration.
3007  */
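/*
 * A minimal usage sketch, mirroring what bnxt_vf_vnic_count() earlier in
 * this file does (a counting vnic_cb plus a no-op hwrm_cb, so nothing is
 * actually reprogrammed):
 *
 *	uint32_t count = 0;
 *
 *	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
 *	    &count, bnxt_vnic_count_hwrm_stub);
 */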
3008 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3009         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3010         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3011 {
3012         struct bnxt_vnic_info vnic;
3013         int rc = 0;
3014         int i, num_vnic_ids;
3015         uint16_t *vnic_ids;
3016         size_t vnic_id_sz;
3017         size_t sz;
3018
3019         /* First query all VNIC ids */
3020         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3021         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3022                         RTE_CACHE_LINE_SIZE);
3023         if (vnic_ids == NULL) {
3024                 rc = -ENOMEM;
3025                 return rc;
3026         }
3027         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3028                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3029
3030         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3031
        if (num_vnic_ids < 0) {
                /* fix: free the ID table before bailing out (memory leak) */
                rte_free(vnic_ids);
                return num_vnic_ids;
        }
3034
        /* Retrieve each VNIC, update it via vnic_cb, then reprogram via hwrm_cb */
3036
3037         for (i = 0; i < num_vnic_ids; i++) {
3038                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3039                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3040                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3041                 if (rc)
3042                         break;
3043                 if (vnic.mru <= 4)      /* Indicates unallocated */
3044                         continue;
3045
3046                 vnic_cb(&vnic, cbdata);
3047
3048                 rc = hwrm_cb(bp, &vnic);
3049                 if (rc)
3050                         break;
3051         }
3052
3053         rte_free(vnic_ids);
3054
3055         return rc;
3056 }
3057
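/* Enable or disable firmware VLAN anti-spoof checking for the given VF. */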
3058 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3059                                               bool on)
3060 {
3061         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3062         struct hwrm_func_cfg_input req = {0};
3063         int rc;
3064
3065         HWRM_PREP(req, FUNC_CFG);
3066
3067         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3068         req.enables |= rte_cpu_to_le_32(
3069                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3070         req.vlan_antispoof_mode = on ?
3071                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3072                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3073         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3074
3075         HWRM_CHECK_RESULT();
3076         HWRM_UNLOCK();
3077
3078         return rc;
3079 }
3080
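/*
 * Return the firmware VNIC ID of the given VF's default VNIC, or -1 if
 * none is found or a query fails.
 */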
3081 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3082 {
3083         struct bnxt_vnic_info vnic;
3084         uint16_t *vnic_ids;
3085         size_t vnic_id_sz;
3086         int num_vnic_ids, i;
3087         size_t sz;
3088         int rc;
3089
3090         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3091         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3092                         RTE_CACHE_LINE_SIZE);
3093         if (vnic_ids == NULL) {
3094                 rc = -ENOMEM;
3095                 return rc;
3096         }
3097
3098         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3099                 rte_mem_lock_page(((char *)vnic_ids) + sz);
3100
3101         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3102         if (rc <= 0)
3103                 goto exit;
3104         num_vnic_ids = rc;
3105
3106         /*
3107          * Loop through to find the default VNIC ID.
3108          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3109          * by sending the hwrm_func_qcfg command to the firmware.
3110          */
3111         for (i = 0; i < num_vnic_ids; i++) {
3112                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3113                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3114                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3115                                         bp->pf.first_vf_id + vf);
3116                 if (rc)
3117                         goto exit;
3118                 if (vnic.func_default) {
3119                         rte_free(vnic_ids);
3120                         return vnic.fw_vnic_id;
3121                 }
3122         }
3123         /* Could not find a default VNIC. */
3124         RTE_LOG(ERR, PMD, "No default VNIC\n");
3125 exit:
3126         rte_free(vnic_ids);
3127         return -1;
3128 }