net/bnxt: fix HWRM macros and locking
drivers/net/bnxt/bnxt_hwrm.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

#define HWRM_CMD_TIMEOUT                10000

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

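/*
 * Map a size to the exponent of the smallest supported HWRM page size that
 * fits it, e.g. page_getenum(3000) == 12 (4KB); page_roundup() returns the
 * rounded-up size itself. (Descriptive note added; sizes mirror the checks
 * below.)
 */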
static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), or a positive, non-zero HWRM error code if the
 * HWRM command is rejected by the ChiMP.
 */
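
/*
 * Example caller-side handling of that convention (an illustrative sketch
 * only; the log messages are not from this driver):
 *
 *	rc = bnxt_hwrm_func_reset(bp);
 *	if (rc < 0)
 *		RTE_LOG(ERR, PMD, "HWRM channel timed out\n");
 *	else if (rc > 0)
 *		RTE_LOG(ERR, PMD, "Command rejected by ChiMP: %d\n", rc);
 */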

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
                                        uint32_t msg_len)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;
        uint16_t max_req_len = bp->max_req_len;
        struct hwrm_short_input short_input = { 0 };

        if (bp->flags & BNXT_FLAG_SHORT_CMD) {
                void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

                memset(short_cmd_req, 0, bp->max_req_len);
                memcpy(short_cmd_req, req, msg_len);

                short_input.req_type = rte_cpu_to_le_16(req->req_type);
                short_input.signature = rte_cpu_to_le_16(
                                        HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
                short_input.size = rte_cpu_to_le_16(msg_len);
                short_input.req_addr =
                        rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

                data = (uint32_t *)&short_input;
                msg_len = sizeof(short_input);

                /* Sync memory write before updating doorbell */
                rte_wmb();

                max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
        }

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + 0x100;
        rte_write32(1, bar);

        /* Poll for the valid bit */
        for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
                uint16_t resp_len;

                /* Sanity check on the resp->resp_len */
                rte_rmb();
                resp_len = rte_le_to_cpu_16(resp->resp_len);
                if (resp_len && resp_len <= bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(600);
        }

        if (i >= HWRM_CMD_TIMEOUT) {
                RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
                        rte_le_to_cpu_16(req->req_type));
                goto err_ret;
        }
        return 0;

err_ret:
        return -1;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() checks the send return code and the HWRM response
 * for errors; on error it releases the spinlock and returns from the
 * calling function. If a function does not use the regular int return
 * convention, HWRM_CHECK_RESULT() should not be used directly; rather it
 * should be copied and modified to suit the function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed;
 * a canonical usage sketch follows the macro definitions below.
 */
#define HWRM_PREP(req, type) do { \
        rte_spinlock_lock(&bp->hwrm_lock); \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(-1); \
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT() do {\
        if (rc) { \
                RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
                        __func__, rc); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
        if (resp->error_code) { \
                rc = rte_le_to_cpu_16(resp->error_code); \
                if (resp->resp_len >= 16) { \
                        struct hwrm_err_output *tmp_hwrm_err_op = \
                                                (void *)resp; \
                        RTE_LOG(ERR, PMD, \
                                "%s error %d:%d:%08x:%04x\n", \
                                __func__, \
                                rc, tmp_hwrm_err_op->cmd_err, \
                                rte_le_to_cpu_32(\
                                        tmp_hwrm_err_op->opaque_0), \
                                rte_le_to_cpu_16(\
                                        tmp_hwrm_err_op->opaque_1)); \
                } else { \
                        RTE_LOG(ERR, PMD, \
                                "%s error %d\n", __func__, rc); \
                } \
                rte_spinlock_unlock(&bp->hwrm_lock); \
                return rc; \
        } \
} while (0)

#define HWRM_UNLOCK()           rte_spinlock_unlock(&bp->hwrm_lock)

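/*
 * Canonical usage of the macros above (an illustrative sketch only;
 * FUNC_RESET stands in for any real command):
 *
 *	int rc = 0;
 *	struct hwrm_func_reset_input req = {.req_type = 0 };
 *	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *	HWRM_PREP(req, FUNC_RESET);
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT();
 *	... read any resp fields while the lock is still held ...
 *	HWRM_UNLOCK();
 *	return rc;
 */
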
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME add multicast flag, when multicast adding options is supported
         * by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        if (vnic->flags & BNXT_VNIC_INFO_MCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
        if (vnic->mc_addr_cnt) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_table) {
                if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
                        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                /* The VLAN table address is a 64-bit DMA address */
                req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
                         rte_mem_virt2phy(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
                                    mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
                        uint16_t vlan_count,
                        struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
        struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        /*
         * Older HWRM versions did not support this command, and the
         * set_rx_mask list was used for anti-spoofing. In 1.8.0, the TX
         * path configuration was removed from the set_rx_mask call, and
         * this command was added.
         *
         * This command is also present in 1.7.8.11 and higher, as well as
         * in 1.7.8.0.
         */
        if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
                if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
                        if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
                                        (11)))
                                return 0;
                }
        }
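        /*
         * Note: bp->fw_ver packs the firmware version as
         * (maj << 24) | (min << 16) | (bld << 8) | rsvd, so the checks
         * above compare e.g. 1.7.8.11 as 0x0107080b.
         */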
        HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
        req.fid = rte_cpu_to_le_16(fid);

        req.vlan_tag_mask_tbl_addr =
                rte_cpu_to_le_64(rte_mem_virt2phy(vlan_table));
        req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_clear_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        HWRM_PREP(req, CFA_L2_FILTER_FREE);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        filter->fw_l2_filter_id = UINT64_MAX;

        return 0;
}

int bnxt_hwrm_set_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_filter(bp, filter);

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        int i;

        HWRM_PREP(req, FUNC_QCAPS);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        RTE_LOG(ERR, PMD,
                                        "Failed to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                bp->pf.vf_info[i].vlan_as_table =
                                        rte_zmalloc("VF VLAN AS table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_as_table == NULL)
                                        RTE_LOG(ERR, PMD,
                                        "Failed to alloc VLAN AS table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                               bp->pf.vf_info[i].vlan_as_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp))
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        HWRM_PREP(req, FUNC_DRV_RGTR);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));
        }

        /* Forward all async events; do the memset first so the explicit
         * bit set below is not clobbered.
         */
        memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
        req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t my_version;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];
        uint32_t dev_caps_cfg;

        bp->max_req_len = HWRM_MAX_REQ_LEN;
        HWRM_PREP(req, VER_GET);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj, resp->hwrm_intf_min,
                resp->hwrm_intf_upd,
                resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
        bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
                        (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
        RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        my_version = HWRM_VERSION_MAJOR << 16;
        my_version |= HWRM_VERSION_MINOR << 8;
        my_version |= HWRM_VERSION_UPDATE;

        fw_version = resp->hwrm_intf_maj << 16;
        fw_version |= resp->hwrm_intf_min << 8;
        fw_version |= resp->hwrm_intf_upd;

        if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
                RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (my_version != fw_version) {
                RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
                if (my_version < fw_version) {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is newer than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "The driver may be missing features.\n");
                } else {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is older than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "Not all driver features may be functional.\n");
                }
        }

        if (bp->max_req_len > rte_le_to_cpu_16(resp->max_req_win_len)) {
                RTE_LOG(ERR, PMD, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
        max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
        dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

        if (bp->max_resp_len != max_resp_len) {
                sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == 0) {
                        RTE_LOG(ERR, PMD,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

        if ((dev_caps_cfg &
                HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
            (dev_caps_cfg &
             HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
                RTE_LOG(DEBUG, PMD, "Short command supported\n");

                /* Name this allocation too: "type" is only filled in above
                 * when the response buffer is reallocated.
                 */
                sprintf(type, "bnxt_hwrm_short_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_short_cmd_req_addr);

                bp->hwrm_short_cmd_req_addr = rte_malloc(type,
                                                        bp->max_req_len, 0);
                if (bp->hwrm_short_cmd_req_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
                bp->hwrm_short_cmd_req_dma_addr =
                        rte_mem_virt2phy(bp->hwrm_short_cmd_req_addr);
                if (bp->hwrm_short_cmd_req_dma_addr == 0) {
                        rte_free(bp->hwrm_short_cmd_req_addr);
                        RTE_LOG(ERR, PMD,
                                "Unable to map buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }

                bp->flags |= BNXT_FLAG_SHORT_CMD;
        }

error:
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR);
        req.flags = rte_cpu_to_le_32(flags);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->flags &= ~BNXT_FLAG_REGISTERED;

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;
        uint32_t link_speed_mask =
                HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;

        HWRM_PREP(req, PORT_PHY_CFG);

        if (conf->link_up) {
                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        req.auto_mode = conf->auto_mode;
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                        if (conf->auto_mode ==
                            HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK) {
                                req.auto_link_speed_mask =
                                        conf->auto_link_speed_mask;
                                enables |= link_speed_mask;
                        }
                        if (bp->link_info.auto_link_speed) {
                                req.auto_link_speed =
                                        bp->link_info.auto_link_speed;
                                enables |=
                                HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
                        }
                }
                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Set force_pause if there is no auto or if there is a force */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                RTE_LOG(INFO, PMD, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        link_info->phy_link_status = resp->link;
        link_info->link_up =
                (link_info->phy_link_status ==
                 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
        link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        link_info->duplex = resp->duplex;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, QUEUE_QPORTCFG);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

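/*
 * Copy queue_id<x> and queue_id<x>_service_profile from the response via
 * token pasting; expanded below for the eight CoS queues.
 */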
#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

        GET_QUEUE_INFO(0);
        GET_QUEUE_INFO(1);
        GET_QUEUE_INFO(2);
        GET_QUEUE_INFO(3);
        GET_QUEUE_INFO(4);
        GET_QUEUE_INFO(5);
        GET_QUEUE_INFO(6);
        GET_QUEUE_INFO(7);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
        int rc = 0;
        uint32_t enables = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_ALLOC);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);
        req.length = rte_cpu_to_le_32(ring->ring_size);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.queue_id = bp->cos_queue[0].id;
                /* FALLTHROUGH */
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
                if (stats_ctx_id != INVALID_STATS_CTX_ID)
                        enables |=
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                req.ring_type = ring_type;
                /*
                 * TODO: Some HWRM versions crash with
                 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
                 */
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                break;
        default:
                RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                HWRM_UNLOCK();
                return -1;
        }
        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc cp failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc rx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc tx failed. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
                        HWRM_UNLOCK();
                        return rc;
                }
        }

        ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
        HWRM_UNLOCK();
        return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
                        struct bnxt_ring *ring, uint32_t ring_type)
{
        int rc;
        struct hwrm_ring_free_input req = {.req_type = 0 };
        struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_FREE);

        req.ring_type = ring_type;
        req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                HWRM_UNLOCK();

                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
                                rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
                        return rc;
                }
        }
        HWRM_UNLOCK();
        return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
        int rc = 0;
        struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_ALLOC);

        req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
        req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
        req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
        req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        bp->grp_info[idx].fw_grp_id =
            rte_le_to_cpu_16(resp->ring_group_id);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
        int rc;
        struct hwrm_ring_grp_free_input req = {.req_type = 0 };
        struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_FREE);

        req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
        int rc = 0;
        struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
                return rc;

        HWRM_PREP(req, STAT_CTX_CLR_STATS);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_ALLOC);

        req.update_period_ms = rte_cpu_to_le_32(0);

        req.stats_dma_addr =
            rte_cpu_to_le_64(cpr->hw_stats_map);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_FREE);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0, i, j;
        struct hwrm_vnic_alloc_input req = { 0 };
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        /* map ring groups to this vnic */
        RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
                vnic->start_grp_id, vnic->end_grp_id);
        for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
        vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
        vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                ETHER_CRC_LEN + VLAN_TAG_SIZE;
        HWRM_PREP(req, VNIC_ALLOC);

        if (vnic->func_default)
                req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
        HWRM_UNLOCK();
        RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
                                        struct bnxt_vnic_info *vnic,
                                        struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_QCFG);

        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        pmode->flags = rte_le_to_cpu_32(resp->flags);
        /* dflt_vnic bit doesn't exist in the _cfg command */
        pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
        pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
        pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
        pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

        HWRM_UNLOCK();

        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
                                       struct bnxt_vnic_info *vnic,
                                       struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_CFG);

        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
        req.flags = rte_cpu_to_le_32(pmode->flags);
        req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
        req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
        req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
        req.enables = rte_cpu_to_le_32(
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
        );

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t ctx_enable_flag = 0;
        struct bnxt_plcmodes_cfg pmodes;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
                return rc;
        }

        rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
        if (rc)
                return rc;

        HWRM_PREP(req, VNIC_CFG);

        /* Only RSS support for now TBD: COS & LB */
        req.enables =
            rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
                             HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
        /* Enable only the context rules that carry a valid context ID */
        if (vnic->lb_rule != 0xffff)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
        if (vnic->cos_rule != 0xffff)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
        if (vnic->rss_rule != 0xffff)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
        req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
        req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
        req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
        req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
        req.mru = rte_cpu_to_le_16(vnic->mru);
        if (vnic->func_default)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
        if (vnic->vlan_strip)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
        if (vnic->bd_stall)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
        if (vnic->roce_dual)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
        if (vnic->roce_only)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
        if (vnic->rss_dflt_cr)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);

        return rc;
}

int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
                int16_t fw_vf_id)
{
        int rc = 0;
        struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
                return rc;
        }
        HWRM_PREP(req, VNIC_QCFG);

        req.enables =
                rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.vf_id = rte_cpu_to_le_16(fw_vf_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
        vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
        vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
        vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
        vnic->mru = rte_le_to_cpu_16(resp->mru);
        vnic->func_default = rte_le_to_cpu_32(
                        resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
        vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
        vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
        vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
        vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
        vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
        HWRM_UNLOCK();
        RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);

        return rc;
}

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        if (vnic->rss_rule == 0xffff) {
                RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
                return rc;
        }
        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);

        req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        vnic->rss_rule = INVALID_HW_RING_ID;

        return rc;
}

int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_free_input req = {.req_type = 0 };
        struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
                return rc;
        }

        HWRM_PREP(req, VNIC_FREE);

        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        vnic->fw_vnic_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
                           struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_CFG);

        req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

        req.ring_grp_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_table_dma_addr);
        req.hash_key_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
        req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
                        struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t size;

        HWRM_PREP(req, VNIC_PLCMODES_CFG);

        req.flags = rte_cpu_to_le_32(
                        HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);

        req.enables = rte_cpu_to_le_32(
                HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);

        size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
        size -= RTE_PKTMBUF_HEADROOM;

        req.jumbo_thresh = rte_cpu_to_le_16(size);
        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
                        struct bnxt_vnic_info *vnic, bool enable)
{
        int rc = 0;
        struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_TPA_CFG);

        if (enable) {
                req.enables = rte_cpu_to_le_32(
                                HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
                                HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
                                HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
                req.flags = rte_cpu_to_le_32(
                                HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
                                HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
                                HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
                                HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
                                HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
                        HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
                req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
                req.max_agg_segs = rte_cpu_to_le_16(5);
                req.max_aggs =
                        rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
                req.min_agg_len = rte_cpu_to_le_32(512);
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
{
        struct hwrm_func_cfg_input req = {0};
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;

        req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
        req.enables = rte_cpu_to_le_32(
                        HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
        memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
        req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);

        HWRM_PREP(req, FUNC_CFG);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        bp->pf.vf_info[vf].random_mac = false;

        return rc;
}

int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
                                  uint64_t *dropped)
{
        int rc = 0;
        struct hwrm_func_qstats_input req = {.req_type = 0};
        struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_QSTATS);

        req.fid = rte_cpu_to_le_16(fid);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        if (dropped)
                *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
                          struct rte_eth_stats *stats)
{
        int rc = 0;
        struct hwrm_func_qstats_input req = {.req_type = 0};
        struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_QSTATS);

        req.fid = rte_cpu_to_le_16(fid);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();

        stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
        stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
        stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
        stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
        stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
        stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);

        stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
        stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
        stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
        stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
        stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
        stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);

        stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
        stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);

        stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);

        HWRM_UNLOCK();

        return rc;
}

int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
{
        int rc = 0;
        struct hwrm_func_clr_stats_input req = {.req_type = 0};
        struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_CLR_STATS);

        req.fid = rte_cpu_to_le_16(fid);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();

        return rc;
}
1504
1505 /*
1506  * HWRM utility functions
1507  */
1508
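/*
 * The stat context walkers below visit completion rings in a fixed
 * order: the bp->rx_cp_nr_rings RX completion rings first, then the TX
 * completion rings.  Index i therefore names RX queue i when
 * i < bp->rx_cp_nr_rings, and TX queue (i - bp->rx_cp_nr_rings)
 * otherwise.
 */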
1509 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1510 {
1511         unsigned int i;
1512         int rc = 0;
1513
1514         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1515                 struct bnxt_tx_queue *txq;
1516                 struct bnxt_rx_queue *rxq;
1517                 struct bnxt_cp_ring_info *cpr;
1518
1519                 if (i >= bp->rx_cp_nr_rings) {
1520                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1521                         cpr = txq->cp_ring;
1522                 } else {
1523                         rxq = bp->rx_queues[i];
1524                         cpr = rxq->cp_ring;
1525                 }
1526
1527                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1528                 if (rc)
1529                         return rc;
1530         }
1531         return 0;
1532 }
1533
1534 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1535 {
1536         int rc;
1537         unsigned int i;
1538         struct bnxt_cp_ring_info *cpr;
1539
1540         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1541
1542                 if (i >= bp->rx_cp_nr_rings)
1543                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1544                 else
1545                         cpr = bp->rx_queues[i]->cp_ring;
1546                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1547                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1548                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1549                         /*
1550                          * TODO. Need a better way to reset grp_info.stats_ctx
1551                          * for Rx rings only. stats_ctx is not saved for Tx
1552                          * in grp_info.
1553                          */
1554                         bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
1555                         if (rc)
1556                                 return rc;
1557                 }
1558         }
1559         return 0;
1560 }
1561
1562 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1563 {
1564         unsigned int i;
1565         int rc = 0;
1566
1567         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1568                 struct bnxt_tx_queue *txq;
1569                 struct bnxt_rx_queue *rxq;
1570                 struct bnxt_cp_ring_info *cpr;
1571
1572                 if (i >= bp->rx_cp_nr_rings) {
1573                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1574                         cpr = txq->cp_ring;
1575                 } else {
1576                         rxq = bp->rx_queues[i];
1577                         cpr = rxq->cp_ring;
1578                 }
1579
1580                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1581
1582                 if (rc)
1583                         return rc;
1584         }
1585         return rc;
1586 }
1587
1588 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1589 {
1590         uint16_t idx;
	int rc = 0;
1592
1593         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1594
1595                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
1596                         RTE_LOG(ERR, PMD,
1597                                 "Attempt to free invalid ring group %d\n",
1598                                 idx);
1599                         continue;
1600                 }
1601
1602                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1603
1604                 if (rc)
1605                         return rc;
1606         }
1607         return rc;
1608 }
1609
static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
				unsigned int idx)
1612 {
1613         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1614
1615         bnxt_hwrm_ring_free(bp, cp_ring,
1616                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1617         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1618         bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
1619         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1620                         sizeof(*cpr->cp_desc_ring));
1621         cpr->cp_raw_cons = 0;
1622 }
1623
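/*
 * Teardown order for each ring below: free the ring in firmware first,
 * then zero the host descriptor/buffer rings and reset the producer and
 * consumer indices so the queue can be cleanly reallocated.  grp_info
 * slot 0 tracks the default completion ring, the RX path uses slot
 * i + 1 and the TX path slot bp->rx_cp_nr_rings + i + 1.
 */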
1624 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1625 {
1626         unsigned int i;
1627         int rc = 0;
1628
1629         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1630                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1631                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1632                 struct bnxt_ring *ring = txr->tx_ring_struct;
1633                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1634                 unsigned int idx = bp->rx_cp_nr_rings + i + 1;
1635
1636                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1637                         bnxt_hwrm_ring_free(bp, ring,
1638                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1639                         ring->fw_ring_id = INVALID_HW_RING_ID;
1640                         memset(txr->tx_desc_ring, 0,
1641                                         txr->tx_ring_struct->ring_size *
1642                                         sizeof(*txr->tx_desc_ring));
1643                         memset(txr->tx_buf_ring, 0,
1644                                         txr->tx_ring_struct->ring_size *
1645                                         sizeof(*txr->tx_buf_ring));
1646                         txr->tx_prod = 0;
1647                         txr->tx_cons = 0;
1648                 }
1649                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1650                         bnxt_free_cp_ring(bp, cpr, idx);
1651                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1652                 }
1653         }
1654
1655         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1656                 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1657                 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1658                 struct bnxt_ring *ring = rxr->rx_ring_struct;
1659                 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1660                 unsigned int idx = i + 1;
1661
1662                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1663                         bnxt_hwrm_ring_free(bp, ring,
1664                                         HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1665                         ring->fw_ring_id = INVALID_HW_RING_ID;
1666                         bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
1667                         memset(rxr->rx_desc_ring, 0,
1668                                         rxr->rx_ring_struct->ring_size *
1669                                         sizeof(*rxr->rx_desc_ring));
1670                         memset(rxr->rx_buf_ring, 0,
1671                                         rxr->rx_ring_struct->ring_size *
1672                                         sizeof(*rxr->rx_buf_ring));
1673                         rxr->rx_prod = 0;
1674                         memset(rxr->ag_buf_ring, 0,
1675                                         rxr->ag_ring_struct->ring_size *
1676                                         sizeof(*rxr->ag_buf_ring));
1677                         rxr->ag_prod = 0;
1678                 }
1679                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1680                         bnxt_free_cp_ring(bp, cpr, idx);
1681                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
1682                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1683                 }
1684         }
1685
1686         /* Default completion ring */
1687         {
1688                 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1689
1690                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1691                         bnxt_free_cp_ring(bp, cpr, 0);
1692                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1693                 }
1694         }
1695
1696         return rc;
1697 }
1698
1699 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1700 {
1701         uint16_t i;
	int rc = 0;
1703
1704         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1705                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1706                 if (rc)
1707                         return rc;
1708         }
1709         return rc;
1710 }
1711
1712 void bnxt_free_hwrm_resources(struct bnxt *bp)
1713 {
	/* Free the rte_malloc'd HWRM command/response buffers */
1715         rte_free(bp->hwrm_cmd_resp_addr);
1716         rte_free(bp->hwrm_short_cmd_req_addr);
1717         bp->hwrm_cmd_resp_addr = NULL;
1718         bp->hwrm_short_cmd_req_addr = NULL;
1719         bp->hwrm_cmd_resp_dma_addr = 0;
1720         bp->hwrm_short_cmd_req_dma_addr = 0;
1721 }
1722
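/*
 * The HWRM response buffer must be reachable by the firmware via DMA:
 * it is rte_malloc'd, pinned with rte_mem_lock_page() and translated
 * with rte_mem_virt2phy(); the resulting physical address is what the
 * request macros program into each command (an assumption based on the
 * HWRM_PREP() definition in bnxt_hwrm.h).  The hwrm_lock serializing
 * all HWRM traffic is initialized here as well.
 */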
1723 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1724 {
1725         struct rte_pci_device *pdev = bp->pdev;
1726         char type[RTE_MEMZONE_NAMESIZE];
1727
1728         sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
1729                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1730         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1731         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1732         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1733         if (bp->hwrm_cmd_resp_addr == NULL)
1734                 return -ENOMEM;
1735         bp->hwrm_cmd_resp_dma_addr =
1736                 rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
1737         if (bp->hwrm_cmd_resp_dma_addr == 0) {
1738                 RTE_LOG(ERR, PMD,
1739                         "unable to map response address to physical memory\n");
1740                 return -ENOMEM;
1741         }
1742         rte_spinlock_init(&bp->hwrm_lock);
1743
1744         return 0;
1745 }
1746
1747 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1748 {
1749         struct bnxt_filter_info *filter;
1750         int rc = 0;
1751
1752         STAILQ_FOREACH(filter, &vnic->filter, next) {
1753                 rc = bnxt_hwrm_clear_filter(bp, filter);
1754                 if (rc)
1755                         break;
1756         }
1757         return rc;
1758 }
1759
1760 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1761 {
1762         struct bnxt_filter_info *filter;
1763         int rc = 0;
1764
1765         STAILQ_FOREACH(filter, &vnic->filter, next) {
1766                 rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
1767                 if (rc)
1768                         break;
1769         }
1770         return rc;
1771 }
1772
1773 void bnxt_free_tunnel_ports(struct bnxt *bp)
1774 {
1775         if (bp->vxlan_port_cnt)
1776                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
1777                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
1778         bp->vxlan_port = 0;
1779         if (bp->geneve_port_cnt)
1780                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
1781                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
1782         bp->geneve_port = 0;
1783 }
1784
1785 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1786 {
1787         struct bnxt_vnic_info *vnic;
1788         unsigned int i;
1789
1790         if (bp->vnic_info == NULL)
1791                 return;
1792
1793         vnic = &bp->vnic_info[0];
1794         if (BNXT_PF(bp))
1795                 bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
1796
1797         /* VNIC resources */
1798         for (i = 0; i < bp->nr_vnics; i++) {
1799                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1800
1801                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1802
1803                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1804
1805                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
1806
1807                 bnxt_hwrm_vnic_free(bp, vnic);
1808         }
1809         /* Ring resources */
1810         bnxt_free_all_hwrm_rings(bp);
1811         bnxt_free_all_hwrm_ring_grps(bp);
1812         bnxt_free_all_hwrm_stat_ctxs(bp);
1813         bnxt_free_tunnel_ports(bp);
1814 }
1815
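/*
 * Note: ETH_LINK_SPEED_AUTONEG is 0, so the test below reads as "the
 * ETH_LINK_SPEED_FIXED bit is not set", i.e. duplex is autonegotiated.
 * Only the 10M and 100M half-duplex settings map to a forced half
 * duplex.
 */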
1816 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1817 {
1818         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1819
1820         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1821                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1822
1823         switch (conf_link_speed) {
1824         case ETH_LINK_SPEED_10M_HD:
1825         case ETH_LINK_SPEED_100M_HD:
1826                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1827         }
1828         return hw_link_duplex;
1829 }
1830
1831 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
1832 {
1833         uint16_t eth_link_speed = 0;
1834
1835         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
1836                 return ETH_LINK_SPEED_AUTONEG;
1837
1838         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
1839         case ETH_LINK_SPEED_100M:
1840         case ETH_LINK_SPEED_100M_HD:
1841                 eth_link_speed =
1842                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
1843                 break;
1844         case ETH_LINK_SPEED_1G:
1845                 eth_link_speed =
1846                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
1847                 break;
1848         case ETH_LINK_SPEED_2_5G:
1849                 eth_link_speed =
1850                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
1851                 break;
1852         case ETH_LINK_SPEED_10G:
1853                 eth_link_speed =
1854                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
1855                 break;
1856         case ETH_LINK_SPEED_20G:
1857                 eth_link_speed =
1858                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
1859                 break;
1860         case ETH_LINK_SPEED_25G:
1861                 eth_link_speed =
1862                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
1863                 break;
1864         case ETH_LINK_SPEED_40G:
1865                 eth_link_speed =
1866                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
1867                 break;
1868         case ETH_LINK_SPEED_50G:
1869                 eth_link_speed =
1870                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
1871                 break;
1872         default:
1873                 RTE_LOG(ERR, PMD,
1874                         "Unsupported link speed %d; default to AUTO\n",
1875                         conf_link_speed);
1876                 break;
1877         }
1878         return eth_link_speed;
1879 }
1880
1881 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
1882                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
1883                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
1884                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
1885
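/*
 * Validate dev_conf->link_speeds.  A fixed speed must name exactly one
 * speed (the one_speed & (one_speed - 1) test rejects masks with more
 * than one bit set) and that speed must be supported.  An autoneg mask
 * only needs a non-empty intersection with BNXT_SUPPORTED_SPEEDS.  For
 * example, ETH_LINK_SPEED_FIXED | ETH_LINK_SPEED_10G passes, while
 * ETH_LINK_SPEED_FIXED | ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G is
 * rejected as ambiguous.
 */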
1886 static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
1887 {
1888         uint32_t one_speed;
1889
1890         if (link_speed == ETH_LINK_SPEED_AUTONEG)
1891                 return 0;
1892
1893         if (link_speed & ETH_LINK_SPEED_FIXED) {
1894                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
1895
1896                 if (one_speed & (one_speed - 1)) {
1897                         RTE_LOG(ERR, PMD,
1898                                 "Invalid advertised speeds (%u) for port %u\n",
1899                                 link_speed, port_id);
1900                         return -EINVAL;
1901                 }
1902                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
1903                         RTE_LOG(ERR, PMD,
1904                                 "Unsupported advertised speed (%u) for port %u\n",
1905                                 link_speed, port_id);
1906                         return -EINVAL;
1907                 }
1908         } else {
1909                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
1910                         RTE_LOG(ERR, PMD,
1911                                 "Unsupported advertised speeds (%u) for port %u\n",
1912                                 link_speed, port_id);
1913                         return -EINVAL;
1914                 }
1915         }
1916         return 0;
1917 }
1918
1919 static uint16_t
1920 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
1921 {
1922         uint16_t ret = 0;
1923
1924         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
1925                 if (bp->link_info.support_speeds)
1926                         return bp->link_info.support_speeds;
1927                 link_speed = BNXT_SUPPORTED_SPEEDS;
1928         }
1929
1930         if (link_speed & ETH_LINK_SPEED_100M)
1931                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1932         if (link_speed & ETH_LINK_SPEED_100M_HD)
1933                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1934         if (link_speed & ETH_LINK_SPEED_1G)
1935                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
1936         if (link_speed & ETH_LINK_SPEED_2_5G)
1937                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
1938         if (link_speed & ETH_LINK_SPEED_10G)
1939                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
1940         if (link_speed & ETH_LINK_SPEED_20G)
1941                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
1942         if (link_speed & ETH_LINK_SPEED_25G)
1943                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
1944         if (link_speed & ETH_LINK_SPEED_40G)
1945                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
1946         if (link_speed & ETH_LINK_SPEED_50G)
1947                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
1948         return ret;
1949 }
1950
1951 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
1952 {
1953         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
1954
1955         switch (hw_link_speed) {
1956         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
1957                 eth_link_speed = ETH_SPEED_NUM_100M;
1958                 break;
1959         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
1960                 eth_link_speed = ETH_SPEED_NUM_1G;
1961                 break;
1962         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
1963                 eth_link_speed = ETH_SPEED_NUM_2_5G;
1964                 break;
1965         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
1966                 eth_link_speed = ETH_SPEED_NUM_10G;
1967                 break;
1968         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
1969                 eth_link_speed = ETH_SPEED_NUM_20G;
1970                 break;
1971         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
1972                 eth_link_speed = ETH_SPEED_NUM_25G;
1973                 break;
1974         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
1975                 eth_link_speed = ETH_SPEED_NUM_40G;
1976                 break;
1977         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
1978                 eth_link_speed = ETH_SPEED_NUM_50G;
1979                 break;
1980         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
1981         default:
1982                 RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
1983                         hw_link_speed);
1984                 break;
1985         }
1986         return eth_link_speed;
1987 }
1988
1989 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
1990 {
1991         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1992
1993         switch (hw_link_duplex) {
1994         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
1995         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
1996                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1997                 break;
1998         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
1999                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2000                 break;
2001         default:
2002                 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
2003                         hw_link_duplex);
2004                 break;
2005         }
2006         return eth_link_duplex;
2007 }
2008
2009 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2010 {
2011         int rc = 0;
2012         struct bnxt_link_info *link_info = &bp->link_info;
2013
2014         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2015         if (rc) {
2016                 RTE_LOG(ERR, PMD,
2017                         "Get link config failed with rc %d\n", rc);
2018                 goto exit;
2019         }
2020         if (link_info->link_speed)
2021                 link->link_speed =
2022                         bnxt_parse_hw_link_speed(link_info->link_speed);
2023         else
2024                 link->link_speed = ETH_SPEED_NUM_NONE;
2025         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2026         link->link_status = link_info->link_up;
2027         link->link_autoneg = link_info->auto_mode ==
2028                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2029                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2030 exit:
2031         return rc;
2032 }
2033
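/*
 * Push the configuration derived from dev_conf->link_speeds to the PHY.
 * A parsed speed of 0 means autoneg: restart autonegotiation with an
 * advertised-speed mask.  A non-zero speed is forced.  For
 * link_up == false, only link_up is set in the otherwise zeroed
 * request, which bnxt_hwrm_port_phy_cfg() is expected to translate
 * into a forced link down.
 */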
2034 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2035 {
2036         int rc = 0;
2037         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2038         struct bnxt_link_info link_req;
2039         uint16_t speed;
2040
2041         if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
2042                 return 0;
2043
2044         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2045                         bp->eth_dev->data->port_id);
2046         if (rc)
2047                 goto error;
2048
2049         memset(&link_req, 0, sizeof(link_req));
2050         link_req.link_up = link_up;
2051         if (!link_up)
2052                 goto port_phy_cfg;
2053
2054         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2055         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2056         if (speed == 0) {
2057                 link_req.phy_flags |=
2058                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2059                 link_req.auto_mode =
2060                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
2061                 link_req.auto_link_speed_mask =
2062                         bnxt_parse_eth_link_speed_mask(bp,
2063                                                        dev_conf->link_speeds);
2064         } else {
2065                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2066                 link_req.link_speed = speed;
2067                 RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
2068         }
2069         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2070         link_req.auto_pause = bp->link_info.auto_pause;
2071         link_req.force_pause = bp->link_info.force_pause;
2072
2073 port_phy_cfg:
2074         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2075         if (rc) {
2076                 RTE_LOG(ERR, PMD,
2077                         "Set link config failed with rc %d\n", rc);
2078         }
2079
2080 error:
2081         return rc;
2082 }
2083
2084 /* JIRA 22088 */
2085 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2086 {
2087         struct hwrm_func_qcfg_input req = {0};
2088         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2089         int rc = 0;
2090
2091         HWRM_PREP(req, FUNC_QCFG);
2092         req.fid = rte_cpu_to_le_16(0xffff);
2093
2094         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2095
2096         HWRM_CHECK_RESULT();
2097
	/* Hard-code the 0xfff VLAN ID mask */
2099         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2100
2101         switch (resp->port_partition_type) {
2102         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2103         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2104         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2105                 bp->port_partition_type = resp->port_partition_type;
2106                 break;
2107         default:
2108                 bp->port_partition_type = 0;
2109                 break;
2110         }
2111
2112         HWRM_UNLOCK();
2113
2114         return rc;
2115 }
2116
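/*
 * Fallback helper for reserve_resources_from_vf(): when HWRM_FUNC_QCAPS
 * fails for a VF, synthesize a qcaps response from the values that
 * HWRM_FUNC_CFG was just asked to program, so the PF accounting can
 * still subtract a plausible per-VF share.
 */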
2117 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2118                                    struct hwrm_func_qcaps_output *qcaps)
2119 {
2120         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2121         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2122                sizeof(qcaps->mac_address));
2123         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2124         qcaps->max_rx_rings = fcfg->num_rx_rings;
2125         qcaps->max_tx_rings = fcfg->num_tx_rings;
2126         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2127         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2128         qcaps->max_vfs = 0;
2129         qcaps->first_vf_id = 0;
2130         qcaps->max_vnics = fcfg->num_vnics;
2131         qcaps->max_decap_records = 0;
2132         qcaps->max_encap_records = 0;
2133         qcaps->max_tx_wm_flows = 0;
2134         qcaps->max_tx_em_flows = 0;
2135         qcaps->max_rx_wm_flows = 0;
2136         qcaps->max_rx_em_flows = 0;
2137         qcaps->max_flow_id = 0;
2138         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2139         qcaps->max_sp_tx_rings = 0;
2140         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2141 }
2142
2143 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2144 {
2145         struct hwrm_func_cfg_input req = {0};
2146         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2147         int rc;
2148
2149         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2150                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2151                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2152                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2153                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2154                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2155                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2156                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2157                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2158                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2159         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2160         req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2161         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2162                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
2163         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2164         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2165         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2166         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2167         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2168         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2169         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2170         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2171         req.fid = rte_cpu_to_le_16(0xffff);
2172
2173         HWRM_PREP(req, FUNC_CFG);
2174
2175         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2176
2177         HWRM_CHECK_RESULT();
2178         HWRM_UNLOCK();
2179
2180         return rc;
2181 }
2182
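/*
 * Split the PF's resources evenly between the PF itself and its VFs,
 * hence the (num_vfs + 1) divisor.  VNICs stay at one per function
 * because VMDq/RFS is not supported on VFs (see the TODO below).
 */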
2183 static void populate_vf_func_cfg_req(struct bnxt *bp,
2184                                      struct hwrm_func_cfg_input *req,
2185                                      int num_vfs)
2186 {
2187         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2188                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2189                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2190                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2191                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2192                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2193                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2194                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2195                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2196                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2197
2198         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2199                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
2200         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2201                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
2202         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2203                                                 (num_vfs + 1));
2204         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2205         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2206                                                (num_vfs + 1));
2207         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2208         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2209         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2210         /* TODO: For now, do not support VMDq/RFS on VFs. */
2211         req->num_vnics = rte_cpu_to_le_16(1);
2212         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2213                                                  (num_vfs + 1));
2214 }
2215
2216 static void add_random_mac_if_needed(struct bnxt *bp,
2217                                      struct hwrm_func_cfg_input *cfg_req,
2218                                      int vf)
2219 {
2220         struct ether_addr mac;
2221
2222         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2223                 return;
2224
	if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00",
		   ETHER_ADDR_LEN) == 0) {
2226                 cfg_req->enables |=
2227                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2228                 eth_random_addr(cfg_req->dflt_mac_addr);
2229                 bp->pf.vf_info[vf].random_mac = true;
2230         } else {
2231                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2232         }
2233 }
2234
2235 static void reserve_resources_from_vf(struct bnxt *bp,
2236                                       struct hwrm_func_cfg_input *cfg_req,
2237                                       int vf)
2238 {
2239         struct hwrm_func_qcaps_input req = {0};
2240         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2241         int rc;
2242
2243         /* Get the actual allocated values now */
2244         HWRM_PREP(req, FUNC_QCAPS);
2245         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2246         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2247
2248         if (rc) {
2249                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
2250                 copy_func_cfg_to_qcaps(cfg_req, resp);
2251         } else if (resp->error_code) {
2252                 rc = rte_le_to_cpu_16(resp->error_code);
2253                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
2254                 copy_func_cfg_to_qcaps(cfg_req, resp);
2255         }
2256
2257         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2258         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2259         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2260         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2261         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2262         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
	/*
	 * TODO: While VMDq is not supported with VFs, max_vnics is always
	 * forced to 1, so nothing is subtracted here:
	 * bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
	 */
2268         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2269
2270         HWRM_UNLOCK();
2271 }
2272
2273 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2274 {
2275         struct hwrm_func_qcfg_input req = {0};
2276         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2277         int rc;
2278
	/* Query the VF's current default VLAN */
2280         HWRM_PREP(req, FUNC_QCFG);
2281         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2282         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	if (rc) {
		RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
		HWRM_UNLOCK();
		return -1;
	} else if (resp->error_code) {
		rc = rte_le_to_cpu_16(resp->error_code);
		RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
		HWRM_UNLOCK();
		return -1;
	}
2291         rc = rte_le_to_cpu_16(resp->vlan);
2292
2293         HWRM_UNLOCK();
2294
2295         return rc;
2296 }
2297
2298 static int update_pf_resource_max(struct bnxt *bp)
2299 {
2300         struct hwrm_func_qcfg_input req = {0};
2301         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2302         int rc;
2303
2304         /* And copy the allocated numbers into the pf struct */
2305         HWRM_PREP(req, FUNC_QCFG);
2306         req.fid = rte_cpu_to_le_16(0xffff);
2307         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2308         HWRM_CHECK_RESULT();
2309
2310         /* Only TX ring value reflects actual allocation? TODO */
2311         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2312         bp->pf.evb_mode = resp->evb_mode;
2313
2314         HWRM_UNLOCK();
2315
2316         return rc;
2317 }
2318
2319 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2320 {
2321         int rc;
2322
2323         if (!BNXT_PF(bp)) {
		RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
2325                 return -1;
2326         }
2327
2328         rc = bnxt_hwrm_func_qcaps(bp);
2329         if (rc)
2330                 return rc;
2331
2332         bp->pf.func_cfg_flags &=
2333                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2334                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2335         bp->pf.func_cfg_flags |=
2336                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2337         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2338         return rc;
2339 }
2340
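/*
 * VF provisioning runs in four steps: shrink the PF to a single TX ring
 * so that rings remain for the VFs, register the VF request-forwarding
 * buffer, configure each VF with HWRM_FUNC_CFG while subtracting its
 * actual allocation from the PF maxima, and finally re-expand the PF to
 * the remaining resources.
 */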
2341 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2342 {
2343         struct hwrm_func_cfg_input req = {0};
2344         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2345         int i;
2346         size_t sz;
2347         int rc = 0;
2348         size_t req_buf_sz;
2349
2350         if (!BNXT_PF(bp)) {
		RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
2352                 return -1;
2353         }
2354
2355         rc = bnxt_hwrm_func_qcaps(bp);
2356
2357         if (rc)
2358                 return rc;
2359
2360         bp->pf.active_vfs = num_vfs;
2361
2362         /*
2363          * First, configure the PF to only use one TX ring.  This ensures that
2364          * there are enough rings for all VFs.
2365          *
2366          * If we don't do this, when we call func_alloc() later, we will lock
2367          * extra rings to the PF that won't be available during func_cfg() of
2368          * the VFs.
2369          *
 * This is fixed in firmware versions above 20.6.54
2371          */
2372         bp->pf.func_cfg_flags &=
2373                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2374                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2375         bp->pf.func_cfg_flags |=
2376                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2377         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2378         if (rc)
2379                 return rc;
2380
2381         /*
2382          * Now, create and register a buffer to hold forwarded VF requests
2383          */
2384         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2385         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2386                 page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
2387         if (bp->pf.vf_req_buf == NULL) {
2388                 rc = -ENOMEM;
2389                 goto error_free;
2390         }
2391         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2392                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2393         for (i = 0; i < num_vfs; i++)
2394                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2395                                         (i * HWRM_MAX_REQ_LEN);
2396
2397         rc = bnxt_hwrm_func_buf_rgtr(bp);
2398         if (rc)
2399                 goto error_free;
2400
2401         populate_vf_func_cfg_req(bp, &req, num_vfs);
2402
2403         bp->pf.active_vfs = 0;
2404         for (i = 0; i < num_vfs; i++) {
2405                 add_random_mac_if_needed(bp, &req, i);
2406
2407                 HWRM_PREP(req, FUNC_CFG);
2408                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2409                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2410                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2411
2412                 /* Clear enable flag for next pass */
2413                 req.enables &= ~rte_cpu_to_le_32(
2414                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2415
2416                 if (rc || resp->error_code) {
2417                         RTE_LOG(ERR, PMD,
				"Failed to initialize VF %d\n", i);
2419                         RTE_LOG(ERR, PMD,
2420                                 "Not all VFs available. (%d, %d)\n",
2421                                 rc, resp->error_code);
2422                         HWRM_UNLOCK();
2423                         break;
2424                 }
2425
2426                 HWRM_UNLOCK();
2427
2428                 reserve_resources_from_vf(bp, &req, i);
2429                 bp->pf.active_vfs++;
2430                 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2431         }
2432
	/*
	 * Now configure the PF to use "the rest" of the resources.
	 * STD_TX_RING_MODE is used here even though it limits the number
	 * of TX rings, because it allows QoS to function properly; without
	 * it, the PF rings would break the bandwidth settings.
	 */
2439         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2440         if (rc)
2441                 goto error_free;
2442
2443         rc = update_pf_resource_max(bp);
2444         if (rc)
2445                 goto error_free;
2446
2447         return rc;
2448
2449 error_free:
2450         bnxt_hwrm_func_buf_unrgtr(bp);
2451         return rc;
2452 }
2453
2454 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2455 {
2456         struct hwrm_func_cfg_input req = {0};
2457         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2458         int rc;
2459
2460         HWRM_PREP(req, FUNC_CFG);
2461
2462         req.fid = rte_cpu_to_le_16(0xffff);
2463         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2464         req.evb_mode = bp->pf.evb_mode;
2465
2466         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2467         HWRM_CHECK_RESULT();
2468         HWRM_UNLOCK();
2469
2470         return rc;
2471 }
2472
2473 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2474                                 uint8_t tunnel_type)
2475 {
2476         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2477         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2478         int rc = 0;
2479
2480         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
2481         req.tunnel_type = tunnel_type;
2482         req.tunnel_dst_port_val = port;
2483         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2484         HWRM_CHECK_RESULT();
2485
2486         switch (tunnel_type) {
2487         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2488                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2489                 bp->vxlan_port = port;
2490                 break;
2491         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2492                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2493                 bp->geneve_port = port;
2494                 break;
2495         default:
2496                 break;
2497         }
2498
2499         HWRM_UNLOCK();
2500
2501         return rc;
2502 }
2503
2504 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2505                                 uint8_t tunnel_type)
2506 {
2507         struct hwrm_tunnel_dst_port_free_input req = {0};
2508         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2509         int rc = 0;
2510
2511         HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
2512
2513         req.tunnel_type = tunnel_type;
2514         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2515         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2516
2517         HWRM_CHECK_RESULT();
2518         HWRM_UNLOCK();
2519
2520         return rc;
2521 }
2522
2523 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2524                                         uint32_t flags)
2525 {
2526         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2527         struct hwrm_func_cfg_input req = {0};
2528         int rc;
2529
2530         HWRM_PREP(req, FUNC_CFG);
2531
2532         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2533         req.flags = rte_cpu_to_le_32(flags);
2534         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2535
2536         HWRM_CHECK_RESULT();
2537         HWRM_UNLOCK();
2538
2539         return rc;
2540 }
2541
2542 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2543 {
2544         uint32_t *flag = flagp;
2545
2546         vnic->flags = *flag;
2547 }
2548
2549 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2550 {
2551         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2552 }
2553
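/*
 * Register the VF request-forwarding buffer allocated in
 * bnxt_hwrm_allocate_vfs() with the firmware: a single page whose log2
 * size comes from page_getenum(), holding one HWRM_MAX_REQ_LEN slot per
 * active VF, so that forwarded VF commands land where the PF can
 * inspect them.
 */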
2554 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2555 {
2556         int rc = 0;
2557         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2558         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2559
2560         HWRM_PREP(req, FUNC_BUF_RGTR);
2561
2562         req.req_buf_num_pages = rte_cpu_to_le_16(1);
2563         req.req_buf_page_size = rte_cpu_to_le_16(
2564                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2565         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2566         req.req_buf_page_addr[0] =
2567                 rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
	if (req.req_buf_page_addr[0] == 0) {
		RTE_LOG(ERR, PMD,
			"unable to map buffer address to physical memory\n");
		HWRM_UNLOCK();
		return -ENOMEM;
	}
2573
2574         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2575
2576         HWRM_CHECK_RESULT();
2577         HWRM_UNLOCK();
2578
2579         return rc;
2580 }
2581
2582 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2583 {
2584         int rc = 0;
2585         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2586         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2587
2588         HWRM_PREP(req, FUNC_BUF_UNRGTR);
2589
2590         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2591
2592         HWRM_CHECK_RESULT();
2593         HWRM_UNLOCK();
2594
2595         return rc;
2596 }
2597
2598 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2599 {
2600         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2601         struct hwrm_func_cfg_input req = {0};
2602         int rc;
2603
2604         HWRM_PREP(req, FUNC_CFG);
2605
2606         req.fid = rte_cpu_to_le_16(0xffff);
2607         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2608         req.enables = rte_cpu_to_le_32(
2609                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2610         req.async_event_cr = rte_cpu_to_le_16(
2611                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2612         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2613
2614         HWRM_CHECK_RESULT();
2615         HWRM_UNLOCK();
2616
2617         return rc;
2618 }
2619
2620 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2621 {
2622         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2623         struct hwrm_func_vf_cfg_input req = {0};
2624         int rc;
2625
2626         HWRM_PREP(req, FUNC_VF_CFG);
2627
2628         req.enables = rte_cpu_to_le_32(
2629                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2630         req.async_event_cr = rte_cpu_to_le_16(
2631                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2632         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2633
2634         HWRM_CHECK_RESULT();
2635         HWRM_UNLOCK();
2636
2637         return rc;
2638 }
2639
2640 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2641 {
2642         struct hwrm_func_cfg_input req = {0};
2643         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2644         uint16_t dflt_vlan, fid;
2645         uint32_t func_cfg_flags;
2646         int rc = 0;
2647
2648         HWRM_PREP(req, FUNC_CFG);
2649
2650         if (is_vf) {
2651                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2652                 fid = bp->pf.vf_info[vf].fid;
2653                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2654         } else {
		fid = 0xffff;
2656                 func_cfg_flags = bp->pf.func_cfg_flags;
2657                 dflt_vlan = bp->vlan;
2658         }
2659
2660         req.flags = rte_cpu_to_le_32(func_cfg_flags);
2661         req.fid = rte_cpu_to_le_16(fid);
2662         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2663         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2664
2665         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2666
2667         HWRM_CHECK_RESULT();
2668         HWRM_UNLOCK();
2669
2670         return rc;
2671 }
2672
2673 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2674                         uint16_t max_bw, uint16_t enables)
2675 {
2676         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2677         struct hwrm_func_cfg_input req = {0};
2678         int rc;
2679
2680         HWRM_PREP(req, FUNC_CFG);
2681
2682         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2683         req.enables |= rte_cpu_to_le_32(enables);
2684         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2685         req.max_bw = rte_cpu_to_le_32(max_bw);
2686         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2687
2688         HWRM_CHECK_RESULT();
2689         HWRM_UNLOCK();
2690
2691         return rc;
2692 }
2693
2694 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2695 {
2696         struct hwrm_func_cfg_input req = {0};
2697         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2698         int rc = 0;
2699
2700         HWRM_PREP(req, FUNC_CFG);
2701
2702         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2703         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2704         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2705         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
2706
2707         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2708
2709         HWRM_CHECK_RESULT();
2710         HWRM_UNLOCK();
2711
2712         return rc;
2713 }
2714
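/*
 * The *_fwd_resp helpers answer a VF command that the firmware has
 * forwarded to the PF: EXEC_FWD_RESP asks the firmware to execute the
 * encapsulated request as-is, while REJECT_FWD_RESP asks it to fail the
 * request back to the VF.  Requests larger than the encap_request area
 * are refused locally.
 */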
2715 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2716                               void *encaped, size_t ec_size)
2717 {
2718         int rc = 0;
2719         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2720         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2721
2722         if (ec_size > sizeof(req.encap_request))
2723                 return -1;
2724
2725         HWRM_PREP(req, REJECT_FWD_RESP);
2726
2727         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2728         memcpy(req.encap_request, encaped, ec_size);
2729
2730         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2731
2732         HWRM_CHECK_RESULT();
2733         HWRM_UNLOCK();
2734
2735         return rc;
2736 }
2737
2738 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2739                                        struct ether_addr *mac)
2740 {
2741         struct hwrm_func_qcfg_input req = {0};
2742         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2743         int rc;
2744
2745         HWRM_PREP(req, FUNC_QCFG);
2746
2747         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2748         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2749
2750         HWRM_CHECK_RESULT();
2751
2752         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2753
2754         HWRM_UNLOCK();
2755
2756         return rc;
2757 }
2758
2759 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2760                             void *encaped, size_t ec_size)
2761 {
2762         int rc = 0;
2763         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2764         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2765
2766         if (ec_size > sizeof(req.encap_request))
2767                 return -1;
2768
2769         HWRM_PREP(req, EXEC_FWD_RESP);
2770
2771         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2772         memcpy(req.encap_request, encaped, ec_size);
2773
2774         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2775
2776         HWRM_CHECK_RESULT();
2777         HWRM_UNLOCK();
2778
2779         return rc;
2780 }
2781
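/*
 * Per-queue statistics: query one stat context and accumulate its
 * unicast/multicast/broadcast counters into the rte_eth_stats q_*
 * arrays at index idx.  RX errors, TX errors and RX drops are all
 * folded into q_errors.
 */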
2782 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
2783                          struct rte_eth_stats *stats)
2784 {
2785         int rc = 0;
2786         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
2787         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
2788
2789         HWRM_PREP(req, STAT_CTX_QUERY);
2790
2791         req.stat_ctx_id = rte_cpu_to_le_32(cid);
2792
2793         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2794
2795         HWRM_CHECK_RESULT();
2796
2797         stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2798         stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2799         stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2800         stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2801         stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2802         stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2803
2804         stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2805         stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2806         stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2807         stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2808         stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2809         stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2810
2811         stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
2812         stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
2813         stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
2814
2815         HWRM_UNLOCK();
2816
2817         return rc;
2818 }
2819
int bnxt_hwrm_port_qstats(struct bnxt *bp)
{
	struct hwrm_port_qstats_input req = {0};
	struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_pf_info *pf = &bp->pf;
	int rc;

	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
		return 0;

	HWRM_PREP(req, PORT_QSTATS);

	req.port_id = rte_cpu_to_le_16(pf->port_id);
	req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
	req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

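/*
 * Clear the firmware-maintained statistics of the physical port.
 * As with bnxt_hwrm_port_qstats(), this is a no-op when port statistics
 * are not supported.
 */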
int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
{
	struct hwrm_port_clr_stats_input req = {0};
	struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_pf_info *pf = &bp->pf;
	int rc;

	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
		return 0;

	HWRM_PREP(req, PORT_CLR_STATS);

	req.port_id = rte_cpu_to_le_16(pf->port_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

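/*
 * Discover the LEDs attached to the port and cache them in bp->leds.
 * LED control is only kept enabled when every reported LED belongs to a
 * LED group and supports alternating blink; otherwise bp->num_leds is
 * reset to 0 and bnxt_hwrm_port_led_cfg() will refuse the request.
 */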
int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
{
	struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_port_led_qcaps_input req = {0};
	int rc;

	if (BNXT_VF(bp))
		return 0;

	HWRM_PREP(req, PORT_LED_QCAPS);
	req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
		unsigned int i;

		bp->num_leds = resp->num_leds;
		memcpy(bp->leds, &resp->led0_id,
		       sizeof(bp->leds[0]) * bp->num_leds);
		for (i = 0; i < bp->num_leds; i++) {
			struct bnxt_led_info *led = &bp->leds[i];
			uint16_t caps = led->led_state_caps;

			if (!led->led_group_id ||
			    !BNXT_LED_ALT_BLINK_CAP(caps)) {
				bp->num_leds = 0;
				break;
			}
		}
	}

	HWRM_UNLOCK();

	return rc;
}

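/*
 * Drive all cached port LEDs, e.g. for port identification: when led_on
 * is true each LED is set to alternating blink with a 500ms on/off
 * period, otherwise it is restored to its default state.
 */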
int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
{
	struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_port_led_cfg_input req = {0};
	struct bnxt_led_cfg *led_cfg;
	uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
	uint16_t duration = 0;
	int rc, i;

	if (!bp->num_leds || BNXT_VF(bp))
		return -EOPNOTSUPP;

	HWRM_PREP(req, PORT_LED_CFG);

	if (led_on) {
		led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
		duration = rte_cpu_to_le_16(500);
	}
	req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
	req.num_leds = bp->num_leds;
	led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
	for (i = 0; i < bp->num_leds; i++, led_cfg++) {
		req.enables |= BNXT_LED_DFLT_ENABLES(i);
		led_cfg->led_id = bp->leds[i].led_id;
		led_cfg->led_state = led_state;
		led_cfg->led_blink_on = duration;
		led_cfg->led_blink_off = duration;
		led_cfg->led_group_id = bp->leds[i].led_group_id;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

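/*
 * Callback pair for bnxt_vf_vnic_count(): the vnic_cb increments the
 * caller-supplied counter for each active VNIC and the hwrm_cb stub
 * reprograms nothing, so the query-and-config walk below degenerates
 * into a pure count.
 */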
static void
bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
{
	uint32_t *count = cbdata;

	*count = *count + 1;
}

static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
				     struct bnxt_vnic_info *vnic __rte_unused)
{
	return 0;
}

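/*
 * Count the VNICs currently in use by the given VF. A minimal usage
 * sketch (hypothetical caller, not part of this driver):
 *
 *	if (bnxt_vf_vnic_count(bp, vf) > 1)
 *		RTE_LOG(ERR, PMD, "VF %u still owns extra VNICs\n", vf);
 */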
int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
{
	uint32_t count = 0;

	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
	    &count, bnxt_vnic_count_hwrm_stub);

	return count;
}

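/*
 * Fetch the IDs of every VNIC bound to a VF. The caller supplies a
 * vnic_ids table sized for bp->pf.total_vnics entries; the firmware DMAs
 * the IDs into it and the number of valid entries is returned, or a
 * negative value on failure. The table is passed by physical address, so
 * it must be locked into memory beforehand.
 */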
static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
					uint16_t *vnic_ids)
{
	struct hwrm_func_vf_vnic_ids_query_input req = {0};
	struct hwrm_func_vf_vnic_ids_query_output *resp =
						bp->hwrm_cmd_resp_addr;
	int rc;

	/* First query all VNIC ids */
	HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);

	req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
	req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
	req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));

	if (req.vnic_id_tbl_addr == 0) {
		HWRM_UNLOCK();
		RTE_LOG(ERR, PMD,
			"unable to map VNIC ID table address to physical memory\n");
		return -ENOMEM;
	}
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	if (rc) {
		HWRM_UNLOCK();
		RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
		return -1;
	} else if (resp->error_code) {
		rc = rte_le_to_cpu_16(resp->error_code);
		HWRM_UNLOCK();
		RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
		return -1;
	}
	rc = rte_le_to_cpu_32(resp->vnic_id_cnt);

	HWRM_UNLOCK();

	return rc;
}

/*
 * This function queries the VNIC IDs for a specified VF. It then calls
 * the vnic_cb callback to update the necessary field in each vnic_info
 * with cbdata, and finally calls the hwrm_cb function to program the new
 * VNIC configuration.
 */
int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
	void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
	int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
{
	struct bnxt_vnic_info vnic;
	int rc = 0;
	int i, num_vnic_ids;
	uint16_t *vnic_ids;
	size_t vnic_id_sz;
	size_t sz;

	/* First query all VNIC ids */
	vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
	vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
			RTE_CACHE_LINE_SIZE);
	if (vnic_ids == NULL) {
		rc = -ENOMEM;
		return rc;
	}
	for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
		rte_mem_lock_page(((char *)vnic_ids) + sz);

	num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);

	if (num_vnic_ids < 0) {
		rte_free(vnic_ids);
		return num_vnic_ids;
	}

	/* Retrieve each VNIC, let vnic_cb update it, then reprogram it */
	for (i = 0; i < num_vnic_ids; i++) {
		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
		if (rc)
			break;
		if (vnic.mru <= 4)	/* Indicates unallocated */
			continue;

		vnic_cb(&vnic, cbdata);

		rc = hwrm_cb(bp, &vnic);
		if (rc)
			break;
	}

	rte_free(vnic_ids);

	return rc;
}

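/*
 * Toggle firmware VLAN anti-spoof checking for a VF: with on == true the
 * firmware is asked to validate each transmitted VLAN tag
 * (VALIDATE_VLAN), with on == false no check is performed (NOCHECK).
 */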
int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
					      bool on)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_CFG);

	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
	req.vlan_antispoof_mode = on ?
		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

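/*
 * Find the default VNIC of a VF by walking its VNIC IDs and querying
 * each one until the function-default VNIC is found. Returns the
 * firmware VNIC ID on success and -1 on failure.
 */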
int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
{
	struct bnxt_vnic_info vnic;
	uint16_t *vnic_ids;
	size_t vnic_id_sz;
	int num_vnic_ids, i;
	size_t sz;
	int rc;

	vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
	vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
			RTE_CACHE_LINE_SIZE);
	if (vnic_ids == NULL) {
		rc = -ENOMEM;
		return rc;
	}

	for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
		rte_mem_lock_page(((char *)vnic_ids) + sz);

	rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
	if (rc <= 0)
		goto exit;
	num_vnic_ids = rc;

	/*
	 * Loop through to find the default VNIC ID.
	 * TODO: The easier way would be to obtain the resp->dflt_vnic_id
	 * by sending the hwrm_func_qcfg command to the firmware.
	 */
	for (i = 0; i < num_vnic_ids; i++) {
		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
					bp->pf.first_vf_id + vf);
		if (rc)
			goto exit;
		if (vnic.func_default) {
			rte_free(vnic_ids);
			return vnic.fw_vnic_id;
		}
	}
	/* Could not find a default VNIC. */
	RTE_LOG(ERR, PMD, "No default VNIC\n");
exit:
	rte_free(vnic_ids);
	return -1;
}