net/bnxt: support to set VF rxmode
[dpdk.git] drivers/net/bnxt/bnxt_hwrm.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_io.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#define HWRM_CMD_TIMEOUT                2000

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}

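/*
 * Example (illustrative only): page_getenum() returns the exponent of the
 * smallest supported power-of-two size that covers "size", and
 * page_roundup() returns the rounded-up size itself:
 *
 *      page_getenum(3000)   -> 12      (fits in a 4KB page)
 *      page_roundup(3000)   -> 4096
 *      page_getenum(70000)  -> 21      (needs a 2MB page)
 *      page_roundup(70000)  -> 2097152
 */
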
/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), or a positive, non-zero HWRM error code if the
 * command was rejected by the ChiMP firmware.
 */

static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
                                         uint32_t msg_len)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < bp->max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + 0x100;
        rte_write32(1, bar);

        /* Poll for the valid bit */
        for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
                /* Sanity check on the resp->resp_len */
                rte_rmb();
                if (resp->resp_len && resp->resp_len <=
                                bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(600);
        }

        if (i >= HWRM_CMD_TIMEOUT) {
                RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
                        req->req_type);
                return -1;
        }
        return 0;
}

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
{
        int rc;

        rte_spinlock_lock(&bp->hwrm_lock);
        rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
        rte_spinlock_unlock(&bp->hwrm_lock);
        return rc;
}

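/*
 * Poll-budget arithmetic (for reference): the valid-bit loop in
 * bnxt_hwrm_send_message_locked() makes at most HWRM_CMD_TIMEOUT (2000)
 * passes with a 600us delay each, so firmware has roughly
 * 2000 * 600us = 1.2s to post a valid response before the driver
 * declares a timeout and returns -1.
 */
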
#define HWRM_PREP(req, type, cr, resp) \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(cr); \
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)

#define HWRM_CHECK_RESULT \
        { \
                if (rc) { \
                        RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
                                __func__, rc); \
                        return rc; \
                } \
                if (resp->error_code) { \
                        rc = rte_le_to_cpu_16(resp->error_code); \
                        if (resp->resp_len >= 16) { \
                                struct hwrm_err_output *tmp_hwrm_err_op = \
                                                        (void *)resp; \
                                RTE_LOG(ERR, PMD, \
                                        "%s error %d:%d:%08x:%04x\n", \
                                        __func__, \
                                        rc, tmp_hwrm_err_op->cmd_err, \
                                        rte_le_to_cpu_32( \
                                                tmp_hwrm_err_op->opaque_0), \
                                        rte_le_to_cpu_16( \
                                                tmp_hwrm_err_op->opaque_1)); \
                        } else { \
                                RTE_LOG(ERR, PMD, \
                                        "%s error %d\n", __func__, rc); \
                        } \
                        return rc; \
                } \
        }

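/*
 * Typical command pattern used by the bnxt_hwrm_*() functions below
 * (sketch only; "XXX" stands for a concrete HWRM command name):
 *
 *      struct hwrm_xxx_input req = {.req_type = 0 };
 *      struct hwrm_xxx_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *      HWRM_PREP(req, XXX, -1, resp);  // fill header, clear resp buffer
 *      // ...set command-specific req fields (little-endian)...
 *      rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *      HWRM_CHECK_RESULT;              // returns early on send/fw error
 *      // ...consume command-specific resp fields...
 */
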
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
                                 struct bnxt_vnic_info *vnic,
                                 uint16_t vlan_count,
                                 struct bnxt_vlan_table_entry *vlan_table)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME add multicast flag, when multicast adding options is supported
         * by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_BCAST)
                mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
        if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        if (vnic->flags & BNXT_VNIC_INFO_MCAST)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
        if (vnic->mc_addr_cnt) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
                req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
                req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
        }
        if (vlan_count && vlan_table) {
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
                req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
                        rte_mem_virt2phy(vlan_table));
                req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
        }
        /* Broadcast reception is always enabled */
        req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
                                    mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

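/*
 * bnxt_hwrm_cfa_l2_set_rx_mask() is the primitive behind the VF rx mode
 * support named in the commit subject: the PF updates the flags of the
 * VNIC backing a VF and reprograms the mask. A sketch of a hypothetical
 * caller:
 *
 *      vnic->flags |= BNXT_VNIC_INFO_PROMISC;  // or _ALLMULTI, _MCAST, ...
 *      rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
 */
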
int bnxt_hwrm_clear_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        filter->fw_l2_filter_id = -1;

        return 0;
}

int bnxt_hwrm_set_filter(struct bnxt *bp,
                         uint16_t dst_id,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(dst_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
                req.src_type = filter->src_type;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);

        return rc;
}

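/*
 * Example (illustrative only): a default unicast filter is typically
 * built with an exact L2 address and an all-ones mask before calling
 * bnxt_hwrm_set_filter():
 *
 *      filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
 *              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
 *      memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN);
 *      memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
 *      rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
 */
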
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        int i;

        HWRM_PREP(req, FUNC_QCAPS, -1, resp);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        if (bp->pf.vf_info == NULL) {
                                RTE_LOG(ERR, PMD,
                                        "Failed to allocate VF info\n");
                                return -ENOMEM;
                        }
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        RTE_LOG(ERR, PMD,
                                        "Failed to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp))
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);

        return rc;
}

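/*
 * The limits captured by bnxt_hwrm_func_qcaps() bound everything the
 * driver configures later; e.g. a configuration check might look like
 * (sketch, hypothetical caller):
 *
 *      if (nb_tx_queues > bp->max_tx_rings ||
 *          nb_rx_queues > bp->max_rx_rings)
 *              return -ENOSPC;
 */
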
int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET, -1, resp);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));
        }

        /* Forward all async events to the driver.
         * TODO: Use per-event macros instead of a blanket 0xff mask.
         */
        memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t my_version;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];

        HWRM_PREP(req, VER_GET, -1, resp);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        /*
         * Hold the lock since we may be adjusting the response pointers.
         * Do not use HWRM_CHECK_RESULT here: its early returns would
         * leave the HWRM lock held.
         */
        rte_spinlock_lock(&bp->hwrm_lock);
        rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));
        if (rc) {
                RTE_LOG(ERR, PMD, "%s failed rc:%d\n", __func__, rc);
                goto error;
        }
        if (resp->error_code) {
                rc = rte_le_to_cpu_16(resp->error_code);
                RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc);
                goto error;
        }

        RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj, resp->hwrm_intf_min,
                resp->hwrm_intf_upd,
                resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
        bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
                        (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
        RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        my_version = HWRM_VERSION_MAJOR << 16;
        my_version |= HWRM_VERSION_MINOR << 8;
        my_version |= HWRM_VERSION_UPDATE;

        fw_version = resp->hwrm_intf_maj << 16;
        fw_version |= resp->hwrm_intf_min << 8;
        fw_version |= resp->hwrm_intf_upd;

        if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
                RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (my_version != fw_version) {
                RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
                if (my_version < fw_version) {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is newer than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "The driver may be missing features.\n");
                } else {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is older than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "Not all driver features may be functional.\n");
                }
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                RTE_LOG(ERR, PMD, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = resp->max_req_win_len;
        max_resp_len = resp->max_resp_len;
        if (bp->max_resp_len != max_resp_len) {
                snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
                         bp->pdev->addr.domain, bp->pdev->addr.bus,
                         bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == 0) {
                        RTE_LOG(ERR, PMD,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

error:
        rte_spinlock_unlock(&bp->hwrm_lock);
        return rc;
}

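/*
 * Version packing (worked example): an interface version a.b.c is packed
 * as (a << 16) | (b << 8) | c, so HWRM 1.5.1 becomes 0x010501. This lets
 * my_version and fw_version in bnxt_hwrm_ver_get() be compared
 * numerically.
 */
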
int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
        req.flags = rte_cpu_to_le_32(flags);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->flags &= ~BNXT_FLAG_REGISTERED;

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

        if (conf->link_up) {
                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        req.auto_mode |= conf->auto_mode;
                        enables = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                        req.auto_link_speed_mask = conf->auto_link_speed_mask;
                        enables |=
                           HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                        req.auto_link_speed = bp->link_info.auto_link_speed;
                        enables |=
                                HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
                }
                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Use auto pause only when it is requested without a force
                 * setting; otherwise program a forced pause configuration.
                 */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                RTE_LOG(INFO, PMD, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        link_info->phy_link_status = resp->link;
        if (link_info->phy_link_status !=
            HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK) {
                link_info->link_up = 1;
                link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        } else {
                link_info->link_up = 0;
                link_info->link_speed = 0;
        }
        link_info->duplex = resp->duplex;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

        GET_QUEUE_INFO(0);
        GET_QUEUE_INFO(1);
        GET_QUEUE_INFO(2);
        GET_QUEUE_INFO(3);
        GET_QUEUE_INFO(4);
        GET_QUEUE_INFO(5);
        GET_QUEUE_INFO(6);
        GET_QUEUE_INFO(7);

        return rc;
}

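/*
 * GET_QUEUE_INFO(x) relies on token pasting; GET_QUEUE_INFO(0), for
 * instance, expands to:
 *
 *      bp->cos_queue[0].id = resp->queue_id0;
 *      bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 */
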
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
        int rc = 0;
        uint32_t enables = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_ALLOC, -1, resp);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);
        req.length = rte_cpu_to_le_32(ring->ring_size);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.queue_id = bp->cos_queue[0].id;
                /* FALLTHROUGH */
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
                if (stats_ctx_id != INVALID_STATS_CTX_ID)
                        enables |=
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                req.ring_type = ring_type;
                /*
                 * TODO: Some HWRM versions crash with
                 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
                 */
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                break;
        default:
                RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                return -1;
        }
        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                /* The RING_FREE ring type values mirror the RING_ALLOC
                 * ones, so the switch below can classify the type that
                 * was passed in.
                 */
                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc cp failed. rc:%d\n", rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc rx failed. rc:%d\n", rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc tx failed. rc:%d\n", rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
                        return rc;
                }
        }

        ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
        return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
                        struct bnxt_ring *ring, uint32_t ring_type)
{
        int rc;
        struct hwrm_ring_free_input req = {.req_type = 0 };
        struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_FREE, -1, resp);

        req.ring_type = ring_type;
        req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);

                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
                                rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
                        return rc;
                }
        }
        return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
        int rc = 0;
        struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);

        req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
        req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
        req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
        req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->grp_info[idx].fw_grp_id =
            rte_le_to_cpu_16(resp->ring_group_id);

        return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
        int rc;
        struct hwrm_ring_grp_free_input req = {.req_type = 0 };
        struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_FREE, -1, resp);

        req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
        int rc = 0;
        struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
                return rc;

        HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);

        req.update_period_ms = rte_cpu_to_le_32(0);

        req.stats_dma_addr =
            rte_cpu_to_le_64(cpr->hw_stats_map);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

        return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        int rc;
        struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_FREE, -1, resp);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0, i, j;
        struct hwrm_vnic_alloc_input req = { 0 };
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        /* map ring groups to this vnic */
        RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
                vnic->start_grp_id, vnic->end_grp_id);
        for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
        vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
        vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                ETHER_CRC_LEN + VLAN_TAG_SIZE;
        HWRM_PREP(req, VNIC_ALLOC, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
        return rc;
}

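/*
 * MRU arithmetic (for reference): with the default 1500-byte MTU, the
 * vnic->mru computed in bnxt_hwrm_vnic_alloc() is
 * 1500 + 14 (ETHER_HDR_LEN) + 4 (ETHER_CRC_LEN) + 4 (VLAN_TAG_SIZE)
 * = 1522 bytes.
 */
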
static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
                                        struct bnxt_vnic_info *vnic,
                                        struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_QCFG, -1, resp);

        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        pmode->flags = rte_le_to_cpu_32(resp->flags);
        /* dflt_vnic bit doesn't exist in the _cfg command */
        pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
        pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
        pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
        pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
                                       struct bnxt_vnic_info *vnic,
                                       struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);

        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
        req.flags = rte_cpu_to_le_32(pmode->flags);
        req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
        req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
        req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
        req.enables = rte_cpu_to_le_32(
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
        );

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
        struct bnxt_plcmodes_cfg pmodes;

        rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
        if (rc)
                return rc;

        HWRM_PREP(req, VNIC_CFG, -1, resp);

        /* Only RSS support for now TBD: COS & LB */
        req.enables =
            rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
                             HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
        if (vnic->lb_rule != 0xffff)
                ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
        if (vnic->cos_rule != 0xffff)
                ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
        if (vnic->rss_rule != 0xffff)
                ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
        req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
        req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
        req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
        req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
        req.mru = rte_cpu_to_le_16(vnic->mru);
        if (vnic->func_default)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
        if (vnic->vlan_strip)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
        if (vnic->bd_stall)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
        if (vnic->roce_dual)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
        if (vnic->roce_only)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
        if (vnic->rss_dflt_cr)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);

        return rc;
}

int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
                int16_t fw_vf_id)
{
        int rc = 0;
        struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_QCFG, -1, resp);

        req.enables =
                rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.vf_id = rte_cpu_to_le_16(fw_vf_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
        vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
        vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
        vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
        vnic->mru = rte_le_to_cpu_16(resp->mru);
        vnic->func_default = rte_le_to_cpu_32(
                        resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
        vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
        vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
        vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
        vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
        vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;

        return rc;
}

int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);

        return rc;
}

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);

        req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->rss_rule = INVALID_HW_RING_ID;

        return rc;
}

int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_free_input req = {.req_type = 0 };
        struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(req, VNIC_FREE, -1, resp);

        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_vnic_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
                           struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);

        req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

        req.ring_grp_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_table_dma_addr);
        req.hash_key_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
        req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
                        struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t size;

        HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);

        req.flags = rte_cpu_to_le_32(
                        HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);

        req.enables = rte_cpu_to_le_32(
                HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);

        size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
        size -= RTE_PKTMBUF_HEADROOM;

        req.jumbo_thresh = rte_cpu_to_le_16(size);
        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

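/*
 * Jumbo threshold arithmetic (illustrative, assuming a common
 * 2048+128-byte mbuf data room): bnxt_hwrm_vnic_plcmode_cfg() programs
 * jumbo_thresh = rte_pktmbuf_data_room_size() - RTE_PKTMBUF_HEADROOM,
 * e.g. 2176 - 128 = 2048, so any packet larger than one mbuf's usable
 * space is placed via the aggregation (jumbo) rings.
 */
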
int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
                        struct bnxt_vnic_info *vnic, bool enable)
{
        int rc = 0;
        struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_TPA_CFG, -1, resp);

        if (enable) {
                req.enables = rte_cpu_to_le_32(
                                HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
                                HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
                                HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
                req.flags = rte_cpu_to_le_32(
                                HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
                                HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
                                HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
                                HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
                                HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
                        HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
                req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
                req.max_agg_segs = rte_cpu_to_le_16(5);
                req.max_aggs =
                        rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
                req.min_agg_len = rte_cpu_to_le_32(512);
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
{
        struct hwrm_func_cfg_input req = {0};
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;

        HWRM_PREP(req, FUNC_CFG, -1, resp);

        req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
        req.enables = rte_cpu_to_le_32(
                        HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
        memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
        req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT;

        bp->pf.vf_info[vf].random_mac = false;

        return rc;
}

int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
                                  uint64_t *dropped)
{
        int rc = 0;
        struct hwrm_func_qstats_input req = {.req_type = 0};
        struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_QSTATS, -1, resp);

        req.fid = rte_cpu_to_le_16(fid);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        if (dropped)
                *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);

        return rc;
}

int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
                          struct rte_eth_stats *stats)
{
        int rc = 0;
        struct hwrm_func_qstats_input req = {.req_type = 0};
        struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_QSTATS, -1, resp);

        req.fid = rte_cpu_to_le_16(fid);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
        stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
        stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
        stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
        stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
        stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);

        stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
        stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
        stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
        stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
        stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
        stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);

        stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
        stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);

        stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);

        return rc;
}

int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
{
        int rc = 0;
        struct hwrm_func_clr_stats_input req = {.req_type = 0};
        struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_CLR_STATS, -1, resp);

        req.fid = rte_cpu_to_le_16(fid);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

/*
 * HWRM utility functions
 */

int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq;
                struct bnxt_rx_queue *rxq;
                struct bnxt_cp_ring_info *cpr;

                if (i >= bp->rx_cp_nr_rings) {
                        txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
                        cpr = txq->cp_ring;
                } else {
                        rxq = bp->rx_queues[i];
                        cpr = rxq->cp_ring;
                }

                rc = bnxt_hwrm_stat_clear(bp, cpr);
                if (rc)
                        return rc;
        }
        return 0;
}

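/*
 * Completion-ring indexing used by the bnxt_*_all_hwrm_stat_ctxs()
 * helpers: indices 0..rx_cp_nr_rings-1 address bp->rx_queues[], and
 * indices rx_cp_nr_rings..rx_cp_nr_rings+tx_cp_nr_rings-1 map back to
 * bp->tx_queues[i - rx_cp_nr_rings]. E.g. with 4 Rx and 4 Tx rings,
 * index 5 selects bp->tx_queues[1]->cp_ring.
 */
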
int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        int rc;
        unsigned int i;
        struct bnxt_cp_ring_info *cpr;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                if (i >= bp->rx_cp_nr_rings)
                        cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
                else
                        cpr = bp->rx_queues[i]->cp_ring;
                if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
                        rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
                        cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
                        /*
                         * TODO. Need a better way to reset grp_info.stats_ctx
                         * for Rx rings only. stats_ctx is not saved for Tx
                         * in grp_info.
                         */
                        bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
                        if (rc)
                                return rc;
                }
        }
        return 0;
}

int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq;
                struct bnxt_rx_queue *rxq;
                struct bnxt_cp_ring_info *cpr;

                if (i >= bp->rx_cp_nr_rings) {
                        txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
                        cpr = txq->cp_ring;
                } else {
                        rxq = bp->rx_queues[i];
                        cpr = rxq->cp_ring;
                }

                rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);

                if (rc)
                        return rc;
        }
        return rc;
}

int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
        uint16_t idx;
        int rc = 0;

        for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
                if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
                        RTE_LOG(ERR, PMD,
                                "Attempt to free invalid ring group %d\n",
                                idx);
                        continue;
                }

                rc = bnxt_hwrm_ring_grp_free(bp, idx);

                if (rc)
                        return rc;
        }
        return rc;
}

static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
                                unsigned int idx __rte_unused)
{
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

        bnxt_hwrm_ring_free(bp, cp_ring,
                        HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
        cp_ring->fw_ring_id = INVALID_HW_RING_ID;
        bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
        memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
                        sizeof(*cpr->cp_desc_ring));
        cpr->cp_raw_cons = 0;
}

1440 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1441 {
1442         unsigned int i;
1443         int rc = 0;
1444
1445         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1446                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1447                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1448                 struct bnxt_ring *ring = txr->tx_ring_struct;
1449                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
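                     /*
                      * grp_info slot 0 holds the default completion ring;
                      * RX rings occupy slots 1..rx_cp_nr_rings and the TX
                      * rings follow them, hence the +1 offsets below.
                      */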
1450                 unsigned int idx = bp->rx_cp_nr_rings + i + 1;
1451
1452                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1453                         bnxt_hwrm_ring_free(bp, ring,
1454                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1455                         ring->fw_ring_id = INVALID_HW_RING_ID;
1456                         memset(txr->tx_desc_ring, 0,
1457                                         txr->tx_ring_struct->ring_size *
1458                                         sizeof(*txr->tx_desc_ring));
1459                         memset(txr->tx_buf_ring, 0,
1460                                         txr->tx_ring_struct->ring_size *
1461                                         sizeof(*txr->tx_buf_ring));
1462                         txr->tx_prod = 0;
1463                         txr->tx_cons = 0;
1464                 }
1465                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1466                         bnxt_free_cp_ring(bp, cpr, idx);
1467                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1468                 }
1469         }
1470
1471         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1472                 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1473                 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1474                 struct bnxt_ring *ring = rxr->rx_ring_struct;
1475                 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1476                 unsigned int idx = i + 1;
1477
1478                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1479                         bnxt_hwrm_ring_free(bp, ring,
1480                                         HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1481                         ring->fw_ring_id = INVALID_HW_RING_ID;
1482                         bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
1483                         memset(rxr->rx_desc_ring, 0,
1484                                         rxr->rx_ring_struct->ring_size *
1485                                         sizeof(*rxr->rx_desc_ring));
1486                         memset(rxr->rx_buf_ring, 0,
1487                                         rxr->rx_ring_struct->ring_size *
1488                                         sizeof(*rxr->rx_buf_ring));
1489                         rxr->rx_prod = 0;
1490                         memset(rxr->ag_buf_ring, 0,
1491                                         rxr->ag_ring_struct->ring_size *
1492                                         sizeof(*rxr->ag_buf_ring));
1493                         rxr->ag_prod = 0;
1494                 }
1495                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1496                         bnxt_free_cp_ring(bp, cpr, idx);
1497                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
1498                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1499                 }
1500         }
1501
1502         /* Default completion ring */
1503         {
1504                 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1505
1506                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1507                         bnxt_free_cp_ring(bp, cpr, 0);
1508                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1509                 }
1510         }
1511
1512         return rc;
1513 }
1514
1515 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1516 {
1517         uint16_t i;
1518         int rc = 0;
1519
1520         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1521                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1522                 if (rc)
1523                         return rc;
1524         }
1525         return rc;
1526 }
1527
1528 void bnxt_free_hwrm_resources(struct bnxt *bp)
1529 {
1530         /* Release the HWRM command response buffer */
1531         rte_free(bp->hwrm_cmd_resp_addr);
1532         bp->hwrm_cmd_resp_addr = NULL;
1533         bp->hwrm_cmd_resp_dma_addr = 0;
1534 }
1535
1536 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1537 {
1538         struct rte_pci_device *pdev = bp->pdev;
1539         char type[RTE_MEMZONE_NAMESIZE];
1540
1541         sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
1542                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1543         bp->max_req_len = HWRM_MAX_REQ_LEN;
1544         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1545         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1546         if (bp->hwrm_cmd_resp_addr == NULL)
1547                 return -ENOMEM;
1548         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1549         bp->hwrm_cmd_resp_dma_addr =
1550                 rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
1551         if (bp->hwrm_cmd_resp_dma_addr == 0) {
1552                 RTE_LOG(ERR, PMD,
1553                         "unable to map response address to physical memory\n");
1554                 return -ENOMEM;
1555         }
1556         rte_spinlock_init(&bp->hwrm_lock);
1557
1558         return 0;
1559 }
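
     /*
      * Note: the allocate/lock/map sequence above (rte_malloc(), then
      * rte_mem_lock_page() on each page, then rte_mem_virt2phy()) is the
      * pattern this file uses for every buffer the firmware must DMA into.
      * A minimal sketch of the same pattern, for a hypothetical buffer of
      * "size" bytes (the names here are illustrative, not driver API):
      *
      *     void *buf = rte_malloc("bnxt_dma_buf", size, 0);
      *     size_t sz;
      *
      *     if (buf == NULL)
      *             return -ENOMEM;
      *     for (sz = 0; sz < size; sz += getpagesize())
      *             rte_mem_lock_page((char *)buf + sz);
      *     if (rte_mem_virt2phy(buf) == 0)
      *             return -ENOMEM;
      */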
1560
1561 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1562 {
1563         struct bnxt_filter_info *filter;
1564         int rc = 0;
1565
1566         STAILQ_FOREACH(filter, &vnic->filter, next) {
1567                 rc = bnxt_hwrm_clear_filter(bp, filter);
1568                 if (rc)
1569                         break;
1570         }
1571         return rc;
1572 }
1573
1574 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1575 {
1576         struct bnxt_filter_info *filter;
1577         int rc = 0;
1578
1579         STAILQ_FOREACH(filter, &vnic->filter, next) {
1580                 rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
1581                 if (rc)
1582                         break;
1583         }
1584         return rc;
1585 }
1586
1587 void bnxt_free_tunnel_ports(struct bnxt *bp)
1588 {
1589         if (bp->vxlan_port_cnt)
1590                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
1591                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
1592         bp->vxlan_port = 0;
1593         if (bp->geneve_port_cnt)
1594                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
1595                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
1596         bp->geneve_port = 0;
1597 }
1598
1599 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1600 {
1601         struct bnxt_vnic_info *vnic;
1602         unsigned int i;
1603
1604         if (bp->vnic_info == NULL)
1605                 return;
1606
1607         vnic = &bp->vnic_info[0];
1608         if (BNXT_PF(bp))
1609                 bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
1610
1611         /* VNIC resources */
1612         for (i = 0; i < bp->nr_vnics; i++) {
1613                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1614
1615                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1616
1617                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1618
1619                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
1620
1621                 bnxt_hwrm_vnic_free(bp, vnic);
1622         }
1623         /* Ring resources */
1624         bnxt_free_all_hwrm_rings(bp);
1625         bnxt_free_all_hwrm_ring_grps(bp);
1626         bnxt_free_all_hwrm_stat_ctxs(bp);
1627         bnxt_free_tunnel_ports(bp);
1628 }
1629
1630 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1631 {
1632         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1633
1634         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1635                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1636
1637         switch (conf_link_speed) {
1638         case ETH_LINK_SPEED_10M_HD:
1639         case ETH_LINK_SPEED_100M_HD:
1640                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1641         }
1642         return hw_link_duplex;
1643 }
1644
1645 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
1646 {
1647         uint16_t eth_link_speed = 0;
1648
1649         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
1650                 return ETH_LINK_SPEED_AUTONEG;
1651
1652         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
1653         case ETH_LINK_SPEED_100M:
1654         case ETH_LINK_SPEED_100M_HD:
1655                 eth_link_speed =
1656                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
1657                 break;
1658         case ETH_LINK_SPEED_1G:
1659                 eth_link_speed =
1660                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
1661                 break;
1662         case ETH_LINK_SPEED_2_5G:
1663                 eth_link_speed =
1664                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
1665                 break;
1666         case ETH_LINK_SPEED_10G:
1667                 eth_link_speed =
1668                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
1669                 break;
1670         case ETH_LINK_SPEED_20G:
1671                 eth_link_speed =
1672                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
1673                 break;
1674         case ETH_LINK_SPEED_25G:
1675                 eth_link_speed =
1676                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
1677                 break;
1678         case ETH_LINK_SPEED_40G:
1679                 eth_link_speed =
1680                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
1681                 break;
1682         case ETH_LINK_SPEED_50G:
1683                 eth_link_speed =
1684                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
1685                 break;
1686         default:
1687                 RTE_LOG(ERR, PMD,
1688                         "Unsupported link speed %d; default to AUTO\n",
1689                         conf_link_speed);
1690                 break;
1691         }
1692         return eth_link_speed;
1693 }
1694
1695 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
1696                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
1697                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
1698                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
1699
1700 static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
1701 {
1702         uint32_t one_speed;
1703
1704         if (link_speed == ETH_LINK_SPEED_AUTONEG)
1705                 return 0;
1706
1707         if (link_speed & ETH_LINK_SPEED_FIXED) {
1708                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
1709
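                     /*
                      * x & (x - 1) clears the lowest set bit, so a non-zero
                      * result means more than one speed bit survived the
                      * FIXED-flag mask, e.g. 0x6 & 0x5 == 0x4.
                      */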
1710                 if (one_speed & (one_speed - 1)) {
1711                         RTE_LOG(ERR, PMD,
1712                                 "Invalid advertised speeds (%u) for port %u\n",
1713                                 link_speed, port_id);
1714                         return -EINVAL;
1715                 }
1716                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
1717                         RTE_LOG(ERR, PMD,
1718                                 "Unsupported advertised speed (%u) for port %u\n",
1719                                 link_speed, port_id);
1720                         return -EINVAL;
1721                 }
1722         } else {
1723                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
1724                         RTE_LOG(ERR, PMD,
1725                                 "Unsupported advertised speeds (%u) for port %u\n",
1726                                 link_speed, port_id);
1727                         return -EINVAL;
1728                 }
1729         }
1730         return 0;
1731 }
1732
1733 static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
1734 {
1735         uint16_t ret = 0;
1736
1737         if (link_speed == ETH_LINK_SPEED_AUTONEG)
1738                 link_speed = BNXT_SUPPORTED_SPEEDS;
1739
1740         if (link_speed & ETH_LINK_SPEED_100M)
1741                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1742         if (link_speed & ETH_LINK_SPEED_100M_HD)
1743                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1744         if (link_speed & ETH_LINK_SPEED_1G)
1745                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
1746         if (link_speed & ETH_LINK_SPEED_2_5G)
1747                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
1748         if (link_speed & ETH_LINK_SPEED_10G)
1749                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
1750         if (link_speed & ETH_LINK_SPEED_20G)
1751                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
1752         if (link_speed & ETH_LINK_SPEED_25G)
1753                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
1754         if (link_speed & ETH_LINK_SPEED_40G)
1755                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
1756         if (link_speed & ETH_LINK_SPEED_50G)
1757                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
1758         return ret;
1759 }
1760
1761 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
1762 {
1763         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
1764
1765         switch (hw_link_speed) {
1766         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
1767                 eth_link_speed = ETH_SPEED_NUM_100M;
1768                 break;
1769         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
1770                 eth_link_speed = ETH_SPEED_NUM_1G;
1771                 break;
1772         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
1773                 eth_link_speed = ETH_SPEED_NUM_2_5G;
1774                 break;
1775         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
1776                 eth_link_speed = ETH_SPEED_NUM_10G;
1777                 break;
1778         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
1779                 eth_link_speed = ETH_SPEED_NUM_20G;
1780                 break;
1781         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
1782                 eth_link_speed = ETH_SPEED_NUM_25G;
1783                 break;
1784         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
1785                 eth_link_speed = ETH_SPEED_NUM_40G;
1786                 break;
1787         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
1788                 eth_link_speed = ETH_SPEED_NUM_50G;
1789                 break;
1790         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
1791         default:
1792                 RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
1793                         hw_link_speed);
1794                 break;
1795         }
1796         return eth_link_speed;
1797 }
1798
1799 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
1800 {
1801         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1802
1803         switch (hw_link_duplex) {
1804         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
1805         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
1806                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1807                 break;
1808         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
1809                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
1810                 break;
1811         default:
1812                 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
1813                         hw_link_duplex);
1814                 break;
1815         }
1816         return eth_link_duplex;
1817 }
1818
1819 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
1820 {
1821         int rc = 0;
1822         struct bnxt_link_info *link_info = &bp->link_info;
1823
1824         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
1825         if (rc) {
1826                 RTE_LOG(ERR, PMD,
1827                         "Get link config failed with rc %d\n", rc);
1828                 goto exit;
1829         }
1830         if (link_info->link_up)
1831                 link->link_speed =
1832                         bnxt_parse_hw_link_speed(link_info->link_speed);
1833         else
1834                 link->link_speed = ETH_LINK_SPEED_10M;
1835         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
1836         link->link_status = link_info->link_up;
1837         link->link_autoneg = link_info->auto_mode ==
1838                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
1839                 ETH_LINK_SPEED_FIXED : ETH_LINK_SPEED_AUTONEG;
1840 exit:
1841         return rc;
1842 }
1843
1844 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
1845 {
1846         int rc = 0;
1847         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1848         struct bnxt_link_info link_req;
1849         uint16_t speed;
1850
1851         if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
1852                 return 0;
1853
1854         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
1855                         bp->eth_dev->data->port_id);
1856         if (rc)
1857                 goto error;
1858
1859         memset(&link_req, 0, sizeof(link_req));
1860         link_req.link_up = link_up;
1861         if (!link_up)
1862                 goto port_phy_cfg;
1863
1864         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
1865         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
1866         if (speed == 0) {
1867                 link_req.phy_flags |=
1868                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
1869                 link_req.auto_mode =
1870                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
1871                 link_req.auto_link_speed_mask =
1872                         bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
1873         } else {
1874                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
1875                 link_req.link_speed = speed;
1876                 RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
1877         }
1878         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
1879         link_req.auto_pause = bp->link_info.auto_pause;
1880         link_req.force_pause = bp->link_info.force_pause;
1881
1882 port_phy_cfg:
1883         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
1884         if (rc) {
1885                 RTE_LOG(ERR, PMD,
1886                         "Set link config failed with rc %d\n", rc);
1887         }
1888
1889         rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
1890 error:
1891         return rc;
1892 }
1893
1894 /* JIRA 22088 */
1895 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
1896 {
1897         struct hwrm_func_qcfg_input req = {0};
1898         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1899         int rc = 0;
1900
1901         HWRM_PREP(req, FUNC_QCFG, -1, resp);
1902         req.fid = rte_cpu_to_le_16(0xffff);
1903
1904         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1905
1906         HWRM_CHECK_RESULT;
1907
1908         /* Hard-coded 0xfff VLAN ID mask */
1909         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
1910
1911         switch (resp->port_partition_type) {
1912         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
1913         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
1914         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
1915                 bp->port_partition_type = resp->port_partition_type;
1916                 break;
1917         default:
1918                 bp->port_partition_type = 0;
1919                 break;
1920         }
1921
1922         return rc;
1923 }
1924
1925 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
1926                                    struct hwrm_func_qcaps_output *qcaps)
1927 {
1928         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
1929         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
1930                sizeof(qcaps->mac_address));
1931         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
1932         qcaps->max_rx_rings = fcfg->num_rx_rings;
1933         qcaps->max_tx_rings = fcfg->num_tx_rings;
1934         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
1935         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
1936         qcaps->max_vfs = 0;
1937         qcaps->first_vf_id = 0;
1938         qcaps->max_vnics = fcfg->num_vnics;
1939         qcaps->max_decap_records = 0;
1940         qcaps->max_encap_records = 0;
1941         qcaps->max_tx_wm_flows = 0;
1942         qcaps->max_tx_em_flows = 0;
1943         qcaps->max_rx_wm_flows = 0;
1944         qcaps->max_rx_em_flows = 0;
1945         qcaps->max_flow_id = 0;
1946         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
1947         qcaps->max_sp_tx_rings = 0;
1948         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
1949 }
1950
1951 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
1952 {
1953         struct hwrm_func_cfg_input req = {0};
1954         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1955         int rc;
1956
1957         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
1958                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
1959                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
1960                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
1961                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
1962                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
1963                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
1964                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
1965                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
1966                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
1967         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
1968         req.mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1969                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
1970         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1971                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
1972         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
1973         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
1974         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
1975         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
1976         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
1977         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
1978         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
1979         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
1980         req.fid = rte_cpu_to_le_16(0xffff);
1981
1982         HWRM_PREP(req, FUNC_CFG, -1, resp);
1983
1984         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1985         HWRM_CHECK_RESULT;
1986
1987         return rc;
1988 }
1989
1990 static void populate_vf_func_cfg_req(struct bnxt *bp,
1991                                      struct hwrm_func_cfg_input *req,
1992                                      int num_vfs)
1993 {
1994         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
1995                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
1996                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
1997                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
1998                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
1999                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2000                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2001                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2002                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2003                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2004
2005         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2006                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
2007         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2008                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
2009         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2010                                                 (num_vfs + 1));
2011         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2012         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2013                                                (num_vfs + 1));
2014         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2015         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2016         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2017         /* TODO: For now, do not support VMDq/RFS on VFs. */
2018         req->num_vnics = rte_cpu_to_le_16(1);
2019         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2020                                                  (num_vfs + 1));
2021 }
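
     /*
      * Worked example of the division above (illustrative numbers only):
      * with bp->max_tx_rings == 64 and num_vfs == 7, each of the eight
      * functions (seven VFs plus the PF) is offered 64 / (7 + 1) == 8 TX
      * rings. The integer division silently drops any remainder: with
      * num_vfs == 6, 64 / 7 == 9 rings each and one ring is left over.
      */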
2022
2023 static void add_random_mac_if_needed(struct bnxt *bp,
2024                                      struct hwrm_func_cfg_input *cfg_req,
2025                                      int vf)
2026 {
2027         struct ether_addr mac;
2028
2029         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2030                 return;
2031
2032         if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2033                 cfg_req->enables |=
2034                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2035                 eth_random_addr(cfg_req->dflt_mac_addr);
2036                 bp->pf.vf_info[vf].random_mac = true;
2037         } else {
2038                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2039         }
2040 }
2041
2042 static void reserve_resources_from_vf(struct bnxt *bp,
2043                                       struct hwrm_func_cfg_input *cfg_req,
2044                                       int vf)
2045 {
2046         struct hwrm_func_qcaps_input req = {0};
2047         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2048         int rc;
2049
2050         /* Get the actual allocated values now */
2051         HWRM_PREP(req, FUNC_QCAPS, -1, resp);
2052         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2053         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2054
2055         if (rc) {
2056                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
2057                 copy_func_cfg_to_qcaps(cfg_req, resp);
2058         } else if (resp->error_code) {
2059                 rc = rte_le_to_cpu_16(resp->error_code);
2060                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
2061                 copy_func_cfg_to_qcaps(cfg_req, resp);
2062         }
2063
2064         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2065         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2066         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2067         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2068         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2069         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2070         /*
2071          * TODO: VMDq is not supported with VFs, so max_vnics is always
2072          * forced to 1 for a VF and is not reserved from the PF here:
2073          * bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
2074          */
2075         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2076 }
2077
2078 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2079 {
2080         struct hwrm_func_qcfg_input req = {0};
2081         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2082         int rc;
2083
2084         /* Query the current default VLAN for this VF */
2085         HWRM_PREP(req, FUNC_QCFG, -1, resp);
2086         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2087         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2088         if (rc) {
2089                 RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
2090                 return -1;
2091         } else if (resp->error_code) {
2092                 rc = rte_le_to_cpu_16(resp->error_code);
2093                 RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
2094                 return -1;
2095         }
2096         return rte_le_to_cpu_16(resp->vlan);
2097 }
2098
2099 static int update_pf_resource_max(struct bnxt *bp)
2100 {
2101         struct hwrm_func_qcfg_input req = {0};
2102         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2103         int rc;
2104
2105         /* And copy the allocated numbers into the pf struct */
2106         HWRM_PREP(req, FUNC_QCFG, -1, resp);
2107         req.fid = rte_cpu_to_le_16(0xffff);
2108         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2109         HWRM_CHECK_RESULT;
2110
2111         /* Only TX ring value reflects actual allocation? TODO */
2112         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2113         bp->pf.evb_mode = resp->evb_mode;
2114
2115         return rc;
2116 }
2117
2118 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2119 {
2120         int rc;
2121
2122         if (!BNXT_PF(bp)) {
2123                 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
2124                 return -1;
2125         }
2126
2127         rc = bnxt_hwrm_func_qcaps(bp);
2128         if (rc)
2129                 return rc;
2130
2131         bp->pf.func_cfg_flags &=
2132                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2133                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2134         bp->pf.func_cfg_flags |=
2135                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2136         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2137         return rc;
2138 }
2139
2140 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2141 {
2142         struct hwrm_func_cfg_input req = {0};
2143         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2144         int i;
2145         size_t sz;
2146         int rc = 0;
2147         size_t req_buf_sz;
2148
2149         if (!BNXT_PF(bp)) {
2150                 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
2151                 return -1;
2152         }
2153
2154         rc = bnxt_hwrm_func_qcaps(bp);
2155
2156         if (rc)
2157                 return rc;
2158
2159         bp->pf.active_vfs = num_vfs;
2160
2161         /*
2162          * First, configure the PF to only use one TX ring.  This ensures that
2163          * there are enough rings for all VFs.
2164          *
2165          * If we don't do this, when we call func_alloc() later, we will lock
2166          * extra rings to the PF that won't be available during func_cfg() of
2167          * the VFs.
2168          *
2169          * Firmware versions above 20.6.54 no longer require this workaround.
2170          */
2171         bp->pf.func_cfg_flags &=
2172                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2173                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2174         bp->pf.func_cfg_flags |=
2175                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2176         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2177         if (rc)
2178                 return rc;
2179
2180         /*
2181          * Now, create and register a buffer to hold forwarded VF requests
2182          */
2183         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2184         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2185                 page_roundup(req_buf_sz));
2186         if (bp->pf.vf_req_buf == NULL) {
2187                 rc = -ENOMEM;
2188                 goto error_free;
2189         }
2190         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2191                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2192         for (i = 0; i < num_vfs; i++)
2193                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2194                                         (i * HWRM_MAX_REQ_LEN);
2195
2196         rc = bnxt_hwrm_func_buf_rgtr(bp);
2197         if (rc)
2198                 goto error_free;
2199
2200         populate_vf_func_cfg_req(bp, &req, num_vfs);
2201
2202         bp->pf.active_vfs = 0;
2203         for (i = 0; i < num_vfs; i++) {
2204                 add_random_mac_if_needed(bp, &req, i);
2205
2206                 HWRM_PREP(req, FUNC_CFG, -1, resp);
2207                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2208                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2209                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2210
2211                 /* Clear enable flag for next pass */
2212                 req.enables &= ~rte_cpu_to_le_32(
2213                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2214
2215                 if (rc || resp->error_code) {
2216                         RTE_LOG(ERR, PMD,
2217                                 "Failed to initialize VF %d\n", i);
2218                         RTE_LOG(ERR, PMD,
2219                                 "Not all VFs available. (%d, %d)\n",
2220                                 rc, resp->error_code);
2221                         break;
2222                 }
2223
2224                 reserve_resources_from_vf(bp, &req, i);
2225                 bp->pf.active_vfs++;
2226         }
2227
2228         /*
2229          * Now configure the PF to use "the rest" of the resources.
2230          * STD_TX_RING_MODE is kept here even though it limits the number
2231          * of TX rings, because it allows QoS to function properly; without
2232          * it, the PF rings would break the bandwidth settings.
2233          */
2234         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2235         if (rc)
2236                 goto error_free;
2237
2238         rc = update_pf_resource_max(bp);
2239         if (rc)
2240                 goto error_free;
2241
2242         return rc;
2243
2244 error_free:
2245         bnxt_hwrm_func_buf_unrgtr(bp);
2246         return rc;
2247 }
2248
2249 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2250 {
2251         struct hwrm_func_cfg_input req = {0};
2252         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2253         int rc;
2254
2255         HWRM_PREP(req, FUNC_CFG, -1, resp);
2256
2257         req.fid = rte_cpu_to_le_16(0xffff);
2258         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2259         req.evb_mode = bp->pf.evb_mode;
2260
2261         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2262         HWRM_CHECK_RESULT;
2263
2264         return rc;
2265 }
2266
2267 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2268                                 uint8_t tunnel_type)
2269 {
2270         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2271         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2272         int rc = 0;
2273
2274         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, -1, resp);
2275         req.tunnel_type = tunnel_type;
2276         req.tunnel_dst_port_val = port;
2277         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2278         HWRM_CHECK_RESULT;
2279
2280         switch (tunnel_type) {
2281         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2282                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2283                 bp->vxlan_port = port;
2284                 break;
2285         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2286                 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2287                 bp->geneve_port = port;
2288                 break;
2289         default:
2290                 break;
2291         }
2292         return rc;
2293 }
2294
2295 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2296                                 uint8_t tunnel_type)
2297 {
2298         struct hwrm_tunnel_dst_port_free_input req = {0};
2299         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2300         int rc = 0;
2301
2302         HWRM_PREP(req, TUNNEL_DST_PORT_FREE, -1, resp);
2303         req.tunnel_type = tunnel_type;
2304         req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2305         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2306         HWRM_CHECK_RESULT;
2307
2308         return rc;
2309 }
2310
2311 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf)
2312 {
2313         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2314         struct hwrm_func_cfg_input req = {0};
2315         int rc;
2316
2317         HWRM_PREP(req, FUNC_CFG, -1, resp);
2318         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2319         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2320         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2321         HWRM_CHECK_RESULT;
2322
2323         return rc;
2324 }
2325
2326 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2327 {
2328         uint32_t *flag = flagp;
2329
2330         vnic->flags = *flag;
2331 }
2332
2333 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2334 {
2335         return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2336 }
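
     /*
      * The two helpers above are the building blocks for setting a VF's
      * rx mode from the PF. A hedged sketch of the expected caller (the
      * actual entry point lives outside this file; "flags" stands in for
      * whatever rx mask bits the caller wants each VNIC to carry):
      *
      *     uint32_t flags = ...;
      *
      *     rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
      *                     vf_vnic_set_rxmask_cb, &flags,
      *                     bnxt_set_rx_mask_no_vlan);
      *
      * vf_vnic_set_rxmask_cb() stamps the new flags into every VNIC owned
      * by the VF, and bnxt_set_rx_mask_no_vlan() then pushes each updated
      * mask to the firmware.
      */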
2337
2338 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2339 {
2340         int rc = 0;
2341         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2342         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2343
2344         HWRM_PREP(req, FUNC_BUF_RGTR, -1, resp);
2345
2346         req.req_buf_num_pages = rte_cpu_to_le_16(1);
2347         req.req_buf_page_size = rte_cpu_to_le_16(
2348                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2349         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2350         req.req_buf_page_addr[0] =
2351                 rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
2352         if (req.req_buf_page_addr[0] == 0) {
2353                 RTE_LOG(ERR, PMD,
2354                         "unable to map buffer address to physical memory\n");
2355                 return -ENOMEM;
2356         }
2357
2358         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2359
2360         HWRM_CHECK_RESULT;
2361
2362         return rc;
2363 }
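
     /*
      * Worked example of the page-size encoding above, assuming
      * HWRM_MAX_REQ_LEN is 128 bytes: with 64 active VFs the request
      * buffer spans 64 * 128 = 8192 bytes, so page_getenum() returns 13
      * (2^13 == 8192), and that log2 value is what the firmware is told
      * as the page size.
      */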
2364
2365 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2366 {
2367         int rc = 0;
2368         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2369         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2370
2371         HWRM_PREP(req, FUNC_BUF_UNRGTR, -1, resp);
2372
2373         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2374
2375         HWRM_CHECK_RESULT;
2376
2377         return rc;
2378 }
2379
2380 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2381 {
2382         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2383         struct hwrm_func_cfg_input req = {0};
2384         int rc;
2385
2386         HWRM_PREP(req, FUNC_CFG, -1, resp);
2387         req.fid = rte_cpu_to_le_16(0xffff);
2388         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2389         req.enables = rte_cpu_to_le_32(
2390                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2391         req.async_event_cr = rte_cpu_to_le_16(
2392                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2393         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2394         HWRM_CHECK_RESULT;
2395
2396         return rc;
2397 }
2398
2399 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2400 {
2401         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2402         struct hwrm_func_vf_cfg_input req = {0};
2403         int rc;
2404
2405         HWRM_PREP(req, FUNC_VF_CFG, -1, resp);
2406         req.enables = rte_cpu_to_le_32(
2407                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2408         req.async_event_cr = rte_cpu_to_le_16(
2409                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2410         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2411         HWRM_CHECK_RESULT;
2412
2413         return rc;
2414 }
2415
2416 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2417 {
2418         struct hwrm_func_cfg_input req = {0};
2419         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2420         uint16_t dflt_vlan, fid;
2421         uint32_t func_cfg_flags;
2422         int rc = 0;
2423
2424         HWRM_PREP(req, FUNC_CFG, -1, resp);
2425
2426         if (is_vf) {
2427                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2428                 fid = bp->pf.vf_info[vf].fid;
2429                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2430         } else {
2431                 fid = 0xffff;
2432                 func_cfg_flags = bp->pf.func_cfg_flags;
2433                 dflt_vlan = bp->vlan;
2434         }
2435
2436         req.flags = rte_cpu_to_le_32(func_cfg_flags);
2437         req.fid = rte_cpu_to_le_16(fid);
2438         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2439         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2440
2441         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2442         HWRM_CHECK_RESULT;
2443
2444         return rc;
2445 }
2446
2447 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2448                         uint16_t max_bw, uint16_t enables)
2449 {
2450         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2451         struct hwrm_func_cfg_input req = {0};
2452         int rc;
2453
2454         HWRM_PREP(req, FUNC_CFG, -1, resp);
2455         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2456         req.enables |= rte_cpu_to_le_32(enables);
2457         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2458         req.max_bw = rte_cpu_to_le_32(max_bw);
2459         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2460         HWRM_CHECK_RESULT;
2461
2462         return rc;
2463 }
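
     /*
      * A hedged usage sketch (the values are illustrative; the units of
      * max_bw and the exact enable bit are dictated by the HWRM spec in
      * use): to rate-limit VF 0 a caller might issue
      *
      *     bnxt_hwrm_func_bw_cfg(bp, 0, 2500,
      *                     HWRM_FUNC_CFG_INPUT_ENABLES_MAX_BW);
      */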
2464
2465 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2466 {
2467         struct hwrm_func_cfg_input req = {0};
2468         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2469         int rc = 0;
2470
2471         HWRM_PREP(req, FUNC_CFG, -1, resp);
2472         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2473         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2474         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2475         req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
2476
2477         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2478         HWRM_CHECK_RESULT;
2479
2480         return rc;
2481 }
2482
2483 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2484                               void *encaped, size_t ec_size)
2485 {
2486         int rc = 0;
2487         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2488         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2489
2490         if (ec_size > sizeof(req.encap_request))
2491                 return -1;
2492
2493         HWRM_PREP(req, REJECT_FWD_RESP, -1, resp);
2494
2495         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2496         memcpy(req.encap_request, encaped, ec_size);
2497
2498         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2499
2500         HWRM_CHECK_RESULT;
2501
2502         return rc;
2503 }
2504
2505 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2506                                        struct ether_addr *mac)
2507 {
2508         struct hwrm_func_qcfg_input req = {0};
2509         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2510         int rc;
2511
2512         HWRM_PREP(req, FUNC_QCFG, -1, resp);
2513         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2514         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2515
2516         HWRM_CHECK_RESULT;
2517
2518         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2519         return rc;
2520 }
2521
2522 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2523                             void *encaped, size_t ec_size)
2524 {
2525         int rc = 0;
2526         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2527         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2528
2529         if (ec_size > sizeof(req.encap_request))
2530                 return -1;
2531
2532         HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
2533
2534         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2535         memcpy(req.encap_request, encaped, ec_size);
2536
2537         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2538
2539         HWRM_CHECK_RESULT;
2540
2541         return rc;
2542 }
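
     /*
      * Note: exec_fwd_resp here and reject_fwd_resp above are the two
      * possible completions for a VF request captured in the buffer
      * registered by bnxt_hwrm_func_buf_rgtr(): the PF either re-submits
      * the encapsulated request for the firmware to execute on the VF's
      * behalf, or refuses it.
      */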
2543
2544 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
2545                          struct rte_eth_stats *stats)
2546 {
2547         int rc = 0;
2548         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
2549         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
2550
2551         HWRM_PREP(req, STAT_CTX_QUERY, -1, resp);
2552
2553         req.stat_ctx_id = rte_cpu_to_le_32(cid);
2554
2555         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2556
2557         HWRM_CHECK_RESULT;
2558
2559         stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2560         stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2561         stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2562         stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2563         stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2564         stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2565
2566         stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2567         stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2568         stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2569         stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2570         stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2571         stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2572
2573         stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
2574         stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
2575         stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
2576
2577         return rc;
2578 }
2579
2580 int bnxt_hwrm_port_qstats(struct bnxt *bp)
2581 {
2582         struct hwrm_port_qstats_input req = {0};
2583         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2584         struct bnxt_pf_info *pf = &bp->pf;
2585         int rc;
2586
2587         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2588                 return 0;
2589
2590         HWRM_PREP(req, PORT_QSTATS, -1, resp);
2591         req.port_id = rte_cpu_to_le_16(pf->port_id);
2592         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
2593         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
2594         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2595         HWRM_CHECK_RESULT;
2596         return rc;
2597 }
2598
2599 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
2600 {
2601         struct hwrm_port_clr_stats_input req = {0};
2602         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2603         struct bnxt_pf_info *pf = &bp->pf;
2604         int rc;
2605
2606         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2607                 return 0;
2608
2609         HWRM_PREP(req, PORT_CLR_STATS, -1, resp);
2610         req.port_id = rte_cpu_to_le_16(pf->port_id);
2611         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2612         HWRM_CHECK_RESULT;
2613         return rc;
2614 }
2615
2616 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
2617 {
2618         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2619         struct hwrm_port_led_qcaps_input req = {0};
2620         int rc;
2621
2622         if (BNXT_VF(bp))
2623                 return 0;
2624
2625         HWRM_PREP(req, PORT_LED_QCAPS, -1, resp);
2626         req.port_id = bp->pf.port_id;
2627         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2628         HWRM_CHECK_RESULT;
2629
2630         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
2631                 unsigned int i;
2632
2633                 bp->num_leds = resp->num_leds;
2634                 memcpy(bp->leds, &resp->led0_id,
2635                         sizeof(bp->leds[0]) * bp->num_leds);
2636                 for (i = 0; i < bp->num_leds; i++) {
2637                         struct bnxt_led_info *led = &bp->leds[i];
2638
2639                         uint16_t caps = led->led_state_caps;
2640
2641                         if (!led->led_group_id ||
2642                                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
2643                                 bp->num_leds = 0;
2644                                 break;
2645                         }
2646                 }
2647         }
2648         return rc;
2649 }
2650
2651 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
2652 {
2653         struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2654         struct hwrm_port_led_cfg_input req = {0};
2655         struct bnxt_led_cfg *led_cfg;
2656         uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
2657         uint16_t duration = 0;
2658         int rc, i;
2659
2660         if (!bp->num_leds || BNXT_VF(bp))
2661                 return -EOPNOTSUPP;
2662
2663         HWRM_PREP(req, PORT_LED_CFG, -1, resp);
2664         if (led_on) {
2665                 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
2666                 duration = rte_cpu_to_le_16(500);
2667         }
2668         req.port_id = bp->pf.port_id;
2669         req.num_leds = bp->num_leds;
2670         led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
2671         for (i = 0; i < bp->num_leds; i++, led_cfg++) {
2672                 req.enables |= BNXT_LED_DFLT_ENABLES(i);
2673                 led_cfg->led_id = bp->leds[i].led_id;
2674                 led_cfg->led_state = led_state;
2675                 led_cfg->led_blink_on = duration;
2676                 led_cfg->led_blink_off = duration;
2677                 led_cfg->led_group_id = bp->leds[i].led_group_id;
2678         }
2679
2680         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2681         HWRM_CHECK_RESULT;
2682
2683         return rc;
2684 }
2685
2686 static void bnxt_vnic_count(struct bnxt_vnic_info *vnic, void *cbdata)
2687 {
2688         uint32_t *count = cbdata;
2689
2690         if (vnic->func_default)
2691                 *count = *count + 1;
2692 }
2693
2694 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
2695                                      struct bnxt_vnic_info *vnic __rte_unused)
2696 {
2697         return 0;
2698 }
2699
2700 int bnxt_vf_default_vnic_count(struct bnxt *bp, uint16_t vf)
2701 {
2702         uint32_t count = 0;
2703
2704         bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
2705             &count, bnxt_vnic_count_hwrm_stub);
2706
2707         return count;
2708 }
2709
2710 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
2711                                         uint16_t *vnic_ids)
2712 {
2713         struct hwrm_func_vf_vnic_ids_query_input req = {0};
2714         struct hwrm_func_vf_vnic_ids_query_output *resp =
2715                                                 bp->hwrm_cmd_resp_addr;
2716         int rc;
2717
2718         /* First query all VNIC ids */
2719         HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, -1, resp);
2720
2721         req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
2722         req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
2723         req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));
2724
2725         if (req.vnic_id_tbl_addr == 0) {
2726                 RTE_LOG(ERR, PMD,
2727                 "unable to map VNIC ID table address to physical memory\n");
2728                 return -ENOMEM;
2729         }
2730         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2731         if (rc) {
2732                 RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
2733                 return -1;
2734         } else if (resp->error_code) {
2735                 rc = rte_le_to_cpu_16(resp->error_code);
2736                 RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
2737                 return -1;
2738         }
2739
2740         return rte_le_to_cpu_32(resp->vnic_id_cnt);
2741 }
2742
2743 /*
2744  * This function queries the VNIC IDs for a specified VF. For each VNIC it
2745  * calls vnic_cb to update the necessary fields in vnic_info with cbdata,
2746  * then calls hwrm_cb to program the new VNIC configuration into firmware.
2747  */
2748 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
2749         void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
2750         int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
2751 {
2752         struct bnxt_vnic_info vnic;
2753         int rc = 0;
2754         int i, num_vnic_ids;
2755         uint16_t *vnic_ids;
2756         size_t vnic_id_sz;
2757         size_t sz;
2758
2759         /* First query all VNIC ids */
2760         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
2761         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
2762                         RTE_CACHE_LINE_SIZE);
2763         if (vnic_ids == NULL) {
2764                 rc = -ENOMEM;
2765                 return rc;
2766         }
2767         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
2768                 rte_mem_lock_page(((char *)vnic_ids) + sz);
2769
2770         num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
2771
2772         if (num_vnic_ids < 0) {
2773                 rte_free(vnic_ids);     /* avoid leaking the ID table */
                     return num_vnic_ids;
             }
2774
2775         /* Query each VNIC, apply vnic_cb to it, then reprogram it via hwrm_cb */
2776
2777         for (i = 0; i < num_vnic_ids; i++) {
2778                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
2779                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
2780                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
2781                 if (rc)
2782                         break;
2783                 if (vnic.mru == 4)      /* Indicates unallocated */
2784                         continue;
2785
2786                 vnic_cb(&vnic, cbdata);
2787
2788                 rc = hwrm_cb(bp, &vnic);
2789                 if (rc)
2790                         break;
2791         }
2792
2793         rte_free(vnic_ids);
2794
2795         return rc;
2796 }
2797
2798 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
2799                                               bool on)
2800 {
2801         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2802         struct hwrm_func_cfg_input req = {0};
2803         int rc;
2804
2805         HWRM_PREP(req, FUNC_CFG, -1, resp);
2806         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2807         req.enables |= rte_cpu_to_le_32(
2808                         HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
2809         req.vlan_antispoof_mode = on ?
2810                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
2811                 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
2812         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2813         HWRM_CHECK_RESULT;
2814
2815         return rc;
2816 }
2817
2818 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
2819 {
2820         struct bnxt_vnic_info vnic;
2821         uint16_t *vnic_ids;
2822         size_t vnic_id_sz;
2823         int num_vnic_ids, i;
2824         size_t sz;
2825         int rc;
2826
2827         vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
2828         vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
2829                         RTE_CACHE_LINE_SIZE);
2830         if (vnic_ids == NULL) {
2831                 rc = -ENOMEM;
2832                 return rc;
2833         }
2834
2835         for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
2836                 rte_mem_lock_page(((char *)vnic_ids) + sz);
2837
2838         rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
2839         if (rc <= 0)
2840                 goto exit;
2841         num_vnic_ids = rc;
2842
2843         /*
2844          * Loop through to find the default VNIC ID.
2845          * TODO: The easier way would be to obtain the resp->dflt_vnic_id
2846          * by sending the hwrm_func_qcfg command to the firmware.
2847          */
2848         for (i = 0; i < num_vnic_ids; i++) {
2849                 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
2850                 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
2851                 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
2852                                         bp->pf.first_vf_id + vf);
2853                 if (rc)
2854                         goto exit;
2855                 if (vnic.func_default) {
2856                         rte_free(vnic_ids);
2857                         return vnic.fw_vnic_id;
2858                 }
2859         }
2860         /* Could not find a default VNIC. */
2861         RTE_LOG(ERR, PMD, "No default VNIC\n");
2862 exit:
2863         rte_free(vnic_ids);
2864         return -1;
2865 }