net/bnxt: refactor the query stats
[dpdk.git] drivers/net/bnxt/bnxt_hwrm.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) Broadcom Limited.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Broadcom Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <unistd.h>
35
38 #include <rte_byteorder.h>
39 #include <rte_common.h>
40 #include <rte_cycles.h>
41 #include <rte_malloc.h>
42 #include <rte_memzone.h>
43 #include <rte_version.h>
44
45 #include "bnxt.h"
46 #include "bnxt_cpr.h"
47 #include "bnxt_filter.h"
48 #include "bnxt_hwrm.h"
49 #include "bnxt_rxq.h"
50 #include "bnxt_rxr.h"
51 #include "bnxt_ring.h"
52 #include "bnxt_txq.h"
53 #include "bnxt_txr.h"
54 #include "bnxt_vnic.h"
55 #include "hsi_struct_def_dpdk.h"
56
57 #include <rte_io.h>
58
59 #define HWRM_CMD_TIMEOUT                2000
60
61 struct bnxt_plcmodes_cfg {
62         uint32_t        flags;
63         uint16_t        jumbo_thresh;
64         uint16_t        hds_offset;
65         uint16_t        hds_threshold;
66 };
67
68 static int page_getenum(size_t size)
69 {
70         if (size <= 1 << 4)
71                 return 4;
72         if (size <= 1 << 12)
73                 return 12;
74         if (size <= 1 << 13)
75                 return 13;
76         if (size <= 1 << 16)
77                 return 16;
78         if (size <= 1 << 21)
79                 return 21;
80         if (size <= 1 << 22)
81                 return 22;
82         if (size <= 1 << 30)
83                 return 30;
84         RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
85         return sizeof(void *) * 8 - 1;
86 }
87
88 static int page_roundup(size_t size)
89 {
90         return 1 << page_getenum(size);
91 }
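
/*
 * Worked example (illustrative only): page_getenum() returns the smallest
 * supported log2 page size that fits the request, and page_roundup() rounds
 * the size up to that boundary:
 *
 *	page_getenum(3000)  == 12  ->  page_roundup(3000)  == 4096
 *	page_getenum(5000)  == 13  ->  page_roundup(5000)  == 8192
 *	page_getenum(70000) == 21  ->  page_roundup(70000) == 2097152
 */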
92
93 /*
94  * HWRM functions (sent over the HWRM channel)
95  * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
96  * fails (i.e. a timeout), or a positive non-zero HWRM error code if the
97  * ChiMP firmware rejects the command.
98  */
99
100 static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
101                                         uint32_t msg_len)
102 {
103         unsigned int i;
104         struct input *req = msg;
105         struct output *resp = bp->hwrm_cmd_resp_addr;
106         uint32_t *data = msg;
107         uint8_t *bar;
108         uint8_t *valid;
109
110         /* Write request msg to hwrm channel */
111         for (i = 0; i < msg_len; i += 4) {
112                 bar = (uint8_t *)bp->bar0 + i;
113                 rte_write32(*data, bar);
114                 data++;
115         }
116
117         /* Zero the rest of the request space */
118         for (; i < bp->max_req_len; i += 4) {
119                 bar = (uint8_t *)bp->bar0 + i;
120                 rte_write32(0, bar);
121         }
122
123         /* Ring channel doorbell */
124         bar = (uint8_t *)bp->bar0 + 0x100;
125         rte_write32(1, bar);
126
127         /* Poll for the valid bit */
128         for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
129                 /* Sanity check on the resp->resp_len */
130                 rte_rmb();
131                 if (resp->resp_len && resp->resp_len <=
132                                 bp->max_resp_len) {
133                         /* Last byte of resp contains the valid key */
134                         valid = (uint8_t *)resp + resp->resp_len - 1;
135                         if (*valid == HWRM_RESP_VALID_KEY)
136                                 break;
137                 }
138                 rte_delay_us(600);
139         }
140
141         if (i >= HWRM_CMD_TIMEOUT) {
142                 RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
143                         req->req_type);
144                 goto err_ret;
145         }
146         return 0;
147
148 err_ret:
149         return -1;
150 }
151
152 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
153 {
154         int rc;
155
156         rte_spinlock_lock(&bp->hwrm_lock);
157         rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
158         rte_spinlock_unlock(&bp->hwrm_lock);
159         return rc;
160 }
161
162 #define HWRM_PREP(req, type, cr, resp) \
163         memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
164         req.req_type = rte_cpu_to_le_16(HWRM_##type); \
165         req.cmpl_ring = rte_cpu_to_le_16(cr); \
166         req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
167         req.target_id = rte_cpu_to_le_16(0xffff); \
168         req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)
169
170 #define HWRM_CHECK_RESULT \
171         { \
172                 if (rc) { \
173                         RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
174                                 __func__, rc); \
175                         return rc; \
176                 } \
177                 if (resp->error_code) { \
178                         rc = rte_le_to_cpu_16(resp->error_code); \
179                         if (resp->resp_len >= 16) { \
180                                 struct hwrm_err_output *tmp_hwrm_err_op = \
181                                                         (void *)resp; \
182                                 RTE_LOG(ERR, PMD, \
183                                         "%s error %d:%d:%08x:%04x\n", \
184                                         __func__, \
185                                         rc, tmp_hwrm_err_op->cmd_err, \
186                                         rte_le_to_cpu_32(\
187                                                 tmp_hwrm_err_op->opaque_0), \
188                                         rte_le_to_cpu_16(\
189                                                 tmp_hwrm_err_op->opaque_1)); \
190                         } \
191                         else { \
192                                 RTE_LOG(ERR, PMD, \
193                                         "%s error %d\n", __func__, rc); \
194                         } \
195                         return rc; \
196                 } \
197         }
198
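/*
 * Illustrative sketch of the pattern every wrapper below follows (editorial
 * aid, not extra driver code): prepare the request with HWRM_PREP, send it,
 * then let HWRM_CHECK_RESULT validate both the transport result and the
 * firmware status. Assuming a struct bnxt *bp whose HWRM resources are
 * already allocated:
 *
 *	int rc;
 *	struct hwrm_func_reset_input req = {.req_type = 0 };
 *	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *	HWRM_PREP(req, FUNC_RESET, -1, resp);
 *	(fill any command-specific fields of req here)
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT;	(returns early on timeout or firmware error)
 */
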
199 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
200 {
201         int rc = 0;
202         struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
203         struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
204
205         HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
206         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
207         req.mask = 0;
208
209         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
210
211         HWRM_CHECK_RESULT;
212
213         return rc;
214 }
215
216 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
217 {
218         int rc = 0;
219         struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
220         struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
221         uint32_t mask = 0;
222
223         HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
224         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
225
226         /* FIXME: add the multicast flag once multicast address
227          * configuration is supported.
228          */
229         if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
230                 mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
231         if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
232                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
233         if (vnic->mc_addr_cnt) {
234                 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
235                 req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
236                 req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
237         }
238         req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
239                                     mask);
240
241         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
242
243         HWRM_CHECK_RESULT;
244
245         return rc;
246 }
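
/*
 * Usage sketch (hypothetical caller): enabling promiscuous reception is a
 * matter of setting the flag on the VNIC and re-issuing the mask:
 *
 *	vnic->flags |= BNXT_VNIC_INFO_PROMISC;
 *	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic);
 *
 * Broadcast is always accepted; multicast entries are taken from the DMA
 * table at vnic->mc_list_dma_addr when vnic->mc_addr_cnt is non-zero.
 */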
247
248 int bnxt_hwrm_clear_filter(struct bnxt *bp,
249                            struct bnxt_filter_info *filter)
250 {
251         int rc = 0;
252         struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
253         struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
254
255         HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);
256
257         req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
258
259         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
260
261         HWRM_CHECK_RESULT;
262
263         filter->fw_l2_filter_id = -1;
264
265         return 0;
266 }
267
268 int bnxt_hwrm_set_filter(struct bnxt *bp,
269                          uint16_t dst_id,
270                          struct bnxt_filter_info *filter)
271 {
272         int rc = 0;
273         struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
274         struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
275         uint32_t enables = 0;
276
277         HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);
278
279         req.flags = rte_cpu_to_le_32(filter->flags);
280
281         enables = filter->enables |
282               HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
283         req.dst_id = rte_cpu_to_le_16(dst_id);
284
285         if (enables &
286             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
287                 memcpy(req.l2_addr, filter->l2_addr,
288                        ETHER_ADDR_LEN);
289         if (enables &
290             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
291                 memcpy(req.l2_addr_mask, filter->l2_addr_mask,
292                        ETHER_ADDR_LEN);
293         if (enables &
294             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
295                 req.l2_ovlan = filter->l2_ovlan;
296         if (enables &
297             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
298                 req.l2_ovlan_mask = filter->l2_ovlan_mask;
299
300         req.enables = rte_cpu_to_le_32(enables);
301
302         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
303
304         HWRM_CHECK_RESULT;
305
306         filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
307
308         return rc;
309 }
310
311 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
312 {
313         int rc = 0;
314         struct hwrm_func_qcaps_input req = {.req_type = 0 };
315         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
316         uint16_t new_max_vfs;
317         int i;
318
319         HWRM_PREP(req, FUNC_QCAPS, -1, resp);
320
321         req.fid = rte_cpu_to_le_16(0xffff);
322
323         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
324
325         HWRM_CHECK_RESULT;
326
327         bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
328         if (BNXT_PF(bp)) {
329                 bp->pf.port_id = resp->port_id;
330                 bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
331                 new_max_vfs = bp->pdev->max_vfs;
332                 if (new_max_vfs != bp->pf.max_vfs) {
333                         if (bp->pf.vf_info)
334                                 rte_free(bp->pf.vf_info);
335                         bp->pf.vf_info = rte_malloc("bnxt_vf_info",
336                             sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                            if (bp->pf.vf_info == NULL) {
                                    RTE_LOG(ERR, PMD,
                                            "Failed to alloc vf info\n");
                                    return -ENOMEM;
                            }
337                         bp->pf.max_vfs = new_max_vfs;
338                         for (i = 0; i < new_max_vfs; i++) {
339                                 bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
340                                 bp->pf.vf_info[i].vlan_table =
341                                         rte_zmalloc("VF VLAN table",
342                                                     getpagesize(),
343                                                     getpagesize());
344                                 if (bp->pf.vf_info[i].vlan_table == NULL)
345                                         RTE_LOG(ERR, PMD,
346                                         "Fail to alloc VLAN table for VF %d\n",
347                                         i);
348                                 else
349                                         rte_mem_lock_page(
350                                                 bp->pf.vf_info[i].vlan_table);
351                                 STAILQ_INIT(&bp->pf.vf_info[i].filter);
352                         }
353                 }
354         }
355
356         bp->fw_fid = rte_le_to_cpu_32(resp->fid);
357         memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
358         bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
359         bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
360         bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
361         bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
362         bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
363         /* TODO: For now, do not support VMDq/RFS on VFs. */
364         if (BNXT_PF(bp)) {
365                 if (bp->pf.max_vfs)
366                         bp->max_vnics = 1;
367                 else
368                         bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
369         } else {
370                 bp->max_vnics = 1;
371         }
372         bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
373         if (BNXT_PF(bp))
374                 bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
375
376         return rc;
377 }
378
379 int bnxt_hwrm_func_reset(struct bnxt *bp)
380 {
381         int rc = 0;
382         struct hwrm_func_reset_input req = {.req_type = 0 };
383         struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
384
385         HWRM_PREP(req, FUNC_RESET, -1, resp);
386
387         req.enables = rte_cpu_to_le_32(0);
388
389         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
390
391         HWRM_CHECK_RESULT;
392
393         return rc;
394 }
395
396 int bnxt_hwrm_func_driver_register(struct bnxt *bp)
397 {
398         int rc;
399         struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
400         struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
401
402         if (bp->flags & BNXT_FLAG_REGISTERED)
403                 return 0;
404
405         HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
406         req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
407                         HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
408         req.ver_maj = RTE_VER_YEAR;
409         req.ver_min = RTE_VER_MONTH;
410         req.ver_upd = RTE_VER_MINOR;
411
412         if (BNXT_PF(bp)) {
413                 req.enables |= rte_cpu_to_le_32(
414                         HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD);
415                 memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
416                        RTE_MIN(sizeof(req.vf_req_fwd),
417                                sizeof(bp->pf.vf_req_fwd)));
418         }
419
420         memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
421         req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */
422
423         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
424
425         HWRM_CHECK_RESULT;
426
427         bp->flags |= BNXT_FLAG_REGISTERED;
428
429         return rc;
430 }
431
432 int bnxt_hwrm_ver_get(struct bnxt *bp)
433 {
434         int rc = 0;
435         struct hwrm_ver_get_input req = {.req_type = 0 };
436         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
437         uint32_t my_version;
438         uint32_t fw_version;
439         uint16_t max_resp_len;
440         char type[RTE_MEMZONE_NAMESIZE];
441
442         HWRM_PREP(req, VER_GET, -1, resp);
443
444         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
445         req.hwrm_intf_min = HWRM_VERSION_MINOR;
446         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
447
448         /*
449          * Hold the lock since we may be adjusting the response pointers.
450          */
451         rte_spinlock_lock(&bp->hwrm_lock);
452         rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));
453
454         HWRM_CHECK_RESULT;
455
456         RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
457                 resp->hwrm_intf_maj, resp->hwrm_intf_min,
458                 resp->hwrm_intf_upd,
459                 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
460         bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
461                         (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
462         RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
463                 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
464
465         my_version = HWRM_VERSION_MAJOR << 16;
466         my_version |= HWRM_VERSION_MINOR << 8;
467         my_version |= HWRM_VERSION_UPDATE;
468
469         fw_version = resp->hwrm_intf_maj << 16;
470         fw_version |= resp->hwrm_intf_min << 8;
471         fw_version |= resp->hwrm_intf_upd;
472
473         if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
474                 RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
475                 rc = -EINVAL;
476                 goto error;
477         }
478
479         if (my_version != fw_version) {
480                 RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
481                 if (my_version < fw_version) {
482                         RTE_LOG(INFO, PMD,
483                                 "Firmware API version is newer than driver.\n");
484                         RTE_LOG(INFO, PMD,
485                                 "The driver may be missing features.\n");
486                 } else {
487                         RTE_LOG(INFO, PMD,
488                                 "Firmware API version is older than driver.\n");
489                         RTE_LOG(INFO, PMD,
490                                 "Not all driver features may be functional.\n");
491                 }
492         }
493
494         if (bp->max_req_len > resp->max_req_win_len) {
495                 RTE_LOG(ERR, PMD, "Unsupported request length\n");
496                 rc = -EINVAL;
497         }
498         bp->max_req_len = resp->max_req_win_len;
499         max_resp_len = resp->max_resp_len;
500         if (bp->max_resp_len != max_resp_len) {
501                 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
502                         bp->pdev->addr.domain, bp->pdev->addr.bus,
503                         bp->pdev->addr.devid, bp->pdev->addr.function);
504
505                 rte_free(bp->hwrm_cmd_resp_addr);
506
507                 bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
508                 if (bp->hwrm_cmd_resp_addr == NULL) {
509                         rc = -ENOMEM;
510                         goto error;
511                 }
512                 rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
513                 bp->hwrm_cmd_resp_dma_addr =
514                         rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
515                 if (bp->hwrm_cmd_resp_dma_addr == 0) {
516                         RTE_LOG(ERR, PMD,
517                         "Unable to map response buffer to physical memory.\n");
518                         rc = -ENOMEM;
519                         goto error;
520                 }
521                 bp->max_resp_len = max_resp_len;
522         }
523
524 error:
525         rte_spinlock_unlock(&bp->hwrm_lock);
526         return rc;
527 }
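
/*
 * Worked example of the version packing above: both versions are packed as
 * (maj << 16) | (min << 8) | upd, so HWRM interface 1.5.1 becomes
 *
 *	(1 << 16) | (5 << 8) | 1 == 0x010501
 *
 * which reduces the newer/older comparison to a plain integer compare.
 */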
528
529 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
530 {
531         int rc;
532         struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
533         struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
534
535         if (!(bp->flags & BNXT_FLAG_REGISTERED))
536                 return 0;
537
538         HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
539         req.flags = flags;
540
541         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
542
543         HWRM_CHECK_RESULT;
544
545         bp->flags &= ~BNXT_FLAG_REGISTERED;
546
547         return rc;
548 }
549
550 static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
551 {
552         int rc = 0;
553         struct hwrm_port_phy_cfg_input req = {0};
554         struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
555         uint32_t enables = 0;
556
557         HWRM_PREP(req, PORT_PHY_CFG, -1, resp);
558
559         if (conf->link_up) {
560                 req.flags = rte_cpu_to_le_32(conf->phy_flags);
561                 req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
562                 /*
563                  * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
564                  * any auto mode, even "none".
565                  */
566                 if (!conf->link_speed) {
567                         req.auto_mode |= conf->auto_mode;
568                         enables = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
569                         req.auto_link_speed_mask = conf->auto_link_speed_mask;
570                         enables |=
571                            HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
572                         req.auto_link_speed = bp->link_info.auto_link_speed;
573                         enables |=
574                                 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
575                 }
576                 req.auto_duplex = conf->duplex;
577                 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
578                 req.auto_pause = conf->auto_pause;
579                 req.force_pause = conf->force_pause;
580                 /* Set force_pause if there is no auto or if there is a force */
581                 if (req.auto_pause && !req.force_pause)
582                         enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
583                 else
584                         enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
585
586                 req.enables = rte_cpu_to_le_32(enables);
587         } else {
588                 req.flags =
589                 rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
590                 RTE_LOG(INFO, PMD, "Force Link Down\n");
591         }
592
593         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
594
595         HWRM_CHECK_RESULT;
596
597         return rc;
598 }
599
600 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
601                                    struct bnxt_link_info *link_info)
602 {
603         int rc = 0;
604         struct hwrm_port_phy_qcfg_input req = {0};
605         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
606
607         HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);
608
609         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
610
611         HWRM_CHECK_RESULT;
612
613         link_info->phy_link_status = resp->link;
614         if (link_info->phy_link_status != HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK) {
615                 link_info->link_up = 1;
616                 link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
617         } else {
618                 link_info->link_up = 0;
619                 link_info->link_speed = 0;
620         }
621         link_info->duplex = resp->duplex;
622         link_info->pause = resp->pause;
623         link_info->auto_pause = resp->auto_pause;
624         link_info->force_pause = resp->force_pause;
625         link_info->auto_mode = resp->auto_mode;
626
627         link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
628         link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
629         link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
630         link_info->phy_ver[0] = resp->phy_maj;
631         link_info->phy_ver[1] = resp->phy_min;
632         link_info->phy_ver[2] = resp->phy_bld;
633
634         return rc;
635 }
636
637 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
638 {
639         int rc = 0;
640         struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
641         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
642
643         HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);
644
645         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
646
647         HWRM_CHECK_RESULT;
648
649 #define GET_QUEUE_INFO(x) \
650         bp->cos_queue[x].id = resp->queue_id##x; \
651         bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
652
653         GET_QUEUE_INFO(0);
654         GET_QUEUE_INFO(1);
655         GET_QUEUE_INFO(2);
656         GET_QUEUE_INFO(3);
657         GET_QUEUE_INFO(4);
658         GET_QUEUE_INFO(5);
659         GET_QUEUE_INFO(6);
660         GET_QUEUE_INFO(7);
661
662         return rc;
663 }
664
665 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
666                          struct bnxt_ring *ring,
667                          uint32_t ring_type, uint32_t map_index,
668                          uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
669 {
670         int rc = 0;
671         uint32_t enables = 0;
672         struct hwrm_ring_alloc_input req = {.req_type = 0 };
673         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
674
675         HWRM_PREP(req, RING_ALLOC, -1, resp);
676
677         req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
678         req.fbo = rte_cpu_to_le_32(0);
679         /* Association of ring index with doorbell index */
680         req.logical_id = rte_cpu_to_le_16(map_index);
681         req.length = rte_cpu_to_le_32(ring->ring_size);
682
683         switch (ring_type) {
684         case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
685                 req.queue_id = bp->cos_queue[0].id;
686                 /* FALLTHROUGH */
687         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
688                 req.ring_type = ring_type;
689                 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
690                 req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
691                 if (stats_ctx_id != INVALID_STATS_CTX_ID)
692                         enables |=
693                         HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
694                 break;
695         case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
696                 req.ring_type = ring_type;
697                 /*
698                  * TODO: Some HWRM versions crash with
699                  * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
700                  */
701                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
702                 break;
703         default:
704                 RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
705                         ring_type);
706                 return -1;
707         }
708         req.enables = rte_cpu_to_le_32(enables);
709
710         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
711
712         if (rc || resp->error_code) {
713                 if (rc == 0 && resp->error_code)
714                         rc = rte_le_to_cpu_16(resp->error_code);
715                 switch (ring_type) {
716                 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
717                         RTE_LOG(ERR, PMD,
718                                 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
719                         return rc;
720                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
721                         RTE_LOG(ERR, PMD,
722                                 "hwrm_ring_alloc rx failed. rc:%d\n", rc);
723                         return rc;
724                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
725                         RTE_LOG(ERR, PMD,
726                                 "hwrm_ring_alloc tx failed. rc:%d\n", rc);
727                         return rc;
728                 default:
729                         RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
730                         return rc;
731                 }
732         }
733
734         ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
735         return rc;
736 }
737
738 int bnxt_hwrm_ring_free(struct bnxt *bp,
739                         struct bnxt_ring *ring, uint32_t ring_type)
740 {
741         int rc;
742         struct hwrm_ring_free_input req = {.req_type = 0 };
743         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
744
745         HWRM_PREP(req, RING_FREE, -1, resp);
746
747         req.ring_type = ring_type;
748         req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
749
750         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
751
752         if (rc || resp->error_code) {
753                 if (rc == 0 && resp->error_code)
754                         rc = rte_le_to_cpu_16(resp->error_code);
755
756                 switch (ring_type) {
757                 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
758                         RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
759                                 rc);
760                         return rc;
761                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
762                         RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
763                                 rc);
764                         return rc;
765                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
766                         RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
767                                 rc);
768                         return rc;
769                 default:
770                         RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
771                         return rc;
772                 }
773         }
774         return 0;
775 }
776
777 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
778 {
779         int rc = 0;
780         struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
781         struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
782
783         HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);
784
785         req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
786         req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
787         req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
788         req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
789
790         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
791
792         HWRM_CHECK_RESULT;
793
794         bp->grp_info[idx].fw_grp_id =
795             rte_le_to_cpu_16(resp->ring_group_id);
796
797         return rc;
798 }
799
800 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
801 {
802         int rc;
803         struct hwrm_ring_grp_free_input req = {.req_type = 0 };
804         struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
805
806         HWRM_PREP(req, RING_GRP_FREE, -1, resp);
807
808         req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
809
810         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
811
812         HWRM_CHECK_RESULT;
813
814         bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
815         return rc;
816 }
817
818 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
819 {
820         int rc = 0;
821         struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
822         struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
823
824         if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
825                 return rc;
826
827         HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
828
829         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
830
831         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
832
833         HWRM_CHECK_RESULT;
834
835         return rc;
836 }
837
838 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
839                                 unsigned int idx __rte_unused)
840 {
841         int rc;
842         struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
843         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
844
845         HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);
846
847         req.update_period_ms = rte_cpu_to_le_32(0);
848
849         req.stats_dma_addr =
850             rte_cpu_to_le_64(cpr->hw_stats_map);
851
852         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
853
854         HWRM_CHECK_RESULT;
855
856         cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
857
858         return rc;
859 }
860
861 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
862                                 unsigned int idx __rte_unused)
863 {
864         int rc;
865         struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
866         struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
867
868         HWRM_PREP(req, STAT_CTX_FREE, -1, resp);
869
870         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
871
872         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
873
874         HWRM_CHECK_RESULT;
875
876         return rc;
877 }
878
879 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
880 {
881         int rc = 0, i, j;
882         struct hwrm_vnic_alloc_input req = { 0 };
883         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
884
885         /* map ring groups to this vnic */
886         RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
887                 vnic->start_grp_id, vnic->end_grp_id);
888         for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
889                 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
890         vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
891         vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
892         vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
893         vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
894         vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
895                                 ETHER_CRC_LEN + VLAN_TAG_SIZE;
896         HWRM_PREP(req, VNIC_ALLOC, -1, resp);
897
898         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
899
900         HWRM_CHECK_RESULT;
901
902         vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
903         return rc;
904 }
905
906 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
907                                         struct bnxt_vnic_info *vnic,
908                                         struct bnxt_plcmodes_cfg *pmode)
909 {
910         int rc = 0;
911         struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
912         struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
913
914         HWRM_PREP(req, VNIC_PLCMODES_QCFG, -1, resp);
915
916         req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
917
918         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
919
920         HWRM_CHECK_RESULT;
921
922         pmode->flags = rte_le_to_cpu_32(resp->flags);
923         /* dflt_vnic bit doesn't exist in the _cfg command */
924         pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
925         pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
926         pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
927         pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
928
929         return rc;
930 }
931
932 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
933                                        struct bnxt_vnic_info *vnic,
934                                        struct bnxt_plcmodes_cfg *pmode)
935 {
936         int rc = 0;
937         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
938         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
939
940         HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);
941
942         req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
943         req.flags = rte_cpu_to_le_32(pmode->flags);
944         req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
945         req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
946         req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
947         req.enables = rte_cpu_to_le_32(
948             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
949             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
950             HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
951         );
952
953         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
954
955         HWRM_CHECK_RESULT;
956
957         return rc;
958 }
959
960 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
961 {
962         int rc = 0;
963         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
964         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
965         uint32_t ctx_enable_flag = 0;
966         struct bnxt_plcmodes_cfg pmodes;
967
968         rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
969         if (rc)
970                 return rc;
971
972         HWRM_PREP(req, VNIC_CFG, -1, resp);
973
974         /* Only RSS support for now TBD: COS & LB */
975         req.enables =
976             rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
977                              HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
978         if (vnic->lb_rule != 0xffff)
979                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
980         if (vnic->cos_rule != 0xffff)
981                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
982         if (vnic->rss_rule != 0xffff)
983                 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
984         req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
985         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
986         req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
987         req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
988         req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
989         req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
990         req.mru = rte_cpu_to_le_16(vnic->mru);
991         if (vnic->func_default)
992                 req.flags |=
993                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
994         if (vnic->vlan_strip)
995                 req.flags |=
996                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
997         if (vnic->bd_stall)
998                 req.flags |=
999                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1000         if (vnic->roce_dual)
1001                 req.flags |= rte_cpu_to_le_32(
1002                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1003         if (vnic->roce_only)
1004                 req.flags |= rte_cpu_to_le_32(
1005                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1006         if (vnic->rss_dflt_cr)
1007                 req.flags |= rte_cpu_to_le_32(
1008                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
1009
1010         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1011
1012         HWRM_CHECK_RESULT;
1013
1014         rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1015
1016         return rc;
1017 }
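
/*
 * Note on the qcfg/cfg pairing above (editorial sketch, not extra driver
 * logic): bnxt_hwrm_vnic_cfg() snapshots the placement modes before
 * reconfiguring the VNIC and writes them back afterwards, so the jumbo/HDS
 * settings survive the reconfiguration:
 *
 *	save:    bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
 *	change:  HWRM_VNIC_CFG request
 *	restore: bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
 */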
1018
1019 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1020                 int16_t fw_vf_id)
1021 {
1022         int rc = 0;
1023         struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1024         struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1025
1026         HWRM_PREP(req, VNIC_QCFG, -1, resp);
1027
1028         req.enables =
1029                 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1030         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1031         req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1032
1033         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1034
1035         HWRM_CHECK_RESULT;
1036
1037         vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1038         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1039         vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1040         vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1041         vnic->mru = rte_le_to_cpu_16(resp->mru);
1042         vnic->func_default = rte_le_to_cpu_32(
1043                         resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1044         vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1045                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1046         vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1047                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1048         vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1049                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1050         vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1051                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1052         vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1053                         HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1054
1055         return rc;
1056 }
1057
1058 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1059 {
1060         int rc = 0;
1061         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1062         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1063                                                 bp->hwrm_cmd_resp_addr;
1064
1065         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);
1066
1067         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1068
1069         HWRM_CHECK_RESULT;
1070
1071         vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1072
1073         return rc;
1074 }
1075
1076 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1077 {
1078         int rc = 0;
1079         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1080         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1081                                                 bp->hwrm_cmd_resp_addr;
1082
1083         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);
1084
1085         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1086
1087         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1088
1089         HWRM_CHECK_RESULT;
1090
1091         vnic->rss_rule = INVALID_HW_RING_ID;
1092
1093         return rc;
1094 }
1095
1096 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1097 {
1098         int rc = 0;
1099         struct hwrm_vnic_free_input req = {.req_type = 0 };
1100         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1101
1102         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
1103                 return rc;
1104
1105         HWRM_PREP(req, VNIC_FREE, -1, resp);
1106
1107         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1108
1109         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1110
1111         HWRM_CHECK_RESULT;
1112
1113         vnic->fw_vnic_id = INVALID_HW_RING_ID;
1114         return rc;
1115 }
1116
1117 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1118                            struct bnxt_vnic_info *vnic)
1119 {
1120         int rc = 0;
1121         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1122         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1123
1124         HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);
1125
1126         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1127
1128         req.ring_grp_tbl_addr =
1129             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1130         req.hash_key_tbl_addr =
1131             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1132         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1133
1134         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1135
1136         HWRM_CHECK_RESULT;
1137
1138         return rc;
1139 }
1140
1141 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1142                         struct bnxt_vnic_info *vnic)
1143 {
1144         int rc = 0;
1145         struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1146         struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1147         uint16_t size;
1148
1149         HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);
1150
1151         req.flags = rte_cpu_to_le_32(
1152                         HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1153
1154         req.enables = rte_cpu_to_le_32(
1155                 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1156
1157         size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1158         size -= RTE_PKTMBUF_HEADROOM;
1159
1160         req.jumbo_thresh = rte_cpu_to_le_16(size);
1161         req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1162
1163         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1164
1165         HWRM_CHECK_RESULT;
1166
1167         return rc;
1168 }
1169
1170 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1171                         struct bnxt_vnic_info *vnic, bool enable)
1172 {
1173         int rc = 0;
1174         struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1175         struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1176
1177         HWRM_PREP(req, VNIC_TPA_CFG, -1, resp);
1178
1179         if (enable) {
1180                 req.enables = rte_cpu_to_le_32(
1181                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1182                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1183                                 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1184                 req.flags = rte_cpu_to_le_32(
1185                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1186                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1187                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1188                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1189                                 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1190                         HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1191                 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1192                 req.max_agg_segs = rte_cpu_to_le_16(5);
1193                 req.max_aggs =
1194                         rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1195                 req.min_agg_len = rte_cpu_to_le_32(512);
1196         }
1197
1198         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1199
1200         HWRM_CHECK_RESULT;
1201
1202         return rc;
1203 }
1204
1205 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1206 {
1207         struct hwrm_func_cfg_input req = {0};
1208         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1209         int rc;
1210
1211         req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1212         req.enables = rte_cpu_to_le_32(
1213                         HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1214         memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1215         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1216
1217         HWRM_PREP(req, FUNC_CFG, -1, resp);
1218
1219         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1220         HWRM_CHECK_RESULT;
1221
1222         bp->pf.vf_info[vf].random_mac = false;
1223
1224         return rc;
1225 }
1226
1227 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1228                           struct rte_eth_stats *stats)
1229 {
1230         int rc = 0;
1231         struct hwrm_func_qstats_input req = {.req_type = 0};
1232         struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1233
1234         HWRM_PREP(req, FUNC_QSTATS, -1, resp);
1235
1236         req.fid = rte_cpu_to_le_16(fid);
1237
1238         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1239
1240         HWRM_CHECK_RESULT;
1241
1242         stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1243         stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1244         stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1245         stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1246         stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1247         stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1248
1249         stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1250         stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1251         stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1252         stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1253         stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1254         stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1255
1256         stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
1257         stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);
1258
1259         stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
1260
1261         return rc;
1262 }
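
/*
 * Usage sketch (hypothetical caller, not part of this file): a stats_get
 * callback would typically query the function's own counters:
 *
 *	struct rte_eth_stats stats = { 0 };
 *	rc = bnxt_hwrm_func_qstats(bp, 0xffff, &stats);
 *
 * where fid 0xffff selects the calling function itself, matching the
 * convention bnxt_hwrm_func_qcaps() uses above.
 */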
1263
1264 /*
1265  * HWRM utility functions
1266  */
1267
1268 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1269 {
1270         unsigned int i;
1271         int rc = 0;
1272
1273         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1274                 struct bnxt_tx_queue *txq;
1275                 struct bnxt_rx_queue *rxq;
1276                 struct bnxt_cp_ring_info *cpr;
1277
1278                 if (i >= bp->rx_cp_nr_rings) {
1279                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1280                         cpr = txq->cp_ring;
1281                 } else {
1282                         rxq = bp->rx_queues[i];
1283                         cpr = rxq->cp_ring;
1284                 }
1285
1286                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1287                 if (rc)
1288                         return rc;
1289         }
1290         return 0;
1291 }
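
/*
 * The walkers in this file share one flat index over the completion rings:
 * [0, rx_cp_nr_rings) are Rx queues and [rx_cp_nr_rings,
 * rx_cp_nr_rings + tx_cp_nr_rings) are Tx queues. For example, with 4 Rx
 * and 2 Tx rings, i == 5 maps to tx_queues[5 - 4], i.e. tx_queues[1].
 */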
1292
1293 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1294 {
1295         int rc;
1296         unsigned int i;
1297         struct bnxt_cp_ring_info *cpr;
1298
1299         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1300
1301                 if (i >= bp->rx_cp_nr_rings)
1302                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1303                 else
1304                         cpr = bp->rx_queues[i]->cp_ring;
1305                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1306                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1307                         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1308                         /*
1309                          * TODO. Need a better way to reset grp_info.stats_ctx
1310                          * for Rx rings only. stats_ctx is not saved for Tx
1311                          * in grp_info.
1312                          */
1313                         bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
1314                         if (rc)
1315                                 return rc;
1316                 }
1317         }
1318         return 0;
1319 }
1320
1321 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1322 {
1323         unsigned int i;
1324         int rc = 0;
1325
1326         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1327                 struct bnxt_tx_queue *txq;
1328                 struct bnxt_rx_queue *rxq;
1329                 struct bnxt_cp_ring_info *cpr;
1330
1331                 if (i >= bp->rx_cp_nr_rings) {
1332                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1333                         cpr = txq->cp_ring;
1334                 } else {
1335                         rxq = bp->rx_queues[i];
1336                         cpr = rxq->cp_ring;
1337                 }
1338
1339                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1340
1341                 if (rc)
1342                         return rc;
1343         }
1344         return rc;
1345 }
1346
1347 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1348 {
1349         uint16_t idx;
1350         int rc = 0;
1351
1352         for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1353
1354                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
1355                         RTE_LOG(ERR, PMD,
1356                                 "Attempt to free invalid ring group %d\n",
1357                                 idx);
1358                         continue;
1359                 }
1360
1361                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1362
1363                 if (rc)
1364                         return rc;
1365         }
1366         return rc;
1367 }
1368
1369 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1370                                 unsigned int idx __rte_unused)
1371 {
1372         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1373
1374         bnxt_hwrm_ring_free(bp, cp_ring,
1375                         HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1376         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1377         bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
1378         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1379                         sizeof(*cpr->cp_desc_ring));
1380         cpr->cp_raw_cons = 0;
1381 }
1382
1383 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1384 {
1385         unsigned int i;
1386         int rc = 0;
1387
1388         for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1389                 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1390                 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1391                 struct bnxt_ring *ring = txr->tx_ring_struct;
1392                 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1393                 unsigned int idx = bp->rx_cp_nr_rings + i + 1;
1394
1395                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1396                         bnxt_hwrm_ring_free(bp, ring,
1397                                         HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1398                         ring->fw_ring_id = INVALID_HW_RING_ID;
1399                         memset(txr->tx_desc_ring, 0,
1400                                         txr->tx_ring_struct->ring_size *
1401                                         sizeof(*txr->tx_desc_ring));
1402                         memset(txr->tx_buf_ring, 0,
1403                                         txr->tx_ring_struct->ring_size *
1404                                         sizeof(*txr->tx_buf_ring));
1405                         txr->tx_prod = 0;
1406                         txr->tx_cons = 0;
1407                 }
1408                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1409                         bnxt_free_cp_ring(bp, cpr, idx);
1410                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1411                 }
1412         }
1413
1414         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1415                 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1416                 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1417                 struct bnxt_ring *ring = rxr->rx_ring_struct;
1418                 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1419                 unsigned int idx = i + 1;
1420
1421                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1422                         bnxt_hwrm_ring_free(bp, ring,
1423                                         HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1424                         ring->fw_ring_id = INVALID_HW_RING_ID;
1425                         bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
1426                         memset(rxr->rx_desc_ring, 0,
1427                                         rxr->rx_ring_struct->ring_size *
1428                                         sizeof(*rxr->rx_desc_ring));
1429                         memset(rxr->rx_buf_ring, 0,
1430                                         rxr->rx_ring_struct->ring_size *
1431                                         sizeof(*rxr->rx_buf_ring));
1432                         rxr->rx_prod = 0;
1433                         memset(rxr->ag_buf_ring, 0,
1434                                         rxr->ag_ring_struct->ring_size *
1435                                         sizeof(*rxr->ag_buf_ring));
1436                         rxr->ag_prod = 0;
1437                 }
1438                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1439                         bnxt_free_cp_ring(bp, cpr, idx);
1440                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
1441                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1442                 }
1443         }
1444
1445         /* Default completion ring */
1446         {
1447                 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1448
1449                 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1450                         bnxt_free_cp_ring(bp, cpr, 0);
1451                         cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1452                 }
1453         }
1454
1455         return rc;
1456 }
1457
1458 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1459 {
1460         uint16_t i;
1461         int rc = 0;
1462
1463         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1464                 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1465                 if (rc)
1466                         return rc;
1467         }
1468         return rc;
1469 }
1470
1471 void bnxt_free_hwrm_resources(struct bnxt *bp)
1472 {
1473         /* Release memzone */
1474         rte_free(bp->hwrm_cmd_resp_addr);
1475         bp->hwrm_cmd_resp_addr = NULL;
1476         bp->hwrm_cmd_resp_dma_addr = 0;
1477 }
1478
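/*
 * Allocate the DMA-able buffer used for HWRM responses.  The buffer is
 * locked into memory, and its physical address is used as the response
 * address for every HWRM request sent by this function.
 */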
1479 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1480 {
1481         struct rte_pci_device *pdev = bp->pdev;
1482         char type[RTE_MEMZONE_NAMESIZE];
1483
1484         snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
1485                 pdev->addr.domain, pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1486         bp->max_req_len = HWRM_MAX_REQ_LEN;
1487         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1488         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1489         if (bp->hwrm_cmd_resp_addr == NULL)
1490                 return -ENOMEM;
1491         rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1492         bp->hwrm_cmd_resp_dma_addr =
1493                 rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
1494         if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_PHYS_ADDR) {
1495                 RTE_LOG(ERR, PMD,
1496                         "unable to map response address to physical memory\n");
1497                 return -ENOMEM;
1498         }
1499         rte_spinlock_init(&bp->hwrm_lock);
1500
1501         return 0;
1502 }
1503
1504 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1505 {
1506         struct bnxt_filter_info *filter;
1507         int rc = 0;
1508
1509         STAILQ_FOREACH(filter, &vnic->filter, next) {
1510                 rc = bnxt_hwrm_clear_filter(bp, filter);
1511                 if (rc)
1512                         break;
1513         }
1514         return rc;
1515 }
1516
1517 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1518 {
1519         struct bnxt_filter_info *filter;
1520         int rc = 0;
1521
1522         STAILQ_FOREACH(filter, &vnic->filter, next) {
1523                 rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
1524                 if (rc)
1525                         break;
1526         }
1527         return rc;
1528 }
1529
1530 void bnxt_free_tunnel_ports(struct bnxt *bp)
1531 {
1532         if (bp->vxlan_port_cnt)
1533                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
1534                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
1535         bp->vxlan_port = 0;
1536         if (bp->geneve_port_cnt)
1537                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
1538                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
1539         bp->geneve_port = 0;
1540 }
1541
1542 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1543 {
1544         struct bnxt_vnic_info *vnic;
1545         unsigned int i;
1546
1547         if (bp->vnic_info == NULL)
1548                 return;
1549
1550         vnic = &bp->vnic_info[0];
1551         if (BNXT_PF(bp))
1552                 bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
1553
1554         /* VNIC resources */
1555         for (i = 0; i < bp->nr_vnics; i++) {
1556                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1557
1558                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1559
1560                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1561
1562                 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
1563
1564                 bnxt_hwrm_vnic_free(bp, vnic);
1565         }
1566         /* Ring resources */
1567         bnxt_free_all_hwrm_rings(bp);
1568         bnxt_free_all_hwrm_ring_grps(bp);
1569         bnxt_free_all_hwrm_stat_ctxs(bp);
1570         bnxt_free_tunnel_ports(bp);
1571 }
1572
1573 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1574 {
1575         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1576
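        /*
         * ETH_LINK_SPEED_AUTONEG is 0, so any configuration without the
         * FIXED bit set is autonegotiated and both duplex settings are
         * allowed.
         */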
1577         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1578                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1579
1580         switch (conf_link_speed) {
1581         case ETH_LINK_SPEED_10M_HD:
1582         case ETH_LINK_SPEED_100M_HD:
1583                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1584         }
1585         return hw_link_duplex;
1586 }
1587
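/*
 * Map a single rte_ethdev link speed flag to the HWRM PHY speed encoding
 * used by HWRM_PORT_PHY_CFG.  Returns 0 when the configuration requests
 * autonegotiation, which the caller treats as "no forced speed".
 */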
1588 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
1589 {
1590         uint16_t eth_link_speed = 0;
1591
1592         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
1593                 return ETH_LINK_SPEED_AUTONEG;
1594
1595         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
1596         case ETH_LINK_SPEED_100M:
1597         case ETH_LINK_SPEED_100M_HD:
1598                 eth_link_speed =
1599                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
1600                 break;
1601         case ETH_LINK_SPEED_1G:
1602                 eth_link_speed =
1603                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
1604                 break;
1605         case ETH_LINK_SPEED_2_5G:
1606                 eth_link_speed =
1607                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
1608                 break;
1609         case ETH_LINK_SPEED_10G:
1610                 eth_link_speed =
1611                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
1612                 break;
1613         case ETH_LINK_SPEED_20G:
1614                 eth_link_speed =
1615                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
1616                 break;
1617         case ETH_LINK_SPEED_25G:
1618                 eth_link_speed =
1619                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
1620                 break;
1621         case ETH_LINK_SPEED_40G:
1622                 eth_link_speed =
1623                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
1624                 break;
1625         case ETH_LINK_SPEED_50G:
1626                 eth_link_speed =
1627                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
1628                 break;
1629         default:
1630                 RTE_LOG(ERR, PMD,
1631                         "Unsupported link speed %u; default to AUTO\n",
1632                         conf_link_speed);
1633                 break;
1634         }
1635         return eth_link_speed;
1636 }
1637
1638 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
1639                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
1640                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
1641                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
1642
1643 static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
1644 {
1645         uint32_t one_speed;
1646
1647         if (link_speed == ETH_LINK_SPEED_AUTONEG)
1648                 return 0;
1649
1650         if (link_speed & ETH_LINK_SPEED_FIXED) {
1651                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
1652
1653                 if (one_speed & (one_speed - 1)) {
1654                         RTE_LOG(ERR, PMD,
1655                                 "Invalid advertised speeds (%u) for port %u\n",
1656                                 link_speed, port_id);
1657                         return -EINVAL;
1658                 }
1659                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
1660                         RTE_LOG(ERR, PMD,
1661                                 "Unsupported advertised speed (%u) for port %u\n",
1662                                 link_speed, port_id);
1663                         return -EINVAL;
1664                 }
1665         } else {
1666                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
1667                         RTE_LOG(ERR, PMD,
1668                                 "Unsupported advertised speeds (%u) for port %u\n",
1669                                 link_speed, port_id);
1670                         return -EINVAL;
1671                 }
1672         }
1673         return 0;
1674 }
1675
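/*
 * Build the auto_link_speed_mask advertised during autonegotiation.
 * A configuration of ETH_LINK_SPEED_AUTONEG expands to every speed in
 * BNXT_SUPPORTED_SPEEDS.
 */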
1676 static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
1677 {
1678         uint16_t ret = 0;
1679
1680         if (link_speed == ETH_LINK_SPEED_AUTONEG)
1681                 link_speed = BNXT_SUPPORTED_SPEEDS;
1682
1683         if (link_speed & ETH_LINK_SPEED_100M)
1684                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1685         if (link_speed & ETH_LINK_SPEED_100M_HD)
1686                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MBHD;
1687         if (link_speed & ETH_LINK_SPEED_1G)
1688                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
1689         if (link_speed & ETH_LINK_SPEED_2_5G)
1690                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
1691         if (link_speed & ETH_LINK_SPEED_10G)
1692                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
1693         if (link_speed & ETH_LINK_SPEED_20G)
1694                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
1695         if (link_speed & ETH_LINK_SPEED_25G)
1696                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
1697         if (link_speed & ETH_LINK_SPEED_40G)
1698                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
1699         if (link_speed & ETH_LINK_SPEED_50G)
1700                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
1701         return ret;
1702 }
1703
1704 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
1705 {
1706         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
1707
1708         switch (hw_link_speed) {
1709         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
1710                 eth_link_speed = ETH_SPEED_NUM_100M;
1711                 break;
1712         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
1713                 eth_link_speed = ETH_SPEED_NUM_1G;
1714                 break;
1715         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
1716                 eth_link_speed = ETH_SPEED_NUM_2_5G;
1717                 break;
1718         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
1719                 eth_link_speed = ETH_SPEED_NUM_10G;
1720                 break;
1721         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
1722                 eth_link_speed = ETH_SPEED_NUM_20G;
1723                 break;
1724         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
1725                 eth_link_speed = ETH_SPEED_NUM_25G;
1726                 break;
1727         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
1728                 eth_link_speed = ETH_SPEED_NUM_40G;
1729                 break;
1730         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
1731                 eth_link_speed = ETH_SPEED_NUM_50G;
1732                 break;
1733         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
1734         default:
1735                 RTE_LOG(ERR, PMD, "HWRM link speed %d not supported\n",
1736                         hw_link_speed);
1737                 break;
1738         }
1739         return eth_link_speed;
1740 }
1741
1742 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
1743 {
1744         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1745
1746         switch (hw_link_duplex) {
1747         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
1748         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
1749                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1750                 break;
1751         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
1752                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
1753                 break;
1754         default:
1755                 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
1756                         hw_link_duplex);
1757                 break;
1758         }
1759         return eth_link_duplex;
1760 }
1761
1762 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
1763 {
1764         int rc = 0;
1765         struct bnxt_link_info *link_info = &bp->link_info;
1766
1767         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
1768         if (rc) {
1769                 RTE_LOG(ERR, PMD,
1770                         "Get link config failed with rc %d\n", rc);
1771                 goto exit;
1772         }
1773         if (link_info->link_up)
1774                 link->link_speed =
1775                         bnxt_parse_hw_link_speed(link_info->link_speed);
1776         else
1777                 link->link_speed = ETH_SPEED_NUM_10M;
1778         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
1779         link->link_status = link_info->link_up;
1780         link->link_autoneg = link_info->auto_mode ==
1781                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
1782                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
1783 exit:
1784         return rc;
1785 }
1786
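/*
 * Apply the link configuration from dev_conf->link_speeds.  For example,
 * ETH_LINK_SPEED_AUTONEG restarts autonegotiation with all of
 * BNXT_SUPPORTED_SPEEDS advertised, while (ETH_LINK_SPEED_25G |
 * ETH_LINK_SPEED_FIXED) takes the forced-speed branch below.  NPAR and
 * VF functions are not allowed to change PHY settings.
 */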
1787 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
1788 {
1789         int rc = 0;
1790         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1791         struct bnxt_link_info link_req;
1792         uint16_t speed;
1793
1794         if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
1795                 return 0;
1796
1797         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
1798                         bp->eth_dev->data->port_id);
1799         if (rc)
1800                 goto error;
1801
1802         memset(&link_req, 0, sizeof(link_req));
1803         link_req.link_up = link_up;
1804         if (!link_up)
1805                 goto port_phy_cfg;
1806
1807         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
1808         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
1809         if (speed == 0) {
1810                 link_req.phy_flags |=
1811                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
1812                 link_req.auto_mode =
1813                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
1814                 link_req.auto_link_speed_mask =
1815                         bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
1816         } else {
1817                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
1818                 link_req.link_speed = speed;
1819                 RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
1820         }
1821         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
1822         link_req.auto_pause = bp->link_info.auto_pause;
1823         link_req.force_pause = bp->link_info.force_pause;
1824
1825 port_phy_cfg:
1826         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
1827         if (rc) {
1828                 RTE_LOG(ERR, PMD,
1829                         "Set link config failed with rc %d\n", rc);
1830         }
1831
1832         rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
1833 error:
1834         return rc;
1835 }
1836
1837 /* JIRA 22088 */
1838 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
1839 {
1840         struct hwrm_func_qcfg_input req = {0};
1841         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1842         int rc = 0;
1843
1844         HWRM_PREP(req, FUNC_QCFG, -1, resp);
1845         req.fid = rte_cpu_to_le_16(0xffff);
1846
1847         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1848
1849         HWRM_CHECK_RESULT;
1850
1851         /* Hard-coded 12-bit (0xfff) VLAN ID mask */
1852         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
1853
1854         switch (resp->port_partition_type) {
1855         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
1856         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
1857         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
1858                 bp->port_partition_type = resp->port_partition_type;
1859                 break;
1860         default:
1861                 bp->port_partition_type = 0;
1862                 break;
1863         }
1864
1865         return rc;
1866 }
1867
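/*
 * Fallback used when FUNC_QCAPS fails for a VF: synthesize a qcaps
 * response from the func_cfg request that was just built, so the PF
 * bookkeeping in reserve_resources_from_vf() still subtracts what was
 * requested.
 */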
1868 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
1869                                    struct hwrm_func_qcaps_output *qcaps)
1870 {
1871         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
1872         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
1873                sizeof(qcaps->mac_address));
1874         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
1875         qcaps->max_rx_rings = fcfg->num_rx_rings;
1876         qcaps->max_tx_rings = fcfg->num_tx_rings;
1877         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
1878         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
1879         qcaps->max_vfs = 0;
1880         qcaps->first_vf_id = 0;
1881         qcaps->max_vnics = fcfg->num_vnics;
1882         qcaps->max_decap_records = 0;
1883         qcaps->max_encap_records = 0;
1884         qcaps->max_tx_wm_flows = 0;
1885         qcaps->max_tx_em_flows = 0;
1886         qcaps->max_rx_wm_flows = 0;
1887         qcaps->max_rx_em_flows = 0;
1888         qcaps->max_flow_id = 0;
1889         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
1890         qcaps->max_sp_tx_rings = 0;
1891         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
1892 }
1893
1894 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
1895 {
1896         struct hwrm_func_cfg_input req = {0};
1897         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1898         int rc;
1899
1900         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
1901                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
1902                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
1903                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
1904                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
1905                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
1906                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
1907                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
1908                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
1909                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
1910         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
1911         req.mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1912                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
1913         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1914                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
1915         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
1916         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
1917         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
1918         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
1919         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
1920         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
1921         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
1922         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
1923         req.fid = rte_cpu_to_le_16(0xffff);
1924
1925         HWRM_PREP(req, FUNC_CFG, -1, resp);
1926
1927         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1928         HWRM_CHECK_RESULT;
1929
1930         return rc;
1931 }
1932
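/*
 * Populate a FUNC_CFG request for a VF.  PF resources are divided evenly
 * between the PF and all VFs, hence the (num_vfs + 1) divisor; each VF
 * gets a single VNIC since VMDq/RFS is not supported on VFs.
 */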
1933 static void populate_vf_func_cfg_req(struct bnxt *bp,
1934                                      struct hwrm_func_cfg_input *req,
1935                                      int num_vfs)
1936 {
1937         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
1938                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
1939                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
1940                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
1941                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
1942                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
1943                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
1944                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
1945                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
1946                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
1947
1948         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1949                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
1950         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1951                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
1952         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
1953                                                 (num_vfs + 1));
1954         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
1955         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
1956                                                (num_vfs + 1));
1957         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
1958         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
1959         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
1960         /* TODO: For now, do not support VMDq/RFS on VFs. */
1961         req->num_vnics = rte_cpu_to_le_16(1);
1962         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
1963                                                  (num_vfs + 1));
1964 }
1965
1966 static void add_random_mac_if_needed(struct bnxt *bp,
1967                                      struct hwrm_func_cfg_input *cfg_req,
1968                                      int vf)
1969 {
1970         struct ether_addr mac;
1971
1972         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
1973                 return;
1974
1975         if (is_zero_ether_addr(&mac)) {
1976                 cfg_req->enables |=
1977                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1978                 eth_random_addr(cfg_req->dflt_mac_addr);
1979                 bp->pf.vf_info[vf].random_mac = true;
1980         } else {
1981                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
1982         }
1983 }
1984
1985 static void reserve_resources_from_vf(struct bnxt *bp,
1986                                       struct hwrm_func_cfg_input *cfg_req,
1987                                       int vf)
1988 {
1989         struct hwrm_func_qcaps_input req = {0};
1990         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1991         int rc;
1992
1993         /* Get the actual allocated values now */
1994         HWRM_PREP(req, FUNC_QCAPS, -1, resp);
1995         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1996         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1997
1998         if (rc) {
1999                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
2000                 copy_func_cfg_to_qcaps(cfg_req, resp);
2001         } else if (resp->error_code) {
2002                 rc = rte_le_to_cpu_16(resp->error_code);
2003                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
2004                 copy_func_cfg_to_qcaps(cfg_req, resp);
2005         }
2006
2007         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2008         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2009         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2010         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2011         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2012         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2013         /*
2014          * TODO: VMDq is not yet supported with VFs, so max_vnics is always
2015          * forced to 1 and is not reserved from the PF here.
2016          */
2017         /* bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics); */
2018         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2019 }
2020
2021 static int update_pf_resource_max(struct bnxt *bp)
2022 {
2023         struct hwrm_func_qcfg_input req = {0};
2024         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2025         int rc;
2026
2027         /* And copy the allocated numbers into the pf struct */
2028         HWRM_PREP(req, FUNC_QCFG, -1, resp);
2029         req.fid = rte_cpu_to_le_16(0xffff);
2030         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2031         HWRM_CHECK_RESULT;
2032
2033         /* Only TX ring value reflects actual allocation? TODO */
2034         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2035         bp->pf.evb_mode = resp->evb_mode;
2036
2037         return rc;
2038 }
2039
2040 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2041 {
2042         int rc;
2043
2044         if (!BNXT_PF(bp)) {
2045                 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
2046                 return -1;
2047         }
2048
2049         rc = bnxt_hwrm_func_qcaps(bp);
2050         if (rc)
2051                 return rc;
2052
2053         bp->pf.func_cfg_flags &=
2054                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2055                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2056         bp->pf.func_cfg_flags |=
2057                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2058         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2059         return rc;
2060 }
2061
2062 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2063 {
2064         struct hwrm_func_cfg_input req = {0};
2065         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2066         int i;
2067         size_t sz;
2068         int rc = 0;
2069         size_t req_buf_sz;
2070
2071         if (!BNXT_PF(bp)) {
2072                 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
2073                 return -1;
2074         }
2075
2076         rc = bnxt_hwrm_func_qcaps(bp);
2077
2078         if (rc)
2079                 return rc;
2080
2081         bp->pf.active_vfs = num_vfs;
2082
2083         /*
2084          * First, configure the PF to only use one TX ring.  This ensures that
2085          * there are enough rings for all VFs.
2086          *
2087          * If we don't do this, when we call func_alloc() later, we will lock
2088          * extra rings to the PF that won't be available during func_cfg() of
2089          * the VFs.
2090          *
2091          * This has been fixed with firmware versions above 20.6.54
2092          * This has been fixed in firmware versions above 20.6.54.
2093         bp->pf.func_cfg_flags &=
2094                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2095                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2096         bp->pf.func_cfg_flags |=
2097                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2098         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2099         if (rc)
2100                 return rc;
2101
2102         /*
2103          * Now, create and register a buffer to hold forwarded VF requests
2104          */
2105         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2106         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2107                 page_roundup(req_buf_sz));
2108         if (bp->pf.vf_req_buf == NULL) {
2109                 rc = -ENOMEM;
2110                 goto error_free;
2111         }
2112         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2113                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2114         for (i = 0; i < num_vfs; i++)
2115                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2116                                         (i * HWRM_MAX_REQ_LEN);
2117
2118         rc = bnxt_hwrm_func_buf_rgtr(bp);
2119         if (rc)
2120                 goto error_free;
2121
2122         populate_vf_func_cfg_req(bp, &req, num_vfs);
2123
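        /*
         * active_vfs was set above so bnxt_hwrm_func_buf_rgtr() could size
         * the forwarding buffer; reset it here and count only the VFs that
         * are configured successfully below.
         */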
2124         bp->pf.active_vfs = 0;
2125         for (i = 0; i < num_vfs; i++) {
2126                 add_random_mac_if_needed(bp, &req, i);
2127
2128                 HWRM_PREP(req, FUNC_CFG, -1, resp);
2129                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2130                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2131                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2132
2133                 /* Clear enable flag for next pass */
2134                 req.enables &= ~rte_cpu_to_le_32(
2135                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2136
2137                 if (rc || resp->error_code) {
2138                         RTE_LOG(ERR, PMD,
2139                                 "Failed to initialize VF %d\n", i);
2140                         RTE_LOG(ERR, PMD,
2141                                 "Not all VFs available. (%d, %d)\n",
2142                                 rc, rte_le_to_cpu_16(resp->error_code));
2143                         break;
2144                 }
2145
2146                 reserve_resources_from_vf(bp, &req, i);
2147                 bp->pf.active_vfs++;
2148         }
2149
2150         /*
2151          * Now configure the PF to use "the rest" of the resources.
2152          * Note that STD_TX_RING_MODE is used here, which limits the TX
2153          * rings; this allows QoS to function properly.  Not setting it
2154          * would cause the PF rings to break bandwidth settings.
2155          */
2156         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2157         if (rc)
2158                 goto error_free;
2159
2160         rc = update_pf_resource_max(bp);
2161         if (rc)
2162                 goto error_free;
2163
2164         return rc;
2165
2166 error_free:
2167         bnxt_hwrm_func_buf_unrgtr(bp);
2168         return rc;
2169 }
2170
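/*
 * Allocate a tunnel destination UDP port in the firmware.  The returned
 * firmware port id is cached so bnxt_free_tunnel_ports() can free it
 * later.
 */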
2171 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2172                                 uint8_t tunnel_type)
2173 {
2174         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2175         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2176         int rc = 0;
2177
2178         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, -1, resp);
2179         req.tunnel_type = tunnel_type;
2180         req.tunnel_dst_port_val = port;
2181         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2182         HWRM_CHECK_RESULT;
2183
2184         switch (tunnel_type) {
2185         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2186                 bp->vxlan_fw_dst_port_id = rte_le_to_cpu_16(resp->tunnel_dst_port_id);
2187                 bp->vxlan_port = port;
2188                 break;
2189         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2190                 bp->geneve_fw_dst_port_id = rte_le_to_cpu_16(resp->tunnel_dst_port_id);
2191                 bp->geneve_port = port;
2192                 break;
2193         default:
2194                 break;
2195         }
2196         return rc;
2197 }
2198
2199 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2200                                 uint8_t tunnel_type)
2201 {
2202         struct hwrm_tunnel_dst_port_free_input req = {0};
2203         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2204         int rc = 0;
2205
2206         HWRM_PREP(req, TUNNEL_DST_PORT_FREE, -1, resp);
2207         req.tunnel_type = tunnel_type;
2208         req.tunnel_dst_port_id = rte_cpu_to_le_16(port);
2209         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2210         HWRM_CHECK_RESULT;
2211
2212         return rc;
2213 }
2214
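/*
 * Register the buffer used for forwarded VF HWRM requests: one
 * HWRM_MAX_REQ_LEN slot per active VF within a single physically
 * contiguous page, so the firmware can deliver VF commands to the PF
 * driver for processing.
 */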
2215 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2216 {
2217         int rc = 0;
2218         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2219         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2220
2221         HWRM_PREP(req, FUNC_BUF_RGTR, -1, resp);
2222
2223         req.req_buf_num_pages = rte_cpu_to_le_16(1);
2224         req.req_buf_page_size = rte_cpu_to_le_16(
2225                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2226         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2227         req.req_buf_page_addr[0] =
2228                 rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
2229         if (req.req_buf_page_addr[0] == RTE_BAD_PHYS_ADDR) {
2230                 RTE_LOG(ERR, PMD,
2231                         "unable to map buffer address to physical memory\n");
2232                 return -ENOMEM;
2233         }
2234
2235         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2236
2237         HWRM_CHECK_RESULT;
2238
2239         return rc;
2240 }
2241
2242 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2243 {
2244         int rc = 0;
2245         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2246         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2247
2248         HWRM_PREP(req, FUNC_BUF_UNRGTR, -1, resp);
2249
2250         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2251
2252         HWRM_CHECK_RESULT;
2253
2254         return rc;
2255 }
2256
2257 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2258 {
2259         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2260         struct hwrm_func_cfg_input req = {0};
2261         int rc;
2262
2263         HWRM_PREP(req, FUNC_CFG, -1, resp);
2264         req.fid = rte_cpu_to_le_16(0xffff);
2265         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2266         req.enables = rte_cpu_to_le_32(
2267                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2268         req.async_event_cr = rte_cpu_to_le_16(
2269                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2270         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2271         HWRM_CHECK_RESULT;
2272
2273         return rc;
2274 }
2275
2276 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2277 {
2278         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2279         struct hwrm_func_vf_cfg_input req = {0};
2280         int rc;
2281
2282         HWRM_PREP(req, FUNC_VF_CFG, -1, resp);
2283         req.enables = rte_cpu_to_le_32(
2284                         HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2285         req.async_event_cr = rte_cpu_to_le_16(
2286                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2287         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2288         HWRM_CHECK_RESULT;
2289
2290         return rc;
2291 }
2292
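/*
 * Program the default VLAN either for a VF (is_vf set, using the fid and
 * flags cached in vf_info) or for the PF itself (fid 0xffff).
 */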
2293 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2294 {
2295         struct hwrm_func_cfg_input req = {0};
2296         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2297         uint16_t dflt_vlan, fid;
2298         uint32_t func_cfg_flags;
2299         int rc = 0;
2300
2301         HWRM_PREP(req, FUNC_CFG, -1, resp);
2302
2303         if (is_vf) {
2304                 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2305                 fid = bp->pf.vf_info[vf].fid;
2306                 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2307         } else {
2308                 fid = 0xffff;
2309                 func_cfg_flags = bp->pf.func_cfg_flags;
2310                 dflt_vlan = bp->vlan;
2311         }
2312
2313         req.flags = rte_cpu_to_le_32(func_cfg_flags);
2314         req.fid = rte_cpu_to_le_16(fid);
2315         req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2316         req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2317
2318         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2319         HWRM_CHECK_RESULT;
2320
2321         return rc;
2322 }
2323
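/*
 * Complete a forwarded VF request: "reject" returns an error completion
 * to the VF, while bnxt_hwrm_exec_fwd_resp() below asks the firmware to
 * execute the encapsulated request on the VF's behalf.  The request must
 * fit in the encap_request area.
 */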
2324 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2325                               void *encaped, size_t ec_size)
2326 {
2327         int rc = 0;
2328         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2329         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2330
2331         if (ec_size > sizeof(req.encap_request))
2332                 return -1;
2333
2334         HWRM_PREP(req, REJECT_FWD_RESP, -1, resp);
2335
2336         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2337         memcpy(req.encap_request, encaped, ec_size);
2338
2339         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2340
2341         HWRM_CHECK_RESULT;
2342
2343         return rc;
2344 }
2345
2346 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2347                                        struct ether_addr *mac)
2348 {
2349         struct hwrm_func_qcfg_input req = {0};
2350         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2351         int rc;
2352
2353         HWRM_PREP(req, FUNC_QCFG, -1, resp);
2354         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2355         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2356
2357         HWRM_CHECK_RESULT;
2358
2359         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2360         return rc;
2361 }
2362
2363 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2364                             void *encaped, size_t ec_size)
2365 {
2366         int rc = 0;
2367         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2368         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2369
2370         if (ec_size > sizeof(req.encap_request))
2371                 return -1;
2372
2373         HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
2374
2375         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2376         memcpy(req.encap_request, encaped, ec_size);
2377
2378         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2379
2380         HWRM_CHECK_RESULT;
2381
2382         return rc;
2383 }
2384
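/*
 * Query one statistics context and fold the unicast, multicast and
 * broadcast counters into the per-queue slots of rte_eth_stats; the
 * error counter accumulates RX/TX error and RX drop packets.
 *
 * A minimal caller sketch (hypothetical bookkeeping; the real stats_get
 * path lives in bnxt_stats.c):
 *
 *	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
 *		struct bnxt_cp_ring_info *cpr = bp->rx_queues[i]->cp_ring;
 *
 *		rc = bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i, stats);
 *		if (rc)
 *			return rc;
 *	}
 */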
2385 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
2386                          struct rte_eth_stats *stats)
2387 {
2388         int rc = 0;
2389         struct hwrm_stat_ctx_query_input req = {.req_type = 0};
2390         struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
2391
2392         HWRM_PREP(req, STAT_CTX_QUERY, -1, resp);
2393
2394         req.stat_ctx_id = rte_cpu_to_le_32(cid);
2395
2396         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2397
2398         HWRM_CHECK_RESULT;
2399
2400         stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2401         stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2402         stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2403         stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2404         stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2405         stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2406
2407         stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2408         stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2409         stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2410         stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2411         stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2412         stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2413
2414         stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
2415         stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
2416         stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
2417
2418         return rc;
2419 }
2420
2421 int bnxt_hwrm_port_qstats(struct bnxt *bp)
2422 {
2423         struct hwrm_port_qstats_input req = {0};
2424         struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2425         struct bnxt_pf_info *pf = &bp->pf;
2426         int rc;
2427
2428         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2429                 return 0;
2430
2431         HWRM_PREP(req, PORT_QSTATS, -1, resp);
2432         req.port_id = rte_cpu_to_le_16(pf->port_id);
2433         req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
2434         req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
2435         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2436         HWRM_CHECK_RESULT;
2437         return rc;
2438 }
2439
2440 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
2441 {
2442         struct hwrm_port_clr_stats_input req = {0};
2443         struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2444         struct bnxt_pf_info *pf = &bp->pf;
2445         int rc;
2446
2447         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2448                 return 0;
2449
2450         HWRM_PREP(req, PORT_CLR_STATS, -1, resp);
2451         req.port_id = rte_cpu_to_le_16(pf->port_id);
2452         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2453         HWRM_CHECK_RESULT;
2454         return rc;
2455 }