/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_io.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#define HWRM_CMD_TIMEOUT                2000

struct bnxt_plcmodes_cfg {
        uint32_t        flags;
        uint16_t        jumbo_thresh;
        uint16_t        hds_offset;
        uint16_t        hds_threshold;
};

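/*
 * Round an arbitrary buffer size up to a page-size exponent the hardware
 * can express. page_getenum() returns log2 of the smallest supported page
 * size that fits "size" (e.g. page_getenum(3000) == 12, i.e. 4KB);
 * page_roundup() returns the same rounding in bytes (page_roundup(3000) ==
 * 4096). Sizes beyond 1GB fall through with an error log.
 */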
static int page_getenum(size_t size)
{
        if (size <= 1 << 4)
                return 4;
        if (size <= 1 << 12)
                return 12;
        if (size <= 1 << 13)
                return 13;
        if (size <= 1 << 16)
                return 16;
        if (size <= 1 << 21)
                return 21;
        if (size <= 1 << 22)
                return 22;
        if (size <= 1 << 30)
                return 30;
        RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
        return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
        return 1 << page_getenum(size);
}

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * command is rejected by the ChiMP firmware.
 */

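/*
 * Low-level request/response exchange with the firmware: the request is
 * copied 32 bits at a time into the communication channel at BAR0, the
 * remainder of the request window is zeroed, the doorbell at BAR0 + 0x100
 * is rung, and the DMA'd response is polled until its final byte carries
 * the "valid" key. Callers must hold bp->hwrm_lock; use
 * bnxt_hwrm_send_message() for the locking variant.
 */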
static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
                                        uint32_t msg_len)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < bp->max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + 0x100;
        rte_write32(1, bar);

        /* Poll for the valid bit */
        for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
                /* Sanity check on the resp->resp_len */
                rte_rmb();
                if (resp->resp_len && resp->resp_len <=
                                bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(600);
        }

        if (i >= HWRM_CMD_TIMEOUT) {
                RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
                        req->req_type);
                goto err_ret;
        }
        return 0;

err_ret:
        return -1;
}

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
{
        int rc;

        rte_spinlock_lock(&bp->hwrm_lock);
        rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
        rte_spinlock_unlock(&bp->hwrm_lock);
        return rc;
}

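/*
 * HWRM_PREP() fills the common request header (type, sequence number,
 * response DMA address) and clears the response buffer; HWRM_CHECK_RESULT
 * turns a transport failure or a firmware error_code into an early return.
 * The canonical calling sequence, used by every bnxt_hwrm_*() wrapper in
 * this file, looks like:
 *
 *      struct hwrm_xxx_input req = {.req_type = 0 };
 *      struct hwrm_xxx_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *      HWRM_PREP(req, XXX, -1, resp);
 *      ... fill command-specific fields of req ...
 *      rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *      HWRM_CHECK_RESULT;
 *      ... read command-specific fields of resp ...
 *
 * where "xxx"/"XXX" stands in for the HWRM command name.
 */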
#define HWRM_PREP(req, type, cr, resp) \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(cr); \
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)

#define HWRM_CHECK_RESULT \
        { \
                if (rc) { \
                        RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
                                __func__, rc); \
                        return rc; \
                } \
                if (resp->error_code) { \
                        rc = rte_le_to_cpu_16(resp->error_code); \
                        if (resp->resp_len >= 16) { \
                                struct hwrm_err_output *tmp_hwrm_err_op = \
                                                        (void *)resp; \
                                RTE_LOG(ERR, PMD, \
                                        "%s error %d:%d:%08x:%04x\n", \
                                        __func__, \
                                        rc, tmp_hwrm_err_op->cmd_err, \
                                        rte_le_to_cpu_32(\
                                                tmp_hwrm_err_op->opaque_0), \
                                        rte_le_to_cpu_16(\
                                                tmp_hwrm_err_op->opaque_1)); \
                        } else { \
                                RTE_LOG(ERR, PMD, \
                                        "%s error %d\n", __func__, rc); \
                        } \
                        return rc; \
                } \
        }

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME add multicast flag, when multicast adding options is supported
         * by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
                                    mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_clear_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        filter->fw_l2_filter_id = -1;

        return 0;
}

int bnxt_hwrm_set_filter(struct bnxt *bp,
                         struct bnxt_vnic_info *vnic,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);

        return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t new_max_vfs;
        int i;

        HWRM_PREP(req, FUNC_QCAPS, -1, resp);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        if (BNXT_PF(bp)) {
                bp->pf.port_id = resp->port_id;
                bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                new_max_vfs = bp->pdev->max_vfs;
                if (new_max_vfs != bp->pf.max_vfs) {
                        if (bp->pf.vf_info)
                                rte_free(bp->pf.vf_info);
                        bp->pf.vf_info = rte_malloc("bnxt_vf_info",
                            sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
                        bp->pf.max_vfs = new_max_vfs;
                        for (i = 0; i < new_max_vfs; i++) {
                                bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
                                bp->pf.vf_info[i].vlan_table =
                                        rte_zmalloc("VF VLAN table",
                                                    getpagesize(),
                                                    getpagesize());
                                if (bp->pf.vf_info[i].vlan_table == NULL)
                                        RTE_LOG(ERR, PMD,
                                        "Fail to alloc VLAN table for VF %d\n",
                                        i);
                                else
                                        rte_mem_lock_page(
                                                bp->pf.vf_info[i].vlan_table);
                                STAILQ_INIT(&bp->pf.vf_info[i].filter);
                        }
                }
        }

        bp->fw_fid = rte_le_to_cpu_32(resp->fid);
        memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
        bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
        bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
        bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
        bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
        bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
        /* TODO: For now, do not support VMDq/RFS on VFs. */
        if (BNXT_PF(bp)) {
                if (bp->pf.max_vfs)
                        bp->max_vnics = 1;
                else
                        bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        } else {
                bp->max_vnics = 1;
        }
        bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
        if (BNXT_PF(bp))
                bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET, -1, resp);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
        int rc;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        if (BNXT_PF(bp)) {
                req.enables |= rte_cpu_to_le_32(
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD);
                memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
                       RTE_MIN(sizeof(req.vf_req_fwd),
                               sizeof(bp->pf.vf_req_fwd)));
        }

        /* Request forwarding of all async events, including link status
         * (bit 0). TODO: Use MACROs for the event ids.
         */
        memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

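/*
 * Query the firmware's HWRM interface version, warn when it differs from
 * the version this driver was built against, and resize the DMA'd response
 * buffer if the firmware can produce larger responses than were allocated
 * at init time. Takes bp->hwrm_lock itself, since it may swap the response
 * buffer out from under concurrent commands.
 */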
int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t my_version;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];

        HWRM_PREP(req, VER_GET, -1, resp);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        /*
         * Hold the lock since we may be adjusting the response pointers.
         * Check the result by hand rather than with HWRM_CHECK_RESULT,
         * which would return with the lock still held.
         */
        rte_spinlock_lock(&bp->hwrm_lock);
        rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));
        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                RTE_LOG(ERR, PMD, "%s failed rc:%d\n", __func__, rc);
                goto error;
        }

        RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj, resp->hwrm_intf_min,
                resp->hwrm_intf_upd,
                resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
        bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
                        (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
        RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        my_version = HWRM_VERSION_MAJOR << 16;
        my_version |= HWRM_VERSION_MINOR << 8;
        my_version |= HWRM_VERSION_UPDATE;

        fw_version = resp->hwrm_intf_maj << 16;
        fw_version |= resp->hwrm_intf_min << 8;
        fw_version |= resp->hwrm_intf_upd;

        if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
                RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (my_version != fw_version) {
                RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
                if (my_version < fw_version) {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is newer than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "The driver may be missing features.\n");
                } else {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is older than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "Not all driver features may be functional.\n");
                }
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                RTE_LOG(ERR, PMD, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = resp->max_req_win_len;
        max_resp_len = resp->max_resp_len;
        if (bp->max_resp_len != max_resp_len) {
                sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
                bp->hwrm_cmd_resp_dma_addr =
                        rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
                if (bp->hwrm_cmd_resp_dma_addr == 0) {
                        RTE_LOG(ERR, PMD,
                        "Unable to map response buffer to physical memory.\n");
                        rc = -ENOMEM;
                        goto error;
                }
                bp->max_resp_len = max_resp_len;
        }

error:
        rte_spinlock_unlock(&bp->hwrm_lock);
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
        req.flags = rte_cpu_to_le_32(flags);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->flags &= ~BNXT_FLAG_REGISTERED;

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

        if (conf->link_up) {
                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        req.auto_mode |= conf->auto_mode;
                        enables = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                        req.auto_link_speed_mask = conf->auto_link_speed_mask;
                        enables |=
                           HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                        req.auto_link_speed = bp->link_info.auto_link_speed;
                        enables |=
                                HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
                }
                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Set force_pause if there is no auto or if there is a force */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
                RTE_LOG(INFO, PMD, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        link_info->phy_link_status = resp->link;
        if (link_info->phy_link_status !=
            HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK) {
                link_info->link_up = 1;
                link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        } else {
                link_info->link_up = 0;
                link_info->link_speed = 0;
        }
        link_info->duplex = resp->duplex;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        return rc;
}

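/*
 * Retrieve the CoS queue ids and service profiles for all eight queues of
 * the port. GET_QUEUE_INFO(x) below pastes the index into the response
 * field names, expanding to resp->queue_id0..queue_id7 and their matching
 * *_service_profile fields.
 */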
int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

        GET_QUEUE_INFO(0);
        GET_QUEUE_INFO(1);
        GET_QUEUE_INFO(2);
        GET_QUEUE_INFO(3);
        GET_QUEUE_INFO(4);
        GET_QUEUE_INFO(5);
        GET_QUEUE_INFO(6);
        GET_QUEUE_INFO(7);

        return rc;
}

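/*
 * Allocate a firmware ring of the given type. TX and RX rings need the
 * completion ring id and a statistics context; completion rings only need
 * a size and an interrupt mode. On success the firmware-assigned ring id
 * is stored in ring->fw_ring_id.
 */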
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id)
{
        int rc = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_ALLOC, -1, resp);

        req.enables = rte_cpu_to_le_32(0);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.queue_id = bp->cos_queue[0].id;
                /* FALLTHROUGH */
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id =
                    rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
                req.length = rte_cpu_to_le_32(ring->ring_size);
                req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
                req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                req.ring_type = ring_type;
                /*
                 * TODO: Some HWRM versions crash with
                 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
                 */
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                req.length = rte_cpu_to_le_32(ring->ring_size);
                break;
        default:
                RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                return -1;
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                switch (ring_type) {
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc cp failed. rc:%d\n", rc);
                        return rc;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc rx failed. rc:%d\n", rc);
                        return rc;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc tx failed. rc:%d\n", rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
                        return rc;
                }
        }

        ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
        return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
                        struct bnxt_ring *ring, uint32_t ring_type)
{
        int rc;
        struct hwrm_ring_free_input req = {.req_type = 0 };
        struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_FREE, -1, resp);

        req.ring_type = ring_type;
        req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);

                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
                                rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
                        return rc;
                }
        }
        return 0;
}

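/*
 * A ring group ties together the completion ring, RX ring, aggregation
 * ring and statistics context that serve one RX queue; the firmware hands
 * back a group id that VNICs reference. bp->grp_info[] mirrors this
 * mapping per queue index.
 */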
int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
        int rc = 0;
        struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);

        req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
        req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
        req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
        req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->grp_info[idx].fw_grp_id =
            rte_le_to_cpu_16(resp->ring_group_id);

        return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
        int rc;
        struct hwrm_ring_grp_free_input req = {.req_type = 0 };
        struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_FREE, -1, resp);

        req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
        int rc = 0;
        struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
                return rc;

        HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
                             struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
        int rc;
        struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);

        req.update_period_ms = rte_cpu_to_le_32(1000);

        req.stats_dma_addr =
            rte_cpu_to_le_64(cpr->hw_stats_map);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
        bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

        return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
                            struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
        int rc;
        struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_FREE, -1, resp);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
        bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

        return rc;
}

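/*
 * Allocate a VNIC and map the ring groups in [start_grp_id, end_grp_id]
 * into it. The RSS/CoS/LB context rules start out as HWRM_NA_SIGNATURE,
 * and the MRU is derived from the current MTU plus the Ethernet header,
 * CRC and one VLAN tag.
 */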
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0, i, j;
        struct hwrm_vnic_alloc_input req = { 0 };
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        /* map ring groups to this vnic */
        for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
                if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
                        RTE_LOG(ERR, PMD,
                                "Not enough ring groups avail:%x req:%x\n", j,
                                (vnic->end_grp_id - vnic->start_grp_id) + 1);
                        break;
                }
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
        }
        vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
        vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                ETHER_CRC_LEN + VLAN_TAG_SIZE;
        HWRM_PREP(req, VNIC_ALLOC, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
                                        struct bnxt_vnic_info *vnic,
                                        struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_QCFG, -1, resp);

        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        pmode->flags = rte_le_to_cpu_32(resp->flags);
        /* dflt_vnic bit doesn't exist in the _cfg command */
        pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
        pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
        pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
        pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

        return rc;
}

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
                                       struct bnxt_vnic_info *vnic,
                                       struct bnxt_plcmodes_cfg *pmode)
{
        int rc = 0;
        struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);

        req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
        req.flags = rte_cpu_to_le_32(pmode->flags);
        req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
        req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
        req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
        req.enables = rte_cpu_to_le_32(
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
            HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
        );

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t ctx_enable_flag = 0;
        struct bnxt_plcmodes_cfg pmodes;

        rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
        if (rc)
                return rc;

        HWRM_PREP(req, VNIC_CFG, -1, resp);

        /* Only RSS support for now TBD: COS & LB */
        req.enables =
            rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
                             HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
        if (vnic->lb_rule != 0xffff)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
        if (vnic->cos_rule != 0xffff)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
        if (vnic->rss_rule != 0xffff)
                ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
        req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
        req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
        req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
        req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
        req.mru = rte_cpu_to_le_16(vnic->mru);
        if (vnic->func_default)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
        if (vnic->vlan_strip)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
        if (vnic->bd_stall)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
        if (vnic->roce_dual)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
        if (vnic->roce_only)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
        if (vnic->rss_dflt_cr)
                req.flags |= rte_cpu_to_le_32(
                        HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);

        return rc;
}

int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
                int16_t fw_vf_id)
{
        int rc = 0;
        struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
        struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_QCFG, -1, resp);

        req.enables =
                rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.vf_id = rte_cpu_to_le_16(fw_vf_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
        vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
        vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
        vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
        vnic->mru = rte_le_to_cpu_16(resp->mru);
        vnic->func_default = rte_le_to_cpu_32(
                        resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
        vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
        vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
        vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
        vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
        vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
                        HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;

        return rc;
}

int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);

        return rc;
}

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);

        req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->rss_rule = INVALID_HW_RING_ID;

        return rc;
}

int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_free_input req = {.req_type = 0 };
        struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(req, VNIC_FREE, -1, resp);

        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_vnic_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
                           struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);

        req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

        req.ring_grp_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_table_dma_addr);
        req.hash_key_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
        req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
{
        struct hwrm_func_cfg_input req = {0};
        struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc;

        HWRM_PREP(req, FUNC_CFG, -1, resp);

        req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
        req.enables = rte_cpu_to_le_32(
                        HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
        memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
        req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
        HWRM_CHECK_RESULT;

        bp->pf.vf_info[vf].random_mac = false;

        return rc;
}

/*
 * HWRM utility functions
 */

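/*
 * The per-queue walkers below share one indexing convention: completion
 * rings are visited RX queues first, then TX queues, and the group/stats
 * slot for queue i is bp->grp_info[i + 1], since index 0 is reserved for
 * the default completion ring.
 */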
int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq;
                struct bnxt_rx_queue *rxq;
                struct bnxt_cp_ring_info *cpr;

                if (i >= bp->rx_cp_nr_rings) {
                        txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
                        cpr = txq->cp_ring;
                } else {
                        rxq = bp->rx_queues[i];
                        cpr = rxq->cp_ring;
                }

                rc = bnxt_hwrm_stat_clear(bp, cpr);
                if (rc)
                        return rc;
        }
        return 0;
}

int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        int rc;
        unsigned int i;
        struct bnxt_cp_ring_info *cpr;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                unsigned int idx = i + 1;

                if (i >= bp->rx_cp_nr_rings)
                        cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
                else
                        cpr = bp->rx_queues[i]->cp_ring;
                if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
                        rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
                        if (rc)
                                return rc;
                }
        }
        return 0;
}

int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq;
                struct bnxt_rx_queue *rxq;
                struct bnxt_cp_ring_info *cpr;
                unsigned int idx = i + 1;

                if (i >= bp->rx_cp_nr_rings) {
                        txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
                        cpr = txq->cp_ring;
                } else {
                        rxq = bp->rx_queues[i];
                        cpr = rxq->cp_ring;
                }

                rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);

                if (rc)
                        return rc;
        }
        return rc;
}

int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
        uint16_t i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                unsigned int idx = i + 1;

                if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
                        RTE_LOG(ERR, PMD,
                                "Attempt to free invalid ring group %d\n",
                                idx);
                        continue;
                }

                rc = bnxt_hwrm_ring_grp_free(bp, idx);

                if (rc)
                        return rc;
        }
        return rc;
}

static void bnxt_free_cp_ring(struct bnxt *bp,
                              struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

        bnxt_hwrm_ring_free(bp, cp_ring,
                        HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
        cp_ring->fw_ring_id = INVALID_HW_RING_ID;
        bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
        memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
                        sizeof(*cpr->cp_desc_ring));
        cpr->cp_raw_cons = 0;
}

int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq = bp->tx_queues[i];
                struct bnxt_tx_ring_info *txr = txq->tx_ring;
                struct bnxt_ring *ring = txr->tx_ring_struct;
                struct bnxt_cp_ring_info *cpr = txq->cp_ring;
                unsigned int idx = bp->rx_cp_nr_rings + i + 1;

                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_hwrm_ring_free(bp, ring,
                                        HWRM_RING_FREE_INPUT_RING_TYPE_TX);
                        ring->fw_ring_id = INVALID_HW_RING_ID;
                        memset(txr->tx_desc_ring, 0,
                                        txr->tx_ring_struct->ring_size *
                                        sizeof(*txr->tx_desc_ring));
                        memset(txr->tx_buf_ring, 0,
                                        txr->tx_ring_struct->ring_size *
                                        sizeof(*txr->tx_buf_ring));
                        txr->tx_prod = 0;
                        txr->tx_cons = 0;
                }
                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
                        bnxt_free_cp_ring(bp, cpr, idx);
        }

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                struct bnxt_rx_queue *rxq = bp->rx_queues[i];
                struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
                struct bnxt_ring *ring = rxr->rx_ring_struct;
                struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
                unsigned int idx = i + 1;

                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_hwrm_ring_free(bp, ring,
                                        HWRM_RING_FREE_INPUT_RING_TYPE_RX);
                        ring->fw_ring_id = INVALID_HW_RING_ID;
                        bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
                        memset(rxr->rx_desc_ring, 0,
                                        rxr->rx_ring_struct->ring_size *
                                        sizeof(*rxr->rx_desc_ring));
                        memset(rxr->rx_buf_ring, 0,
                                        rxr->rx_ring_struct->ring_size *
                                        sizeof(*rxr->rx_buf_ring));
                        rxr->rx_prod = 0;
                }
                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
                        bnxt_free_cp_ring(bp, cpr, idx);
        }

        /* Default completion ring */
        {
                struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
                        bnxt_free_cp_ring(bp, cpr, 0);
        }

        return rc;
}

int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
{
        uint16_t i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                unsigned int idx = i + 1;

                if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
                    bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
                        continue;

                rc = bnxt_hwrm_ring_grp_alloc(bp, idx);

                if (rc)
                        return rc;
        }
        return rc;
}

void bnxt_free_hwrm_resources(struct bnxt *bp)
{
        /* Release the rte_malloc'd response buffer */
        rte_free(bp->hwrm_cmd_resp_addr);
        bp->hwrm_cmd_resp_addr = NULL;
        bp->hwrm_cmd_resp_dma_addr = 0;
}

int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
        struct rte_pci_device *pdev = bp->pdev;
        char type[RTE_MEMZONE_NAMESIZE];

        sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
                pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
        bp->max_req_len = HWRM_MAX_REQ_LEN;
        bp->max_resp_len = HWRM_MAX_RESP_LEN;
        bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
        if (bp->hwrm_cmd_resp_addr == NULL)
                return -ENOMEM;
        rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
        bp->hwrm_cmd_resp_dma_addr =
                rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
        if (bp->hwrm_cmd_resp_dma_addr == 0) {
                RTE_LOG(ERR, PMD,
                        "unable to map response address to physical memory\n");
                return -ENOMEM;
        }
        rte_spinlock_init(&bp->hwrm_lock);

        return 0;
}

1403 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1404 {
1405         struct bnxt_filter_info *filter;
1406         int rc = 0;
1407
1408         STAILQ_FOREACH(filter, &vnic->filter, next) {
1409                 rc = bnxt_hwrm_clear_filter(bp, filter);
1410                 if (rc)
1411                         break;
1412         }
1413         return rc;
1414 }
1415
1416 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1417 {
1418         struct bnxt_filter_info *filter;
1419         int rc = 0;
1420
1421         STAILQ_FOREACH(filter, &vnic->filter, next) {
1422                 rc = bnxt_hwrm_set_filter(bp, vnic, filter);
1423                 if (rc)
1424                         break;
1425         }
1426         return rc;
1427 }
1428
1429 void bnxt_free_tunnel_ports(struct bnxt *bp)
1430 {
1431         if (bp->vxlan_port_cnt)
1432                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
1433                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
1434         bp->vxlan_port = 0;
1435         if (bp->geneve_port_cnt)
1436                 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
1437                         HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
1438         bp->geneve_port = bp->geneve_port_cnt = 0;
1439 }
1440
1441 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1442 {
1443         struct bnxt_vnic_info *vnic;
1444         unsigned int i;
1445
1446         if (bp->vnic_info == NULL)
1447                 return;
1448
1449         vnic = &bp->vnic_info[0];
1450         if (BNXT_PF(bp))
1451                 bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
1452
1453         /* VNIC resources */
1454         for (i = 0; i < bp->nr_vnics; i++) {
1455                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1456
1457                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1458
1459                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1460                 bnxt_hwrm_vnic_free(bp, vnic);
1461         }
1462         /* Ring resources */
1463         bnxt_free_all_hwrm_rings(bp);
1464         bnxt_free_all_hwrm_ring_grps(bp);
1465         bnxt_free_all_hwrm_stat_ctxs(bp);
1466         bnxt_free_tunnel_ports(bp);
1467 }
1468
1469 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1470 {
1471         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1472
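             /* ETH_LINK_SPEED_AUTONEG is 0, so this tests that the FIXED bit is clear */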
1473         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1474                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1475
1476         switch (conf_link_speed) {
1477         case ETH_LINK_SPEED_10M_HD:
1478         case ETH_LINK_SPEED_100M_HD:
1479                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1480         }
1481         return hw_link_duplex;
1482 }
1483
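     /*
      * Map an ethdev speed flag to the HWRM link speed encoding.  The FORCE_
      * and AUTO_LINK_SPEED constants share the same numeric values (speed in
      * 100 Mbps units), so mixing them below is harmless.
      */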
1484 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
1485 {
1486         uint16_t eth_link_speed = 0;
1487
1488         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
1489                 return ETH_LINK_SPEED_AUTONEG;
1490
1491         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
1492         case ETH_LINK_SPEED_100M:
1493         case ETH_LINK_SPEED_100M_HD:
1494                 eth_link_speed =
1495                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
1496                 break;
1497         case ETH_LINK_SPEED_1G:
1498                 eth_link_speed =
1499                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
1500                 break;
1501         case ETH_LINK_SPEED_2_5G:
1502                 eth_link_speed =
1503                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
1504                 break;
1505         case ETH_LINK_SPEED_10G:
1506                 eth_link_speed =
1507                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
1508                 break;
1509         case ETH_LINK_SPEED_20G:
1510                 eth_link_speed =
1511                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
1512                 break;
1513         case ETH_LINK_SPEED_25G:
1514                 eth_link_speed =
1515                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
1516                 break;
1517         case ETH_LINK_SPEED_40G:
1518                 eth_link_speed =
1519                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
1520                 break;
1521         case ETH_LINK_SPEED_50G:
1522                 eth_link_speed =
1523                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
1524                 break;
1525         default:
1526                 RTE_LOG(ERR, PMD,
1527                         "Unsupported link speed %u; default to AUTO\n",
1528                         conf_link_speed);
1529                 break;
1530         }
1531         return eth_link_speed;
1532 }
1533
1534 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
1535                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
1536                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
1537                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
1538
1539 static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
1540 {
1541         uint32_t one_speed;
1542
1543         if (link_speed == ETH_LINK_SPEED_AUTONEG)
1544                 return 0;
1545
1546         if (link_speed & ETH_LINK_SPEED_FIXED) {
1547                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
1548
1549                 if (one_speed & (one_speed - 1)) {
1550                         RTE_LOG(ERR, PMD,
1551                                 "Invalid advertised speeds (%u) for port %u\n",
1552                                 link_speed, port_id);
1553                         return -EINVAL;
1554                 }
1555                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
1556                         RTE_LOG(ERR, PMD,
1557                                 "Unsupported advertised speed (%u) for port %u\n",
1558                                 link_speed, port_id);
1559                         return -EINVAL;
1560                 }
1561         } else {
1562                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
1563                         RTE_LOG(ERR, PMD,
1564                                 "Unsupported advertised speeds (%u) for port %u\n",
1565                                 link_speed, port_id);
1566                         return -EINVAL;
1567                 }
1568         }
1569         return 0;
1570 }
1571
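     /* Build the autoneg advertisement mask; AUTONEG expands to every supported speed. */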
1572 static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
1573 {
1574         uint16_t ret = 0;
1575
1576         if (link_speed == ETH_LINK_SPEED_AUTONEG)
1577                 link_speed = BNXT_SUPPORTED_SPEEDS;
1578
1579         if (link_speed & ETH_LINK_SPEED_100M)
1580                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1581         if (link_speed & ETH_LINK_SPEED_100M_HD)
1582                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MBHD;
1583         if (link_speed & ETH_LINK_SPEED_1G)
1584                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
1585         if (link_speed & ETH_LINK_SPEED_2_5G)
1586                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
1587         if (link_speed & ETH_LINK_SPEED_10G)
1588                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
1589         if (link_speed & ETH_LINK_SPEED_20G)
1590                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
1591         if (link_speed & ETH_LINK_SPEED_25G)
1592                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
1593         if (link_speed & ETH_LINK_SPEED_40G)
1594                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
1595         if (link_speed & ETH_LINK_SPEED_50G)
1596                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
1597         return ret;
1598 }
1599
1600 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
1601 {
1602         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
1603
1604         switch (hw_link_speed) {
1605         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
1606                 eth_link_speed = ETH_SPEED_NUM_100M;
1607                 break;
1608         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
1609                 eth_link_speed = ETH_SPEED_NUM_1G;
1610                 break;
1611         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
1612                 eth_link_speed = ETH_SPEED_NUM_2_5G;
1613                 break;
1614         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
1615                 eth_link_speed = ETH_SPEED_NUM_10G;
1616                 break;
1617         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
1618                 eth_link_speed = ETH_SPEED_NUM_20G;
1619                 break;
1620         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
1621                 eth_link_speed = ETH_SPEED_NUM_25G;
1622                 break;
1623         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
1624                 eth_link_speed = ETH_SPEED_NUM_40G;
1625                 break;
1626         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
1627                 eth_link_speed = ETH_SPEED_NUM_50G;
1628                 break;
1629         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
1630         default:
1631                 RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
1632                         hw_link_speed);
1633                 break;
1634         }
1635         return eth_link_speed;
1636 }
1637
1638 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
1639 {
1640         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1641
1642         switch (hw_link_duplex) {
1643         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
1644         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
1645                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1646                 break;
1647         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
1648                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
1649                 break;
1650         default:
1651                 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
1652                         hw_link_duplex);
1653                 break;
1654         }
1655         return eth_link_duplex;
1656 }
1657
1658 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
1659 {
1660         int rc = 0;
1661         struct bnxt_link_info *link_info = &bp->link_info;
1662
1663         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
1664         if (rc) {
1665                 RTE_LOG(ERR, PMD,
1666                         "Get link config failed with rc %d\n", rc);
1667                 goto exit;
1668         }
1669         if (link_info->link_up)
1670                 link->link_speed =
1671                         bnxt_parse_hw_link_speed(link_info->link_speed);
1672         else
1673                 link->link_speed = ETH_SPEED_NUM_NONE; /* link is down */
1674         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
1675         link->link_status = link_info->link_up;
1676         link->link_autoneg = link_info->auto_mode ==
1677                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
1678                 ETH_LINK_SPEED_FIXED : ETH_LINK_SPEED_AUTONEG;
1679 exit:
1680         return rc;
1681 }
1682
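     /*
      * Push the link configuration from dev_conf to the PHY: a single fixed
      * speed is forced, otherwise autoneg is restarted with the advertised
      * speed mask.  NPAR and VF functions do not own the PHY and return early.
      */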
1683 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
1684 {
1685         int rc = 0;
1686         struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1687         struct bnxt_link_info link_req;
1688         uint16_t speed;
1689
1690         if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
1691                 return 0;
1692
1693         rc = bnxt_valid_link_speed(dev_conf->link_speeds,
1694                         bp->eth_dev->data->port_id);
1695         if (rc)
1696                 goto error;
1697
1698         memset(&link_req, 0, sizeof(link_req));
1699         link_req.link_up = link_up;
1700         if (!link_up)
1701                 goto port_phy_cfg;
1702
1703         speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
1704         link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
1705         if (speed == 0) {
1706                 link_req.phy_flags |=
1707                                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
1708                 link_req.auto_mode =
1709                                 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
1710                 link_req.auto_link_speed_mask =
1711                         bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
1712         } else {
1713                 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
1714                 link_req.link_speed = speed;
1715                 RTE_LOG(INFO, PMD, "Set link speed 0x%x\n", speed);
1716         }
1717         link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
1718         link_req.auto_pause = bp->link_info.auto_pause;
1719         link_req.force_pause = bp->link_info.force_pause;
1720
1721 port_phy_cfg:
1722         rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
1723         if (rc) {
1724                 RTE_LOG(ERR, PMD,
1725                         "Set link config failed with rc %d\n", rc);
1726         }
1727
1728         rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
1729 error:
1730         return rc;
1731 }
1732
1733 /* JIRA 22088 */
1734 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
1735 {
1736         struct hwrm_func_qcfg_input req = {0};
1737         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1738         int rc = 0;
1739
1740         HWRM_PREP(req, FUNC_QCFG, -1, resp);
1741         req.fid = rte_cpu_to_le_16(0xffff);
1742
1743         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1744
1745         HWRM_CHECK_RESULT;
1746
1747         /* Hardcoded 12-bit (0xfff) VLAN ID mask */
1748         bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
1749
1750         switch (resp->port_partition_type) {
1751         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
1752         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
1753         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
1754                 bp->port_partition_type = resp->port_partition_type;
1755                 break;
1756         default:
1757                 bp->port_partition_type = 0;
1758                 break;
1759         }
1760
1761         return rc;
1762 }
1763
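     /*
      * Fake a FUNC_QCAPS response from the FUNC_CFG request so the caller can
      * still account for a VF's resources when the firmware query fails.
      */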
1764 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
1765                                    struct hwrm_func_qcaps_output *qcaps)
1766 {
1767         qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
1768         memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
1769                sizeof(qcaps->mac_address));
1770         qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
1771         qcaps->max_rx_rings = fcfg->num_rx_rings;
1772         qcaps->max_tx_rings = fcfg->num_tx_rings;
1773         qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
1774         qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
1775         qcaps->max_vfs = 0;
1776         qcaps->first_vf_id = 0;
1777         qcaps->max_vnics = fcfg->num_vnics;
1778         qcaps->max_decap_records = 0;
1779         qcaps->max_encap_records = 0;
1780         qcaps->max_tx_wm_flows = 0;
1781         qcaps->max_tx_em_flows = 0;
1782         qcaps->max_rx_wm_flows = 0;
1783         qcaps->max_rx_em_flows = 0;
1784         qcaps->max_flow_id = 0;
1785         qcaps->max_mcast_filters = fcfg->num_mcast_filters;
1786         qcaps->max_sp_tx_rings = 0;
1787         qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
1788 }
1789
1790 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
1791 {
1792         struct hwrm_func_cfg_input req = {0};
1793         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1794         int rc;
1795
1796         req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
1797                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
1798                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
1799                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
1800                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
1801                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
1802                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
1803                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
1804                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
1805                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
1806         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
1807         req.mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1808                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
1809         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1810                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
1811         req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
1812         req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
1813         req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
1814         req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
1815         req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
1816         req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
1817         req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
1818         req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
1819         req.fid = rte_cpu_to_le_16(0xffff);
1820
1821         HWRM_PREP(req, FUNC_CFG, -1, resp);
1822
1823         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1824         HWRM_CHECK_RESULT;
1825
1826         return rc;
1827 }
1828
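     /* Split the PF's resources evenly between the PF and its VFs (num_vfs + 1 shares). */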
1829 static void populate_vf_func_cfg_req(struct bnxt *bp,
1830                                      struct hwrm_func_cfg_input *req,
1831                                      int num_vfs)
1832 {
1833         req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
1834                         HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
1835                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
1836                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
1837                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
1838                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
1839                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
1840                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
1841                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
1842                         HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
1843
1844         req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1845                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
1846         req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1847                                     ETHER_CRC_LEN + VLAN_TAG_SIZE);
1848         req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
1849                                                 (num_vfs + 1));
1850         req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
1851         req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
1852                                                (num_vfs + 1));
1853         req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
1854         req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
1855         req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
1856         /* TODO: For now, do not support VMDq/RFS on VFs. */
1857         req->num_vnics = rte_cpu_to_le_16(1);
1858         req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
1859                                                  (num_vfs + 1));
1860 }
1861
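     /* Give a VF a random MAC if the firmware reports an all-zero default address. */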
1862 static void add_random_mac_if_needed(struct bnxt *bp,
1863                                      struct hwrm_func_cfg_input *cfg_req,
1864                                      int vf)
1865 {
1866         struct ether_addr mac;
1867
1868         if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
1869                 return;
1870
1871         if (is_zero_ether_addr(&mac)) {
1872                 cfg_req->enables |=
1873                 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1874                 eth_random_addr(cfg_req->dflt_mac_addr);
1875                 bp->pf.vf_info[vf].random_mac = true;
1876         } else {
1877                 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
1878         }
1879 }
1880
1881 static void reserve_resources_from_vf(struct bnxt *bp,
1882                                       struct hwrm_func_cfg_input *cfg_req,
1883                                       int vf)
1884 {
1885         struct hwrm_func_qcaps_input req = {0};
1886         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1887         int rc;
1888
1889         /* Get the actual allocated values now */
1890         HWRM_PREP(req, FUNC_QCAPS, -1, resp);
1891         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1892         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1893
1894         if (rc) {
1895                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
1896                 copy_func_cfg_to_qcaps(cfg_req, resp);
1897         } else if (resp->error_code) {
1898                 rc = rte_le_to_cpu_16(resp->error_code);
1899                 RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
1900                 copy_func_cfg_to_qcaps(cfg_req, resp);
1901         }
1902
1903         bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
1904         bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
1905         bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
1906         bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
1907         bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
1908         bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
1909         /*
1910          * TODO: While not supporting VMDq with VFs, max_vnics is always
1911          * forced to 1 in this case
1912          */
1913         /* bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics); */
1914         bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
1915 }
1916
1917 static int update_pf_resource_max(struct bnxt *bp)
1918 {
1919         struct hwrm_func_qcfg_input req = {0};
1920         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1921         int rc;
1922
1923         /* And copy the allocated numbers into the pf struct */
1924         HWRM_PREP(req, FUNC_QCFG, -1, resp);
1925         req.fid = rte_cpu_to_le_16(0xffff);
1926         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1927         HWRM_CHECK_RESULT;
1928
1929         /* Only TX ring value reflects actual allocation? TODO */
1930         bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
1931         bp->pf.evb_mode = resp->evb_mode;
1932
1933         return rc;
1934 }
1935
1936 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
1937 {
1938         int rc;
1939
1940         if (!BNXT_PF(bp)) {
1941                 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
1942                 return -1;
1943         }
1944
1945         rc = bnxt_hwrm_func_qcaps(bp);
1946         if (rc)
1947                 return rc;
1948
1949         bp->pf.func_cfg_flags &=
1950                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
1951                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
1952         bp->pf.func_cfg_flags |=
1953                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
1954         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
1955         return rc;
1956 }
1957
1958 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
1959 {
1960         struct hwrm_func_cfg_input req = {0};
1961         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1962         int i;
1963         size_t sz;
1964         int rc = 0;
1965         size_t req_buf_sz;
1966
1967         if (!BNXT_PF(bp)) {
1968                 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
1969                 return -1;
1970         }
1971
1972         rc = bnxt_hwrm_func_qcaps(bp);
1973
1974         if (rc)
1975                 return rc;
1976
1977         bp->pf.active_vfs = num_vfs;
1978
1979         /*
1980          * First, configure the PF to only use one TX ring.  This ensures that
1981          * there are enough rings for all VFs.
1982          *
1983          * If we don't do this, when we call func_alloc() later, we will lock
1984          * extra rings to the PF that won't be available during func_cfg() of
1985          * the VFs.
1986          *
1987          * This has been fixed in firmware versions above 20.6.54.
1988          */
1989         bp->pf.func_cfg_flags &=
1990                 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
1991                   HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
1992         bp->pf.func_cfg_flags |=
1993                 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
1994         rc = bnxt_hwrm_pf_func_cfg(bp, 1);
1995         if (rc)
1996                 return rc;
1997
1998         /*
1999          * Now, create and register a buffer to hold forwarded VF requests
2000          */
2001         req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2002         bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2003                 page_roundup(req_buf_sz));
2004         if (bp->pf.vf_req_buf == NULL) {
2005                 rc = -ENOMEM;
2006                 goto error_free;
2007         }
2008         for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2009                 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2010         for (i = 0; i < num_vfs; i++)
2011                 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2012                                         (i * HWRM_MAX_REQ_LEN);
2013
2014         rc = bnxt_hwrm_func_buf_rgtr(bp);
2015         if (rc)
2016                 goto error_free;
2017
2018         populate_vf_func_cfg_req(bp, &req, num_vfs);
2019
2020         bp->pf.active_vfs = 0;
2021         for (i = 0; i < num_vfs; i++) {
2022                 add_random_mac_if_needed(bp, &req, i);
2023
2024                 HWRM_PREP(req, FUNC_CFG, -1, resp);
2025                 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2026                 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2027                 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2028
2029                 /* Clear enable flag for next pass */
2030                 req.enables &= ~rte_cpu_to_le_32(
2031                                 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2032
2033                 if (rc || resp->error_code) {
2034                         RTE_LOG(ERR, PMD,
2035                                 "Failed to initialize VF %d\n", i);
2036                         RTE_LOG(ERR, PMD,
2037                                 "Not all VFs available. (%d, %d)\n",
2038                                 rc, resp->error_code);
2039                         break;
2040                 }
2041
2042                 reserve_resources_from_vf(bp, &req, i);
2043                 bp->pf.active_vfs++;
2044         }
2045
2046         /*
2047          * Now configure the PF to use "the rest" of the resources.
2048          * STD_TX_RING_MODE is used here even though it limits the number of
2049          * TX rings; it lets QoS function properly, and without it the PF
2050          * rings would break bandwidth settings.
2051          */
2052         rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2053         if (rc)
2054                 goto error_free;
2055
2056         rc = update_pf_resource_max(bp);
2057         if (rc)
2058                 goto error_free;
2059
2060         return rc;
2061
2062 error_free:
2063         bnxt_hwrm_func_buf_unrgtr(bp);
2064         return rc;
2065 }
2066
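     /*
      * Program a tunnel destination UDP port in firmware and cache the
      * returned firmware port ID.  A minimal sketch of a caller, e.g. a
      * hypothetical ethdev udp_tunnel_port_add callback (not part of this
      * file):
      *
      *	static int bnxt_udp_tunnel_port_add_op(struct rte_eth_dev *dev,
      *				struct rte_eth_udp_tunnel *tunnel)
      *	{
      *		struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
      *
      *		if (tunnel->prot_type == RTE_TUNNEL_TYPE_VXLAN)
      *			return bnxt_hwrm_tunnel_dst_port_alloc(bp,
      *			    tunnel->udp_port,
      *			    HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN);
      *		return -ENOTSUP;
      *	}
      */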
2067 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2068                                 uint8_t tunnel_type)
2069 {
2070         struct hwrm_tunnel_dst_port_alloc_input req = {0};
2071         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2072         int rc = 0;
2073
2074         HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, -1, resp);
2075         req.tunnel_type = tunnel_type;
2076         req.tunnel_dst_port_val = rte_cpu_to_be_16(port); /* network byte order */
2077         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2078         HWRM_CHECK_RESULT;
2079
2080         switch (tunnel_type) {
2081         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2082                 bp->vxlan_fw_dst_port_id = rte_le_to_cpu_16(resp->tunnel_dst_port_id);
2083                 bp->vxlan_port = port;
2084                 break;
2085         case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2086                 bp->geneve_fw_dst_port_id = rte_le_to_cpu_16(resp->tunnel_dst_port_id);
2087                 bp->geneve_port = port;
2088                 break;
2089         default:
2090                 break;
2091         }
2092         return rc;
2093 }
2094
2095 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2096                                 uint8_t tunnel_type)
2097 {
2098         struct hwrm_tunnel_dst_port_free_input req = {0};
2099         struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2100         int rc = 0;
2101
2102         HWRM_PREP(req, TUNNEL_DST_PORT_FREE, -1, resp);
2103         req.tunnel_type = tunnel_type;
2104         req.tunnel_dst_port_id = rte_cpu_to_le_16(port); /* firmware port ID, little-endian */
2105         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2106         HWRM_CHECK_RESULT;
2107
2108         return rc;
2109 }
2110
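     /*
      * Register the VF request-forwarding buffer with firmware.  The page
      * size enum must cover active_vfs * HWRM_MAX_REQ_LEN, matching the
      * allocation made in bnxt_hwrm_allocate_vfs().
      */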
2111 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2112 {
2113         int rc = 0;
2114         struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2115         struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2116
2117         HWRM_PREP(req, FUNC_BUF_RGTR, -1, resp);
2118
2119         req.req_buf_num_pages = rte_cpu_to_le_16(1);
2120         req.req_buf_page_size = rte_cpu_to_le_16(
2121                          page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2122         req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2123         req.req_buf_page_addr[0] =
2124                 rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
2125         if (req.req_buf_page_addr[0] == 0) {
2126                 RTE_LOG(ERR, PMD,
2127                         "unable to map buffer address to physical memory\n");
2128                 return -ENOMEM;
2129         }
2130
2131         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2132
2133         HWRM_CHECK_RESULT;
2134
2135         return rc;
2136 }
2137
2138 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2139 {
2140         int rc = 0;
2141         struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2142         struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2143
2144         HWRM_PREP(req, FUNC_BUF_UNRGTR, -1, resp);
2145
2146         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2147
2148         HWRM_CHECK_RESULT;
2149
2150         return rc;
2151 }
2152
2153 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2154 {
2155         struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2156         struct hwrm_func_cfg_input req = {0};
2157         int rc;
2158
2159         HWRM_PREP(req, FUNC_CFG, -1, resp);
2160         req.fid = rte_cpu_to_le_16(0xffff);
2161         req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2162         req.enables = rte_cpu_to_le_32(
2163                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2164         req.async_event_cr = rte_cpu_to_le_16(
2165                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2166         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2167         HWRM_CHECK_RESULT;
2168
2169         return rc;
2170 }
2171
2172 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2173 {
2174         struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2175         struct hwrm_func_vf_cfg_input req = {0};
2176         int rc;
2177
2178         HWRM_PREP(req, FUNC_VF_CFG, -1, resp);
2179         req.enables = rte_cpu_to_le_32(
2180                         HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2181         req.async_event_cr = rte_cpu_to_le_16(
2182                         bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2183         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2184         HWRM_CHECK_RESULT;
2185
2186         return rc;
2187 }
2188
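     /* Ask firmware to reject, on the VF's behalf, a command forwarded from a VF. */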
2189 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2190                               void *encaped, size_t ec_size)
2191 {
2192         int rc = 0;
2193         struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2194         struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2195
2196         if (ec_size > sizeof(req.encap_request))
2197                 return -1;
2198
2199         HWRM_PREP(req, REJECT_FWD_RESP, -1, resp);
2200
2201         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2202         memcpy(req.encap_request, encaped, ec_size);
2203
2204         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2205
2206         HWRM_CHECK_RESULT;
2207
2208         return rc;
2209 }
2210
2211 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2212                                        struct ether_addr *mac)
2213 {
2214         struct hwrm_func_qcfg_input req = {0};
2215         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2216         int rc;
2217
2218         HWRM_PREP(req, FUNC_QCFG, -1, resp);
2219         req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2220         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2221
2222         HWRM_CHECK_RESULT;
2223
2224         memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2225         return rc;
2226 }
2227
2228 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2229                             void *encaped, size_t ec_size)
2230 {
2231         int rc = 0;
2232         struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2233         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2234
2235         if (ec_size > sizeof(req.encap_request))
2236                 return -1;
2237
2238         HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
2239
2240         req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2241         memcpy(req.encap_request, encaped, ec_size);
2242
2243         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2244
2245         HWRM_CHECK_RESULT;
2246
2247         return rc;
2248 }