/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_io.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#define HWRM_CMD_TIMEOUT		2000

struct bnxt_plcmodes_cfg {
	uint32_t	flags;
	uint16_t	jumbo_thresh;
	uint16_t	hds_offset;
	uint16_t	hds_threshold;
};

static int page_getenum(size_t size)
{
	if (size <= 1 << 4)
		return 4;
	if (size <= 1 << 12)
		return 12;
	if (size <= 1 << 13)
		return 13;
	if (size <= 1 << 16)
		return 16;
	if (size <= 1 << 21)
		return 21;
	if (size <= 1 << 22)
		return 22;
	if (size <= 1 << 30)
		return 30;
	RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
	return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
	return 1 << page_getenum(size);
}

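/*
 * Example (illustrative, based on the reconstruction above): page_getenum()
 * returns the log2 of the smallest supported page size that fits the given
 * size, and page_roundup() converts that back to bytes. For instance,
 * page_getenum(512) == 12 and page_roundup(512) == 4096, i.e. one 4KB page.
 */
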
/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (ie: a timeout), and a positive non-zero HWRM error code if the HWRM
 * command was failed by the ChiMP.
 */

static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
					uint32_t msg_len)
{
	unsigned int i;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;
	uint8_t *bar;
	uint8_t *valid;

	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(*data, bar);
		data++;
	}

	/* Zero the rest of the request space */
	for (; i < bp->max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(0, bar);
	}

	/* Ring channel doorbell */
	bar = (uint8_t *)bp->bar0 + 0x100;
	rte_write32(1, bar);

	/* Poll for the valid bit */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len */
		rte_rmb();
		if (resp->resp_len && resp->resp_len <=
				bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)
				break;
		}
		rte_delay_us(600);
	}

	if (i >= HWRM_CMD_TIMEOUT) {
		RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
			req->req_type);
		return -1;
	}
	return 0;
}

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
{
	int rc;

	rte_spinlock_lock(&bp->hwrm_lock);
	rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
	rte_spinlock_unlock(&bp->hwrm_lock);
	return rc;
}

#define HWRM_PREP(req, type, cr, resp) \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
	req.cmpl_ring = rte_cpu_to_le_16(cr); \
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
	req.target_id = rte_cpu_to_le_16(0xffff); \
	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)

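/*
 * Note: HWRM_PREP expands to multiple statements and is not wrapped in
 * do { } while (0), so it must only appear where a statement list is legal
 * (never as the un-braced body of an if/else). It also assumes local
 * variables bp and resp are in scope at the call site.
 */
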
#define HWRM_CHECK_RESULT \
	{ \
		if (rc) { \
			RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
				__func__, rc); \
			return rc; \
		} \
		if (resp->error_code) { \
			rc = rte_le_to_cpu_16(resp->error_code); \
			if (resp->resp_len >= 16) { \
				struct hwrm_err_output *tmp_hwrm_err_op = \
							(void *)resp; \
				RTE_LOG(ERR, PMD, \
					"%s error %d:%d:%08x:%04x\n", \
					__func__, \
					rc, tmp_hwrm_err_op->cmd_err, \
					rte_le_to_cpu_32( \
						tmp_hwrm_err_op->opaque_0), \
					rte_le_to_cpu_16( \
						tmp_hwrm_err_op->opaque_1)); \
			} else { \
				RTE_LOG(ERR, PMD, \
					"%s error %d\n", __func__, rc); \
			} \
			return rc; \
		} \
	}

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.mask = 0;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t mask = 0;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	/* FIXME add multicast flag, when multicast adding options is supported
	 * by ethtool.
	 */
	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
		mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	if (vnic->mc_addr_cnt) {
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
		req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
		req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
	}
	req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
				    mask);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_clear_filter(struct bnxt *bp,
			   struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);

	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	filter->fw_l2_filter_id = -1;

	return 0;
}

int bnxt_hwrm_set_filter(struct bnxt *bp,
			 uint16_t dst_id,
			 struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
	      HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);

	return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t new_max_vfs;
	int i;

	HWRM_PREP(req, FUNC_QCAPS, -1, resp);

	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	if (BNXT_PF(bp)) {
		bp->pf.port_id = resp->port_id;
		bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		new_max_vfs = bp->pdev->max_vfs;
		if (new_max_vfs != bp->pf.max_vfs) {
			if (bp->pf.vf_info)
				rte_free(bp->pf.vf_info);
			bp->pf.vf_info = rte_malloc("bnxt_vf_info",
			    sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
			bp->pf.max_vfs = new_max_vfs;
			for (i = 0; i < new_max_vfs; i++) {
				bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
				bp->pf.vf_info[i].vlan_table =
					rte_zmalloc("VF VLAN table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_table == NULL)
					RTE_LOG(ERR, PMD,
					"Fail to alloc VLAN table for VF %d\n",
					i);
				else
					rte_mem_lock_page(
						bp->pf.vf_info[i].vlan_table);
				STAILQ_INIT(&bp->pf.vf_info[i].filter);
			}
		}
	}

	bp->fw_fid = rte_le_to_cpu_32(resp->fid);
	memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
	/* TODO: For now, do not support VMDq/RFS on VFs. */
	if (BNXT_PF(bp)) {
		if (bp->pf.max_vfs)
			bp->max_vnics = 1;
		else
			bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	} else {
		bp->max_vnics = 1;
	}
	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
	if (BNXT_PF(bp))
		bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);

	return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_RESET, -1, resp);

	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
	int rc;
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)
		return 0;

	HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

	if (BNXT_PF(bp)) {
		req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD);
		memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
		       RTE_MIN(sizeof(req.vf_req_fwd),
			       sizeof(bp->pf.vf_req_fwd)));
	}

	req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1); /* TODO: Use MACRO */
	/* The memset below enables all async events, superseding the bit
	 * set above.
	 */
	memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->flags |= BNXT_FLAG_REGISTERED;

	return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_ver_get_input req = {.req_type = 0 };
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t my_version;
	uint32_t fw_version;
	uint16_t max_resp_len;
	char type[RTE_MEMZONE_NAMESIZE];

	HWRM_PREP(req, VER_GET, -1, resp);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	/*
	 * Hold the lock since we may be adjusting the response pointers.
	 */
	rte_spinlock_lock(&bp->hwrm_lock);
	rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));
	if (rc)
		goto error;
	if (resp->error_code) {
		rc = rte_le_to_cpu_16(resp->error_code);
		RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc);
		goto error;
	}

	RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
		resp->hwrm_intf_maj, resp->hwrm_intf_min,
		resp->hwrm_intf_upd,
		resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
	bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
			(resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
	RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

	my_version = HWRM_VERSION_MAJOR << 16;
	my_version |= HWRM_VERSION_MINOR << 8;
	my_version |= HWRM_VERSION_UPDATE;

	fw_version = resp->hwrm_intf_maj << 16;
	fw_version |= resp->hwrm_intf_min << 8;
	fw_version |= resp->hwrm_intf_upd;

	if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
		RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
		rc = -EINVAL;
		goto error;
	}

	if (my_version != fw_version) {
		RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
		if (my_version < fw_version) {
			RTE_LOG(INFO, PMD,
				"Firmware API version is newer than driver.\n");
			RTE_LOG(INFO, PMD,
				"The driver may be missing features.\n");
		} else {
			RTE_LOG(INFO, PMD,
				"Firmware API version is older than driver.\n");
			RTE_LOG(INFO, PMD,
				"Not all driver features may be functional.\n");
		}
	}

	if (bp->max_req_len > resp->max_req_win_len) {
		RTE_LOG(ERR, PMD, "Unsupported request length\n");
		rc = -EINVAL;
	}
	bp->max_req_len = resp->max_req_win_len;
	max_resp_len = resp->max_resp_len;
	if (bp->max_resp_len != max_resp_len) {
		sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
			bp->pdev->addr.domain, bp->pdev->addr.bus,
			bp->pdev->addr.devid, bp->pdev->addr.function);

		rte_free(bp->hwrm_cmd_resp_addr);

		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
		if (bp->hwrm_cmd_resp_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
		bp->hwrm_cmd_resp_dma_addr =
			rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
		if (bp->hwrm_cmd_resp_dma_addr == 0) {
			RTE_LOG(ERR, PMD,
			"Unable to map response buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}
		bp->max_resp_len = max_resp_len;
	}

error:
	rte_spinlock_unlock(&bp->hwrm_lock);
	return rc;
}

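/*
 * Example: bp->fw_ver above packs one version component per byte, so
 * firmware 20.6.54 (rsvd 0) is stored as
 * (20 << 24) | (6 << 16) | (54 << 8) | 0 == 0x14063600.
 */
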
int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
	int rc;
	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))
		return 0;

	HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
	req.flags = flags;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->flags &= ~BNXT_FLAG_REGISTERED;

	return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
	int rc = 0;
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

	if (conf->link_up) {
		req.flags = rte_cpu_to_le_32(conf->phy_flags);
		req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (!conf->link_speed) {
			req.auto_mode |= conf->auto_mode;
			enables = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
			req.auto_link_speed_mask = conf->auto_link_speed_mask;
			enables |=
			   HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
			req.auto_link_speed = bp->link_info.auto_link_speed;
			enables |=
				HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
		}
		req.auto_duplex = conf->duplex;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		req.force_pause = conf->force_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause && !req.force_pause)
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		else
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

		req.enables = rte_cpu_to_le_32(enables);
	} else {
		req.flags =
		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
		RTE_LOG(INFO, PMD, "Force Link Down\n");
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
{
	int rc = 0;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	link_info->phy_link_status = resp->link;
	if (link_info->phy_link_status !=
	    HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK) {
		link_info->link_up = 1;
		link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
	} else {
		link_info->link_up = 0;
		link_info->link_speed = 0;
	}
	link_info->duplex = resp->duplex;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;

	return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

	GET_QUEUE_INFO(0);
	GET_QUEUE_INFO(1);
	GET_QUEUE_INFO(2);
	GET_QUEUE_INFO(3);
	GET_QUEUE_INFO(4);
	GET_QUEUE_INFO(5);
	GET_QUEUE_INFO(6);
	GET_QUEUE_INFO(7);

	return rc;
}

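/*
 * For reference, token pasting makes GET_QUEUE_INFO(0) above expand to:
 *	bp->cos_queue[0].id = resp->queue_id0;
 *	bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 */
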
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
	int rc = 0;
	uint32_t enables = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC, -1, resp);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);
	req.length = rte_cpu_to_le_32(ring->ring_size);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.queue_id = bp->cos_queue[0].id;
		/* FALLTHROUGH */
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		if (stats_ctx_id != INVALID_STATS_CTX_ID)
			enables |=
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
		break;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		break;
	default:
		RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
			ring_type);
		return -1;
	}
	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
			return rc;
		}
	}

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
	return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
{
	int rc;
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_FREE, -1, resp);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
				rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
			return rc;
		}
	}
	return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
	int rc = 0;
	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);

	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->grp_info[idx].fw_grp_id =
	    rte_le_to_cpu_16(resp->ring_group_id);

	return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
	int rc;
	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_FREE, -1, resp);

	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
	return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	int rc = 0;
	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);

	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
		return rc;

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			     unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);

	req.update_period_ms = rte_cpu_to_le_32(1000);

	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
	req.stats_dma_addr =
	    rte_cpu_to_le_64(cpr->hw_stats_map);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

	return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			    unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_FREE, -1, resp);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0, i, j;
	struct hwrm_vnic_alloc_input req = { 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
		vnic->start_grp_id, vnic->end_grp_id);
	for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
	vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
	vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				ETHER_CRC_LEN + VLAN_TAG_SIZE;
	HWRM_PREP(req, VNIC_ALLOC, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
	return rc;
}

static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
					struct bnxt_vnic_info *vnic,
					struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_QCFG, -1, resp);

	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	pmode->flags = rte_le_to_cpu_32(resp->flags);
	/* dflt_vnic bit doesn't exist in the _cfg command */
	pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
	pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
	pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
	pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

	return rc;
}

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
				       struct bnxt_vnic_info *vnic,
				       struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);

	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
	req.flags = rte_cpu_to_le_32(pmode->flags);
	req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
	req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
	req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
	req.enables = rte_cpu_to_le_32(
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
	);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
	struct bnxt_plcmodes_cfg pmodes;

	rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
	if (rc)
		return rc;

	HWRM_PREP(req, VNIC_CFG, -1, resp);

	/* Only RSS support for now TBD: COS & LB */
	req.enables =
	    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
			     HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
	if (vnic->lb_rule != 0xffff)
		ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
	if (vnic->cos_rule != 0xffff)
		ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
	if (vnic->rss_rule != 0xffff)
		ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
	req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
	req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
	req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
	req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
	req.mru = rte_cpu_to_le_16(vnic->mru);
	if (vnic->func_default)
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
	if (vnic->vlan_strip)
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
	if (vnic->bd_stall)
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
	if (vnic->roce_dual)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
	if (vnic->roce_only)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
	if (vnic->rss_dflt_cr)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);

	return rc;
}

int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
		int16_t fw_vf_id)
{
	int rc = 0;
	struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_QCFG, -1, resp);

	req.enables =
		rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.vf_id = rte_cpu_to_le_16(fw_vf_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
	vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
	vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
	vnic->mru = rte_le_to_cpu_16(resp->mru);
	vnic->func_default = rte_le_to_cpu_32(
			resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
	vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
	vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
	vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
	vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
	vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;

	return rc;
}

int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);

	return rc;
}

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);

	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->rss_rule = INVALID_HW_RING_ID;

	return rc;
}

int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_free_input req = {.req_type = 0 };
	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
		return rc;

	HWRM_PREP(req, VNIC_FREE, -1, resp);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_vnic_id = INVALID_HW_RING_ID;
	return rc;
}

int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);

	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

	req.ring_grp_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_table_dma_addr);
	req.hash_key_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
	req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
			struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t size;

	HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);

	req.flags = rte_cpu_to_le_32(
			HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);

	req.enables = rte_cpu_to_le_32(
		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);

	size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
	size -= RTE_PKTMBUF_HEADROOM;

	req.jumbo_thresh = rte_cpu_to_le_16(size);
	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
			struct bnxt_vnic_info *vnic, bool enable)
{
	int rc = 0;
	struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_TPA_CFG, -1, resp);

	if (enable) {
		req.enables = rte_cpu_to_le_32(
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
		req.flags = rte_cpu_to_le_32(
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
		req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
		req.max_agg_segs = rte_cpu_to_le_16(5);
		req.max_aggs =
			rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
		req.min_agg_len = rte_cpu_to_le_32(512);
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);

	HWRM_PREP(req, FUNC_CFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	bp->pf.vf_info[vf].random_mac = false;

	return rc;
}

/*
 * HWRM utility functions
 */

int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_clear(bp, cpr);
		if (rc)
			return rc;
	}
	return 0;
}

int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	int rc;
	unsigned int i;
	struct bnxt_cp_ring_info *cpr;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {

		if (i >= bp->rx_cp_nr_rings)
			cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
		else
			cpr = bp->rx_queues[i]->cp_ring;
		if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
			rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
			cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
			/*
			 * TODO. Need a better way to reset grp_info.stats_ctx
			 * for Rx rings only. stats_ctx is not saved for Tx
			 * in grp_info.
			 */
			bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
			if (rc)
				return rc;
		}
	}
	return 0;
}

int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);

		if (rc)
			return rc;
	}
	return rc;
}

int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
	uint16_t idx;
	uint32_t rc = 0;

	for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {

		if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
			RTE_LOG(ERR, PMD,
				"Attempt to free invalid ring group %d\n",
				idx);
			continue;
		}

		rc = bnxt_hwrm_ring_grp_free(bp, idx);

		if (rc)
			return rc;
	}
	return rc;
}

static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
				unsigned int idx __rte_unused)
{
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

	bnxt_hwrm_ring_free(bp, cp_ring,
			HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
	bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
			sizeof(*cpr->cp_desc_ring));
	cpr->cp_raw_cons = 0;
}

int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
		unsigned int idx = bp->rx_cp_nr_rings + i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_TX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			memset(txr->tx_desc_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_desc_ring));
			memset(txr->tx_buf_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_buf_ring));
			txr->tx_prod = 0;
			txr->tx_cons = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_free_cp_ring(bp, cpr, idx);
			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
		struct bnxt_ring *ring = rxr->rx_ring_struct;
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
		unsigned int idx = i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_RX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
			memset(rxr->rx_desc_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_desc_ring));
			memset(rxr->rx_buf_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_buf_ring));
			rxr->rx_prod = 0;
			memset(rxr->ag_buf_ring, 0,
					rxr->ag_ring_struct->ring_size *
					sizeof(*rxr->ag_buf_ring));
			rxr->ag_prod = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_free_cp_ring(bp, cpr, idx);
			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	/* Default completion ring */
	{
		struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_free_cp_ring(bp, cpr, 0);
			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	return rc;
}

int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
{
	uint16_t i;
	uint32_t rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		rc = bnxt_hwrm_ring_grp_alloc(bp, i);
		if (rc)
			return rc;
	}
	return rc;
}

void bnxt_free_hwrm_resources(struct bnxt *bp)
{
	/* Release memzone */
	rte_free(bp->hwrm_cmd_resp_addr);
	bp->hwrm_cmd_resp_addr = NULL;
	bp->hwrm_cmd_resp_dma_addr = 0;
}

int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
	struct rte_pci_device *pdev = bp->pdev;
	char type[RTE_MEMZONE_NAMESIZE];

	sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	bp->max_req_len = HWRM_MAX_REQ_LEN;
	bp->max_resp_len = HWRM_MAX_RESP_LEN;
	bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
	if (bp->hwrm_cmd_resp_addr == NULL)
		return -ENOMEM;
	rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
	bp->hwrm_cmd_resp_dma_addr =
		rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
	if (bp->hwrm_cmd_resp_dma_addr == 0) {
		RTE_LOG(ERR, PMD,
			"unable to map response address to physical memory\n");
		return -ENOMEM;
	}
	rte_spinlock_init(&bp->hwrm_lock);

	return 0;
}

int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		rc = bnxt_hwrm_clear_filter(bp, filter);
		if (rc)
			break;
	}
	return rc;
}

int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
		if (rc)
			break;
	}
	return rc;
}

void bnxt_free_tunnel_ports(struct bnxt *bp)
{
	if (bp->vxlan_port_cnt)
		bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
	bp->vxlan_port = 0;
	if (bp->geneve_port_cnt)
		bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
	bp->geneve_port = 0;
}

void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
	struct bnxt_vnic_info *vnic;
	unsigned int i;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];
	if (BNXT_PF(bp))
		bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);

	/* VNIC resources */
	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		bnxt_clear_hwrm_vnic_filters(bp, vnic);

		bnxt_hwrm_vnic_ctx_free(bp, vnic);

		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);

		bnxt_hwrm_vnic_free(bp, vnic);
	}
	/* Ring resources */
	bnxt_free_all_hwrm_rings(bp);
	bnxt_free_all_hwrm_ring_grps(bp);
	bnxt_free_all_hwrm_stat_ctxs(bp);
	bnxt_free_tunnel_ports(bp);
}

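/*
 * Note: the teardown above releases per-VNIC resources (filters, contexts,
 * TPA state) before freeing the VNICs themselves, then rings, ring groups,
 * statistics contexts and finally the tunnel ports.
 */
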
static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
{
	uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	switch (conf_link_speed) {
	case ETH_LINK_SPEED_10M_HD:
	case ETH_LINK_SPEED_100M_HD:
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
	}
	return hw_link_duplex;
}

static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
{
	uint16_t eth_link_speed = 0;

	if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
		return ETH_LINK_SPEED_AUTONEG;

	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
	case ETH_LINK_SPEED_100M:
	case ETH_LINK_SPEED_100M_HD:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
		break;
	case ETH_LINK_SPEED_1G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
		break;
	case ETH_LINK_SPEED_2_5G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
		break;
	case ETH_LINK_SPEED_10G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
		break;
	case ETH_LINK_SPEED_20G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
		break;
	case ETH_LINK_SPEED_25G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
		break;
	case ETH_LINK_SPEED_40G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
		break;
	case ETH_LINK_SPEED_50G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
		break;
	default:
		RTE_LOG(ERR, PMD,
			"Unsupported link speed %d; default to AUTO\n",
			conf_link_speed);
		break;
	}
	return eth_link_speed;
}

#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
		ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
		ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
		ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)

static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
{
	uint32_t one_speed;

	if (link_speed == ETH_LINK_SPEED_AUTONEG)
		return 0;

	if (link_speed & ETH_LINK_SPEED_FIXED) {
		one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;

		if (one_speed & (one_speed - 1)) {
			RTE_LOG(ERR, PMD,
				"Invalid advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
		if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
			RTE_LOG(ERR, PMD,
				"Unsupported advertised speed (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	} else {
		if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
			RTE_LOG(ERR, PMD,
				"Unsupported advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	}
	return 0;
}

static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
{
	uint16_t ret = 0;

	if (link_speed == ETH_LINK_SPEED_AUTONEG)
		link_speed = BNXT_SUPPORTED_SPEEDS;

	if (link_speed & ETH_LINK_SPEED_100M)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_100M_HD)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_1G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
	if (link_speed & ETH_LINK_SPEED_2_5G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
	if (link_speed & ETH_LINK_SPEED_10G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
	if (link_speed & ETH_LINK_SPEED_20G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
	if (link_speed & ETH_LINK_SPEED_25G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
	if (link_speed & ETH_LINK_SPEED_40G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
	if (link_speed & ETH_LINK_SPEED_50G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
	return ret;
}

static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
{
	uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;

	switch (hw_link_speed) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
		eth_link_speed = ETH_SPEED_NUM_100M;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
		eth_link_speed = ETH_SPEED_NUM_1G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
		eth_link_speed = ETH_SPEED_NUM_2_5G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
		eth_link_speed = ETH_SPEED_NUM_10G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
		eth_link_speed = ETH_SPEED_NUM_20G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
		eth_link_speed = ETH_SPEED_NUM_25G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
		eth_link_speed = ETH_SPEED_NUM_40G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
		eth_link_speed = ETH_SPEED_NUM_50G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
	default:
		RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
			hw_link_speed);
		break;
	}
	return eth_link_speed;
}

static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
{
	uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;

	switch (hw_link_duplex) {
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	default:
		RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
			hw_link_duplex);
		break;
	}
	return eth_link_duplex;
}

int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Get link config failed with rc %d\n", rc);
		goto exit;
	}
	if (link_info->link_up)
		link->link_speed =
			bnxt_parse_hw_link_speed(link_info->link_speed);
	else
		link->link_speed = ETH_LINK_SPEED_10M;
	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
	link->link_status = link_info->link_up;
	link->link_autoneg = link_info->auto_mode ==
		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
		ETH_LINK_SPEED_FIXED : ETH_LINK_SPEED_AUTONEG;
exit:
	return rc;
}

int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
	int rc = 0;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_link_info link_req;
	uint16_t speed;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
		return 0;

	rc = bnxt_valid_link_speed(dev_conf->link_speeds,
			bp->eth_dev->data->port_id);
	if (rc)
		goto error;

	memset(&link_req, 0, sizeof(link_req));
	link_req.link_up = link_up;
	if (!link_up)
		goto port_phy_cfg;

	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
	if (speed == 0) {
		link_req.phy_flags |=
				HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		link_req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
		link_req.auto_link_speed_mask =
			bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
	} else {
		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
		link_req.link_speed = speed;
		RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
	}
	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
	link_req.auto_pause = bp->link_info.auto_pause;
	link_req.force_pause = bp->link_info.force_pause;

port_phy_cfg:
	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Set link config failed with rc %d\n", rc);
	}

	rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
error:
	return rc;
}

int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(req, FUNC_QCFG, -1, resp);
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	/* Hard Coded.. 0xfff VLAN ID mask */
	bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;

	switch (resp->port_partition_type) {
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	default:
		bp->port_partition_type = 0;
		break;
	}

	return rc;
}

static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
				   struct hwrm_func_qcaps_output *qcaps)
{
	qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
	memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
	       sizeof(qcaps->mac_address));
	qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
	qcaps->max_rx_rings = fcfg->num_rx_rings;
	qcaps->max_tx_rings = fcfg->num_tx_rings;
	qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
	qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
	qcaps->max_vfs = 0;
	qcaps->first_vf_id = 0;
	qcaps->max_vnics = fcfg->num_vnics;
	qcaps->max_decap_records = 0;
	qcaps->max_encap_records = 0;
	qcaps->max_tx_wm_flows = 0;
	qcaps->max_tx_em_flows = 0;
	qcaps->max_rx_wm_flows = 0;
	qcaps->max_rx_em_flows = 0;
	qcaps->max_flow_id = 0;
	qcaps->max_mcast_filters = fcfg->num_mcast_filters;
	qcaps->max_sp_tx_rings = 0;
	qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
}

static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
			HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
	req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
	req.mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				   ETHER_CRC_LEN + VLAN_TAG_SIZE);
	req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				   ETHER_CRC_LEN + VLAN_TAG_SIZE);
	req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
	req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
	req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
	req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
	req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
	req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
	req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
	req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
	req.fid = rte_cpu_to_le_16(0xffff);

	HWRM_PREP(req, FUNC_CFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	return rc;
}

static void populate_vf_func_cfg_req(struct bnxt *bp,
				     struct hwrm_func_cfg_input *req,
				     int num_vfs)
{
	req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
			HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);

	req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				    ETHER_CRC_LEN + VLAN_TAG_SIZE);
	req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				    ETHER_CRC_LEN + VLAN_TAG_SIZE);
	req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
						(num_vfs + 1));
	req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
	req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
					       (num_vfs + 1));
	req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
	req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
	req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
	/* TODO: For now, do not support VMDq/RFS on VFs. */
	req->num_vnics = rte_cpu_to_le_16(1);
	req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
						 (num_vfs + 1));
}

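/*
 * Example: resources are divided evenly between the PF and all VFs above.
 * With num_vfs == 4 and bp->max_tx_rings == 100, each of the five functions
 * (PF + 4 VFs) is offered 100 / (4 + 1) == 20 TX rings.
 */
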
static void add_random_mac_if_needed(struct bnxt *bp,
				     struct hwrm_func_cfg_input *cfg_req,
				     int vf)
{
	struct ether_addr mac;

	if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
		return;

	if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00", 6) == 0) {
		cfg_req->enables |=
		rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
		eth_random_addr(cfg_req->dflt_mac_addr);
		bp->pf.vf_info[vf].random_mac = true;
	} else {
		memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
	}
}

static void reserve_resources_from_vf(struct bnxt *bp,
				      struct hwrm_func_cfg_input *cfg_req,
				      int vf)
{
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	/* Get the actual allocated values now */
	HWRM_PREP(req, FUNC_QCAPS, -1, resp);
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc) {
		RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
		copy_func_cfg_to_qcaps(cfg_req, resp);
	} else if (resp->error_code) {
		rc = rte_le_to_cpu_16(resp->error_code);
		RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
		copy_func_cfg_to_qcaps(cfg_req, resp);
	}

	bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
	bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
	/*
	 * TODO: While not supporting VMDq with VFs, max_vnics is always
	 * forced to 1 in this case
	 */
	//bp->max_vnics -= rte_le_to_cpu_16(esp->max_vnics);
	bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
}

static int update_pf_resource_max(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	/* And copy the allocated numbers into the pf struct */
	HWRM_PREP(req, FUNC_QCFG, -1, resp);
	req.fid = rte_cpu_to_le_16(0xffff);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	/* Only TX ring value reflects actual allocation? TODO */
	bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
	bp->pf.evb_mode = resp->evb_mode;

	return rc;
}

int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
{
	int rc;

	if (!BNXT_PF(bp)) {
		RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
		return -1;
	}

	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc)
		return rc;

	bp->pf.func_cfg_flags &=
		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
	bp->pf.func_cfg_flags |=
		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
	return rc;
}

int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int i;
	size_t sz;
	int rc = 0;
	size_t req_buf_sz;

	if (!BNXT_PF(bp)) {
		RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
		return -1;
	}

	rc = bnxt_hwrm_func_qcaps(bp);

	if (rc)
		return rc;

	bp->pf.active_vfs = num_vfs;

	/*
	 * First, configure the PF to only use one TX ring.  This ensures that
	 * there are enough rings for all VFs.
	 *
	 * If we don't do this, when we call func_alloc() later, we will lock
	 * extra rings to the PF that won't be available during func_cfg() of
	 * the VFs.
	 *
	 * This has been fixed with firmware versions above 20.6.54
	 */
	bp->pf.func_cfg_flags &=
		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
	bp->pf.func_cfg_flags |=
		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
	rc = bnxt_hwrm_pf_func_cfg(bp, 1);
	if (rc)
		return rc;

	/*
	 * Now, create and register a buffer to hold forwarded VF requests
	 */
	req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
	bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
		page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
	if (bp->pf.vf_req_buf == NULL) {
		rc = -ENOMEM;
		goto error_free;
	}
	for (sz = 0; sz < req_buf_sz; sz += getpagesize())
		rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
	for (i = 0; i < num_vfs; i++)
		bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
					(i * HWRM_MAX_REQ_LEN);

	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		goto error_free;

	populate_vf_func_cfg_req(bp, &req, num_vfs);

	bp->pf.active_vfs = 0;
	for (i = 0; i < num_vfs; i++) {
		add_random_mac_if_needed(bp, &req, i);

		HWRM_PREP(req, FUNC_CFG, -1, resp);
		req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
		req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

		/* Clear enable flag for next pass */
		req.enables &= ~rte_cpu_to_le_32(
				HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);

		if (rc || resp->error_code) {
			RTE_LOG(ERR, PMD,
				"Failed to initialize VF %d\n", i);
			RTE_LOG(ERR, PMD,
				"Not all VFs available. (%d, %d)\n",
				rc, resp->error_code);
			break;
		}

		reserve_resources_from_vf(bp, &req, i);
		bp->pf.active_vfs++;
	}

	/*
	 * Now configure the PF to use "the rest" of the resources
	 * We're using STD_TX_RING_MODE here though which will limit the TX
	 * rings.  This will allow QoS to function properly.  Not setting this
	 * will cause PF rings to break bandwidth settings.
	 */
	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
	if (rc)
		goto error_free;

	rc = update_pf_resource_max(bp);
	if (rc)
		goto error_free;

	return rc;

error_free:
	bnxt_hwrm_func_buf_unrgtr(bp);
	return rc;
}

int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
				uint8_t tunnel_type)
{
	struct hwrm_tunnel_dst_port_alloc_input req = {0};
	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, -1, resp);
	req.tunnel_type = tunnel_type;
	req.tunnel_dst_port_val = port;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	switch (tunnel_type) {
	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
		bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
		bp->vxlan_port = port;
		break;
	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
		bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
		bp->geneve_port = port;
		break;
	default:
		break;
	}
	return rc;
}

int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
				uint8_t tunnel_type)
{
	struct hwrm_tunnel_dst_port_free_input req = {0};
	struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(req, TUNNEL_DST_PORT_FREE, -1, resp);
	req.tunnel_type = tunnel_type;
	req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_BUF_RGTR, -1, resp);

	req.req_buf_num_pages = rte_cpu_to_le_16(1);
	req.req_buf_page_size = rte_cpu_to_le_16(
			 page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
	req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
	req.req_buf_page_addr[0] =
		rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
	if (req.req_buf_page_addr[0] == 0) {
		RTE_LOG(ERR, PMD,
			"unable to map buffer address to physical memory\n");
		return -ENOMEM;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

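/*
 * Example (assuming HWRM_MAX_REQ_LEN is 128): with 8 active VFs,
 * page_getenum(8 * 128) == 12, i.e. the forwarded-request buffer above is
 * described to the firmware as a single 4KB page.
 */
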
int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_BUF_UNRGTR, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_CFG, -1, resp);
	req.fid = rte_cpu_to_le_16(0xffff);
	req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
	req.async_event_cr = rte_cpu_to_le_16(
			bp->def_cp_ring->cp_ring_struct->fw_ring_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
{
	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_vf_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_VF_CFG, -1, resp);
	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
	req.async_event_cr = rte_cpu_to_le_16(
			bp->def_cp_ring->cp_ring_struct->fw_ring_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
			      void *encaped, size_t ec_size)
{
	int rc = 0;
	struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	if (ec_size > sizeof(req.encap_request))
		return -1;

	HWRM_PREP(req, REJECT_FWD_RESP, -1, resp);

	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
	memcpy(req.encap_request, encaped, ec_size);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
				       struct ether_addr *mac)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(req, FUNC_QCFG, -1, resp);
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
	return rc;
}

int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
			    void *encaped, size_t ec_size)
{
	int rc = 0;
	struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	if (ec_size > sizeof(req.encap_request))
		return -1;

	HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);

	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
	memcpy(req.encap_request, encaped, ec_size);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_port_qstats(struct bnxt *bp)
{
	struct hwrm_port_qstats_input req = {0};
	struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_pf_info *pf = &bp->pf;
	int rc;

	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
		return 0;

	HWRM_PREP(req, PORT_QSTATS, -1, resp);
	req.port_id = rte_cpu_to_le_16(pf->port_id);
	req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
	req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
{
	struct hwrm_port_clr_stats_input req = {0};
	struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_pf_info *pf = &bp->pf;
	int rc;

	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
		return 0;

	HWRM_PREP(req, PORT_CLR_STATS, -1, resp);
	req.port_id = rte_cpu_to_le_16(pf->port_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	return rc;
}