/*
 * Copyright(c) Broadcom Limited.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Broadcom Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#define HWRM_CMD_TIMEOUT		2000

struct bnxt_plcmodes_cfg {
	uint32_t	flags;
	uint16_t	jumbo_thresh;
	uint16_t	hds_offset;
	uint16_t	hds_threshold;
};
static int page_getenum(size_t size)
	RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
	return sizeof(void *) * 8 - 1;

static int page_roundup(size_t size)
{
	return 1 << page_getenum(size);
}
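/*
 * Usage sketch (illustrative comment, not part of the original source):
 * page_getenum() maps a buffer size to the log2 page-size enum the HWRM
 * expects, and page_roundup() converts that back to a byte count. The
 * pair is used below in bnxt_hwrm_allocate_vfs() and
 * bnxt_hwrm_func_buf_rgtr() to size the VF request-forwarding buffer,
 * roughly:
 *
 *	size_t req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
 *	buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
 *			 page_roundup(req_buf_sz));
 *	req.req_buf_page_size =
 *		rte_cpu_to_le_16(page_getenum(req_buf_sz));
 */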
/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * ChiMP failed the HWRM command.
 */
static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
					 uint32_t msg_len)
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;

	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(*data, bar);

	/* Zero the rest of the request space */
	for (; i < bp->max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;

	/* Ring channel doorbell */
	bar = (uint8_t *)bp->bar0 + 0x100;

	/* Poll for the valid bit */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len */
		if (resp->resp_len && resp->resp_len <=
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)

	if (i >= HWRM_CMD_TIMEOUT) {
		RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
{
	int rc;
	rte_spinlock_lock(&bp->hwrm_lock);
	rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
	rte_spinlock_unlock(&bp->hwrm_lock);
	return rc;
}
#define HWRM_PREP(req, type, cr, resp) \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
	req.cmpl_ring = rte_cpu_to_le_16(cr); \
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
	req.target_id = rte_cpu_to_le_16(0xffff); \
	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)

#define HWRM_CHECK_RESULT \
		RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
	if (resp->error_code) { \
		rc = rte_le_to_cpu_16(resp->error_code); \
		if (resp->resp_len >= 16) { \
			struct hwrm_err_output *tmp_hwrm_err_op = \
				"%s error %d:%d:%08x:%04x\n", \
				rc, tmp_hwrm_err_op->cmd_err, \
				tmp_hwrm_err_op->opaque_0), \
				tmp_hwrm_err_op->opaque_1)); \
				"%s error %d\n", __func__, rc); \
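/*
 * Usage sketch (illustrative comment, not part of the original source):
 * every bnxt_hwrm_*() command below follows the same shape built from
 * the two macros above:
 *
 *	struct hwrm_xxx_input req = {.req_type = 0 };
 *	struct hwrm_xxx_output *resp = bp->hwrm_cmd_resp_addr;
 *	int rc;
 *
 *	HWRM_PREP(req, XXX, -1, resp);
 *	req.some_field = rte_cpu_to_le_16(value);
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT;
 *	result = rte_le_to_cpu_16(resp->some_field);
 *	return rc;
 *
 * where "xxx"/"XXX" stands for the HWRM command name.
 */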
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	/* FIXME add multicast flag, when multicast adding options is supported */
	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
				    mask);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
int bnxt_hwrm_clear_filter(struct bnxt *bp,
			   struct bnxt_filter_info *filter)
	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);

	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	filter->fw_l2_filter_id = -1;

int bnxt_hwrm_set_filter(struct bnxt *bp,
			 struct bnxt_vnic_info *vnic,
			 struct bnxt_filter_info *filter)
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t new_max_vfs;

	HWRM_PREP(req, FUNC_QCAPS, -1, resp);

	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);

	bp->pf.port_id = resp->port_id;
	bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
	new_max_vfs = bp->pdev->max_vfs;
	if (new_max_vfs != bp->pf.max_vfs) {
		rte_free(bp->pf.vf_info);
		bp->pf.vf_info = rte_malloc("bnxt_vf_info",
			sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
		bp->pf.max_vfs = new_max_vfs;
		for (i = 0; i < new_max_vfs; i++) {
			bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
			bp->pf.vf_info[i].vlan_table =
				rte_zmalloc("VF VLAN table",
			if (bp->pf.vf_info[i].vlan_table == NULL)
					"Failed to allocate VLAN table for VF %d\n",
				bp->pf.vf_info[i].vlan_table);
			STAILQ_INIT(&bp->pf.vf_info[i].filter);

	bp->fw_fid = rte_le_to_cpu_32(resp->fid);
	memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
	/* TODO: For now, do not support VMDq/RFS on VFs. */
	bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
	bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
int bnxt_hwrm_func_reset(struct bnxt *bp)
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_RESET, -1, resp);

	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
int bnxt_hwrm_func_driver_register(struct bnxt *bp)
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)

	HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

	req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD);
	memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
	       RTE_MIN(sizeof(req.vf_req_fwd),
		       sizeof(bp->pf.vf_req_fwd)));

	memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
	req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1); /* TODO: Use MACRO */

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	bp->flags |= BNXT_FLAG_REGISTERED;
int bnxt_hwrm_ver_get(struct bnxt *bp)
	struct hwrm_ver_get_input req = {.req_type = 0 };
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t max_resp_len;
	char type[RTE_MEMZONE_NAMESIZE];

	HWRM_PREP(req, VER_GET, -1, resp);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	/*
	 * Hold the lock since we may be adjusting the response pointers.
	 */
	rte_spinlock_lock(&bp->hwrm_lock);
	rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));

	RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
		resp->hwrm_intf_maj, resp->hwrm_intf_min,
		resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
	bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
			(resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
	RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

	my_version = HWRM_VERSION_MAJOR << 16;
	my_version |= HWRM_VERSION_MINOR << 8;
	my_version |= HWRM_VERSION_UPDATE;

	fw_version = resp->hwrm_intf_maj << 16;
	fw_version |= resp->hwrm_intf_min << 8;
	fw_version |= resp->hwrm_intf_upd;

	if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
		RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");

	if (my_version != fw_version) {
		RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
		if (my_version < fw_version) {
				"Firmware API version is newer than driver.\n");
				"The driver may be missing features.\n");
				"Firmware API version is older than driver.\n");
				"Not all driver features may be functional.\n");

	if (bp->max_req_len > resp->max_req_win_len) {
		RTE_LOG(ERR, PMD, "Unsupported request length\n");
	bp->max_req_len = resp->max_req_win_len;
	max_resp_len = resp->max_resp_len;
	if (bp->max_resp_len != max_resp_len) {
		sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
			bp->pdev->addr.domain, bp->pdev->addr.bus,
			bp->pdev->addr.devid, bp->pdev->addr.function);

		rte_free(bp->hwrm_cmd_resp_addr);

		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
		if (bp->hwrm_cmd_resp_addr == NULL) {
		rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
		bp->hwrm_cmd_resp_dma_addr =
			rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
		if (bp->hwrm_cmd_resp_dma_addr == 0) {
				"Unable to map response buffer to physical memory.\n");
		bp->max_resp_len = max_resp_len;

	rte_spinlock_unlock(&bp->hwrm_lock);

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))

	HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	bp->flags &= ~BNXT_FLAG_REGISTERED;
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

	req.flags = rte_cpu_to_le_32(conf->phy_flags);
	req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
	/*
	 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
	 * any auto mode, even "none".
	 */
	if (!conf->link_speed) {
		req.auto_mode |= conf->auto_mode;
		enables = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
		req.auto_link_speed_mask = conf->auto_link_speed_mask;
		enables |=
			HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
		req.auto_link_speed = bp->link_info.auto_link_speed;
		enables |=
			HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
	}
	req.auto_duplex = conf->duplex;
	enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
	req.auto_pause = conf->auto_pause;
	req.force_pause = conf->force_pause;
	/* Set force_pause if there is no auto or if there is a force */
	if (req.auto_pause && !req.force_pause)
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

	req.enables = rte_cpu_to_le_32(enables);
		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
	RTE_LOG(INFO, PMD, "Force Link Down\n");

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	link_info->phy_link_status = resp->link;
	if (link_info->phy_link_status !=
	    HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK) {
		link_info->link_up = 1;
		link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
		link_info->link_up = 0;
		link_info->link_speed = 0;
	link_info->duplex = resp->duplex;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
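/*
 * Illustrative expansion (comment only): the token-pasting macro above
 * turns GET_QUEUE_INFO(0) into
 *
 *	bp->cos_queue[0].id = resp->queue_id0;
 *	bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 *
 * one pair per CoS queue reported by the firmware.
 */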
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id)
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC, -1, resp);

	req.enables = rte_cpu_to_le_32(0);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.queue_id = bp->cos_queue[0].id;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
			rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
		req.length = rte_cpu_to_le_32(ring->ring_size);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		req.length = rte_cpu_to_le_32(ring->ring_size);
	default:
		RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
		default:
			RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);

int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_FREE, -1, resp);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
		default:
			RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);

	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	bp->grp_info[idx].fw_grp_id =
		rte_le_to_cpu_16(resp->ring_group_id);

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_FREE, -1, resp);

	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);

	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
			     struct bnxt_cp_ring_info *cpr, unsigned int idx)
	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);

	req.update_period_ms = rte_cpu_to_le_32(1000);

	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
	req.stats_dma_addr =
		rte_cpu_to_le_64(cpr->hw_stats_map);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
	bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
			    struct bnxt_cp_ring_info *cpr, unsigned int idx)
	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_FREE, -1, resp);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
	bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_vnic_alloc_input req = { 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
		if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
			RTE_LOG(ERR, PMD,
				"Not enough ring groups avail:%x req:%x\n", j,
				(vnic->end_grp_id - vnic->start_grp_id) + 1);
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
	vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
	vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				ETHER_CRC_LEN + VLAN_TAG_SIZE;
	HWRM_PREP(req, VNIC_ALLOC, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);

static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
					struct bnxt_vnic_info *vnic,
					struct bnxt_plcmodes_cfg *pmode)
	struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_QCFG, -1, resp);

	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	pmode->flags = rte_le_to_cpu_32(resp->flags);
	/* dflt_vnic bit doesn't exist in the _cfg command */
	pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
	pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
	pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
	pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
				       struct bnxt_vnic_info *vnic,
				       struct bnxt_plcmodes_cfg *pmode)
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);

	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
	req.flags = rte_cpu_to_le_32(pmode->flags);
	req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
	req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
	req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
	req.enables = rte_cpu_to_le_32(
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
	struct bnxt_plcmodes_cfg pmodes;

	rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);

	HWRM_PREP(req, VNIC_CFG, -1, resp);

	/* Only RSS supported for now; TBD: COS & LB */
	req.enables =
		rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
				 HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
	if (vnic->lb_rule != 0xffff)
		ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
	if (vnic->cos_rule != 0xffff)
		ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
	if (vnic->rss_rule != 0xffff)
		ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
	req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
	req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
	req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
	req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
	req.mru = rte_cpu_to_le_16(vnic->mru);
	if (vnic->func_default)
		req.flags |=
			rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
	if (vnic->vlan_strip)
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
	if (vnic->bd_stall)
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
	if (vnic->roce_dual)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
	if (vnic->roce_only)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
	if (vnic->rss_dflt_cr)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
			int16_t fw_vf_id)
	struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_QCFG, -1, resp);

	req.enables =
		rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.vf_id = rte_cpu_to_le_16(fw_vf_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
	vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
	vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
	vnic->mru = rte_le_to_cpu_16(resp->mru);
	vnic->func_default = rte_le_to_cpu_32(
			resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
	vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
	vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
	vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
	vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
	vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;

int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);

	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	vnic->rss_rule = INVALID_HW_RING_ID;

int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_vnic_free_input req = {.req_type = 0 };
	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)

	HWRM_PREP(req, VNIC_FREE, -1, resp);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	vnic->fw_vnic_id = INVALID_HW_RING_ID;

int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic)
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);

	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

	req.ring_grp_tbl_addr =
		rte_cpu_to_le_64(vnic->rss_table_dma_addr);
	req.hash_key_tbl_addr =
		rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
	req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);

	HWRM_PREP(req, FUNC_CFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	bp->pf.vf_info[vf].random_mac = false;
/*
 * HWRM utility functions
 */

int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}
		rc = bnxt_hwrm_stat_clear(bp, cpr);

int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
	struct bnxt_cp_ring_info *cpr;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		unsigned int idx = i + 1;

		if (i >= bp->rx_cp_nr_rings)
			cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
		else
			cpr = bp->rx_queues[i]->cp_ring;
		if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
			rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);

int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;
		unsigned int idx = i + 1;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}
		rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);

int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		unsigned int idx = i + 1;

		if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
			RTE_LOG(ERR, PMD,
				"Attempt to free invalid ring group %d\n",

		rc = bnxt_hwrm_ring_grp_free(bp, idx);

static void bnxt_free_cp_ring(struct bnxt *bp,
			      struct bnxt_cp_ring_info *cpr, unsigned int idx)
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

	bnxt_hwrm_ring_free(bp, cp_ring,
			    HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
	bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
			sizeof(*cpr->cp_desc_ring));
	cpr->cp_raw_cons = 0;

int bnxt_free_all_hwrm_rings(struct bnxt *bp)
	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
		unsigned int idx = bp->rx_cp_nr_rings + i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_TX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			memset(txr->tx_desc_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_desc_ring));
			memset(txr->tx_buf_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_buf_ring));
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, idx);
	}

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
		struct bnxt_ring *ring = rxr->rx_ring_struct;
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
		unsigned int idx = i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_RX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
			memset(rxr->rx_desc_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_desc_ring));
			memset(rxr->rx_buf_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_buf_ring));
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, idx);
	}

	/* Default completion ring */
	{
		struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, 0);
	}

int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		unsigned int idx = i + 1;

		if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
		    bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
			continue;

		rc = bnxt_hwrm_ring_grp_alloc(bp, idx);
void bnxt_free_hwrm_resources(struct bnxt *bp)
	/* Release the HWRM command response buffer */
	rte_free(bp->hwrm_cmd_resp_addr);
	bp->hwrm_cmd_resp_addr = NULL;
	bp->hwrm_cmd_resp_dma_addr = 0;
int bnxt_alloc_hwrm_resources(struct bnxt *bp)
	struct rte_pci_device *pdev = bp->pdev;
	char type[RTE_MEMZONE_NAMESIZE];

	sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	bp->max_req_len = HWRM_MAX_REQ_LEN;
	bp->max_resp_len = HWRM_MAX_RESP_LEN;
	bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
	if (bp->hwrm_cmd_resp_addr == NULL)
		return -ENOMEM;
	rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
	bp->hwrm_cmd_resp_dma_addr =
		rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
	if (bp->hwrm_cmd_resp_dma_addr == 0) {
		RTE_LOG(ERR, PMD,
			"unable to map response address to physical memory\n");
		return -ENOMEM;
	}
	rte_spinlock_init(&bp->hwrm_lock);
int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct bnxt_filter_info *filter;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		rc = bnxt_hwrm_clear_filter(bp, filter);

int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct bnxt_filter_info *filter;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		rc = bnxt_hwrm_set_filter(bp, vnic, filter);

void bnxt_free_tunnel_ports(struct bnxt *bp)
	if (bp->vxlan_port_cnt)
		bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);

	if (bp->geneve_port_cnt)
		bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
	bp->geneve_port = 0;

void bnxt_free_all_hwrm_resources(struct bnxt *bp)
	struct bnxt_vnic_info *vnic;
	unsigned int i;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];

	bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);

	/* VNIC resources */
	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		bnxt_clear_hwrm_vnic_filters(bp, vnic);

		bnxt_hwrm_vnic_ctx_free(bp, vnic);
		bnxt_hwrm_vnic_free(bp, vnic);
	}
	/* Ring resources */
	bnxt_free_all_hwrm_rings(bp);
	bnxt_free_all_hwrm_ring_grps(bp);
	bnxt_free_all_hwrm_stat_ctxs(bp);
	bnxt_free_tunnel_ports(bp);
static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
	uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	switch (conf_link_speed) {
	case ETH_LINK_SPEED_10M_HD:
	case ETH_LINK_SPEED_100M_HD:
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
	}
	return hw_link_duplex;

static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
	uint16_t eth_link_speed = 0;

	if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
		return ETH_LINK_SPEED_AUTONEG;

	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
	case ETH_LINK_SPEED_100M:
	case ETH_LINK_SPEED_100M_HD:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
		break;
	case ETH_LINK_SPEED_1G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
		break;
	case ETH_LINK_SPEED_2_5G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
		break;
	case ETH_LINK_SPEED_10G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
		break;
	case ETH_LINK_SPEED_20G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
		break;
	case ETH_LINK_SPEED_25G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
		break;
	case ETH_LINK_SPEED_40G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
		break;
	case ETH_LINK_SPEED_50G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
		break;
	default:
		RTE_LOG(ERR, PMD,
			"Unsupported link speed %d; default to AUTO\n",
			conf_link_speed);
		break;
	}
	return eth_link_speed;

#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
		ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
		ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
		ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
	uint32_t one_speed;

	if (link_speed == ETH_LINK_SPEED_AUTONEG)
		return 0;

	if (link_speed & ETH_LINK_SPEED_FIXED) {
		one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;

		if (one_speed & (one_speed - 1)) {
			RTE_LOG(ERR, PMD,
				"Invalid advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
		if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
			RTE_LOG(ERR, PMD,
				"Unsupported advertised speed (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	} else {
		if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
			RTE_LOG(ERR, PMD,
				"Unsupported advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	}
	return 0;

static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
	uint16_t ret = 0;

	if (link_speed == ETH_LINK_SPEED_AUTONEG)
		link_speed = BNXT_SUPPORTED_SPEEDS;

	if (link_speed & ETH_LINK_SPEED_100M)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_100M_HD)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_1G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
	if (link_speed & ETH_LINK_SPEED_2_5G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
	if (link_speed & ETH_LINK_SPEED_10G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
	if (link_speed & ETH_LINK_SPEED_20G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
	if (link_speed & ETH_LINK_SPEED_25G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
	if (link_speed & ETH_LINK_SPEED_40G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
	if (link_speed & ETH_LINK_SPEED_50G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
	return ret;

static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
	uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;

	switch (hw_link_speed) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
		eth_link_speed = ETH_SPEED_NUM_100M;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
		eth_link_speed = ETH_SPEED_NUM_1G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
		eth_link_speed = ETH_SPEED_NUM_2_5G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
		eth_link_speed = ETH_SPEED_NUM_10G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
		eth_link_speed = ETH_SPEED_NUM_20G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
		eth_link_speed = ETH_SPEED_NUM_25G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
		eth_link_speed = ETH_SPEED_NUM_40G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
		eth_link_speed = ETH_SPEED_NUM_50G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
	default:
		RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
			hw_link_speed);
		break;
	}
	return eth_link_speed;

static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
	uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;

	switch (hw_link_duplex) {
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	default:
		RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
			hw_link_duplex);
		break;
	}
	return eth_link_duplex;

int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Get link config failed with rc %d\n", rc);
	}
	if (link_info->link_up)
		link->link_speed =
			bnxt_parse_hw_link_speed(link_info->link_speed);
	else
		link->link_speed = ETH_LINK_SPEED_10M;
	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
	link->link_status = link_info->link_up;
	link->link_autoneg = link_info->auto_mode ==
			HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
			ETH_LINK_SPEED_FIXED : ETH_LINK_SPEED_AUTONEG;

int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
	int rc = 0;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_link_info link_req;
	uint16_t speed;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
		return 0;

	rc = bnxt_valid_link_speed(dev_conf->link_speeds,
				   bp->eth_dev->data->port_id);

	memset(&link_req, 0, sizeof(link_req));
	link_req.link_up = link_up;

	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
	if (speed == 0) {
		link_req.phy_flags |=
			HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		link_req.auto_mode =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
		link_req.auto_link_speed_mask =
			bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
	} else {
		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
		link_req.link_speed = speed;
		RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
	}
	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
	link_req.auto_pause = bp->link_info.auto_pause;
	link_req.force_pause = bp->link_info.force_pause;

	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Set link config failed with rc %d\n", rc);
	}

	rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
int bnxt_hwrm_func_qcfg(struct bnxt *bp)
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QCFG, -1, resp);
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	/* Hard-coded 0xfff VLAN ID mask */
	bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;

	switch (resp->port_partition_type) {
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	default:
		bp->port_partition_type = 0;
		break;
	}
static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
				   struct hwrm_func_qcaps_output *qcaps)
	qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
	memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
	       sizeof(qcaps->mac_address));
	qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
	qcaps->max_rx_rings = fcfg->num_rx_rings;
	qcaps->max_tx_rings = fcfg->num_tx_rings;
	qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
	qcaps->max_stat_ctx = fcfg->num_stat_ctxs;

	qcaps->first_vf_id = 0;
	qcaps->max_vnics = fcfg->num_vnics;
	qcaps->max_decap_records = 0;
	qcaps->max_encap_records = 0;
	qcaps->max_tx_wm_flows = 0;
	qcaps->max_tx_em_flows = 0;
	qcaps->max_rx_wm_flows = 0;
	qcaps->max_rx_em_flows = 0;
	qcaps->max_flow_id = 0;
	qcaps->max_mcast_filters = fcfg->num_mcast_filters;
	qcaps->max_sp_tx_rings = 0;
	qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;

static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
			HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
	req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
	req.mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				   ETHER_CRC_LEN + VLAN_TAG_SIZE);
	req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				   ETHER_CRC_LEN + VLAN_TAG_SIZE);
	req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
	req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
	req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
	req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
	req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
	req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
	req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
	req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
	req.fid = rte_cpu_to_le_16(0xffff);

	HWRM_PREP(req, FUNC_CFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

static void populate_vf_func_cfg_req(struct bnxt *bp,
				     struct hwrm_func_cfg_input *req,
				     int num_vfs)
	req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
			HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);

	req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				    ETHER_CRC_LEN + VLAN_TAG_SIZE);
	req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				    ETHER_CRC_LEN + VLAN_TAG_SIZE);
	req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
						(num_vfs + 1));
	req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
	req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
					       (num_vfs + 1));
	req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
	req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
	req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
	/* TODO: For now, do not support VMDq/RFS on VFs. */
	req->num_vnics = rte_cpu_to_le_16(1);
	req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
						 (num_vfs + 1));
static void add_random_mac_if_needed(struct bnxt *bp,
				     struct hwrm_func_cfg_input *cfg_req,
				     int vf)
	struct ether_addr mac;

	if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
		return;

	if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
		cfg_req->enables |=
		    rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
		eth_random_addr(cfg_req->dflt_mac_addr);
		bp->pf.vf_info[vf].random_mac = true;
	} else {
		memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
	}
static void reserve_resources_from_vf(struct bnxt *bp,
				      struct hwrm_func_cfg_input *cfg_req,
				      int vf)
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	/* Get the actual allocated values now */
	HWRM_PREP(req, FUNC_QCAPS, -1, resp);
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	if (rc) {
		RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
		copy_func_cfg_to_qcaps(cfg_req, resp);
	} else if (resp->error_code) {
		rc = rte_le_to_cpu_16(resp->error_code);
		RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
		copy_func_cfg_to_qcaps(cfg_req, resp);
	}

	bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
	bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
	/*
	 * TODO: While not supporting VMDq with VFs, max_vnics is always
	 * forced to 1 in this case
	 */
	/* bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics); */
	bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
static int update_pf_resource_max(struct bnxt *bp)
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	/* And copy the allocated numbers into the pf struct */
	HWRM_PREP(req, FUNC_QCFG, -1, resp);
	req.fid = rte_cpu_to_le_16(0xffff);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	/* Only TX ring value reflects actual allocation? TODO */
	bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
	bp->pf.evb_mode = resp->evb_mode;
int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
		RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");

	rc = bnxt_hwrm_func_qcaps(bp);

	bp->pf.func_cfg_flags &=
		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
	bp->pf.func_cfg_flags |=
		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;

		RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");

	rc = bnxt_hwrm_func_qcaps(bp);

	bp->pf.active_vfs = num_vfs;

	/*
	 * First, configure the PF to only use one TX ring. This ensures that
	 * there are enough rings for all VFs.
	 *
	 * If we don't do this, when we call func_alloc() later, we will lock
	 * extra rings to the PF that won't be available during func_cfg() of
	 * the VFs.
	 *
	 * This has been fixed with firmware versions above 20.6.54
	 */
	bp->pf.func_cfg_flags &=
		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
	bp->pf.func_cfg_flags |=
		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
	rc = bnxt_hwrm_pf_func_cfg(bp, 1);

	/*
	 * Now, create and register a buffer to hold forwarded VF requests
	 */
	req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
	bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
		page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
	if (bp->pf.vf_req_buf == NULL) {

	for (sz = 0; sz < req_buf_sz; sz += getpagesize())
		rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
	for (i = 0; i < num_vfs; i++)
		bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
					(i * HWRM_MAX_REQ_LEN);

	rc = bnxt_hwrm_func_buf_rgtr(bp);

	populate_vf_func_cfg_req(bp, &req, num_vfs);

	bp->pf.active_vfs = 0;
	for (i = 0; i < num_vfs; i++) {
		add_random_mac_if_needed(bp, &req, i);

		HWRM_PREP(req, FUNC_CFG, -1, resp);
		req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
		req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

		/* Clear enable flag for next pass */
		req.enables &= ~rte_cpu_to_le_32(
				HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);

		if (rc || resp->error_code) {
			RTE_LOG(ERR, PMD,
				"Failed to initialize VF %d\n", i);
			RTE_LOG(ERR, PMD,
				"Not all VFs available. (%d, %d)\n",
				rc, resp->error_code);
			break;
		}

		reserve_resources_from_vf(bp, &req, i);
		bp->pf.active_vfs++;
	}

	/*
	 * Now configure the PF to use "the rest" of the resources.
	 * We're using STD_TX_RING_MODE here, which will limit the TX
	 * rings. This will allow QoS to function properly. Not setting this
	 * will cause PF rings to break bandwidth settings.
	 */
	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);

	rc = update_pf_resource_max(bp);

	bnxt_hwrm_func_buf_unrgtr(bp);
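/*
 * Sequence overview (illustrative comment, not part of the original
 * source): bnxt_hwrm_allocate_vfs() above effectively performs
 *
 *	bnxt_hwrm_pf_func_cfg(bp, 1);        shrink PF to one TX ring
 *	bnxt_hwrm_func_buf_rgtr(bp);         register VF request buffer
 *	per VF: HWRM FUNC_CFG + reserve_resources_from_vf()
 *	bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);  PF takes the rest
 *	update_pf_resource_max(bp);
 *
 * with bnxt_hwrm_func_buf_unrgtr() used for cleanup.
 */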
int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
				    uint8_t tunnel_type)
	struct hwrm_tunnel_dst_port_alloc_input req = {0};
	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, -1, resp);
	req.tunnel_type = tunnel_type;
	req.tunnel_dst_port_val = port;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	switch (tunnel_type) {
	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
		bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
		bp->vxlan_port = port;
		break;
	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
		bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
		bp->geneve_port = port;
		break;
	}

int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
				   uint8_t tunnel_type)
	struct hwrm_tunnel_dst_port_free_input req = {0};
	struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, TUNNEL_DST_PORT_FREE, -1, resp);
	req.tunnel_type = tunnel_type;
	req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
	struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_BUF_RGTR, -1, resp);

	req.req_buf_num_pages = rte_cpu_to_le_16(1);
	req.req_buf_page_size = rte_cpu_to_le_16(
			page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
	req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
	req.req_buf_page_addr[0] =
		rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
	if (req.req_buf_page_addr[0] == 0) {
		RTE_LOG(ERR, PMD,
			"unable to map buffer address to physical memory\n");
		return -ENOMEM;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
	struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_BUF_UNRGTR, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};

	HWRM_PREP(req, FUNC_CFG, -1, resp);
	req.fid = rte_cpu_to_le_16(0xffff);
	req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
	req.async_event_cr = rte_cpu_to_le_16(
			bp->def_cp_ring->cp_ring_struct->fw_ring_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_vf_cfg_input req = {0};

	HWRM_PREP(req, FUNC_VF_CFG, -1, resp);
	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
	req.async_event_cr = rte_cpu_to_le_16(
			bp->def_cp_ring->cp_ring_struct->fw_ring_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
			      void *encaped, size_t ec_size)
	struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	if (ec_size > sizeof(req.encap_request))
		return -1;

	HWRM_PREP(req, REJECT_FWD_RESP, -1, resp);

	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
	memcpy(req.encap_request, encaped, ec_size);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
				       struct ether_addr *mac)
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QCFG, -1, resp);
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);

int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
			    void *encaped, size_t ec_size)
	struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	if (ec_size > sizeof(req.encap_request))
		return -1;

	HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);

	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
	memcpy(req.encap_request, encaped, ec_size);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));