4 * Copyright(c) Broadcom Limited.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Broadcom Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 #include <rte_byteorder.h>
39 #include <rte_common.h>
40 #include <rte_cycles.h>
41 #include <rte_malloc.h>
42 #include <rte_memzone.h>
43 #include <rte_version.h>
47 #include "bnxt_filter.h"
48 #include "bnxt_hwrm.h"
51 #include "bnxt_ring.h"
54 #include "bnxt_vnic.h"
55 #include "hsi_struct_def_dpdk.h"
59 #define HWRM_CMD_TIMEOUT 2000
61 struct bnxt_plcmodes_cfg {
63 uint16_t jumbo_thresh;
65 uint16_t hds_threshold;
68 static int page_getenum(size_t size)
84 RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
85 return sizeof(void *) * 8 - 1;
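/*
 * Round "size" up to the next power of two, e.g. page_roundup(5000) == 8192,
 * assuming page_getenum() returns the smallest n with size <= (1 << n).
 */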
88 static int page_roundup(size_t size)
90 return 1 << page_getenum(size);
94 * HWRM Functions (sent to HWRM)
95 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
96 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the HWRM
97 * command is rejected by the ChiMP.
100 static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
104 struct input *req = msg;
105 struct output *resp = bp->hwrm_cmd_resp_addr;
106 uint32_t *data = msg;
110 /* Write request msg to hwrm channel */
111 for (i = 0; i < msg_len; i += 4) {
112 bar = (uint8_t *)bp->bar0 + i;
113 rte_write32(*data, bar);
117 /* Zero the rest of the request space */
118 for (; i < bp->max_req_len; i += 4) {
119 bar = (uint8_t *)bp->bar0 + i;
123 /* Ring channel doorbell */
124 bar = (uint8_t *)bp->bar0 + 0x100;
127 /* Poll for the valid bit */
128 for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
129 /* Sanity check on the resp->resp_len */
131 if (resp->resp_len && resp->resp_len <=
133 /* Last byte of resp contains the valid key */
134 valid = (uint8_t *)resp + resp->resp_len - 1;
135 if (*valid == HWRM_RESP_VALID_KEY)
141 if (i >= HWRM_CMD_TIMEOUT) {
142 RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
152 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
156 rte_spinlock_lock(&bp->hwrm_lock);
157 rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
158 rte_spinlock_unlock(&bp->hwrm_lock);
162 #define HWRM_PREP(req, type, cr, resp) \
163 memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
164 req.req_type = rte_cpu_to_le_16(HWRM_##type); \
165 req.cmpl_ring = rte_cpu_to_le_16(cr); \
166 req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
167 req.target_id = rte_cpu_to_le_16(0xffff); \
168 req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)
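/*
 * Typical call pattern (a sketch based on the callers below; "xxx" stands
 * for the lower-case HWRM command name):
 *	struct hwrm_xxx_input req = {.req_type = 0 };
 *	struct hwrm_xxx_output *resp = bp->hwrm_cmd_resp_addr;
 *	HWRM_PREP(req, XXX, -1, resp);
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT;
 */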
170 #define HWRM_CHECK_RESULT \
173 RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
177 if (resp->error_code) { \
178 rc = rte_le_to_cpu_16(resp->error_code); \
179 if (resp->resp_len >= 16) { \
180 struct hwrm_err_output *tmp_hwrm_err_op = \
183 "%s error %d:%d:%08x:%04x\n", \
185 rc, tmp_hwrm_err_op->cmd_err, \
187 tmp_hwrm_err_op->opaque_0), \
189 tmp_hwrm_err_op->opaque_1)); \
193 "%s error %d\n", __func__, rc); \
199 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
202 struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
203 struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
205 HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
206 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
209 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
216 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
217 struct bnxt_vnic_info *vnic,
219 struct bnxt_vlan_table_entry *vlan_table)
222 struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
223 struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
226 HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
227 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
229 /* FIXME: add the multicast flag when adding multicast addresses is supported
232 if (vnic->flags & BNXT_VNIC_INFO_BCAST)
233 mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
234 if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
235 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
236 if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
237 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
238 if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
239 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
240 if (vnic->flags & BNXT_VNIC_INFO_MCAST)
241 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
242 if (vnic->mc_addr_cnt) {
243 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
244 req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
245 req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
247 if (vlan_count && vlan_table) {
248 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
249 req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
250 rte_mem_virt2phy(vlan_table));
251 req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
253 req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
256 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
263 int bnxt_hwrm_clear_filter(struct bnxt *bp,
264 struct bnxt_filter_info *filter)
267 struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
268 struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
270 HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);
272 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
274 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
278 filter->fw_l2_filter_id = -1;
283 int bnxt_hwrm_set_filter(struct bnxt *bp,
285 struct bnxt_filter_info *filter)
288 struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
289 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
290 uint32_t enables = 0;
292 HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);
294 req.flags = rte_cpu_to_le_32(filter->flags);
296 enables = filter->enables |
297 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
298 req.dst_id = rte_cpu_to_le_16(dst_id);
301 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
302 memcpy(req.l2_addr, filter->l2_addr,
305 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
306 memcpy(req.l2_addr_mask, filter->l2_addr_mask,
309 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
310 req.l2_ovlan = filter->l2_ovlan;
312 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
313 req.l2_ovlan_mask = filter->l2_ovlan_mask;
314 if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
315 req.src_id = rte_cpu_to_le_32(filter->src_id);
316 if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
317 req.src_type = filter->src_type;
319 req.enables = rte_cpu_to_le_32(enables);
321 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
325 filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
330 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
333 struct hwrm_func_qcaps_input req = {.req_type = 0 };
334 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
335 uint16_t new_max_vfs;
338 HWRM_PREP(req, FUNC_QCAPS, -1, resp);
340 req.fid = rte_cpu_to_le_16(0xffff);
342 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
346 bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
348 bp->pf.port_id = resp->port_id;
349 bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
350 new_max_vfs = bp->pdev->max_vfs;
351 if (new_max_vfs != bp->pf.max_vfs) {
353 rte_free(bp->pf.vf_info);
354 bp->pf.vf_info = rte_malloc("bnxt_vf_info",
355 sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
356 bp->pf.max_vfs = new_max_vfs;
357 for (i = 0; i < new_max_vfs; i++) {
358 bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
359 bp->pf.vf_info[i].vlan_table =
360 rte_zmalloc("VF VLAN table",
363 if (bp->pf.vf_info[i].vlan_table == NULL)
365 "Fail to alloc VLAN table for VF %d\n",
369 bp->pf.vf_info[i].vlan_table);
370 STAILQ_INIT(&bp->pf.vf_info[i].filter);
375 bp->fw_fid = rte_le_to_cpu_32(resp->fid);
376 memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
377 bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
378 bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
379 bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
380 bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
381 bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
382 /* TODO: For now, do not support VMDq/RFS on VFs. */
387 bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
391 bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
393 bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
398 int bnxt_hwrm_func_reset(struct bnxt *bp)
401 struct hwrm_func_reset_input req = {.req_type = 0 };
402 struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
404 HWRM_PREP(req, FUNC_RESET, -1, resp);
406 req.enables = rte_cpu_to_le_32(0);
408 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
415 int bnxt_hwrm_func_driver_register(struct bnxt *bp)
418 struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
419 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
421 if (bp->flags & BNXT_FLAG_REGISTERED)
424 HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
425 req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
426 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
427 req.ver_maj = RTE_VER_YEAR;
428 req.ver_min = RTE_VER_MONTH;
429 req.ver_upd = RTE_VER_MINOR;
432 req.enables |= rte_cpu_to_le_32(
433 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD);
434 memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
435 RTE_MIN(sizeof(req.vf_req_fwd),
436 sizeof(bp->pf.vf_req_fwd)));
439 req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1); /* TODO: Use MACRO */
440 memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
442 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
446 bp->flags |= BNXT_FLAG_REGISTERED;
451 int bnxt_hwrm_ver_get(struct bnxt *bp)
454 struct hwrm_ver_get_input req = {.req_type = 0 };
455 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
458 uint16_t max_resp_len;
459 char type[RTE_MEMZONE_NAMESIZE];
461 HWRM_PREP(req, VER_GET, -1, resp);
463 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
464 req.hwrm_intf_min = HWRM_VERSION_MINOR;
465 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
468 * Hold the lock since we may be adjusting the response pointers.
470 rte_spinlock_lock(&bp->hwrm_lock);
471 rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));
475 RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
476 resp->hwrm_intf_maj, resp->hwrm_intf_min,
478 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
479 bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
480 (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
481 RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
482 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
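/* Pack versions as (major << 16) | (minor << 8) | update, e.g. 1.5.1 -> 0x010501. */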
484 my_version = HWRM_VERSION_MAJOR << 16;
485 my_version |= HWRM_VERSION_MINOR << 8;
486 my_version |= HWRM_VERSION_UPDATE;
488 fw_version = resp->hwrm_intf_maj << 16;
489 fw_version |= resp->hwrm_intf_min << 8;
490 fw_version |= resp->hwrm_intf_upd;
492 if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
493 RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
498 if (my_version != fw_version) {
499 RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
500 if (my_version < fw_version) {
502 "Firmware API version is newer than driver.\n");
504 "The driver may be missing features.\n");
507 "Firmware API version is older than driver.\n");
509 "Not all driver features may be functional.\n");
513 if (bp->max_req_len > resp->max_req_win_len) {
514 RTE_LOG(ERR, PMD, "Unsupported request length\n");
517 bp->max_req_len = resp->max_req_win_len;
518 max_resp_len = resp->max_resp_len;
519 if (bp->max_resp_len != max_resp_len) {
520 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
521 bp->pdev->addr.domain, bp->pdev->addr.bus,
522 bp->pdev->addr.devid, bp->pdev->addr.function);
524 rte_free(bp->hwrm_cmd_resp_addr);
526 bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
527 if (bp->hwrm_cmd_resp_addr == NULL) {
531 rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
532 bp->hwrm_cmd_resp_dma_addr =
533 rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
534 if (bp->hwrm_cmd_resp_dma_addr == 0) {
536 "Unable to map response buffer to physical memory.\n");
540 bp->max_resp_len = max_resp_len;
544 rte_spinlock_unlock(&bp->hwrm_lock);
548 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
551 struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
552 struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
554 if (!(bp->flags & BNXT_FLAG_REGISTERED))
557 HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
560 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
564 bp->flags &= ~BNXT_FLAG_REGISTERED;
569 static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
572 struct hwrm_port_phy_cfg_input req = {0};
573 struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
574 uint32_t enables = 0;
576 HWRM_PREP(req, PORT_PHY_CFG, -1, resp);
579 req.flags = rte_cpu_to_le_32(conf->phy_flags);
580 req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
582 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
583 * any auto mode, even "none".
585 if (!conf->link_speed) {
586 req.auto_mode |= conf->auto_mode;
587 enables = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
588 req.auto_link_speed_mask = conf->auto_link_speed_mask;
590 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
591 req.auto_link_speed = bp->link_info.auto_link_speed;
593 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
595 req.auto_duplex = conf->duplex;
596 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
597 req.auto_pause = conf->auto_pause;
598 req.force_pause = conf->force_pause;
599 /* Set force_pause if there is no auto or if there is a force */
600 if (req.auto_pause && !req.force_pause)
601 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
603 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
605 req.enables = rte_cpu_to_le_32(enables);
608 rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
609 RTE_LOG(INFO, PMD, "Force Link Down\n");
612 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
619 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
620 struct bnxt_link_info *link_info)
623 struct hwrm_port_phy_qcfg_input req = {0};
624 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
626 HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);
628 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
632 link_info->phy_link_status = resp->link;
633 if (link_info->phy_link_status != HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK) {
634 link_info->link_up = 1;
635 link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
637 link_info->link_up = 0;
638 link_info->link_speed = 0;
640 link_info->duplex = resp->duplex;
641 link_info->pause = resp->pause;
642 link_info->auto_pause = resp->auto_pause;
643 link_info->force_pause = resp->force_pause;
644 link_info->auto_mode = resp->auto_mode;
646 link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
647 link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
648 link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
649 link_info->phy_ver[0] = resp->phy_maj;
650 link_info->phy_ver[1] = resp->phy_min;
651 link_info->phy_ver[2] = resp->phy_bld;
656 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
659 struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
660 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
662 HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);
664 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
668 #define GET_QUEUE_INFO(x) \
669 bp->cos_queue[x].id = resp->queue_id##x; \
670 bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
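/*
 * Token pasting expands GET_QUEUE_INFO(0) to:
 *	bp->cos_queue[0].id = resp->queue_id0;
 *	bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 */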
684 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
685 struct bnxt_ring *ring,
686 uint32_t ring_type, uint32_t map_index,
687 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
690 uint32_t enables = 0;
691 struct hwrm_ring_alloc_input req = {.req_type = 0 };
692 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
694 HWRM_PREP(req, RING_ALLOC, -1, resp);
696 req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
697 req.fbo = rte_cpu_to_le_32(0);
698 /* Association of ring index with doorbell index */
699 req.logical_id = rte_cpu_to_le_16(map_index);
700 req.length = rte_cpu_to_le_32(ring->ring_size);
703 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
704 req.queue_id = bp->cos_queue[0].id;
706 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
707 req.ring_type = ring_type;
708 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
709 req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
710 if (stats_ctx_id != INVALID_STATS_CTX_ID)
712 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
714 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
715 req.ring_type = ring_type;
717 * TODO: Some HWRM versions crash with
718 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
720 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
723 RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
727 req.enables = rte_cpu_to_le_32(enables);
729 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
731 if (rc || resp->error_code) {
732 if (rc == 0 && resp->error_code)
733 rc = rte_le_to_cpu_16(resp->error_code);
735 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
737 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
739 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
741 "hwrm_ring_alloc rx failed. rc:%d\n", rc);
743 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
745 "hwrm_ring_alloc tx failed. rc:%d\n", rc);
748 RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
753 ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
757 int bnxt_hwrm_ring_free(struct bnxt *bp,
758 struct bnxt_ring *ring, uint32_t ring_type)
761 struct hwrm_ring_free_input req = {.req_type = 0 };
762 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
764 HWRM_PREP(req, RING_FREE, -1, resp);
766 req.ring_type = ring_type;
767 req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
769 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
771 if (rc || resp->error_code) {
772 if (rc == 0 && resp->error_code)
773 rc = rte_le_to_cpu_16(resp->error_code);
776 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
777 RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
780 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
781 RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
784 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
785 RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
789 RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
796 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
799 struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
800 struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
802 HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);
804 req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
805 req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
806 req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
807 req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
809 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
813 bp->grp_info[idx].fw_grp_id =
814 rte_le_to_cpu_16(resp->ring_group_id);
819 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
822 struct hwrm_ring_grp_free_input req = {.req_type = 0 };
823 struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
825 HWRM_PREP(req, RING_GRP_FREE, -1, resp);
827 req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
829 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
833 bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
837 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
840 struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
841 struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
843 if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
846 HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
848 req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
850 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
857 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
858 unsigned int idx __rte_unused)
861 struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
862 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
864 HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);
866 req.update_period_ms = rte_cpu_to_le_32(0);
869 rte_cpu_to_le_64(cpr->hw_stats_map);
871 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
875 cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
880 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
881 unsigned int idx __rte_unused)
884 struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
885 struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
887 HWRM_PREP(req, STAT_CTX_FREE, -1, resp);
889 req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
891 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
898 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
901 struct hwrm_vnic_alloc_input req = { 0 };
902 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
904 /* map ring groups to this vnic */
905 RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
906 vnic->start_grp_id, vnic->end_grp_id);
907 for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
908 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
909 vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
910 vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
911 vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
912 vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
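/* MRU = MTU + Ethernet header + CRC + one VLAN tag, e.g. 1500 + 14 + 4 + 4 = 1522 for the default MTU. */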
913 vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
914 ETHER_CRC_LEN + VLAN_TAG_SIZE;
915 HWRM_PREP(req, VNIC_ALLOC, -1, resp);
917 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
921 vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
925 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
926 struct bnxt_vnic_info *vnic,
927 struct bnxt_plcmodes_cfg *pmode)
930 struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
931 struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
933 HWRM_PREP(req, VNIC_PLCMODES_QCFG, -1, resp);
935 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
937 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
941 pmode->flags = rte_le_to_cpu_32(resp->flags);
942 /* dflt_vnic bit doesn't exist in the _cfg command */
943 pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
944 pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
945 pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
946 pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
951 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
952 struct bnxt_vnic_info *vnic,
953 struct bnxt_plcmodes_cfg *pmode)
956 struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
957 struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
959 HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);
961 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
962 req.flags = rte_cpu_to_le_32(pmode->flags);
963 req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
964 req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
965 req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
966 req.enables = rte_cpu_to_le_32(
967 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
968 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
969 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
972 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
979 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
982 struct hwrm_vnic_cfg_input req = {.req_type = 0 };
983 struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
984 uint32_t ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
985 struct bnxt_plcmodes_cfg pmodes;
987 rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
991 HWRM_PREP(req, VNIC_CFG, -1, resp);
993 /* Only RSS is supported for now; TBD: COS & LB */
995 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
996 HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
997 if (vnic->lb_rule != 0xffff)
998 ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
999 if (vnic->cos_rule != 0xffff)
1000 ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1001 if (vnic->rss_rule != 0xffff)
1002 ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1003 req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
1004 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1005 req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1006 req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1007 req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1008 req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1009 req.mru = rte_cpu_to_le_16(vnic->mru);
1010 if (vnic->func_default)
1012 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1013 if (vnic->vlan_strip)
1015 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1018 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1019 if (vnic->roce_dual)
1020 req.flags |= rte_cpu_to_le_32(
1021 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1022 if (vnic->roce_only)
1023 req.flags |= rte_cpu_to_le_32(
1024 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1025 if (vnic->rss_dflt_cr)
1026 req.flags |= rte_cpu_to_le_32(
1027 HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
1029 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1033 rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1038 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1042 struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1043 struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1045 HWRM_PREP(req, VNIC_QCFG, -1, resp);
1048 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1049 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1050 req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1052 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1056 vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1057 vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1058 vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1059 vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1060 vnic->mru = rte_le_to_cpu_16(resp->mru);
1061 vnic->func_default = rte_le_to_cpu_32(
1062 resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1063 vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1064 HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1065 vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1066 HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1067 vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1068 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1069 vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1070 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1071 vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1072 HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1077 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1080 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1081 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1082 bp->hwrm_cmd_resp_addr;
1084 HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);
1086 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1090 vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1095 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1098 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1099 struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1100 bp->hwrm_cmd_resp_addr;
1102 HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);
1104 req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1106 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1110 vnic->rss_rule = INVALID_HW_RING_ID;
1115 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1118 struct hwrm_vnic_free_input req = {.req_type = 0 };
1119 struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1121 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
1124 HWRM_PREP(req, VNIC_FREE, -1, resp);
1126 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1128 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1132 vnic->fw_vnic_id = INVALID_HW_RING_ID;
1136 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1137 struct bnxt_vnic_info *vnic)
1140 struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1141 struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1143 HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);
1145 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1147 req.ring_grp_tbl_addr =
1148 rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1149 req.hash_key_tbl_addr =
1150 rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1151 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1153 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1160 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1161 struct bnxt_vnic_info *vnic)
1164 struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1165 struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1168 HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);
1170 req.flags = rte_cpu_to_le_32(
1171 HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1173 req.enables = rte_cpu_to_le_32(
1174 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
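/* Use the Rx mbuf data room, minus the headroom, as the jumbo placement threshold. */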
1176 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1177 size -= RTE_PKTMBUF_HEADROOM;
1179 req.jumbo_thresh = rte_cpu_to_le_16(size);
1180 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1182 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1189 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1190 struct bnxt_vnic_info *vnic, bool enable)
1193 struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1194 struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1196 HWRM_PREP(req, VNIC_TPA_CFG, -1, resp);
1199 req.enables = rte_cpu_to_le_32(
1200 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1201 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1202 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1203 req.flags = rte_cpu_to_le_32(
1204 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1205 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1206 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1207 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1208 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1209 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1210 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1211 req.max_agg_segs = rte_cpu_to_le_16(5);
1213 rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1214 req.min_agg_len = rte_cpu_to_le_32(512);
1217 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1224 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1226 struct hwrm_func_cfg_input req = {0};
1227 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1230 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1231 req.enables = rte_cpu_to_le_32(
1232 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1233 memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1234 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1236 HWRM_PREP(req, FUNC_CFG, -1, resp);
1238 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1241 bp->pf.vf_info[vf].random_mac = false;
1246 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1250 struct hwrm_func_qstats_input req = {.req_type = 0};
1251 struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1253 HWRM_PREP(req, FUNC_QSTATS, -1, resp);
1255 req.fid = rte_cpu_to_le_16(fid);
1257 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1262 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1267 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1268 struct rte_eth_stats *stats)
1271 struct hwrm_func_qstats_input req = {.req_type = 0};
1272 struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1274 HWRM_PREP(req, FUNC_QSTATS, -1, resp);
1276 req.fid = rte_cpu_to_le_16(fid);
1278 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1282 stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1283 stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1284 stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1285 stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1286 stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1287 stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1289 stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1290 stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1291 stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1292 stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1293 stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1294 stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1296 stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
1297 stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);
1299 stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
1304 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1307 struct hwrm_func_clr_stats_input req = {.req_type = 0};
1308 struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1310 HWRM_PREP(req, FUNC_CLR_STATS, -1, resp);
1312 req.fid = rte_cpu_to_le_16(fid);
1314 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1322 * HWRM utility functions
1325 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
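/* Completion rings are indexed with the Rx rings first, followed by the Tx rings. */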
1330 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1331 struct bnxt_tx_queue *txq;
1332 struct bnxt_rx_queue *rxq;
1333 struct bnxt_cp_ring_info *cpr;
1335 if (i >= bp->rx_cp_nr_rings) {
1336 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1339 rxq = bp->rx_queues[i];
1343 rc = bnxt_hwrm_stat_clear(bp, cpr);
1350 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1354 struct bnxt_cp_ring_info *cpr;
1356 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1358 if (i >= bp->rx_cp_nr_rings)
1359 cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1361 cpr = bp->rx_queues[i]->cp_ring;
1362 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1363 rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1364 cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1366 * TODO. Need a better way to reset grp_info.stats_ctx
1367 * for Rx rings only. stats_ctx is not saved for Tx
1370 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
1378 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1383 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1384 struct bnxt_tx_queue *txq;
1385 struct bnxt_rx_queue *rxq;
1386 struct bnxt_cp_ring_info *cpr;
1388 if (i >= bp->rx_cp_nr_rings) {
1389 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1392 rxq = bp->rx_queues[i];
1396 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1404 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1409 for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1411 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
1413 "Attempt to free invalid ring group %d\n",
1418 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1426 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1427 unsigned int idx __rte_unused)
1429 struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1431 bnxt_hwrm_ring_free(bp, cp_ring,
1432 HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1433 cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1434 bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
1435 memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1436 sizeof(*cpr->cp_desc_ring));
1437 cpr->cp_raw_cons = 0;
1440 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1445 for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1446 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1447 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1448 struct bnxt_ring *ring = txr->tx_ring_struct;
1449 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1450 unsigned int idx = bp->rx_cp_nr_rings + i + 1;
1452 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1453 bnxt_hwrm_ring_free(bp, ring,
1454 HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1455 ring->fw_ring_id = INVALID_HW_RING_ID;
1456 memset(txr->tx_desc_ring, 0,
1457 txr->tx_ring_struct->ring_size *
1458 sizeof(*txr->tx_desc_ring));
1459 memset(txr->tx_buf_ring, 0,
1460 txr->tx_ring_struct->ring_size *
1461 sizeof(*txr->tx_buf_ring));
1465 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1466 bnxt_free_cp_ring(bp, cpr, idx);
1467 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1471 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1472 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1473 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1474 struct bnxt_ring *ring = rxr->rx_ring_struct;
1475 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1476 unsigned int idx = i + 1;
1478 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1479 bnxt_hwrm_ring_free(bp, ring,
1480 HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1481 ring->fw_ring_id = INVALID_HW_RING_ID;
1482 bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
1483 memset(rxr->rx_desc_ring, 0,
1484 rxr->rx_ring_struct->ring_size *
1485 sizeof(*rxr->rx_desc_ring));
1486 memset(rxr->rx_buf_ring, 0,
1487 rxr->rx_ring_struct->ring_size *
1488 sizeof(*rxr->rx_buf_ring));
1490 memset(rxr->ag_buf_ring, 0,
1491 rxr->ag_ring_struct->ring_size *
1492 sizeof(*rxr->ag_buf_ring));
1495 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1496 bnxt_free_cp_ring(bp, cpr, idx);
1497 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
1498 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1502 /* Default completion ring */
1504 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1506 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1507 bnxt_free_cp_ring(bp, cpr, 0);
1508 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1515 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1520 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1521 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1528 void bnxt_free_hwrm_resources(struct bnxt *bp)
1530 /* Release the HWRM command response buffer */
1531 rte_free(bp->hwrm_cmd_resp_addr);
1532 bp->hwrm_cmd_resp_addr = NULL;
1533 bp->hwrm_cmd_resp_dma_addr = 0;
1536 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1538 struct rte_pci_device *pdev = bp->pdev;
1539 char type[RTE_MEMZONE_NAMESIZE];
1541 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
1542 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1543 bp->max_req_len = HWRM_MAX_REQ_LEN;
1544 bp->max_resp_len = HWRM_MAX_RESP_LEN;
1545 bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1546 rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1547 if (bp->hwrm_cmd_resp_addr == NULL)
1549 bp->hwrm_cmd_resp_dma_addr =
1550 rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
1551 if (bp->hwrm_cmd_resp_dma_addr == 0) {
1553 "unable to map response address to physical memory\n");
1556 rte_spinlock_init(&bp->hwrm_lock);
1561 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1563 struct bnxt_filter_info *filter;
1566 STAILQ_FOREACH(filter, &vnic->filter, next) {
1567 rc = bnxt_hwrm_clear_filter(bp, filter);
1574 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1576 struct bnxt_filter_info *filter;
1579 STAILQ_FOREACH(filter, &vnic->filter, next) {
1580 rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
1587 void bnxt_free_tunnel_ports(struct bnxt *bp)
1589 if (bp->vxlan_port_cnt)
1590 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
1591 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
1593 if (bp->geneve_port_cnt)
1594 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
1595 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
1596 bp->geneve_port = 0;
1599 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1601 struct bnxt_vnic_info *vnic;
1604 if (bp->vnic_info == NULL)
1607 vnic = &bp->vnic_info[0];
1609 bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
1611 /* VNIC resources */
1612 for (i = 0; i < bp->nr_vnics; i++) {
1613 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1615 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1617 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1619 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
1621 bnxt_hwrm_vnic_free(bp, vnic);
1623 /* Ring resources */
1624 bnxt_free_all_hwrm_rings(bp);
1625 bnxt_free_all_hwrm_ring_grps(bp);
1626 bnxt_free_all_hwrm_stat_ctxs(bp);
1627 bnxt_free_tunnel_ports(bp);
1630 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1632 uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1634 if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1635 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1637 switch (conf_link_speed) {
1638 case ETH_LINK_SPEED_10M_HD:
1639 case ETH_LINK_SPEED_100M_HD:
1640 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1642 return hw_link_duplex;
1645 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
1647 uint16_t eth_link_speed = 0;
1649 if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
1650 return ETH_LINK_SPEED_AUTONEG;
1652 switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
1653 case ETH_LINK_SPEED_100M:
1654 case ETH_LINK_SPEED_100M_HD:
1656 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
1658 case ETH_LINK_SPEED_1G:
1660 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
1662 case ETH_LINK_SPEED_2_5G:
1664 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
1666 case ETH_LINK_SPEED_10G:
1668 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
1670 case ETH_LINK_SPEED_20G:
1672 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
1674 case ETH_LINK_SPEED_25G:
1676 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
1678 case ETH_LINK_SPEED_40G:
1680 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
1682 case ETH_LINK_SPEED_50G:
1684 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
1688 "Unsupported link speed %d; default to AUTO\n",
1692 return eth_link_speed;
1695 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
1696 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
1697 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
1698 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
1700 static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
1704 if (link_speed == ETH_LINK_SPEED_AUTONEG)
1707 if (link_speed & ETH_LINK_SPEED_FIXED) {
1708 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
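/* one_speed & (one_speed - 1) is non-zero when more than one speed bit is set. */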
1710 if (one_speed & (one_speed - 1)) {
1712 "Invalid advertised speeds (%u) for port %u\n",
1713 link_speed, port_id);
1716 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
1718 "Unsupported advertised speed (%u) for port %u\n",
1719 link_speed, port_id);
1723 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
1725 "Unsupported advertised speeds (%u) for port %u\n",
1726 link_speed, port_id);
1733 static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
1737 if (link_speed == ETH_LINK_SPEED_AUTONEG)
1738 link_speed = BNXT_SUPPORTED_SPEEDS;
1740 if (link_speed & ETH_LINK_SPEED_100M)
1741 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1742 if (link_speed & ETH_LINK_SPEED_100M_HD)
1743 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1744 if (link_speed & ETH_LINK_SPEED_1G)
1745 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
1746 if (link_speed & ETH_LINK_SPEED_2_5G)
1747 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
1748 if (link_speed & ETH_LINK_SPEED_10G)
1749 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
1750 if (link_speed & ETH_LINK_SPEED_20G)
1751 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
1752 if (link_speed & ETH_LINK_SPEED_25G)
1753 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
1754 if (link_speed & ETH_LINK_SPEED_40G)
1755 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
1756 if (link_speed & ETH_LINK_SPEED_50G)
1757 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
1761 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
1763 uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
1765 switch (hw_link_speed) {
1766 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
1767 eth_link_speed = ETH_SPEED_NUM_100M;
1769 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
1770 eth_link_speed = ETH_SPEED_NUM_1G;
1772 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
1773 eth_link_speed = ETH_SPEED_NUM_2_5G;
1775 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
1776 eth_link_speed = ETH_SPEED_NUM_10G;
1778 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
1779 eth_link_speed = ETH_SPEED_NUM_20G;
1781 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
1782 eth_link_speed = ETH_SPEED_NUM_25G;
1784 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
1785 eth_link_speed = ETH_SPEED_NUM_40G;
1787 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
1788 eth_link_speed = ETH_SPEED_NUM_50G;
1790 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
1792 RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
1796 return eth_link_speed;
1799 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
1801 uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1803 switch (hw_link_duplex) {
1804 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
1805 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
1806 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1808 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
1809 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
1812 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
1816 return eth_link_duplex;
1819 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
1822 struct bnxt_link_info *link_info = &bp->link_info;
1824 rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
1827 "Get link config failed with rc %d\n", rc);
1830 if (link_info->link_up)
1832 bnxt_parse_hw_link_speed(link_info->link_speed);
1834 link->link_speed = ETH_LINK_SPEED_10M;
1835 link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
1836 link->link_status = link_info->link_up;
1837 link->link_autoneg = link_info->auto_mode ==
1838 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
1839 ETH_LINK_SPEED_FIXED : ETH_LINK_SPEED_AUTONEG;
1844 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
1847 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1848 struct bnxt_link_info link_req;
1851 if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
1854 rc = bnxt_valid_link_speed(dev_conf->link_speeds,
1855 bp->eth_dev->data->port_id);
1859 memset(&link_req, 0, sizeof(link_req));
1860 link_req.link_up = link_up;
1864 speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
1865 link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
1867 link_req.phy_flags |=
1868 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
1869 link_req.auto_mode =
1870 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
1871 link_req.auto_link_speed_mask =
1872 bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
1874 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
1875 link_req.link_speed = speed;
1876 RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
1878 link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
1879 link_req.auto_pause = bp->link_info.auto_pause;
1880 link_req.force_pause = bp->link_info.force_pause;
1883 rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
1886 "Set link config failed with rc %d\n", rc);
1889 rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
1895 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
1897 struct hwrm_func_qcfg_input req = {0};
1898 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1901 HWRM_PREP(req, FUNC_QCFG, -1, resp);
1902 req.fid = rte_cpu_to_le_16(0xffff);
1904 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1908 /* Hard-coded 0xfff VLAN ID mask */
1909 bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
1911 switch (resp->port_partition_type) {
1912 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
1913 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
1914 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
1915 bp->port_partition_type = resp->port_partition_type;
1918 bp->port_partition_type = 0;
1925 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
1926 struct hwrm_func_qcaps_output *qcaps)
1928 qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
1929 memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
1930 sizeof(qcaps->mac_address));
1931 qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
1932 qcaps->max_rx_rings = fcfg->num_rx_rings;
1933 qcaps->max_tx_rings = fcfg->num_tx_rings;
1934 qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
1935 qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
1937 qcaps->first_vf_id = 0;
1938 qcaps->max_vnics = fcfg->num_vnics;
1939 qcaps->max_decap_records = 0;
1940 qcaps->max_encap_records = 0;
1941 qcaps->max_tx_wm_flows = 0;
1942 qcaps->max_tx_em_flows = 0;
1943 qcaps->max_rx_wm_flows = 0;
1944 qcaps->max_rx_em_flows = 0;
1945 qcaps->max_flow_id = 0;
1946 qcaps->max_mcast_filters = fcfg->num_mcast_filters;
1947 qcaps->max_sp_tx_rings = 0;
1948 qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
1951 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
1953 struct hwrm_func_cfg_input req = {0};
1954 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1957 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
1958 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
1959 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
1960 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
1961 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
1962 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
1963 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
1964 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
1965 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
1966 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
1967 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
1968 req.mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1969 ETHER_CRC_LEN + VLAN_TAG_SIZE);
1970 req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1971 ETHER_CRC_LEN + VLAN_TAG_SIZE);
1972 req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
1973 req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
1974 req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
1975 req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
1976 req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
1977 req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
1978 req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
1979 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
1980 req.fid = rte_cpu_to_le_16(0xffff);
1982 HWRM_PREP(req, FUNC_CFG, -1, resp);
1984 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1990 static void populate_vf_func_cfg_req(struct bnxt *bp,
1991 struct hwrm_func_cfg_input *req,
1994 req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
1995 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
1996 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
1997 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
1998 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
1999 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2000 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2001 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2002 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2003 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2005 req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2006 ETHER_CRC_LEN + VLAN_TAG_SIZE);
2007 req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2008 ETHER_CRC_LEN + VLAN_TAG_SIZE);
2009 req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2011 req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2012 req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2014 req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2015 req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2016 req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2017 /* TODO: For now, do not support VMDq/RFS on VFs. */
2018 req->num_vnics = rte_cpu_to_le_16(1);
2019 req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2023 static void add_random_mac_if_needed(struct bnxt *bp,
2024 struct hwrm_func_cfg_input *cfg_req,
2027 struct ether_addr mac;
2029 if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2032 if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2034 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2035 eth_random_addr(cfg_req->dflt_mac_addr);
2036 bp->pf.vf_info[vf].random_mac = true;
2038 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2042 static void reserve_resources_from_vf(struct bnxt *bp,
2043 struct hwrm_func_cfg_input *cfg_req,
2046 struct hwrm_func_qcaps_input req = {0};
2047 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2050 /* Get the actual allocated values now */
2051 HWRM_PREP(req, FUNC_QCAPS, -1, resp);
2052 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2053 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2056 RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
2057 copy_func_cfg_to_qcaps(cfg_req, resp);
2058 } else if (resp->error_code) {
2059 rc = rte_le_to_cpu_16(resp->error_code);
2060 RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
2061 copy_func_cfg_to_qcaps(cfg_req, resp);
2064 bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2065 bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2066 bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2067 bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2068 bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2069 bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2071 * TODO: While not supporting VMDq with VFs, max_vnics is always
2072 * forced to 1 in this case.
2074 //bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
2075 bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2078 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2080 struct hwrm_func_qcfg_input req = {0};
2081 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2084 /* Query the VF's currently configured VLAN */
2085 HWRM_PREP(req, FUNC_QCFG, -1, resp);
2086 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2087 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2089 RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
2091 } else if (resp->error_code) {
2092 rc = rte_le_to_cpu_16(resp->error_code);
2093 RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
2096 return rte_le_to_cpu_16(resp->vlan);
2099 static int update_pf_resource_max(struct bnxt *bp)
2101 struct hwrm_func_qcfg_input req = {0};
2102 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2105 /* And copy the allocated numbers into the pf struct */
2106 HWRM_PREP(req, FUNC_QCFG, -1, resp);
2107 req.fid = rte_cpu_to_le_16(0xffff);
2108 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2111 /* Only TX ring value reflects actual allocation? TODO */
2112 bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2113 bp->pf.evb_mode = resp->evb_mode;
2118 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2123 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
2127 rc = bnxt_hwrm_func_qcaps(bp);
2131 bp->pf.func_cfg_flags &=
2132 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2133 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2134 bp->pf.func_cfg_flags |=
2135 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2136 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2140 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2142 struct hwrm_func_cfg_input req = {0};
2143 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2150 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
2154 rc = bnxt_hwrm_func_qcaps(bp);
2159 bp->pf.active_vfs = num_vfs;
2162 * First, configure the PF to only use one TX ring. This ensures that
2163 * there are enough rings for all VFs.
2165 * If we don't do this, when we call func_alloc() later, we will lock
2166 * extra rings to the PF that won't be available during func_cfg() of
2169 * This has been fixed with firmware versions above 20.6.54
2171 bp->pf.func_cfg_flags &=
2172 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2173 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2174 bp->pf.func_cfg_flags |=
2175 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2176 rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2181 * Now, create and register a buffer to hold forwarded VF requests
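 * The buffer is carved into one HWRM_MAX_REQ_LEN-byte slot per VF, and its
 * address is later registered with the firmware via bnxt_hwrm_func_buf_rgtr().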
2183 req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2184 bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2185 page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
2186 if (bp->pf.vf_req_buf == NULL) {
2190 for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2191 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2192 for (i = 0; i < num_vfs; i++)
2193 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2194 (i * HWRM_MAX_REQ_LEN);
2196 rc = bnxt_hwrm_func_buf_rgtr(bp);
2200 populate_vf_func_cfg_req(bp, &req, num_vfs);
2202 bp->pf.active_vfs = 0;
2203 for (i = 0; i < num_vfs; i++) {
2204 add_random_mac_if_needed(bp, &req, i);
2206 HWRM_PREP(req, FUNC_CFG, -1, resp);
2207 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2208 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2209 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2211 /* Clear enable flag for next pass */
2212 req.enables &= ~rte_cpu_to_le_32(
2213 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2215 if (rc || resp->error_code) {
2217 "Failed to initialize VF %d\n", i);
2219 "Not all VFs available. (%d, %d)\n",
2220 rc, resp->error_code);
2224 reserve_resources_from_vf(bp, &req, i);
2225 bp->pf.active_vfs++;
2229 * Now configure the PF to use "the rest" of the resources.
2230 * We're using STD_TX_RING_MODE here, which limits the number of TX
2231 * rings; this allows QoS to function properly. Without it, the PF
2232 * rings would break the bandwidth settings.
2234 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2238 rc = update_pf_resource_max(bp);
2245 bnxt_hwrm_func_buf_unrgtr(bp);
2249 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2251 struct hwrm_func_cfg_input req = {0};
2252 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2255 HWRM_PREP(req, FUNC_CFG, -1, resp);
2257 req.fid = rte_cpu_to_le_16(0xffff);
2258 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2259 req.evb_mode = bp->pf.evb_mode;
2261 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
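/*
 * Allocate a tunnel (VXLAN or Geneve) destination UDP port in the firmware
 * and cache the returned firmware port id for a later free.
 */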
2267 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2268 uint8_t tunnel_type)
2270 struct hwrm_tunnel_dst_port_alloc_input req = {0};
2271 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2274 HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, -1, resp);
2275 req.tunnel_type = tunnel_type;
2276 req.tunnel_dst_port_val = port;
2277 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2280 switch (tunnel_type) {
2281 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2282 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2283 bp->vxlan_port = port;
2285 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2286 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2287 bp->geneve_port = port;
2295 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2296 uint8_t tunnel_type)
2298 struct hwrm_tunnel_dst_port_free_input req = {0};
2299 struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2302 HWRM_PREP(req, TUNNEL_DST_PORT_FREE, -1, resp);
2303 req.tunnel_type = tunnel_type;
2304 req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2305 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2311 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf)
2313 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2314 struct hwrm_func_cfg_input req = {0};
2317 HWRM_PREP(req, FUNC_CFG, -1, resp);
2318 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2319 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2320 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
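/*
 * vnic_cb helper for bnxt_hwrm_func_vf_vnic_query_and_config(): copies the
 * caller-supplied flags word into each of the VF's VNICs.
 */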
2326 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2328 uint32_t *flag = flagp;
2330 vnic->flags = *flag;
2333 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2335 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
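/*
 * Register the forwarded-request buffer (bp->pf.vf_req_buf, allocated in
 * bnxt_hwrm_allocate_vfs()) with the firmware so that HWRM requests issued
 * by VFs can be forwarded to the PF driver.
 */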
2338 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2341 struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2342 struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2344 HWRM_PREP(req, FUNC_BUF_RGTR, -1, resp);
2346 req.req_buf_num_pages = rte_cpu_to_le_16(1);
2347 req.req_buf_page_size = rte_cpu_to_le_16(
2348 page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2349 req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2350 req.req_buf_page_addr[0] =
2351 rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
2352 if (req.req_buf_page_addr[0] == 0) {
2354 "unable to map buffer address to physical memory\n");
2358 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2365 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2368 struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2369 struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2371 HWRM_PREP(req, FUNC_BUF_UNRGTR, -1, resp);
2373 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2380 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2382 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2383 struct hwrm_func_cfg_input req = {0};
2386 HWRM_PREP(req, FUNC_CFG, -1, resp);
2387 req.fid = rte_cpu_to_le_16(0xffff);
2388 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2389 req.enables = rte_cpu_to_le_32(
2390 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2391 req.async_event_cr = rte_cpu_to_le_16(
2392 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2393 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2399 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2401 struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2402 struct hwrm_func_vf_cfg_input req = {0};
2405 HWRM_PREP(req, FUNC_VF_CFG, -1, resp);
2406 req.enables = rte_cpu_to_le_32(
2407 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2408 req.async_event_cr = rte_cpu_to_le_16(
2409 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2410 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2416 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2418 struct hwrm_func_cfg_input req = {0};
2419 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2420 uint16_t dflt_vlan, fid;
2421 uint32_t func_cfg_flags;
2424 HWRM_PREP(req, FUNC_CFG, -1, resp);
2427 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2428 fid = bp->pf.vf_info[vf].fid;
2429 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2431 fid = rte_cpu_to_le_16(0xffff);
2432 func_cfg_flags = bp->pf.func_cfg_flags;
2433 dflt_vlan = bp->vlan;
2436 req.flags = rte_cpu_to_le_32(func_cfg_flags);
2437 req.fid = rte_cpu_to_le_16(fid);
2438 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2439 req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2441 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2447 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2448 uint16_t max_bw, uint16_t enables)
2450 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2451 struct hwrm_func_cfg_input req = {0};
2454 HWRM_PREP(req, FUNC_CFG, -1, resp);
2455 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2456 req.enables |= rte_cpu_to_le_32(enables);
2457 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2458 req.max_bw = rte_cpu_to_le_32(max_bw);
2459 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2465 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2467 struct hwrm_func_cfg_input req = {0};
2468 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2471 HWRM_PREP(req, FUNC_CFG, -1, resp);
2472 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2473 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2474 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2475 req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
2477 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
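/*
 * Tell the firmware to reject an HWRM request that was forwarded from a VF;
 * 'encaped' holds the original encapsulated request.
 */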
2483 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2484 void *encaped, size_t ec_size)
2487 struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2488 struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2490 if (ec_size > sizeof(req.encap_request))
2493 HWRM_PREP(req, REJECT_FWD_RESP, -1, resp);
2495 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2496 memcpy(req.encap_request, encaped, ec_size);
2498 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2505 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2506 struct ether_addr *mac)
2508 struct hwrm_func_qcfg_input req = {0};
2509 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2512 HWRM_PREP(req, FUNC_QCFG, -1, resp);
2513 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2514 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2518 memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2522 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2523 void *encaped, size_t ec_size)
2526 struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2527 struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2529 if (ec_size > sizeof(req.encap_request))
2532 HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
2534 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2535 memcpy(req.encap_request, encaped, ec_size);
2537 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
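/*
 * Query the statistics context 'cid' and accumulate its counters into the
 * per-queue fields of 'stats' at index 'idx'.
 */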
2544 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
2545 struct rte_eth_stats *stats)
2548 struct hwrm_stat_ctx_query_input req = {.req_type = 0};
2549 struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
2551 HWRM_PREP(req, STAT_CTX_QUERY, -1, resp);
2553 req.stat_ctx_id = rte_cpu_to_le_32(cid);
2555 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2559 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2560 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2561 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2562 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2563 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2564 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2566 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2567 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2568 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2569 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2570 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2571 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2573 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
2574 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
2575 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
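/*
 * Ask the firmware to DMA the port statistics into the host buffers mapped
 * at hw_tx_port_stats_map / hw_rx_port_stats_map; only done when the
 * BNXT_FLAG_PORT_STATS flag is set.
 */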
2580 int bnxt_hwrm_port_qstats(struct bnxt *bp)
2582 struct hwrm_port_qstats_input req = {0};
2583 struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2584 struct bnxt_pf_info *pf = &bp->pf;
2587 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2590 HWRM_PREP(req, PORT_QSTATS, -1, resp);
2591 req.port_id = rte_cpu_to_le_16(pf->port_id);
2592 req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
2593 req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
2594 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2599 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
2601 struct hwrm_port_clr_stats_input req = {0};
2602 struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2603 struct bnxt_pf_info *pf = &bp->pf;
2606 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2609 HWRM_PREP(req, PORT_CLR_STATS, -1, resp);
2610 req.port_id = rte_cpu_to_le_16(pf->port_id);
2611 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
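/*
 * Query the port LED capabilities and cache the per-LED info in bp->leds /
 * bp->num_leds for later use by bnxt_hwrm_port_led_cfg().
 */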
2616 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
2618 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2619 struct hwrm_port_led_qcaps_input req = {0};
2625 HWRM_PREP(req, PORT_LED_QCAPS, -1, resp);
2626 req.port_id = bp->pf.port_id;
2627 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2630 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
2633 bp->num_leds = resp->num_leds;
2634 memcpy(bp->leds, &resp->led0_id,
2635 sizeof(bp->leds[0]) * bp->num_leds);
2636 for (i = 0; i < bp->num_leds; i++) {
2637 struct bnxt_led_info *led = &bp->leds[i];
2639 uint16_t caps = led->led_state_caps;
2641 if (!led->led_group_id ||
2642 !BNXT_LED_ALT_BLINK_CAP(caps)) {
2651 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
2653 struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2654 struct hwrm_port_led_cfg_input req = {0};
2655 struct bnxt_led_cfg *led_cfg;
2656 uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
2657 uint16_t duration = 0;
2660 if (!bp->num_leds || BNXT_VF(bp))
2663 HWRM_PREP(req, PORT_LED_CFG, -1, resp);
2665 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
2666 duration = rte_cpu_to_le_16(500);
2668 req.port_id = bp->pf.port_id;
2669 req.num_leds = bp->num_leds;
2670 led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
2671 for (i = 0; i < bp->num_leds; i++, led_cfg++) {
2672 req.enables |= BNXT_LED_DFLT_ENABLES(i);
2673 led_cfg->led_id = bp->leds[i].led_id;
2674 led_cfg->led_state = led_state;
2675 led_cfg->led_blink_on = duration;
2676 led_cfg->led_blink_off = duration;
2677 led_cfg->led_group_id = bp->leds[i].led_group_id;
2680 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2686 static void bnxt_vnic_count(struct bnxt_vnic_info *vnic, void *cbdata)
2688 uint32_t *count = cbdata;
2690 if (vnic->func_default)
2691 *count = *count + 1;
2694 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
2695 struct bnxt_vnic_info *vnic __rte_unused)
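/*
 * Count how many default VNICs the given VF currently owns, by walking its
 * VNIC list with bnxt_vnic_count() and a no-op hwrm_cb.
 */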
2700 int bnxt_vf_default_vnic_count(struct bnxt *bp, uint16_t vf)
2704 bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
2705 &count, bnxt_vnic_count_hwrm_stub);
2710 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
2713 struct hwrm_func_vf_vnic_ids_query_input req = {0};
2714 struct hwrm_func_vf_vnic_ids_query_output *resp =
2715 bp->hwrm_cmd_resp_addr;
2718 /* First query all VNIC ids */
2719 HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, -1, resp);
2721 req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
2722 req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
2723 req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));
2725 if (req.vnic_id_tbl_addr == 0) {
2727 "unable to map VNIC ID table address to physical memory\n");
2730 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2732 RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
2734 } else if (resp->error_code) {
2735 rc = rte_le_to_cpu_16(resp->error_code);
2736 RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
2740 return rte_le_to_cpu_32(resp->vnic_id_cnt);
2744 * This function queries the VNIC IDs for a specified VF. It then calls
2745 * the vnic_cb to update the necessary field in vnic_info with cbdata.
2746 * Then it calls the hwrm_cb function to program this new vnic configuration.
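 *
 * For example (a sketch using helpers defined earlier in this file), the
 * flags of every VNIC owned by a VF could be reprogrammed with:
 *
 *	uint32_t flag = 0;	/* desired vnic->flags value */
 *
 *	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
 *		vf_vnic_set_rxmask_cb, &flag,
 *		bnxt_set_rx_mask_no_vlan);
 *
 * See bnxt_vf_default_vnic_count() above for a caller that only inspects
 * the VNICs and passes a stub hwrm_cb.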
2748 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
2749 void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
2750 int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
2752 struct bnxt_vnic_info vnic;
2754 int i, num_vnic_ids;
2759 /* First query all VNIC ids */
2760 vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
2761 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
2762 RTE_CACHE_LINE_SIZE);
2763 if (vnic_ids == NULL) {
2767 for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
2768 rte_mem_lock_page(((char *)vnic_ids) + sz);
2770 num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
2772 if (num_vnic_ids < 0)
2773 return num_vnic_ids;
2775 /* Query each VNIC, let vnic_cb update it, then program it via hwrm_cb */
2777 for (i = 0; i < num_vnic_ids; i++) {
2778 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
2779 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
2780 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
2783 if (vnic.mru == 4) /* Indicates unallocated */
2786 vnic_cb(&vnic, cbdata);
2788 rc = hwrm_cb(bp, &vnic);
2798 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
2801 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2802 struct hwrm_func_cfg_input req = {0};
2805 HWRM_PREP(req, FUNC_CFG, -1, resp);
2806 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2807 req.enables |= rte_cpu_to_le_32(
2808 HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
2809 req.vlan_antispoof_mode = on ?
2810 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
2811 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
2812 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2818 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
2820 struct bnxt_vnic_info vnic;
2823 int num_vnic_ids, i;
2827 vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
2828 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
2829 RTE_CACHE_LINE_SIZE);
2830 if (vnic_ids == NULL) {
2835 for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
2836 rte_mem_lock_page(((char *)vnic_ids) + sz);
2838 rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
2844 * Loop through to find the default VNIC ID.
2845 * TODO: The easier way would be to obtain the resp->dflt_vnic_id
2846 * by sending the hwrm_func_qcfg command to the firmware.
2848 for (i = 0; i < num_vnic_ids; i++) {
2849 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
2850 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
2851 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
2852 bp->pf.first_vf_id + vf);
2855 if (vnic.func_default) {
2857 return vnic.fw_vnic_id;
2860 /* Could not find a default VNIC. */
2861 RTE_LOG(ERR, PMD, "No default VNIC\n");