/*-
 *   BSD LICENSE
 *
 * Copyright(c) Broadcom Limited.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * * Neither the name of Broadcom Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>
#include <rte_io.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#define HWRM_CMD_TIMEOUT		2000
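/*
 * The value above is the number of iterations of the response-polling loop
 * in bnxt_hwrm_send_message_locked(); with the ~600us poll interval assumed
 * there, each command is given roughly 1.2 seconds to complete.
 */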
struct bnxt_plcmodes_cfg {
	uint32_t	flags;
	uint16_t	jumbo_thresh;
	uint16_t	hds_offset;
	uint16_t	hds_threshold;
};
/* Map a buffer size to the log2 of the smallest supported page size */
static int page_getenum(size_t size)
{
	if (size <= 1 << 4)
		return 4;
	if (size <= 1 << 12)
		return 12;
	if (size <= 1 << 13)
		return 13;
	if (size <= 1 << 16)
		return 16;
	if (size <= 1 << 21)
		return 21;
	if (size <= 1 << 22)
		return 22;
	if (size <= 1 << 30)
		return 30;
	RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
	return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
	return 1 << page_getenum(size);
}
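/*
 * Illustrative behavior, given the size ladder above: page_getenum(4096)
 * returns 12, so page_roundup(4096) == 4096, while page_roundup(6000)
 * rounds up to the next supported step, 1 << 13 == 8192.
 */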
/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * HWRM command was failed by the ChiMP.
 */
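/*
 * Typical caller pattern (an illustrative sketch only, not a prescribed
 * API; the handler names are hypothetical):
 *
 *	rc = bnxt_hwrm_func_reset(bp);
 *	if (rc < 0)
 *		handle_channel_failure();	-- e.g. a timeout
 *	else if (rc > 0)
 *		handle_hwrm_error(rc);		-- error code from the ChiMP
 */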
static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
					 uint32_t msg_len)
{
	unsigned int i;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;
	uint8_t *bar;
	uint8_t *valid;

	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(*data, bar);
		data++;
	}

	/* Zero the rest of the request space */
	for (; i < bp->max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(0, bar);
	}

	/* Ring channel doorbell */
	bar = (uint8_t *)bp->bar0 + 0x100;
	rte_write32(1, bar);

	/* Poll for the valid bit */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len */
		rte_rmb();
		if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)
				break;
		}
		rte_delay_us(600);
	}

	if (i >= HWRM_CMD_TIMEOUT) {
		RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
			req->req_type);
		return -1;
	}
	return 0;
}
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
{
	int rc;

	rte_spinlock_lock(&bp->hwrm_lock);
	rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
	rte_spinlock_unlock(&bp->hwrm_lock);
	return rc;
}
#define HWRM_PREP(req, type, cr, resp) \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
	req.cmpl_ring = rte_cpu_to_le_16(cr); \
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
	req.target_id = rte_cpu_to_le_16(0xffff); \
	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)
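/*
 * For example, HWRM_PREP(req, VER_GET, -1, resp) zeroes the shared response
 * buffer and stamps `req` with req_type HWRM_VER_GET, no completion ring
 * (-1), the next sequence id, the broadcast target id 0xffff, and the DMA
 * address of the response buffer.
 */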
#define HWRM_CHECK_RESULT \
	{ \
		if (rc) { \
			RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
				__func__, rc); \
			return rc; \
		} \
		if (resp->error_code) { \
			rc = rte_le_to_cpu_16(resp->error_code); \
			if (resp->resp_len >= 16) { \
				struct hwrm_err_output *tmp_hwrm_err_op = \
							(void *)resp; \
				RTE_LOG(ERR, PMD, \
					"%s error %d:%d:%08x:%04x\n", \
					__func__, \
					rc, tmp_hwrm_err_op->cmd_err, \
					rte_le_to_cpu_32( \
						tmp_hwrm_err_op->opaque_0), \
					rte_le_to_cpu_16( \
						tmp_hwrm_err_op->opaque_1)); \
			} else { \
				RTE_LOG(ERR, PMD, \
					"%s error %d\n", __func__, rc); \
			} \
			return rc; \
		} \
	}
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.mask = 0;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
				 struct bnxt_vnic_info *vnic,
				 uint16_t vlan_count,
				 struct bnxt_vlan_table_entry *vlan_table)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t mask = 0;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	/* FIXME add multicast flag, when multicast adding options is supported
	 * by ethtool.
	 */
	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
		mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	if (vnic->mc_addr_cnt) {
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
		req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
		req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
	}
	if (vlan_count && vlan_table) {
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
		/* The VLAN table address field is 64 bits wide */
		req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
			rte_mem_virt2phy(vlan_table));
		req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
	}
	/* Compute the final mask only after all optional bits are set */
	req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
				    mask);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
int bnxt_hwrm_clear_filter(struct bnxt *bp,
			   struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);

	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	filter->fw_l2_filter_id = -1;

	return 0;
}
int bnxt_hwrm_set_filter(struct bnxt *bp,
			 uint16_t dst_id,
			 struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
		req.src_id = rte_cpu_to_le_32(filter->src_id);
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
		req.src_type = filter->src_type;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);

	return rc;
}
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t new_max_vfs;
	int i;

	HWRM_PREP(req, FUNC_QCAPS, -1, resp);

	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	if (BNXT_PF(bp)) {
		bp->pf.port_id = resp->port_id;
		bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		new_max_vfs = bp->pdev->max_vfs;
		if (new_max_vfs != bp->pf.max_vfs) {
			if (bp->pf.vf_info)
				rte_free(bp->pf.vf_info);
			bp->pf.vf_info = rte_malloc("bnxt_vf_info",
			    sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
			bp->pf.max_vfs = new_max_vfs;
			for (i = 0; i < new_max_vfs; i++) {
				bp->pf.vf_info[i].fid =
					bp->pf.first_vf_id + i;
				bp->pf.vf_info[i].vlan_table =
					rte_zmalloc("VF VLAN table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_table == NULL)
					RTE_LOG(ERR, PMD,
					"Fail to alloc VLAN table for VF %d\n",
					i);
				else
					rte_mem_lock_page(
					    bp->pf.vf_info[i].vlan_table);
				STAILQ_INIT(&bp->pf.vf_info[i].filter);
			}
		}
	}

	bp->fw_fid = rte_le_to_cpu_32(resp->fid);
	memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
	/* TODO: For now, do not support VMDq/RFS on VFs. */
	if (BNXT_PF(bp)) {
		if (bp->pf.max_vfs)
			bp->max_vnics = 1;
		else
			bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	} else {
		bp->max_vnics = 1;
	}
	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
	if (BNXT_PF(bp))
		bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);

	return rc;
}
int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_RESET, -1, resp);

	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
	int rc;
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)
		return 0;

	HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

	if (BNXT_PF(bp)) {
		req.enables |= rte_cpu_to_le_32(
				HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD);
		memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
		       RTE_MIN(sizeof(req.vf_req_fwd),
			       sizeof(bp->pf.vf_req_fwd)));
	}

	req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */
	memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->flags |= BNXT_FLAG_REGISTERED;

	return rc;
}
int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_ver_get_input req = {.req_type = 0 };
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t my_version;
	uint32_t fw_version;
	uint16_t max_resp_len;
	char type[RTE_MEMZONE_NAMESIZE];

	HWRM_PREP(req, VER_GET, -1, resp);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	/*
	 * Hold the lock since we may be adjusting the response pointers.
	 */
	rte_spinlock_lock(&bp->hwrm_lock);
	rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));
	if (rc)
		goto error;

	RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
		resp->hwrm_intf_maj, resp->hwrm_intf_min,
		resp->hwrm_intf_upd,
		resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
	bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
		     (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
	RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

	my_version = HWRM_VERSION_MAJOR << 16;
	my_version |= HWRM_VERSION_MINOR << 8;
	my_version |= HWRM_VERSION_UPDATE;

	fw_version = resp->hwrm_intf_maj << 16;
	fw_version |= resp->hwrm_intf_min << 8;
	fw_version |= resp->hwrm_intf_upd;

	if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
		RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
		rc = -EINVAL;
		goto error;
	}

	if (my_version != fw_version) {
		RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
		if (my_version < fw_version) {
			RTE_LOG(INFO, PMD,
				"Firmware API version is newer than driver.\n");
			RTE_LOG(INFO, PMD,
				"The driver may be missing features.\n");
		} else {
			RTE_LOG(INFO, PMD,
				"Firmware API version is older than driver.\n");
			RTE_LOG(INFO, PMD,
				"Not all driver features may be functional.\n");
		}
	}

	if (bp->max_req_len > resp->max_req_win_len) {
		RTE_LOG(ERR, PMD, "Unsupported request length\n");
		rc = -EINVAL;
	}
	bp->max_req_len = resp->max_req_win_len;
	max_resp_len = resp->max_resp_len;
	if (bp->max_resp_len != max_resp_len) {
		sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
			bp->pdev->addr.domain, bp->pdev->addr.bus,
			bp->pdev->addr.devid, bp->pdev->addr.function);

		rte_free(bp->hwrm_cmd_resp_addr);

		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
		if (bp->hwrm_cmd_resp_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
		bp->hwrm_cmd_resp_dma_addr =
			rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
		if (bp->hwrm_cmd_resp_dma_addr == 0) {
			RTE_LOG(ERR, PMD,
			"Unable to map response buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}
		bp->max_resp_len = max_resp_len;
	}

error:
	rte_spinlock_unlock(&bp->hwrm_lock);
	return rc;
}
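/*
 * Example of the encoding used above: an interface version of 1.5.1 packs
 * into (1 << 16) | (5 << 8) | 1 == 0x010501, so the driver/firmware
 * comparison is a simple integer compare, major field first.
 */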
int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
	int rc;
	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))
		return 0;

	HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
	req.flags = flags;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->flags &= ~BNXT_FLAG_REGISTERED;

	return rc;
}
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
	int rc = 0;
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

	if (conf->link_up) {
		req.flags = rte_cpu_to_le_32(conf->phy_flags);
		req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (!conf->link_speed) {
			req.auto_mode |= conf->auto_mode;
			enables = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
			req.auto_link_speed_mask = conf->auto_link_speed_mask;
			enables |=
			  HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
			req.auto_link_speed = bp->link_info.auto_link_speed;
			enables |=
			  HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
		}
		req.auto_duplex = conf->duplex;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		req.force_pause = conf->force_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause && !req.force_pause)
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		else
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

		req.enables = rte_cpu_to_le_32(enables);
	} else {
		req.flags =
		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
		RTE_LOG(INFO, PMD, "Force Link Down\n");
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
{
	int rc = 0;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	link_info->phy_link_status = resp->link;
	if (link_info->phy_link_status !=
	    HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK) {
		link_info->link_up = 1;
		link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
	} else {
		link_info->link_up = 0;
		link_info->link_speed = 0;
	}
	link_info->duplex = resp->duplex;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;

	return rc;
}
int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

	GET_QUEUE_INFO(0);
	GET_QUEUE_INFO(1);
	GET_QUEUE_INFO(2);
	GET_QUEUE_INFO(3);
	GET_QUEUE_INFO(4);
	GET_QUEUE_INFO(5);
	GET_QUEUE_INFO(6);
	GET_QUEUE_INFO(7);

	return rc;
}
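/*
 * For reference, GET_QUEUE_INFO(0) above expands via token pasting to:
 *	bp->cos_queue[0].id = resp->queue_id0;
 *	bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 */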
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
	int rc = 0;
	uint32_t enables = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC, -1, resp);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);
	req.length = rte_cpu_to_le_32(ring->ring_size);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.queue_id = bp->cos_queue[0].id;
		/* FALLTHROUGH */
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		if (stats_ctx_id != INVALID_STATS_CTX_ID)
			enables |=
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
		break;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		break;
	default:
		RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
			ring_type);
		return -1;
	}
	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
			return rc;
		}
	}

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
	return rc;
}
int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
{
	int rc;
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_FREE, -1, resp);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
				rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
			return rc;
		}
	}
	return 0;
}
int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
	int rc = 0;
	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);

	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->grp_info[idx].fw_grp_id =
	    rte_le_to_cpu_16(resp->ring_group_id);

	return rc;
}
int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
	int rc;
	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_FREE, -1, resp);

	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;

	return rc;
}
int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	int rc = 0;
	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
		return rc;

	HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			     unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);

	req.update_period_ms = rte_cpu_to_le_32(0);

	req.stats_dma_addr =
	    rte_cpu_to_le_64(cpr->hw_stats_map);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

	return rc;
}
int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			    unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_FREE, -1, resp);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0, i, j;
	struct hwrm_vnic_alloc_input req = { 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
		vnic->start_grp_id, vnic->end_grp_id);
	for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
	vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
	vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				ETHER_CRC_LEN + VLAN_TAG_SIZE;
	HWRM_PREP(req, VNIC_ALLOC, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
	return rc;
}
static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
					struct bnxt_vnic_info *vnic,
					struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_QCFG, -1, resp);

	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	pmode->flags = rte_le_to_cpu_32(resp->flags);
	/* dflt_vnic bit doesn't exist in the _cfg command */
	pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
	pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
	pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
	pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

	return rc;
}
static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
				       struct bnxt_vnic_info *vnic,
				       struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);

	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
	req.flags = rte_cpu_to_le_32(pmode->flags);
	req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
	req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
	req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
	req.enables = rte_cpu_to_le_32(
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
	);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
	struct bnxt_plcmodes_cfg pmodes;

	rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
	if (rc)
		return rc;

	HWRM_PREP(req, VNIC_CFG, -1, resp);

	/* Only RSS support for now TBD: COS & LB */
	req.enables =
	    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
			     HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
	if (vnic->lb_rule != 0xffff)
		ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
	if (vnic->cos_rule != 0xffff)
		ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
	if (vnic->rss_rule != 0xffff)
		ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
	req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
	req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
	req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
	req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
	req.mru = rte_cpu_to_le_16(vnic->mru);
	if (vnic->func_default)
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
	if (vnic->vlan_strip)
		req.flags |= rte_cpu_to_le_32(
		    HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
	if (vnic->bd_stall)
		req.flags |= rte_cpu_to_le_32(
		    HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
	if (vnic->roce_dual)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
	if (vnic->roce_only)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
	if (vnic->rss_dflt_cr)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);

	return rc;
}
int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
			int16_t fw_vf_id)
{
	int rc = 0;
	struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_QCFG, -1, resp);

	req.enables =
		rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.vf_id = rte_cpu_to_le_16(fw_vf_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
	vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
	vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
	vnic->mru = rte_le_to_cpu_16(resp->mru);
	vnic->func_default = rte_le_to_cpu_32(
			resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
	vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
	vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
	vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
	vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
	vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;

	return rc;
}
int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);

	return rc;
}
int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);

	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->rss_rule = INVALID_HW_RING_ID;

	return rc;
}
int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_free_input req = {.req_type = 0 };
	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
		return rc;

	HWRM_PREP(req, VNIC_FREE, -1, resp);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_vnic_id = INVALID_HW_RING_ID;

	return rc;
}
int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);

	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

	req.ring_grp_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_table_dma_addr);
	req.hash_key_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
	req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
			       struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t size;

	HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);

	req.flags = rte_cpu_to_le_32(
			HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);

	req.enables = rte_cpu_to_le_32(
		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);

	size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
	size -= RTE_PKTMBUF_HEADROOM;

	req.jumbo_thresh = rte_cpu_to_le_16(size);
	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic, bool enable)
{
	int rc = 0;
	struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_TPA_CFG, -1, resp);

	/* Target the VNIC for both the enable and the disable case */
	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
	if (enable) {
		req.enables = rte_cpu_to_le_32(
			HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
			HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
			HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
		req.flags = rte_cpu_to_le_32(
			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
		req.max_agg_segs = rte_cpu_to_le_16(5);
		req.max_aggs =
			rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
		req.min_agg_len = rte_cpu_to_le_32(512);
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
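/*
 * With the values programmed above, hardware aggregation ("TPA") combines
 * at most 5 segments per aggregation, allows the firmware maximum number of
 * concurrent aggregations, and only aggregates packets of at least
 * 512 bytes.
 */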
int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);

	HWRM_PREP(req, FUNC_CFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	bp->pf.vf_info[vf].random_mac = false;

	return rc;
}
int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
				  uint64_t *dropped)
{
	int rc = 0;
	struct hwrm_func_qstats_input req = {.req_type = 0};
	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QSTATS, -1, resp);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	if (dropped)
		*dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);

	return rc;
}
int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
			  struct rte_eth_stats *stats)
{
	int rc = 0;
	struct hwrm_func_qstats_input req = {.req_type = 0};
	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QSTATS, -1, resp);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
	stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
	stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
	stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
	stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
	stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);

	stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
	stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
	stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
	stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
	stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
	stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);

	stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
	stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);

	stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);

	return rc;
}
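/*
 * Note how the counters map: DPDK's single ipackets/opackets values are the
 * sums of the firmware's per-class unicast, multicast and broadcast
 * counters, and imissed is taken from the firmware Rx drop counter.
 */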
int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
{
	int rc = 0;
	struct hwrm_func_clr_stats_input req = {.req_type = 0};
	struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_CLR_STATS, -1, resp);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
/*
 * HWRM utility functions
 */
int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_clear(bp, cpr);
		if (rc)
			return rc;
	}
	return 0;
}
int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	int rc;
	unsigned int i;
	struct bnxt_cp_ring_info *cpr;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {

		if (i >= bp->rx_cp_nr_rings)
			cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
		else
			cpr = bp->rx_queues[i]->cp_ring;
		if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
			rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
			cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
			/*
			 * TODO. Need a better way to reset grp_info.stats_ctx
			 * for Rx rings only. stats_ctx is not saved for Tx
			 * in grp_info.
			 */
			bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
			if (rc)
				return rc;
		}
	}
	return 0;
}
int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);

		if (rc)
			return rc;
	}
	return rc;
}
int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
	uint16_t idx;
	uint32_t rc = 0;

	for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {

		if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
			RTE_LOG(ERR, PMD,
				"Attempt to free invalid ring group %d\n",
				idx);
			continue;
		}

		rc = bnxt_hwrm_ring_grp_free(bp, idx);

		if (rc)
			return rc;
	}
	return rc;
}
static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			      unsigned int idx __rte_unused)
{
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

	bnxt_hwrm_ring_free(bp, cp_ring,
			HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
	bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
			sizeof(*cpr->cp_desc_ring));
	cpr->cp_raw_cons = 0;
}
int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
		unsigned int idx = bp->rx_cp_nr_rings + i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_TX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			memset(txr->tx_desc_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_desc_ring));
			memset(txr->tx_buf_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_buf_ring));
			txr->tx_prod = 0;
			txr->tx_cons = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_free_cp_ring(bp, cpr, idx);
			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
		struct bnxt_ring *ring = rxr->rx_ring_struct;
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
		unsigned int idx = i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_RX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
			memset(rxr->rx_desc_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_desc_ring));
			memset(rxr->rx_buf_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_buf_ring));
			rxr->rx_prod = 0;
			memset(rxr->ag_buf_ring, 0,
					rxr->ag_ring_struct->ring_size *
					sizeof(*rxr->ag_buf_ring));
			rxr->ag_prod = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_free_cp_ring(bp, cpr, idx);
			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	/* Default completion ring */
	{
		struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_free_cp_ring(bp, cpr, 0);
			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	return rc;
}
int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
{
	uint16_t i;
	uint32_t rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		rc = bnxt_hwrm_ring_grp_alloc(bp, i);
		if (rc)
			return rc;
	}
	return rc;
}
void bnxt_free_hwrm_resources(struct bnxt *bp)
{
	/* Release the response buffer allocated in bnxt_alloc_hwrm_resources() */
	rte_free(bp->hwrm_cmd_resp_addr);
	bp->hwrm_cmd_resp_addr = NULL;
	bp->hwrm_cmd_resp_dma_addr = 0;
}
int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
	struct rte_pci_device *pdev = bp->pdev;
	char type[RTE_MEMZONE_NAMESIZE];

	sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	bp->max_req_len = HWRM_MAX_REQ_LEN;
	bp->max_resp_len = HWRM_MAX_RESP_LEN;
	bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
	if (bp->hwrm_cmd_resp_addr == NULL)
		return -ENOMEM;
	rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
	bp->hwrm_cmd_resp_dma_addr =
		rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
	if (bp->hwrm_cmd_resp_dma_addr == 0) {
		RTE_LOG(ERR, PMD,
			"unable to map response address to physical memory\n");
		return -ENOMEM;
	}
	rte_spinlock_init(&bp->hwrm_lock);

	return 0;
}
int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		rc = bnxt_hwrm_clear_filter(bp, filter);
		if (rc)
			break;
	}
	return rc;
}

int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
		if (rc)
			break;
	}
	return rc;
}
void bnxt_free_tunnel_ports(struct bnxt *bp)
{
	if (bp->vxlan_port_cnt)
		bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
	bp->vxlan_port = 0;
	if (bp->geneve_port_cnt)
		bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
	bp->geneve_port = 0;
}
void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
	struct bnxt_vnic_info *vnic;
	unsigned int i;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];
	if (BNXT_PF(bp))
		bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);

	/* VNIC resources */
	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		bnxt_clear_hwrm_vnic_filters(bp, vnic);

		bnxt_hwrm_vnic_ctx_free(bp, vnic);

		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);

		bnxt_hwrm_vnic_free(bp, vnic);
	}
	/* Ring resources */
	bnxt_free_all_hwrm_rings(bp);
	bnxt_free_all_hwrm_ring_grps(bp);
	bnxt_free_all_hwrm_stat_ctxs(bp);
	bnxt_free_tunnel_ports(bp);
}
static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
{
	uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	switch (conf_link_speed) {
	case ETH_LINK_SPEED_10M_HD:
	case ETH_LINK_SPEED_100M_HD:
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
	}
	return hw_link_duplex;
}
static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
{
	uint16_t eth_link_speed = 0;

	if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
		return ETH_LINK_SPEED_AUTONEG;

	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
	case ETH_LINK_SPEED_100M:
	case ETH_LINK_SPEED_100M_HD:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
		break;
	case ETH_LINK_SPEED_1G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
		break;
	case ETH_LINK_SPEED_2_5G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
		break;
	case ETH_LINK_SPEED_10G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
		break;
	case ETH_LINK_SPEED_20G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
		break;
	case ETH_LINK_SPEED_25G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
		break;
	case ETH_LINK_SPEED_40G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
		break;
	case ETH_LINK_SPEED_50G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
		break;
	default:
		RTE_LOG(ERR, PMD,
			"Unsupported link speed %d; default to AUTO\n",
			conf_link_speed);
		break;
	}
	return eth_link_speed;
}
#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
		ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
		ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
		ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
{
	uint32_t one_speed;

	if (link_speed == ETH_LINK_SPEED_AUTONEG)
		return 0;

	if (link_speed & ETH_LINK_SPEED_FIXED) {
		one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;

		if (one_speed & (one_speed - 1)) {
			RTE_LOG(ERR, PMD,
				"Invalid advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
		if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
			RTE_LOG(ERR, PMD,
				"Unsupported advertised speed (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	} else {
		if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
			RTE_LOG(ERR, PMD,
				"Unsupported advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	}
	return 0;
}
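/*
 * The (one_speed & (one_speed - 1)) test above is the usual power-of-two
 * check: a fixed-speed request must advertise exactly one speed bit.
 * For example, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_FIXED passes, while
 * (ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G) | ETH_LINK_SPEED_FIXED fails.
 */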
static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
{
	uint16_t ret = 0;

	if (link_speed == ETH_LINK_SPEED_AUTONEG)
		link_speed = BNXT_SUPPORTED_SPEEDS;

	if (link_speed & ETH_LINK_SPEED_100M)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_100M_HD)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_1G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
	if (link_speed & ETH_LINK_SPEED_2_5G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
	if (link_speed & ETH_LINK_SPEED_10G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
	if (link_speed & ETH_LINK_SPEED_20G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
	if (link_speed & ETH_LINK_SPEED_25G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
	if (link_speed & ETH_LINK_SPEED_40G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
	if (link_speed & ETH_LINK_SPEED_50G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
	return ret;
}
static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
{
	uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;

	switch (hw_link_speed) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
		eth_link_speed = ETH_SPEED_NUM_100M;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
		eth_link_speed = ETH_SPEED_NUM_1G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
		eth_link_speed = ETH_SPEED_NUM_2_5G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
		eth_link_speed = ETH_SPEED_NUM_10G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
		eth_link_speed = ETH_SPEED_NUM_20G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
		eth_link_speed = ETH_SPEED_NUM_25G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
		eth_link_speed = ETH_SPEED_NUM_40G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
		eth_link_speed = ETH_SPEED_NUM_50G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
	default:
		RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
			hw_link_speed);
		break;
	}
	return eth_link_speed;
}
static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
{
	uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;

	switch (hw_link_duplex) {
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	default:
		RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
			hw_link_duplex);
		break;
	}
	return eth_link_duplex;
}
int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Get link config failed with rc %d\n", rc);
		goto exit;
	}
	if (link_info->link_up)
		link->link_speed =
			bnxt_parse_hw_link_speed(link_info->link_speed);
	else
		link->link_speed = ETH_LINK_SPEED_10M;
	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
	link->link_status = link_info->link_up;
	link->link_autoneg = link_info->auto_mode ==
		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
		ETH_LINK_SPEED_FIXED : ETH_LINK_SPEED_AUTONEG;
exit:
	return rc;
}
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
	int rc = 0;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_link_info link_req;
	uint16_t speed;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
		return 0;

	rc = bnxt_valid_link_speed(dev_conf->link_speeds,
			bp->eth_dev->data->port_id);
	if (rc)
		goto error;

	memset(&link_req, 0, sizeof(link_req));
	link_req.link_up = link_up;
	if (!link_up)
		goto port_phy_cfg;

	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
	if (speed == 0) {
		link_req.phy_flags |=
			HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		link_req.auto_mode =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
		link_req.auto_link_speed_mask =
			bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
	} else {
		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
		link_req.link_speed = speed;
		RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
	}
	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
	link_req.auto_pause = bp->link_info.auto_pause;
	link_req.force_pause = bp->link_info.force_pause;

port_phy_cfg:
	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Set link config failed with rc %d\n", rc);
	}

	rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
error:
	return rc;
}
int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(req, FUNC_QCFG, -1, resp);
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	/* Hard Coded.. 0xfff VLAN ID mask */
	bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;

	switch (resp->port_partition_type) {
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	default:
		bp->port_partition_type = 0;
		break;
	}

	return rc;
}
static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
				   struct hwrm_func_qcaps_output *qcaps)
{
	qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
	memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
	       sizeof(qcaps->mac_address));
	qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
	qcaps->max_rx_rings = fcfg->num_rx_rings;
	qcaps->max_tx_rings = fcfg->num_tx_rings;
	qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
	qcaps->max_stat_ctx = fcfg->num_stat_ctxs;

	qcaps->first_vf_id = 0;
	qcaps->max_vnics = fcfg->num_vnics;
	qcaps->max_decap_records = 0;
	qcaps->max_encap_records = 0;
	qcaps->max_tx_wm_flows = 0;
	qcaps->max_tx_em_flows = 0;
	qcaps->max_rx_wm_flows = 0;
	qcaps->max_rx_em_flows = 0;
	qcaps->max_flow_id = 0;
	qcaps->max_mcast_filters = fcfg->num_mcast_filters;
	qcaps->max_sp_tx_rings = 0;
	qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
}
static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
			HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
	req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
	req.mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				   ETHER_CRC_LEN + VLAN_TAG_SIZE);
	req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				   ETHER_CRC_LEN + VLAN_TAG_SIZE);
	req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
	req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
	req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
	req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
	req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
	req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
	req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
	req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
	req.fid = rte_cpu_to_le_16(0xffff);

	HWRM_PREP(req, FUNC_CFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	return rc;
}
static void populate_vf_func_cfg_req(struct bnxt *bp,
				     struct hwrm_func_cfg_input *req,
				     int num_vfs)
{
	req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
			HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);

	req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				    ETHER_CRC_LEN + VLAN_TAG_SIZE);
	req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				    ETHER_CRC_LEN + VLAN_TAG_SIZE);
	req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
						(num_vfs + 1));
	req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
	req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
					       (num_vfs + 1));
	req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
	req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
	req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
	/* TODO: For now, do not support VMDq/RFS on VFs. */
	req->num_vnics = rte_cpu_to_le_16(1);
	req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
						 (num_vfs + 1));
}
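/*
 * Worked example of the even split above: with bp->max_tx_rings == 16 and
 * num_vfs == 7, each of the eight functions (the PF plus seven VFs) is
 * offered 16 / (7 + 1) == 2 TX rings, and likewise for the other resource
 * counts.
 */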
static void add_random_mac_if_needed(struct bnxt *bp,
				     struct hwrm_func_cfg_input *cfg_req,
				     int vf)
{
	struct ether_addr mac;

	if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
		return;

	if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00", 6) == 0) {
		cfg_req->enables |=
		rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
		eth_random_addr(cfg_req->dflt_mac_addr);
		bp->pf.vf_info[vf].random_mac = true;
	} else {
		memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes,
		       ETHER_ADDR_LEN);
	}
}
static void reserve_resources_from_vf(struct bnxt *bp,
				      struct hwrm_func_cfg_input *cfg_req,
				      int vf)
{
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	/* Get the actual allocated values now */
	HWRM_PREP(req, FUNC_QCAPS, -1, resp);
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc) {
		RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
		copy_func_cfg_to_qcaps(cfg_req, resp);
	} else if (resp->error_code) {
		rc = rte_le_to_cpu_16(resp->error_code);
		RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
		copy_func_cfg_to_qcaps(cfg_req, resp);
	}

	bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
	bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
	/*
	 * TODO: While not supporting VMDq with VFs, max_vnics is always
	 * forced to 1 in this case
	 */
	//bp->max_vnics -= rte_le_to_cpu_16(esp->max_vnics);
	bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
}
static int update_pf_resource_max(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	/* And copy the allocated numbers into the pf struct */
	HWRM_PREP(req, FUNC_QCFG, -1, resp);
	req.fid = rte_cpu_to_le_16(0xffff);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	/* Only TX ring value reflects actual allocation? TODO */
	bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
	bp->pf.evb_mode = resp->evb_mode;

	return rc;
}
int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
{
	int rc;

	if (!BNXT_PF(bp)) {
		RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
		return -1;
	}

	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc)
		return rc;

	bp->pf.func_cfg_flags &=
		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
	bp->pf.func_cfg_flags |=
		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
	return rc;
}
int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int i;
	size_t sz;
	int rc = 0;
	size_t req_buf_sz;

	if (!BNXT_PF(bp)) {
		RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
		return -1;
	}

	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc)
		return rc;

	bp->pf.active_vfs = num_vfs;

	/*
	 * First, configure the PF to use only one TX ring.  This ensures
	 * that there are enough rings for all VFs.
	 *
	 * If we don't do this, when we call func_alloc() later, we will lock
	 * extra rings to the PF that won't be available during func_cfg() of
	 * the VFs.
	 *
	 * This has been fixed in firmware versions above 20.6.54.
	 */
	bp->pf.func_cfg_flags &=
		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
	bp->pf.func_cfg_flags |=
		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
	rc = bnxt_hwrm_pf_func_cfg(bp, 1);
	if (rc)
		return rc;

	/* Now, create and register a buffer to hold forwarded VF requests */
	req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
	bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
		page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
	if (bp->pf.vf_req_buf == NULL) {
		rc = -ENOMEM;
		goto error_free;
	}
	for (sz = 0; sz < req_buf_sz; sz += getpagesize())
		rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
	for (i = 0; i < num_vfs; i++)
		bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
					(i * HWRM_MAX_REQ_LEN);

	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		goto error_free;

	populate_vf_func_cfg_req(bp, &req, num_vfs);

	bp->pf.active_vfs = 0;
	for (i = 0; i < num_vfs; i++) {
		add_random_mac_if_needed(bp, &req, i);

		HWRM_PREP(req, FUNC_CFG, -1, resp);
		req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
		req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

		/* Clear the DFLT_MAC_ADDR enable flag for the next pass */
		req.enables &= ~rte_cpu_to_le_32(
				HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);

		if (rc || resp->error_code) {
			RTE_LOG(ERR, PMD,
				"Failed to initialize VF %d\n", i);
			RTE_LOG(ERR, PMD,
				"Not all VFs available. (%d, %d)\n",
				rc, resp->error_code);
			break;
		}

		reserve_resources_from_vf(bp, &req, i);
		bp->pf.active_vfs++;
	}

	/*
	 * Now configure the PF to use "the rest" of the resources.
	 * STD_TX_RING_MODE is set here even though it limits the TX rings,
	 * because it allows QoS to function properly.  Without it, the PF
	 * rings would break the bandwidth settings.
	 */
	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
	if (rc)
		goto error_free;

	rc = update_pf_resource_max(bp);
	if (rc)
		goto error_free;

	return rc;

error_free:
	bnxt_hwrm_func_buf_unrgtr(bp);
	return rc;
}

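/*
 * Write the cached EVB (Edge Virtual Bridging) mode for the PF back to
 * the firmware.  fid 0xffff targets the requesting function itself.
 */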
int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(req, FUNC_CFG, -1, resp);

	req.fid = rte_cpu_to_le_16(0xffff);
	req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
	req.evb_mode = bp->pf.evb_mode;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	return rc;
}

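/*
 * Ask the firmware to recognize a tunnel (VXLAN or Geneve) UDP destination
 * port, and cache the firmware-assigned port ID so it can be freed later.
 */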
int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
				uint8_t tunnel_type)
{
	struct hwrm_tunnel_dst_port_alloc_input req = {0};
	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, -1, resp);
	req.tunnel_type = tunnel_type;
	req.tunnel_dst_port_val = port;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	switch (tunnel_type) {
	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
		bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
		bp->vxlan_port = port;
		break;
	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
		bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
		bp->geneve_port = port;
		break;
	default:
		break;
	}
	return rc;
}

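/*
 * Release a previously allocated tunnel destination port.  Note that the
 * value programmed is tunnel_dst_port_id, i.e. the firmware-assigned ID
 * returned by the alloc call above, not the raw UDP port number.
 */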
int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
				uint8_t tunnel_type)
{
	struct hwrm_tunnel_dst_port_free_input req = {0};
	struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(req, TUNNEL_DST_PORT_FREE, -1, resp);
	req.tunnel_type = tunnel_type;
	req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_CFG, -1, resp);
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	return rc;
}

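/*
 * Register the page-locked buffer into which the firmware forwards HWRM
 * requests issued by VFs, so the PF driver can inspect them.
 */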
int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_BUF_RGTR, -1, resp);

	req.req_buf_num_pages = rte_cpu_to_le_16(1);
	req.req_buf_page_size = rte_cpu_to_le_16(
			page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
	req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
	req.req_buf_page_addr[0] =
		rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
	if (req.req_buf_page_addr[0] == 0) {
		RTE_LOG(ERR, PMD,
			"unable to map buffer address to physical memory\n");
		return -ENOMEM;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_BUF_UNRGTR, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	return rc;
}

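/*
 * Point firmware asynchronous event notifications at the PF's default
 * completion ring.
 */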
int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_CFG, -1, resp);
	req.fid = rte_cpu_to_le_16(0xffff);
	req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
	req.async_event_cr = rte_cpu_to_le_16(
			bp->def_cp_ring->cp_ring_struct->fw_ring_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
{
	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_vf_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_VF_CFG, -1, resp);
	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
	req.async_event_cr = rte_cpu_to_le_16(
			bp->def_cp_ring->cp_ring_struct->fw_ring_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	return rc;
}

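/*
 * Program the default VLAN either for one VF (is_vf set) or for the PF
 * itself (fid 0xffff).
 */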
int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t dflt_vlan, fid;
	uint32_t func_cfg_flags;
	int rc = 0;

	HWRM_PREP(req, FUNC_CFG, -1, resp);

	if (is_vf) {
		dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
		fid = bp->pf.vf_info[vf].fid;
		func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
	} else {
		fid = rte_cpu_to_le_16(0xffff);
		func_cfg_flags = bp->pf.func_cfg_flags;
		dflt_vlan = bp->vlan;
	}

	req.flags = rte_cpu_to_le_32(func_cfg_flags);
	req.fid = rte_cpu_to_le_16(fid);
	req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
	req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
			uint16_t max_bw, uint16_t enables)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_CFG, -1, resp);
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	req.enables |= rte_cpu_to_le_32(enables);
	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
	req.max_bw = rte_cpu_to_le_32(max_bw);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	return rc;
}

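/*
 * Complete a VF request that was forwarded to the PF by rejecting it:
 * the encapsulated original request is handed back to the firmware,
 * which fails it on the VF's behalf.
 */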
int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
			      void *encaped, size_t ec_size)
{
	int rc = 0;
	struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	if (ec_size > sizeof(req.encap_request))
		return -1;

	HWRM_PREP(req, REJECT_FWD_RESP, -1, resp);

	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
	memcpy(req.encap_request, encaped, ec_size);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
				       struct ether_addr *mac)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(req, FUNC_QCFG, -1, resp);
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);

	return rc;
}

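/*
 * Complete a forwarded VF request by asking the firmware to execute the
 * encapsulated command unchanged.
 */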
int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
			    void *encaped, size_t ec_size)
{
	int rc = 0;
	struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	if (ec_size > sizeof(req.encap_request))
		return -1;

	HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);

	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
	memcpy(req.encap_request, encaped, ec_size);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	return rc;
}

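/*
 * Query one firmware statistics context and accumulate its counters into
 * the per-queue slots of the given rte_eth_stats at index idx.
 */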
int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
			 struct rte_eth_stats *stats)
{
	int rc = 0;
	struct hwrm_stat_ctx_query_input req = {.req_type = 0};
	struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_QUERY, -1, resp);

	req.stat_ctx_id = rte_cpu_to_le_32(cid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
	stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
	stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
	stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
	stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
	stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);

	stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
	stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
	stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
	stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
	stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
	stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);

	stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
	stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
	stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);

	return rc;
}

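/*
 * Ask the firmware to DMA the port-level TX/RX statistics into the host
 * buffers mapped at hw_tx_port_stats_map/hw_rx_port_stats_map.  Only
 * valid when the device supports port statistics (BNXT_FLAG_PORT_STATS).
 */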
int bnxt_hwrm_port_qstats(struct bnxt *bp)
{
	struct hwrm_port_qstats_input req = {0};
	struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_pf_info *pf = &bp->pf;
	int rc;

	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
		return 0;

	HWRM_PREP(req, PORT_QSTATS, -1, resp);
	req.port_id = rte_cpu_to_le_16(pf->port_id);
	req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
	req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
{
	struct hwrm_port_clr_stats_input req = {0};
	struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_pf_info *pf = &bp->pf;
	int rc;

	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
		return 0;

	HWRM_PREP(req, PORT_CLR_STATS, -1, resp);
	req.port_id = rte_cpu_to_le_16(pf->port_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	return rc;
}

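/*
 * Discover the port LED configuration.  The LEDs are only recorded as
 * usable when every LED has a group ID and supports alternate blinking,
 * since that is what bnxt_hwrm_port_led_cfg() below relies on.
 */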
int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
{
	struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_port_led_qcaps_input req = {0};
	int rc;

	if (BNXT_VF(bp))
		return 0;

	HWRM_PREP(req, PORT_LED_QCAPS, -1, resp);
	req.port_id = bp->pf.port_id;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
		unsigned int i;

		bp->num_leds = resp->num_leds;
		memcpy(bp->leds, &resp->led0_id,
			sizeof(bp->leds[0]) * bp->num_leds);
		for (i = 0; i < bp->num_leds; i++) {
			struct bnxt_led_info *led = &bp->leds[i];

			uint16_t caps = led->led_state_caps;

			if (!led->led_group_id ||
			    !BNXT_LED_ALT_BLINK_CAP(caps)) {
				bp->num_leds = 0;
				break;
			}
		}
	}
	return rc;
}

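/*
 * Drive the port LEDs: when led_on is true they are set to alternate
 * blinking with a 500ms on/off period (used for port identification),
 * otherwise they are restored to their default state.
 */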
int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
{
	struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_port_led_cfg_input req = {0};
	struct bnxt_led_cfg *led_cfg;
	uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
	uint16_t duration = 0;
	int rc, i;

	if (!bp->num_leds || BNXT_VF(bp))
		return -EOPNOTSUPP;

	HWRM_PREP(req, PORT_LED_CFG, -1, resp);
	if (led_on) {
		led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
		duration = rte_cpu_to_le_16(500);
	}
	req.port_id = bp->pf.port_id;
	req.num_leds = bp->num_leds;
	led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
	for (i = 0; i < bp->num_leds; i++, led_cfg++) {
		req.enables |= BNXT_LED_DFLT_ENABLES(i);
		led_cfg->led_id = bp->leds[i].led_id;
		led_cfg->led_state = led_state;
		led_cfg->led_blink_on = duration;
		led_cfg->led_blink_off = duration;
		led_cfg->led_group_id = bp->leds[i].led_group_id;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	return rc;
}

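/*
 * Fetch the list of VNIC IDs owned by a VF into the caller-supplied,
 * DMA-able vnic_ids table (sized for bp->pf.total_vnics entries).
 * Returns the number of IDs on success or a negative value on error.
 */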
static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
					uint16_t *vnic_ids)
{
	struct hwrm_func_vf_vnic_ids_query_input req = {0};
	struct hwrm_func_vf_vnic_ids_query_output *resp =
						bp->hwrm_cmd_resp_addr;
	int rc;

	/* First query all VNIC ids */
	HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, -1, resp);

	req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
	req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
	req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));

	if (req.vnic_id_tbl_addr == 0) {
		RTE_LOG(ERR, PMD,
			"unable to map VNIC ID table address to physical memory\n");
		return -ENOMEM;
	}
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	if (rc) {
		RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
		return -1;
	} else if (resp->error_code) {
		rc = rte_le_to_cpu_16(resp->error_code);
		RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
		return -1;
	}

	return rte_le_to_cpu_32(resp->vnic_id_cnt);
}

/*
 * This function queries the VNIC IDs for a specified VF.  It then calls
 * vnic_cb to update the required fields in vnic_info with cbdata, and
 * finally calls hwrm_cb to program the new VNIC configuration.
 */
int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
	void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
	int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
{
	struct bnxt_vnic_info vnic;
	int rc = 0;
	int i, num_vnic_ids;
	uint16_t *vnic_ids;
	size_t vnic_id_sz;
	size_t sz;

	/* First query all VNIC ids */
	vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
	vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
			RTE_CACHE_LINE_SIZE);
	if (vnic_ids == NULL) {
		rc = -ENOMEM;
		return rc;
	}
	for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
		rte_mem_lock_page(((char *)vnic_ids) + sz);

	num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
	if (num_vnic_ids < 0) {
		rte_free(vnic_ids);
		return num_vnic_ids;
	}

	/* Retrieve each VNIC, let the callback adjust it, then reprogram it */
	for (i = 0; i < num_vnic_ids; i++) {
		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
		if (rc)
			break;
		if (vnic.mru == 4)	/* Indicates unallocated */
			continue;

		vnic_cb(&vnic, cbdata);

		rc = hwrm_cb(bp, &vnic);
		if (rc)
			break;
	}

	rte_free(vnic_ids);

	return rc;
}

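/*
 * Enable or disable VLAN anti-spoof checking for a VF.  When enabled,
 * the firmware validates the VLAN of each transmitted packet.
 */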
int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
					      bool on)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_CFG, -1, resp);
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
	req.vlan_antispoof_mode = on ?
		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT;

	return rc;
}

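/*
 * Walk a VF's VNICs to find its default VNIC ID.  As the TODO below
 * notes, FUNC_QCFG could report this directly via resp->dflt_vnic_id.
 */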
int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
{
	struct bnxt_vnic_info vnic;
	uint16_t *vnic_ids;
	size_t vnic_id_sz;
	int num_vnic_ids, i;
	size_t sz;
	int rc;

	vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
	vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
			RTE_CACHE_LINE_SIZE);
	if (vnic_ids == NULL) {
		rc = -ENOMEM;
		return rc;
	}

	for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
		rte_mem_lock_page(((char *)vnic_ids) + sz);

	rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
	if (rc <= 0)
		goto exit;
	num_vnic_ids = rc;

	/*
	 * Loop through to find the default VNIC ID.
	 * TODO: The easier way would be to obtain the resp->dflt_vnic_id
	 * by sending the hwrm_func_qcfg command to the firmware.
	 */
	for (i = 0; i < num_vnic_ids; i++) {
		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
					bp->pf.first_vf_id + vf);
		if (rc)
			goto exit;
		if (vnic.func_default) {
			rte_free(vnic_ids);
			return vnic.fw_vnic_id;
		}
	}
	/* Could not find a default VNIC. */
	RTE_LOG(ERR, PMD, "No default VNIC\n");