1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2018 Broadcom
8 #include <rte_byteorder.h>
9 #include <rte_common.h>
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_memzone.h>
13 #include <rte_version.h>
17 #include "bnxt_filter.h"
18 #include "bnxt_hwrm.h"
21 #include "bnxt_ring.h"
24 #include "bnxt_vnic.h"
25 #include "hsi_struct_def_dpdk.h"
29 #define HWRM_CMD_TIMEOUT 10000
30 #define HWRM_VERSION_1_9_1 0x10901
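/*
 * The spec version packs as (major << 16) | (minor << 8) | update, the same
 * layout fw_version is given in bnxt_hwrm_ver_get() below:
 * 1.9.1 -> (1 << 16) | (9 << 8) | 1 = 0x10901.
 */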
32 struct bnxt_plcmodes_cfg {
34 uint16_t jumbo_thresh;
36 uint16_t hds_threshold;
39 static int page_getenum(size_t size)
55 PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
56 return sizeof(void *) * 8 - 1;
59 static int page_roundup(size_t size)
61 return 1 << page_getenum(size);
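/*
 * Worked example, assuming page_getenum() maps a size to the exponent of
 * the next supported power of two: page_getenum(3000) yields 12, so
 * page_roundup(3000) == 1 << 12 == 4096.
 */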
65 * HWRM Functions (sent to HWRM)
66 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
67 * fails (i.e., a timeout), and a positive non-zero HWRM error code if the
68 * ChiMP (the device's management firmware) fails the command.
71 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
75 struct input *req = msg;
76 struct output *resp = bp->hwrm_cmd_resp_addr;
80 uint16_t max_req_len = bp->max_req_len;
81 struct hwrm_short_input short_input = { 0 };
83 if (bp->flags & BNXT_FLAG_SHORT_CMD) {
84 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
86 memset(short_cmd_req, 0, bp->max_req_len);
87 memcpy(short_cmd_req, req, msg_len);
89 short_input.req_type = rte_cpu_to_le_16(req->req_type);
90 short_input.signature = rte_cpu_to_le_16(
91 HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
92 short_input.size = rte_cpu_to_le_16(msg_len);
93 short_input.req_addr =
94 rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);
96 data = (uint32_t *)&short_input;
97 msg_len = sizeof(short_input);
99 /* Sync memory write before updating doorbell */
102 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
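/*
 * In short-command mode only the 16-byte hwrm_short_input descriptor is
 * written through BAR0 below; the full request has already been copied to
 * the DMA buffer that short_input.req_addr points at, and the firmware
 * fetches it from there.
 */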
105 /* Write request msg to hwrm channel */
106 for (i = 0; i < msg_len; i += 4) {
107 bar = (uint8_t *)bp->bar0 + i;
108 rte_write32(*data, bar);
112 /* Zero the rest of the request space */
113 for (; i < max_req_len; i += 4) {
114 bar = (uint8_t *)bp->bar0 + i;
118 /* Ring channel doorbell */
119 bar = (uint8_t *)bp->bar0 + 0x100;
122 /* Poll for the valid bit */
123 for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
124 /* Sanity check on the resp->resp_len */
126 if (resp->resp_len && resp->resp_len <=
128 /* Last byte of resp contains the valid key */
129 valid = (uint8_t *)resp + resp->resp_len - 1;
130 if (*valid == HWRM_RESP_VALID_KEY)
136 if (i >= HWRM_CMD_TIMEOUT) {
137 PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
148 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
149 * spinlock, and does initial processing.
151 * HWRM_CHECK_RESULT() checks the response and, on failure, releases the
152 * spinlock and returns from the enclosing function with an error. Functions
153 * that do not follow the regular int return convention should not use
154 * HWRM_CHECK_RESULT() directly; copy and modify it to suit the function.
156 * HWRM_UNLOCK() must be called after all response processing is completed.
158 #define HWRM_PREP(req, type) do { \
159 rte_spinlock_lock(&bp->hwrm_lock); \
160 memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
161 req.req_type = rte_cpu_to_le_16(HWRM_##type); \
162 req.cmpl_ring = rte_cpu_to_le_16(-1); \
163 req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
164 req.target_id = rte_cpu_to_le_16(0xffff); \
165 req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
168 #define HWRM_CHECK_RESULT() do {\
170 PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
171 rte_spinlock_unlock(&bp->hwrm_lock); \
174 if (resp->error_code) { \
175 rc = rte_le_to_cpu_16(resp->error_code); \
176 if (resp->resp_len >= 16) { \
177 struct hwrm_err_output *tmp_hwrm_err_op = \
180 "error %d:%d:%08x:%04x\n", \
181 rc, tmp_hwrm_err_op->cmd_err, \
183 tmp_hwrm_err_op->opaque_0), \
185 tmp_hwrm_err_op->opaque_1)); \
187 PMD_DRV_LOG(ERR, "error %d\n", rc); \
189 rte_spinlock_unlock(&bp->hwrm_lock); \
194 #define HWRM_UNLOCK() rte_spinlock_unlock(&bp->hwrm_lock)
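/*
 * A minimal sketch of the calling convention these macros implement
 * ("EXAMPLE" and the hwrm_example_* types are hypothetical stand-ins for a
 * real HWRM command):
 *
 *	int bnxt_hwrm_example(struct bnxt *bp)
 *	{
 *		int rc = 0;
 *		struct hwrm_example_input req = {.req_type = 0 };
 *		struct hwrm_example_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *		HWRM_PREP(req, EXAMPLE);	// take the lock, fill the header
 *		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *		HWRM_CHECK_RESULT();		// may return, unlocking on error
 *		// ... read fields out of *resp while the lock is still held ...
 *		HWRM_UNLOCK();
 *		return rc;
 *	}
 */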
196 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
199 struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
200 struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
202 HWRM_PREP(req, CFA_L2_SET_RX_MASK);
203 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
206 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
214 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
215 struct bnxt_vnic_info *vnic,
217 struct bnxt_vlan_table_entry *vlan_table)
220 struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
221 struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
224 HWRM_PREP(req, CFA_L2_SET_RX_MASK);
225 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
227 /* FIXME: add the multicast flag once adding multicast addresses is supported
230 if (vnic->flags & BNXT_VNIC_INFO_BCAST)
231 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
232 if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
233 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
234 if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
235 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
236 if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
237 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
238 if (vnic->flags & BNXT_VNIC_INFO_MCAST)
239 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
240 if (vnic->mc_addr_cnt) {
241 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
242 req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
243 req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
246 if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
247 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
248 req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
249 rte_mem_virt2iova(vlan_table));
250 req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
252 req.mask = rte_cpu_to_le_32(mask);
254 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
262 int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
264 struct bnxt_vlan_antispoof_table_entry *vlan_table)
267 struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
268 struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
269 bp->hwrm_cmd_resp_addr;
272 * Older HWRM versions did not support this command, and the set_rx_mask
273 * list was used for anti-spoof. In 1.8.0, the TX path configuration was
274 * removed from the set_rx_mask call, and this command was added.
276 * This command is also present from 1.7.8.11 and higher,
279 if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
280 if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
281 if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
286 HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
287 req.fid = rte_cpu_to_le_16(fid);
289 req.vlan_tag_mask_tbl_addr =
290 rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
291 req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
293 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
301 int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
302 struct bnxt_filter_info *filter)
305 struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
306 struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
308 if (filter->fw_l2_filter_id == UINT64_MAX)
311 HWRM_PREP(req, CFA_L2_FILTER_FREE);
313 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
315 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
320 filter->fw_l2_filter_id = UINT64_MAX;
325 int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
327 struct bnxt_filter_info *filter)
330 struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
331 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
332 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
333 const struct rte_eth_vmdq_rx_conf *conf =
334 &dev_conf->rx_adv_conf.vmdq_rx_conf;
335 uint32_t enables = 0;
336 uint16_t j = dst_id - 1;
338 /* TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ? */
339 if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
340 conf->pool_map[j].pools & (1UL << j)) {
342 "Add vlan %u to vmdq pool %u\n",
343 conf->pool_map[j].vlan_id, j);
345 filter->l2_ivlan = conf->pool_map[j].vlan_id;
347 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
348 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
351 if (filter->fw_l2_filter_id != UINT64_MAX)
352 bnxt_hwrm_clear_l2_filter(bp, filter);
354 HWRM_PREP(req, CFA_L2_FILTER_ALLOC);
356 req.flags = rte_cpu_to_le_32(filter->flags);
358 enables = filter->enables |
359 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
360 req.dst_id = rte_cpu_to_le_16(dst_id);
363 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
364 memcpy(req.l2_addr, filter->l2_addr,
367 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
368 memcpy(req.l2_addr_mask, filter->l2_addr_mask,
371 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
372 req.l2_ovlan = filter->l2_ovlan;
374 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
375 req.l2_ivlan = filter->l2_ivlan;
377 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
378 req.l2_ovlan_mask = filter->l2_ovlan_mask;
380 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
381 req.l2_ivlan_mask = filter->l2_ivlan_mask;
382 if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
383 req.src_id = rte_cpu_to_le_32(filter->src_id);
384 if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
385 req.src_type = filter->src_type;
387 req.enables = rte_cpu_to_le_32(enables);
389 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
393 filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
399 int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
401 struct hwrm_port_mac_cfg_input req = {.req_type = 0};
402 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
409 HWRM_PREP(req, PORT_MAC_CFG);
412 flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
414 flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
415 if (ptp->tx_tstamp_en)
416 flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
418 flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
419 req.flags = rte_cpu_to_le_32(flags);
421 rte_cpu_to_le_32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
422 req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);
424 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
430 static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
433 struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
434 struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
435 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
437 /* if (bp->hwrm_spec_code < 0x10801 || ptp) TBD */
441 HWRM_PREP(req, PORT_MAC_PTP_QCFG);
443 req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
445 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
449 if (!(resp->flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS))
452 ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
456 ptp->rx_regs[BNXT_PTP_RX_TS_L] =
457 rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
458 ptp->rx_regs[BNXT_PTP_RX_TS_H] =
459 rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
460 ptp->rx_regs[BNXT_PTP_RX_SEQ] =
461 rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
462 ptp->rx_regs[BNXT_PTP_RX_FIFO] =
463 rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
464 ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
465 rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
466 ptp->tx_regs[BNXT_PTP_TX_TS_L] =
467 rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
468 ptp->tx_regs[BNXT_PTP_TX_TS_H] =
469 rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
470 ptp->tx_regs[BNXT_PTP_TX_SEQ] =
471 rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
472 ptp->tx_regs[BNXT_PTP_TX_FIFO] =
473 rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
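/*
 * These offsets describe the "direct access" PTP timestamp registers (see
 * the DIRECT_ACCESS flag check above); the timestamp read path is assumed
 * to use rx_regs[]/tx_regs[] with exactly this layout.
 */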
481 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
484 struct hwrm_func_qcaps_input req = {.req_type = 0 };
485 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
486 uint16_t new_max_vfs;
490 HWRM_PREP(req, FUNC_QCAPS);
492 req.fid = rte_cpu_to_le_16(0xffff);
494 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
498 bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
499 flags = rte_le_to_cpu_32(resp->flags);
501 bp->pf.port_id = resp->port_id;
502 bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
503 new_max_vfs = bp->pdev->max_vfs;
504 if (new_max_vfs != bp->pf.max_vfs) {
506 rte_free(bp->pf.vf_info);
507 bp->pf.vf_info = rte_malloc("bnxt_vf_info",
508 sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
509 bp->pf.max_vfs = new_max_vfs;
510 for (i = 0; i < new_max_vfs; i++) {
511 bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
512 bp->pf.vf_info[i].vlan_table =
513 rte_zmalloc("VF VLAN table",
516 if (bp->pf.vf_info[i].vlan_table == NULL)
518 "Fail to alloc VLAN table for VF %d\n",
522 bp->pf.vf_info[i].vlan_table);
523 bp->pf.vf_info[i].vlan_as_table =
524 rte_zmalloc("VF VLAN AS table",
527 if (bp->pf.vf_info[i].vlan_as_table == NULL)
529 "Alloc VLAN AS table for VF %d fail\n",
533 bp->pf.vf_info[i].vlan_as_table);
534 STAILQ_INIT(&bp->pf.vf_info[i].filter);
539 bp->fw_fid = rte_le_to_cpu_32(resp->fid);
540 memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
541 bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
542 bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
543 bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
544 bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
545 bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
546 /* TODO: For now, do not support VMDq/RFS on VFs. */
551 bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
555 bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
557 bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
558 if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
559 bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
560 PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
562 bnxt_hwrm_ptp_qcfg(bp);
571 int bnxt_hwrm_func_reset(struct bnxt *bp)
574 struct hwrm_func_reset_input req = {.req_type = 0 };
575 struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
577 HWRM_PREP(req, FUNC_RESET);
579 req.enables = rte_cpu_to_le_32(0);
581 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
589 int bnxt_hwrm_func_driver_register(struct bnxt *bp)
592 struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
593 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
595 if (bp->flags & BNXT_FLAG_REGISTERED)
598 HWRM_PREP(req, FUNC_DRV_RGTR);
599 req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
600 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
601 req.ver_maj = RTE_VER_YEAR;
602 req.ver_min = RTE_VER_MONTH;
603 req.ver_upd = RTE_VER_MINOR;
606 req.enables |= rte_cpu_to_le_32(
607 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
608 memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
609 RTE_MIN(sizeof(req.vf_req_fwd),
610 sizeof(bp->pf.vf_req_fwd)));
613 req.async_event_fwd[0] |=
614 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
615 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
616 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
617 req.async_event_fwd[1] |=
618 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
619 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
621 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
626 bp->flags |= BNXT_FLAG_REGISTERED;
631 int bnxt_hwrm_ver_get(struct bnxt *bp)
634 struct hwrm_ver_get_input req = {.req_type = 0 };
635 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
638 uint16_t max_resp_len;
639 char type[RTE_MEMZONE_NAMESIZE];
640 uint32_t dev_caps_cfg;
642 bp->max_req_len = HWRM_MAX_REQ_LEN;
643 HWRM_PREP(req, VER_GET);
645 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
646 req.hwrm_intf_min = HWRM_VERSION_MINOR;
647 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
649 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
653 PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
654 resp->hwrm_intf_maj, resp->hwrm_intf_min,
656 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
657 bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
658 (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
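/*
 * One byte per component: firmware 20.6.143.0, for example, packs to
 * (20 << 24) | (6 << 16) | (143 << 8) | 0 = 0x14068f00. The version gate
 * in bnxt_hwrm_cfa_vlan_antispoof_cfg() compares against values built the
 * same way.
 */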
659 PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
660 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
662 my_version = HWRM_VERSION_MAJOR << 16;
663 my_version |= HWRM_VERSION_MINOR << 8;
664 my_version |= HWRM_VERSION_UPDATE;
666 fw_version = resp->hwrm_intf_maj << 16;
667 fw_version |= resp->hwrm_intf_min << 8;
668 fw_version |= resp->hwrm_intf_upd;
669 bp->hwrm_spec_code = fw_version;
671 if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
672 PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
677 if (my_version != fw_version) {
678 PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
679 if (my_version < fw_version) {
681 "Firmware API version is newer than driver.\n");
683 "The driver may be missing features.\n");
686 "Firmware API version is older than driver.\n");
688 "Not all driver features may be functional.\n");
692 if (bp->max_req_len > resp->max_req_win_len) {
693 PMD_DRV_LOG(ERR, "Unsupported request length\n");
696 bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
697 max_resp_len = resp->max_resp_len;
698 dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);
700 if (bp->max_resp_len != max_resp_len) {
701 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
702 bp->pdev->addr.domain, bp->pdev->addr.bus,
703 bp->pdev->addr.devid, bp->pdev->addr.function);
705 rte_free(bp->hwrm_cmd_resp_addr);
707 bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
708 if (bp->hwrm_cmd_resp_addr == NULL) {
712 rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
713 bp->hwrm_cmd_resp_dma_addr =
714 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
715 if (bp->hwrm_cmd_resp_dma_addr == 0) {
717 "Unable to map response buffer to physical memory.\n");
721 bp->max_resp_len = max_resp_len;
725 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
727 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
728 PMD_DRV_LOG(DEBUG, "Short command supported\n");
730 rte_free(bp->hwrm_short_cmd_req_addr);
732 bp->hwrm_short_cmd_req_addr = rte_malloc(type,
734 if (bp->hwrm_short_cmd_req_addr == NULL) {
738 rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
739 bp->hwrm_short_cmd_req_dma_addr =
740 rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
741 if (bp->hwrm_short_cmd_req_dma_addr == 0) {
742 rte_free(bp->hwrm_short_cmd_req_addr);
744 "Unable to map buffer to physical memory.\n");
749 bp->flags |= BNXT_FLAG_SHORT_CMD;
757 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
760 struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
761 struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
763 if (!(bp->flags & BNXT_FLAG_REGISTERED))
766 HWRM_PREP(req, FUNC_DRV_UNRGTR);
769 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
774 bp->flags &= ~BNXT_FLAG_REGISTERED;
779 static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
782 struct hwrm_port_phy_cfg_input req = {0};
783 struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
784 uint32_t enables = 0;
786 HWRM_PREP(req, PORT_PHY_CFG);
789 /* Setting a fixed speed while autoneg is on, so disable autoneg first */
790 if (bp->link_info.auto_mode && conf->link_speed) {
791 req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
792 PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
795 req.flags = rte_cpu_to_le_32(conf->phy_flags);
796 req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
797 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
799 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
800 * any auto mode, even "none".
802 if (!conf->link_speed) {
803 /* No speeds specified. Enable AutoNeg - all speeds */
805 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
807 /* AutoNeg - Advertise speeds specified. */
808 if (conf->auto_link_speed_mask &&
809 !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
811 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
812 req.auto_link_speed_mask =
813 conf->auto_link_speed_mask;
815 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
818 req.auto_duplex = conf->duplex;
819 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
820 req.auto_pause = conf->auto_pause;
821 req.force_pause = conf->force_pause;
822 /* Use AUTO_PAUSE when auto_pause is set and no pause is forced; else FORCE_PAUSE */
823 if (req.auto_pause && !req.force_pause)
824 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
826 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
828 req.enables = rte_cpu_to_le_32(enables);
831 rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
832 PMD_DRV_LOG(INFO, "Force Link Down\n");
835 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
843 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
844 struct bnxt_link_info *link_info)
847 struct hwrm_port_phy_qcfg_input req = {0};
848 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
850 HWRM_PREP(req, PORT_PHY_QCFG);
852 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
856 link_info->phy_link_status = resp->link;
858 (link_info->phy_link_status ==
859 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
860 link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
861 link_info->duplex = resp->duplex_cfg;
862 link_info->pause = resp->pause;
863 link_info->auto_pause = resp->auto_pause;
864 link_info->force_pause = resp->force_pause;
865 link_info->auto_mode = resp->auto_mode;
866 link_info->phy_type = resp->phy_type;
867 link_info->media_type = resp->media_type;
869 link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
870 link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
871 link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
872 link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
873 link_info->phy_ver[0] = resp->phy_maj;
874 link_info->phy_ver[1] = resp->phy_min;
875 link_info->phy_ver[2] = resp->phy_bld;
879 PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
880 PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
881 PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
882 PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
883 PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
884 link_info->auto_link_speed_mask);
885 PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
886 link_info->force_link_speed);
891 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
894 struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
895 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
898 HWRM_PREP(req, QUEUE_QPORTCFG);
900 req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
901 /* HWRM Version >= 1.9.1 */
902 if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
904 HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
905 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
909 #define GET_QUEUE_INFO(x) \
910 bp->cos_queue[x].id = resp->queue_id##x; \
911 bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
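/*
 * Token pasting expands GET_QUEUE_INFO(0), for instance, to:
 *	bp->cos_queue[0].id = resp->queue_id0;
 *	bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 * presumably invoked once per CoS queue (queue_id0..queue_id7).
 */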
924 if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
925 bp->tx_cosq_id = bp->cos_queue[0].id;
927 /* iterate and find the COSq profile to use for Tx */
928 for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
929 if (bp->cos_queue[i].profile ==
930 HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
931 bp->tx_cosq_id = bp->cos_queue[i].id;
936 PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);
941 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
942 struct bnxt_ring *ring,
943 uint32_t ring_type, uint32_t map_index,
944 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
947 uint32_t enables = 0;
948 struct hwrm_ring_alloc_input req = {.req_type = 0 };
949 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
951 HWRM_PREP(req, RING_ALLOC);
953 req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
954 req.fbo = rte_cpu_to_le_32(0);
955 /* Association of ring index with doorbell index */
956 req.logical_id = rte_cpu_to_le_16(map_index);
957 req.length = rte_cpu_to_le_32(ring->ring_size);
960 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
961 req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
963 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
964 req.ring_type = ring_type;
965 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
966 req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
967 if (stats_ctx_id != INVALID_STATS_CTX_ID)
969 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
971 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
972 req.ring_type = ring_type;
974 * TODO: Some HWRM versions crash with
975 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
977 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
980 PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
985 req.enables = rte_cpu_to_le_32(enables);
987 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
989 if (rc || resp->error_code) {
990 if (rc == 0 && resp->error_code)
991 rc = rte_le_to_cpu_16(resp->error_code);
993 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
995 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
998 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1000 "hwrm_ring_alloc rx failed. rc:%d\n", rc);
1003 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1005 "hwrm_ring_alloc tx failed. rc:%d\n", rc);
1009 PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
1015 ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
1020 int bnxt_hwrm_ring_free(struct bnxt *bp,
1021 struct bnxt_ring *ring, uint32_t ring_type)
1024 struct hwrm_ring_free_input req = {.req_type = 0 };
1025 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
1027 HWRM_PREP(req, RING_FREE);
1029 req.ring_type = ring_type;
1030 req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
1032 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1034 if (rc || resp->error_code) {
1035 if (rc == 0 && resp->error_code)
1036 rc = rte_le_to_cpu_16(resp->error_code);
1039 switch (ring_type) {
1040 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1041 PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
1044 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1045 PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
1048 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1049 PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
1053 PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
1061 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1064 struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
1065 struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1067 HWRM_PREP(req, RING_GRP_ALLOC);
1069 req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
1070 req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
1071 req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
1072 req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
1074 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1076 HWRM_CHECK_RESULT();
1078 bp->grp_info[idx].fw_grp_id =
1079 rte_le_to_cpu_16(resp->ring_group_id);
1086 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
1089 struct hwrm_ring_grp_free_input req = {.req_type = 0 };
1090 struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
1092 HWRM_PREP(req, RING_GRP_FREE);
1094 req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
1096 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1098 HWRM_CHECK_RESULT();
1101 bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
1105 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1108 struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
1109 struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1111 if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
1114 HWRM_PREP(req, STAT_CTX_CLR_STATS);
1116 req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
1118 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1120 HWRM_CHECK_RESULT();
1126 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1127 unsigned int idx __rte_unused)
1130 struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1131 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1133 HWRM_PREP(req, STAT_CTX_ALLOC);
1135 req.update_period_ms = rte_cpu_to_le_32(0);
1137 req.stats_dma_addr =
1138 rte_cpu_to_le_64(cpr->hw_stats_map);
1140 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1142 HWRM_CHECK_RESULT();
1144 cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
1151 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1152 unsigned int idx __rte_unused)
1155 struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1156 struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1158 HWRM_PREP(req, STAT_CTX_FREE);
1160 req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
1162 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1164 HWRM_CHECK_RESULT();
1170 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1173 struct hwrm_vnic_alloc_input req = { 0 };
1174 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1176 /* map ring groups to this vnic */
1177 PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
1178 vnic->start_grp_id, vnic->end_grp_id);
1179 for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
1180 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1181 vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1182 vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1183 vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1184 vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
1185 vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1186 ETHER_CRC_LEN + VLAN_TAG_SIZE;
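/*
 * MRU is MTU plus L2 overhead; with DPDK's ETHER_HDR_LEN (14),
 * ETHER_CRC_LEN (4) and VLAN_TAG_SIZE (4), a 1500-byte MTU yields an MRU
 * of 1522 bytes.
 */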
1187 HWRM_PREP(req, VNIC_ALLOC);
1189 if (vnic->func_default)
1191 rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1192 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1194 HWRM_CHECK_RESULT();
1196 vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1198 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1202 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1203 struct bnxt_vnic_info *vnic,
1204 struct bnxt_plcmodes_cfg *pmode)
1207 struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1208 struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1210 HWRM_PREP(req, VNIC_PLCMODES_QCFG);
1212 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1214 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1216 HWRM_CHECK_RESULT();
1218 pmode->flags = rte_le_to_cpu_32(resp->flags);
1219 /* dflt_vnic bit doesn't exist in the _cfg command */
1220 pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1221 pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1222 pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1223 pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1230 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1231 struct bnxt_vnic_info *vnic,
1232 struct bnxt_plcmodes_cfg *pmode)
1235 struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1236 struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1238 HWRM_PREP(req, VNIC_PLCMODES_CFG);
1240 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1241 req.flags = rte_cpu_to_le_32(pmode->flags);
1242 req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1243 req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1244 req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1245 req.enables = rte_cpu_to_le_32(
1246 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1247 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1248 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1251 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1253 HWRM_CHECK_RESULT();
1259 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1262 struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1263 struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1264 uint32_t ctx_enable_flag = 0;
1265 struct bnxt_plcmodes_cfg pmodes;
1267 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1268 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1272 rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1276 HWRM_PREP(req, VNIC_CFG);
1278 /* Only RSS is supported for now; COS & LB are TBD */
1280 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
1281 if (vnic->lb_rule != 0xffff)
1282 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1283 if (vnic->cos_rule != 0xffff)
1284 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1285 if (vnic->rss_rule != 0xffff) {
1286 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1287 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1289 req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
1290 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1291 req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1292 req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1293 req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1294 req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1295 req.mru = rte_cpu_to_le_16(vnic->mru);
1296 if (vnic->func_default)
1298 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1299 if (vnic->vlan_strip)
1301 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1304 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1305 if (vnic->roce_dual)
1306 req.flags |= rte_cpu_to_le_32(
1307 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1308 if (vnic->roce_only)
1309 req.flags |= rte_cpu_to_le_32(
1310 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1311 if (vnic->rss_dflt_cr)
1312 req.flags |= rte_cpu_to_le_32(
1313 HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
1315 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1317 HWRM_CHECK_RESULT();
1320 rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1325 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1329 struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1330 struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1332 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1333 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1336 HWRM_PREP(req, VNIC_QCFG);
1339 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1340 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1341 req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1343 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1345 HWRM_CHECK_RESULT();
1347 vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1348 vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1349 vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1350 vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1351 vnic->mru = rte_le_to_cpu_16(resp->mru);
1352 vnic->func_default = rte_le_to_cpu_32(
1353 resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1354 vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1355 HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1356 vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1357 HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1358 vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1359 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1360 vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1361 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1362 vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1363 HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1370 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1373 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1374 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1375 bp->hwrm_cmd_resp_addr;
1377 HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);
1379 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1381 HWRM_CHECK_RESULT();
1383 vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1385 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1390 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1393 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1394 struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1395 bp->hwrm_cmd_resp_addr;
1397 if (vnic->rss_rule == 0xffff) {
1398 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1401 HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
1403 req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1405 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1407 HWRM_CHECK_RESULT();
1410 vnic->rss_rule = INVALID_HW_RING_ID;
1415 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1418 struct hwrm_vnic_free_input req = {.req_type = 0 };
1419 struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1421 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1422 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1426 HWRM_PREP(req, VNIC_FREE);
1428 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1430 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1432 HWRM_CHECK_RESULT();
1435 vnic->fw_vnic_id = INVALID_HW_RING_ID;
1439 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1440 struct bnxt_vnic_info *vnic)
1443 struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1444 struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1446 HWRM_PREP(req, VNIC_RSS_CFG);
1448 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1450 req.ring_grp_tbl_addr =
1451 rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1452 req.hash_key_tbl_addr =
1453 rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1454 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1456 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1458 HWRM_CHECK_RESULT();
1464 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1465 struct bnxt_vnic_info *vnic)
1468 struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1469 struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1472 HWRM_PREP(req, VNIC_PLCMODES_CFG);
1474 req.flags = rte_cpu_to_le_32(
1475 HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1477 req.enables = rte_cpu_to_le_32(
1478 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1480 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1481 size -= RTE_PKTMBUF_HEADROOM;
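/*
 * The usable data room of one RX mbuf becomes the jumbo threshold, so
 * packets that do not fit a single buffer are placed via the aggregation
 * path instead.
 */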
1483 req.jumbo_thresh = rte_cpu_to_le_16(size);
1484 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1486 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1488 HWRM_CHECK_RESULT();
1494 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1495 struct bnxt_vnic_info *vnic, bool enable)
1498 struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1499 struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1501 HWRM_PREP(req, VNIC_TPA_CFG);
1504 req.enables = rte_cpu_to_le_32(
1505 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1506 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1507 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1508 req.flags = rte_cpu_to_le_32(
1509 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1510 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1511 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1512 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1513 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1514 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1515 req.max_agg_segs = rte_cpu_to_le_16(5);
1517 rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1518 req.min_agg_len = rte_cpu_to_le_32(512);
1520 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1522 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1524 HWRM_CHECK_RESULT();
1530 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1532 struct hwrm_func_cfg_input req = {0};
1533 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1536 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1537 req.enables = rte_cpu_to_le_32(
1538 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1539 memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1540 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1542 HWRM_PREP(req, FUNC_CFG);
1544 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1545 HWRM_CHECK_RESULT();
1548 bp->pf.vf_info[vf].random_mac = false;
1553 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1557 struct hwrm_func_qstats_input req = {.req_type = 0};
1558 struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1560 HWRM_PREP(req, FUNC_QSTATS);
1562 req.fid = rte_cpu_to_le_16(fid);
1564 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1566 HWRM_CHECK_RESULT();
1569 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1576 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1577 struct rte_eth_stats *stats)
1580 struct hwrm_func_qstats_input req = {.req_type = 0};
1581 struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1583 HWRM_PREP(req, FUNC_QSTATS);
1585 req.fid = rte_cpu_to_le_16(fid);
1587 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1589 HWRM_CHECK_RESULT();
1591 stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1592 stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1593 stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1594 stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1595 stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1596 stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1598 stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1599 stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1600 stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1601 stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1602 stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1603 stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1605 stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
1606 stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);
1608 stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
1615 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1618 struct hwrm_func_clr_stats_input req = {.req_type = 0};
1619 struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1621 HWRM_PREP(req, FUNC_CLR_STATS);
1623 req.fid = rte_cpu_to_le_16(fid);
1625 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1627 HWRM_CHECK_RESULT();
1634 * HWRM utility functions
1637 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1642 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1643 struct bnxt_tx_queue *txq;
1644 struct bnxt_rx_queue *rxq;
1645 struct bnxt_cp_ring_info *cpr;
1647 if (i >= bp->rx_cp_nr_rings) {
1648 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1651 rxq = bp->rx_queues[i];
1655 rc = bnxt_hwrm_stat_clear(bp, cpr);
1662 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1666 struct bnxt_cp_ring_info *cpr;
1668 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1670 if (i >= bp->rx_cp_nr_rings) {
1671 cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1673 cpr = bp->rx_queues[i]->cp_ring;
1674 bp->grp_info[i].fw_stats_ctx = -1;
1676 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1677 rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1678 cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1686 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1691 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1692 struct bnxt_tx_queue *txq;
1693 struct bnxt_rx_queue *rxq;
1694 struct bnxt_cp_ring_info *cpr;
1696 if (i >= bp->rx_cp_nr_rings) {
1697 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1700 rxq = bp->rx_queues[i];
1704 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1712 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1717 for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1719 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
1722 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1730 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1731 unsigned int idx __rte_unused)
1733 struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1735 bnxt_hwrm_ring_free(bp, cp_ring,
1736 HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1737 cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1738 memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1739 sizeof(*cpr->cp_desc_ring));
1740 cpr->cp_raw_cons = 0;
1743 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1748 for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1749 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1750 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1751 struct bnxt_ring *ring = txr->tx_ring_struct;
1752 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1753 unsigned int idx = bp->rx_cp_nr_rings + i + 1;
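/*
 * Completion-ring slot layout, as implied by the "idx = i + 1" for RX
 * below and the final bnxt_free_cp_ring(bp, cpr, 0) call: slot 0 is the
 * default completion ring, slots 1..rx_cp_nr_rings are RX, and TX slots
 * follow.
 */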
1755 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1756 bnxt_hwrm_ring_free(bp, ring,
1757 HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1758 ring->fw_ring_id = INVALID_HW_RING_ID;
1759 memset(txr->tx_desc_ring, 0,
1760 txr->tx_ring_struct->ring_size *
1761 sizeof(*txr->tx_desc_ring));
1762 memset(txr->tx_buf_ring, 0,
1763 txr->tx_ring_struct->ring_size *
1764 sizeof(*txr->tx_buf_ring));
1768 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1769 bnxt_free_cp_ring(bp, cpr, idx);
1770 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1774 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1775 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1776 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1777 struct bnxt_ring *ring = rxr->rx_ring_struct;
1778 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1779 unsigned int idx = i + 1;
1781 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1782 bnxt_hwrm_ring_free(bp, ring,
1783 HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1784 ring->fw_ring_id = INVALID_HW_RING_ID;
1785 bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
1786 memset(rxr->rx_desc_ring, 0,
1787 rxr->rx_ring_struct->ring_size *
1788 sizeof(*rxr->rx_desc_ring));
1789 memset(rxr->rx_buf_ring, 0,
1790 rxr->rx_ring_struct->ring_size *
1791 sizeof(*rxr->rx_buf_ring));
1794 ring = rxr->ag_ring_struct;
1795 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1796 bnxt_hwrm_ring_free(bp, ring,
1797 HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1798 ring->fw_ring_id = INVALID_HW_RING_ID;
1799 memset(rxr->ag_buf_ring, 0,
1800 rxr->ag_ring_struct->ring_size *
1801 sizeof(*rxr->ag_buf_ring));
1803 bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID;
1805 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1806 bnxt_free_cp_ring(bp, cpr, idx);
1807 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
1808 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1812 /* Default completion ring */
1814 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1816 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1817 bnxt_free_cp_ring(bp, cpr, 0);
1818 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1825 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1830 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1831 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1838 void bnxt_free_hwrm_resources(struct bnxt *bp)
1840 /* Release the rte_malloc'd HWRM buffers */
1841 rte_free(bp->hwrm_cmd_resp_addr);
1842 rte_free(bp->hwrm_short_cmd_req_addr);
1843 bp->hwrm_cmd_resp_addr = NULL;
1844 bp->hwrm_short_cmd_req_addr = NULL;
1845 bp->hwrm_cmd_resp_dma_addr = 0;
1846 bp->hwrm_short_cmd_req_dma_addr = 0;
1849 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1851 struct rte_pci_device *pdev = bp->pdev;
1852 char type[RTE_MEMZONE_NAMESIZE];
1854 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
1855 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1856 bp->max_resp_len = HWRM_MAX_RESP_LEN;
1857 bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1858 rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1859 if (bp->hwrm_cmd_resp_addr == NULL)
1861 bp->hwrm_cmd_resp_dma_addr =
1862 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
1863 if (bp->hwrm_cmd_resp_dma_addr == 0) {
1865 "unable to map response address to physical memory\n");
1868 rte_spinlock_init(&bp->hwrm_lock);
1873 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1875 struct bnxt_filter_info *filter;
1878 STAILQ_FOREACH(filter, &vnic->filter, next) {
1879 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1880 rc = bnxt_hwrm_clear_em_filter(bp, filter);
1881 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1882 rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1884 rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1892 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1894 struct bnxt_filter_info *filter;
1895 struct rte_flow *flow;
1898 STAILQ_FOREACH(flow, &vnic->flow_list, next) {
1899 filter = flow->filter;
1900 PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
1901 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1902 rc = bnxt_hwrm_clear_em_filter(bp, filter);
1903 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1904 rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1906 rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1908 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
1916 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1918 struct bnxt_filter_info *filter;
1921 STAILQ_FOREACH(filter, &vnic->filter, next) {
1922 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1923 rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
1925 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1926 rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
1929 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
1937 void bnxt_free_tunnel_ports(struct bnxt *bp)
1939 if (bp->vxlan_port_cnt)
1940 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
1941 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
1943 if (bp->geneve_port_cnt)
1944 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
1945 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
1946 bp->geneve_port = 0;
1949 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1953 if (bp->vnic_info == NULL)
1957 * Cleanup VNICs in reverse order, to make sure the L2 filter
1958 * from vnic0 is last to be cleaned up.
1960 for (i = bp->nr_vnics - 1; i >= 0; i--) {
1961 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1963 bnxt_clear_hwrm_vnic_flows(bp, vnic);
1965 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1967 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1969 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
1971 bnxt_hwrm_vnic_free(bp, vnic);
1973 /* Ring resources */
1974 bnxt_free_all_hwrm_rings(bp);
1975 bnxt_free_all_hwrm_ring_grps(bp);
1976 bnxt_free_all_hwrm_stat_ctxs(bp);
1977 bnxt_free_tunnel_ports(bp);
1980 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1982 uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1984 if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1985 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1987 switch (conf_link_speed) {
1988 case ETH_LINK_SPEED_10M_HD:
1989 case ETH_LINK_SPEED_100M_HD:
1990 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1992 return hw_link_duplex;
1995 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
1997 return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
2000 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2002 uint16_t eth_link_speed = 0;
2004 if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2005 return ETH_LINK_SPEED_AUTONEG;
2007 switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2008 case ETH_LINK_SPEED_100M:
2009 case ETH_LINK_SPEED_100M_HD:
2011 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2013 case ETH_LINK_SPEED_1G:
2015 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2017 case ETH_LINK_SPEED_2_5G:
2019 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2021 case ETH_LINK_SPEED_10G:
2023 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2025 case ETH_LINK_SPEED_20G:
2027 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2029 case ETH_LINK_SPEED_25G:
2031 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2033 case ETH_LINK_SPEED_40G:
2035 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2037 case ETH_LINK_SPEED_50G:
2039 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2041 case ETH_LINK_SPEED_100G:
2043 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2047 "Unsupported link speed %d; default to AUTO\n",
2051 return eth_link_speed;
2054 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2055 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2056 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2057 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
2059 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2063 if (link_speed == ETH_LINK_SPEED_AUTONEG)
2066 if (link_speed & ETH_LINK_SPEED_FIXED) {
2067 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
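/*
 * A fixed-speed request must name exactly one speed: x & (x - 1) clears
 * the lowest set bit, so it is non-zero exactly when more than one speed
 * bit remains after masking off ETH_LINK_SPEED_FIXED.
 */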
2069 if (one_speed & (one_speed - 1)) {
2071 "Invalid advertised speeds (%u) for port %u\n",
2072 link_speed, port_id);
2075 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2077 "Unsupported advertised speed (%u) for port %u\n",
2078 link_speed, port_id);
2082 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2084 "Unsupported advertised speeds (%u) for port %u\n",
2085 link_speed, port_id);
2093 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2097 if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2098 if (bp->link_info.support_speeds)
2099 return bp->link_info.support_speeds;
2100 link_speed = BNXT_SUPPORTED_SPEEDS;
2103 if (link_speed & ETH_LINK_SPEED_100M)
2104 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2105 if (link_speed & ETH_LINK_SPEED_100M_HD)
2106 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2107 if (link_speed & ETH_LINK_SPEED_1G)
2108 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2109 if (link_speed & ETH_LINK_SPEED_2_5G)
2110 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2111 if (link_speed & ETH_LINK_SPEED_10G)
2112 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2113 if (link_speed & ETH_LINK_SPEED_20G)
2114 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2115 if (link_speed & ETH_LINK_SPEED_25G)
2116 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2117 if (link_speed & ETH_LINK_SPEED_40G)
2118 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2119 if (link_speed & ETH_LINK_SPEED_50G)
2120 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2121 if (link_speed & ETH_LINK_SPEED_100G)
2122 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2126 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2128 uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2130 switch (hw_link_speed) {
2131 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2132 eth_link_speed = ETH_SPEED_NUM_100M;
2134 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2135 eth_link_speed = ETH_SPEED_NUM_1G;
2137 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2138 eth_link_speed = ETH_SPEED_NUM_2_5G;
2140 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2141 eth_link_speed = ETH_SPEED_NUM_10G;
2143 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2144 eth_link_speed = ETH_SPEED_NUM_20G;
2146 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2147 eth_link_speed = ETH_SPEED_NUM_25G;
2149 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2150 eth_link_speed = ETH_SPEED_NUM_40G;
2152 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2153 eth_link_speed = ETH_SPEED_NUM_50G;
2155 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2156 eth_link_speed = ETH_SPEED_NUM_100G;
2158 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2160 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2164 return eth_link_speed;
2167 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2169 uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2171 switch (hw_link_duplex) {
2172 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2173 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2174 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2176 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2177 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2180 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2184 return eth_link_duplex;
2187 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2190 struct bnxt_link_info *link_info = &bp->link_info;
2192 rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2195 "Get link config failed with rc %d\n", rc);
2198 if (link_info->link_speed)
2200 bnxt_parse_hw_link_speed(link_info->link_speed);
2202 link->link_speed = ETH_SPEED_NUM_NONE;
2203 link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2204 link->link_status = link_info->link_up;
2205 link->link_autoneg = link_info->auto_mode ==
2206 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2207 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2212 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2215 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2216 struct bnxt_link_info link_req;
2217 uint16_t speed, autoneg;
2219 if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2222 rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2223 bp->eth_dev->data->port_id);
2227 memset(&link_req, 0, sizeof(link_req));
2228 link_req.link_up = link_up;
2232 autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2233 speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2234 link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2235 /* Autoneg can be done only when the FW allows it */
2236 if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
2237 bp->link_info.force_link_speed)) {
2238 link_req.phy_flags |=
2239 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2240 link_req.auto_link_speed_mask =
2241 bnxt_parse_eth_link_speed_mask(bp,
2242 dev_conf->link_speeds);
2244 if (bp->link_info.phy_type ==
2245 HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2246 bp->link_info.phy_type ==
2247 HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2248 bp->link_info.media_type ==
2249 HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2250 PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2254 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2255 /* If user wants a particular speed try that first. */
2257 link_req.link_speed = speed;
2258 else if (bp->link_info.force_link_speed)
2259 link_req.link_speed = bp->link_info.force_link_speed;
2261 link_req.link_speed = bp->link_info.auto_link_speed;
2263 link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2264 link_req.auto_pause = bp->link_info.auto_pause;
2265 link_req.force_pause = bp->link_info.force_pause;
2268 rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2271 "Set link config failed with rc %d\n", rc);
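/*
 * Illustrative usage sketch, not part of the driver: forcing a fixed
 * link speed goes through the ethdev configuration that this function
 * reads. BNXT_DOC_EXAMPLE is hypothetical and assumed undefined.
 */
#ifdef BNXT_DOC_EXAMPLE
static int example_force_25g(struct bnxt *bp)
{
	struct rte_eth_conf *conf = &bp->eth_dev->data->dev_conf;

	/* A fixed 25G request; bnxt_valid_link_speed() re-validates it. */
	conf->link_speeds = ETH_LINK_SPEED_25G | ETH_LINK_SPEED_FIXED;
	return bnxt_set_hwrm_link_config(bp, true);
}
#endif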
int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t flags;
	int rc = 0;

	HWRM_PREP(req, FUNC_QCFG);
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	/* Hard-coded 0xfff VLAN ID mask */
	bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
	flags = rte_le_to_cpu_16(resp->flags);
	if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
		bp->flags |= BNXT_FLAG_MULTI_HOST;

	switch (resp->port_partition_type) {
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	default:
		bp->port_partition_type = 0;
		break;
	}

	HWRM_UNLOCK();

	return rc;
}
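/*
 * Mirror a FUNC_CFG request into the layout of a FUNC_QCAPS response.
 * Used as a fallback when querying a VF fails: the resource counts we
 * attempted to configure stand in for the queried capabilities, and
 * fields with no FUNC_CFG counterpart are zeroed.
 */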
2315 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2316 struct hwrm_func_qcaps_output *qcaps)
2318 qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2319 memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2320 sizeof(qcaps->mac_address));
2321 qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2322 qcaps->max_rx_rings = fcfg->num_rx_rings;
2323 qcaps->max_tx_rings = fcfg->num_tx_rings;
2324 qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2325 qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2327 qcaps->first_vf_id = 0;
2328 qcaps->max_vnics = fcfg->num_vnics;
2329 qcaps->max_decap_records = 0;
2330 qcaps->max_encap_records = 0;
2331 qcaps->max_tx_wm_flows = 0;
2332 qcaps->max_tx_em_flows = 0;
2333 qcaps->max_rx_wm_flows = 0;
2334 qcaps->max_rx_em_flows = 0;
2335 qcaps->max_flow_id = 0;
2336 qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2337 qcaps->max_sp_tx_rings = 0;
2338 qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2341 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2343 struct hwrm_func_cfg_input req = {0};
2344 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2347 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2348 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2349 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2350 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2351 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2352 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2353 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2354 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2355 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2356 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2357 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2358 req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2359 req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2360 ETHER_CRC_LEN + VLAN_TAG_SIZE);
2361 req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2362 req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2363 req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2364 req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2365 req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2366 req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2367 req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2368 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2369 req.fid = rte_cpu_to_le_16(0xffff);
2371 HWRM_PREP(req, FUNC_CFG);
2373 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2375 HWRM_CHECK_RESULT();
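/*
 * Build the FUNC_CFG request applied to each VF. Resources are split
 * evenly between the PF and all VFs, hence the "/ (num_vfs + 1)"
 * divisors, while the VNIC count is pinned to 1 per VF (no VMDq/RFS).
 */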
2381 static void populate_vf_func_cfg_req(struct bnxt *bp,
2382 struct hwrm_func_cfg_input *req,
2385 req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2386 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2387 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2388 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2389 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2390 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2391 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2392 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2393 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2394 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2396 req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2397 ETHER_CRC_LEN + VLAN_TAG_SIZE);
2398 req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2399 ETHER_CRC_LEN + VLAN_TAG_SIZE);
2400 req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2402 req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2403 req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2405 req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2406 req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2407 req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2408 /* TODO: For now, do not support VMDq/RFS on VFs. */
2409 req->num_vnics = rte_cpu_to_le_16(1);
2410 req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
static void add_random_mac_if_needed(struct bnxt *bp,
				     struct hwrm_func_cfg_input *cfg_req,
				     int vf)
{
	struct ether_addr mac;

	if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
		return;

	if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00", 6) == 0) {
		cfg_req->enables |=
		rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
		eth_random_addr(cfg_req->dflt_mac_addr);
		bp->pf.vf_info[vf].random_mac = true;
	} else {
		memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
	}
}
static void reserve_resources_from_vf(struct bnxt *bp,
				      struct hwrm_func_cfg_input *cfg_req,
				      int vf)
{
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	/* Get the actual allocated values now */
	HWRM_PREP(req, FUNC_QCAPS);
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc) {
		PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
		copy_func_cfg_to_qcaps(cfg_req, resp);
	} else if (resp->error_code) {
		rc = rte_le_to_cpu_16(resp->error_code);
		PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
		copy_func_cfg_to_qcaps(cfg_req, resp);
	}

	bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
	bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
	/*
	 * TODO: While not supporting VMDq with VFs, max_vnics is always
	 * forced to 1 in this case
	 */
	//bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
	bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);

	HWRM_UNLOCK();
}
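/*
 * Return the default VLAN currently programmed on a VF (from the
 * FUNC_QCFG response), or -1 on HWRM failure.
 */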
int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	/* Query the current default VLAN for the VF */
	HWRM_PREP(req, FUNC_QCFG);
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	if (rc) {
		PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
		return -1;
	} else if (resp->error_code) {
		rc = rte_le_to_cpu_16(resp->error_code);
		PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
		return -1;
	}
	rc = rte_le_to_cpu_16(resp->vlan);

	HWRM_UNLOCK();

	return rc;
}
2496 static int update_pf_resource_max(struct bnxt *bp)
2498 struct hwrm_func_qcfg_input req = {0};
2499 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2502 /* And copy the allocated numbers into the pf struct */
2503 HWRM_PREP(req, FUNC_QCFG);
2504 req.fid = rte_cpu_to_le_16(0xffff);
2505 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2506 HWRM_CHECK_RESULT();
2508 /* Only TX ring value reflects actual allocation? TODO */
2509 bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2510 bp->pf.evb_mode = resp->evb_mode;
int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
{
	int rc;

	if (!BNXT_PF(bp)) {
		PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
		return -EINVAL;
	}

	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc)
		return rc;

	bp->pf.func_cfg_flags &=
		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
	bp->pf.func_cfg_flags |=
		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
	return rc;
}
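/*
 * Bring up num_vfs VFs: temporarily shrink the PF to one TX ring,
 * register a buffer for forwarded VF HWRM requests, push a FUNC_CFG to
 * every VF while subtracting its resources from the PF totals, then
 * reconfigure the PF with whatever remains.
 */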
int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	size_t sz, req_buf_sz;
	int rc = 0, i;

	if (!BNXT_PF(bp)) {
		PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
		return -EINVAL;
	}

	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc)
		return rc;

	bp->pf.active_vfs = num_vfs;
	/*
	 * First, configure the PF to only use one TX ring. This ensures that
	 * there are enough rings for all VFs.
	 *
	 * If we don't do this, when we call func_alloc() later, we will lock
	 * extra rings to the PF that won't be available during func_cfg() of
	 * the VFs.
	 *
	 * This has been fixed with firmware versions above 20.6.54
	 */
2570 bp->pf.func_cfg_flags &=
2571 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2572 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2573 bp->pf.func_cfg_flags |=
2574 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2575 rc = bnxt_hwrm_pf_func_cfg(bp, 1);
	/*
	 * Now, create and register a buffer to hold forwarded VF requests
	 */
	req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
	bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
		page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
	if (bp->pf.vf_req_buf == NULL) {
		rc = -ENOMEM;
		goto error_free;
	}
	for (sz = 0; sz < req_buf_sz; sz += getpagesize())
		rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
	for (i = 0; i < num_vfs; i++)
		bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
					(i * HWRM_MAX_REQ_LEN);

	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		goto error_free;
2599 populate_vf_func_cfg_req(bp, &req, num_vfs);
2601 bp->pf.active_vfs = 0;
	for (i = 0; i < num_vfs; i++) {
		add_random_mac_if_needed(bp, &req, i);

		HWRM_PREP(req, FUNC_CFG);
		req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
		req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

		/* Clear enable flag for next pass */
		req.enables &= ~rte_cpu_to_le_32(
				HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);

		if (rc || resp->error_code) {
			PMD_DRV_LOG(ERR,
				"Failed to initialize VF %d\n", i);
			PMD_DRV_LOG(ERR,
				"Not all VFs available. (%d, %d)\n",
				rc, resp->error_code);
			HWRM_UNLOCK();
			break;
		}

		HWRM_UNLOCK();

		reserve_resources_from_vf(bp, &req, i);
		bp->pf.active_vfs++;
		bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
	}
	/*
	 * Now configure the PF to use "the rest" of the resources. We're
	 * using STD_TX_RING_MODE here, which will limit the TX rings. This
	 * allows QoS to function properly; without it, PF rings would break
	 * bandwidth settings.
	 */
	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
	if (rc)
		goto error_free;

	rc = update_pf_resource_max(bp);
	return rc;

error_free:
	bnxt_hwrm_func_buf_unrgtr(bp);
	return rc;
}
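/*
 * Illustrative sketch, not part of the driver: the VF request buffer
 * above is sized as num_vfs * HWRM_MAX_REQ_LEN and aligned to the
 * power-of-two returned by page_roundup(); bnxt_hwrm_func_buf_rgtr()
 * later encodes the same size as a log2 enum via page_getenum().
 * BNXT_DOC_EXAMPLE is hypothetical and assumed undefined.
 */
#ifdef BNXT_DOC_EXAMPLE
static int example_vf_buf_align(int num_vfs)
{
	/* e.g. a few small requests round up to the next power of two */
	return page_roundup(num_vfs * HWRM_MAX_REQ_LEN);
}
#endif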
2652 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2654 struct hwrm_func_cfg_input req = {0};
2655 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2658 HWRM_PREP(req, FUNC_CFG);
2660 req.fid = rte_cpu_to_le_16(0xffff);
2661 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2662 req.evb_mode = bp->pf.evb_mode;
2664 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2665 HWRM_CHECK_RESULT();
2671 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2672 uint8_t tunnel_type)
2674 struct hwrm_tunnel_dst_port_alloc_input req = {0};
2675 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2678 HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
2679 req.tunnel_type = tunnel_type;
2680 req.tunnel_dst_port_val = port;
2681 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2682 HWRM_CHECK_RESULT();
	switch (tunnel_type) {
	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
		bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
		bp->vxlan_port = port;
		break;
	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
		bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
		bp->geneve_port = port;
		break;
	default:
		break;
	}
2702 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2703 uint8_t tunnel_type)
2705 struct hwrm_tunnel_dst_port_free_input req = {0};
2706 struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2709 HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
2711 req.tunnel_type = tunnel_type;
2712 req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2713 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2715 HWRM_CHECK_RESULT();
2721 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2724 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2725 struct hwrm_func_cfg_input req = {0};
2728 HWRM_PREP(req, FUNC_CFG);
2730 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2731 req.flags = rte_cpu_to_le_32(flags);
2732 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2734 HWRM_CHECK_RESULT();
2740 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2742 uint32_t *flag = flagp;
2744 vnic->flags = *flag;
2747 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2749 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
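/*
 * Register the VF request buffer allocated in bnxt_hwrm_allocate_vfs()
 * with firmware, so HWRM commands issued by VFs can be forwarded to the
 * PF driver for processing.
 */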
2752 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2755 struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2756 struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2758 HWRM_PREP(req, FUNC_BUF_RGTR);
2760 req.req_buf_num_pages = rte_cpu_to_le_16(1);
2761 req.req_buf_page_size = rte_cpu_to_le_16(
2762 page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2763 req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2764 req.req_buf_page_addr[0] =
2765 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
	if (req.req_buf_page_addr[0] == 0) {
		HWRM_UNLOCK();
		PMD_DRV_LOG(ERR,
			"unable to map buffer address to physical memory\n");
		return -ENOMEM;
	}
2772 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2774 HWRM_CHECK_RESULT();
2780 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2783 struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2784 struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2786 HWRM_PREP(req, FUNC_BUF_UNRGTR);
2788 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2790 HWRM_CHECK_RESULT();
2796 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2798 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2799 struct hwrm_func_cfg_input req = {0};
2802 HWRM_PREP(req, FUNC_CFG);
2804 req.fid = rte_cpu_to_le_16(0xffff);
2805 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2806 req.enables = rte_cpu_to_le_32(
2807 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2808 req.async_event_cr = rte_cpu_to_le_16(
2809 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2810 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2812 HWRM_CHECK_RESULT();
2818 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2820 struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2821 struct hwrm_func_vf_cfg_input req = {0};
2824 HWRM_PREP(req, FUNC_VF_CFG);
2826 req.enables = rte_cpu_to_le_32(
2827 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2828 req.async_event_cr = rte_cpu_to_le_16(
2829 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2830 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2832 HWRM_CHECK_RESULT();
2838 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2840 struct hwrm_func_cfg_input req = {0};
2841 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2842 uint16_t dflt_vlan, fid;
2843 uint32_t func_cfg_flags;
2846 HWRM_PREP(req, FUNC_CFG);
2849 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2850 fid = bp->pf.vf_info[vf].fid;
2851 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2853 fid = rte_cpu_to_le_16(0xffff);
2854 func_cfg_flags = bp->pf.func_cfg_flags;
2855 dflt_vlan = bp->vlan;
2858 req.flags = rte_cpu_to_le_32(func_cfg_flags);
2859 req.fid = rte_cpu_to_le_16(fid);
2860 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2861 req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2863 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2865 HWRM_CHECK_RESULT();
2871 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2872 uint16_t max_bw, uint16_t enables)
2874 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2875 struct hwrm_func_cfg_input req = {0};
2878 HWRM_PREP(req, FUNC_CFG);
2880 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2881 req.enables |= rte_cpu_to_le_32(enables);
2882 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2883 req.max_bw = rte_cpu_to_le_32(max_bw);
2884 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2886 HWRM_CHECK_RESULT();
2892 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2894 struct hwrm_func_cfg_input req = {0};
2895 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2898 HWRM_PREP(req, FUNC_CFG);
2900 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2901 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2902 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2903 req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
2905 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2907 HWRM_CHECK_RESULT();
2913 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2914 void *encaped, size_t ec_size)
2917 struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2918 struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
	if (ec_size > sizeof(req.encap_request))
		return -1;
2923 HWRM_PREP(req, REJECT_FWD_RESP);
2925 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2926 memcpy(req.encap_request, encaped, ec_size);
2928 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2930 HWRM_CHECK_RESULT();
2936 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2937 struct ether_addr *mac)
2939 struct hwrm_func_qcfg_input req = {0};
2940 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2943 HWRM_PREP(req, FUNC_QCFG);
2945 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2946 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2948 HWRM_CHECK_RESULT();
2950 memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2957 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2958 void *encaped, size_t ec_size)
2961 struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2962 struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
	if (ec_size > sizeof(req.encap_request))
		return -1;
2967 HWRM_PREP(req, EXEC_FWD_RESP);
2969 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2970 memcpy(req.encap_request, encaped, ec_size);
2972 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2974 HWRM_CHECK_RESULT();
2980 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
2981 struct rte_eth_stats *stats, uint8_t rx)
2984 struct hwrm_stat_ctx_query_input req = {.req_type = 0};
2985 struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
2987 HWRM_PREP(req, STAT_CTX_QUERY);
2989 req.stat_ctx_id = rte_cpu_to_le_32(cid);
2991 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2993 HWRM_CHECK_RESULT();
2996 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2997 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2998 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2999 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3000 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3001 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3002 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3003 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3005 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3006 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3007 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3008 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3009 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3010 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3011 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
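/*
 * Request a firmware DMA of the port-level RX/TX statistics into the
 * host buffers at hw_rx_port_stats_map/hw_tx_port_stats_map. Only
 * meaningful when BNXT_FLAG_PORT_STATS is set.
 */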
3020 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3022 struct hwrm_port_qstats_input req = {0};
3023 struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3024 struct bnxt_pf_info *pf = &bp->pf;
3027 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
3030 HWRM_PREP(req, PORT_QSTATS);
3032 req.port_id = rte_cpu_to_le_16(pf->port_id);
3033 req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3034 req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3035 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3037 HWRM_CHECK_RESULT();
3043 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3045 struct hwrm_port_clr_stats_input req = {0};
3046 struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3047 struct bnxt_pf_info *pf = &bp->pf;
3050 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
3053 HWRM_PREP(req, PORT_CLR_STATS);
3055 req.port_id = rte_cpu_to_le_16(pf->port_id);
3056 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3058 HWRM_CHECK_RESULT();
3064 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3066 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3067 struct hwrm_port_led_qcaps_input req = {0};
3073 HWRM_PREP(req, PORT_LED_QCAPS);
3074 req.port_id = bp->pf.port_id;
3075 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3077 HWRM_CHECK_RESULT();
	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
		unsigned int i;

		bp->num_leds = resp->num_leds;
		memcpy(bp->leds, &resp->led0_id,
			sizeof(bp->leds[0]) * bp->num_leds);
		for (i = 0; i < bp->num_leds; i++) {
			struct bnxt_led_info *led = &bp->leds[i];
			uint16_t caps = led->led_state_caps;

			if (!led->led_group_id ||
			    !BNXT_LED_ALT_BLINK_CAP(caps)) {
				bp->num_leds = 0;
				break;
			}
		}
	}

	HWRM_UNLOCK();

	return rc;
}
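/*
 * Set every LED discovered by bnxt_hwrm_port_led_qcaps() either back to
 * its default state or to a 500ms alternating blink, as used for port
 * identification.
 */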
3103 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3105 struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3106 struct hwrm_port_led_cfg_input req = {0};
3107 struct bnxt_led_cfg *led_cfg;
3108 uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3109 uint16_t duration = 0;
3112 if (!bp->num_leds || BNXT_VF(bp))
3115 HWRM_PREP(req, PORT_LED_CFG);
3118 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3119 duration = rte_cpu_to_le_16(500);
3121 req.port_id = bp->pf.port_id;
3122 req.num_leds = bp->num_leds;
3123 led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3124 for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3125 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3126 led_cfg->led_id = bp->leds[i].led_id;
3127 led_cfg->led_state = led_state;
3128 led_cfg->led_blink_on = duration;
3129 led_cfg->led_blink_off = duration;
3130 led_cfg->led_group_id = bp->leds[i].led_group_id;
3133 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3135 HWRM_CHECK_RESULT();
3141 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3145 struct hwrm_nvm_get_dir_info_input req = {0};
3146 struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3148 HWRM_PREP(req, NVM_GET_DIR_INFO);
3150 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3152 HWRM_CHECK_RESULT();
3156 *entries = rte_le_to_cpu_32(resp->entries);
3157 *length = rte_le_to_cpu_32(resp->entry_length);
3162 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3165 uint32_t dir_entries;
3166 uint32_t entry_length;
3169 rte_iova_t dma_handle;
3170 struct hwrm_nvm_get_dir_entries_input req = {0};
3171 struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3173 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3177 *data++ = dir_entries;
3178 *data++ = entry_length;
3180 memset(data, 0xff, len);
3182 buflen = dir_entries * entry_length;
	buf = rte_malloc("nvm_dir", buflen, 0);
	if (buf == NULL)
		return -ENOMEM;
	rte_mem_lock_page(buf);
3187 dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == 0) {
		PMD_DRV_LOG(ERR,
			"unable to map response address to physical memory\n");
		rte_free(buf);
		return -ENOMEM;
	}
3193 HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
3194 req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3195 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3197 HWRM_CHECK_RESULT();
3201 memcpy(data, buf, len > buflen ? buflen : len);
3208 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3209 uint32_t offset, uint32_t length,
3214 rte_iova_t dma_handle;
3215 struct hwrm_nvm_read_input req = {0};
3216 struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
	buf = rte_malloc("nvm_item", length, 0);
	if (buf == NULL)
		return -ENOMEM;
	rte_mem_lock_page(buf);
3223 dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == 0) {
		PMD_DRV_LOG(ERR,
			"unable to map response address to physical memory\n");
		rte_free(buf);
		return -ENOMEM;
	}
3229 HWRM_PREP(req, NVM_READ);
3230 req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3231 req.dir_idx = rte_cpu_to_le_16(index);
3232 req.offset = rte_cpu_to_le_32(offset);
3233 req.len = rte_cpu_to_le_32(length);
3234 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3235 HWRM_CHECK_RESULT();
3238 memcpy(data, buf, length);
3244 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3247 struct hwrm_nvm_erase_dir_entry_input req = {0};
3248 struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3250 HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
3251 req.dir_idx = rte_cpu_to_le_16(index);
3252 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3253 HWRM_CHECK_RESULT();
3260 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3261 uint16_t dir_ordinal, uint16_t dir_ext,
3262 uint16_t dir_attr, const uint8_t *data,
3266 struct hwrm_nvm_write_input req = {0};
3267 struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3268 rte_iova_t dma_handle;
3271 HWRM_PREP(req, NVM_WRITE);
3273 req.dir_type = rte_cpu_to_le_16(dir_type);
3274 req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3275 req.dir_ext = rte_cpu_to_le_16(dir_ext);
3276 req.dir_attr = rte_cpu_to_le_16(dir_attr);
3277 req.dir_data_length = rte_cpu_to_le_32(data_len);
	buf = rte_malloc("nvm_write", data_len, 0);
	if (buf == NULL) {
		HWRM_UNLOCK();
		return -ENOMEM;
	}
	rte_mem_lock_page(buf);
3284 dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == 0) {
		HWRM_UNLOCK();
		PMD_DRV_LOG(ERR,
			"unable to map response address to physical memory\n");
		rte_free(buf);
		return -ENOMEM;
	}
3290 memcpy(buf, data, data_len);
3291 req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3293 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3295 HWRM_CHECK_RESULT();
3303 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3305 uint32_t *count = cbdata;
3307 *count = *count + 1;
3310 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3311 struct bnxt_vnic_info *vnic __rte_unused)
int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
{
	uint32_t count = 0;

	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
	    &count, bnxt_vnic_count_hwrm_stub);

	return count;
}
static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
					uint16_t *vnic_ids)
{
	struct hwrm_func_vf_vnic_ids_query_input req = {0};
	struct hwrm_func_vf_vnic_ids_query_output *resp =
						bp->hwrm_cmd_resp_addr;
	int rc;

	/* First query all VNIC ids */
	HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);

	req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
	req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
	req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));

	if (req.vnic_id_tbl_addr == 0) {
		HWRM_UNLOCK();
		PMD_DRV_LOG(ERR,
		"unable to map VNIC ID table address to physical memory\n");
		return -ENOMEM;
	}
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	if (rc) {
		HWRM_UNLOCK();
		PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
		return -1;
	} else if (resp->error_code) {
		rc = rte_le_to_cpu_16(resp->error_code);
		HWRM_UNLOCK();
		PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
		return -1;
	}
	rc = rte_le_to_cpu_32(resp->vnic_id_cnt);

	HWRM_UNLOCK();

	return rc;
}
3366 * This function queries the VNIC IDs for a specified VF. It then calls
3367 * the vnic_cb to update the necessary field in vnic_info with cbdata.
3368 * Then it calls the hwrm_cb function to program this new vnic configuration.
3370 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3371 void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3372 int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3374 struct bnxt_vnic_info vnic;
3376 int i, num_vnic_ids;
3381 /* First query all VNIC ids */
3382 vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3383 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3384 RTE_CACHE_LINE_SIZE);
	if (vnic_ids == NULL) {
		rc = -ENOMEM;
		return rc;
	}

	for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
		rte_mem_lock_page(((char *)vnic_ids) + sz);
3392 num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3394 if (num_vnic_ids < 0)
3395 return num_vnic_ids;
	/* Retrieve each VNIC, apply the caller's update, then reprogram it */
	for (i = 0; i < num_vnic_ids; i++) {
		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
		if (rc)
			break;
		if (vnic.mru <= 4)	/* Indicates unallocated */
			continue;

		vnic_cb(&vnic, cbdata);

		rc = hwrm_cb(bp, &vnic);
		if (rc)
			break;
	}

	rte_free(vnic_ids);

	return rc;
}
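/*
 * Illustrative usage sketch, not part of the driver: bnxt_vf_vnic_count()
 * above shows the read-only pattern; a callback pair can also rewrite a
 * field on every VNIC of a VF and push it back, e.g. the rx mask.
 * BNXT_DOC_EXAMPLE is hypothetical and assumed undefined.
 */
#ifdef BNXT_DOC_EXAMPLE
static int example_set_vf_rxmask(struct bnxt *bp, uint16_t vf,
				 uint32_t flags)
{
	return bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
			vf_vnic_set_rxmask_cb, &flags,
			bnxt_set_rx_mask_no_vlan);
}
#endif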
3420 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3423 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3424 struct hwrm_func_cfg_input req = {0};
3427 HWRM_PREP(req, FUNC_CFG);
3429 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3430 req.enables |= rte_cpu_to_le_32(
3431 HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3432 req.vlan_antispoof_mode = on ?
3433 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3434 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3435 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3437 HWRM_CHECK_RESULT();
3443 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3445 struct bnxt_vnic_info vnic;
3448 int num_vnic_ids, i;
3452 vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3453 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3454 RTE_CACHE_LINE_SIZE);
	if (vnic_ids == NULL) {
		rc = -ENOMEM;
		return rc;
	}

	for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
		rte_mem_lock_page(((char *)vnic_ids) + sz);

	rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
	if (rc <= 0)
		goto exit;
	num_vnic_ids = rc;

	/*
	 * Loop through to find the default VNIC ID.
	 * TODO: The easier way would be to obtain the resp->dflt_vnic_id
	 * by sending the hwrm_func_qcfg command to the firmware.
	 */
	for (i = 0; i < num_vnic_ids; i++) {
		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
					bp->pf.first_vf_id + vf);
		if (rc)
			goto exit;
		if (vnic.func_default) {
			rte_free(vnic_ids);
			return vnic.fw_vnic_id;
		}
	}
	/* Could not find a default VNIC. */
	PMD_DRV_LOG(ERR, "No default VNIC\n");
exit:
	rte_free(vnic_ids);
	return -1;
}
3492 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3494 struct bnxt_filter_info *filter)
3497 struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3498 struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3499 uint32_t enables = 0;
3501 if (filter->fw_em_filter_id != UINT64_MAX)
3502 bnxt_hwrm_clear_em_filter(bp, filter);
3504 HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
3506 req.flags = rte_cpu_to_le_32(filter->flags);
3508 enables = filter->enables |
3509 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3510 req.dst_id = rte_cpu_to_le_16(dst_id);
	if (filter->ip_addr_type) {
		req.ip_addr_type = filter->ip_addr_type;
		enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
	}
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
		memcpy(req.src_macaddr, filter->src_macaddr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
		memcpy(req.dst_macaddr, filter->dst_macaddr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
		req.ovlan_vid = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
		req.ivlan_vid = filter->l2_ivlan;
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
		req.ip_protocol = filter->ip_protocol;
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
		req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
		req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
		req.src_port = rte_cpu_to_be_16(filter->src_port);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
		req.dst_port = rte_cpu_to_be_16(filter->dst_port);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
		req.mirror_vnic_id = filter->mirror_vnic_id;

	req.enables = rte_cpu_to_le_32(enables);
3557 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3559 HWRM_CHECK_RESULT();
3561 filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3567 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3570 struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3571 struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
	if (filter->fw_em_filter_id == UINT64_MAX)
		return 0;

	PMD_DRV_LOG(DEBUG, "Clear EM filter\n");
3577 HWRM_PREP(req, CFA_EM_FLOW_FREE);
3579 req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3581 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3583 HWRM_CHECK_RESULT();
3586 filter->fw_em_filter_id = -1;
3587 filter->fw_l2_filter_id = -1;
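/*
 * Install a CFA n-tuple filter pointing at dst_id. Any previously
 * installed instance is freed first; on success the firmware handle is
 * cached in filter->fw_ntuple_filter_id.
 */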
3592 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3594 struct bnxt_filter_info *filter)
3597 struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3598 struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3599 bp->hwrm_cmd_resp_addr;
3600 uint32_t enables = 0;
3602 if (filter->fw_ntuple_filter_id != UINT64_MAX)
3603 bnxt_hwrm_clear_ntuple_filter(bp, filter);
3605 HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
3607 req.flags = rte_cpu_to_le_32(filter->flags);
3609 enables = filter->enables |
3610 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3611 req.dst_id = rte_cpu_to_le_16(dst_id);
	if (filter->ip_addr_type) {
		req.ip_addr_type = filter->ip_addr_type;
		enables |=
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
	}
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
		memcpy(req.src_macaddr, filter->src_macaddr,
		       ETHER_ADDR_LEN);
	//if (enables &
	//    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
	//	memcpy(req.dst_macaddr, filter->dst_macaddr,
	//	       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
		req.ip_protocol = filter->ip_protocol;
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
		req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
		req.src_ipaddr_mask[0] =
			rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
		req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
		req.dst_ipaddr_mask[0] =
			rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
		req.src_port = rte_cpu_to_le_16(filter->src_port);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
		req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
		req.dst_port = rte_cpu_to_le_16(filter->dst_port);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
		req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
		req.mirror_vnic_id = filter->mirror_vnic_id;

	req.enables = rte_cpu_to_le_32(enables);
3668 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3670 HWRM_CHECK_RESULT();
3672 filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3678 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3679 struct bnxt_filter_info *filter)
3682 struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3683 struct hwrm_cfa_ntuple_filter_free_output *resp =
3684 bp->hwrm_cmd_resp_addr;
	if (filter->fw_ntuple_filter_id == UINT64_MAX)
		return 0;
3689 HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
3691 req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3693 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3695 HWRM_CHECK_RESULT();
3698 filter->fw_ntuple_filter_id = -1;
3699 filter->fw_l2_filter_id = UINT64_MAX;
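/*
 * Program the RSS redirection table of a VNIC by spreading its valid
 * ring group IDs across all HW_HASH_INDEX_SIZE slots.
 */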
int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	unsigned int rss_idx, fw_idx, i;

	if (vnic->rss_table && vnic->hash_type) {
		/*
		 * Fill the RSS hash & redirection table with
		 * ring group ids for all VNICs
		 */
		for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
			rss_idx++, fw_idx++) {
			for (i = 0; i < bp->rx_cp_nr_rings; i++) {
				fw_idx %= bp->rx_cp_nr_rings;
				if (vnic->fw_grp_ids[fw_idx] !=
				    INVALID_HW_RING_ID)
					break;
				fw_idx++;
			}
			if (i == bp->rx_cp_nr_rings)
				return 0;
			vnic->rss_table[rss_idx] =
				vnic->fw_grp_ids[fw_idx];
		}
		return bnxt_hwrm_vnic_rss_cfg(bp, vnic);