1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2018 Broadcom
8 #include <rte_byteorder.h>
9 #include <rte_common.h>
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_memzone.h>
13 #include <rte_version.h>
17 #include "bnxt_filter.h"
18 #include "bnxt_hwrm.h"
21 #include "bnxt_ring.h"
24 #include "bnxt_vnic.h"
25 #include "hsi_struct_def_dpdk.h"
29 #define HWRM_CMD_TIMEOUT 10000
30 #define HWRM_SPEC_CODE_1_8_3 0x10803
31 #define HWRM_VERSION_1_9_1 0x10901
33 struct bnxt_plcmodes_cfg {
35 uint16_t jumbo_thresh;
37 uint16_t hds_threshold;
40 static int page_getenum(size_t size)
56 PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
57 return sizeof(void *) * 8 - 1;
60 static int page_roundup(size_t size)
62 return 1 << page_getenum(size);
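/*
 * Worked example (assuming page_getenum() returns the smallest n with
 * (1 << n) >= size, per its elided body): page_roundup(6000) == 8192,
 * i.e. sizes are rounded up to the next power of two.
 */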
66 * HWRM Functions (sent to HWRM)
67 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
68 * fails (i.e. a timeout), and a positive, non-zero HWRM error code if the
69 * ChiMP fails the command.
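/*
 * Illustrative note (not part of the original source): the two failure
 * modes are distinguishable at the call site, e.g.:
 *
 *	rc = bnxt_hwrm_func_reset(bp);
 *	if (rc == -1)
 *		... bnxt_hwrm_send_message() timed out ...
 *	else if (rc > 0)
 *		... HWRM error code reported by the ChiMP ...
 */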
72 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
76 struct input *req = msg;
77 struct output *resp = bp->hwrm_cmd_resp_addr;
81 uint16_t max_req_len = bp->max_req_len;
82 struct hwrm_short_input short_input = { 0 };
84 if (bp->flags & BNXT_FLAG_SHORT_CMD) {
85 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
87 memset(short_cmd_req, 0, bp->max_req_len);
88 memcpy(short_cmd_req, req, msg_len);
90 short_input.req_type = rte_cpu_to_le_16(req->req_type);
91 short_input.signature = rte_cpu_to_le_16(
92 HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
93 short_input.size = rte_cpu_to_le_16(msg_len);
94 short_input.req_addr =
95 rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);
97 data = (uint32_t *)&short_input;
98 msg_len = sizeof(short_input);
100 /* Sync memory write before updating doorbell */
103 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
106 /* Write request msg to hwrm channel */
107 for (i = 0; i < msg_len; i += 4) {
108 bar = (uint8_t *)bp->bar0 + i;
109 rte_write32(*data, bar);
113 /* Zero the rest of the request space */
114 for (; i < max_req_len; i += 4) {
115 bar = (uint8_t *)bp->bar0 + i;
119 /* Ring channel doorbell */
120 bar = (uint8_t *)bp->bar0 + 0x100;
123 /* Poll for the valid bit */
124 for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
125 /* Sanity check on the resp->resp_len */
127 if (resp->resp_len && resp->resp_len <=
129 /* Last byte of resp contains the valid key */
130 valid = (uint8_t *)resp + resp->resp_len - 1;
131 if (*valid == HWRM_RESP_VALID_KEY)
137 if (i >= HWRM_CMD_TIMEOUT) {
138 PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
149 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
150 * spinlock, and does initial processing.
152 * HWRM_CHECK_RESULT() returns from the enclosing function on failure,
153 * and releases the spinlock only when it does so. If the function does not
154 * use the regular int return codes, HWRM_CHECK_RESULT() should not be used
155 * directly; rather, it should be copied and modified to suit the function.
157 * HWRM_UNLOCK() must be called after all response processing is completed.
159 #define HWRM_PREP(req, type) do { \
160 rte_spinlock_lock(&bp->hwrm_lock); \
161 memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
162 req.req_type = rte_cpu_to_le_16(HWRM_##type); \
163 req.cmpl_ring = rte_cpu_to_le_16(-1); \
164 req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
165 req.target_id = rte_cpu_to_le_16(0xffff); \
166 req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
169 #define HWRM_CHECK_RESULT() do {\
171 PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
172 rte_spinlock_unlock(&bp->hwrm_lock); \
175 if (resp->error_code) { \
176 rc = rte_le_to_cpu_16(resp->error_code); \
177 if (resp->resp_len >= 16) { \
178 struct hwrm_err_output *tmp_hwrm_err_op = \
181 "error %d:%d:%08x:%04x\n", \
182 rc, tmp_hwrm_err_op->cmd_err, \
184 tmp_hwrm_err_op->opaque_0), \
186 tmp_hwrm_err_op->opaque_1)); \
188 PMD_DRV_LOG(ERR, "error %d\n", rc); \
190 rte_spinlock_unlock(&bp->hwrm_lock); \
195 #define HWRM_UNLOCK() rte_spinlock_unlock(&bp->hwrm_lock)
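/*
 * Sketch of the resulting call pattern (a composite of the functions
 * below, not itself part of the original source; hwrm_xyz is a
 * placeholder command name):
 *
 *	struct hwrm_xyz_input req = {.req_type = 0 };
 *	struct hwrm_xyz_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *	HWRM_PREP(req, XYZ);            (takes bp->hwrm_lock)
 *	req.some_field = rte_cpu_to_le_16(value);
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT();            (unlocks and returns on error)
 *	value = rte_le_to_cpu_16(resp->some_field);
 *	HWRM_UNLOCK();                  (always pair with HWRM_PREP())
 *	return rc;
 */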
197 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
200 struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
201 struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
203 HWRM_PREP(req, CFA_L2_SET_RX_MASK);
204 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
207 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
215 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
216 struct bnxt_vnic_info *vnic,
218 struct bnxt_vlan_table_entry *vlan_table)
221 struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
222 struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
225 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
228 HWRM_PREP(req, CFA_L2_SET_RX_MASK);
229 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
231 /* FIXME: add the multicast flag once multicast add options are supported
234 if (vnic->flags & BNXT_VNIC_INFO_BCAST)
235 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
236 if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
237 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
238 if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
239 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
240 if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
241 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
242 if (vnic->flags & BNXT_VNIC_INFO_MCAST)
243 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
244 if (vnic->mc_addr_cnt) {
245 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
246 req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
247 req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
250 if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
251 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
252 req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
253 rte_mem_virt2iova(vlan_table));
254 req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
256 req.mask = rte_cpu_to_le_32(mask);
258 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
266 int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
268 struct bnxt_vlan_antispoof_table_entry *vlan_table)
271 struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
272 struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
273 bp->hwrm_cmd_resp_addr;
276 * Older HWRM versions did not support this command, and the set_rx_mask
277 * list was used for anti-spoof. In 1.8.0, the TX path configuration was
278 * removed from the set_rx_mask call, and this command was added.
280 * This command is also present from 1.7.8.11 and higher,
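/*
 * bp->fw_ver packs the firmware version as
 * (maj << 24) | (min << 16) | (bld << 8) | rsvd (see bnxt_hwrm_ver_get()),
 * so e.g. firmware 1.8.0.0 compares as ((1 << 24) | (8 << 16)).
 */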
283 if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
284 if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
285 if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
290 HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
291 req.fid = rte_cpu_to_le_16(fid);
293 req.vlan_tag_mask_tbl_addr =
294 rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
295 req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
297 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
305 int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
306 struct bnxt_filter_info *filter)
309 struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
310 struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
312 if (filter->fw_l2_filter_id == UINT64_MAX)
315 HWRM_PREP(req, CFA_L2_FILTER_FREE);
317 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
319 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
324 filter->fw_l2_filter_id = UINT64_MAX;
329 int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
331 struct bnxt_filter_info *filter)
334 struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
335 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
336 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
337 const struct rte_eth_vmdq_rx_conf *conf =
338 &dev_conf->rx_adv_conf.vmdq_rx_conf;
339 uint32_t enables = 0;
340 uint16_t j = dst_id - 1;
342 /* TODO: Is there a better way to add VLANs to each VNIC for VMDq? */
343 if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
344 conf->pool_map[j].pools & (1UL << j)) {
346 "Add vlan %u to vmdq pool %u\n",
347 conf->pool_map[j].vlan_id, j);
349 filter->l2_ivlan = conf->pool_map[j].vlan_id;
351 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
352 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
355 if (filter->fw_l2_filter_id != UINT64_MAX)
356 bnxt_hwrm_clear_l2_filter(bp, filter);
358 HWRM_PREP(req, CFA_L2_FILTER_ALLOC);
360 req.flags = rte_cpu_to_le_32(filter->flags);
362 enables = filter->enables |
363 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
364 req.dst_id = rte_cpu_to_le_16(dst_id);
367 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
368 memcpy(req.l2_addr, filter->l2_addr,
371 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
372 memcpy(req.l2_addr_mask, filter->l2_addr_mask,
375 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
376 req.l2_ovlan = filter->l2_ovlan;
378 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
379 req.l2_ivlan = filter->l2_ivlan;
381 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
382 req.l2_ovlan_mask = filter->l2_ovlan_mask;
384 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
385 req.l2_ivlan_mask = filter->l2_ivlan_mask;
386 if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
387 req.src_id = rte_cpu_to_le_32(filter->src_id);
388 if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
389 req.src_type = filter->src_type;
391 req.enables = rte_cpu_to_le_32(enables);
393 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
397 filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
403 int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
405 struct hwrm_port_mac_cfg_input req = {.req_type = 0};
406 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
413 HWRM_PREP(req, PORT_MAC_CFG);
416 flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
419 HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
420 if (ptp->tx_tstamp_en)
421 flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
424 HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
425 req.flags = rte_cpu_to_le_32(flags);
426 req.enables = rte_cpu_to_le_32
427 (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
428 req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);
430 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
436 static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
439 struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
440 struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
441 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
443 /* if (bp->hwrm_spec_code < 0x10801 || ptp) TBD */
447 HWRM_PREP(req, PORT_MAC_PTP_QCFG);
449 req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
451 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
455 if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
458 ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
462 ptp->rx_regs[BNXT_PTP_RX_TS_L] =
463 rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
464 ptp->rx_regs[BNXT_PTP_RX_TS_H] =
465 rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
466 ptp->rx_regs[BNXT_PTP_RX_SEQ] =
467 rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
468 ptp->rx_regs[BNXT_PTP_RX_FIFO] =
469 rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
470 ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
471 rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
472 ptp->tx_regs[BNXT_PTP_TX_TS_L] =
473 rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
474 ptp->tx_regs[BNXT_PTP_TX_TS_H] =
475 rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
476 ptp->tx_regs[BNXT_PTP_TX_SEQ] =
477 rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
478 ptp->tx_regs[BNXT_PTP_TX_FIFO] =
479 rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
487 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
490 struct hwrm_func_qcaps_input req = {.req_type = 0 };
491 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
492 uint16_t new_max_vfs;
496 HWRM_PREP(req, FUNC_QCAPS);
498 req.fid = rte_cpu_to_le_16(0xffff);
500 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
504 bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
505 flags = rte_le_to_cpu_32(resp->flags);
507 bp->pf.port_id = resp->port_id;
508 bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
509 bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
510 new_max_vfs = bp->pdev->max_vfs;
511 if (new_max_vfs != bp->pf.max_vfs) {
513 rte_free(bp->pf.vf_info);
514 bp->pf.vf_info = rte_malloc("bnxt_vf_info",
515 sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
516 bp->pf.max_vfs = new_max_vfs;
517 for (i = 0; i < new_max_vfs; i++) {
518 bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
519 bp->pf.vf_info[i].vlan_table =
520 rte_zmalloc("VF VLAN table",
523 if (bp->pf.vf_info[i].vlan_table == NULL)
525 "Fail to alloc VLAN table for VF %d\n",
529 bp->pf.vf_info[i].vlan_table);
530 bp->pf.vf_info[i].vlan_as_table =
531 rte_zmalloc("VF VLAN AS table",
534 if (bp->pf.vf_info[i].vlan_as_table == NULL)
536 "Alloc VLAN AS table for VF %d fail\n",
540 bp->pf.vf_info[i].vlan_as_table);
541 STAILQ_INIT(&bp->pf.vf_info[i].filter);
546 bp->fw_fid = rte_le_to_cpu_32(resp->fid);
547 memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
548 bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
549 bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
550 bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
551 bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
552 bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
553 /* TODO: For now, do not support VMDq/RFS on VFs. */
558 bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
562 bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
564 bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
565 if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
566 bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
567 PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
569 bnxt_hwrm_ptp_qcfg(bp);
578 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
582 rc = __bnxt_hwrm_func_qcaps(bp);
583 if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
584 rc = bnxt_hwrm_func_resc_qcaps(bp);
586 bp->flags |= BNXT_FLAG_NEW_RM;
592 int bnxt_hwrm_func_reset(struct bnxt *bp)
595 struct hwrm_func_reset_input req = {.req_type = 0 };
596 struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
598 HWRM_PREP(req, FUNC_RESET);
600 req.enables = rte_cpu_to_le_32(0);
602 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
610 int bnxt_hwrm_func_driver_register(struct bnxt *bp)
613 struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
614 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
616 if (bp->flags & BNXT_FLAG_REGISTERED)
619 HWRM_PREP(req, FUNC_DRV_RGTR);
620 req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
621 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
622 req.ver_maj = RTE_VER_YEAR;
623 req.ver_min = RTE_VER_MONTH;
624 req.ver_upd = RTE_VER_MINOR;
627 req.enables |= rte_cpu_to_le_32(
628 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
629 memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
630 RTE_MIN(sizeof(req.vf_req_fwd),
631 sizeof(bp->pf.vf_req_fwd)));
634 * The PF can sniff HWRM API calls issued by a VF. This may be set up
635 * by the Linux driver and inherited by the DPDK PF driver. Clear
636 * this HWRM sniffer list in the FW because the DPDK PF driver does
640 rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
643 req.async_event_fwd[0] |=
644 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
645 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
646 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
647 req.async_event_fwd[1] |=
648 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
649 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
651 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
656 bp->flags |= BNXT_FLAG_REGISTERED;
661 int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp)
664 struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
665 struct hwrm_func_vf_cfg_input req = {0};
667 HWRM_PREP(req, FUNC_VF_CFG);
669 req.enables = rte_cpu_to_le_32
670 (HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS |
671 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS |
672 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
673 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
674 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
676 req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
677 req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
678 AGG_RING_MULTIPLIER);
679 req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
680 req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
682 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
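/*
 * Worked example (assuming AGG_RING_MULTIPLIER is 2, its value elsewhere
 * in the PMD): with 4 Rx and 4 Tx rings this requests 8 Rx rings (data
 * plus aggregation), 8 stat contexts and 4 HW ring groups.
 */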
684 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
691 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
694 struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
695 struct hwrm_func_resource_qcaps_input req = {0};
697 HWRM_PREP(req, FUNC_RESOURCE_QCAPS);
698 req.fid = rte_cpu_to_le_16(0xffff);
700 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
705 bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
706 bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
707 bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
708 bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
709 bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
710 bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
711 bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
712 bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
719 int bnxt_hwrm_ver_get(struct bnxt *bp)
722 struct hwrm_ver_get_input req = {.req_type = 0 };
723 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
726 uint16_t max_resp_len;
727 char type[RTE_MEMZONE_NAMESIZE];
728 uint32_t dev_caps_cfg;
730 bp->max_req_len = HWRM_MAX_REQ_LEN;
731 HWRM_PREP(req, VER_GET);
733 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
734 req.hwrm_intf_min = HWRM_VERSION_MINOR;
735 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
737 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
741 PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
742 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
743 resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
744 resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
745 bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
746 (resp->hwrm_fw_min_8b << 16) |
747 (resp->hwrm_fw_bld_8b << 8) |
748 resp->hwrm_fw_rsvd_8b;
749 PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
750 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
752 my_version = HWRM_VERSION_MAJOR << 16;
753 my_version |= HWRM_VERSION_MINOR << 8;
754 my_version |= HWRM_VERSION_UPDATE;
756 fw_version = resp->hwrm_intf_maj_8b << 16;
757 fw_version |= resp->hwrm_intf_min_8b << 8;
758 fw_version |= resp->hwrm_intf_upd_8b;
759 bp->hwrm_spec_code = fw_version;
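/*
 * Both versions pack the interface spec as (maj << 16) | (min << 8) | upd,
 * so e.g. spec 1.9.1 yields 0x10901 (cf. HWRM_VERSION_1_9_1 above).
 */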
761 if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
762 PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
767 if (my_version != fw_version) {
768 PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
769 if (my_version < fw_version) {
771 "Firmware API version is newer than driver.\n");
773 "The driver may be missing features.\n");
776 "Firmware API version is older than driver.\n");
778 "Not all driver features may be functional.\n");
782 if (bp->max_req_len > rte_le_to_cpu_16(resp->max_req_win_len)) {
783 PMD_DRV_LOG(ERR, "Unsupported request length\n");
786 bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
787 max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
788 dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);
790 if (bp->max_resp_len != max_resp_len) {
791 snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
792 bp->pdev->addr.domain, bp->pdev->addr.bus,
793 bp->pdev->addr.devid, bp->pdev->addr.function);
795 rte_free(bp->hwrm_cmd_resp_addr);
797 bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
798 if (bp->hwrm_cmd_resp_addr == NULL) {
802 rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
803 bp->hwrm_cmd_resp_dma_addr =
804 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
805 if (bp->hwrm_cmd_resp_dma_addr == 0) {
807 "Unable to map response buffer to physical memory.\n");
811 bp->max_resp_len = max_resp_len;
815 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
817 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
818 PMD_DRV_LOG(DEBUG, "Short command supported\n");
820 rte_free(bp->hwrm_short_cmd_req_addr);
822 bp->hwrm_short_cmd_req_addr = rte_malloc(type,
824 if (bp->hwrm_short_cmd_req_addr == NULL) {
828 rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
829 bp->hwrm_short_cmd_req_dma_addr =
830 rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
831 if (bp->hwrm_short_cmd_req_dma_addr == 0) {
832 rte_free(bp->hwrm_short_cmd_req_addr);
834 "Unable to map buffer to physical memory.\n");
839 bp->flags |= BNXT_FLAG_SHORT_CMD;
847 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
850 struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
851 struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
853 if (!(bp->flags & BNXT_FLAG_REGISTERED))
856 HWRM_PREP(req, FUNC_DRV_UNRGTR);
859 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
864 bp->flags &= ~BNXT_FLAG_REGISTERED;
869 static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
872 struct hwrm_port_phy_cfg_input req = {0};
873 struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
874 uint32_t enables = 0;
876 HWRM_PREP(req, PORT_PHY_CFG);
879 /* Setting a fixed speed while autoneg is on, so disable autoneg */
880 if (bp->link_info.auto_mode && conf->link_speed) {
881 req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
882 PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
885 req.flags = rte_cpu_to_le_32(conf->phy_flags);
886 req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
887 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
889 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
890 * any auto mode, even "none".
892 if (!conf->link_speed) {
893 /* No speeds specified. Enable AutoNeg - all speeds */
895 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
897 /* AutoNeg - Advertise speeds specified. */
898 if (conf->auto_link_speed_mask &&
899 !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
901 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
902 req.auto_link_speed_mask =
903 conf->auto_link_speed_mask;
905 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
908 req.auto_duplex = conf->duplex;
909 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
910 req.auto_pause = conf->auto_pause;
911 req.force_pause = conf->force_pause;
912 /* Use forced pause when auto_pause is unset, or when a pause is forced */
913 if (req.auto_pause && !req.force_pause)
914 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
916 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
918 req.enables = rte_cpu_to_le_32(enables);
921 rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
922 PMD_DRV_LOG(INFO, "Force Link Down\n");
925 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
933 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
934 struct bnxt_link_info *link_info)
937 struct hwrm_port_phy_qcfg_input req = {0};
938 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
940 HWRM_PREP(req, PORT_PHY_QCFG);
942 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
946 link_info->phy_link_status = resp->link;
948 (link_info->phy_link_status ==
949 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
950 link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
951 link_info->duplex = resp->duplex_cfg;
952 link_info->pause = resp->pause;
953 link_info->auto_pause = resp->auto_pause;
954 link_info->force_pause = resp->force_pause;
955 link_info->auto_mode = resp->auto_mode;
956 link_info->phy_type = resp->phy_type;
957 link_info->media_type = resp->media_type;
959 link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
960 link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
961 link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
962 link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
963 link_info->phy_ver[0] = resp->phy_maj;
964 link_info->phy_ver[1] = resp->phy_min;
965 link_info->phy_ver[2] = resp->phy_bld;
969 PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
970 PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
971 PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
972 PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
973 PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
974 link_info->auto_link_speed_mask);
975 PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
976 link_info->force_link_speed);
981 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
984 struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
985 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
988 HWRM_PREP(req, QUEUE_QPORTCFG);
990 req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
991 /* HWRM Version >= 1.9.1 */
992 if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
994 HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
995 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
999 #define GET_QUEUE_INFO(x) \
1000 bp->cos_queue[x].id = resp->queue_id##x; \
1001 bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
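/*
 * For example, GET_QUEUE_INFO(0) expands via token pasting to:
 *	bp->cos_queue[0].id = resp->queue_id0;
 *	bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 */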
1014 if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
1015 bp->tx_cosq_id = bp->cos_queue[0].id;
1017 /* iterate and find the COSq profile to use for Tx */
1018 for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
1019 if (bp->cos_queue[i].profile ==
1020 HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
1021 bp->tx_cosq_id = bp->cos_queue[i].id;
1026 PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);
1031 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
1032 struct bnxt_ring *ring,
1033 uint32_t ring_type, uint32_t map_index,
1034 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
1037 uint32_t enables = 0;
1038 struct hwrm_ring_alloc_input req = {.req_type = 0 };
1039 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1041 HWRM_PREP(req, RING_ALLOC);
1043 req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
1044 req.fbo = rte_cpu_to_le_32(0);
1045 /* Association of ring index with doorbell index */
1046 req.logical_id = rte_cpu_to_le_16(map_index);
1047 req.length = rte_cpu_to_le_32(ring->ring_size);
1049 switch (ring_type) {
1050 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1051 req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
1053 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1054 req.ring_type = ring_type;
1055 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1056 req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
1057 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1059 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1061 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1062 req.ring_type = ring_type;
1064 * TODO: Some HWRM versions crash with
1065 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
1067 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1070 PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
1075 req.enables = rte_cpu_to_le_32(enables);
1077 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1079 if (rc || resp->error_code) {
1080 if (rc == 0 && resp->error_code)
1081 rc = rte_le_to_cpu_16(resp->error_code);
1082 switch (ring_type) {
1083 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1085 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
1088 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1090 "hwrm_ring_alloc rx failed. rc:%d\n", rc);
1093 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1095 "hwrm_ring_alloc tx failed. rc:%d\n", rc);
1099 PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
1105 ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
1110 int bnxt_hwrm_ring_free(struct bnxt *bp,
1111 struct bnxt_ring *ring, uint32_t ring_type)
1114 struct hwrm_ring_free_input req = {.req_type = 0 };
1115 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
1117 HWRM_PREP(req, RING_FREE);
1119 req.ring_type = ring_type;
1120 req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
1122 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1124 if (rc || resp->error_code) {
1125 if (rc == 0 && resp->error_code)
1126 rc = rte_le_to_cpu_16(resp->error_code);
1129 switch (ring_type) {
1130 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1131 PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
1134 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1135 PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
1138 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1139 PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
1143 PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
1151 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1154 struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
1155 struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1157 HWRM_PREP(req, RING_GRP_ALLOC);
1159 req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
1160 req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
1161 req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
1162 req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
1164 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1166 HWRM_CHECK_RESULT();
1168 bp->grp_info[idx].fw_grp_id =
1169 rte_le_to_cpu_16(resp->ring_group_id);
1176 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
1179 struct hwrm_ring_grp_free_input req = {.req_type = 0 };
1180 struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
1182 HWRM_PREP(req, RING_GRP_FREE);
1184 req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
1186 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1188 HWRM_CHECK_RESULT();
1191 bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
1195 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1198 struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
1199 struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1201 if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
1204 HWRM_PREP(req, STAT_CTX_CLR_STATS);
1206 req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
1208 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1210 HWRM_CHECK_RESULT();
1216 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1217 unsigned int idx __rte_unused)
1220 struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1221 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1223 HWRM_PREP(req, STAT_CTX_ALLOC);
1225 req.update_period_ms = rte_cpu_to_le_32(0);
1227 req.stats_dma_addr =
1228 rte_cpu_to_le_64(cpr->hw_stats_map);
1230 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1232 HWRM_CHECK_RESULT();
1234 cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
1241 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1242 unsigned int idx __rte_unused)
1245 struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1246 struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1248 HWRM_PREP(req, STAT_CTX_FREE);
1250 req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
1252 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1254 HWRM_CHECK_RESULT();
1260 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1263 struct hwrm_vnic_alloc_input req = { 0 };
1264 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1266 /* map ring groups to this vnic */
1267 PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
1268 vnic->start_grp_id, vnic->end_grp_id);
1269 for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
1270 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1271 vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1272 vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1273 vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1274 vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
1275 vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1276 ETHER_CRC_LEN + VLAN_TAG_SIZE;
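/*
 * E.g. with a 1500-byte MTU the MRU becomes 1500 + 14 (ETHER_HDR_LEN) +
 * 4 (ETHER_CRC_LEN) + 4 (VLAN_TAG_SIZE) = 1522 bytes (assuming the
 * conventional values of those constants).
 */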
1277 HWRM_PREP(req, VNIC_ALLOC);
1279 if (vnic->func_default)
1281 rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1282 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1284 HWRM_CHECK_RESULT();
1286 vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1288 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1292 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1293 struct bnxt_vnic_info *vnic,
1294 struct bnxt_plcmodes_cfg *pmode)
1297 struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1298 struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1300 HWRM_PREP(req, VNIC_PLCMODES_QCFG);
1302 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1304 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1306 HWRM_CHECK_RESULT();
1308 pmode->flags = rte_le_to_cpu_32(resp->flags);
1309 /* dflt_vnic bit doesn't exist in the _cfg command */
1310 pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1311 pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1312 pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1313 pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1320 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1321 struct bnxt_vnic_info *vnic,
1322 struct bnxt_plcmodes_cfg *pmode)
1325 struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1326 struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1328 HWRM_PREP(req, VNIC_PLCMODES_CFG);
1330 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1331 req.flags = rte_cpu_to_le_32(pmode->flags);
1332 req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1333 req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1334 req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1335 req.enables = rte_cpu_to_le_32(
1336 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1337 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1338 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1341 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1343 HWRM_CHECK_RESULT();
1349 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1352 struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1353 struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1354 uint32_t ctx_enable_flag = 0;
1355 struct bnxt_plcmodes_cfg pmodes;
1357 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1358 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1362 rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1366 HWRM_PREP(req, VNIC_CFG);
1368 /* Only RSS is supported for now. TBD: COS & LB */
1370 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
1371 if (vnic->lb_rule != 0xffff)
1372 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1373 if (vnic->cos_rule != 0xffff)
1374 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1375 if (vnic->rss_rule != 0xffff) {
1376 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1377 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1379 req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
1380 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1381 req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1382 req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1383 req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1384 req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1385 req.mru = rte_cpu_to_le_16(vnic->mru);
1386 if (vnic->func_default)
1388 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1389 if (vnic->vlan_strip)
1391 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1394 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1395 if (vnic->roce_dual)
1396 req.flags |= rte_cpu_to_le_32(
1397 HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1398 if (vnic->roce_only)
1399 req.flags |= rte_cpu_to_le_32(
1400 HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1401 if (vnic->rss_dflt_cr)
1402 req.flags |= rte_cpu_to_le_32(
1403 HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE);
1405 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1407 HWRM_CHECK_RESULT();
1410 rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1415 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1419 struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1420 struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1422 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1423 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1426 HWRM_PREP(req, VNIC_QCFG);
1429 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1430 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1431 req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1433 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1435 HWRM_CHECK_RESULT();
1437 vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1438 vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1439 vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1440 vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1441 vnic->mru = rte_le_to_cpu_16(resp->mru);
1442 vnic->func_default = rte_le_to_cpu_32(
1443 resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1444 vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1445 HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1446 vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1447 HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1448 vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1449 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1450 vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1451 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1452 vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1453 HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1460 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1463 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1464 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1465 bp->hwrm_cmd_resp_addr;
1467 HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);
1469 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1471 HWRM_CHECK_RESULT();
1473 vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1475 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1480 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1483 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1484 struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1485 bp->hwrm_cmd_resp_addr;
1487 if (vnic->rss_rule == 0xffff) {
1488 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1491 HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
1493 req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1495 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1497 HWRM_CHECK_RESULT();
1500 vnic->rss_rule = INVALID_HW_RING_ID;
1505 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1508 struct hwrm_vnic_free_input req = {.req_type = 0 };
1509 struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1511 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1512 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1516 HWRM_PREP(req, VNIC_FREE);
1518 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1520 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1522 HWRM_CHECK_RESULT();
1525 vnic->fw_vnic_id = INVALID_HW_RING_ID;
1529 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1530 struct bnxt_vnic_info *vnic)
1533 struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1534 struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1536 HWRM_PREP(req, VNIC_RSS_CFG);
1538 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1539 req.hash_mode_flags = vnic->hash_mode;
1541 req.ring_grp_tbl_addr =
1542 rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1543 req.hash_key_tbl_addr =
1544 rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1545 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1547 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1549 HWRM_CHECK_RESULT();
1555 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1556 struct bnxt_vnic_info *vnic)
1559 struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1560 struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1563 HWRM_PREP(req, VNIC_PLCMODES_CFG);
1565 req.flags = rte_cpu_to_le_32(
1566 HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1568 req.enables = rte_cpu_to_le_32(
1569 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1571 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1572 size -= RTE_PKTMBUF_HEADROOM;
1574 req.jumbo_thresh = rte_cpu_to_le_16(size);
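/*
 * Example (assuming the default RTE_PKTMBUF_HEADROOM of 128 bytes): a
 * pool with a 2176-byte data room gives a jumbo threshold of
 * 2176 - 128 = 2048 bytes.
 */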
1575 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1577 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1579 HWRM_CHECK_RESULT();
1585 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1586 struct bnxt_vnic_info *vnic, bool enable)
1589 struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1590 struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1592 HWRM_PREP(req, VNIC_TPA_CFG);
1595 req.enables = rte_cpu_to_le_32(
1596 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1597 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1598 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1599 req.flags = rte_cpu_to_le_32(
1600 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1601 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1602 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1603 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1604 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1605 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1606 req.max_agg_segs = rte_cpu_to_le_16(5);
1608 rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1609 req.min_agg_len = rte_cpu_to_le_32(512);
1611 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1613 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1615 HWRM_CHECK_RESULT();
1621 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1623 struct hwrm_func_cfg_input req = {0};
1624 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1627 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1628 req.enables = rte_cpu_to_le_32(
1629 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1630 memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1631 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1633 HWRM_PREP(req, FUNC_CFG);
1635 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1636 HWRM_CHECK_RESULT();
1639 bp->pf.vf_info[vf].random_mac = false;
1644 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1648 struct hwrm_func_qstats_input req = {.req_type = 0};
1649 struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1651 HWRM_PREP(req, FUNC_QSTATS);
1653 req.fid = rte_cpu_to_le_16(fid);
1655 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1657 HWRM_CHECK_RESULT();
1660 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1667 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1668 struct rte_eth_stats *stats)
1671 struct hwrm_func_qstats_input req = {.req_type = 0};
1672 struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1674 HWRM_PREP(req, FUNC_QSTATS);
1676 req.fid = rte_cpu_to_le_16(fid);
1678 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1680 HWRM_CHECK_RESULT();
1682 stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1683 stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1684 stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1685 stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1686 stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1687 stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1689 stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1690 stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1691 stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1692 stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1693 stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1694 stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1696 stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
1697 stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
1698 stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
1705 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1708 struct hwrm_func_clr_stats_input req = {.req_type = 0};
1709 struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1711 HWRM_PREP(req, FUNC_CLR_STATS);
1713 req.fid = rte_cpu_to_le_16(fid);
1715 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1717 HWRM_CHECK_RESULT();
1724 * HWRM utility functions
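/*
 * Note on the loops below: completion rings are indexed Rx first, then
 * Tx, so i in [0, rx_cp_nr_rings) selects rx_queues[i] and larger i
 * selects tx_queues[i - rx_cp_nr_rings] (e.g. with 4 Rx and 2 Tx rings,
 * i == 5 is tx_queues[1]).
 */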
1727 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1732 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1733 struct bnxt_tx_queue *txq;
1734 struct bnxt_rx_queue *rxq;
1735 struct bnxt_cp_ring_info *cpr;
1737 if (i >= bp->rx_cp_nr_rings) {
1738 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1741 rxq = bp->rx_queues[i];
1745 rc = bnxt_hwrm_stat_clear(bp, cpr);
1752 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1756 struct bnxt_cp_ring_info *cpr;
1758 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1760 if (i >= bp->rx_cp_nr_rings) {
1761 cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1763 cpr = bp->rx_queues[i]->cp_ring;
1764 bp->grp_info[i].fw_stats_ctx = -1;
1766 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1767 rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1768 cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1776 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1781 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1782 struct bnxt_tx_queue *txq;
1783 struct bnxt_rx_queue *rxq;
1784 struct bnxt_cp_ring_info *cpr;
1786 if (i >= bp->rx_cp_nr_rings) {
1787 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1790 rxq = bp->rx_queues[i];
1794 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1802 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1807 for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1809 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
1812 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1820 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1821 unsigned int idx __rte_unused)
1823 struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1825 bnxt_hwrm_ring_free(bp, cp_ring,
1826 HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1827 cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1828 memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1829 sizeof(*cpr->cp_desc_ring));
1830 cpr->cp_raw_cons = 0;
1833 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1838 for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1839 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1840 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1841 struct bnxt_ring *ring = txr->tx_ring_struct;
1842 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1843 unsigned int idx = bp->rx_cp_nr_rings + i;
1845 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1846 bnxt_hwrm_ring_free(bp, ring,
1847 HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1848 ring->fw_ring_id = INVALID_HW_RING_ID;
1849 memset(txr->tx_desc_ring, 0,
1850 txr->tx_ring_struct->ring_size *
1851 sizeof(*txr->tx_desc_ring));
1852 memset(txr->tx_buf_ring, 0,
1853 txr->tx_ring_struct->ring_size *
1854 sizeof(*txr->tx_buf_ring));
1858 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1859 bnxt_free_cp_ring(bp, cpr, idx);
1860 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1864 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1865 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1866 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1867 struct bnxt_ring *ring = rxr->rx_ring_struct;
1868 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1870 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1871 bnxt_hwrm_ring_free(bp, ring,
1872 HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1873 ring->fw_ring_id = INVALID_HW_RING_ID;
1874 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
1875 memset(rxr->rx_desc_ring, 0,
1876 rxr->rx_ring_struct->ring_size *
1877 sizeof(*rxr->rx_desc_ring));
1878 memset(rxr->rx_buf_ring, 0,
1879 rxr->rx_ring_struct->ring_size *
1880 sizeof(*rxr->rx_buf_ring));
1883 ring = rxr->ag_ring_struct;
1884 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1885 bnxt_hwrm_ring_free(bp, ring,
1886 HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1887 ring->fw_ring_id = INVALID_HW_RING_ID;
1888 memset(rxr->ag_buf_ring, 0,
1889 rxr->ag_ring_struct->ring_size *
1890 sizeof(*rxr->ag_buf_ring));
1892 bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID;
1894 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1895 bnxt_free_cp_ring(bp, cpr, i);
1896 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
1897 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1901 /* Default completion ring */
1903 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1905 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1906 bnxt_free_cp_ring(bp, cpr, 0);
1907 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1914 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1919 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1920 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1927 void bnxt_free_hwrm_resources(struct bnxt *bp)
1929 /* Release HWRM command and response buffers */
1930 rte_free(bp->hwrm_cmd_resp_addr);
1931 rte_free(bp->hwrm_short_cmd_req_addr);
1932 bp->hwrm_cmd_resp_addr = NULL;
1933 bp->hwrm_short_cmd_req_addr = NULL;
1934 bp->hwrm_cmd_resp_dma_addr = 0;
1935 bp->hwrm_short_cmd_req_dma_addr = 0;
1938 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1940 struct rte_pci_device *pdev = bp->pdev;
1941 char type[RTE_MEMZONE_NAMESIZE];
1943 snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
1944 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1945 bp->max_resp_len = HWRM_MAX_RESP_LEN;
1946 bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1947 rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1948 if (bp->hwrm_cmd_resp_addr == NULL)
1950 bp->hwrm_cmd_resp_dma_addr =
1951 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
1952 if (bp->hwrm_cmd_resp_dma_addr == 0) {
1954 "unable to map response address to physical memory\n");
1957 rte_spinlock_init(&bp->hwrm_lock);
1962 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1964 struct bnxt_filter_info *filter;
1967 STAILQ_FOREACH(filter, &vnic->filter, next) {
1968 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1969 rc = bnxt_hwrm_clear_em_filter(bp, filter);
1970 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1971 rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1973 rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1981 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1983 struct bnxt_filter_info *filter;
1984 struct rte_flow *flow;
1987 STAILQ_FOREACH(flow, &vnic->flow_list, next) {
1988 filter = flow->filter;
1989 PMD_DRV_LOG(ERR, "filter type %d\n", filter->filter_type);
1990 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1991 rc = bnxt_hwrm_clear_em_filter(bp, filter);
1992 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1993 rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1995 rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1997 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
2005 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2007 struct bnxt_filter_info *filter;
2010 STAILQ_FOREACH(filter, &vnic->filter, next) {
2011 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2012 rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2014 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2015 rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2018 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2026 void bnxt_free_tunnel_ports(struct bnxt *bp)
2028 if (bp->vxlan_port_cnt)
2029 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2030 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2032 if (bp->geneve_port_cnt)
2033 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2034 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2035 bp->geneve_port = 0;
2038 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2042 if (bp->vnic_info == NULL)
2046 * Cleanup VNICs in reverse order, to make sure the L2 filter
2047 * from vnic0 is last to be cleaned up.
2049 for (i = bp->nr_vnics - 1; i >= 0; i--) {
2050 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2052 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2054 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2056 bnxt_hwrm_vnic_ctx_free(bp, vnic);
2058 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2060 bnxt_hwrm_vnic_free(bp, vnic);
2062 /* Ring resources */
2063 bnxt_free_all_hwrm_rings(bp);
2064 bnxt_free_all_hwrm_ring_grps(bp);
2065 bnxt_free_all_hwrm_stat_ctxs(bp);
2066 bnxt_free_tunnel_ports(bp);
2069 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2071 uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2073 if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2074 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2076 switch (conf_link_speed) {
2077 case ETH_LINK_SPEED_10M_HD:
2078 case ETH_LINK_SPEED_100M_HD:
2080 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2082 return hw_link_duplex;
2085 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2087 return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
2090 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2092 uint16_t eth_link_speed = 0;
2094 if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2095 return ETH_LINK_SPEED_AUTONEG;
2097 switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2098 case ETH_LINK_SPEED_100M:
2099 case ETH_LINK_SPEED_100M_HD:
2102 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2104 case ETH_LINK_SPEED_1G:
2106 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2108 case ETH_LINK_SPEED_2_5G:
2110 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2112 case ETH_LINK_SPEED_10G:
2114 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2116 case ETH_LINK_SPEED_20G:
2118 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2120 case ETH_LINK_SPEED_25G:
2122 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2124 case ETH_LINK_SPEED_40G:
2126 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2128 case ETH_LINK_SPEED_50G:
2130 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2132 case ETH_LINK_SPEED_100G:
2134 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2138 "Unsupported link speed %d; default to AUTO\n",
2142 return eth_link_speed;
2145 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2146 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2147 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2148 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
2150 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2154 if (link_speed == ETH_LINK_SPEED_AUTONEG)
2157 if (link_speed & ETH_LINK_SPEED_FIXED) {
2158 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
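/* x & (x - 1) is non-zero iff more than one speed bit is set */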
2160 if (one_speed & (one_speed - 1)) {
2162 "Invalid advertised speeds (%u) for port %u\n",
2163 link_speed, port_id);
2166 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2168 "Unsupported advertised speed (%u) for port %u\n",
2169 link_speed, port_id);
2173 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2175 "Unsupported advertised speeds (%u) for port %u\n",
2176 link_speed, port_id);
2184 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2188 if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2189 if (bp->link_info.support_speeds)
2190 return bp->link_info.support_speeds;
2191 link_speed = BNXT_SUPPORTED_SPEEDS;
2194 if (link_speed & ETH_LINK_SPEED_100M)
2195 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2196 if (link_speed & ETH_LINK_SPEED_100M_HD)
2197 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2198 if (link_speed & ETH_LINK_SPEED_1G)
2199 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2200 if (link_speed & ETH_LINK_SPEED_2_5G)
2201 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2202 if (link_speed & ETH_LINK_SPEED_10G)
2203 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2204 if (link_speed & ETH_LINK_SPEED_20G)
2205 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2206 if (link_speed & ETH_LINK_SPEED_25G)
2207 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2208 if (link_speed & ETH_LINK_SPEED_40G)
2209 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2210 if (link_speed & ETH_LINK_SPEED_50G)
2211 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2212 if (link_speed & ETH_LINK_SPEED_100G)
2213 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
{
	uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;

	switch (hw_link_speed) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
		eth_link_speed = ETH_SPEED_NUM_100M;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
		eth_link_speed = ETH_SPEED_NUM_1G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
		eth_link_speed = ETH_SPEED_NUM_2_5G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
		eth_link_speed = ETH_SPEED_NUM_10G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
		eth_link_speed = ETH_SPEED_NUM_20G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
		eth_link_speed = ETH_SPEED_NUM_25G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
		eth_link_speed = ETH_SPEED_NUM_40G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
		eth_link_speed = ETH_SPEED_NUM_50G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
		eth_link_speed = ETH_SPEED_NUM_100G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
	default:
		PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
			hw_link_speed);
		break;
	}
	return eth_link_speed;
}

static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
{
	uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;

	switch (hw_link_duplex) {
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	default:
		PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
			hw_link_duplex);
		break;
	}
	return eth_link_duplex;
}

int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
	if (rc) {
		PMD_DRV_LOG(ERR,
			"Get link config failed with rc %d\n", rc);
		goto exit;
	}
	if (link_info->link_speed)
		link->link_speed =
			bnxt_parse_hw_link_speed(link_info->link_speed);
	else
		link->link_speed = ETH_SPEED_NUM_NONE;
	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
	link->link_status = link_info->link_up;
	link->link_autoneg = link_info->auto_mode ==
		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
		ETH_LINK_FIXED : ETH_LINK_AUTONEG;
exit:
	return rc;
}

int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
	int rc = 0;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_link_info link_req;
	uint16_t speed, autoneg;

	if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
		return 0;

	rc = bnxt_valid_link_speed(dev_conf->link_speeds,
			bp->eth_dev->data->port_id);
	if (rc)
		goto error;

	memset(&link_req, 0, sizeof(link_req));
	link_req.link_up = link_up;
	if (!link_up)
		goto port_phy_cfg;

	autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
	/* Autoneg can be done only when the FW allows it */
	if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
			bp->link_info.force_link_speed)) {
		link_req.phy_flags |=
				HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		link_req.auto_link_speed_mask =
			bnxt_parse_eth_link_speed_mask(bp,
						       dev_conf->link_speeds);
	} else {
		if (bp->link_info.phy_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
		    bp->link_info.phy_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
		    bp->link_info.media_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
			PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
			return -EINVAL;
		}

		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
		/* If the user wants a particular speed, try that first. */
		if (speed)
			link_req.link_speed = speed;
		else if (bp->link_info.force_link_speed)
			link_req.link_speed = bp->link_info.force_link_speed;
		else
			link_req.link_speed = bp->link_info.auto_link_speed;
	}
	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
	link_req.auto_pause = bp->link_info.auto_pause;
	link_req.force_pause = bp->link_info.force_pause;

port_phy_cfg:
	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
	if (rc) {
		PMD_DRV_LOG(ERR,
			"Set link config failed with rc %d\n", rc);
	}

error:
	return rc;
}

int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t flags;
	int rc = 0;

	HWRM_PREP(req, FUNC_QCFG);
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	/* Hard-coded 0xfff VLAN ID mask */
	bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
	flags = rte_le_to_cpu_16(resp->flags);
	if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
		bp->flags |= BNXT_FLAG_MULTI_HOST;

	switch (resp->port_partition_type) {
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
		/* FALLTHROUGH */
		bp->port_partition_type = resp->port_partition_type;
		break;
	default:
		bp->port_partition_type = 0;
		break;
	}

	HWRM_UNLOCK();

	return rc;
}

static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
				   struct hwrm_func_qcaps_output *qcaps)
{
	qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
	memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
	       sizeof(qcaps->mac_address));
	qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
	qcaps->max_rx_rings = fcfg->num_rx_rings;
	qcaps->max_tx_rings = fcfg->num_tx_rings;
	qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
	qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
	qcaps->max_vfs = 0;
	qcaps->first_vf_id = 0;
	qcaps->max_vnics = fcfg->num_vnics;
	qcaps->max_decap_records = 0;
	qcaps->max_encap_records = 0;
	qcaps->max_tx_wm_flows = 0;
	qcaps->max_tx_em_flows = 0;
	qcaps->max_rx_wm_flows = 0;
	qcaps->max_rx_em_flows = 0;
	qcaps->max_flow_id = 0;
	qcaps->max_mcast_filters = fcfg->num_mcast_filters;
	qcaps->max_sp_tx_rings = 0;
	qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
}

static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
			HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
	req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
	req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
	req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				   ETHER_CRC_LEN + VLAN_TAG_SIZE *
				   BNXT_NUM_VLANS);
	req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
	req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
	req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
	req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
	req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
	req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
	req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
	req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
	req.fid = rte_cpu_to_le_16(0xffff);

	HWRM_PREP(req, FUNC_CFG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

static void populate_vf_func_cfg_req(struct bnxt *bp,
				     struct hwrm_func_cfg_input *req,
				     int num_vfs)
{
	req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
			HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);

	req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				    ETHER_CRC_LEN + VLAN_TAG_SIZE *
				    BNXT_NUM_VLANS);
	req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				    ETHER_CRC_LEN + VLAN_TAG_SIZE *
				    BNXT_NUM_VLANS);
	req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
						(num_vfs + 1));
	req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
	req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
					       (num_vfs + 1));
	req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
	req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
	req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
	/* TODO: For now, do not support VMDq/RFS on VFs. */
	req->num_vnics = rte_cpu_to_le_16(1);
	req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
						 (num_vfs + 1));
}

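/*
 * The per-function quotas above split each PF maximum evenly across the
 * PF and its VFs (num_vfs + 1 functions).  For example, assuming a PF
 * maximum of 64 TX rings and num_vfs = 7, each of the eight functions is
 * offered 64 / 8 = 8 TX rings; with integer division, any remainder is
 * simply left unassigned.
 */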
static void add_random_mac_if_needed(struct bnxt *bp,
				     struct hwrm_func_cfg_input *cfg_req,
				     int vf)
{
	struct ether_addr mac;

	if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
		return;

	if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00", 6) == 0) {
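		/*
		 * Note: the literal above has five explicit zero bytes; its
		 * terminating NUL supplies the sixth, so the memcmp() does
		 * compare all ETHER_ADDR_LEN (6) bytes against zero.
		 */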
		cfg_req->enables |=
		rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
		eth_random_addr(cfg_req->dflt_mac_addr);
		bp->pf.vf_info[vf].random_mac = true;
	} else {
		memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
	}
}

static void reserve_resources_from_vf(struct bnxt *bp,
				      struct hwrm_func_cfg_input *cfg_req,
				      int vf)
{
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	/* Get the actual allocated values now */
	HWRM_PREP(req, FUNC_QCAPS);
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	if (rc) {
		PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
		copy_func_cfg_to_qcaps(cfg_req, resp);
	} else if (resp->error_code) {
		rc = rte_le_to_cpu_16(resp->error_code);
		PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
		copy_func_cfg_to_qcaps(cfg_req, resp);
	}

	bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
	bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
	/*
	 * TODO: While not supporting VMDq with VFs, max_vnics is always
	 * forced to 1 in this case
	 */
	//bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
	bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);

	HWRM_UNLOCK();
}

int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	/* Query the current default VLAN for the given VF */
	HWRM_PREP(req, FUNC_QCFG);
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	if (rc) {
		PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
		return -1;
	} else if (resp->error_code) {
		rc = rte_le_to_cpu_16(resp->error_code);
		PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
		return -1;
	}
	rc = rte_le_to_cpu_16(resp->vlan);

	HWRM_UNLOCK();

	return rc;
}

static int update_pf_resource_max(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	/* And copy the allocated numbers into the pf struct */
	HWRM_PREP(req, FUNC_QCFG);
	req.fid = rte_cpu_to_le_16(0xffff);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();

	/* Only TX ring value reflects actual allocation? TODO */
	bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
	bp->pf.evb_mode = resp->evb_mode;

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
{
	int rc;

	if (!BNXT_PF(bp)) {
		PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
		return -EINVAL;
	}

	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc)
		return rc;

	bp->pf.func_cfg_flags &=
		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
	bp->pf.func_cfg_flags |=
		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
	return rc;
}

int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int i;
	size_t sz;
	int rc = 0;
	size_t req_buf_sz;

	if (!BNXT_PF(bp)) {
		PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
		return -EINVAL;
	}

	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc)
		return rc;

	bp->pf.active_vfs = num_vfs;

	/*
	 * First, configure the PF to only use one TX ring.  This ensures that
	 * there are enough rings for all VFs.
	 *
	 * If we don't do this, when we call func_alloc() later, we will lock
	 * extra rings to the PF that won't be available during func_cfg() of
	 * the VFs.
	 *
	 * This has been fixed with firmware versions above 20.6.54
	 */
	bp->pf.func_cfg_flags &=
		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
	bp->pf.func_cfg_flags |=
		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
	rc = bnxt_hwrm_pf_func_cfg(bp, 1);
	if (rc)
		return rc;

	/*
	 * Now, create and register a buffer to hold forwarded VF requests
	 */
	req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
	bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
		page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
	if (bp->pf.vf_req_buf == NULL) {
		rc = -ENOMEM;
		goto error_free;
	}
	for (sz = 0; sz < req_buf_sz; sz += getpagesize())
		rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
	for (i = 0; i < num_vfs; i++)
		bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
					(i * HWRM_MAX_REQ_LEN);
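	/*
	 * Layout sketch of the forwarded-request buffer: one contiguous
	 * allocation carved into fixed HWRM_MAX_REQ_LEN slots, one per VF,
	 * so VF i's slot starts at byte offset i * HWRM_MAX_REQ_LEN.  Every
	 * page is touched and locked above so the firmware can safely DMA
	 * forwarded requests into it once it is registered below.
	 */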
	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		goto error_free;

	populate_vf_func_cfg_req(bp, &req, num_vfs);

	bp->pf.active_vfs = 0;
	for (i = 0; i < num_vfs; i++) {
		add_random_mac_if_needed(bp, &req, i);

		HWRM_PREP(req, FUNC_CFG);
		req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
		req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

		/* Clear enable flag for next pass */
		req.enables &= ~rte_cpu_to_le_32(
				HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);

		if (rc || resp->error_code) {
			PMD_DRV_LOG(ERR,
				"Failed to initialize VF %d\n", i);
			PMD_DRV_LOG(ERR,
				"Not all VFs available. (%d, %d)\n",
				rc, resp->error_code);
			HWRM_UNLOCK();
			break;
		}

		HWRM_UNLOCK();

		reserve_resources_from_vf(bp, &req, i);
		bp->pf.active_vfs++;
		bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
	}

	/*
	 * Now configure the PF to use "the rest" of the resources.
	 * We're using STD_TX_RING_MODE here, which will limit the TX
	 * rings.  This will allow QoS to function properly.  Not setting this
	 * will cause PF rings to break bandwidth settings.
	 */
	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
	if (rc)
		goto error_free;

	rc = update_pf_resource_max(bp);
	if (rc)
		goto error_free;

	return rc;

error_free:
	bnxt_hwrm_func_buf_unrgtr(bp);
	return rc;
}

int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(req, FUNC_CFG);

	req.fid = rte_cpu_to_le_16(0xffff);
	req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
	req.evb_mode = bp->pf.evb_mode;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
				uint8_t tunnel_type)
{
	struct hwrm_tunnel_dst_port_alloc_input req = {0};
	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
	req.tunnel_type = tunnel_type;
	req.tunnel_dst_port_val = port;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();

	switch (tunnel_type) {
	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
		bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
		bp->vxlan_port = port;
		break;
	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
		bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
		bp->geneve_port = port;
		break;
	default:
		break;
	}

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
				uint8_t tunnel_type)
{
	struct hwrm_tunnel_dst_port_free_input req = {0};
	struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(req, TUNNEL_DST_PORT_FREE);

	req.tunnel_type = tunnel_type;
	req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
					uint32_t flags)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_CFG);

	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	req.flags = rte_cpu_to_le_32(flags);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
{
	uint32_t *flag = flagp;

	vnic->flags = *flag;
}

int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}

int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_BUF_RGTR);

	req.req_buf_num_pages = rte_cpu_to_le_16(1);
	req.req_buf_page_size = rte_cpu_to_le_16(
			 page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
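	/*
	 * req_buf_page_size is a log2 exponent, not a byte count:
	 * page_getenum() returns the exponent of the smallest supported
	 * page size that covers the whole request buffer (for example, a
	 * size that rounds up to 4 KB is encoded as 12, i.e. 2^12).
	 */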
	req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
	req.req_buf_page_addr0 =
		rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
	if (req.req_buf_page_addr0 == 0) {
		PMD_DRV_LOG(ERR,
			"unable to map buffer address to physical memory\n");
		return -ENOMEM;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_BUF_UNRGTR);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_CFG);

	req.fid = rte_cpu_to_le_16(0xffff);
	req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
	req.async_event_cr = rte_cpu_to_le_16(
			bp->def_cp_ring->cp_ring_struct->fw_ring_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
{
	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_vf_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_VF_CFG);

	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
	req.async_event_cr = rte_cpu_to_le_16(
			bp->def_cp_ring->cp_ring_struct->fw_ring_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t dflt_vlan, fid;
	uint32_t func_cfg_flags;
	int rc = 0;

	HWRM_PREP(req, FUNC_CFG);

	if (is_vf) {
		dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
		fid = bp->pf.vf_info[vf].fid;
		func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
	} else {
		fid = rte_cpu_to_le_16(0xffff);
		func_cfg_flags = bp->pf.func_cfg_flags;
		dflt_vlan = bp->vlan;
	}

	req.flags = rte_cpu_to_le_32(func_cfg_flags);
	req.fid = rte_cpu_to_le_16(fid);
	req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
	req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
			uint16_t max_bw, uint16_t enables)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_CFG);

	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	req.enables |= rte_cpu_to_le_32(enables);
	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
	req.max_bw = rte_cpu_to_le_32(max_bw);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(req, FUNC_CFG);

	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
	req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
{
	int rc;

	if (BNXT_PF(bp))
		rc = bnxt_hwrm_func_cfg_def_cp(bp);
	else
		rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);

	return rc;
}

int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
			      void *encaped, size_t ec_size)
{
	int rc = 0;
	struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	if (ec_size > sizeof(req.encap_request))
		return -1;

	HWRM_PREP(req, REJECT_FWD_RESP);

	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
	memcpy(req.encap_request, encaped, ec_size);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
				       struct ether_addr *mac)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(req, FUNC_QCFG);

	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
			    void *encaped, size_t ec_size)
{
	int rc = 0;
	struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	if (ec_size > sizeof(req.encap_request))
		return -1;

	HWRM_PREP(req, EXEC_FWD_RESP);

	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
	memcpy(req.encap_request, encaped, ec_size);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
			 struct rte_eth_stats *stats, uint8_t rx)
{
	int rc = 0;
	struct hwrm_stat_ctx_query_input req = {.req_type = 0};
	struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_QUERY);

	req.stat_ctx_id = rte_cpu_to_le_32(cid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	if (rx) {
		stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
		stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
		stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
		stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
		stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
		stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
		stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
		stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
	} else {
		stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
		stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
		stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
		stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
		stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
		stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
		stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
	}

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_port_qstats(struct bnxt *bp)
{
	struct hwrm_port_qstats_input req = {0};
	struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_pf_info *pf = &bp->pf;
	int rc;

	HWRM_PREP(req, PORT_QSTATS);

	req.port_id = rte_cpu_to_le_16(pf->port_id);
	req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
	req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
{
	struct hwrm_port_clr_stats_input req = {0};
	struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_pf_info *pf = &bp->pf;
	int rc;

	/* Not allowed on NS2 device, NPAR, MultiHost, VF */
	if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
	    BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
		return 0;

	HWRM_PREP(req, PORT_CLR_STATS);

	req.port_id = rte_cpu_to_le_16(pf->port_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
{
	struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_port_led_qcaps_input req = {0};
	int rc;

	if (BNXT_VF(bp))
		return -EINVAL;

	HWRM_PREP(req, PORT_LED_QCAPS);
	req.port_id = bp->pf.port_id;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
		unsigned int i;

		bp->num_leds = resp->num_leds;
		memcpy(bp->leds, &resp->led0_id,
			sizeof(bp->leds[0]) * bp->num_leds);
		for (i = 0; i < bp->num_leds; i++) {
			struct bnxt_led_info *led = &bp->leds[i];
			uint16_t caps = led->led_state_caps;

			if (!led->led_group_id ||
			    !BNXT_LED_ALT_BLINK_CAP(caps)) {
				bp->num_leds = 0;
				break;
			}
		}
	}

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
{
	struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_port_led_cfg_input req = {0};
	struct bnxt_led_cfg *led_cfg;
	uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
	uint16_t duration = 0;
	int rc, i;

	if (!bp->num_leds || BNXT_VF(bp))
		return -EOPNOTSUPP;

	HWRM_PREP(req, PORT_LED_CFG);

	if (led_on) {
		led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
		duration = rte_cpu_to_le_16(500);
	}
	req.port_id = bp->pf.port_id;
	req.num_leds = bp->num_leds;
	led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
	for (i = 0; i < bp->num_leds; i++, led_cfg++) {
		req.enables |= BNXT_LED_DFLT_ENABLES(i);
		led_cfg->led_id = bp->leds[i].led_id;
		led_cfg->led_state = led_state;
		led_cfg->led_blink_on = duration;
		led_cfg->led_blink_off = duration;
		led_cfg->led_group_id = bp->leds[i].led_group_id;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
			       uint32_t *length)
{
	int rc;
	struct hwrm_nvm_get_dir_info_input req = {0};
	struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, NVM_GET_DIR_INFO);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	if (!rc) {
		*entries = rte_le_to_cpu_32(resp->entries);
		*length = rte_le_to_cpu_32(resp->entry_length);
	}
	return rc;
}

int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
{
	int rc;
	uint32_t dir_entries;
	uint32_t entry_length;
	uint8_t *buf;
	size_t buflen;
	rte_iova_t dma_handle;
	struct hwrm_nvm_get_dir_entries_input req = {0};
	struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;

	rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
	if (rc != 0)
		return rc;

	*data++ = dir_entries;
	*data++ = entry_length;
	len -= 2;
	memset(data, 0xff, len);

	buflen = dir_entries * entry_length;
	buf = rte_malloc("nvm_dir", buflen, 0);
	rte_mem_lock_page(buf);
	if (buf == NULL)
		return -ENOMEM;
	dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == 0) {
		PMD_DRV_LOG(ERR,
			"unable to map response address to physical memory\n");
		return -ENOMEM;
	}
	HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
	req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	if (rc == 0)
		memcpy(data, buf, len > buflen ? buflen : len);

	rte_free(buf);

	return rc;
}

int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
			     uint32_t offset, uint32_t length,
			     uint8_t *data)
{
	int rc;
	uint8_t *buf;
	rte_iova_t dma_handle;
	struct hwrm_nvm_read_input req = {0};
	struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;

	buf = rte_malloc("nvm_item", length, 0);
	rte_mem_lock_page(buf);
	if (!buf)
		return -ENOMEM;

	dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == 0) {
		PMD_DRV_LOG(ERR,
			"unable to map response address to physical memory\n");
		return -ENOMEM;
	}
	HWRM_PREP(req, NVM_READ);
	req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
	req.dir_idx = rte_cpu_to_le_16(index);
	req.offset = rte_cpu_to_le_32(offset);
	req.len = rte_cpu_to_le_32(length);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();
	if (rc == 0)
		memcpy(data, buf, length);

	rte_free(buf);
	return rc;
}

int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
{
	int rc;
	struct hwrm_nvm_erase_dir_entry_input req = {0};
	struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
	req.dir_idx = rte_cpu_to_le_16(index);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
			  uint16_t dir_ordinal, uint16_t dir_ext,
			  uint16_t dir_attr, const uint8_t *data,
			  size_t data_len)
{
	int rc;
	struct hwrm_nvm_write_input req = {0};
	struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
	rte_iova_t dma_handle;
	uint8_t *buf;

	HWRM_PREP(req, NVM_WRITE);

	req.dir_type = rte_cpu_to_le_16(dir_type);
	req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
	req.dir_ext = rte_cpu_to_le_16(dir_ext);
	req.dir_attr = rte_cpu_to_le_16(dir_attr);
	req.dir_data_length = rte_cpu_to_le_32(data_len);

	buf = rte_malloc("nvm_write", data_len, 0);
	rte_mem_lock_page(buf);
	if (!buf)
		return -ENOMEM;

	dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == 0) {
		PMD_DRV_LOG(ERR,
			"unable to map response address to physical memory\n");
		return -ENOMEM;
	}
	memcpy(buf, data, data_len);
	req.host_src_addr = rte_cpu_to_le_64(dma_handle);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	rte_free(buf);
	return rc;
}

static void
bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
{
	uint32_t *count = cbdata;

	*count = *count + 1;
}

static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
				     struct bnxt_vnic_info *vnic __rte_unused)
{
	return 0;
}

int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
{
	uint32_t count = 0;

	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
	    &count, bnxt_vnic_count_hwrm_stub);

	return count;
}

static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
					uint16_t *vnic_ids)
{
	struct hwrm_func_vf_vnic_ids_query_input req = {0};
	struct hwrm_func_vf_vnic_ids_query_output *resp =
						bp->hwrm_cmd_resp_addr;
	int rc;

	/* First query all VNIC ids */
	HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);

	req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
	req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
	req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));

	if (req.vnic_id_tbl_addr == 0) {
		HWRM_UNLOCK();
		PMD_DRV_LOG(ERR,
		"unable to map VNIC ID table address to physical memory\n");
		return -ENOMEM;
	}
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	if (rc) {
		HWRM_UNLOCK();
		PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
		return -1;
	} else if (resp->error_code) {
		rc = rte_le_to_cpu_16(resp->error_code);
		HWRM_UNLOCK();
		PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
		return -1;
	}
	rc = rte_le_to_cpu_32(resp->vnic_id_cnt);

	HWRM_UNLOCK();

	return rc;
}

/*
 * This function queries the VNIC IDs for a specified VF.  It then calls
 * the vnic_cb to update the necessary field in vnic_info with cbdata.
 * Then it calls the hwrm_cb function to program this new vnic configuration.
 */
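/*
 * For example (see bnxt_vf_vnic_count() above), counting a VF's active
 * VNICs passes bnxt_vnic_count() as vnic_cb with a uint32_t counter as
 * cbdata, and bnxt_vnic_count_hwrm_stub() as hwrm_cb since nothing needs
 * to be reprogrammed:
 *
 *	uint32_t count = 0;
 *
 *	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
 *	    &count, bnxt_vnic_count_hwrm_stub);
 */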
int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
	void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
	int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
{
	struct bnxt_vnic_info vnic;
	int rc = 0;
	int i, num_vnic_ids;
	uint16_t *vnic_ids;
	size_t vnic_id_sz;
	size_t sz;

	/* First query all VNIC ids */
	vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
	vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
			RTE_CACHE_LINE_SIZE);
	if (vnic_ids == NULL) {
		rc = -ENOMEM;
		return rc;
	}
	for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
		rte_mem_lock_page(((char *)vnic_ids) + sz);

	num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);

	if (num_vnic_ids < 0) {
		rte_free(vnic_ids);
		return num_vnic_ids;
	}

	/* Retrieve each VNIC, let vnic_cb adjust it, then reprogram it */
	for (i = 0; i < num_vnic_ids; i++) {
		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
		if (rc)
			break;
		if (vnic.mru <= 4)	/* Indicates unallocated */
			continue;

		vnic_cb(&vnic, cbdata);

		rc = hwrm_cb(bp, &vnic);
		if (rc)
			break;
	}

	rte_free(vnic_ids);

	return rc;
}

int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
					      bool on)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_CFG);

	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
	req.vlan_antispoof_mode = on ?
		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
{
	struct bnxt_vnic_info vnic;
	uint16_t *vnic_ids;
	size_t vnic_id_sz;
	int num_vnic_ids, i;
	size_t sz;
	int rc;

	vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
	vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
			RTE_CACHE_LINE_SIZE);
	if (vnic_ids == NULL) {
		rc = -ENOMEM;
		return rc;
	}

	for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
		rte_mem_lock_page(((char *)vnic_ids) + sz);

	rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
	if (rc <= 0)
		goto exit;
	num_vnic_ids = rc;

	/*
	 * Loop through to find the default VNIC ID.
	 * TODO: The easier way would be to obtain the resp->dflt_vnic_id
	 * by sending the hwrm_func_qcfg command to the firmware.
	 */
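	/*
	 * A minimal sketch of that TODO, assuming the hwrm_func_qcfg
	 * response really does expose a dflt_vnic_id field (untested here):
	 *
	 *	struct hwrm_func_qcfg_input req = {0};
	 *	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	 *
	 *	HWRM_PREP(req, FUNC_QCFG);
	 *	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	 *	HWRM_CHECK_RESULT();
	 *	... use rte_le_to_cpu_16(resp->dflt_vnic_id) ...
	 *	HWRM_UNLOCK();
	 */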
	for (i = 0; i < num_vnic_ids; i++) {
		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
					bp->pf.first_vf_id + vf);
		if (rc)
			goto exit;
		if (vnic.func_default) {
			rte_free(vnic_ids);
			return vnic.fw_vnic_id;
		}
	}
	/* Could not find a default VNIC. */
	PMD_DRV_LOG(ERR, "No default VNIC\n");
exit:
	rte_free(vnic_ids);
	return rc;
}

int bnxt_hwrm_set_em_filter(struct bnxt *bp,
			uint16_t dst_id,
			struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	if (filter->fw_em_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_em_filter(bp, filter);

	HWRM_PREP(req, CFA_EM_FLOW_ALLOC);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
	      HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (filter->ip_addr_type) {
		req.ip_addr_type = filter->ip_addr_type;
		enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
	}
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
		memcpy(req.src_macaddr, filter->src_macaddr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
		memcpy(req.dst_macaddr, filter->dst_macaddr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
		req.ovlan_vid = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
		req.ivlan_vid = filter->l2_ivlan;
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
		req.ip_protocol = filter->ip_protocol;
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
		req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
		req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
		req.src_port = rte_cpu_to_be_16(filter->src_port);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
		req.dst_port = rte_cpu_to_be_16(filter->dst_port);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
		req.mirror_vnic_id = filter->mirror_vnic_id;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
	struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (filter->fw_em_filter_id == UINT64_MAX)
		return 0;

	PMD_DRV_LOG(ERR, "Clear EM filter\n");
	HWRM_PREP(req, CFA_EM_FLOW_FREE);

	req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	filter->fw_em_filter_id = UINT64_MAX;
	filter->fw_l2_filter_id = UINT64_MAX;

	return 0;
}

int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
			 uint16_t dst_id,
			 struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_ntuple_filter_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	if (filter->fw_ntuple_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_ntuple_filter(bp, filter);

	HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
	      HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (filter->ip_addr_type) {
		req.ip_addr_type = filter->ip_addr_type;
		enables |=
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
	}
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
		memcpy(req.src_macaddr, filter->src_macaddr,
		       ETHER_ADDR_LEN);
	//if (enables &
	//    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
	//	memcpy(req.dst_macaddr, filter->dst_macaddr,
	//	       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
		req.ip_protocol = filter->ip_protocol;
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
		req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
		req.src_ipaddr_mask[0] =
			rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
		req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
		req.dst_ipaddr_mask[0] =
			rte_cpu_to_be_32(filter->dst_ipaddr_mask[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
		req.src_port = rte_cpu_to_le_16(filter->src_port);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
		req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
		req.dst_port = rte_cpu_to_le_16(filter->dst_port);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
		req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
		req.mirror_vnic_id = filter->mirror_vnic_id;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
				struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_ntuple_filter_free_output *resp =
						bp->hwrm_cmd_resp_addr;

	if (filter->fw_ntuple_filter_id == UINT64_MAX)
		return 0;

	HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);

	req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	filter->fw_ntuple_filter_id = UINT64_MAX;
	filter->fw_l2_filter_id = UINT64_MAX;

	return 0;
}

int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	unsigned int rss_idx, fw_idx, i;

	if (vnic->rss_table && vnic->hash_type) {
		/*
		 * Fill the RSS hash & redirection table with
		 * ring group ids for all VNICs
		 */
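		/*
		 * In effect the table is filled round-robin.  For example,
		 * with four Rx rings whose group IDs are all valid, fw_idx
		 * cycles 0, 1, 2, 3, 0, ... so every table slot maps to the
		 * next ring group in turn; the inner loop below only exists
		 * to skip rings whose group ID is INVALID_HW_RING_ID.
		 */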
		for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
			rss_idx++, fw_idx++) {
			for (i = 0; i < bp->rx_cp_nr_rings; i++) {
				fw_idx %= bp->rx_cp_nr_rings;
				if (vnic->fw_grp_ids[fw_idx] !=
				    INVALID_HW_RING_ID)
					break;
				fw_idx++;
			}
			if (i == bp->rx_cp_nr_rings)
				return 0;
			vnic->rss_table[rss_idx] =
				vnic->fw_grp_ids[fw_idx];
		}
		return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
	}

	return 0;
}

static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
	uint16_t flags;

	req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);

	/* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
	req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);

	/* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
	req->num_cmpl_dma_aggr_during_int =
		rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);

	req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);

	/* min timer set to 1/2 of interrupt timer */
	req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);

	/* buf timer set to 1/4 of interrupt timer */
	req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);

	req->cmpl_aggr_dma_tmr_during_int =
		rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);

	flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
		HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
	req->flags = rte_cpu_to_le_16(flags);
}

int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
			struct bnxt_coal *coal, uint16_t ring_id)
{
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
						bp->hwrm_cmd_resp_addr;
	int rc;

	/* Set ring coalesce parameters only for Stratus 100G NIC */
	if (!bnxt_stratus_device(bp))
		return 0;

	HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS);
	bnxt_hwrm_set_coal_params(coal, &req);
	req.ring_id = rte_cpu_to_le_16(ring_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();