1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2018 Broadcom
8 #include <rte_byteorder.h>
9 #include <rte_common.h>
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_memzone.h>
13 #include <rte_version.h>
17 #include "bnxt_filter.h"
18 #include "bnxt_hwrm.h"
21 #include "bnxt_ring.h"
24 #include "bnxt_vnic.h"
25 #include "hsi_struct_def_dpdk.h"
29 #define HWRM_CMD_TIMEOUT 6000000	/* max poll iterations in bnxt_hwrm_send_message() */
30 #define HWRM_SPEC_CODE_1_8_3 0x10803
31 #define HWRM_VERSION_1_9_1 0x10901
33 struct bnxt_plcmodes_cfg {
35 uint16_t jumbo_thresh;
37 uint16_t hds_threshold;
40 static int page_getenum(size_t size)
56 PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
57 return sizeof(void *) * 8 - 1;
60 static int page_roundup(size_t size)
62 return 1 << page_getenum(size);
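/*
 * Usage note (a sketch; the bucket boundaries live in the elided body of
 * page_getenum() above): page_getenum() maps a size to the exponent of the
 * next supported power-of-two page size, and page_roundup() returns that
 * page size itself, e.g. page_roundup(5000) would yield 8192 (1 << 13)
 * assuming an 8 KB bucket exists.
 */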
66 * HWRM Functions (sent to HWRM)
67 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
68 * fails (i.e., a timeout), and a positive non-zero HWRM error code if the
69 * ChiMP fails the command.
72 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
73 uint32_t msg_len, bool use_kong_mb)
76 struct input *req = msg;
77 struct output *resp = bp->hwrm_cmd_resp_addr;
81 uint16_t max_req_len = bp->max_req_len;
82 struct hwrm_short_input short_input = { 0 };
83 uint16_t bar_offset = use_kong_mb ?
84 GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
85 uint16_t mb_trigger_offset = use_kong_mb ?
86 GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
88 if (bp->flags & BNXT_FLAG_SHORT_CMD ||
89 msg_len > bp->max_req_len) {
90 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
92 memset(short_cmd_req, 0, bp->hwrm_max_ext_req_len);
93 memcpy(short_cmd_req, req, msg_len);
95 short_input.req_type = rte_cpu_to_le_16(req->req_type);
96 short_input.signature = rte_cpu_to_le_16(
97 HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
98 short_input.size = rte_cpu_to_le_16(msg_len);
99 short_input.req_addr =
100 rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);
102 data = (uint32_t *)&short_input;
103 msg_len = sizeof(short_input);
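/*
 * Short command path: the full request was copied into the DMA-able
 * buffer above, and only the 16-byte hwrm_short_input descriptor that
 * points at it (via req_addr) is written to the BAR below.
 */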
105 /* Sync memory write before updating doorbell */
108 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
111 /* Write request msg to hwrm channel */
112 for (i = 0; i < msg_len; i += 4) {
113 bar = (uint8_t *)bp->bar0 + bar_offset + i;
114 rte_write32(*data, bar);
118 /* Zero the rest of the request space */
119 for (; i < max_req_len; i += 4) {
120 bar = (uint8_t *)bp->bar0 + bar_offset + i;
124 /* Ring channel doorbell */
125 bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
128 /* Poll for the valid bit */
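/*
 * The firmware DMAs the response into hwrm_cmd_resp_addr and is expected
 * to write HWRM_RESP_VALID_KEY into the last byte only after the rest of
 * the response has landed, so polling that byte is sufficient to detect
 * a complete response.
 */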
129 for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
130 /* Sanity check on the resp->resp_len */
132 if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
133 /* Last byte of resp contains the valid key */
134 valid = (uint8_t *)resp + resp->resp_len - 1;
135 if (*valid == HWRM_RESP_VALID_KEY)
141 if (i >= HWRM_CMD_TIMEOUT) {
142 PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
153 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
154 * spinlock, and does initial processing.
156 * HWRM_CHECK_RESULT() checks the response and, on error, returns from the
157 * enclosing function (releasing the spinlock only on that path). If the
158 * regular int return codes are not used by the function, HWRM_CHECK_RESULT()
159 * should not be used directly; rather, copy and modify it to suit the function.
161 * HWRM_UNLOCK() must be called after all response processing is completed.
163 #define HWRM_PREP(req, type, kong) do { \
164 rte_spinlock_lock(&bp->hwrm_lock); \
165 memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
166 req.req_type = rte_cpu_to_le_16(HWRM_##type); \
167 req.cmpl_ring = rte_cpu_to_le_16(-1); \
168 req.seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
169 rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
170 req.target_id = rte_cpu_to_le_16(0xffff); \
171 req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
174 #define HWRM_CHECK_RESULT_SILENT() do {\
176 rte_spinlock_unlock(&bp->hwrm_lock); \
179 if (resp->error_code) { \
180 rc = rte_le_to_cpu_16(resp->error_code); \
181 rte_spinlock_unlock(&bp->hwrm_lock); \
186 #define HWRM_CHECK_RESULT() do {\
188 PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
189 rte_spinlock_unlock(&bp->hwrm_lock); \
190 if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
196 if (resp->error_code) { \
197 rc = rte_le_to_cpu_16(resp->error_code); \
198 if (resp->resp_len >= 16) { \
199 struct hwrm_err_output *tmp_hwrm_err_op = \
202 "error %d:%d:%08x:%04x\n", \
203 rc, tmp_hwrm_err_op->cmd_err, \
205 tmp_hwrm_err_op->opaque_0), \
207 tmp_hwrm_err_op->opaque_1)); \
209 PMD_DRV_LOG(ERR, "error %d\n", rc); \
211 rte_spinlock_unlock(&bp->hwrm_lock); \
212 if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
220 #define HWRM_UNLOCK() rte_spinlock_unlock(&bp->hwrm_lock)
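/*
 * Canonical usage of the macros above, distilled from the functions that
 * follow ("foo"/FOO are placeholder names, not a real HWRM command):
 *
 *	int rc;
 *	struct hwrm_foo_input req = {.req_type = 0 };
 *	struct hwrm_foo_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *	HWRM_PREP(req, FOO, BNXT_USE_CHIMP_MB);
 *	req.some_field = rte_cpu_to_le_16(value);
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 *	HWRM_CHECK_RESULT();
 *	value = rte_le_to_cpu_16(resp->some_field);
 *	HWRM_UNLOCK();
 *	return rc;
 */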
222 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
225 struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
226 struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
228 HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
229 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
232 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
240 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
241 struct bnxt_vnic_info *vnic,
243 struct bnxt_vlan_table_entry *vlan_table)
246 struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
247 struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
250 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
253 HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
254 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
256 /* FIXME add multicast flag, when multicast adding options are supported
259 if (vnic->flags & BNXT_VNIC_INFO_BCAST)
260 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
261 if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
262 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
263 if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
264 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
265 if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
266 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
267 if (vnic->flags & BNXT_VNIC_INFO_MCAST)
268 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
269 if (vnic->mc_addr_cnt) {
270 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
271 req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
272 req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
275 if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
276 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
277 req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
278 rte_mem_virt2iova(vlan_table));
279 req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
281 req.mask = rte_cpu_to_le_32(mask);
283 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
291 int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
293 struct bnxt_vlan_antispoof_table_entry *vlan_table)
296 struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
297 struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
298 bp->hwrm_cmd_resp_addr;
301 * Older HWRM versions did not support this command, and the set_rx_mask
302 * list was used for anti-spoof. In 1.8.0, the TX path configuration was
303 * removed from the set_rx_mask call, and this command was added.
305 * This command is also present from 1.7.8.11 and higher,
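*
* For reference, bp->fw_ver packs the firmware version as
* major << 24 | minor << 16 | build << 8 | rsvd (see bnxt_hwrm_ver_get()
* below): 1.8.0 is 0x01080000 and 1.7.8.11 is 0x0107080b, which is what
* the packed comparisons just below check against.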
308 if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
309 if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
310 if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
315 HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
316 req.fid = rte_cpu_to_le_16(fid);
318 req.vlan_tag_mask_tbl_addr =
319 rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
320 req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
322 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
330 int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
331 struct bnxt_filter_info *filter)
334 struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
335 struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
337 if (filter->fw_l2_filter_id == UINT64_MAX)
340 HWRM_PREP(req, CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);
342 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
344 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
349 filter->fw_l2_filter_id = UINT64_MAX;
354 int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
356 struct bnxt_filter_info *filter)
359 struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
360 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
361 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
362 const struct rte_eth_vmdq_rx_conf *conf =
363 &dev_conf->rx_adv_conf.vmdq_rx_conf;
364 uint32_t enables = 0;
365 uint16_t j = dst_id - 1;
367 /* TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ? */
368 if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
369 conf->pool_map[j].pools & (1UL << j)) {
371 "Add vlan %u to vmdq pool %u\n",
372 conf->pool_map[j].vlan_id, j);
374 filter->l2_ivlan = conf->pool_map[j].vlan_id;
376 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
377 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
380 if (filter->fw_l2_filter_id != UINT64_MAX)
381 bnxt_hwrm_clear_l2_filter(bp, filter);
383 HWRM_PREP(req, CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
385 req.flags = rte_cpu_to_le_32(filter->flags);
387 rte_cpu_to_le_32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST);
389 enables = filter->enables |
390 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
391 req.dst_id = rte_cpu_to_le_16(dst_id);
394 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
395 memcpy(req.l2_addr, filter->l2_addr,
398 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
399 memcpy(req.l2_addr_mask, filter->l2_addr_mask,
402 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
403 req.l2_ovlan = filter->l2_ovlan;
405 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
406 req.l2_ivlan = filter->l2_ivlan;
408 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
409 req.l2_ovlan_mask = filter->l2_ovlan_mask;
411 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
412 req.l2_ivlan_mask = filter->l2_ivlan_mask;
413 if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
414 req.src_id = rte_cpu_to_le_32(filter->src_id);
415 if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
416 req.src_type = filter->src_type;
418 req.enables = rte_cpu_to_le_32(enables);
420 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
424 filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
430 int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
432 struct hwrm_port_mac_cfg_input req = {.req_type = 0};
433 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
440 HWRM_PREP(req, PORT_MAC_CFG, BNXT_USE_CHIMP_MB);
443 flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
446 HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
447 if (ptp->tx_tstamp_en)
448 flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
451 HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
452 req.flags = rte_cpu_to_le_32(flags);
453 req.enables = rte_cpu_to_le_32
454 (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
455 req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);
457 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
463 static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
466 struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
467 struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
468 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
470 /* if (bp->hwrm_spec_code < 0x10801 || ptp) TBD */
474 HWRM_PREP(req, PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);
476 req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
478 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
482 if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
485 ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
489 ptp->rx_regs[BNXT_PTP_RX_TS_L] =
490 rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
491 ptp->rx_regs[BNXT_PTP_RX_TS_H] =
492 rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
493 ptp->rx_regs[BNXT_PTP_RX_SEQ] =
494 rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
495 ptp->rx_regs[BNXT_PTP_RX_FIFO] =
496 rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
497 ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
498 rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
499 ptp->tx_regs[BNXT_PTP_TX_TS_L] =
500 rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
501 ptp->tx_regs[BNXT_PTP_TX_TS_H] =
502 rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
503 ptp->tx_regs[BNXT_PTP_TX_SEQ] =
504 rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
505 ptp->tx_regs[BNXT_PTP_TX_FIFO] =
506 rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
514 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
517 struct hwrm_func_qcaps_input req = {.req_type = 0 };
518 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
519 uint16_t new_max_vfs;
523 HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);
525 req.fid = rte_cpu_to_le_16(0xffff);
527 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
531 bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
532 flags = rte_le_to_cpu_32(resp->flags);
534 bp->pf.port_id = resp->port_id;
535 bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
536 bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
537 new_max_vfs = bp->pdev->max_vfs;
538 if (new_max_vfs != bp->pf.max_vfs) {
540 rte_free(bp->pf.vf_info);
541 bp->pf.vf_info = rte_malloc("bnxt_vf_info",
542 sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
543 bp->pf.max_vfs = new_max_vfs;
544 for (i = 0; i < new_max_vfs; i++) {
545 bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
546 bp->pf.vf_info[i].vlan_table =
547 rte_zmalloc("VF VLAN table",
550 if (bp->pf.vf_info[i].vlan_table == NULL)
552 "Fail to alloc VLAN table for VF %d\n",
556 bp->pf.vf_info[i].vlan_table);
557 bp->pf.vf_info[i].vlan_as_table =
558 rte_zmalloc("VF VLAN AS table",
561 if (bp->pf.vf_info[i].vlan_as_table == NULL)
563 "Alloc VLAN AS table for VF %d fail\n",
567 bp->pf.vf_info[i].vlan_as_table);
568 STAILQ_INIT(&bp->pf.vf_info[i].filter);
573 bp->fw_fid = rte_le_to_cpu_32(resp->fid);
574 memcpy(bp->dflt_mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);
575 bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
576 bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
577 bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
578 bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
579 bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
580 /* TODO: For now, do not support VMDq/RFS on VFs. */
585 bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
589 bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
591 bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
592 if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
593 bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
594 PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n");
596 bnxt_hwrm_ptp_qcfg(bp);
605 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
609 rc = __bnxt_hwrm_func_qcaps(bp);
610 if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
611 rc = bnxt_hwrm_func_resc_qcaps(bp);
613 bp->flags |= BNXT_FLAG_NEW_RM;
619 int bnxt_hwrm_func_reset(struct bnxt *bp)
622 struct hwrm_func_reset_input req = {.req_type = 0 };
623 struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
625 HWRM_PREP(req, FUNC_RESET, BNXT_USE_CHIMP_MB);
627 req.enables = rte_cpu_to_le_32(0);
629 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
637 int bnxt_hwrm_func_driver_register(struct bnxt *bp)
640 struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
641 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
643 if (bp->flags & BNXT_FLAG_REGISTERED)
646 HWRM_PREP(req, FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
647 req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
648 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
649 req.ver_maj = RTE_VER_YEAR;
650 req.ver_min = RTE_VER_MONTH;
651 req.ver_upd = RTE_VER_MINOR;
654 req.enables |= rte_cpu_to_le_32(
655 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
656 memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
657 RTE_MIN(sizeof(req.vf_req_fwd),
658 sizeof(bp->pf.vf_req_fwd)));
661 * PF can sniff HWRM API issued by VF. This can be set up by
662 * the Linux driver and inherited by the DPDK PF driver. Clear
663 * this HWRM sniffer list in FW because DPDK PF driver does
667 rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
670 req.async_event_fwd[0] |=
671 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
672 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
673 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
674 req.async_event_fwd[1] |=
675 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
676 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
678 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
683 bp->flags |= BNXT_FLAG_REGISTERED;
688 int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
690 if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
693 return bnxt_hwrm_func_reserve_vf_resc(bp, true);
696 int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
701 struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
702 struct hwrm_func_vf_cfg_input req = {0};
704 HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
706 req.enables = rte_cpu_to_le_32
707 (HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS |
708 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS |
709 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
710 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
711 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS |
712 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS);
714 req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
715 req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
716 AGG_RING_MULTIPLIER);
717 req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
718 req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
720 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
721 req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
722 if (bp->vf_resv_strategy ==
723 HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
724 enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
725 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
726 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
727 req.enables |= rte_cpu_to_le_32(enables);
728 req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
729 req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
730 req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
734 flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
735 HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
736 HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
737 HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
738 HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
739 HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;
741 req.flags = rte_cpu_to_le_32(flags);
743 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
746 HWRM_CHECK_RESULT_SILENT();
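/*
 * When called with test == true (as from bnxt_hwrm_check_vf_rings()),
 * the *_ASSETS_TEST flags ask the firmware to validate the requested
 * reservation without committing it, so the result is checked silently:
 * a refusal is an expected probe outcome rather than an error to log.
 */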
754 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
757 struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
758 struct hwrm_func_resource_qcaps_input req = {0};
760 HWRM_PREP(req, FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
761 req.fid = rte_cpu_to_le_16(0xffff);
763 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
768 bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
769 bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
770 bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
771 bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
772 bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
773 bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
774 bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
775 bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
777 bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
778 if (bp->vf_resv_strategy >
779 HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
780 bp->vf_resv_strategy =
781 HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;
787 int bnxt_hwrm_ver_get(struct bnxt *bp)
790 struct hwrm_ver_get_input req = {.req_type = 0 };
791 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
793 uint16_t max_resp_len;
794 char type[RTE_MEMZONE_NAMESIZE];
795 uint32_t dev_caps_cfg;
797 bp->max_req_len = HWRM_MAX_REQ_LEN;
798 HWRM_PREP(req, VER_GET, BNXT_USE_CHIMP_MB);
800 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
801 req.hwrm_intf_min = HWRM_VERSION_MINOR;
802 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
804 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
808 PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
809 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
810 resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
811 resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
812 bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
813 (resp->hwrm_fw_min_8b << 16) |
814 (resp->hwrm_fw_bld_8b << 8) |
815 resp->hwrm_fw_rsvd_8b;
816 PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
817 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
819 fw_version = resp->hwrm_intf_maj_8b << 16;
820 fw_version |= resp->hwrm_intf_min_8b << 8;
821 fw_version |= resp->hwrm_intf_upd_8b;
822 bp->hwrm_spec_code = fw_version;
824 if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
825 PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
830 if (bp->max_req_len > resp->max_req_win_len) {
831 PMD_DRV_LOG(ERR, "Unsupported request length\n");
834 bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
835 bp->hwrm_max_ext_req_len = rte_le_to_cpu_16(resp->max_ext_req_len);
836 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
837 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
839 max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
840 dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);
842 if (bp->max_resp_len != max_resp_len) {
843 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
844 bp->pdev->addr.domain, bp->pdev->addr.bus,
845 bp->pdev->addr.devid, bp->pdev->addr.function);
847 rte_free(bp->hwrm_cmd_resp_addr);
849 bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
850 if (bp->hwrm_cmd_resp_addr == NULL) {
854 rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
855 bp->hwrm_cmd_resp_dma_addr =
856 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
857 if (bp->hwrm_cmd_resp_dma_addr == 0) {
859 "Unable to map response buffer to physical memory.\n");
863 bp->max_resp_len = max_resp_len;
867 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
869 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
870 PMD_DRV_LOG(DEBUG, "Short command supported\n");
871 bp->flags |= BNXT_FLAG_SHORT_CMD;
875 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
877 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) ||
878 bp->hwrm_max_ext_req_len > HWRM_MAX_REQ_LEN) {
879 sprintf(type, "bnxt_hwrm_short_%04x:%02x:%02x:%02x",
880 bp->pdev->addr.domain, bp->pdev->addr.bus,
881 bp->pdev->addr.devid, bp->pdev->addr.function);
883 rte_free(bp->hwrm_short_cmd_req_addr);
885 bp->hwrm_short_cmd_req_addr =
886 rte_malloc(type, bp->hwrm_max_ext_req_len, 0);
887 if (bp->hwrm_short_cmd_req_addr == NULL) {
891 rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
892 bp->hwrm_short_cmd_req_dma_addr =
893 rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
894 if (bp->hwrm_short_cmd_req_dma_addr == 0) {
895 rte_free(bp->hwrm_short_cmd_req_addr);
897 "Unable to map buffer to physical memory.\n");
903 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
904 bp->flags |= BNXT_FLAG_KONG_MB_EN;
905 PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
908 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
909 PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");
916 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
919 struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
920 struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
922 if (!(bp->flags & BNXT_FLAG_REGISTERED))
925 HWRM_PREP(req, FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
928 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
933 bp->flags &= ~BNXT_FLAG_REGISTERED;
938 static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
941 struct hwrm_port_phy_cfg_input req = {0};
942 struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
943 uint32_t enables = 0;
945 HWRM_PREP(req, PORT_PHY_CFG, BNXT_USE_CHIMP_MB);
948 /* A fixed speed is being set while autoneg is on, so disable autoneg */
949 if (bp->link_info.auto_mode && conf->link_speed) {
950 req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
951 PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
954 req.flags = rte_cpu_to_le_32(conf->phy_flags);
955 req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
956 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
958 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
959 * any auto mode, even "none".
961 if (!conf->link_speed) {
962 /* No speeds specified. Enable AutoNeg - all speeds */
964 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
966 /* AutoNeg - Advertise speeds specified. */
967 if (conf->auto_link_speed_mask &&
968 !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
970 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
971 req.auto_link_speed_mask =
972 conf->auto_link_speed_mask;
974 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
977 req.auto_duplex = conf->duplex;
978 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
979 req.auto_pause = conf->auto_pause;
980 req.force_pause = conf->force_pause;
981 /* Set force_pause if there is no auto or if there is a force */
982 if (req.auto_pause && !req.force_pause)
983 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
985 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
987 req.enables = rte_cpu_to_le_32(enables);
990 rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
991 PMD_DRV_LOG(INFO, "Force Link Down\n");
994 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1002 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
1003 struct bnxt_link_info *link_info)
1006 struct hwrm_port_phy_qcfg_input req = {0};
1007 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1009 HWRM_PREP(req, PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);
1011 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1013 HWRM_CHECK_RESULT();
1015 link_info->phy_link_status = resp->link;
1016 link_info->link_up =
1017 (link_info->phy_link_status ==
1018 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
1019 link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
1020 link_info->duplex = resp->duplex_cfg;
1021 link_info->pause = resp->pause;
1022 link_info->auto_pause = resp->auto_pause;
1023 link_info->force_pause = resp->force_pause;
1024 link_info->auto_mode = resp->auto_mode;
1025 link_info->phy_type = resp->phy_type;
1026 link_info->media_type = resp->media_type;
1028 link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
1029 link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
1030 link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
1031 link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
1032 link_info->phy_ver[0] = resp->phy_maj;
1033 link_info->phy_ver[1] = resp->phy_min;
1034 link_info->phy_ver[2] = resp->phy_bld;
1038 PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
1039 PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
1040 PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
1041 PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
1042 PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
1043 link_info->auto_link_speed_mask);
1044 PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
1045 link_info->force_link_speed);
1050 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
1053 struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
1054 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
1057 HWRM_PREP(req, QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);
1059 req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
1060 /* HWRM Version >= 1.9.1 */
1061 if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
1063 HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
1064 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1066 HWRM_CHECK_RESULT();
1068 #define GET_QUEUE_INFO(x) \
1069 bp->cos_queue[x].id = resp->queue_id##x; \
1070 bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
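/*
 * Token pasting makes GET_QUEUE_INFO(0) expand to:
 *
 *	bp->cos_queue[0].id = resp->queue_id0;
 *	bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 */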
1083 if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
1084 bp->tx_cosq_id = bp->cos_queue[0].id;
1086 /* iterate and find the COSq profile to use for Tx */
1087 for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
1088 if (bp->cos_queue[i].profile ==
1089 HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
1090 bp->tx_cosq_id = bp->cos_queue[i].id;
1095 PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);
1100 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
1101 struct bnxt_ring *ring,
1102 uint32_t ring_type, uint32_t map_index,
1103 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
1106 uint32_t enables = 0;
1107 struct hwrm_ring_alloc_input req = {.req_type = 0 };
1108 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1110 HWRM_PREP(req, RING_ALLOC, BNXT_USE_CHIMP_MB);
1112 req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
1113 req.fbo = rte_cpu_to_le_32(0);
1114 /* Association of ring index with doorbell index */
1115 req.logical_id = rte_cpu_to_le_16(map_index);
1116 req.length = rte_cpu_to_le_32(ring->ring_size);
1118 switch (ring_type) {
1119 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1120 req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
1122 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1123 req.ring_type = ring_type;
1124 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1125 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1126 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1128 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1130 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1131 req.ring_type = ring_type;
1133 * TODO: Some HWRM versions crash with
1134 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
1136 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1139 PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
1144 req.enables = rte_cpu_to_le_32(enables);
1146 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1148 if (rc || resp->error_code) {
1149 if (rc == 0 && resp->error_code)
1150 rc = rte_le_to_cpu_16(resp->error_code);
1151 switch (ring_type) {
1152 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1154 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
1157 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1159 "hwrm_ring_alloc rx failed. rc:%d\n", rc);
1162 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1164 "hwrm_ring_alloc tx failed. rc:%d\n", rc);
1168 PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
1174 ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
1179 int bnxt_hwrm_ring_free(struct bnxt *bp,
1180 struct bnxt_ring *ring, uint32_t ring_type)
1183 struct hwrm_ring_free_input req = {.req_type = 0 };
1184 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
1186 HWRM_PREP(req, RING_FREE, BNXT_USE_CHIMP_MB);
1188 req.ring_type = ring_type;
1189 req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
1191 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1193 if (rc || resp->error_code) {
1194 if (rc == 0 && resp->error_code)
1195 rc = rte_le_to_cpu_16(resp->error_code);
1198 switch (ring_type) {
1199 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1200 PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
1203 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1204 PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
1207 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1208 PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
1212 PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
1220 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1223 struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
1224 struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1226 HWRM_PREP(req, RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);
1228 req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
1229 req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
1230 req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
1231 req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
1233 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1235 HWRM_CHECK_RESULT();
1237 bp->grp_info[idx].fw_grp_id =
1238 rte_le_to_cpu_16(resp->ring_group_id);
1245 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
1248 struct hwrm_ring_grp_free_input req = {.req_type = 0 };
1249 struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
1251 HWRM_PREP(req, RING_GRP_FREE, BNXT_USE_CHIMP_MB);
1253 req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
1255 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1257 HWRM_CHECK_RESULT();
1260 bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
1264 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1267 struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
1268 struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1270 if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
1273 HWRM_PREP(req, STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
1275 req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1277 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1279 HWRM_CHECK_RESULT();
1285 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1286 unsigned int idx __rte_unused)
1289 struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1290 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1292 HWRM_PREP(req, STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1294 req.update_period_ms = rte_cpu_to_le_32(0);
1296 req.stats_dma_addr =
1297 rte_cpu_to_le_64(cpr->hw_stats_map);
1299 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1301 HWRM_CHECK_RESULT();
1303 cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id);
1310 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1311 unsigned int idx __rte_unused)
1314 struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1315 struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1317 HWRM_PREP(req, STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
1319 req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1321 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1323 HWRM_CHECK_RESULT();
1329 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1332 struct hwrm_vnic_alloc_input req = { 0 };
1333 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1335 /* map ring groups to this vnic */
1336 PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
1337 vnic->start_grp_id, vnic->end_grp_id);
1338 for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
1339 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1341 vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1342 vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1343 vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1344 vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
1345 vnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
1346 RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
1347 HWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB);
1349 if (vnic->func_default)
1351 rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1352 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1354 HWRM_CHECK_RESULT();
1356 vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1358 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1362 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1363 struct bnxt_vnic_info *vnic,
1364 struct bnxt_plcmodes_cfg *pmode)
1367 struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1368 struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1370 HWRM_PREP(req, VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);
1372 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1374 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1376 HWRM_CHECK_RESULT();
1378 pmode->flags = rte_le_to_cpu_32(resp->flags);
1379 /* dflt_vnic bit doesn't exist in the _cfg command */
1380 pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1381 pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1382 pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1383 pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1390 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1391 struct bnxt_vnic_info *vnic,
1392 struct bnxt_plcmodes_cfg *pmode)
1395 struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1396 struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1398 HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
1400 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1401 req.flags = rte_cpu_to_le_32(pmode->flags);
1402 req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1403 req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1404 req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1405 req.enables = rte_cpu_to_le_32(
1406 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1407 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1408 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1411 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1413 HWRM_CHECK_RESULT();
1419 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1422 struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1423 struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1424 uint32_t ctx_enable_flag = 0;
1425 struct bnxt_plcmodes_cfg pmodes;
1427 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1428 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1432 rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1436 HWRM_PREP(req, VNIC_CFG, BNXT_USE_CHIMP_MB);
1438 /* Only RSS is supported for now; TBD: COS & LB */
1440 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
1441 if (vnic->lb_rule != 0xffff)
1442 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1443 if (vnic->cos_rule != 0xffff)
1444 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1445 if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) {
1446 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1447 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1449 req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
1450 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1451 req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1452 req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1453 req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1454 req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1455 req.mru = rte_cpu_to_le_16(vnic->mru);
1456 /* Configure default VNIC only once. */
1457 if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {
1459 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1460 bp->flags |= BNXT_FLAG_DFLT_VNIC_SET;
1462 if (vnic->vlan_strip)
1464 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1467 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1468 if (vnic->roce_dual)
1469 req.flags |= rte_cpu_to_le_32(
1470 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1471 if (vnic->roce_only)
1472 req.flags |= rte_cpu_to_le_32(
1473 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1474 if (vnic->rss_dflt_cr)
1475 req.flags |= rte_cpu_to_le_32(
1476 HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
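/*
 * Note: the three flag assignments above reuse HWRM_VNIC_QCFG_OUTPUT_*
 * constants in a VNIC_CFG request. This assumes the corresponding
 * HWRM_VNIC_CFG_INPUT_FLAGS_* bits carry identical values, which should
 * be verified against hsi_struct_def_dpdk.h.
 */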
1478 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1480 HWRM_CHECK_RESULT();
1483 rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1488 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1492 struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1493 struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1495 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1496 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1499 HWRM_PREP(req, VNIC_QCFG, BNXT_USE_CHIMP_MB);
1502 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1503 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1504 req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1506 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1508 HWRM_CHECK_RESULT();
1510 vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1511 vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1512 vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1513 vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1514 vnic->mru = rte_le_to_cpu_16(resp->mru);
1515 vnic->func_default = rte_le_to_cpu_32(
1516 resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1517 vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1518 HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1519 vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1520 HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1521 vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1522 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1523 vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1524 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1525 vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1526 HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1533 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1536 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1537 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1538 bp->hwrm_cmd_resp_addr;
1540 HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1542 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1544 HWRM_CHECK_RESULT();
1546 vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1548 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1553 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1556 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1557 struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1558 bp->hwrm_cmd_resp_addr;
1560 if (vnic->rss_rule == (uint16_t)HWRM_NA_SIGNATURE) {
1561 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1564 HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
1566 req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1568 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1570 HWRM_CHECK_RESULT();
1573 vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1578 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1581 struct hwrm_vnic_free_input req = {.req_type = 0 };
1582 struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1584 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1585 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1589 HWRM_PREP(req, VNIC_FREE, BNXT_USE_CHIMP_MB);
1591 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1593 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1595 HWRM_CHECK_RESULT();
1598 vnic->fw_vnic_id = INVALID_HW_RING_ID;
1599 /* Configure default VNIC again if necessary. */
1600 if (vnic->func_default && (bp->flags & BNXT_FLAG_DFLT_VNIC_SET))
1601 bp->flags &= ~BNXT_FLAG_DFLT_VNIC_SET;
1606 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1607 struct bnxt_vnic_info *vnic)
1610 struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1611 struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1613 HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
1615 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1616 req.hash_mode_flags = vnic->hash_mode;
1618 req.ring_grp_tbl_addr =
1619 rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1620 req.hash_key_tbl_addr =
1621 rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1622 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1623 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1625 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1627 HWRM_CHECK_RESULT();
1633 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1634 struct bnxt_vnic_info *vnic)
1637 struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1638 struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1641 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1642 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1646 HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
1648 req.flags = rte_cpu_to_le_32(
1649 HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1651 req.enables = rte_cpu_to_le_32(
1652 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1654 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1655 size -= RTE_PKTMBUF_HEADROOM;
1657 req.jumbo_thresh = rte_cpu_to_le_16(size);
1658 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1660 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1662 HWRM_CHECK_RESULT();
1668 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1669 struct bnxt_vnic_info *vnic, bool enable)
1672 struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1673 struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1675 HWRM_PREP(req, VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
1678 req.enables = rte_cpu_to_le_32(
1679 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1680 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1681 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1682 req.flags = rte_cpu_to_le_32(
1683 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1684 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1685 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1686 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1687 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1688 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1689 req.max_agg_segs = rte_cpu_to_le_16(5);
1691 rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1692 req.min_agg_len = rte_cpu_to_le_32(512);
1694 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1696 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1698 HWRM_CHECK_RESULT();
1704 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1706 struct hwrm_func_cfg_input req = {0};
1707 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1710 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1711 req.enables = rte_cpu_to_le_32(
1712 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1713 memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1714 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1716 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
1718 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1719 HWRM_CHECK_RESULT();
1722 bp->pf.vf_info[vf].random_mac = false;
1727 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1731 struct hwrm_func_qstats_input req = {.req_type = 0};
1732 struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1734 HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
1736 req.fid = rte_cpu_to_le_16(fid);
1738 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1740 HWRM_CHECK_RESULT();
1743 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1750 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1751 struct rte_eth_stats *stats)
1754 struct hwrm_func_qstats_input req = {.req_type = 0};
1755 struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1757 HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
1759 req.fid = rte_cpu_to_le_16(fid);
1761 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1763 HWRM_CHECK_RESULT();
1765 stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1766 stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1767 stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1768 stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1769 stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1770 stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1772 stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1773 stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1774 stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1775 stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1776 stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1777 stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1779 stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
1780 stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
1781 stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
1788 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1791 struct hwrm_func_clr_stats_input req = {.req_type = 0};
1792 struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1794 HWRM_PREP(req, FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
1796 req.fid = rte_cpu_to_le_16(fid);
1798 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1800 HWRM_CHECK_RESULT();
1807 * HWRM utility functions
1810 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1815 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1816 struct bnxt_tx_queue *txq;
1817 struct bnxt_rx_queue *rxq;
1818 struct bnxt_cp_ring_info *cpr;
1820 if (i >= bp->rx_cp_nr_rings) {
1821 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1824 rxq = bp->rx_queues[i];
1828 rc = bnxt_hwrm_stat_clear(bp, cpr);
1835 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1839 struct bnxt_cp_ring_info *cpr;
1841 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1843 if (i >= bp->rx_cp_nr_rings) {
1844 cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1846 cpr = bp->rx_queues[i]->cp_ring;
1847 bp->grp_info[i].fw_stats_ctx = -1;
1849 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1850 rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1851 cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1859 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1864 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1865 struct bnxt_tx_queue *txq;
1866 struct bnxt_rx_queue *rxq;
1867 struct bnxt_cp_ring_info *cpr;
1869 if (i >= bp->rx_cp_nr_rings) {
1870 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1873 rxq = bp->rx_queues[i];
1877 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1885 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1890 for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1892 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
1895 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1903 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1905 struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1907 bnxt_hwrm_ring_free(bp, cp_ring,
1908 HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1909 cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1910 memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1911 sizeof(*cpr->cp_desc_ring));
1912 cpr->cp_raw_cons = 0;
1915 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
1917 struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
1918 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1919 struct bnxt_ring *ring = rxr->rx_ring_struct;
1920 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1922 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1923 bnxt_hwrm_ring_free(bp, ring,
1924 HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1925 ring->fw_ring_id = INVALID_HW_RING_ID;
1926 bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
1927 memset(rxr->rx_desc_ring, 0,
1928 rxr->rx_ring_struct->ring_size *
1929 sizeof(*rxr->rx_desc_ring));
1930 memset(rxr->rx_buf_ring, 0,
1931 rxr->rx_ring_struct->ring_size *
1932 sizeof(*rxr->rx_buf_ring));
1935 ring = rxr->ag_ring_struct;
1936 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1937 bnxt_hwrm_ring_free(bp, ring,
1938 HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1939 ring->fw_ring_id = INVALID_HW_RING_ID;
1940 memset(rxr->ag_buf_ring, 0,
1941 rxr->ag_ring_struct->ring_size *
1942 sizeof(*rxr->ag_buf_ring));
1944 bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
1946 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
1947 bnxt_free_cp_ring(bp, cpr);
1949 bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
1952 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1956 for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1957 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1958 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1959 struct bnxt_ring *ring = txr->tx_ring_struct;
1960 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1962 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1963 bnxt_hwrm_ring_free(bp, ring,
1964 HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1965 ring->fw_ring_id = INVALID_HW_RING_ID;
1966 memset(txr->tx_desc_ring, 0,
1967 txr->tx_ring_struct->ring_size *
1968 sizeof(*txr->tx_desc_ring));
1969 memset(txr->tx_buf_ring, 0,
1970 txr->tx_ring_struct->ring_size *
1971 sizeof(*txr->tx_buf_ring));
1975 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1976 bnxt_free_cp_ring(bp, cpr);
1977 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1981 for (i = 0; i < bp->rx_cp_nr_rings; i++)
1982 bnxt_free_hwrm_rx_ring(bp, i);
1987 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1992 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1993 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
2000 void bnxt_free_hwrm_resources(struct bnxt *bp)
2002 /* Release the HWRM buffers allocated via rte_malloc() */
2003 rte_free(bp->hwrm_cmd_resp_addr);
2004 rte_free(bp->hwrm_short_cmd_req_addr);
2005 bp->hwrm_cmd_resp_addr = NULL;
2006 bp->hwrm_short_cmd_req_addr = NULL;
2007 bp->hwrm_cmd_resp_dma_addr = 0;
2008 bp->hwrm_short_cmd_req_dma_addr = 0;
2011 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2013 struct rte_pci_device *pdev = bp->pdev;
2014 char type[RTE_MEMZONE_NAMESIZE];
2016 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
2017 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
2018 bp->max_resp_len = HWRM_MAX_RESP_LEN;
2019 bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
2020 rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
2021 if (bp->hwrm_cmd_resp_addr == NULL)
2023 bp->hwrm_cmd_resp_dma_addr =
2024 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
2025 if (bp->hwrm_cmd_resp_dma_addr == 0) {
2027 "unable to map response address to physical memory\n");
2030 rte_spinlock_init(&bp->hwrm_lock);
2035 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2037 struct bnxt_filter_info *filter;
2040 STAILQ_FOREACH(filter, &vnic->filter, next) {
2041 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2042 rc = bnxt_hwrm_clear_em_filter(bp, filter);
2043 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2044 rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2046 rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2047 STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
2055 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2057 struct bnxt_filter_info *filter;
2058 struct rte_flow *flow;
2061 STAILQ_FOREACH(flow, &vnic->flow_list, next) {
2062 filter = flow->filter;
2063 PMD_DRV_LOG(ERR, "filter type %d\n", filter->filter_type);
2064 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2065 rc = bnxt_hwrm_clear_em_filter(bp, filter);
2066 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2067 rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2069 rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2071 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
2079 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2081 struct bnxt_filter_info *filter;
2084 STAILQ_FOREACH(filter, &vnic->filter, next) {
2085 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2086 rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2088 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2089 rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2092 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2100 void bnxt_free_tunnel_ports(struct bnxt *bp)
2102 if (bp->vxlan_port_cnt)
2103 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2104 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2106 if (bp->geneve_port_cnt)
2107 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2108 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2109 bp->geneve_port = 0;
2112 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2116 if (bp->vnic_info == NULL)
2120 * Cleanup VNICs in reverse order, to make sure the L2 filter
2121 * from vnic0 is last to be cleaned up.
2123 for (i = bp->nr_vnics - 1; i >= 0; i--) {
2124 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2126 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2128 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2130 bnxt_hwrm_vnic_ctx_free(bp, vnic);
2132 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2134 bnxt_hwrm_vnic_free(bp, vnic);
2136 rte_free(vnic->fw_grp_ids);
2138 /* Ring resources */
2139 bnxt_free_all_hwrm_rings(bp);
2140 bnxt_free_all_hwrm_ring_grps(bp);
2141 bnxt_free_all_hwrm_stat_ctxs(bp);
2142 bnxt_free_tunnel_ports(bp);
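/*
 * Teardown order used above: per-VNIC flows, then filters, the
 * RSS/COS/LB context, TPA configuration, and the VNIC itself; then all
 * rings, ring groups, and stat contexts; and finally the tunnel ports.
 */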
2145 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2147 uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2149 if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2150 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2152 switch (conf_link_speed) {
2153 case ETH_LINK_SPEED_10M_HD:
2154 case ETH_LINK_SPEED_100M_HD:
2156 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2158 return hw_link_duplex;
2161 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2163 return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
2166 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2168 uint16_t eth_link_speed = 0;
2170 if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2171 return ETH_LINK_SPEED_AUTONEG;
2173 switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2174 case ETH_LINK_SPEED_100M:
2175 case ETH_LINK_SPEED_100M_HD:
2176 /* FALLTHROUGH */
2177 eth_link_speed =
2178 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2179 break;
2180 case ETH_LINK_SPEED_1G:
2181 eth_link_speed =
2182 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2183 break;
2184 case ETH_LINK_SPEED_2_5G:
2185 eth_link_speed =
2186 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2187 break;
2188 case ETH_LINK_SPEED_10G:
2189 eth_link_speed =
2190 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2191 break;
2192 case ETH_LINK_SPEED_20G:
2193 eth_link_speed =
2194 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2195 break;
2196 case ETH_LINK_SPEED_25G:
2197 eth_link_speed =
2198 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2199 break;
2200 case ETH_LINK_SPEED_40G:
2201 eth_link_speed =
2202 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2203 break;
2204 case ETH_LINK_SPEED_50G:
2205 eth_link_speed =
2206 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2207 break;
2208 case ETH_LINK_SPEED_100G:
2209 eth_link_speed =
2210 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2211 break;
2212 default:
2213 PMD_DRV_LOG(ERR,
2214 "Unsupported link speed %d; default to AUTO\n",
2215 conf_link_speed);
2216 break;
2218 return eth_link_speed;
2221 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2222 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2223 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2224 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
2226 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2230 if (link_speed == ETH_LINK_SPEED_AUTONEG)
2233 if (link_speed & ETH_LINK_SPEED_FIXED) {
2234 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2236 if (one_speed & (one_speed - 1)) {
2238 "Invalid advertised speeds (%u) for port %u\n",
2239 link_speed, port_id);
2242 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2244 "Unsupported advertised speed (%u) for port %u\n",
2245 link_speed, port_id);
2249 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2251 "Unsupported advertised speeds (%u) for port %u\n",
2252 link_speed, port_id);
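/*
 * Note on the check above: a fixed-speed request must have exactly one
 * speed bit set, and "one_speed & (one_speed - 1)" is the usual
 * power-of-two test -- clearing the lowest set bit leaves zero only when
 * a single bit was set. Worked example with illustrative values:
 *
 *	0x08 & 0x07 == 0x00   (one bit set: accepted)
 *	0x0a & 0x09 == 0x08   (two bits set: rejected)
 */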
2260 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2264 if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2265 if (bp->link_info.support_speeds)
2266 return bp->link_info.support_speeds;
2267 link_speed = BNXT_SUPPORTED_SPEEDS;
2270 if (link_speed & ETH_LINK_SPEED_100M)
2271 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2272 if (link_speed & ETH_LINK_SPEED_100M_HD)
2273 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2274 if (link_speed & ETH_LINK_SPEED_1G)
2275 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2276 if (link_speed & ETH_LINK_SPEED_2_5G)
2277 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2278 if (link_speed & ETH_LINK_SPEED_10G)
2279 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2280 if (link_speed & ETH_LINK_SPEED_20G)
2281 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2282 if (link_speed & ETH_LINK_SPEED_25G)
2283 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2284 if (link_speed & ETH_LINK_SPEED_40G)
2285 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2286 if (link_speed & ETH_LINK_SPEED_50G)
2287 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2288 if (link_speed & ETH_LINK_SPEED_100G)
2289 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
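/*
 * Illustrative sketch (not driver code): composing an advertised-speed
 * mask from rte_ethdev speed flags, e.g. limiting advertisement to 1G
 * and 10G:
 *
 *	uint32_t speeds = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
 *	uint16_t mask = bnxt_parse_eth_link_speed_mask(bp, speeds);
 *
 * For ETH_LINK_SPEED_AUTONEG the function instead prefers the
 * PHY-reported support_speeds when the firmware has supplied them.
 */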
2293 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2295 uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2297 switch (hw_link_speed) {
2298 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2299 eth_link_speed = ETH_SPEED_NUM_100M;
2300 break;
2301 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2302 eth_link_speed = ETH_SPEED_NUM_1G;
2303 break;
2304 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2305 eth_link_speed = ETH_SPEED_NUM_2_5G;
2306 break;
2307 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2308 eth_link_speed = ETH_SPEED_NUM_10G;
2309 break;
2310 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2311 eth_link_speed = ETH_SPEED_NUM_20G;
2312 break;
2313 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2314 eth_link_speed = ETH_SPEED_NUM_25G;
2315 break;
2316 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2317 eth_link_speed = ETH_SPEED_NUM_40G;
2318 break;
2319 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2320 eth_link_speed = ETH_SPEED_NUM_50G;
2321 break;
2322 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2323 eth_link_speed = ETH_SPEED_NUM_100G;
2324 break;
2325 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2326 default:
2327 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2328 hw_link_speed);
2329 break;
2331 return eth_link_speed;
2334 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2336 uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2338 switch (hw_link_duplex) {
2339 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2340 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2341 /* FALLTHROUGH */
2342 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2343 break;
2344 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2345 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2346 break;
2347 default:
2348 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2349 hw_link_duplex);
2350 break;
2352 return eth_link_duplex;
2355 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2358 struct bnxt_link_info *link_info = &bp->link_info;
2360 rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2363 "Get link config failed with rc %d\n", rc);
2366 if (link_info->link_speed)
2367 link->link_speed =
2368 bnxt_parse_hw_link_speed(link_info->link_speed);
2369 else
2370 link->link_speed = ETH_SPEED_NUM_NONE;
2371 link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2372 link->link_status = link_info->link_up;
2373 link->link_autoneg = link_info->auto_mode ==
2374 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2375 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2380 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2383 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2384 struct bnxt_link_info link_req;
2385 uint16_t speed, autoneg;
2387 if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2390 rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2391 bp->eth_dev->data->port_id);
2395 memset(&link_req, 0, sizeof(link_req));
2396 link_req.link_up = link_up;
2400 autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2401 speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2402 link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2403 /* Autoneg can be done only when the FW allows */
2404 if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
2405 bp->link_info.force_link_speed)) {
2406 link_req.phy_flags |=
2407 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2408 link_req.auto_link_speed_mask =
2409 bnxt_parse_eth_link_speed_mask(bp,
2410 dev_conf->link_speeds);
2412 if (bp->link_info.phy_type ==
2413 HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2414 bp->link_info.phy_type ==
2415 HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2416 bp->link_info.media_type ==
2417 HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2418 PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2422 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2423 /* If user wants a particular speed try that first. */
2424 if (speed)
2425 link_req.link_speed = speed;
2426 else if (bp->link_info.force_link_speed)
2427 link_req.link_speed = bp->link_info.force_link_speed;
2428 else
2429 link_req.link_speed = bp->link_info.auto_link_speed;
2431 link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2432 link_req.auto_pause = bp->link_info.auto_pause;
2433 link_req.force_pause = bp->link_info.force_pause;
2436 rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2439 "Set link config failed with rc %d\n", rc);
2447 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2449 struct hwrm_func_qcfg_input req = {0};
2450 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2454 HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2455 req.fid = rte_cpu_to_le_16(0xffff);
2457 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2459 HWRM_CHECK_RESULT();
2461 /* Hardcoded 0xfff VLAN ID mask */
2462 bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2463 flags = rte_le_to_cpu_16(resp->flags);
2464 if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2465 bp->flags |= BNXT_FLAG_MULTI_HOST;
2467 if (BNXT_VF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
2468 bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
2469 PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
2472 switch (resp->port_partition_type) {
2473 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2474 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2475 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2476 /* FALLTHROUGH */
2477 bp->port_partition_type = resp->port_partition_type;
2478 break;
2479 default:
2480 bp->port_partition_type = 0;
2481 break;
2489 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2490 struct hwrm_func_qcaps_output *qcaps)
2492 qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2493 memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2494 sizeof(qcaps->mac_address));
2495 qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2496 qcaps->max_rx_rings = fcfg->num_rx_rings;
2497 qcaps->max_tx_rings = fcfg->num_tx_rings;
2498 qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2499 qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2501 qcaps->first_vf_id = 0;
2502 qcaps->max_vnics = fcfg->num_vnics;
2503 qcaps->max_decap_records = 0;
2504 qcaps->max_encap_records = 0;
2505 qcaps->max_tx_wm_flows = 0;
2506 qcaps->max_tx_em_flows = 0;
2507 qcaps->max_rx_wm_flows = 0;
2508 qcaps->max_rx_em_flows = 0;
2509 qcaps->max_flow_id = 0;
2510 qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2511 qcaps->max_sp_tx_rings = 0;
2512 qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2515 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2517 struct hwrm_func_cfg_input req = {0};
2518 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2521 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2522 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2523 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2524 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2525 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2526 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2527 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2528 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2529 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2530 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2531 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2532 req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2533 req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
2534 RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * BNXT_NUM_VLANS);
2536 req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2537 req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2538 req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2539 req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2540 req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2541 req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2542 req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2543 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2544 req.fid = rte_cpu_to_le_16(0xffff);
2546 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2548 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2550 HWRM_CHECK_RESULT();
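/*
 * Worked example for the MRU computation above (illustrative, assuming
 * two stacked VLAN tags): with an MTU of 1500, RTE_ETHER_HDR_LEN of 14,
 * RTE_ETHER_CRC_LEN of 4 and VLAN_TAG_SIZE of 4, the firmware is asked
 * for 1500 + 14 + 4 + 2 * 4 = 1526 bytes.
 */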
2556 static void populate_vf_func_cfg_req(struct bnxt *bp,
2557 struct hwrm_func_cfg_input *req,
2560 req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2561 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2562 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2563 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2564 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2565 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2566 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2567 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2568 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2569 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2571 req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
2572 RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * BNXT_NUM_VLANS);
2574 req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
2575 RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * BNXT_NUM_VLANS);
2577 req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx / (num_vfs + 1));
2579 req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2580 req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings / (num_vfs + 1));
2582 req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2583 req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2584 req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2585 /* TODO: For now, do not support VMDq/RFS on VFs. */
2586 req->num_vnics = rte_cpu_to_le_16(1);
2587 req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps / (num_vfs + 1));
2591 static void add_random_mac_if_needed(struct bnxt *bp,
2592 struct hwrm_func_cfg_input *cfg_req,
2595 struct rte_ether_addr mac;
2597 if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2600 if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2602 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2603 rte_eth_random_addr(cfg_req->dflt_mac_addr);
2604 bp->pf.vf_info[vf].random_mac = true;
2606 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes,
2607 RTE_ETHER_ADDR_LEN);
2611 static void reserve_resources_from_vf(struct bnxt *bp,
2612 struct hwrm_func_cfg_input *cfg_req,
2615 struct hwrm_func_qcaps_input req = {0};
2616 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2619 /* Get the actual allocated values now */
2620 HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);
2621 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2622 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2625 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
2626 copy_func_cfg_to_qcaps(cfg_req, resp);
2627 } else if (resp->error_code) {
2628 rc = rte_le_to_cpu_16(resp->error_code);
2629 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
2630 copy_func_cfg_to_qcaps(cfg_req, resp);
2633 bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2634 bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2635 bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2636 bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2637 bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2638 bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2640 * TODO: While not supporting VMDq with VFs, max_vnics is always
2641 * forced to 1 in this case
2643 /* bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics); */
2644 bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2649 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2651 struct hwrm_func_qcfg_input req = {0};
2652 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2655 /* Query the VF's current configuration to read back its VLAN */
2656 HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2657 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2658 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2660 PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
2662 } else if (resp->error_code) {
2663 rc = rte_le_to_cpu_16(resp->error_code);
2664 PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
2667 rc = rte_le_to_cpu_16(resp->vlan);
2674 static int update_pf_resource_max(struct bnxt *bp)
2676 struct hwrm_func_qcfg_input req = {0};
2677 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2680 /* And copy the allocated numbers into the pf struct */
2681 HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2682 req.fid = rte_cpu_to_le_16(0xffff);
2683 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2684 HWRM_CHECK_RESULT();
2686 /* Only TX ring value reflects actual allocation? TODO */
2687 bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2688 bp->pf.evb_mode = resp->evb_mode;
2695 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2700 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2704 rc = bnxt_hwrm_func_qcaps(bp);
2708 bp->pf.func_cfg_flags &=
2709 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2710 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2711 bp->pf.func_cfg_flags |=
2712 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2713 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2717 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2719 struct hwrm_func_cfg_input req = {0};
2720 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2727 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2731 rc = bnxt_hwrm_func_qcaps(bp);
2736 bp->pf.active_vfs = num_vfs;
2739 * First, configure the PF to only use one TX ring. This ensures that
2740 * there are enough rings for all VFs.
2742 * If we don't do this, when we call func_alloc() later, we will lock
2743 * extra rings to the PF that won't be available during func_cfg() of the VFs.
2746 * This has been fixed with firmware versions above 20.6.54
2748 bp->pf.func_cfg_flags &=
2749 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2750 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2751 bp->pf.func_cfg_flags |=
2752 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2753 rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2758 * Now, create and register a buffer to hold forwarded VF requests
2760 req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2761 bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2762 page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
2763 if (bp->pf.vf_req_buf == NULL) {
2767 for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2768 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2769 for (i = 0; i < num_vfs; i++)
2770 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2771 (i * HWRM_MAX_REQ_LEN);
2773 rc = bnxt_hwrm_func_buf_rgtr(bp);
2777 populate_vf_func_cfg_req(bp, &req, num_vfs);
2779 bp->pf.active_vfs = 0;
2780 for (i = 0; i < num_vfs; i++) {
2781 add_random_mac_if_needed(bp, &req, i);
2783 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2784 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2785 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2786 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2791 /* Clear enable flag for next pass */
2792 req.enables &= ~rte_cpu_to_le_32(
2793 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2795 if (rc || resp->error_code) {
2796 PMD_DRV_LOG(ERR,
2797 "Failed to initialize VF %d\n", i);
2798 PMD_DRV_LOG(ERR,
2799 "Not all VFs available. (%d, %d)\n",
2800 rc, resp->error_code);
2807 reserve_resources_from_vf(bp, &req, i);
2808 bp->pf.active_vfs++;
2809 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2813 * Now configure the PF to use "the rest" of the resources.
2814 * We still use STD_TX_RING_MODE here, which limits the number of TX
2815 * rings but allows QoS to function properly. Without this flag, the
2816 * PF rings would not honor bandwidth settings.
2818 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2822 rc = update_pf_resource_max(bp);
2829 bnxt_hwrm_func_buf_unrgtr(bp);
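/*
 * Illustrative usage sketch (assumed caller, not part of this file): a
 * PF initialization path would pick between the two allocation entry
 * points depending on whether SR-IOV VFs were requested:
 *
 *	if (num_vfs)
 *		rc = bnxt_hwrm_allocate_vfs(bp, num_vfs);
 *	else
 *		rc = bnxt_hwrm_allocate_pf_only(bp);
 */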
2833 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2835 struct hwrm_func_cfg_input req = {0};
2836 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2839 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2841 req.fid = rte_cpu_to_le_16(0xffff);
2842 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2843 req.evb_mode = bp->pf.evb_mode;
2845 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2846 HWRM_CHECK_RESULT();
2852 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2853 uint8_t tunnel_type)
2855 struct hwrm_tunnel_dst_port_alloc_input req = {0};
2856 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2859 HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
2860 req.tunnel_type = tunnel_type;
2861 req.tunnel_dst_port_val = port;
2862 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2863 HWRM_CHECK_RESULT();
2865 switch (tunnel_type) {
2866 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2867 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2868 bp->vxlan_port = port;
2869 break;
2870 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2871 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2872 bp->geneve_port = port;
2873 break;
2883 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2884 uint8_t tunnel_type)
2886 struct hwrm_tunnel_dst_port_free_input req = {0};
2887 struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2890 HWRM_PREP(req, TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
2892 req.tunnel_type = tunnel_type;
2893 req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2894 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2896 HWRM_CHECK_RESULT();
2902 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2905 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2906 struct hwrm_func_cfg_input req = {0};
2909 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2911 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2912 req.flags = rte_cpu_to_le_32(flags);
2913 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2915 HWRM_CHECK_RESULT();
2921 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2923 uint32_t *flag = flagp;
2925 vnic->flags = *flag;
2928 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2930 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2933 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2936 struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2937 struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2939 HWRM_PREP(req, FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
2941 req.req_buf_num_pages = rte_cpu_to_le_16(1);
2942 req.req_buf_page_size = rte_cpu_to_le_16(
2943 page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2944 req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2945 req.req_buf_page_addr0 =
2946 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
2947 if (req.req_buf_page_addr0 == 0) {
2948 PMD_DRV_LOG(ERR,
2949 "unable to map buffer address to physical memory\n");
2953 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2955 HWRM_CHECK_RESULT();
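/*
 * Note (illustrative): req_buf_page_size carries a log2-encoded page
 * size, which is why page_getenum() is used above, while page_roundup()
 * yields the corresponding byte count used when the forwarding buffer
 * is allocated in bnxt_hwrm_allocate_vfs():
 *
 *	uint16_t enc = page_getenum(sz);   log2 encoding sent to firmware
 *	size_t bytes = page_roundup(sz);   byte count for the allocation
 */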
2961 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2964 struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2965 struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2967 HWRM_PREP(req, FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
2969 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2971 HWRM_CHECK_RESULT();
2977 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2979 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2980 struct hwrm_func_cfg_input req = {0};
2983 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2985 req.fid = rte_cpu_to_le_16(0xffff);
2986 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2987 req.enables = rte_cpu_to_le_32(
2988 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2989 req.async_event_cr = rte_cpu_to_le_16(
2990 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2991 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2993 HWRM_CHECK_RESULT();
2999 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
3001 struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3002 struct hwrm_func_vf_cfg_input req = {0};
3005 HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
3007 req.enables = rte_cpu_to_le_32(
3008 HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3009 req.async_event_cr = rte_cpu_to_le_16(
3010 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
3011 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3013 HWRM_CHECK_RESULT();
3019 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
3021 struct hwrm_func_cfg_input req = {0};
3022 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3023 uint16_t dflt_vlan, fid;
3024 uint32_t func_cfg_flags;
3027 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3030 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
3031 fid = bp->pf.vf_info[vf].fid;
3032 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
3034 fid = rte_cpu_to_le_16(0xffff);
3035 func_cfg_flags = bp->pf.func_cfg_flags;
3036 dflt_vlan = bp->vlan;
3039 req.flags = rte_cpu_to_le_32(func_cfg_flags);
3040 req.fid = rte_cpu_to_le_16(fid);
3041 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3042 req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
3044 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3046 HWRM_CHECK_RESULT();
3052 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
3053 uint16_t max_bw, uint16_t enables)
3055 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3056 struct hwrm_func_cfg_input req = {0};
3059 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3061 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3062 req.enables |= rte_cpu_to_le_32(enables);
3063 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3064 req.max_bw = rte_cpu_to_le_32(max_bw);
3065 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3067 HWRM_CHECK_RESULT();
3073 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
3075 struct hwrm_func_cfg_input req = {0};
3076 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3079 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3081 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3082 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3083 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3084 req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
3086 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3088 HWRM_CHECK_RESULT();
3094 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
3099 rc = bnxt_hwrm_func_cfg_def_cp(bp);
3101 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
3106 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
3107 void *encaped, size_t ec_size)
3110 struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
3111 struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3113 if (ec_size > sizeof(req.encap_request))
3116 HWRM_PREP(req, REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
3118 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3119 memcpy(req.encap_request, encaped, ec_size);
3121 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3123 HWRM_CHECK_RESULT();
3129 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
3130 struct rte_ether_addr *mac)
3132 struct hwrm_func_qcfg_input req = {0};
3133 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3136 HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
3138 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3139 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3141 HWRM_CHECK_RESULT();
3143 memcpy(mac->addr_bytes, resp->mac_address, RTE_ETHER_ADDR_LEN);
3150 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
3151 void *encaped, size_t ec_size)
3154 struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
3155 struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3157 if (ec_size > sizeof(req.encap_request))
3160 HWRM_PREP(req, EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
3162 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3163 memcpy(req.encap_request, encaped, ec_size);
3165 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3167 HWRM_CHECK_RESULT();
3173 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3174 struct rte_eth_stats *stats, uint8_t rx)
3177 struct hwrm_stat_ctx_query_input req = {.req_type = 0};
3178 struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
3180 HWRM_PREP(req, STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
3182 req.stat_ctx_id = rte_cpu_to_le_32(cid);
3184 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3186 HWRM_CHECK_RESULT();
3188 if (rx) {
3189 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3190 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3191 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3192 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3193 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3194 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3195 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3196 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3197 } else {
3198 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3199 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3200 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3201 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3202 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3203 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3212 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3214 struct hwrm_port_qstats_input req = {0};
3215 struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3216 struct bnxt_pf_info *pf = &bp->pf;
3219 HWRM_PREP(req, PORT_QSTATS, BNXT_USE_CHIMP_MB);
3221 req.port_id = rte_cpu_to_le_16(pf->port_id);
3222 req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3223 req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3224 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3226 HWRM_CHECK_RESULT();
3232 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3234 struct hwrm_port_clr_stats_input req = {0};
3235 struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3236 struct bnxt_pf_info *pf = &bp->pf;
3239 /* Not allowed on NS2 device, NPAR, MultiHost, VF */
3240 if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
3241 BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
3244 HWRM_PREP(req, PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
3246 req.port_id = rte_cpu_to_le_16(pf->port_id);
3247 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3249 HWRM_CHECK_RESULT();
3255 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3257 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3258 struct hwrm_port_led_qcaps_input req = {0};
3264 HWRM_PREP(req, PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
3265 req.port_id = bp->pf.port_id;
3266 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3268 HWRM_CHECK_RESULT();
3270 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3273 bp->num_leds = resp->num_leds;
3274 memcpy(bp->leds, &resp->led0_id,
3275 sizeof(bp->leds[0]) * bp->num_leds);
3276 for (i = 0; i < bp->num_leds; i++) {
3277 struct bnxt_led_info *led = &bp->leds[i];
3279 uint16_t caps = led->led_state_caps;
3281 if (!led->led_group_id ||
3282 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3294 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3296 struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3297 struct hwrm_port_led_cfg_input req = {0};
3298 struct bnxt_led_cfg *led_cfg;
3299 uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3300 uint16_t duration = 0;
3303 if (!bp->num_leds || BNXT_VF(bp))
3306 HWRM_PREP(req, PORT_LED_CFG, BNXT_USE_CHIMP_MB);
3309 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3310 duration = rte_cpu_to_le_16(500);
3312 req.port_id = bp->pf.port_id;
3313 req.num_leds = bp->num_leds;
3314 led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3315 for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3316 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3317 led_cfg->led_id = bp->leds[i].led_id;
3318 led_cfg->led_state = led_state;
3319 led_cfg->led_blink_on = duration;
3320 led_cfg->led_blink_off = duration;
3321 led_cfg->led_group_id = bp->leds[i].led_group_id;
3324 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3326 HWRM_CHECK_RESULT();
3332 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3336 struct hwrm_nvm_get_dir_info_input req = {0};
3337 struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3339 HWRM_PREP(req, NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
3341 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3343 HWRM_CHECK_RESULT();
3347 *entries = rte_le_to_cpu_32(resp->entries);
3348 *length = rte_le_to_cpu_32(resp->entry_length);
3353 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3356 uint32_t dir_entries;
3357 uint32_t entry_length;
3360 rte_iova_t dma_handle;
3361 struct hwrm_nvm_get_dir_entries_input req = {0};
3362 struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3364 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3368 *data++ = dir_entries;
3369 *data++ = entry_length;
3370 len -= 2;
3371 memset(data, 0xff, len);
3373 buflen = dir_entries * entry_length;
3374 buf = rte_malloc("nvm_dir", buflen, 0);
3375 rte_mem_lock_page(buf);
3378 dma_handle = rte_mem_virt2iova(buf);
3379 if (dma_handle == 0) {
3380 PMD_DRV_LOG(ERR,
3381 "unable to map response address to physical memory\n");
3384 HWRM_PREP(req, NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
3385 req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3386 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3388 if (rc == 0)
3389 memcpy(data, buf, len > buflen ? buflen : len);
3391 rte_free(buf);
3392 HWRM_CHECK_RESULT();
3398 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3399 uint32_t offset, uint32_t length,
3404 rte_iova_t dma_handle;
3405 struct hwrm_nvm_read_input req = {0};
3406 struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3408 buf = rte_malloc("nvm_item", length, 0);
3409 rte_mem_lock_page(buf);
3413 dma_handle = rte_mem_virt2iova(buf);
3414 if (dma_handle == 0) {
3415 PMD_DRV_LOG(ERR,
3416 "unable to map response address to physical memory\n");
3419 HWRM_PREP(req, NVM_READ, BNXT_USE_CHIMP_MB);
3420 req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3421 req.dir_idx = rte_cpu_to_le_16(index);
3422 req.offset = rte_cpu_to_le_32(offset);
3423 req.len = rte_cpu_to_le_32(length);
3424 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3425 if (rc == 0)
3426 memcpy(data, buf, length);
3428 rte_free(buf);
3429 HWRM_CHECK_RESULT();
3435 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3438 struct hwrm_nvm_erase_dir_entry_input req = {0};
3439 struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3441 HWRM_PREP(req, NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
3442 req.dir_idx = rte_cpu_to_le_16(index);
3443 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3444 HWRM_CHECK_RESULT();
3451 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3452 uint16_t dir_ordinal, uint16_t dir_ext,
3453 uint16_t dir_attr, const uint8_t *data,
3457 struct hwrm_nvm_write_input req = {0};
3458 struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3459 rte_iova_t dma_handle;
3462 buf = rte_malloc("nvm_write", data_len, 0);
3463 rte_mem_lock_page(buf);
3467 dma_handle = rte_mem_virt2iova(buf);
3468 if (dma_handle == 0) {
3469 PMD_DRV_LOG(ERR,
3470 "unable to map response address to physical memory\n");
3473 memcpy(buf, data, data_len);
3475 HWRM_PREP(req, NVM_WRITE, BNXT_USE_CHIMP_MB);
3477 req.dir_type = rte_cpu_to_le_16(dir_type);
3478 req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3479 req.dir_ext = rte_cpu_to_le_16(dir_ext);
3480 req.dir_attr = rte_cpu_to_le_16(dir_attr);
3481 req.dir_data_length = rte_cpu_to_le_32(data_len);
3482 req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3484 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3486 rte_free(buf);
3487 HWRM_CHECK_RESULT();
3494 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3496 uint32_t *count = cbdata;
3498 *count = *count + 1;
3501 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3502 struct bnxt_vnic_info *vnic __rte_unused)
3507 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3511 bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3512 &count, bnxt_vnic_count_hwrm_stub);
3517 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3520 struct hwrm_func_vf_vnic_ids_query_input req = {0};
3521 struct hwrm_func_vf_vnic_ids_query_output *resp =
3522 bp->hwrm_cmd_resp_addr;
3525 /* First query all VNIC ids */
3526 HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
3528 req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3529 req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3530 req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3532 if (req.vnic_id_tbl_addr == 0) {
3533 HWRM_UNLOCK();
3534 PMD_DRV_LOG(ERR,
3535 "unable to map VNIC ID table address to physical memory\n");
3538 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3541 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
3543 } else if (resp->error_code) {
3544 rc = rte_le_to_cpu_16(resp->error_code);
3546 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
3549 rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3557 * This function queries the VNIC IDs for a specified VF. It then calls
3558 * the vnic_cb to update the necessary field in vnic_info with cbdata.
3559 * Then it calls the hwrm_cb function to program this new vnic configuration.
3561 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3562 void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3563 int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3565 struct bnxt_vnic_info vnic;
3567 int i, num_vnic_ids;
3572 /* First query all VNIC ids */
3573 vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3574 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3575 RTE_CACHE_LINE_SIZE);
3576 if (vnic_ids == NULL) {
3580 for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3581 rte_mem_lock_page(((char *)vnic_ids) + sz);
3583 num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3585 if (num_vnic_ids < 0)
3586 return num_vnic_ids;
3588 /* Retrieve VNIC, update bd_stall then update */
3590 for (i = 0; i < num_vnic_ids; i++) {
3591 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3592 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3593 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3596 if (vnic.mru <= 4) /* Indicates unallocated */
3599 vnic_cb(&vnic, cbdata);
3601 rc = hwrm_cb(bp, &vnic);
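/*
 * Illustrative usage sketch (assumed caller, mirroring how the
 * rte_pmd_bnxt API drives this helper): the rx-mask callbacks defined
 * earlier in this file reprogram every VNIC owned by a VF in one pass:
 *
 *	uint32_t flag = vnic_flags;   (assumed flag word)
 *
 *	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
 *		vf_vnic_set_rxmask_cb, &flag, bnxt_set_rx_mask_no_vlan);
 */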
3611 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3614 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3615 struct hwrm_func_cfg_input req = {0};
3618 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3620 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3621 req.enables |= rte_cpu_to_le_32(
3622 HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3623 req.vlan_antispoof_mode = on ?
3624 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3625 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3626 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3628 HWRM_CHECK_RESULT();
3634 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3636 struct bnxt_vnic_info vnic;
3639 int num_vnic_ids, i;
3643 vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3644 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3645 RTE_CACHE_LINE_SIZE);
3646 if (vnic_ids == NULL) {
3651 for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3652 rte_mem_lock_page(((char *)vnic_ids) + sz);
3654 rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3660 * Loop through to find the default VNIC ID.
3661 * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3662 * by sending the hwrm_func_qcfg command to the firmware.
3664 for (i = 0; i < num_vnic_ids; i++) {
3665 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3666 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3667 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3668 bp->pf.first_vf_id + vf);
3671 if (vnic.func_default) {
3672 rte_free(vnic_ids);
3673 return vnic.fw_vnic_id;
3676 /* Could not find a default VNIC. */
3677 PMD_DRV_LOG(ERR, "No default VNIC\n");
3683 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3685 struct bnxt_filter_info *filter)
3688 struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3689 struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3690 uint32_t enables = 0;
3692 if (filter->fw_em_filter_id != UINT64_MAX)
3693 bnxt_hwrm_clear_em_filter(bp, filter);
3695 HWRM_PREP(req, CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));
3697 req.flags = rte_cpu_to_le_32(filter->flags);
3699 enables = filter->enables |
3700 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3701 req.dst_id = rte_cpu_to_le_16(dst_id);
3703 if (filter->ip_addr_type) {
3704 req.ip_addr_type = filter->ip_addr_type;
3705 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3706 }
3707 if (enables &
3708 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3709 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3710 if (enables &
3711 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3712 memcpy(req.src_macaddr, filter->src_macaddr,
3713 RTE_ETHER_ADDR_LEN);
3714 if (enables &
3715 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3716 memcpy(req.dst_macaddr, filter->dst_macaddr,
3717 RTE_ETHER_ADDR_LEN);
3718 if (enables &
3719 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3720 req.ovlan_vid = filter->l2_ovlan;
3721 if (enables &
3722 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3723 req.ivlan_vid = filter->l2_ivlan;
3724 if (enables &
3725 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3726 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3727 if (enables &
3728 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3729 req.ip_protocol = filter->ip_protocol;
3730 if (enables &
3731 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3732 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3733 if (enables &
3734 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3735 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3736 if (enables &
3737 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3738 req.src_port = rte_cpu_to_be_16(filter->src_port);
3739 if (enables &
3740 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3741 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3742 if (enables &
3743 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3744 req.mirror_vnic_id = filter->mirror_vnic_id;
3746 req.enables = rte_cpu_to_le_32(enables);
3748 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
3750 HWRM_CHECK_RESULT();
3752 filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3758 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3761 struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3762 struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3764 if (filter->fw_em_filter_id == UINT64_MAX)
3767 PMD_DRV_LOG(DEBUG, "Clear EM filter\n");
3768 HWRM_PREP(req, CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
3770 req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3772 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
3774 HWRM_CHECK_RESULT();
3777 filter->fw_em_filter_id = UINT64_MAX;
3778 filter->fw_l2_filter_id = UINT64_MAX;
3783 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3785 struct bnxt_filter_info *filter)
3788 struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3789 struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3790 bp->hwrm_cmd_resp_addr;
3791 uint32_t enables = 0;
3793 if (filter->fw_ntuple_filter_id != UINT64_MAX)
3794 bnxt_hwrm_clear_ntuple_filter(bp, filter);
3796 HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
3798 req.flags = rte_cpu_to_le_32(filter->flags);
3800 enables = filter->enables |
3801 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3802 req.dst_id = rte_cpu_to_le_16(dst_id);
3805 if (filter->ip_addr_type) {
3806 req.ip_addr_type = filter->ip_addr_type;
3807 enables |=
3808 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3809 }
3810 if (enables &
3811 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3812 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3813 if (enables &
3814 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3815 memcpy(req.src_macaddr, filter->src_macaddr,
3816 RTE_ETHER_ADDR_LEN);
3817 //if (enables &
3818 //HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
3819 //memcpy(req.dst_macaddr, filter->dst_macaddr,
3820 //RTE_ETHER_ADDR_LEN);
3821 if (enables &
3822 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
3823 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3824 if (enables &
3825 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3826 req.ip_protocol = filter->ip_protocol;
3827 if (enables &
3828 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3829 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
3830 if (enables &
3831 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
3832 req.src_ipaddr_mask[0] =
3833 rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
3834 if (enables &
3835 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
3836 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
3837 if (enables &
3838 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
3839 req.dst_ipaddr_mask[0] =
3840 rte_cpu_to_be_32(filter->dst_ipaddr_mask[0]);
3841 if (enables &
3842 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
3843 req.src_port = rte_cpu_to_le_16(filter->src_port);
3844 if (enables &
3845 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
3846 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
3847 if (enables &
3848 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
3849 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
3850 if (enables &
3851 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
3852 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
3853 if (enables &
3854 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3855 req.mirror_vnic_id = filter->mirror_vnic_id;
3857 req.enables = rte_cpu_to_le_32(enables);
3859 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3861 HWRM_CHECK_RESULT();
3863 filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3869 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3870 struct bnxt_filter_info *filter)
3873 struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3874 struct hwrm_cfa_ntuple_filter_free_output *resp =
3875 bp->hwrm_cmd_resp_addr;
3877 if (filter->fw_ntuple_filter_id == UINT64_MAX)
3880 HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);
3882 req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3884 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3886 HWRM_CHECK_RESULT();
3889 filter->fw_ntuple_filter_id = UINT64_MAX;
3894 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3896 unsigned int rss_idx, fw_idx, i;
3898 if (vnic->rss_table && vnic->hash_type) {
3900 * Fill the RSS hash & redirection table with
3901 * ring group ids for all VNICs
3903 for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
3904 rss_idx++, fw_idx++) {
3905 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
3906 fw_idx %= bp->rx_cp_nr_rings;
3907 if (vnic->fw_grp_ids[fw_idx] !=
3908 INVALID_HW_RING_ID)
3909 break;
3910 fw_idx++;
3912 if (i == bp->rx_cp_nr_rings)
3913 return 0;
3914 vnic->rss_table[rss_idx] =
3915 vnic->fw_grp_ids[fw_idx];
3917 return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
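/*
 * Illustrative sketch of the spreading above: with three Rx rings the
 * HW_HASH_INDEX_SIZE-entry redirection table is filled round-robin,
 * skipping rings whose group ID is still invalid:
 *
 *	table index: 0 1 2 3 4 5 ...
 *	ring group : 0 1 2 0 1 2 ...
 */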
3922 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
3923 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
3927 req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
3929 /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
3930 req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
3932 /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
3933 req->num_cmpl_dma_aggr_during_int =
3934 rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
3936 req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
3938 /* min timer set to 1/2 of interrupt timer */
3939 req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
3941 /* buf timer set to 1/4 of interrupt timer */
3942 req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
3944 req->cmpl_aggr_dma_tmr_during_int =
3945 rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
3947 flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
3948 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
3949 req->flags = rte_cpu_to_le_16(flags);
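/*
 * Illustrative sketch (assumed values): deriving the related timers
 * before calling bnxt_hwrm_set_ring_coal(), following the 1/2 and 1/4
 * ratios noted in the comments above:
 *
 *	struct bnxt_coal coal = { .int_lat_tmr_max = 150 };
 *
 *	coal.int_lat_tmr_min = coal.int_lat_tmr_max / 2;
 *	coal.cmpl_aggr_dma_tmr = coal.int_lat_tmr_max / 4;
 *	bnxt_hwrm_set_ring_coal(bp, &coal, ring_id);
 */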
3952 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
3953 struct bnxt_coal *coal, uint16_t ring_id)
3955 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
3956 struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
3957 bp->hwrm_cmd_resp_addr;
3960 /* Set ring coalesce parameters only for Stratus 100G NIC */
3961 if (!bnxt_stratus_device(bp))
3964 HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS, BNXT_USE_CHIMP_MB);
3965 bnxt_hwrm_set_coal_params(coal, &req);
3966 req.ring_id = rte_cpu_to_le_16(ring_id);
3967 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3968 HWRM_CHECK_RESULT();
3973 int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
3975 struct hwrm_port_qstats_ext_input req = {0};
3976 struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
3977 struct bnxt_pf_info *pf = &bp->pf;
3980 if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
3981 bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
3984 HWRM_PREP(req, PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);
3986 req.port_id = rte_cpu_to_le_16(pf->port_id);
3987 if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
3988 req.tx_stat_host_addr =
3989 rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3990 req.tx_stat_size =
3991 rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
3993 if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
3994 req.rx_stat_host_addr =
3995 rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3996 req.rx_stat_size =
3997 rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
3999 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4001 if (rc) {
4002 bp->fw_rx_port_stats_ext_size = 0;
4003 bp->fw_tx_port_stats_ext_size = 0;
4004 } else {
4005 bp->fw_rx_port_stats_ext_size =
4006 rte_le_to_cpu_16(resp->rx_stat_size);
4007 bp->fw_tx_port_stats_ext_size =
4008 rte_le_to_cpu_16(resp->tx_stat_size);
4011 HWRM_CHECK_RESULT();