1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2018 Broadcom
8 #include <rte_byteorder.h>
9 #include <rte_common.h>
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_memzone.h>
13 #include <rte_version.h>
17 #include "bnxt_filter.h"
18 #include "bnxt_hwrm.h"
21 #include "bnxt_ring.h"
24 #include "bnxt_vnic.h"
25 #include "hsi_struct_def_dpdk.h"
29 #define HWRM_CMD_TIMEOUT 10000
30 #define HWRM_SPEC_CODE_1_8_3 0x10803
31 #define HWRM_VERSION_1_9_1 0x10901
33 struct bnxt_plcmodes_cfg {
35 uint16_t jumbo_thresh;
37 uint16_t hds_threshold;
40 static int page_getenum(size_t size)
56 PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
57 return sizeof(void *) * 8 - 1;
60 static int page_roundup(size_t size)
62 return 1 << page_getenum(size);
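/*
 * Worked example for page_roundup() above, assuming page_getenum() returns
 * the log2 of the smallest supported page size that can hold "size":
 * page_roundup(3000) returns 1 << 12, i.e. 4096.
 */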
66 * HWRM Functions (sent to HWRM)
67 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
68 * fails (i.e., a timeout), and a positive non-zero HWRM error code if the
69 * ChiMP firmware fails the command.
72 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
73 uint32_t msg_len, bool use_kong_mb)
76 struct input *req = msg;
77 struct output *resp = bp->hwrm_cmd_resp_addr;
81 uint16_t max_req_len = bp->max_req_len;
82 struct hwrm_short_input short_input = { 0 };
83 uint16_t bar_offset = use_kong_mb ?
84 GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
85 uint16_t mb_trigger_offset = use_kong_mb ?
86 GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
88 if (bp->flags & BNXT_FLAG_SHORT_CMD) {
89 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
91 memset(short_cmd_req, 0, bp->max_req_len);
92 memcpy(short_cmd_req, req, msg_len);
94 short_input.req_type = rte_cpu_to_le_16(req->req_type);
95 short_input.signature = rte_cpu_to_le_16(
96 HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
97 short_input.size = rte_cpu_to_le_16(msg_len);
98 short_input.req_addr =
99 rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);
101 data = (uint32_t *)&short_input;
102 msg_len = sizeof(short_input);
104 /* Sync memory write before updating doorbell */
107 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
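/*
 * With the short command format, only the 16-byte hwrm_short_input built
 * above is written to the BAR below; it points the firmware at the full
 * request that was copied into the DMA-able short command buffer.
 */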
110 /* Write request msg to hwrm channel */
111 for (i = 0; i < msg_len; i += 4) {
112 bar = (uint8_t *)bp->bar0 + bar_offset + i;
113 rte_write32(*data, bar);
117 /* Zero the rest of the request space */
118 for (; i < max_req_len; i += 4) {
119 bar = (uint8_t *)bp->bar0 + bar_offset + i;
123 /* Ring channel doorbell */
124 bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
127 /* Poll for the valid bit */
128 for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
129 /* Sanity check on the resp->resp_len */
131 if (resp->resp_len && resp->resp_len <=
133 /* Last byte of resp contains the valid key */
134 valid = (uint8_t *)resp + resp->resp_len - 1;
135 if (*valid == HWRM_RESP_VALID_KEY)
141 if (i >= HWRM_CMD_TIMEOUT) {
142 PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
153 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
154 * spinlock, and does initial processing.
156 * HWRM_CHECK_RESULT() checks the return code and the HWRM response for
157 * errors; on failure it releases the spinlock and returns from the calling
158 * function, so it may only be used in functions that return the regular int
159 * error codes. Otherwise it should be copied and modified to suit the function.
161 * HWRM_UNLOCK() must be called after all response processing is completed.
163 #define HWRM_PREP(req, type, kong) do { \
164 rte_spinlock_lock(&bp->hwrm_lock); \
165 memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
166 req.req_type = rte_cpu_to_le_16(HWRM_##type); \
167 req.cmpl_ring = rte_cpu_to_le_16(-1); \
168 req.seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
169 rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
170 req.target_id = rte_cpu_to_le_16(0xffff); \
171 req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
174 #define HWRM_CHECK_RESULT_SILENT() do {\
176 rte_spinlock_unlock(&bp->hwrm_lock); \
179 if (resp->error_code) { \
180 rc = rte_le_to_cpu_16(resp->error_code); \
181 rte_spinlock_unlock(&bp->hwrm_lock); \
186 #define HWRM_CHECK_RESULT() do {\
188 PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
189 rte_spinlock_unlock(&bp->hwrm_lock); \
190 if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
196 if (resp->error_code) { \
197 rc = rte_le_to_cpu_16(resp->error_code); \
198 if (resp->resp_len >= 16) { \
199 struct hwrm_err_output *tmp_hwrm_err_op = \
202 "error %d:%d:%08x:%04x\n", \
203 rc, tmp_hwrm_err_op->cmd_err, \
205 tmp_hwrm_err_op->opaque_0), \
207 tmp_hwrm_err_op->opaque_1)); \
209 PMD_DRV_LOG(ERR, "error %d\n", rc); \
211 rte_spinlock_unlock(&bp->hwrm_lock); \
212 if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
220 #define HWRM_UNLOCK() rte_spinlock_unlock(&bp->hwrm_lock)
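/*
 * Illustrative sketch (placeholder names, not a real HWRM command) of the
 * pattern these macros are meant to produce; bnxt_hwrm_func_reset() below is
 * a real instance:
 *
 *	struct hwrm_foo_input req = {.req_type = 0 };
 *	struct hwrm_foo_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *	HWRM_PREP(req, FOO, BNXT_USE_CHIMP_MB);    // take lock, fill header
 *	req.some_field = rte_cpu_to_le_32(val);    // fill request body
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 *	HWRM_CHECK_RESULT();                       // unlocks and returns on error
 *	// read fields from resp while the lock is still held
 *	HWRM_UNLOCK();
 */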
222 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
225 struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
226 struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
228 HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
229 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
232 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
240 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
241 struct bnxt_vnic_info *vnic,
243 struct bnxt_vlan_table_entry *vlan_table)
246 struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
247 struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
250 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
253 HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
254 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
256 /* FIXME: add multicast flag when multicast adding options are supported
259 if (vnic->flags & BNXT_VNIC_INFO_BCAST)
260 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
261 if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
262 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
263 if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
264 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
265 if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
266 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
267 if (vnic->flags & BNXT_VNIC_INFO_MCAST)
268 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
269 if (vnic->mc_addr_cnt) {
270 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
271 req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
272 req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
275 if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
276 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
277 req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
278 rte_mem_virt2iova(vlan_table));
279 req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
281 req.mask = rte_cpu_to_le_32(mask);
283 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
291 int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
293 struct bnxt_vlan_antispoof_table_entry *vlan_table)
296 struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
297 struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
298 bp->hwrm_cmd_resp_addr;
301 * Older HWRM versions did not support this command, and the set_rx_mask
302 * list was used for anti-spoofing. In 1.8.0, the TX path configuration was
303 * removed from the set_rx_mask call, and this command was added.
305 * This command is also present from 1.7.8.11 and higher,
308 if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
309 if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
310 if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
315 HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
316 req.fid = rte_cpu_to_le_16(fid);
318 req.vlan_tag_mask_tbl_addr =
319 rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
320 req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
322 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
330 int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
331 struct bnxt_filter_info *filter)
334 struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
335 struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
337 if (filter->fw_l2_filter_id == UINT64_MAX)
340 HWRM_PREP(req, CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);
342 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
344 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
349 filter->fw_l2_filter_id = UINT64_MAX;
354 int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
356 struct bnxt_filter_info *filter)
359 struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
360 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
361 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
362 const struct rte_eth_vmdq_rx_conf *conf =
363 &dev_conf->rx_adv_conf.vmdq_rx_conf;
364 uint32_t enables = 0;
365 uint16_t j = dst_id - 1;
367 //TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
368 if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
369 conf->pool_map[j].pools & (1UL << j)) {
371 "Add vlan %u to vmdq pool %u\n",
372 conf->pool_map[j].vlan_id, j);
374 filter->l2_ivlan = conf->pool_map[j].vlan_id;
376 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
377 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
380 if (filter->fw_l2_filter_id != UINT64_MAX)
381 bnxt_hwrm_clear_l2_filter(bp, filter);
383 HWRM_PREP(req, CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
385 req.flags = rte_cpu_to_le_32(filter->flags);
387 rte_cpu_to_le_32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST);
389 enables = filter->enables |
390 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
391 req.dst_id = rte_cpu_to_le_16(dst_id);
394 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
395 memcpy(req.l2_addr, filter->l2_addr,
398 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
399 memcpy(req.l2_addr_mask, filter->l2_addr_mask,
402 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
403 req.l2_ovlan = filter->l2_ovlan;
405 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
406 req.l2_ivlan = filter->l2_ivlan;
408 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
409 req.l2_ovlan_mask = filter->l2_ovlan_mask;
411 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
412 req.l2_ivlan_mask = filter->l2_ivlan_mask;
413 if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
414 req.src_id = rte_cpu_to_le_32(filter->src_id);
415 if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
416 req.src_type = filter->src_type;
418 req.enables = rte_cpu_to_le_32(enables);
420 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
424 filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
430 int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
432 struct hwrm_port_mac_cfg_input req = {.req_type = 0};
433 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
440 HWRM_PREP(req, PORT_MAC_CFG, BNXT_USE_CHIMP_MB);
443 flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
446 HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
447 if (ptp->tx_tstamp_en)
448 flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
451 HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
452 req.flags = rte_cpu_to_le_32(flags);
453 req.enables = rte_cpu_to_le_32
454 (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
455 req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);
457 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
463 static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
466 struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
467 struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
468 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
470 /* if (bp->hwrm_spec_code < 0x10801 || ptp) TBD */
474 HWRM_PREP(req, PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);
476 req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
478 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
482 if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
485 ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
489 ptp->rx_regs[BNXT_PTP_RX_TS_L] =
490 rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
491 ptp->rx_regs[BNXT_PTP_RX_TS_H] =
492 rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
493 ptp->rx_regs[BNXT_PTP_RX_SEQ] =
494 rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
495 ptp->rx_regs[BNXT_PTP_RX_FIFO] =
496 rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
497 ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
498 rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
499 ptp->tx_regs[BNXT_PTP_TX_TS_L] =
500 rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
501 ptp->tx_regs[BNXT_PTP_TX_TS_H] =
502 rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
503 ptp->tx_regs[BNXT_PTP_TX_SEQ] =
504 rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
505 ptp->tx_regs[BNXT_PTP_TX_FIFO] =
506 rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
514 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
517 struct hwrm_func_qcaps_input req = {.req_type = 0 };
518 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
519 uint16_t new_max_vfs;
523 HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);
525 req.fid = rte_cpu_to_le_16(0xffff);
527 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
531 bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
532 flags = rte_le_to_cpu_32(resp->flags);
534 bp->pf.port_id = resp->port_id;
535 bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
536 bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
537 new_max_vfs = bp->pdev->max_vfs;
538 if (new_max_vfs != bp->pf.max_vfs) {
540 rte_free(bp->pf.vf_info);
541 bp->pf.vf_info = rte_malloc("bnxt_vf_info",
542 sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
543 bp->pf.max_vfs = new_max_vfs;
544 for (i = 0; i < new_max_vfs; i++) {
545 bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
546 bp->pf.vf_info[i].vlan_table =
547 rte_zmalloc("VF VLAN table",
550 if (bp->pf.vf_info[i].vlan_table == NULL)
552 "Fail to alloc VLAN table for VF %d\n",
556 bp->pf.vf_info[i].vlan_table);
557 bp->pf.vf_info[i].vlan_as_table =
558 rte_zmalloc("VF VLAN AS table",
561 if (bp->pf.vf_info[i].vlan_as_table == NULL)
563 "Alloc VLAN AS table for VF %d fail\n",
567 bp->pf.vf_info[i].vlan_as_table);
568 STAILQ_INIT(&bp->pf.vf_info[i].filter);
573 bp->fw_fid = rte_le_to_cpu_32(resp->fid);
574 memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
575 bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
576 bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
577 bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
578 bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
579 bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
580 /* TODO: For now, do not support VMDq/RFS on VFs. */
585 bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
589 bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
591 bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
592 if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
593 bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
594 PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
596 bnxt_hwrm_ptp_qcfg(bp);
605 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
609 rc = __bnxt_hwrm_func_qcaps(bp);
610 if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
611 rc = bnxt_hwrm_func_resc_qcaps(bp);
613 bp->flags |= BNXT_FLAG_NEW_RM;
619 int bnxt_hwrm_func_reset(struct bnxt *bp)
622 struct hwrm_func_reset_input req = {.req_type = 0 };
623 struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
625 HWRM_PREP(req, FUNC_RESET, BNXT_USE_CHIMP_MB);
627 req.enables = rte_cpu_to_le_32(0);
629 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
637 int bnxt_hwrm_func_driver_register(struct bnxt *bp)
640 struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
641 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
643 if (bp->flags & BNXT_FLAG_REGISTERED)
646 HWRM_PREP(req, FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
647 req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
648 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
649 req.ver_maj = RTE_VER_YEAR;
650 req.ver_min = RTE_VER_MONTH;
651 req.ver_upd = RTE_VER_MINOR;
654 req.enables |= rte_cpu_to_le_32(
655 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
656 memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
657 RTE_MIN(sizeof(req.vf_req_fwd),
658 sizeof(bp->pf.vf_req_fwd)));
661 * The PF can sniff HWRM commands issued by a VF. This can be set up by the
662 * Linux driver and inherited by the DPDK PF driver. Clear
663 * this HWRM sniffer list in FW because the DPDK PF driver does
667 rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
670 req.async_event_fwd[0] |=
671 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
672 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
673 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
674 req.async_event_fwd[1] |=
675 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
676 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
678 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
683 bp->flags |= BNXT_FLAG_REGISTERED;
688 int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
690 if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
693 return bnxt_hwrm_func_reserve_vf_resc(bp, true);
696 int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
701 struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
702 struct hwrm_func_vf_cfg_input req = {0};
704 HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
706 req.enables = rte_cpu_to_le_32
707 (HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS |
708 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS |
709 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
710 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
711 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS |
712 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS);
714 req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
715 req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
716 AGG_RING_MULTIPLIER);
717 req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
718 req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
720 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
721 req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
722 if (bp->vf_resv_strategy ==
723 HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
724 enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
725 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
726 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
727 req.enables |= rte_cpu_to_le_32(enables);
728 req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
729 req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
730 req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
734 flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
735 HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
736 HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
737 HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
738 HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
739 HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;
741 req.flags = rte_cpu_to_le_32(flags);
743 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
746 HWRM_CHECK_RESULT_SILENT();
754 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
757 struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
758 struct hwrm_func_resource_qcaps_input req = {0};
760 HWRM_PREP(req, FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
761 req.fid = rte_cpu_to_le_16(0xffff);
763 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
768 bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
769 bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
770 bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
771 bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
772 bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
773 bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
774 bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
775 bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
777 bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
778 if (bp->vf_resv_strategy >
779 HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
780 bp->vf_resv_strategy =
781 HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;
787 int bnxt_hwrm_ver_get(struct bnxt *bp)
790 struct hwrm_ver_get_input req = {.req_type = 0 };
791 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
794 uint16_t max_resp_len;
795 char type[RTE_MEMZONE_NAMESIZE];
796 uint32_t dev_caps_cfg;
798 bp->max_req_len = HWRM_MAX_REQ_LEN;
799 HWRM_PREP(req, VER_GET, BNXT_USE_CHIMP_MB);
801 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
802 req.hwrm_intf_min = HWRM_VERSION_MINOR;
803 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
805 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
809 PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
810 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
811 resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
812 resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
813 bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
814 (resp->hwrm_fw_min_8b << 16) |
815 (resp->hwrm_fw_bld_8b << 8) |
816 resp->hwrm_fw_rsvd_8b;
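/*
 * fw_ver packs the firmware version one byte per component
 * (major.minor.build.reserved): e.g. firmware 1.7.8.11 is stored as
 * 0x0107080b. Version checks such as the ones in
 * bnxt_hwrm_cfa_vlan_antispoof_cfg() compare against values built the same
 * way, e.g. (1 << 24) | (8 << 16) for 1.8.0.0.
 */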
817 PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
818 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
820 my_version = HWRM_VERSION_MAJOR << 16;
821 my_version |= HWRM_VERSION_MINOR << 8;
822 my_version |= HWRM_VERSION_UPDATE;
824 fw_version = resp->hwrm_intf_maj_8b << 16;
825 fw_version |= resp->hwrm_intf_min_8b << 8;
826 fw_version |= resp->hwrm_intf_upd_8b;
827 bp->hwrm_spec_code = fw_version;
829 if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
830 PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
835 if (my_version != fw_version) {
836 PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
837 if (my_version < fw_version) {
839 "Firmware API version is newer than driver.\n");
841 "The driver may be missing features.\n");
844 "Firmware API version is older than driver.\n");
846 "Not all driver features may be functional.\n");
850 if (bp->max_req_len > resp->max_req_win_len) {
851 PMD_DRV_LOG(ERR, "Unsupported request length\n");
854 bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
855 max_resp_len = resp->max_resp_len;
856 dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);
858 if (bp->max_resp_len != max_resp_len) {
859 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
860 bp->pdev->addr.domain, bp->pdev->addr.bus,
861 bp->pdev->addr.devid, bp->pdev->addr.function);
863 rte_free(bp->hwrm_cmd_resp_addr);
865 bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
866 if (bp->hwrm_cmd_resp_addr == NULL) {
870 rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
871 bp->hwrm_cmd_resp_dma_addr =
872 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
873 if (bp->hwrm_cmd_resp_dma_addr == 0) {
875 "Unable to map response buffer to physical memory.\n");
879 bp->max_resp_len = max_resp_len;
883 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
885 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
886 PMD_DRV_LOG(DEBUG, "Short command supported\n");
888 rte_free(bp->hwrm_short_cmd_req_addr);
890 bp->hwrm_short_cmd_req_addr = rte_malloc(type,
892 if (bp->hwrm_short_cmd_req_addr == NULL) {
896 rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
897 bp->hwrm_short_cmd_req_dma_addr =
898 rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
899 if (bp->hwrm_short_cmd_req_dma_addr == 0) {
900 rte_free(bp->hwrm_short_cmd_req_addr);
902 "Unable to map buffer to physical memory.\n");
907 bp->flags |= BNXT_FLAG_SHORT_CMD;
910 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
911 bp->flags |= BNXT_FLAG_KONG_MB_EN;
912 PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
915 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
916 PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");
923 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
926 struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
927 struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
929 if (!(bp->flags & BNXT_FLAG_REGISTERED))
932 HWRM_PREP(req, FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
935 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
940 bp->flags &= ~BNXT_FLAG_REGISTERED;
945 static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
948 struct hwrm_port_phy_cfg_input req = {0};
949 struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
950 uint32_t enables = 0;
952 HWRM_PREP(req, PORT_PHY_CFG, BNXT_USE_CHIMP_MB);
955 /* Setting a fixed speed while autoneg is on, so disable autoneg */
956 if (bp->link_info.auto_mode && conf->link_speed) {
957 req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
958 PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
961 req.flags = rte_cpu_to_le_32(conf->phy_flags);
962 req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
963 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
965 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
966 * any auto mode, even "none".
968 if (!conf->link_speed) {
969 /* No speeds specified. Enable AutoNeg - all speeds */
971 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
973 /* AutoNeg - Advertise speeds specified. */
974 if (conf->auto_link_speed_mask &&
975 !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
977 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
978 req.auto_link_speed_mask =
979 conf->auto_link_speed_mask;
981 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
984 req.auto_duplex = conf->duplex;
985 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
986 req.auto_pause = conf->auto_pause;
987 req.force_pause = conf->force_pause;
988 /* Set force_pause if there is no auto or if there is a force */
989 if (req.auto_pause && !req.force_pause)
990 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
992 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
994 req.enables = rte_cpu_to_le_32(enables);
997 rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
998 PMD_DRV_LOG(INFO, "Force Link Down\n");
1001 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1003 HWRM_CHECK_RESULT();
1009 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
1010 struct bnxt_link_info *link_info)
1013 struct hwrm_port_phy_qcfg_input req = {0};
1014 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1016 HWRM_PREP(req, PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);
1018 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1020 HWRM_CHECK_RESULT();
1022 link_info->phy_link_status = resp->link;
1023 link_info->link_up =
1024 (link_info->phy_link_status ==
1025 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
1026 link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
1027 link_info->duplex = resp->duplex_cfg;
1028 link_info->pause = resp->pause;
1029 link_info->auto_pause = resp->auto_pause;
1030 link_info->force_pause = resp->force_pause;
1031 link_info->auto_mode = resp->auto_mode;
1032 link_info->phy_type = resp->phy_type;
1033 link_info->media_type = resp->media_type;
1035 link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
1036 link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
1037 link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
1038 link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
1039 link_info->phy_ver[0] = resp->phy_maj;
1040 link_info->phy_ver[1] = resp->phy_min;
1041 link_info->phy_ver[2] = resp->phy_bld;
1045 PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
1046 PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
1047 PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
1048 PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
1049 PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
1050 link_info->auto_link_speed_mask);
1051 PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
1052 link_info->force_link_speed);
1057 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
1060 struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
1061 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
1064 HWRM_PREP(req, QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);
1066 req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
1067 /* HWRM Version >= 1.9.1 */
1068 if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
1070 HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
1071 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1073 HWRM_CHECK_RESULT();
1075 #define GET_QUEUE_INFO(x) \
1076 bp->cos_queue[x].id = resp->queue_id##x; \
1077 bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
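/*
 * The ## token pasting means GET_QUEUE_INFO(0) expands to
 *	bp->cos_queue[0].id = resp->queue_id0;
 *	bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 * and similarly for the remaining COS queues.
 */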
1090 if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
1091 bp->tx_cosq_id = bp->cos_queue[0].id;
1093 /* iterate and find the COSq profile to use for Tx */
1094 for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
1095 if (bp->cos_queue[i].profile ==
1096 HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
1097 bp->tx_cosq_id = bp->cos_queue[i].id;
1102 PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);
1107 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
1108 struct bnxt_ring *ring,
1109 uint32_t ring_type, uint32_t map_index,
1110 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
1113 uint32_t enables = 0;
1114 struct hwrm_ring_alloc_input req = {.req_type = 0 };
1115 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1117 HWRM_PREP(req, RING_ALLOC, BNXT_USE_CHIMP_MB);
1119 req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
1120 req.fbo = rte_cpu_to_le_32(0);
1121 /* Association of ring index with doorbell index */
1122 req.logical_id = rte_cpu_to_le_16(map_index);
1123 req.length = rte_cpu_to_le_32(ring->ring_size);
1125 switch (ring_type) {
1126 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1127 req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
1129 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1130 req.ring_type = ring_type;
1131 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1132 req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
1133 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1135 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1137 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1138 req.ring_type = ring_type;
1140 * TODO: Some HWRM versions crash with
1141 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
1143 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1146 PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
1151 req.enables = rte_cpu_to_le_32(enables);
1153 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1155 if (rc || resp->error_code) {
1156 if (rc == 0 && resp->error_code)
1157 rc = rte_le_to_cpu_16(resp->error_code);
1158 switch (ring_type) {
1159 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1161 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
1164 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1166 "hwrm_ring_alloc rx failed. rc:%d\n", rc);
1169 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1171 "hwrm_ring_alloc tx failed. rc:%d\n", rc);
1175 PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
1181 ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
1186 int bnxt_hwrm_ring_free(struct bnxt *bp,
1187 struct bnxt_ring *ring, uint32_t ring_type)
1190 struct hwrm_ring_free_input req = {.req_type = 0 };
1191 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
1193 HWRM_PREP(req, RING_FREE, BNXT_USE_CHIMP_MB);
1195 req.ring_type = ring_type;
1196 req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
1198 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1200 if (rc || resp->error_code) {
1201 if (rc == 0 && resp->error_code)
1202 rc = rte_le_to_cpu_16(resp->error_code);
1205 switch (ring_type) {
1206 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1207 PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
1210 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1211 PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
1214 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1215 PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
1219 PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
1227 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1230 struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
1231 struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1233 HWRM_PREP(req, RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);
1235 req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
1236 req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
1237 req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
1238 req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
1240 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1242 HWRM_CHECK_RESULT();
1244 bp->grp_info[idx].fw_grp_id =
1245 rte_le_to_cpu_16(resp->ring_group_id);
1252 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
1255 struct hwrm_ring_grp_free_input req = {.req_type = 0 };
1256 struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
1258 HWRM_PREP(req, RING_GRP_FREE, BNXT_USE_CHIMP_MB);
1260 req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
1262 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1264 HWRM_CHECK_RESULT();
1267 bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
1271 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1274 struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
1275 struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1277 if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
1280 HWRM_PREP(req, STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
1282 req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
1284 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1286 HWRM_CHECK_RESULT();
1292 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1293 unsigned int idx __rte_unused)
1296 struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1297 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1299 HWRM_PREP(req, STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1301 req.update_period_ms = rte_cpu_to_le_32(0);
1303 req.stats_dma_addr =
1304 rte_cpu_to_le_64(cpr->hw_stats_map);
1306 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1308 HWRM_CHECK_RESULT();
1310 cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
1317 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1318 unsigned int idx __rte_unused)
1321 struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1322 struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1324 HWRM_PREP(req, STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
1326 req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
1328 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1330 HWRM_CHECK_RESULT();
1336 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1339 struct hwrm_vnic_alloc_input req = { 0 };
1340 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1342 /* map ring groups to this vnic */
1343 PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
1344 vnic->start_grp_id, vnic->end_grp_id);
1345 for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
1346 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1348 vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1349 vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1350 vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1351 vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
1352 vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1353 ETHER_CRC_LEN + VLAN_TAG_SIZE;
1354 HWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB);
1356 if (vnic->func_default)
1358 rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1359 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1361 HWRM_CHECK_RESULT();
1363 vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1365 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1369 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1370 struct bnxt_vnic_info *vnic,
1371 struct bnxt_plcmodes_cfg *pmode)
1374 struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1375 struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1377 HWRM_PREP(req, VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);
1379 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1381 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1383 HWRM_CHECK_RESULT();
1385 pmode->flags = rte_le_to_cpu_32(resp->flags);
1386 /* dflt_vnic bit doesn't exist in the _cfg command */
1387 pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1388 pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1389 pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1390 pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1397 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1398 struct bnxt_vnic_info *vnic,
1399 struct bnxt_plcmodes_cfg *pmode)
1402 struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1403 struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1405 HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
1407 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1408 req.flags = rte_cpu_to_le_32(pmode->flags);
1409 req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1410 req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1411 req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1412 req.enables = rte_cpu_to_le_32(
1413 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1414 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1415 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1418 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1420 HWRM_CHECK_RESULT();
1426 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1429 struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1430 struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1431 uint32_t ctx_enable_flag = 0;
1432 struct bnxt_plcmodes_cfg pmodes;
1434 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1435 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1439 rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1443 HWRM_PREP(req, VNIC_CFG, BNXT_USE_CHIMP_MB);
1445 /* Only RSS is supported for now; TBD: COS & LB */
1447 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
1448 if (vnic->lb_rule != 0xffff)
1449 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1450 if (vnic->cos_rule != 0xffff)
1451 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1452 if (vnic->rss_rule != 0xffff) {
1453 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1454 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1456 req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
1457 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1458 req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1459 req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1460 req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1461 req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1462 req.mru = rte_cpu_to_le_16(vnic->mru);
1463 if (vnic->func_default)
1465 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1466 if (vnic->vlan_strip)
1468 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1471 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1472 if (vnic->roce_dual)
1473 req.flags |= rte_cpu_to_le_32(
1474 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1475 if (vnic->roce_only)
1476 req.flags |= rte_cpu_to_le_32(
1477 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1478 if (vnic->rss_dflt_cr)
1479 req.flags |= rte_cpu_to_le_32(
1480 HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
1482 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1484 HWRM_CHECK_RESULT();
1487 rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1492 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1496 struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1497 struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1499 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1500 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1503 HWRM_PREP(req, VNIC_QCFG, BNXT_USE_CHIMP_MB);
1506 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1507 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1508 req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1510 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1512 HWRM_CHECK_RESULT();
1514 vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1515 vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1516 vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1517 vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1518 vnic->mru = rte_le_to_cpu_16(resp->mru);
1519 vnic->func_default = rte_le_to_cpu_32(
1520 resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1521 vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1522 HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1523 vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1524 HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1525 vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1526 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1527 vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1528 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1529 vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1530 HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1537 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1540 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1541 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1542 bp->hwrm_cmd_resp_addr;
1544 HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1546 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1548 HWRM_CHECK_RESULT();
1550 vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1552 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1557 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1560 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1561 struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1562 bp->hwrm_cmd_resp_addr;
1564 if (vnic->rss_rule == 0xffff) {
1565 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1568 HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
1570 req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1572 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1574 HWRM_CHECK_RESULT();
1577 vnic->rss_rule = INVALID_HW_RING_ID;
1582 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1585 struct hwrm_vnic_free_input req = {.req_type = 0 };
1586 struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1588 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1589 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1593 HWRM_PREP(req, VNIC_FREE, BNXT_USE_CHIMP_MB);
1595 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1597 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1599 HWRM_CHECK_RESULT();
1602 vnic->fw_vnic_id = INVALID_HW_RING_ID;
1606 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1607 struct bnxt_vnic_info *vnic)
1610 struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1611 struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1613 HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
1615 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1616 req.hash_mode_flags = vnic->hash_mode;
1618 req.ring_grp_tbl_addr =
1619 rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1620 req.hash_key_tbl_addr =
1621 rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1622 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1624 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1626 HWRM_CHECK_RESULT();
1632 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1633 struct bnxt_vnic_info *vnic)
1636 struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1637 struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1640 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1641 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1645 HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
1647 req.flags = rte_cpu_to_le_32(
1648 HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1650 req.enables = rte_cpu_to_le_32(
1651 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1653 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1654 size -= RTE_PKTMBUF_HEADROOM;
1656 req.jumbo_thresh = rte_cpu_to_le_16(size);
1657 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1659 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1661 HWRM_CHECK_RESULT();
1667 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1668 struct bnxt_vnic_info *vnic, bool enable)
1671 struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1672 struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1674 HWRM_PREP(req, VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
1677 req.enables = rte_cpu_to_le_32(
1678 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1679 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1680 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1681 req.flags = rte_cpu_to_le_32(
1682 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1683 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1684 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1685 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1686 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1687 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1688 req.max_agg_segs = rte_cpu_to_le_16(5);
1690 rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1691 req.min_agg_len = rte_cpu_to_le_32(512);
1693 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1695 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1697 HWRM_CHECK_RESULT();
1703 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1705 struct hwrm_func_cfg_input req = {0};
1706 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1709 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1710 req.enables = rte_cpu_to_le_32(
1711 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1712 memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1713 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1715 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
1717 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1718 HWRM_CHECK_RESULT();
1721 bp->pf.vf_info[vf].random_mac = false;
1726 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1730 struct hwrm_func_qstats_input req = {.req_type = 0};
1731 struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1733 HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
1735 req.fid = rte_cpu_to_le_16(fid);
1737 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1739 HWRM_CHECK_RESULT();
1742 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1749 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1750 struct rte_eth_stats *stats)
1753 struct hwrm_func_qstats_input req = {.req_type = 0};
1754 struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1756 HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
1758 req.fid = rte_cpu_to_le_16(fid);
1760 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1762 HWRM_CHECK_RESULT();
1764 stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1765 stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1766 stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1767 stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1768 stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1769 stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1771 stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1772 stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1773 stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1774 stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1775 stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1776 stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1778 stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
1779 stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
1780 stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
1787 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1790 struct hwrm_func_clr_stats_input req = {.req_type = 0};
1791 struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1793 HWRM_PREP(req, FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
1795 req.fid = rte_cpu_to_le_16(fid);
1797 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1799 HWRM_CHECK_RESULT();
1806 * HWRM utility functions
1809 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1814 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1815 struct bnxt_tx_queue *txq;
1816 struct bnxt_rx_queue *rxq;
1817 struct bnxt_cp_ring_info *cpr;
1819 if (i >= bp->rx_cp_nr_rings) {
1820 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1823 rxq = bp->rx_queues[i];
1827 rc = bnxt_hwrm_stat_clear(bp, cpr);
1834 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1838 struct bnxt_cp_ring_info *cpr;
1840 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1842 if (i >= bp->rx_cp_nr_rings) {
1843 cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1845 cpr = bp->rx_queues[i]->cp_ring;
1846 bp->grp_info[i].fw_stats_ctx = -1;
1848 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1849 rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1850 cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1858 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1863 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1864 struct bnxt_tx_queue *txq;
1865 struct bnxt_rx_queue *rxq;
1866 struct bnxt_cp_ring_info *cpr;
1868 if (i >= bp->rx_cp_nr_rings) {
1869 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1872 rxq = bp->rx_queues[i];
1876 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1884 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1889 for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1891 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
1894 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1902 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1904 struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1906 bnxt_hwrm_ring_free(bp, cp_ring,
1907 HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1908 cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1909 memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1910 sizeof(*cpr->cp_desc_ring));
1911 cpr->cp_raw_cons = 0;
1914 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
1916 struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
1917 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1918 struct bnxt_ring *ring = rxr->rx_ring_struct;
1919 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1921 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1922 bnxt_hwrm_ring_free(bp, ring,
1923 HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1924 ring->fw_ring_id = INVALID_HW_RING_ID;
1925 bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
1926 memset(rxr->rx_desc_ring, 0,
1927 rxr->rx_ring_struct->ring_size *
1928 sizeof(*rxr->rx_desc_ring));
1929 memset(rxr->rx_buf_ring, 0,
1930 rxr->rx_ring_struct->ring_size *
1931 sizeof(*rxr->rx_buf_ring));
1934 ring = rxr->ag_ring_struct;
1935 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1936 bnxt_hwrm_ring_free(bp, ring,
1937 HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1938 ring->fw_ring_id = INVALID_HW_RING_ID;
1939 memset(rxr->ag_buf_ring, 0,
1940 rxr->ag_ring_struct->ring_size *
1941 sizeof(*rxr->ag_buf_ring));
1943 bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
1945 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
1946 bnxt_free_cp_ring(bp, cpr);
1948 bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
1951 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1955 for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1956 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1957 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1958 struct bnxt_ring *ring = txr->tx_ring_struct;
1959 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1961 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1962 bnxt_hwrm_ring_free(bp, ring,
1963 HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1964 ring->fw_ring_id = INVALID_HW_RING_ID;
1965 memset(txr->tx_desc_ring, 0,
1966 txr->tx_ring_struct->ring_size *
1967 sizeof(*txr->tx_desc_ring));
1968 memset(txr->tx_buf_ring, 0,
1969 txr->tx_ring_struct->ring_size *
1970 sizeof(*txr->tx_buf_ring));
1974 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1975 bnxt_free_cp_ring(bp, cpr);
1976 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1980 for (i = 0; i < bp->rx_cp_nr_rings; i++)
1981 bnxt_free_hwrm_rx_ring(bp, i);
1986 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1991 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1992 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1999 void bnxt_free_hwrm_resources(struct bnxt *bp)
2001 /* Release the HWRM response and short command buffers */
2002 rte_free(bp->hwrm_cmd_resp_addr);
2003 rte_free(bp->hwrm_short_cmd_req_addr);
2004 bp->hwrm_cmd_resp_addr = NULL;
2005 bp->hwrm_short_cmd_req_addr = NULL;
2006 bp->hwrm_cmd_resp_dma_addr = 0;
2007 bp->hwrm_short_cmd_req_dma_addr = 0;
2010 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2012 struct rte_pci_device *pdev = bp->pdev;
2013 char type[RTE_MEMZONE_NAMESIZE];
2015 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
2016 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
2017 bp->max_resp_len = HWRM_MAX_RESP_LEN;
2018 bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
2019 rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
2020 if (bp->hwrm_cmd_resp_addr == NULL)
2022 bp->hwrm_cmd_resp_dma_addr =
2023 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
2024 if (bp->hwrm_cmd_resp_dma_addr == 0) {
2026 "unable to map response address to physical memory\n");
2029 rte_spinlock_init(&bp->hwrm_lock);
2034 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2036 struct bnxt_filter_info *filter;
2039 STAILQ_FOREACH(filter, &vnic->filter, next) {
2040 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2041 rc = bnxt_hwrm_clear_em_filter(bp, filter);
2042 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2043 rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2045 rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2046 STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
2054 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2056 struct bnxt_filter_info *filter;
2057 struct rte_flow *flow;
2060 STAILQ_FOREACH(flow, &vnic->flow_list, next) {
2061 filter = flow->filter;
2062 PMD_DRV_LOG(ERR, "filter type %d\n", filter->filter_type);
2063 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2064 rc = bnxt_hwrm_clear_em_filter(bp, filter);
2065 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2066 rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2068 rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2070 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
2078 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2080 struct bnxt_filter_info *filter;
2083 STAILQ_FOREACH(filter, &vnic->filter, next) {
2084 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2085 rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2087 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2088 rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2091 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2099 void bnxt_free_tunnel_ports(struct bnxt *bp)
2101 if (bp->vxlan_port_cnt)
2102 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2103 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2105 if (bp->geneve_port_cnt)
2106 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2107 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2108 bp->geneve_port = 0;
2111 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2115 if (bp->vnic_info == NULL)
2119 * Cleanup VNICs in reverse order, to make sure the L2 filter
2120 * from vnic0 is last to be cleaned up.
2122 for (i = bp->nr_vnics - 1; i >= 0; i--) {
2123 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2125 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2127 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2129 bnxt_hwrm_vnic_ctx_free(bp, vnic);
2131 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2133 bnxt_hwrm_vnic_free(bp, vnic);
2135 rte_free(vnic->fw_grp_ids);
2137 /* Ring resources */
2138 bnxt_free_all_hwrm_rings(bp);
2139 bnxt_free_all_hwrm_ring_grps(bp);
2140 bnxt_free_all_hwrm_stat_ctxs(bp);
2141 bnxt_free_tunnel_ports(bp);
2144 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2146 uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2148 if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2149 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2151 switch (conf_link_speed) {
2152 case ETH_LINK_SPEED_10M_HD:
2153 case ETH_LINK_SPEED_100M_HD:
2155 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2157 return hw_link_duplex;
2160 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2162 return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
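/* e.g. ETH_LINK_SPEED_AUTONEG (FIXED bit clear) yields 1 (autoneg), while
 * ETH_LINK_SPEED_10G | ETH_LINK_SPEED_FIXED yields 0 (forced speed).
 */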
2165 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2167 uint16_t eth_link_speed = 0;
2169 if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2170 return ETH_LINK_SPEED_AUTONEG;
2172 switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2173 case ETH_LINK_SPEED_100M:
2174 case ETH_LINK_SPEED_100M_HD:
2177 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2179 case ETH_LINK_SPEED_1G:
2181 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2183 case ETH_LINK_SPEED_2_5G:
2185 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2187 case ETH_LINK_SPEED_10G:
2189 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2191 case ETH_LINK_SPEED_20G:
2193 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2195 case ETH_LINK_SPEED_25G:
2197 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2199 case ETH_LINK_SPEED_40G:
2201 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2203 case ETH_LINK_SPEED_50G:
2205 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2207 case ETH_LINK_SPEED_100G:
2209 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2213 "Unsupported link speed %d; default to AUTO\n",
2217 return eth_link_speed;
2220 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2221 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2222 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2223 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
2225 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2229 if (link_speed == ETH_LINK_SPEED_AUTONEG)
2232 if (link_speed & ETH_LINK_SPEED_FIXED) {
2233 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2235 if (one_speed & (one_speed - 1)) {
2237 "Invalid advertised speeds (%u) for port %u\n",
2238 link_speed, port_id);
2241 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2243 "Unsupported advertised speed (%u) for port %u\n",
2244 link_speed, port_id);
2248 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2250 "Unsupported advertised speeds (%u) for port %u\n",
2251 link_speed, port_id);
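/*
 * Worked example (illustrative only) of the fixed-speed check above:
 * "one_speed & (one_speed - 1)" is non-zero exactly when more than one
 * speed bit survives masking off ETH_LINK_SPEED_FIXED, which is invalid
 * for a forced speed.
 *
 *	0x10 & 0x0f == 0x00	one speed bit set  -> accepted
 *	0x18 & 0x17 == 0x10	two speed bits set -> rejected
 */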
2259 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2263 if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2264 if (bp->link_info.support_speeds)
2265 return bp->link_info.support_speeds;
2266 link_speed = BNXT_SUPPORTED_SPEEDS;
2269 if (link_speed & ETH_LINK_SPEED_100M)
2270 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2271 if (link_speed & ETH_LINK_SPEED_100M_HD)
2272 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2273 if (link_speed & ETH_LINK_SPEED_1G)
2274 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2275 if (link_speed & ETH_LINK_SPEED_2_5G)
2276 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2277 if (link_speed & ETH_LINK_SPEED_10G)
2278 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2279 if (link_speed & ETH_LINK_SPEED_20G)
2280 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2281 if (link_speed & ETH_LINK_SPEED_25G)
2282 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2283 if (link_speed & ETH_LINK_SPEED_40G)
2284 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2285 if (link_speed & ETH_LINK_SPEED_50G)
2286 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2287 if (link_speed & ETH_LINK_SPEED_100G)
2288 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2292 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2294 uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2296 switch (hw_link_speed) {
2297 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2298 eth_link_speed = ETH_SPEED_NUM_100M;
2300 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2301 eth_link_speed = ETH_SPEED_NUM_1G;
2303 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2304 eth_link_speed = ETH_SPEED_NUM_2_5G;
2306 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2307 eth_link_speed = ETH_SPEED_NUM_10G;
2309 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2310 eth_link_speed = ETH_SPEED_NUM_20G;
2312 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2313 eth_link_speed = ETH_SPEED_NUM_25G;
2315 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2316 eth_link_speed = ETH_SPEED_NUM_40G;
2318 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2319 eth_link_speed = ETH_SPEED_NUM_50G;
2321 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2322 eth_link_speed = ETH_SPEED_NUM_100G;
2324 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2326 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2330 return eth_link_speed;
2333 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2335 uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2337 switch (hw_link_duplex) {
2338 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2339 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2341 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2343 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2344 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2347 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2351 return eth_link_duplex;
2354 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2357 struct bnxt_link_info *link_info = &bp->link_info;
2359 rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2362 "Get link config failed with rc %d\n", rc);
2365 if (link_info->link_speed)
2367 bnxt_parse_hw_link_speed(link_info->link_speed);
2369 link->link_speed = ETH_SPEED_NUM_NONE;
2370 link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2371 link->link_status = link_info->link_up;
2372 link->link_autoneg = link_info->auto_mode ==
2373 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2374 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
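/*
 * Illustrative usage sketch, assuming this is called from the ethdev
 * link_update callback ("eth_dev" is the port being queried):
 *
 *	struct rte_eth_link link;
 *	struct bnxt *bp = eth_dev->data->dev_private;
 *
 *	memset(&link, 0, sizeof(link));
 *	rc = bnxt_get_hwrm_link_config(bp, &link);
 *	if (rc == 0)
 *		rte_eth_linkstatus_set(eth_dev, &link);
 */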
2379 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2382 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2383 struct bnxt_link_info link_req;
2384 uint16_t speed, autoneg;
2386 if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2389 rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2390 bp->eth_dev->data->port_id);
2394 memset(&link_req, 0, sizeof(link_req));
2395 link_req.link_up = link_up;
2399 autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2400 speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2401 link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2402 /* Autoneg can be done only when the FW allows */
2403 if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
2404 bp->link_info.force_link_speed)) {
2405 link_req.phy_flags |=
2406 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2407 link_req.auto_link_speed_mask =
2408 bnxt_parse_eth_link_speed_mask(bp,
2409 dev_conf->link_speeds);
2411 if (bp->link_info.phy_type ==
2412 HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2413 bp->link_info.phy_type ==
2414 HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2415 bp->link_info.media_type ==
2416 HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2417 PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2421 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2422 /* If user wants a particular speed try that first. */
2424 link_req.link_speed = speed;
2425 else if (bp->link_info.force_link_speed)
2426 link_req.link_speed = bp->link_info.force_link_speed;
2428 link_req.link_speed = bp->link_info.auto_link_speed;
2430 link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2431 link_req.auto_pause = bp->link_info.auto_pause;
2432 link_req.force_pause = bp->link_info.force_pause;
2435 rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2438 "Set link config failed with rc %d\n", rc);
2446 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2448 struct hwrm_func_qcfg_input req = {0};
2449 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2453 HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2454 req.fid = rte_cpu_to_le_16(0xffff);
2456 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2458 HWRM_CHECK_RESULT();
2460 /* Hard-coded 0xfff VLAN ID mask */
2461 bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2462 flags = rte_le_to_cpu_16(resp->flags);
2463 if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2464 bp->flags |= BNXT_FLAG_MULTI_HOST;
2466 if (BNXT_VF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
2467 bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
2468 PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
2471 switch (resp->port_partition_type) {
2472 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2473 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2474 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2476 bp->port_partition_type = resp->port_partition_type;
2479 bp->port_partition_type = 0;
2488 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2489 struct hwrm_func_qcaps_output *qcaps)
2491 qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2492 memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2493 sizeof(qcaps->mac_address));
2494 qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2495 qcaps->max_rx_rings = fcfg->num_rx_rings;
2496 qcaps->max_tx_rings = fcfg->num_tx_rings;
2497 qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2498 qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2500 qcaps->first_vf_id = 0;
2501 qcaps->max_vnics = fcfg->num_vnics;
2502 qcaps->max_decap_records = 0;
2503 qcaps->max_encap_records = 0;
2504 qcaps->max_tx_wm_flows = 0;
2505 qcaps->max_tx_em_flows = 0;
2506 qcaps->max_rx_wm_flows = 0;
2507 qcaps->max_rx_em_flows = 0;
2508 qcaps->max_flow_id = 0;
2509 qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2510 qcaps->max_sp_tx_rings = 0;
2511 qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2514 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2516 struct hwrm_func_cfg_input req = {0};
2517 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2520 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2521 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2522 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2523 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2524 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2525 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2526 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2527 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2528 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2529 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2530 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2531 req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2532 req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2533 ETHER_CRC_LEN + VLAN_TAG_SIZE *
2535 req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2536 req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2537 req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2538 req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2539 req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2540 req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2541 req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2542 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2543 req.fid = rte_cpu_to_le_16(0xffff);
2545 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2547 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2549 HWRM_CHECK_RESULT();
2555 static void populate_vf_func_cfg_req(struct bnxt *bp,
2556 struct hwrm_func_cfg_input *req,
2559 req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2560 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2561 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2562 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2563 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2564 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2565 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2566 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2567 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2568 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2570 req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2571 ETHER_CRC_LEN + VLAN_TAG_SIZE *
2573 req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2574 ETHER_CRC_LEN + VLAN_TAG_SIZE *
2576 req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2578 req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2579 req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2581 req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2582 req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2583 req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2584 /* TODO: For now, do not support VMDq/RFS on VFs. */
2585 req->num_vnics = rte_cpu_to_le_16(1);
2586 req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
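/*
 * Worked example (illustrative only) of the even split above: the PF and
 * each VF are offered max/(num_vfs + 1) of every resource.  With
 * bp->max_tx_rings == 64 and num_vfs == 7, each function gets
 * 64 / (7 + 1) == 8 TX rings; the same arithmetic applies to RX rings,
 * completion rings, stat contexts and ring groups.
 */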
2590 static void add_random_mac_if_needed(struct bnxt *bp,
2591 struct hwrm_func_cfg_input *cfg_req,
2594 struct ether_addr mac;
2596 if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2599 if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2601 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2602 eth_random_addr(cfg_req->dflt_mac_addr);
2603 bp->pf.vf_info[vf].random_mac = true;
2605 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2609 static void reserve_resources_from_vf(struct bnxt *bp,
2610 struct hwrm_func_cfg_input *cfg_req,
2613 struct hwrm_func_qcaps_input req = {0};
2614 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2617 /* Get the actual allocated values now */
2618 HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);
2619 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2620 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2623 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
2624 copy_func_cfg_to_qcaps(cfg_req, resp);
2625 } else if (resp->error_code) {
2626 rc = rte_le_to_cpu_16(resp->error_code);
2627 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
2628 copy_func_cfg_to_qcaps(cfg_req, resp);
2631 bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2632 bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2633 bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2634 bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2635 bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2636 bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2638 * TODO: VMDq is not supported with VFs, so max_vnics is always
2639 * forced to 1 in this case
2641 //bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
2642 bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2647 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2649 struct hwrm_func_qcfg_input req = {0};
2650 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2653 /* Query the VF's current default VLAN */
2654 HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2655 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2656 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2658 PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
2660 } else if (resp->error_code) {
2661 rc = rte_le_to_cpu_16(resp->error_code);
2662 PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
2665 rc = rte_le_to_cpu_16(resp->vlan);
2672 static int update_pf_resource_max(struct bnxt *bp)
2674 struct hwrm_func_qcfg_input req = {0};
2675 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2678 /* And copy the allocated numbers into the pf struct */
2679 HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2680 req.fid = rte_cpu_to_le_16(0xffff);
2681 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2682 HWRM_CHECK_RESULT();
2684 /* Only TX ring value reflects actual allocation? TODO */
2685 bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2686 bp->pf.evb_mode = resp->evb_mode;
2693 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2698 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2702 rc = bnxt_hwrm_func_qcaps(bp);
2706 bp->pf.func_cfg_flags &=
2707 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2708 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2709 bp->pf.func_cfg_flags |=
2710 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2711 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2715 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2717 struct hwrm_func_cfg_input req = {0};
2718 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2725 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2729 rc = bnxt_hwrm_func_qcaps(bp);
2734 bp->pf.active_vfs = num_vfs;
2737 * First, configure the PF to only use one TX ring. This ensures that
2738 * there are enough rings for all VFs.
2740 * If we don't do this, when we call func_alloc() later, we will lock
2741 * extra rings to the PF that won't be available during func_cfg() of
2744 * This has been fixed with firmware versions above 20.6.54
2746 bp->pf.func_cfg_flags &=
2747 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2748 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2749 bp->pf.func_cfg_flags |=
2750 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2751 rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2756 * Now, create and register a buffer to hold forwarded VF requests
2758 req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2759 bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2760 page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
2761 if (bp->pf.vf_req_buf == NULL) {
2765 for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2766 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2767 for (i = 0; i < num_vfs; i++)
2768 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2769 (i * HWRM_MAX_REQ_LEN);
2771 rc = bnxt_hwrm_func_buf_rgtr(bp);
2775 populate_vf_func_cfg_req(bp, &req, num_vfs);
2777 bp->pf.active_vfs = 0;
2778 for (i = 0; i < num_vfs; i++) {
2779 add_random_mac_if_needed(bp, &req, i);
2781 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2782 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2783 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2784 rc = bnxt_hwrm_send_message(bp,
2789 /* Clear enable flag for next pass */
2790 req.enables &= ~rte_cpu_to_le_32(
2791 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2793 if (rc || resp->error_code) {
2795 "Failed to initizlie VF %d\n", i);
2797 "Not all VFs available. (%d, %d)\n",
2798 rc, resp->error_code);
2805 reserve_resources_from_vf(bp, &req, i);
2806 bp->pf.active_vfs++;
2807 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2811 * Now configure the PF to use "the rest" of the resources
2812 * We're using STD_TX_RING_MODE here, which will limit the number of
2813 * TX rings; this allows QoS to function properly. Without it, the PF
2814 * rings would break bandwidth settings.
2816 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2820 rc = update_pf_resource_max(bp);
2827 bnxt_hwrm_func_buf_unrgtr(bp);
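/*
 * Illustrative usage sketch (assumption: "num_vfs" comes from the
 * SR-IOV configuration handled elsewhere in the PMD):
 *
 *	if (BNXT_PF(bp) && num_vfs)
 *		rc = bnxt_hwrm_allocate_vfs(bp, num_vfs);
 *	else
 *		rc = bnxt_hwrm_allocate_pf_only(bp);
 */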
2831 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2833 struct hwrm_func_cfg_input req = {0};
2834 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2837 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2839 req.fid = rte_cpu_to_le_16(0xffff);
2840 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2841 req.evb_mode = bp->pf.evb_mode;
2843 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2844 HWRM_CHECK_RESULT();
2850 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2851 uint8_t tunnel_type)
2853 struct hwrm_tunnel_dst_port_alloc_input req = {0};
2854 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2857 HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
2858 req.tunnel_type = tunnel_type;
2859 req.tunnel_dst_port_val = port;
2860 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2861 HWRM_CHECK_RESULT();
2863 switch (tunnel_type) {
2864 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2865 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2866 bp->vxlan_port = port;
2868 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2869 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2870 bp->geneve_port = port;
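/*
 * Illustrative sketch of the alloc/free pairing (assumptions: the UDP
 * port comes from the rte_eth UDP tunnel add/del callbacks and is passed
 * in the byte order the firmware expects):
 *
 *	rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_port,
 *		HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN);
 *	...
 *	rc = bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
 *		HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
 */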
2881 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2882 uint8_t tunnel_type)
2884 struct hwrm_tunnel_dst_port_free_input req = {0};
2885 struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2888 HWRM_PREP(req, TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
2890 req.tunnel_type = tunnel_type;
2891 req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2892 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2894 HWRM_CHECK_RESULT();
2900 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2903 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2904 struct hwrm_func_cfg_input req = {0};
2907 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2909 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2910 req.flags = rte_cpu_to_le_32(flags);
2911 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2913 HWRM_CHECK_RESULT();
2919 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2921 uint32_t *flag = flagp;
2923 vnic->flags = *flag;
2926 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2928 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2931 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2934 struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2935 struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2937 HWRM_PREP(req, FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
2939 req.req_buf_num_pages = rte_cpu_to_le_16(1);
2940 req.req_buf_page_size = rte_cpu_to_le_16(
2941 page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2942 req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2943 req.req_buf_page_addr0 =
2944 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
2945 if (req.req_buf_page_addr0 == 0) {
2947 "unable to map buffer address to physical memory\n");
2951 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2953 HWRM_CHECK_RESULT();
2959 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2962 struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2963 struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2965 HWRM_PREP(req, FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
2967 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2969 HWRM_CHECK_RESULT();
2975 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2977 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2978 struct hwrm_func_cfg_input req = {0};
2981 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2983 req.fid = rte_cpu_to_le_16(0xffff);
2984 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2985 req.enables = rte_cpu_to_le_32(
2986 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2987 req.async_event_cr = rte_cpu_to_le_16(
2988 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2989 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2991 HWRM_CHECK_RESULT();
2997 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2999 struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3000 struct hwrm_func_vf_cfg_input req = {0};
3003 HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
3005 req.enables = rte_cpu_to_le_32(
3006 HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3007 req.async_event_cr = rte_cpu_to_le_16(
3008 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
3009 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3011 HWRM_CHECK_RESULT();
3017 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
3019 struct hwrm_func_cfg_input req = {0};
3020 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3021 uint16_t dflt_vlan, fid;
3022 uint32_t func_cfg_flags;
3025 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3028 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
3029 fid = bp->pf.vf_info[vf].fid;
3030 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
3032 fid = rte_cpu_to_le_16(0xffff);
3033 func_cfg_flags = bp->pf.func_cfg_flags;
3034 dflt_vlan = bp->vlan;
3037 req.flags = rte_cpu_to_le_32(func_cfg_flags);
3038 req.fid = rte_cpu_to_le_16(fid);
3039 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3040 req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
3042 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3044 HWRM_CHECK_RESULT();
3050 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
3051 uint16_t max_bw, uint16_t enables)
3053 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3054 struct hwrm_func_cfg_input req = {0};
3057 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3059 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3060 req.enables |= rte_cpu_to_le_32(enables);
3061 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3062 req.max_bw = rte_cpu_to_le_32(max_bw);
3063 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3065 HWRM_CHECK_RESULT();
3071 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
3073 struct hwrm_func_cfg_input req = {0};
3074 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3077 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3079 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3080 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3081 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3082 req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
3084 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3086 HWRM_CHECK_RESULT();
3092 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
3097 rc = bnxt_hwrm_func_cfg_def_cp(bp);
3099 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
3104 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
3105 void *encaped, size_t ec_size)
3108 struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
3109 struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3111 if (ec_size > sizeof(req.encap_request))
3114 HWRM_PREP(req, REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
3116 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3117 memcpy(req.encap_request, encaped, ec_size);
3119 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3121 HWRM_CHECK_RESULT();
3127 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
3128 struct ether_addr *mac)
3130 struct hwrm_func_qcfg_input req = {0};
3131 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3134 HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
3136 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3137 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3139 HWRM_CHECK_RESULT();
3141 memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
3148 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
3149 void *encaped, size_t ec_size)
3152 struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
3153 struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3155 if (ec_size > sizeof(req.encap_request))
3158 HWRM_PREP(req, EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
3160 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3161 memcpy(req.encap_request, encaped, ec_size);
3163 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3165 HWRM_CHECK_RESULT();
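/*
 * Illustrative sketch of the forwarding decision the PF makes for a
 * request a VF placed in its slot of bp->pf.vf_req_buf ("fwd_cmd",
 * "fw_fid" and "req_len" are assumed names from that handler):
 *
 *	if (request_is_permitted)
 *		rc = bnxt_hwrm_exec_fwd_resp(bp, fw_fid, fwd_cmd, req_len);
 *	else
 *		rc = bnxt_hwrm_reject_fwd_resp(bp, fw_fid, fwd_cmd, req_len);
 */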
3171 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3172 struct rte_eth_stats *stats, uint8_t rx)
3175 struct hwrm_stat_ctx_query_input req = {.req_type = 0};
3176 struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
3178 HWRM_PREP(req, STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
3180 req.stat_ctx_id = rte_cpu_to_le_32(cid);
3182 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3184 HWRM_CHECK_RESULT();
3187 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3188 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3189 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3190 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3191 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3192 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3193 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3194 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3196 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3197 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3198 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3199 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3200 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3201 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3202 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
3211 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3213 struct hwrm_port_qstats_input req = {0};
3214 struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3215 struct bnxt_pf_info *pf = &bp->pf;
3218 HWRM_PREP(req, PORT_QSTATS, BNXT_USE_CHIMP_MB);
3220 req.port_id = rte_cpu_to_le_16(pf->port_id);
3221 req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3222 req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3223 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3225 HWRM_CHECK_RESULT();
3231 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3233 struct hwrm_port_clr_stats_input req = {0};
3234 struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3235 struct bnxt_pf_info *pf = &bp->pf;
3238 /* Not allowed on NS2 device, NPAR, MultiHost, VF */
3239 if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
3240 BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
3243 HWRM_PREP(req, PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
3245 req.port_id = rte_cpu_to_le_16(pf->port_id);
3246 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3248 HWRM_CHECK_RESULT();
3254 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3256 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3257 struct hwrm_port_led_qcaps_input req = {0};
3263 HWRM_PREP(req, PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
3264 req.port_id = bp->pf.port_id;
3265 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3267 HWRM_CHECK_RESULT();
3269 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3272 bp->num_leds = resp->num_leds;
3273 memcpy(bp->leds, &resp->led0_id,
3274 sizeof(bp->leds[0]) * bp->num_leds);
3275 for (i = 0; i < bp->num_leds; i++) {
3276 struct bnxt_led_info *led = &bp->leds[i];
3278 uint16_t caps = led->led_state_caps;
3280 if (!led->led_group_id ||
3281 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3293 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3295 struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3296 struct hwrm_port_led_cfg_input req = {0};
3297 struct bnxt_led_cfg *led_cfg;
3298 uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3299 uint16_t duration = 0;
3302 if (!bp->num_leds || BNXT_VF(bp))
3305 HWRM_PREP(req, PORT_LED_CFG, BNXT_USE_CHIMP_MB);
3308 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3309 duration = rte_cpu_to_le_16(500);
3311 req.port_id = bp->pf.port_id;
3312 req.num_leds = bp->num_leds;
3313 led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3314 for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3315 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3316 led_cfg->led_id = bp->leds[i].led_id;
3317 led_cfg->led_state = led_state;
3318 led_cfg->led_blink_on = duration;
3319 led_cfg->led_blink_off = duration;
3320 led_cfg->led_group_id = bp->leds[i].led_group_id;
3323 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3325 HWRM_CHECK_RESULT();
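/*
 * Illustrative usage sketch: blinking the port LEDs to identify the
 * port, then restoring the default state:
 *
 *	rc = bnxt_hwrm_port_led_cfg(bp, true);
 *	... operator locates the port ...
 *	rc = bnxt_hwrm_port_led_cfg(bp, false);
 */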
3331 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3335 struct hwrm_nvm_get_dir_info_input req = {0};
3336 struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3338 HWRM_PREP(req, NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
3340 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3342 HWRM_CHECK_RESULT();
3346 *entries = rte_le_to_cpu_32(resp->entries);
3347 *length = rte_le_to_cpu_32(resp->entry_length);
3352 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3355 uint32_t dir_entries;
3356 uint32_t entry_length;
3359 rte_iova_t dma_handle;
3360 struct hwrm_nvm_get_dir_entries_input req = {0};
3361 struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3363 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3367 *data++ = dir_entries;
3368 *data++ = entry_length;
3370 memset(data, 0xff, len);
3372 buflen = dir_entries * entry_length;
3373 buf = rte_malloc("nvm_dir", buflen, 0);
3374 rte_mem_lock_page(buf);
3377 dma_handle = rte_mem_virt2iova(buf);
3378 if (dma_handle == 0) {
3380 "unable to map response address to physical memory\n");
3383 HWRM_PREP(req, NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
3384 req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3385 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3388 memcpy(data, buf, len > buflen ? buflen : len);
3391 HWRM_CHECK_RESULT();
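/*
 * Illustrative sketch of sizing and reading the NVM directory
 * (assumption: "buf"/"buf_len" are a caller-provided buffer sized from
 * the entry count and entry length reported by the firmware):
 *
 *	uint32_t entries, entry_len;
 *
 *	rc = bnxt_hwrm_nvm_get_dir_info(bp, &entries, &entry_len);
 *	if (rc == 0)
 *		rc = bnxt_get_nvram_directory(bp, buf_len, buf);
 */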
3397 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3398 uint32_t offset, uint32_t length,
3403 rte_iova_t dma_handle;
3404 struct hwrm_nvm_read_input req = {0};
3405 struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3407 buf = rte_malloc("nvm_item", length, 0);
3408 rte_mem_lock_page(buf);
3412 dma_handle = rte_mem_virt2iova(buf);
3413 if (dma_handle == 0) {
3415 "unable to map response address to physical memory\n");
3418 HWRM_PREP(req, NVM_READ, BNXT_USE_CHIMP_MB);
3419 req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3420 req.dir_idx = rte_cpu_to_le_16(index);
3421 req.offset = rte_cpu_to_le_32(offset);
3422 req.len = rte_cpu_to_le_32(length);
3423 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3425 memcpy(data, buf, length);
3428 HWRM_CHECK_RESULT();
3434 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3437 struct hwrm_nvm_erase_dir_entry_input req = {0};
3438 struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3440 HWRM_PREP(req, NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
3441 req.dir_idx = rte_cpu_to_le_16(index);
3442 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3443 HWRM_CHECK_RESULT();
3450 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3451 uint16_t dir_ordinal, uint16_t dir_ext,
3452 uint16_t dir_attr, const uint8_t *data,
3456 struct hwrm_nvm_write_input req = {0};
3457 struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3458 rte_iova_t dma_handle;
3461 buf = rte_malloc("nvm_write", data_len, 0);
3462 rte_mem_lock_page(buf);
3466 dma_handle = rte_mem_virt2iova(buf);
3467 if (dma_handle == 0) {
3469 "unable to map response address to physical memory\n");
3472 memcpy(buf, data, data_len);
3474 HWRM_PREP(req, NVM_WRITE, BNXT_USE_CHIMP_MB);
3476 req.dir_type = rte_cpu_to_le_16(dir_type);
3477 req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3478 req.dir_ext = rte_cpu_to_le_16(dir_ext);
3479 req.dir_attr = rte_cpu_to_le_16(dir_attr);
3480 req.dir_data_length = rte_cpu_to_le_32(data_len);
3481 req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3483 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3486 HWRM_CHECK_RESULT();
3493 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3495 uint32_t *count = cbdata;
3497 *count = *count + 1;
3500 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3501 struct bnxt_vnic_info *vnic __rte_unused)
3506 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3510 bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3511 &count, bnxt_vnic_count_hwrm_stub);
3516 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3519 struct hwrm_func_vf_vnic_ids_query_input req = {0};
3520 struct hwrm_func_vf_vnic_ids_query_output *resp =
3521 bp->hwrm_cmd_resp_addr;
3524 /* First query all VNIC ids */
3525 HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
3527 req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3528 req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3529 req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3531 if (req.vnic_id_tbl_addr == 0) {
3534 "unable to map VNIC ID table address to physical memory\n");
3537 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3540 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
3542 } else if (resp->error_code) {
3543 rc = rte_le_to_cpu_16(resp->error_code);
3545 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
3548 rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3556 * This function queries the VNIC IDs for a specified VF. It then calls
3557 * the vnic_cb to update the necessary field in vnic_info with cbdata.
3558 * Then it calls the hwrm_cb function to program this new vnic configuration.
3560 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3561 void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3562 int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3564 struct bnxt_vnic_info vnic;
3566 int i, num_vnic_ids;
3571 /* First query all VNIC ids */
3572 vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3573 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3574 RTE_CACHE_LINE_SIZE);
3575 if (vnic_ids == NULL) {
3579 for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3580 rte_mem_lock_page(((char *)vnic_ids) + sz);
3582 num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3584 if (num_vnic_ids < 0)
3585 return num_vnic_ids;
3587 /* Query each VNIC, apply vnic_cb, then program the change via hwrm_cb */
3589 for (i = 0; i < num_vnic_ids; i++) {
3590 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3591 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3592 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3595 if (vnic.mru <= 4) /* Indicates unallocated */
3598 vnic_cb(&vnic, cbdata);
3600 rc = hwrm_cb(bp, &vnic);
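/*
 * Illustrative sketch (mirrors bnxt_vf_vnic_count() above): applying an
 * rx-mask change to every VNIC owned by a VF.  The flag value is an
 * assumption; any bnxt_vnic_info flags update works the same way:
 *
 *	uint32_t flag = 0;
 *
 *	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
 *		vf_vnic_set_rxmask_cb, &flag, bnxt_set_rx_mask_no_vlan);
 */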
3610 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3613 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3614 struct hwrm_func_cfg_input req = {0};
3617 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3619 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3620 req.enables |= rte_cpu_to_le_32(
3621 HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3622 req.vlan_antispoof_mode = on ?
3623 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3624 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3625 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3627 HWRM_CHECK_RESULT();
3633 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3635 struct bnxt_vnic_info vnic;
3638 int num_vnic_ids, i;
3642 vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3643 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3644 RTE_CACHE_LINE_SIZE);
3645 if (vnic_ids == NULL) {
3650 for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3651 rte_mem_lock_page(((char *)vnic_ids) + sz);
3653 rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3659 * Loop through to find the default VNIC ID.
3660 * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3661 * by sending the hwrm_func_qcfg command to the firmware.
3663 for (i = 0; i < num_vnic_ids; i++) {
3664 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3665 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3666 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3667 bp->pf.first_vf_id + vf);
3670 if (vnic.func_default) {
3672 return vnic.fw_vnic_id;
3675 /* Could not find a default VNIC. */
3676 PMD_DRV_LOG(ERR, "No default VNIC\n");
3682 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3684 struct bnxt_filter_info *filter)
3687 struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3688 struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3689 uint32_t enables = 0;
3691 if (filter->fw_em_filter_id != UINT64_MAX)
3692 bnxt_hwrm_clear_em_filter(bp, filter);
3694 HWRM_PREP(req, CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));
3696 req.flags = rte_cpu_to_le_32(filter->flags);
3698 enables = filter->enables |
3699 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3700 req.dst_id = rte_cpu_to_le_16(dst_id);
3702 if (filter->ip_addr_type) {
3703 req.ip_addr_type = filter->ip_addr_type;
3704 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3707 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3708 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3710 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3711 memcpy(req.src_macaddr, filter->src_macaddr,
3714 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3715 memcpy(req.dst_macaddr, filter->dst_macaddr,
3718 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3719 req.ovlan_vid = filter->l2_ovlan;
3721 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3722 req.ivlan_vid = filter->l2_ivlan;
3724 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3725 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3727 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3728 req.ip_protocol = filter->ip_protocol;
3730 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3731 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3733 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3734 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3736 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3737 req.src_port = rte_cpu_to_be_16(filter->src_port);
3739 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3740 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3742 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3743 req.mirror_vnic_id = filter->mirror_vnic_id;
3745 req.enables = rte_cpu_to_le_32(enables);
3747 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
3749 HWRM_CHECK_RESULT();
3751 filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3757 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3760 struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3761 struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3763 if (filter->fw_em_filter_id == UINT64_MAX)
3766 PMD_DRV_LOG(ERR, "Clear EM filter\n");
3767 HWRM_PREP(req, CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
3769 req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3771 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
3773 HWRM_CHECK_RESULT();
3776 filter->fw_em_filter_id = UINT64_MAX;
3777 filter->fw_l2_filter_id = UINT64_MAX;
3782 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3784 struct bnxt_filter_info *filter)
3787 struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3788 struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3789 bp->hwrm_cmd_resp_addr;
3790 uint32_t enables = 0;
3792 if (filter->fw_ntuple_filter_id != UINT64_MAX)
3793 bnxt_hwrm_clear_ntuple_filter(bp, filter);
3795 HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
3797 req.flags = rte_cpu_to_le_32(filter->flags);
3799 enables = filter->enables |
3800 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3801 req.dst_id = rte_cpu_to_le_16(dst_id);
3804 if (filter->ip_addr_type) {
3805 req.ip_addr_type = filter->ip_addr_type;
3807 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3810 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3811 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3813 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3814 memcpy(req.src_macaddr, filter->src_macaddr,
3817 //HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
3818 //memcpy(req.dst_macaddr, filter->dst_macaddr,
3821 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
3822 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3824 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3825 req.ip_protocol = filter->ip_protocol;
3827 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3828 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
3830 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
3831 req.src_ipaddr_mask[0] =
3832 rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
3834 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
3835 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
3837 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
3838 req.dst_ipaddr_mask[0] =
3839 rte_cpu_to_be_32(filter->dst_ipaddr_mask[0]);
3841 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
3842 req.src_port = rte_cpu_to_le_16(filter->src_port);
3844 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
3845 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
3847 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
3848 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
3850 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
3851 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
3853 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3854 req.mirror_vnic_id = filter->mirror_vnic_id;
3856 req.enables = rte_cpu_to_le_32(enables);
3858 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3860 HWRM_CHECK_RESULT();
3862 filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3868 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3869 struct bnxt_filter_info *filter)
3872 struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3873 struct hwrm_cfa_ntuple_filter_free_output *resp =
3874 bp->hwrm_cmd_resp_addr;
3876 if (filter->fw_ntuple_filter_id == UINT64_MAX)
3879 HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);
3881 req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3883 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3885 HWRM_CHECK_RESULT();
3888 filter->fw_ntuple_filter_id = UINT64_MAX;
3893 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3895 unsigned int rss_idx, fw_idx, i;
3897 if (vnic->rss_table && vnic->hash_type) {
3899 * Fill the RSS hash & redirection table with
3900 * ring group ids for all VNICs
3902 for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
3903 rss_idx++, fw_idx++) {
3904 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
3905 fw_idx %= bp->rx_cp_nr_rings;
3906 if (vnic->fw_grp_ids[fw_idx] !=
3911 if (i == bp->rx_cp_nr_rings)
3913 vnic->rss_table[rss_idx] =
3914 vnic->fw_grp_ids[fw_idx];
3916 return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
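/*
 * Worked example (illustrative only) of the redirection table fill above:
 * with four Rx rings whose group ids are all valid, "fw_idx %=
 * bp->rx_cp_nr_rings" wraps the index, so the HW_HASH_INDEX_SIZE entries
 * of rss_table[] cycle through fw_grp_ids[0..3] as 0, 1, 2, 3, 0, 1, ...
 * spreading the RSS hash buckets evenly across the rings.
 */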
3921 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
3922 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
3926 req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
3928 /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
3929 req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
3931 /* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
3932 req->num_cmpl_dma_aggr_during_int =
3933 rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
3935 req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
3937 /* min timer set to 1/2 of interrupt timer */
3938 req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
3940 /* buf timer set to 1/4 of interrupt timer */
3941 req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
3943 req->cmpl_aggr_dma_tmr_during_int =
3944 rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
3946 flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
3947 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
3948 req->flags = rte_cpu_to_le_16(flags);
3951 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
3952 struct bnxt_coal *coal, uint16_t ring_id)
3954 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
3955 struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
3956 bp->hwrm_cmd_resp_addr;
3959 /* Set ring coalesce parameters only for Stratus 100G NIC */
3960 if (!bnxt_stratus_device(bp))
3963 HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS, BNXT_USE_CHIMP_MB);
3964 bnxt_hwrm_set_coal_params(coal, &req);
3965 req.ring_id = rte_cpu_to_le_16(ring_id);
3966 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3967 HWRM_CHECK_RESULT();
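/*
 * Illustrative usage sketch; the field values are assumptions chosen only
 * to show the shape of a bnxt_coal setup ("cpr" is a hypothetical
 * completion ring), not recommended defaults:
 *
 *	struct bnxt_coal coal = {
 *		.num_cmpl_aggr_int = 4,
 *		.num_cmpl_dma_aggr = 4,
 *		.num_cmpl_dma_aggr_during_int = 4,
 *		.int_lat_tmr_max = 64,
 *		.int_lat_tmr_min = 32,
 *		.cmpl_aggr_dma_tmr = 16,
 *		.cmpl_aggr_dma_tmr_during_int = 16,
 *	};
 *
 *	rc = bnxt_hwrm_set_ring_coal(bp, &coal,
 *		cpr->cp_ring_struct->fw_ring_id);
 */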
3972 int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
3974 struct hwrm_port_qstats_ext_input req = {0};
3975 struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
3976 struct bnxt_pf_info *pf = &bp->pf;
3979 if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
3980 bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
3983 HWRM_PREP(req, PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);
3985 req.port_id = rte_cpu_to_le_16(pf->port_id);
3986 if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
3987 req.tx_stat_host_addr =
3988 rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3990 rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
3992 if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
3993 req.rx_stat_host_addr =
3994 rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3996 rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
3998 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4001 bp->fw_rx_port_stats_ext_size = 0;
4002 bp->fw_tx_port_stats_ext_size = 0;
4004 bp->fw_rx_port_stats_ext_size =
4005 rte_le_to_cpu_16(resp->rx_stat_size);
4006 bp->fw_tx_port_stats_ext_size =
4007 rte_le_to_cpu_16(resp->tx_stat_size);
4010 HWRM_CHECK_RESULT();