/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>
#include <rte_io.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#define HWRM_CMD_TIMEOUT		10000
#define HWRM_SPEC_CODE_1_8_3		0x10803
#define HWRM_VERSION_1_9_1		0x10901
struct bnxt_plcmodes_cfg {
	uint32_t	flags;
	uint16_t	jumbo_thresh;
	uint16_t	hds_offset;
	uint16_t	hds_threshold;
};
static int page_getenum(size_t size)
{
	if (size <= 1 << 4)
		return 4;
	if (size <= 1 << 12)
		return 12;
	if (size <= 1 << 13)
		return 13;
	if (size <= 1 << 16)
		return 16;
	if (size <= 1 << 21)
		return 21;
	if (size <= 1 << 22)
		return 22;
	if (size <= 1 << 30)
		return 30;
	PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
	return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
	return 1 << page_getenum(size);
}
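
/*
 * Usage sketch (illustrative only, not part of the driver): the helpers
 * above round an arbitrary table size up to the next page size supported
 * by the hardware, e.g. page_getenum(3000) == 12, so page_roundup(3000)
 * == 4096. The helper name below is hypothetical.
 */
static inline int bnxt_example_round_table(void)
{
	/* A 3000-byte VLAN table is backed by one 4KB page. */
	return page_roundup(3000) == 4096;
}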
/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * HWRM command was failed by the ChiMP.
 */
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
				  uint32_t msg_len, bool use_kong_mb)
{
	unsigned int i;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;
	uint8_t *bar;
	uint8_t *valid;
	uint16_t max_req_len = bp->max_req_len;
	struct hwrm_short_input short_input = { 0 };
	uint16_t bar_offset = use_kong_mb ?
		GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
	uint16_t mb_trigger_offset = use_kong_mb ?
		GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;

	if (bp->flags & BNXT_FLAG_SHORT_CMD) {
		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

		memset(short_cmd_req, 0, bp->max_req_len);
		memcpy(short_cmd_req, req, msg_len);

		short_input.req_type = rte_cpu_to_le_16(req->req_type);
		short_input.signature = rte_cpu_to_le_16(
					HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
		short_input.size = rte_cpu_to_le_16(msg_len);
		short_input.req_addr =
			rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

		data = (uint32_t *)&short_input;
		msg_len = sizeof(short_input);

		/* Sync memory write before updating doorbell */
		rte_wmb();

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}

	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + bar_offset + i;
		rte_write32(*data, bar);
		data++;
	}

	/* Zero the rest of the request space */
	for (; i < max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + bar_offset + i;
		rte_write32(0, bar);
	}

	/* Ring channel doorbell */
	bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
	rte_write32(1, bar);

	/* Poll for the valid bit */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len */
		rte_rmb();
		if (resp->resp_len && resp->resp_len <=
				bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)
				break;
		}
		rte_delay_us(1);
	}

	if (i >= HWRM_CMD_TIMEOUT) {
		PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
			    req->req_type);
		return -1;
	}
	return 0;
}
/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() checks the response for errors; on failure it releases
 * the spinlock and returns from the calling function, so it may be used only
 * in functions with the regular int return convention. Otherwise its logic
 * should be copied and adapted to suit the function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type, kong) do { \
	rte_spinlock_lock(&bp->hwrm_lock); \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
	req.cmpl_ring = rte_cpu_to_le_16(-1); \
	req.seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) : \
		rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
	req.target_id = rte_cpu_to_le_16(0xffff); \
	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)
#define HWRM_CHECK_RESULT_SILENT() do {\
	if (rc) { \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		return rc; \
	} \
	if (resp->error_code) { \
		rc = rte_le_to_cpu_16(resp->error_code); \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		return rc; \
	} \
} while (0)
#define HWRM_CHECK_RESULT() do {\
	if (rc) { \
		PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
			rc = -EACCES; \
		else if (rc > 0) \
			rc = -EINVAL; \
		return rc; \
	} \
	if (resp->error_code) { \
		rc = rte_le_to_cpu_16(resp->error_code); \
		if (resp->resp_len >= 16) { \
			struct hwrm_err_output *tmp_hwrm_err_op = \
						(void *)resp; \
			PMD_DRV_LOG(ERR, \
				"error %d:%d:%08x:%04x\n", \
				rc, tmp_hwrm_err_op->cmd_err, \
				rte_le_to_cpu_32(\
					tmp_hwrm_err_op->opaque_0), \
				rte_le_to_cpu_16(\
					tmp_hwrm_err_op->opaque_1)); \
		} else { \
			PMD_DRV_LOG(ERR, "error %d\n", rc); \
		} \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
			rc = -EACCES; \
		else if (rc > 0) \
			rc = -EINVAL; \
		return rc; \
	} \
} while (0)
#define HWRM_UNLOCK()		rte_spinlock_unlock(&bp->hwrm_lock)
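
/*
 * Canonical shape of an HWRM command using the macros above (an
 * illustrative, compiled-out sketch; every bnxt_hwrm_*() below follows it).
 * HWRM_CHECK_RESULT() expects function-scope "rc" and "resp" identifiers.
 */
#if 0
	int rc = 0;
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_RESET, BNXT_USE_CHIMP_MB);	/* takes hwrm_lock */
	/* ... fill request fields ... */
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();	/* on error: unlocks and returns rc */
	/* ... read resp fields while the lock is still held ... */
	HWRM_UNLOCK();
	return rc;
#endif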
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.mask = 0;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
				 struct bnxt_vnic_info *vnic,
				 uint16_t vlan_count,
				 struct bnxt_vlan_table_entry *vlan_table)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t mask = 0;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
		return rc;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	/* FIXME add multicast flag, when multicast adding options is supported
	 * by ethtool.
	 */
	if (vnic->flags & BNXT_VNIC_INFO_BCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
	if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	if (vnic->flags & BNXT_VNIC_INFO_MCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
	if (vnic->mc_addr_cnt) {
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
		req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
		req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
	}
	if (vlan_table) {
		if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
			mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
		req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
					rte_mem_virt2iova(vlan_table));
		req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
	}
	req.mask = rte_cpu_to_le_32(mask);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
			uint16_t vlan_count,
			struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
	int rc = 0;
	struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
	struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
						bp->hwrm_cmd_resp_addr;

	/*
	 * Older HWRM versions did not support this command, and the set_rx_mask
	 * list was used for anti-spoof. In 1.8.0, the TX path configuration was
	 * removed from set_rx_mask call, and this command was added.
	 *
	 * This command is also present from 1.7.8.11 and higher,
	 * as well as 1.7.8.0
	 */
	if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
		if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
			if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
					(11)))
				return 0;
		}
	}
	HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(fid);

	req.vlan_tag_mask_tbl_addr =
		rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
	req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
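
/*
 * Version packing sketch (illustrative macro, not used by the driver):
 * bp->fw_ver stores one byte per "maj.min.bld.rsvd" component, so plain
 * integer comparison orders versions, e.g. 1.7.8.11 == 0x0107080b.
 */
#if 0
#define BNXT_EXAMPLE_FW_VER(maj, min, bld, rsvd) \
	(((maj) << 24) | ((min) << 16) | ((bld) << 8) | (rsvd))
/* BNXT_EXAMPLE_FW_VER(1, 8, 0, 0) is the 1.8.0 threshold tested above. */
#endif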
int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
			      struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (filter->fw_l2_filter_id == UINT64_MAX)
		return 0;

	HWRM_PREP(req, CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);

	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	filter->fw_l2_filter_id = UINT64_MAX;

	return 0;
}
int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
			    uint16_t dst_id,
			    struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	const struct rte_eth_vmdq_rx_conf *conf =
		    &dev_conf->rx_adv_conf.vmdq_rx_conf;
	uint32_t enables = 0;
	uint16_t j = dst_id - 1;

	/* TODO: Is there a better way to add VLANs to each VNIC in case of
	 * VMDQ?
	 */
	if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
	    conf->pool_map[j].pools & (1UL << j)) {
		PMD_DRV_LOG(DEBUG,
			    "Add vlan %u to vmdq pool %u\n",
			    conf->pool_map[j].vlan_id, j);

		filter->l2_ivlan = conf->pool_map[j].vlan_id;
		filter->enables |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
	}

	if (filter->fw_l2_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_l2_filter(bp, filter);

	HWRM_PREP(req, CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
		  HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
		req.l2_ivlan = filter->l2_ivlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
		req.l2_ivlan_mask = filter->l2_ivlan_mask;
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
		req.src_id = rte_cpu_to_le_32(filter->src_id);
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
		req.src_type = filter->src_type;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
	struct hwrm_port_mac_cfg_input req = {.req_type = 0};
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	uint32_t flags = 0;
	int rc;

	if (!ptp)
		return 0;

	HWRM_PREP(req, PORT_MAC_CFG, BNXT_USE_CHIMP_MB);

	if (ptp->rx_filter)
		flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
	else
		flags |=
			HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
	if (ptp->tx_tstamp_en)
		flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
	else
		flags |=
			HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
	req.flags = rte_cpu_to_le_32(flags);
	req.enables = rte_cpu_to_le_32
		(HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
	req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
	struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

/*	if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
	if (ptp)
		return 0;

	HWRM_PREP(req, PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);

	req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	/* Release the lock on every early exit, not just at the end. */
	if (!(resp->flags &
	      HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS)) {
		HWRM_UNLOCK();
		return 0;
	}

	ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
	if (!ptp) {
		HWRM_UNLOCK();
		return -ENOMEM;
	}

	ptp->rx_regs[BNXT_PTP_RX_TS_L] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
	ptp->rx_regs[BNXT_PTP_RX_TS_H] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
	ptp->rx_regs[BNXT_PTP_RX_SEQ] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
	ptp->rx_regs[BNXT_PTP_RX_FIFO] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
	ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
	ptp->tx_regs[BNXT_PTP_TX_TS_L] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
	ptp->tx_regs[BNXT_PTP_TX_TS_H] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
	ptp->tx_regs[BNXT_PTP_TX_SEQ] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
	ptp->tx_regs[BNXT_PTP_TX_FIFO] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);

	HWRM_UNLOCK();

	ptp->bp = bp;
	bp->ptp_cfg = ptp;

	return 0;
}
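
/*
 * Consumer sketch (assumption): the _L/_H offsets saved above address the
 * low and high 32-bit halves of a 64-bit timestamp register, so a reader
 * would combine them as below. Hypothetical helper; the real register
 * readers live in the PTP paths of the ethdev code.
 */
static inline uint64_t bnxt_example_combine_ts(uint32_t ts_lo, uint32_t ts_hi)
{
	return ((uint64_t)ts_hi << 32) | ts_lo;
}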
static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t new_max_vfs;
	uint32_t flags;
	int i;

	HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	flags = rte_le_to_cpu_32(resp->flags);
	if (BNXT_PF(bp)) {
		bp->pf.port_id = resp->port_id;
		bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
		new_max_vfs = bp->pdev->max_vfs;
		if (new_max_vfs != bp->pf.max_vfs) {
			if (bp->pf.vf_info)
				rte_free(bp->pf.vf_info);
			bp->pf.vf_info = rte_malloc("bnxt_vf_info",
			    sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
			bp->pf.max_vfs = new_max_vfs;
			for (i = 0; i < new_max_vfs; i++) {
				bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
				bp->pf.vf_info[i].vlan_table =
					rte_zmalloc("VF VLAN table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_table == NULL)
					PMD_DRV_LOG(ERR,
					"Fail to alloc VLAN table for VF %d\n",
					i);
				else
					rte_mem_lock_page(
						bp->pf.vf_info[i].vlan_table);
				bp->pf.vf_info[i].vlan_as_table =
					rte_zmalloc("VF VLAN AS table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_as_table == NULL)
					PMD_DRV_LOG(ERR,
					"Alloc VLAN AS table for VF %d fail\n",
					i);
				else
					rte_mem_lock_page(
					       bp->pf.vf_info[i].vlan_as_table);
				STAILQ_INIT(&bp->pf.vf_info[i].filter);
			}
		}
	}

	bp->fw_fid = rte_le_to_cpu_32(resp->fid);
	memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
	/* TODO: For now, do not support VMDq/RFS on VFs. */
	if (BNXT_PF(bp)) {
		if (bp->pf.max_vfs)
			bp->max_vnics = 1;
		else
			bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	} else {
		bp->max_vnics = 1;
	}
	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
	if (BNXT_PF(bp)) {
		bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
		if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
			bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
			PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
			HWRM_UNLOCK();
			bnxt_hwrm_ptp_qcfg(bp);
			return 0;
		}
	}

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc;

	rc = __bnxt_hwrm_func_qcaps(bp);
	if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
		rc = bnxt_hwrm_func_resc_qcaps(bp);
		if (!rc)
			bp->flags |= BNXT_FLAG_NEW_RM;
	}

	return rc;
}
int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_RESET, BNXT_USE_CHIMP_MB);

	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
	int rc;
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)
		return 0;

	HWRM_PREP(req, FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

	if (BNXT_PF(bp)) {
		req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
		memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
		       RTE_MIN(sizeof(req.vf_req_fwd),
			       sizeof(bp->pf.vf_req_fwd)));

		/*
		 * PF can sniff HWRM API issued by VF. This can be set up by
		 * linux driver and inherited by the DPDK PF driver. Clear
		 * this HWRM sniffer list in FW because DPDK PF driver does
		 * not support this.
		 */
		req.flags =
		rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
	}

	req.async_event_fwd[0] |=
		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
				 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
	req.async_event_fwd[1] |=
		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
				 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->flags |= BNXT_FLAG_REGISTERED;

	return rc;
}
int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
{
	if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
		return 0;

	return bnxt_hwrm_func_reserve_vf_resc(bp, true);
}
int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
{
	int rc;
	uint32_t flags = 0;
	uint32_t enables;
	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_vf_cfg_input req = {0};

	HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);

	req.enables = rte_cpu_to_le_32
			(HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS |
			 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS |
			 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
			 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
			 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS |
			 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS);

	req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
	req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
					    AGG_RING_MULTIPLIER);
	req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
	req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
					      bp->tx_nr_rings);
	req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
	req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
	if (bp->vf_resv_strategy ==
	    HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
		enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
			  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
			  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
		req.enables |= rte_cpu_to_le_32(enables);
		req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
		req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
		req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
	}

	if (test)
		flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
			HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
			HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
			HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
			HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
			HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;

	req.flags = rte_cpu_to_le_32(flags);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	if (test)
		HWRM_CHECK_RESULT_SILENT();
	else
		HWRM_CHECK_RESULT();

	HWRM_UNLOCK();
	return rc;
}
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
{
	int rc;
	struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_resource_qcaps_input req = {0};

	HWRM_PREP(req, FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	if (BNXT_VF(bp)) {
		bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
		bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
		bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
		bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
		bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
		bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
		bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
		bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
	}
	bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
	if (bp->vf_resv_strategy >
	    HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
		bp->vf_resv_strategy =
		HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;

	HWRM_UNLOCK();
	return rc;
}
int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_ver_get_input req = {.req_type = 0 };
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t my_version;
	uint32_t fw_version;
	uint16_t max_resp_len;
	char type[RTE_MEMZONE_NAMESIZE];
	uint32_t dev_caps_cfg;

	bp->max_req_len = HWRM_MAX_REQ_LEN;
	HWRM_PREP(req, VER_GET, BNXT_USE_CHIMP_MB);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
		resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
		resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
		resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
	bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
		     (resp->hwrm_fw_min_8b << 16) |
		     (resp->hwrm_fw_bld_8b << 8) |
		     resp->hwrm_fw_rsvd_8b;
	PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

	my_version = HWRM_VERSION_MAJOR << 16;
	my_version |= HWRM_VERSION_MINOR << 8;
	my_version |= HWRM_VERSION_UPDATE;

	fw_version = resp->hwrm_intf_maj_8b << 16;
	fw_version |= resp->hwrm_intf_min_8b << 8;
	fw_version |= resp->hwrm_intf_upd_8b;
	bp->hwrm_spec_code = fw_version;

	if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
		PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
		rc = -EINVAL;
		goto error;
	}

	if (my_version != fw_version) {
		PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
		if (my_version < fw_version) {
			PMD_DRV_LOG(INFO,
				"Firmware API version is newer than driver.\n");
			PMD_DRV_LOG(INFO,
				"The driver may be missing features.\n");
		} else {
			PMD_DRV_LOG(INFO,
				"Firmware API version is older than driver.\n");
			PMD_DRV_LOG(INFO,
				"Not all driver features may be functional.\n");
		}
	}

	if (bp->max_req_len > resp->max_req_win_len) {
		PMD_DRV_LOG(ERR, "Unsupported request length\n");
		rc = -EINVAL;
	}
	bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
	max_resp_len = resp->max_resp_len;
	dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

	if (bp->max_resp_len != max_resp_len) {
		sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
			bp->pdev->addr.domain, bp->pdev->addr.bus,
			bp->pdev->addr.devid, bp->pdev->addr.function);

		rte_free(bp->hwrm_cmd_resp_addr);

		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
		if (bp->hwrm_cmd_resp_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
		bp->hwrm_cmd_resp_dma_addr =
			rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
		if (bp->hwrm_cmd_resp_dma_addr == 0) {
			PMD_DRV_LOG(ERR,
			"Unable to map response buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}
		bp->max_resp_len = max_resp_len;
	}

	if ((dev_caps_cfg &
		HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg &
		HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
		PMD_DRV_LOG(DEBUG, "Short command supported\n");

		rte_free(bp->hwrm_short_cmd_req_addr);

		bp->hwrm_short_cmd_req_addr = rte_malloc(type,
							 bp->max_req_len, 0);
		if (bp->hwrm_short_cmd_req_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
		bp->hwrm_short_cmd_req_dma_addr =
			rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
		if (bp->hwrm_short_cmd_req_dma_addr == 0) {
			rte_free(bp->hwrm_short_cmd_req_addr);
			PMD_DRV_LOG(ERR,
				"Unable to map buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}

		bp->flags |= BNXT_FLAG_SHORT_CMD;
	}
	if (dev_caps_cfg &
	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
		bp->flags |= BNXT_FLAG_KONG_MB_EN;
		PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
	}
	if (dev_caps_cfg &
	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
		PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");

error:
	HWRM_UNLOCK();
	return rc;
}
int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
	int rc;
	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))
		return 0;

	HWRM_PREP(req, FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
	req.flags = flags;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->flags &= ~BNXT_FLAG_REGISTERED;

	return rc;
}
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
	int rc = 0;
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, PORT_PHY_CFG, BNXT_USE_CHIMP_MB);

	if (conf->link_up) {
		/* Setting Fixed Speed. But AutoNeg is ON, So disable it */
		if (bp->link_info.auto_mode && conf->link_speed) {
			req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
			PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
		}

		req.flags = rte_cpu_to_le_32(conf->phy_flags);
		req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (!conf->link_speed) {
			/* No speeds specified. Enable AutoNeg - all speeds */
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
		}
		/* AutoNeg - Advertise speeds specified. */
		if (conf->auto_link_speed_mask &&
		    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
			req.auto_link_speed_mask =
				conf->auto_link_speed_mask;
			enables |=
			HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
		}

		req.auto_duplex = conf->duplex;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		req.force_pause = conf->force_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause && !req.force_pause)
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		else
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

		req.enables = rte_cpu_to_le_32(enables);
	} else {
		req.flags =
		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
		PMD_DRV_LOG(INFO, "Force Link Down\n");
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
{
	int rc = 0;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	link_info->phy_link_status = resp->link;
	link_info->link_up =
		(link_info->phy_link_status ==
		 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
	link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
	link_info->duplex = resp->duplex_cfg;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;
	link_info->phy_type = resp->phy_type;
	link_info->media_type = resp->media_type;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
	link_info->auto_link_speed_mask =
			rte_le_to_cpu_16(resp->auto_link_speed_mask);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;

	HWRM_UNLOCK();

	PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
	PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
	PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
	PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
	PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
		    link_info->auto_link_speed_mask);
	PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
		    link_info->force_link_speed);

	return rc;
}
int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int i;

	HWRM_PREP(req, QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);

	req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
	/* HWRM Version >= 1.9.1 */
	if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
		req.drv_qmap_cap =
			HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

	GET_QUEUE_INFO(0);
	GET_QUEUE_INFO(1);
	GET_QUEUE_INFO(2);
	GET_QUEUE_INFO(3);
	GET_QUEUE_INFO(4);
	GET_QUEUE_INFO(5);
	GET_QUEUE_INFO(6);
	GET_QUEUE_INFO(7);

	HWRM_UNLOCK();

	if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
		bp->tx_cosq_id = bp->cos_queue[0].id;
	} else {
		/* iterate and find the COSq profile to use for Tx */
		for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
			if (bp->cos_queue[i].profile ==
				HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
				bp->tx_cosq_id = bp->cos_queue[i].id;
				break;
			}
		}
	}
	PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);

	return rc;
}
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
	int rc = 0;
	uint32_t enables = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC, BNXT_USE_CHIMP_MB);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);
	req.length = rte_cpu_to_le_32(ring->ring_size);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
		/* FALLTHROUGH */
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		if (stats_ctx_id != INVALID_STATS_CTX_ID)
			enables |=
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
		break;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		break;
	default:
		PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
			    ring_type);
		HWRM_UNLOCK();
		return -1;
	}
	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			PMD_DRV_LOG(ERR,
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			PMD_DRV_LOG(ERR,
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			PMD_DRV_LOG(ERR,
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		default:
			PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		}
	}

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
	HWRM_UNLOCK();
	return rc;
}
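
/* Illustrative call (compiled out; "queue_index", "stats_ctx_id" and
 * "cp_ring_id" are hypothetical locals): allocating an Rx ring bound to a
 * completion ring and a statistics context.
 */
#if 0
	rc = bnxt_hwrm_ring_alloc(bp, ring,
				  HWRM_RING_ALLOC_INPUT_RING_TYPE_RX,
				  queue_index, stats_ctx_id, cp_ring_id);
#endif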
int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
{
	int rc;
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_FREE, BNXT_USE_CHIMP_MB);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		HWRM_UNLOCK();

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
				    rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
				    rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
				    rc);
			return rc;
		default:
			PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
			return rc;
		}
	}
	HWRM_UNLOCK();
	return 0;
}
int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
	int rc = 0;
	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);

	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	bp->grp_info[idx].fw_grp_id =
	    rte_le_to_cpu_16(resp->ring_group_id);

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
	int rc;
	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_FREE, BNXT_USE_CHIMP_MB);

	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
	return rc;
}
int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	int rc = 0;
	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
		return rc;

	HWRM_PREP(req, STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			     unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);

	req.update_period_ms = rte_cpu_to_le_32(0);

	req.stats_dma_addr =
	    rte_cpu_to_le_64(cpr->hw_stats_map);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			    unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_FREE, BNXT_USE_CHIMP_MB);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0, i, j;
	struct hwrm_vnic_alloc_input req = { 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
		    vnic->start_grp_id, vnic->end_grp_id);
	for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;

	vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
	vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				ETHER_CRC_LEN + VLAN_TAG_SIZE;
	HWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB);

	if (vnic->func_default)
		req.flags =
			rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
	HWRM_UNLOCK();
	PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
	return rc;
}
static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
					struct bnxt_vnic_info *vnic,
					struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	pmode->flags = rte_le_to_cpu_32(resp->flags);
	/* dflt_vnic bit doesn't exist in the _cfg command */
	pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
	pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
	pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
	pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

	HWRM_UNLOCK();

	return rc;
}
static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
				       struct bnxt_vnic_info *vnic,
				       struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.flags = rte_cpu_to_le_32(pmode->flags);
	req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
	req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
	req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
	req.enables = rte_cpu_to_le_32(
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
	);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
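
/*
 * The qcfg/cfg pair above lets a caller save and restore placement modes
 * around a VNIC reconfiguration; bnxt_hwrm_vnic_cfg() below does exactly
 * that with a stack-local struct bnxt_plcmodes_cfg:
 *
 *	rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
 *	...reconfigure the VNIC...
 *	rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
 */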
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t ctx_enable_flag = 0;
	struct bnxt_plcmodes_cfg pmodes;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
	if (rc)
		return rc;

	HWRM_PREP(req, VNIC_CFG, BNXT_USE_CHIMP_MB);

	/* Only RSS support for now TBD: COS & LB */
	req.enables =
		rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
	if (vnic->lb_rule != 0xffff)
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
	if (vnic->cos_rule != 0xffff)
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
	if (vnic->rss_rule != 0xffff) {
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
	}
	req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
	req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
	req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
	req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
	req.mru = rte_cpu_to_le_16(vnic->mru);
	if (vnic->func_default)
		req.flags |=
			rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
	if (vnic->vlan_strip)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
	if (vnic->bd_stall)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
	if (vnic->roce_dual)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
	if (vnic->roce_only)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
	if (vnic->rss_dflt_cr)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);

	return rc;
}
int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
			int16_t fw_vf_id)
{
	int rc = 0;
	struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
		return rc;
	}
	HWRM_PREP(req, VNIC_QCFG, BNXT_USE_CHIMP_MB);

	req.enables =
		rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.vf_id = rte_cpu_to_le_16(fw_vf_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
	vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
	vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
	vnic->mru = rte_le_to_cpu_16(resp->mru);
	vnic->func_default = rte_le_to_cpu_32(
			resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
	vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
	vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
	vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
	vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
	vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
	HWRM_UNLOCK();
	PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);

	return rc;
}
int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
						bp->hwrm_cmd_resp_addr;

	if (vnic->rss_rule == 0xffff) {
		PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
		return rc;
	}
	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);

	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	vnic->rss_rule = INVALID_HW_RING_ID;

	return rc;
}
int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_free_input req = {.req_type = 0 };
	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	HWRM_PREP(req, VNIC_FREE, BNXT_USE_CHIMP_MB);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	vnic->fw_vnic_id = INVALID_HW_RING_ID;
	return rc;
}
int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);

	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
	req.hash_mode_flags = vnic->hash_mode;

	req.ring_grp_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_table_dma_addr);
	req.hash_key_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
	req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
			       struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t size;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);

	req.flags = rte_cpu_to_le_32(
			HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);

	req.enables = rte_cpu_to_le_32(
		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);

	size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
	size -= RTE_PKTMBUF_HEADROOM;

	req.jumbo_thresh = rte_cpu_to_le_16(size);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic, bool enable)
{
	int rc = 0;
	struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);

	if (enable) {
		req.enables = rte_cpu_to_le_32(
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
		req.flags = rte_cpu_to_le_32(
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
		req.max_agg_segs = rte_cpu_to_le_16(5);
		req.max_aggs =
			rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
		req.min_agg_len = rte_cpu_to_le_32(512);
	}
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);

	HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->pf.vf_info[vf].random_mac = false;

	return rc;
}
int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
				  uint64_t *dropped)
{
	int rc = 0;
	struct hwrm_func_qstats_input req = {.req_type = 0};
	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	if (dropped)
		*dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
			  struct rte_eth_stats *stats)
{
	int rc = 0;
	struct hwrm_func_qstats_input req = {.req_type = 0};
	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
	stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
	stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
	stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
	stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
	stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);

	stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
	stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
	stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
	stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
	stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
	stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);

	stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
	stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
	stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
{
	int rc = 0;
	struct hwrm_func_clr_stats_input req = {.req_type = 0};
	struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
/*
 * HWRM utility functions
 */
int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	int rc = 0;
	unsigned int i;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_clear(bp, cpr);
		if (rc)
			return rc;
	}
	return 0;
}
int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	int rc;
	unsigned int i;
	struct bnxt_cp_ring_info *cpr;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {

		if (i >= bp->rx_cp_nr_rings) {
			cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
		} else {
			cpr = bp->rx_queues[i]->cp_ring;
			bp->grp_info[i].fw_stats_ctx = -1;
		}
		if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
			rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
			cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
			if (rc)
				return rc;
		}
	}
	return 0;
}
int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
		if (rc)
			return rc;
	}
	return rc;
}
int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
	uint16_t idx;
	uint32_t rc = 0;

	for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {

		if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
			continue;

		rc = bnxt_hwrm_ring_grp_free(bp, idx);
		if (rc)
			return rc;
	}
	return rc;
}
static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

	bnxt_hwrm_ring_free(bp, cp_ring,
			    HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
			sizeof(*cpr->cp_desc_ring));
	cpr->cp_raw_cons = 0;
}
void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
{
	struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	struct bnxt_ring *ring = rxr->rx_ring_struct;
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;

	if (ring->fw_ring_id != INVALID_HW_RING_ID) {
		bnxt_hwrm_ring_free(bp, ring,
				    HWRM_RING_FREE_INPUT_RING_TYPE_RX);
		ring->fw_ring_id = INVALID_HW_RING_ID;
		bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
		memset(rxr->rx_desc_ring, 0,
		       rxr->rx_ring_struct->ring_size *
		       sizeof(*rxr->rx_desc_ring));
		memset(rxr->rx_buf_ring, 0,
		       rxr->rx_ring_struct->ring_size *
		       sizeof(*rxr->rx_buf_ring));
		rxr->rx_prod = 0;
	}
	ring = rxr->ag_ring_struct;
	if (ring->fw_ring_id != INVALID_HW_RING_ID) {
		bnxt_hwrm_ring_free(bp, ring,
				    HWRM_RING_FREE_INPUT_RING_TYPE_RX);
		ring->fw_ring_id = INVALID_HW_RING_ID;
		memset(rxr->ag_buf_ring, 0,
		       rxr->ag_ring_struct->ring_size *
		       sizeof(*rxr->ag_buf_ring));
		rxr->ag_prod = 0;
		bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
	}
	if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
		bnxt_free_cp_ring(bp, cpr);

	bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
}
int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
	unsigned int i;

	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_TX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			memset(txr->tx_desc_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_desc_ring));
			memset(txr->tx_buf_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_buf_ring));
			txr->tx_prod = 0;
			txr->tx_cons = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_free_cp_ring(bp, cpr);
			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	for (i = 0; i < bp->rx_cp_nr_rings; i++)
		bnxt_free_hwrm_rx_ring(bp, i);

	return 0;
}
int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
{
	uint16_t i;
	uint32_t rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		rc = bnxt_hwrm_ring_grp_alloc(bp, i);
		if (rc)
			return rc;
	}
	return rc;
}
void bnxt_free_hwrm_resources(struct bnxt *bp)
{
	/* Release the response and short command request buffers */
	rte_free(bp->hwrm_cmd_resp_addr);
	rte_free(bp->hwrm_short_cmd_req_addr);
	bp->hwrm_cmd_resp_addr = NULL;
	bp->hwrm_short_cmd_req_addr = NULL;
	bp->hwrm_cmd_resp_dma_addr = 0;
	bp->hwrm_short_cmd_req_dma_addr = 0;
}
int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
	struct rte_pci_device *pdev = bp->pdev;
	char type[RTE_MEMZONE_NAMESIZE];

	sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	bp->max_resp_len = HWRM_MAX_RESP_LEN;
	bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
	if (bp->hwrm_cmd_resp_addr == NULL)
		return -ENOMEM;
	rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
	bp->hwrm_cmd_resp_dma_addr =
		rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
	if (bp->hwrm_cmd_resp_dma_addr == 0) {
		PMD_DRV_LOG(ERR,
			"Unable to map response address to physical memory\n");
		return -ENOMEM;
	}
	rte_spinlock_init(&bp->hwrm_lock);

	return 0;
}
int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	/* Pop entries off the head so removal never invalidates the
	 * iterator.
	 */
	while ((filter = STAILQ_FIRST(&vnic->filter)) != NULL) {
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			rc = bnxt_hwrm_clear_em_filter(bp, filter);
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
		else
			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
		STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
	}
	return rc;
}
static int
bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	struct rte_flow *flow;
	int rc = 0;

	/* As above, pop from the head rather than iterating while
	 * removing.
	 */
	while ((flow = STAILQ_FIRST(&vnic->flow_list)) != NULL) {
		filter = flow->filter;
		PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			rc = bnxt_hwrm_clear_em_filter(bp, filter);
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
		else
			rc = bnxt_hwrm_clear_l2_filter(bp, filter);

		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);
	}
	return rc;
}
int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
						     filter);
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
							 filter);
		else
			rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
						     filter);
		if (rc)
			break;
	}
	return rc;
}
void bnxt_free_tunnel_ports(struct bnxt *bp)
{
	if (bp->vxlan_port_cnt)
		bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
	bp->vxlan_port = 0;
	if (bp->geneve_port_cnt)
		bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
	bp->geneve_port = 0;
}
void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
	int i;

	if (bp->vnic_info == NULL)
		return;

	/*
	 * Cleanup VNICs in reverse order, to make sure the L2 filter
	 * from vnic0 is last to be cleaned up.
	 */
	for (i = bp->nr_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		bnxt_clear_hwrm_vnic_flows(bp, vnic);

		bnxt_clear_hwrm_vnic_filters(bp, vnic);

		bnxt_hwrm_vnic_ctx_free(bp, vnic);

		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);

		bnxt_hwrm_vnic_free(bp, vnic);

		rte_free(vnic->fw_grp_ids);
	}
	/* Ring resources */
	bnxt_free_all_hwrm_rings(bp);
	bnxt_free_all_hwrm_ring_grps(bp);
	bnxt_free_all_hwrm_stat_ctxs(bp);
	bnxt_free_tunnel_ports(bp);
}
static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
{
	uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	switch (conf_link_speed) {
	case ETH_LINK_SPEED_10M_HD:
	case ETH_LINK_SPEED_100M_HD:
		/* FALLTHROUGH */
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
	}
	return hw_link_duplex;
}
static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
{
	return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
}
2163 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2165 uint16_t eth_link_speed = 0;
2167 if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2168 return ETH_LINK_SPEED_AUTONEG;
2170 switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2171 case ETH_LINK_SPEED_100M:
2172 case ETH_LINK_SPEED_100M_HD:
2175 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2177 case ETH_LINK_SPEED_1G:
2179 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2181 case ETH_LINK_SPEED_2_5G:
2183 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2185 case ETH_LINK_SPEED_10G:
2187 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2189 case ETH_LINK_SPEED_20G:
2191 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2193 case ETH_LINK_SPEED_25G:
2195 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2197 case ETH_LINK_SPEED_40G:
2199 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2201 case ETH_LINK_SPEED_50G:
2203 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2205 case ETH_LINK_SPEED_100G:
2207 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2211 "Unsupported link speed %d; default to AUTO\n",
2215 return eth_link_speed;
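/*
 * Note that 10G/40G/50G/100G map to FORCE_LINK_SPEED_* constants while the
 * remaining speeds use AUTO_LINK_SPEED_*. In the HSI the two families appear
 * to share one numeric encoding (link speed in units of 100 Mbps), so the
 * returned value can be written to either request field. For example,
 * bnxt_parse_eth_link_speed(ETH_LINK_SPEED_10G | ETH_LINK_SPEED_FIXED)
 * returns 0x64 (100 * 100 Mbps = 10 Gbps).
 */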
2218 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2219 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2220 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2221 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
2223 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2227 if (link_speed == ETH_LINK_SPEED_AUTONEG)
2230 if (link_speed & ETH_LINK_SPEED_FIXED) {
2231 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2233 if (one_speed & (one_speed - 1)) {
2235 "Invalid advertised speeds (%u) for port %u\n",
2236 link_speed, port_id);
2239 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2241 "Unsupported advertised speed (%u) for port %u\n",
2242 link_speed, port_id);
2246 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2248 "Unsupported advertised speeds (%u) for port %u\n",
2249 link_speed, port_id);
2257 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2261 if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2262 if (bp->link_info.support_speeds)
2263 return bp->link_info.support_speeds;
2264 link_speed = BNXT_SUPPORTED_SPEEDS;
2267 if (link_speed & ETH_LINK_SPEED_100M)
2268 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2269 if (link_speed & ETH_LINK_SPEED_100M_HD)
2270 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2271 if (link_speed & ETH_LINK_SPEED_1G)
2272 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2273 if (link_speed & ETH_LINK_SPEED_2_5G)
2274 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2275 if (link_speed & ETH_LINK_SPEED_10G)
2276 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2277 if (link_speed & ETH_LINK_SPEED_20G)
2278 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2279 if (link_speed & ETH_LINK_SPEED_25G)
2280 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2281 if (link_speed & ETH_LINK_SPEED_40G)
2282 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2283 if (link_speed & ETH_LINK_SPEED_50G)
2284 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2285 if (link_speed & ETH_LINK_SPEED_100G)
2286 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
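/*
 * Example: link_speed = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G yields
 * AUTO_LINK_SPEED_MASK_10GB | AUTO_LINK_SPEED_MASK_25GB. Note that 100M and
 * 100M_HD set the same MASK_100MB bit, since the mask does not encode
 * duplex.
 */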
2290 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2292 uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2294 switch (hw_link_speed) {
2295 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2296 eth_link_speed = ETH_SPEED_NUM_100M;
2298 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2299 eth_link_speed = ETH_SPEED_NUM_1G;
2301 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2302 eth_link_speed = ETH_SPEED_NUM_2_5G;
2304 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2305 eth_link_speed = ETH_SPEED_NUM_10G;
2307 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2308 eth_link_speed = ETH_SPEED_NUM_20G;
2310 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2311 eth_link_speed = ETH_SPEED_NUM_25G;
2313 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2314 eth_link_speed = ETH_SPEED_NUM_40G;
2316 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2317 eth_link_speed = ETH_SPEED_NUM_50G;
2319 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2320 eth_link_speed = ETH_SPEED_NUM_100G;
2322 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2324 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2328 return eth_link_speed;
2331 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2333 uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2335 switch (hw_link_duplex) {
2336 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2337 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2339 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2341 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2342 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2345 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2349 return eth_link_duplex;
2352 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2355 struct bnxt_link_info *link_info = &bp->link_info;
2357 rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2360 "Get link config failed with rc %d\n", rc);
2363 if (link_info->link_speed)
2365 bnxt_parse_hw_link_speed(link_info->link_speed);
2367 link->link_speed = ETH_SPEED_NUM_NONE;
2368 link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2369 link->link_status = link_info->link_up;
2370 link->link_autoneg = link_info->auto_mode ==
2371 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2372 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
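/*
 * Apply the link configuration from dev_conf->link_speeds. When autoneg is
 * requested and the firmware has not already latched a forced or auto link
 * speed, RESTART_AUTONEG is set along with the advertised speed mask built
 * by bnxt_parse_eth_link_speed_mask(). Otherwise the FORCE flag is used
 * with a single speed, except on BASE-T media, which must autonegotiate.
 */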
2377 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2380 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2381 struct bnxt_link_info link_req;
2382 uint16_t speed, autoneg;
2384 if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2387 rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2388 bp->eth_dev->data->port_id);
2392 memset(&link_req, 0, sizeof(link_req));
2393 link_req.link_up = link_up;
2397 autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2398 speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2399 link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2400 /* Autoneg can be done only when the FW allows */
2401 if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
2402 bp->link_info.force_link_speed)) {
2403 link_req.phy_flags |=
2404 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2405 link_req.auto_link_speed_mask =
2406 bnxt_parse_eth_link_speed_mask(bp,
2407 dev_conf->link_speeds);
2409 if (bp->link_info.phy_type ==
2410 HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2411 bp->link_info.phy_type ==
2412 HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2413 bp->link_info.media_type ==
2414 HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2415 PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2419 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2420 /* If the user wants a particular speed, try that first. */
2422 link_req.link_speed = speed;
2423 else if (bp->link_info.force_link_speed)
2424 link_req.link_speed = bp->link_info.force_link_speed;
2426 link_req.link_speed = bp->link_info.auto_link_speed;
2428 link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2429 link_req.auto_pause = bp->link_info.auto_pause;
2430 link_req.force_pause = bp->link_info.force_pause;
2433 rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2436 "Set link config failed with rc %d\n", rc);
2444 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2446 struct hwrm_func_qcfg_input req = {0};
2447 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2451 HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2452 req.fid = rte_cpu_to_le_16(0xffff);
2454 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2456 HWRM_CHECK_RESULT();
2458 /* Hard-coded 0xfff VLAN ID mask */
2459 bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2460 flags = rte_le_to_cpu_16(resp->flags);
2461 if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2462 bp->flags |= BNXT_FLAG_MULTI_HOST;
2464 if (BNXT_VF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
2465 bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
2466 PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
2469 switch (resp->port_partition_type) {
2470 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2471 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2472 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2474 bp->port_partition_type = resp->port_partition_type;
2477 bp->port_partition_type = 0;
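/*
 * Synthesize a FUNC_QCAPS-style response from a FUNC_CFG request. Used as a
 * fallback by reserve_resources_from_vf() when querying a VF's actual
 * capabilities fails, so the PF still deducts the amounts it requested.
 */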
2486 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2487 struct hwrm_func_qcaps_output *qcaps)
2489 qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2490 memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2491 sizeof(qcaps->mac_address));
2492 qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2493 qcaps->max_rx_rings = fcfg->num_rx_rings;
2494 qcaps->max_tx_rings = fcfg->num_tx_rings;
2495 qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2496 qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2498 qcaps->first_vf_id = 0;
2499 qcaps->max_vnics = fcfg->num_vnics;
2500 qcaps->max_decap_records = 0;
2501 qcaps->max_encap_records = 0;
2502 qcaps->max_tx_wm_flows = 0;
2503 qcaps->max_tx_em_flows = 0;
2504 qcaps->max_rx_wm_flows = 0;
2505 qcaps->max_rx_em_flows = 0;
2506 qcaps->max_flow_id = 0;
2507 qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2508 qcaps->max_sp_tx_rings = 0;
2509 qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2512 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2514 struct hwrm_func_cfg_input req = {0};
2515 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2518 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2519 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2520 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2521 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2522 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2523 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2524 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2525 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2526 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2527 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2528 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2529 req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2530 req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2531 ETHER_CRC_LEN + VLAN_TAG_SIZE *
2533 req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2534 req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2535 req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2536 req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2537 req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2538 req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2539 req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2540 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2541 req.fid = rte_cpu_to_le_16(0xffff);
2543 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2545 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2547 HWRM_CHECK_RESULT();
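/*
 * Build the per-VF FUNC_CFG request. Each resource pool is divided evenly
 * across the PF and its VFs, hence the (num_vfs + 1) divisor below. For
 * example, with max_tx_rings = 64 and num_vfs = 7, every function (the PF
 * and each VF) is offered 64 / 8 = 8 TX rings.
 */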
2553 static void populate_vf_func_cfg_req(struct bnxt *bp,
2554 struct hwrm_func_cfg_input *req,
2557 req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2558 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2559 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2560 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2561 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2562 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2563 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2564 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2565 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2566 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2568 req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2569 ETHER_CRC_LEN + VLAN_TAG_SIZE *
2571 req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2572 ETHER_CRC_LEN + VLAN_TAG_SIZE *
2574 req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2576 req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2577 req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2579 req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2580 req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2581 req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2582 /* TODO: For now, do not support VMDq/RFS on VFs. */
2583 req->num_vnics = rte_cpu_to_le_16(1);
2584 req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2588 static void add_random_mac_if_needed(struct bnxt *bp,
2589 struct hwrm_func_cfg_input *cfg_req,
2592 struct ether_addr mac;
2594 if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2597 if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2599 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2600 eth_random_addr(cfg_req->dflt_mac_addr);
2601 bp->pf.vf_info[vf].random_mac = true;
2603 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2607 static void reserve_resources_from_vf(struct bnxt *bp,
2608 struct hwrm_func_cfg_input *cfg_req,
2611 struct hwrm_func_qcaps_input req = {0};
2612 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2615 /* Get the actual allocated values now */
2616 HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);
2617 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2618 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2621 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
2622 copy_func_cfg_to_qcaps(cfg_req, resp);
2623 } else if (resp->error_code) {
2624 rc = rte_le_to_cpu_16(resp->error_code);
2625 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
2626 copy_func_cfg_to_qcaps(cfg_req, resp);
2629 bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2630 bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2631 bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2632 bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2633 bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2634 bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2636 * TODO: While not supporting VMDq with VFs, max_vnics is always
2637 * forced to 1 in this case
2639 //bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
2640 bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2645 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2647 struct hwrm_func_qcfg_input req = {0};
2648 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2651 /* Check for zero MAC address */
2652 HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2653 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2654 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2656 PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
2658 } else if (resp->error_code) {
2659 rc = rte_le_to_cpu_16(resp->error_code);
2660 PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
2663 rc = rte_le_to_cpu_16(resp->vlan);
2670 static int update_pf_resource_max(struct bnxt *bp)
2672 struct hwrm_func_qcfg_input req = {0};
2673 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2676 /* And copy the allocated numbers into the pf struct */
2677 HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
2678 req.fid = rte_cpu_to_le_16(0xffff);
2679 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2680 HWRM_CHECK_RESULT();
2682 /* Only TX ring value reflects actual allocation? TODO */
2683 bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2684 bp->pf.evb_mode = resp->evb_mode;
2691 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2696 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2700 rc = bnxt_hwrm_func_qcaps(bp);
2704 bp->pf.func_cfg_flags &=
2705 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2706 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2707 bp->pf.func_cfg_flags |=
2708 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2709 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2713 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2715 struct hwrm_func_cfg_input req = {0};
2716 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2723 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2727 rc = bnxt_hwrm_func_qcaps(bp);
2732 bp->pf.active_vfs = num_vfs;
2735 * First, configure the PF to only use one TX ring. This ensures that
2736 * there are enough rings for all VFs.
2738 * If we don't do this, when we call func_alloc() later, we will lock
2739 * extra rings to the PF that won't be available during func_cfg() of
2742 * This has been fixed with firmware versions above 20.6.54
2744 bp->pf.func_cfg_flags &=
2745 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2746 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2747 bp->pf.func_cfg_flags |=
2748 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2749 rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2754 * Now, create and register a buffer to hold forwarded VF requests
2756 req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2757 bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2758 page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
2759 if (bp->pf.vf_req_buf == NULL) {
2763 for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2764 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2765 for (i = 0; i < num_vfs; i++)
2766 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2767 (i * HWRM_MAX_REQ_LEN);
2769 rc = bnxt_hwrm_func_buf_rgtr(bp);
2773 populate_vf_func_cfg_req(bp, &req, num_vfs);
2775 bp->pf.active_vfs = 0;
2776 for (i = 0; i < num_vfs; i++) {
2777 add_random_mac_if_needed(bp, &req, i);
2779 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2780 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2781 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2782 rc = bnxt_hwrm_send_message(bp,
2787 /* Clear enable flag for next pass */
2788 req.enables &= ~rte_cpu_to_le_32(
2789 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2791 if (rc || resp->error_code) {
2793 "Failed to initizlie VF %d\n", i);
2795 "Not all VFs available. (%d, %d)\n",
2796 rc, resp->error_code);
2803 reserve_resources_from_vf(bp, &req, i);
2804 bp->pf.active_vfs++;
2805 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2809 * Now configure the PF to use "the rest" of the resources.
2810 * We use STD_TX_RING_MODE here, which limits the number of TX rings
2811 * but allows QoS to function properly. Without it, the PF rings
2812 * would break the bandwidth settings.
2814 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2818 rc = update_pf_resource_max(bp);
2825 bnxt_hwrm_func_buf_unrgtr(bp);
2829 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2831 struct hwrm_func_cfg_input req = {0};
2832 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2835 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2837 req.fid = rte_cpu_to_le_16(0xffff);
2838 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2839 req.evb_mode = bp->pf.evb_mode;
2841 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2842 HWRM_CHECK_RESULT();
2848 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2849 uint8_t tunnel_type)
2851 struct hwrm_tunnel_dst_port_alloc_input req = {0};
2852 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2855 HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
2856 req.tunnel_type = tunnel_type;
2857 req.tunnel_dst_port_val = port;
2858 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2859 HWRM_CHECK_RESULT();
2861 switch (tunnel_type) {
2862 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2863 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2864 bp->vxlan_port = port;
2866 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2867 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2868 bp->geneve_port = port;
2879 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2880 uint8_t tunnel_type)
2882 struct hwrm_tunnel_dst_port_free_input req = {0};
2883 struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2886 HWRM_PREP(req, TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
2888 req.tunnel_type = tunnel_type;
2889 req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2890 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2892 HWRM_CHECK_RESULT();
2898 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2901 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2902 struct hwrm_func_cfg_input req = {0};
2905 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2907 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2908 req.flags = rte_cpu_to_le_32(flags);
2909 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2911 HWRM_CHECK_RESULT();
2917 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2919 uint32_t *flag = flagp;
2921 vnic->flags = *flag;
2924 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2926 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
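/*
 * Register the VF request-forwarding buffer with the firmware. The buffer
 * holds one HWRM_MAX_REQ_LEN slot per active VF (allocated in
 * bnxt_hwrm_allocate_vfs()), and its page size is passed as a log2 value
 * computed by page_getenum().
 */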
2929 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2932 struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2933 struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2935 HWRM_PREP(req, FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
2937 req.req_buf_num_pages = rte_cpu_to_le_16(1);
2938 req.req_buf_page_size = rte_cpu_to_le_16(
2939 page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2940 req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2941 req.req_buf_page_addr0 =
2942 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
2943 if (req.req_buf_page_addr0 == 0) {
2945 "unable to map buffer address to physical memory\n");
2949 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2951 HWRM_CHECK_RESULT();
2957 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2960 struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2961 struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2963 HWRM_PREP(req, FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
2965 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2967 HWRM_CHECK_RESULT();
2973 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2975 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2976 struct hwrm_func_cfg_input req = {0};
2979 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
2981 req.fid = rte_cpu_to_le_16(0xffff);
2982 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2983 req.enables = rte_cpu_to_le_32(
2984 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2985 req.async_event_cr = rte_cpu_to_le_16(
2986 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2987 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2989 HWRM_CHECK_RESULT();
2995 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2997 struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2998 struct hwrm_func_vf_cfg_input req = {0};
3001 HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
3003 req.enables = rte_cpu_to_le_32(
3004 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3005 req.async_event_cr = rte_cpu_to_le_16(
3006 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
3007 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3009 HWRM_CHECK_RESULT();
3015 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
3017 struct hwrm_func_cfg_input req = {0};
3018 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3019 uint16_t dflt_vlan, fid;
3020 uint32_t func_cfg_flags;
3023 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3026 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
3027 fid = bp->pf.vf_info[vf].fid;
3028 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
3030 fid = rte_cpu_to_le_16(0xffff);
3031 func_cfg_flags = bp->pf.func_cfg_flags;
3032 dflt_vlan = bp->vlan;
3035 req.flags = rte_cpu_to_le_32(func_cfg_flags);
3036 req.fid = rte_cpu_to_le_16(fid);
3037 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3038 req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
3040 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3042 HWRM_CHECK_RESULT();
3048 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
3049 uint16_t max_bw, uint16_t enables)
3051 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3052 struct hwrm_func_cfg_input req = {0};
3055 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3057 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3058 req.enables |= rte_cpu_to_le_32(enables);
3059 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3060 req.max_bw = rte_cpu_to_le_32(max_bw);
3061 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3063 HWRM_CHECK_RESULT();
3069 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
3071 struct hwrm_func_cfg_input req = {0};
3072 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3075 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3077 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3078 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3079 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3080 req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
3082 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3084 HWRM_CHECK_RESULT();
3090 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
3095 rc = bnxt_hwrm_func_cfg_def_cp(bp);
3097 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
3102 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
3103 void *encaped, size_t ec_size)
3106 struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
3107 struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3109 if (ec_size > sizeof(req.encap_request))
3112 HWRM_PREP(req, REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
3114 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3115 memcpy(req.encap_request, encaped, ec_size);
3117 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3119 HWRM_CHECK_RESULT();
3125 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
3126 struct ether_addr *mac)
3128 struct hwrm_func_qcfg_input req = {0};
3129 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3132 HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
3134 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3135 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3137 HWRM_CHECK_RESULT();
3139 memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
3146 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
3147 void *encaped, size_t ec_size)
3150 struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
3151 struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3153 if (ec_size > sizeof(req.encap_request))
3156 HWRM_PREP(req, EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
3158 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3159 memcpy(req.encap_request, encaped, ec_size);
3161 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3163 HWRM_CHECK_RESULT();
3169 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3170 struct rte_eth_stats *stats, uint8_t rx)
3173 struct hwrm_stat_ctx_query_input req = {.req_type = 0};
3174 struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
3176 HWRM_PREP(req, STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
3178 req.stat_ctx_id = rte_cpu_to_le_32(cid);
3180 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3182 HWRM_CHECK_RESULT();
3185 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3186 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3187 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3188 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3189 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3190 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3191 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3192 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3194 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3195 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3196 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3197 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3198 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3199 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3200 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
3209 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3211 struct hwrm_port_qstats_input req = {0};
3212 struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3213 struct bnxt_pf_info *pf = &bp->pf;
3216 HWRM_PREP(req, PORT_QSTATS, BNXT_USE_CHIMP_MB);
3218 req.port_id = rte_cpu_to_le_16(pf->port_id);
3219 req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3220 req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3221 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3223 HWRM_CHECK_RESULT();
3229 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3231 struct hwrm_port_clr_stats_input req = {0};
3232 struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3233 struct bnxt_pf_info *pf = &bp->pf;
3236 /* Not allowed on NS2 device, NPAR, MultiHost, VF */
3237 if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
3238 BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
3241 HWRM_PREP(req, PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
3243 req.port_id = rte_cpu_to_le_16(pf->port_id);
3244 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3246 HWRM_CHECK_RESULT();
3252 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3254 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3255 struct hwrm_port_led_qcaps_input req = {0};
3261 HWRM_PREP(req, PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
3262 req.port_id = bp->pf.port_id;
3263 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3265 HWRM_CHECK_RESULT();
3267 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3270 bp->num_leds = resp->num_leds;
3271 memcpy(bp->leds, &resp->led0_id,
3272 sizeof(bp->leds[0]) * bp->num_leds);
3273 for (i = 0; i < bp->num_leds; i++) {
3274 struct bnxt_led_info *led = &bp->leds[i];
3276 uint16_t caps = led->led_state_caps;
3278 if (!led->led_group_id ||
3279 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3291 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3293 struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3294 struct hwrm_port_led_cfg_input req = {0};
3295 struct bnxt_led_cfg *led_cfg;
3296 uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3297 uint16_t duration = 0;
3300 if (!bp->num_leds || BNXT_VF(bp))
3303 HWRM_PREP(req, PORT_LED_CFG, BNXT_USE_CHIMP_MB);
3306 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3307 duration = rte_cpu_to_le_16(500);
3309 req.port_id = bp->pf.port_id;
3310 req.num_leds = bp->num_leds;
3311 led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3312 for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3313 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3314 led_cfg->led_id = bp->leds[i].led_id;
3315 led_cfg->led_state = led_state;
3316 led_cfg->led_blink_on = duration;
3317 led_cfg->led_blink_off = duration;
3318 led_cfg->led_group_id = bp->leds[i].led_group_id;
3321 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3323 HWRM_CHECK_RESULT();
3329 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3333 struct hwrm_nvm_get_dir_info_input req = {0};
3334 struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3336 HWRM_PREP(req, NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
3338 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3340 HWRM_CHECK_RESULT();
3344 *entries = rte_le_to_cpu_32(resp->entries);
3345 *length = rte_le_to_cpu_32(resp->entry_length);
3350 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3353 uint32_t dir_entries;
3354 uint32_t entry_length;
3357 rte_iova_t dma_handle;
3358 struct hwrm_nvm_get_dir_entries_input req = {0};
3359 struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3361 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3365 *data++ = dir_entries;
3366 *data++ = entry_length;
3368 memset(data, 0xff, len);
3370 buflen = dir_entries * entry_length;
3371 buf = rte_malloc("nvm_dir", buflen, 0);
3372 rte_mem_lock_page(buf);
3375 dma_handle = rte_mem_virt2iova(buf);
3376 if (dma_handle == 0) {
3378 "unable to map response address to physical memory\n");
3381 HWRM_PREP(req, NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
3382 req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3383 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3386 memcpy(data, buf, len > buflen ? buflen : len);
3389 HWRM_CHECK_RESULT();
3395 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3396 uint32_t offset, uint32_t length,
3401 rte_iova_t dma_handle;
3402 struct hwrm_nvm_read_input req = {0};
3403 struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3405 buf = rte_malloc("nvm_item", length, 0);
3406 rte_mem_lock_page(buf);
3410 dma_handle = rte_mem_virt2iova(buf);
3411 if (dma_handle == 0) {
3413 "unable to map response address to physical memory\n");
3416 HWRM_PREP(req, NVM_READ, BNXT_USE_CHIMP_MB);
3417 req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3418 req.dir_idx = rte_cpu_to_le_16(index);
3419 req.offset = rte_cpu_to_le_32(offset);
3420 req.len = rte_cpu_to_le_32(length);
3421 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3423 memcpy(data, buf, length);
3426 HWRM_CHECK_RESULT();
3432 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3435 struct hwrm_nvm_erase_dir_entry_input req = {0};
3436 struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3438 HWRM_PREP(req, NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
3439 req.dir_idx = rte_cpu_to_le_16(index);
3440 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3441 HWRM_CHECK_RESULT();
3448 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3449 uint16_t dir_ordinal, uint16_t dir_ext,
3450 uint16_t dir_attr, const uint8_t *data,
3454 struct hwrm_nvm_write_input req = {0};
3455 struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3456 rte_iova_t dma_handle;
3459 buf = rte_malloc("nvm_write", data_len, 0);
3460 rte_mem_lock_page(buf);
3464 dma_handle = rte_mem_virt2iova(buf);
3465 if (dma_handle == 0) {
3467 "unable to map response address to physical memory\n");
3470 memcpy(buf, data, data_len);
3472 HWRM_PREP(req, NVM_WRITE, BNXT_USE_CHIMP_MB);
3474 req.dir_type = rte_cpu_to_le_16(dir_type);
3475 req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3476 req.dir_ext = rte_cpu_to_le_16(dir_ext);
3477 req.dir_attr = rte_cpu_to_le_16(dir_attr);
3478 req.dir_data_length = rte_cpu_to_le_32(data_len);
3479 req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3481 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3484 HWRM_CHECK_RESULT();
3491 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3493 uint32_t *count = cbdata;
3495 *count = *count + 1;
3498 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3499 struct bnxt_vnic_info *vnic __rte_unused)
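/*
 * Count a VF's active VNICs by reusing the query-and-config machinery below
 * with bnxt_vnic_count() as the per-VNIC callback and a no-op HWRM callback,
 * so nothing is reprogrammed in firmware.
 */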
3504 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3508 bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3509 &count, bnxt_vnic_count_hwrm_stub);
3514 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3517 struct hwrm_func_vf_vnic_ids_query_input req = {0};
3518 struct hwrm_func_vf_vnic_ids_query_output *resp =
3519 bp->hwrm_cmd_resp_addr;
3522 /* First query all VNIC ids */
3523 HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
3525 req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3526 req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3527 req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3529 if (req.vnic_id_tbl_addr == 0) {
3532 "unable to map VNIC ID table address to physical memory\n");
3535 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3538 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
3540 } else if (resp->error_code) {
3541 rc = rte_le_to_cpu_16(resp->error_code);
3543 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
3546 rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3554 * This function queries the VNIC IDs for a specified VF, invokes vnic_cb
3555 * to update the relevant vnic_info fields using cbdata, and finally calls
3556 * hwrm_cb to program the new VNIC configuration.
3558 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3559 void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3560 int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3562 struct bnxt_vnic_info vnic;
3564 int i, num_vnic_ids;
3569 /* First query all VNIC ids */
3570 vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3571 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3572 RTE_CACHE_LINE_SIZE);
3573 if (vnic_ids == NULL) {
3577 for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3578 rte_mem_lock_page(((char *)vnic_ids) + sz);
3580 num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3582 if (num_vnic_ids < 0)
3583 return num_vnic_ids;
3585 /* Query each VNIC, apply the callback, then reprogram it via hwrm_cb */
3587 for (i = 0; i < num_vnic_ids; i++) {
3588 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3589 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3590 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3593 if (vnic.mru <= 4) /* Indicates unallocated */
3596 vnic_cb(&vnic, cbdata);
3598 rc = hwrm_cb(bp, &vnic);
3608 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3611 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3612 struct hwrm_func_cfg_input req = {0};
3615 HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
3617 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3618 req.enables |= rte_cpu_to_le_32(
3619 HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3620 req.vlan_antispoof_mode = on ?
3621 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3622 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3623 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3625 HWRM_CHECK_RESULT();
3631 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3633 struct bnxt_vnic_info vnic;
3636 int num_vnic_ids, i;
3640 vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3641 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3642 RTE_CACHE_LINE_SIZE);
3643 if (vnic_ids == NULL) {
3648 for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3649 rte_mem_lock_page(((char *)vnic_ids) + sz);
3651 rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3657 * Loop through to find the default VNIC ID.
3658 * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3659 * by sending the hwrm_func_qcfg command to the firmware.
3661 for (i = 0; i < num_vnic_ids; i++) {
3662 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3663 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3664 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3665 bp->pf.first_vf_id + vf);
3668 if (vnic.func_default) {
3670 return vnic.fw_vnic_id;
3673 /* Could not find a default VNIC. */
3674 PMD_DRV_LOG(ERR, "No default VNIC\n");
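/*
 * Install an exact-match (EM) flow in the CFA. Any previously allocated EM
 * filter is freed first, and each match field is copied into the request
 * only when the corresponding bit is set in the enables bitmap.
 */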
3680 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3682 struct bnxt_filter_info *filter)
3685 struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3686 struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3687 uint32_t enables = 0;
3689 if (filter->fw_em_filter_id != UINT64_MAX)
3690 bnxt_hwrm_clear_em_filter(bp, filter);
3692 HWRM_PREP(req, CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));
3694 req.flags = rte_cpu_to_le_32(filter->flags);
3696 enables = filter->enables |
3697 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3698 req.dst_id = rte_cpu_to_le_16(dst_id);
3700 if (filter->ip_addr_type) {
3701 req.ip_addr_type = filter->ip_addr_type;
3702 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3705 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3706 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3708 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3709 memcpy(req.src_macaddr, filter->src_macaddr,
3712 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3713 memcpy(req.dst_macaddr, filter->dst_macaddr,
3716 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3717 req.ovlan_vid = filter->l2_ovlan;
3719 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3720 req.ivlan_vid = filter->l2_ivlan;
3722 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3723 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3725 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3726 req.ip_protocol = filter->ip_protocol;
3728 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3729 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3731 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3732 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3734 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3735 req.src_port = rte_cpu_to_be_16(filter->src_port);
3737 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3738 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3740 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3741 req.mirror_vnic_id = filter->mirror_vnic_id;
3743 req.enables = rte_cpu_to_le_32(enables);
3745 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
3747 HWRM_CHECK_RESULT();
3749 filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3755 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3758 struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3759 struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3761 if (filter->fw_em_filter_id == UINT64_MAX)
3764 PMD_DRV_LOG(DEBUG, "Clear EM filter\n");
3765 HWRM_PREP(req, CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
3767 req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3769 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
3771 HWRM_CHECK_RESULT();
3774 filter->fw_em_filter_id = UINT64_MAX;
3775 filter->fw_l2_filter_id = UINT64_MAX;
3780 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3782 struct bnxt_filter_info *filter)
3785 struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3786 struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3787 bp->hwrm_cmd_resp_addr;
3788 uint32_t enables = 0;
3790 if (filter->fw_ntuple_filter_id != UINT64_MAX)
3791 bnxt_hwrm_clear_ntuple_filter(bp, filter);
3793 HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
3795 req.flags = rte_cpu_to_le_32(filter->flags);
3797 enables = filter->enables |
3798 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3799 req.dst_id = rte_cpu_to_le_16(dst_id);
3802 if (filter->ip_addr_type) {
3803 req.ip_addr_type = filter->ip_addr_type;
3805 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3808 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3809 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3811 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3812 memcpy(req.src_macaddr, filter->src_macaddr,
3815 //HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
3816 //memcpy(req.dst_macaddr, filter->dst_macaddr,
3819 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
3820 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3822 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3823 req.ip_protocol = filter->ip_protocol;
3825 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3826 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
3828 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
3829 req.src_ipaddr_mask[0] =
3830 rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
3832 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
3833 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
3835 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
3836 req.dst_ipaddr_mask[0] =
3837 rte_cpu_to_be_32(filter->dst_ipaddr_mask[0]);
3839 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
3840 req.src_port = rte_cpu_to_le_16(filter->src_port);
3842 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
3843 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
3845 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
3846 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
3848 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
3849 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
3851 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3852 req.mirror_vnic_id = filter->mirror_vnic_id;
3854 req.enables = rte_cpu_to_le_32(enables);
3856 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3858 HWRM_CHECK_RESULT();
3860 filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3866 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3867 struct bnxt_filter_info *filter)
3870 struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3871 struct hwrm_cfa_ntuple_filter_free_output *resp =
3872 bp->hwrm_cmd_resp_addr;
3874 if (filter->fw_ntuple_filter_id == UINT64_MAX)
3877 HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);
3879 req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3881 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3883 HWRM_CHECK_RESULT();
3886 filter->fw_ntuple_filter_id = UINT64_MAX;
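/*
 * Populate the VNIC's RSS redirection table by cycling fw_idx over the ring
 * groups (modulo rx_cp_nr_rings), skipping entries without a valid ring
 * group ID; if a full pass over the rings finds none, the function gives up
 * without programming the table. The result is pushed to firmware via
 * bnxt_hwrm_vnic_rss_cfg().
 */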
3891 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3893 unsigned int rss_idx, fw_idx, i;
3895 if (vnic->rss_table && vnic->hash_type) {
3897 * Fill the RSS hash & redirection table with
3898 * ring group ids for all VNICs
3900 for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
3901 rss_idx++, fw_idx++) {
3902 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
3903 fw_idx %= bp->rx_cp_nr_rings;
3904 if (vnic->fw_grp_ids[fw_idx] !=
3909 if (i == bp->rx_cp_nr_rings)
3911 vnic->rss_table[rss_idx] =
3912 vnic->fw_grp_ids[fw_idx];
3914 return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
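/*
 * Pack generic bnxt_coal settings into a RING_CMPL_RING_CFG_AGGINT_PARAMS
 * request: byte-swap each field and request a timer reset when the ring goes
 * idle. The 1/2 and 1/4 timer ratios noted below are properties of the
 * values presumably chosen by the caller; this helper only packs them.
 */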
3919 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
3920 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
3924 req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
3926 /* This is a 6-bit value and must not be 0, or we'll get a non-stop IRQ */
3927 req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
3929 /* This is a 6-bit value and must not be 0, or we'll get a non-stop IRQ */
3930 req->num_cmpl_dma_aggr_during_int =
3931 rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
3933 req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
3935 /* min timer set to 1/2 of interrupt timer */
3936 req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
3938 /* buf timer set to 1/4 of interrupt timer */
3939 req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
3941 req->cmpl_aggr_dma_tmr_during_int =
3942 rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
3944 flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
3945 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
3946 req->flags = rte_cpu_to_le_16(flags);
3949 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
3950 struct bnxt_coal *coal, uint16_t ring_id)
3952 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
3953 struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
3954 bp->hwrm_cmd_resp_addr;
3957 /* Set ring coalesce parameters only for Stratus 100G NIC */
3958 if (!bnxt_stratus_device(bp))
3961 HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS, BNXT_USE_CHIMP_MB);
3962 bnxt_hwrm_set_coal_params(coal, &req);
3963 req.ring_id = rte_cpu_to_le_16(ring_id);
3964 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3965 HWRM_CHECK_RESULT();
3970 int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
3972 struct hwrm_port_qstats_ext_input req = {0};
3973 struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
3974 struct bnxt_pf_info *pf = &bp->pf;
3977 if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
3978 bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
3981 HWRM_PREP(req, PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);
3983 req.port_id = rte_cpu_to_le_16(pf->port_id);
3984 if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
3985 req.tx_stat_host_addr =
3986 rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3988 rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
3990 if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
3991 req.rx_stat_host_addr =
3992 rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3994 rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
3996 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3999 bp->fw_rx_port_stats_ext_size = 0;
4000 bp->fw_tx_port_stats_ext_size = 0;
4002 bp->fw_rx_port_stats_ext_size =
4003 rte_le_to_cpu_16(resp->rx_stat_size);
4004 bp->fw_tx_port_stats_ext_size =
4005 rte_le_to_cpu_16(resp->tx_stat_size);
4008 HWRM_CHECK_RESULT();