1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2018 Broadcom
8 #include <rte_byteorder.h>
9 #include <rte_common.h>
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_memzone.h>
13 #include <rte_version.h>
17 #include "bnxt_filter.h"
18 #include "bnxt_hwrm.h"
21 #include "bnxt_ring.h"
24 #include "bnxt_vnic.h"
25 #include "hsi_struct_def_dpdk.h"
29 #define HWRM_CMD_TIMEOUT 10000
30 #define HWRM_VERSION_1_9_1 0x10901
32 struct bnxt_plcmodes_cfg {
34 uint16_t jumbo_thresh;
36 uint16_t hds_threshold;
39 static int page_getenum(size_t size)
55 PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
56 return sizeof(void *) * 8 - 1;
59 static int page_roundup(size_t size)
61 return 1 << page_getenum(size);
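/*
 * Rough illustration (the page_getenum() body is elided above): it maps a
 * size to the exponent of the smallest supported page size that can hold
 * it, so page_roundup() rounds the size up to that page size in bytes,
 * e.g. page_roundup(3000) == 4096, assuming 2^12 is one of the steps.
 */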
65 * HWRM Functions (sent to HWRM)
66 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
67 * fails (i.e., a timeout), and a positive non-zero HWRM error code if the
68 * HWRM command fails on the ChiMP.
71 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
75 struct input *req = msg;
76 struct output *resp = bp->hwrm_cmd_resp_addr;
80 uint16_t max_req_len = bp->max_req_len;
81 struct hwrm_short_input short_input = { 0 };
83 if (bp->flags & BNXT_FLAG_SHORT_CMD) {
84 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
86 memset(short_cmd_req, 0, bp->max_req_len);
87 memcpy(short_cmd_req, req, msg_len);
89 short_input.req_type = rte_cpu_to_le_16(req->req_type);
90 short_input.signature = rte_cpu_to_le_16(
91 HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
92 short_input.size = rte_cpu_to_le_16(msg_len);
93 short_input.req_addr =
94 rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);
96 data = (uint32_t *)&short_input;
97 msg_len = sizeof(short_input);
99 /* Sync memory write before updating doorbell */
102 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
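/*
 * With the short command format, the full request has already been copied
 * into the pre-mapped DMA buffer above; only the small hwrm_short_input
 * descriptor (request type, signature, size and DMA address of the real
 * request) is written through BAR0 below.
 */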
105 /* Write request msg to hwrm channel */
106 for (i = 0; i < msg_len; i += 4) {
107 bar = (uint8_t *)bp->bar0 + i;
108 rte_write32(*data, bar);
112 /* Zero the rest of the request space */
113 for (; i < max_req_len; i += 4) {
114 bar = (uint8_t *)bp->bar0 + i;
118 /* Ring channel doorbell */
119 bar = (uint8_t *)bp->bar0 + 0x100;
122 /* Poll for the valid bit */
123 for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
124 /* Sanity check on the resp->resp_len */
126 if (resp->resp_len && resp->resp_len <=
128 /* Last byte of resp contains the valid key */
129 valid = (uint8_t *)resp + resp->resp_len - 1;
130 if (*valid == HWRM_RESP_VALID_KEY)
136 if (i >= HWRM_CMD_TIMEOUT) {
137 PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
148 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
149 * spinlock, and does initial processing.
151 * HWRM_CHECK_RESULT() returns from the calling function with an error on
152 * failure, and releases the spinlock before doing so. If the regular int
153 * return codes are not used by the function, HWRM_CHECK_RESULT() should not
154 * be used directly; rather, it should be copied and modified to suit the function.
156 * HWRM_UNLOCK() must be called after all response processing is completed.
158 #define HWRM_PREP(req, type) do { \
159 rte_spinlock_lock(&bp->hwrm_lock); \
160 memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
161 req.req_type = rte_cpu_to_le_16(HWRM_##type); \
162 req.cmpl_ring = rte_cpu_to_le_16(-1); \
163 req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
164 req.target_id = rte_cpu_to_le_16(0xffff); \
165 req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
168 #define HWRM_CHECK_RESULT() do {\
170 PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
171 rte_spinlock_unlock(&bp->hwrm_lock); \
174 if (resp->error_code) { \
175 rc = rte_le_to_cpu_16(resp->error_code); \
176 if (resp->resp_len >= 16) { \
177 struct hwrm_err_output *tmp_hwrm_err_op = \
180 "error %d:%d:%08x:%04x\n", \
181 rc, tmp_hwrm_err_op->cmd_err, \
183 tmp_hwrm_err_op->opaque_0), \
185 tmp_hwrm_err_op->opaque_1)); \
187 PMD_DRV_LOG(ERR, "error %d\n", rc); \
189 rte_spinlock_unlock(&bp->hwrm_lock); \
194 #define HWRM_UNLOCK() rte_spinlock_unlock(&bp->hwrm_lock)
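/*
 * A minimal sketch of the resulting calling convention. The handler name
 * below is hypothetical; the types and flow mirror bnxt_hwrm_func_reset()
 * later in this file:
 *
 *	int bnxt_hwrm_example(struct bnxt *bp)
 *	{
 *		int rc = 0;
 *		struct hwrm_func_reset_input req = {.req_type = 0 };
 *		struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *		HWRM_PREP(req, FUNC_RESET);	// take hwrm_lock, fill header
 *		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *		HWRM_CHECK_RESULT();		// on error: unlock, return rc
 *		// ... read resp fields while hwrm_lock is still held ...
 *		HWRM_UNLOCK();			// release hwrm_lock
 *		return rc;
 *	}
 */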
196 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
199 struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
200 struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
202 HWRM_PREP(req, CFA_L2_SET_RX_MASK);
203 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
206 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
214 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
215 struct bnxt_vnic_info *vnic,
217 struct bnxt_vlan_table_entry *vlan_table)
220 struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
221 struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
224 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
227 HWRM_PREP(req, CFA_L2_SET_RX_MASK);
228 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
230 /* FIXME: add a multicast flag once adding multicast addresses is supported
233 if (vnic->flags & BNXT_VNIC_INFO_BCAST)
234 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
235 if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
236 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
237 if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
238 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
239 if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
240 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
241 if (vnic->flags & BNXT_VNIC_INFO_MCAST)
242 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
243 if (vnic->mc_addr_cnt) {
244 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
245 req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
246 req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
249 if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
250 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
251 req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
252 rte_mem_virt2iova(vlan_table));
253 req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
255 req.mask = rte_cpu_to_le_32(mask);
257 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
265 int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
267 struct bnxt_vlan_antispoof_table_entry *vlan_table)
270 struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
271 struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
272 bp->hwrm_cmd_resp_addr;
275 * Older HWRM versions did not support this command, and the set_rx_mask
276 * list was used for anti-spoof. In 1.8.0, the TX path configuration was
277 * removed from the set_rx_mask call, and this command was added.
279 * This command is also present from 1.7.8.11 and higher,
282 if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
283 if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
284 if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
289 HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
290 req.fid = rte_cpu_to_le_16(fid);
292 req.vlan_tag_mask_tbl_addr =
293 rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
294 req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
296 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
304 int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
305 struct bnxt_filter_info *filter)
308 struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
309 struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
311 if (filter->fw_l2_filter_id == UINT64_MAX)
314 HWRM_PREP(req, CFA_L2_FILTER_FREE);
316 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
318 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
323 filter->fw_l2_filter_id = UINT64_MAX;
328 int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
330 struct bnxt_filter_info *filter)
333 struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
334 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
335 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
336 const struct rte_eth_vmdq_rx_conf *conf =
337 &dev_conf->rx_adv_conf.vmdq_rx_conf;
338 uint32_t enables = 0;
339 uint16_t j = dst_id - 1;
341 /* TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ? */
342 if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
343 conf->pool_map[j].pools & (1UL << j)) {
345 "Add vlan %u to vmdq pool %u\n",
346 conf->pool_map[j].vlan_id, j);
348 filter->l2_ivlan = conf->pool_map[j].vlan_id;
350 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
351 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
354 if (filter->fw_l2_filter_id != UINT64_MAX)
355 bnxt_hwrm_clear_l2_filter(bp, filter);
357 HWRM_PREP(req, CFA_L2_FILTER_ALLOC);
359 req.flags = rte_cpu_to_le_32(filter->flags);
361 enables = filter->enables |
362 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
363 req.dst_id = rte_cpu_to_le_16(dst_id);
366 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
367 memcpy(req.l2_addr, filter->l2_addr,
370 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
371 memcpy(req.l2_addr_mask, filter->l2_addr_mask,
374 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
375 req.l2_ovlan = filter->l2_ovlan;
377 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
378 req.l2_ivlan = filter->l2_ivlan;
380 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
381 req.l2_ovlan_mask = filter->l2_ovlan_mask;
383 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
384 req.l2_ivlan_mask = filter->l2_ivlan_mask;
385 if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
386 req.src_id = rte_cpu_to_le_32(filter->src_id);
387 if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
388 req.src_type = filter->src_type;
390 req.enables = rte_cpu_to_le_32(enables);
392 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
396 filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
402 int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
404 struct hwrm_port_mac_cfg_input req = {.req_type = 0};
405 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
412 HWRM_PREP(req, PORT_MAC_CFG);
415 flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
418 HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
419 if (ptp->tx_tstamp_en)
420 flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
423 HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
424 req.flags = rte_cpu_to_le_32(flags);
425 req.enables = rte_cpu_to_le_32
426 (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
427 req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);
429 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
435 static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
438 struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
439 struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
440 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
442 /* if (bp->hwrm_spec_code < 0x10801 || ptp) TBD */
446 HWRM_PREP(req, PORT_MAC_PTP_QCFG);
448 req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
450 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
454 if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
457 ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
461 ptp->rx_regs[BNXT_PTP_RX_TS_L] =
462 rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
463 ptp->rx_regs[BNXT_PTP_RX_TS_H] =
464 rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
465 ptp->rx_regs[BNXT_PTP_RX_SEQ] =
466 rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
467 ptp->rx_regs[BNXT_PTP_RX_FIFO] =
468 rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
469 ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
470 rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
471 ptp->tx_regs[BNXT_PTP_TX_TS_L] =
472 rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
473 ptp->tx_regs[BNXT_PTP_TX_TS_H] =
474 rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
475 ptp->tx_regs[BNXT_PTP_TX_SEQ] =
476 rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
477 ptp->tx_regs[BNXT_PTP_TX_FIFO] =
478 rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
486 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
489 struct hwrm_func_qcaps_input req = {.req_type = 0 };
490 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
491 uint16_t new_max_vfs;
495 HWRM_PREP(req, FUNC_QCAPS);
497 req.fid = rte_cpu_to_le_16(0xffff);
499 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
503 bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
504 flags = rte_le_to_cpu_32(resp->flags);
506 bp->pf.port_id = resp->port_id;
507 bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
508 new_max_vfs = bp->pdev->max_vfs;
509 if (new_max_vfs != bp->pf.max_vfs) {
511 rte_free(bp->pf.vf_info);
512 bp->pf.vf_info = rte_malloc("bnxt_vf_info",
513 sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
514 bp->pf.max_vfs = new_max_vfs;
515 for (i = 0; i < new_max_vfs; i++) {
516 bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
517 bp->pf.vf_info[i].vlan_table =
518 rte_zmalloc("VF VLAN table",
521 if (bp->pf.vf_info[i].vlan_table == NULL)
523 "Fail to alloc VLAN table for VF %d\n",
527 bp->pf.vf_info[i].vlan_table);
528 bp->pf.vf_info[i].vlan_as_table =
529 rte_zmalloc("VF VLAN AS table",
532 if (bp->pf.vf_info[i].vlan_as_table == NULL)
534 "Alloc VLAN AS table for VF %d fail\n",
538 bp->pf.vf_info[i].vlan_as_table);
539 STAILQ_INIT(&bp->pf.vf_info[i].filter);
544 bp->fw_fid = rte_le_to_cpu_32(resp->fid);
545 memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
546 bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
547 bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
548 bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
549 bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
550 bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
551 /* TODO: For now, do not support VMDq/RFS on VFs. */
556 bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
560 bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
562 bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
563 if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
564 bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
565 PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
567 bnxt_hwrm_ptp_qcfg(bp);
576 int bnxt_hwrm_func_reset(struct bnxt *bp)
579 struct hwrm_func_reset_input req = {.req_type = 0 };
580 struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
582 HWRM_PREP(req, FUNC_RESET);
584 req.enables = rte_cpu_to_le_32(0);
586 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
594 int bnxt_hwrm_func_driver_register(struct bnxt *bp)
597 struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
598 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
600 if (bp->flags & BNXT_FLAG_REGISTERED)
603 HWRM_PREP(req, FUNC_DRV_RGTR);
604 req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
605 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
606 req.ver_maj = RTE_VER_YEAR;
607 req.ver_min = RTE_VER_MONTH;
608 req.ver_upd = RTE_VER_MINOR;
611 req.enables |= rte_cpu_to_le_32(
612 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
613 memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
614 RTE_MIN(sizeof(req.vf_req_fwd),
615 sizeof(bp->pf.vf_req_fwd)));
618 * The PF can sniff HWRM commands issued by a VF. This can be set up by
619 * the Linux driver and inherited by the DPDK PF driver. Clear
620 * this HWRM sniffer list in the FW because the DPDK PF driver does
624 rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
627 req.async_event_fwd[0] |=
628 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
629 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
630 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
631 req.async_event_fwd[1] |=
632 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
633 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
635 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
640 bp->flags |= BNXT_FLAG_REGISTERED;
645 int bnxt_hwrm_ver_get(struct bnxt *bp)
648 struct hwrm_ver_get_input req = {.req_type = 0 };
649 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
652 uint16_t max_resp_len;
653 char type[RTE_MEMZONE_NAMESIZE];
654 uint32_t dev_caps_cfg;
656 bp->max_req_len = HWRM_MAX_REQ_LEN;
657 HWRM_PREP(req, VER_GET);
659 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
660 req.hwrm_intf_min = HWRM_VERSION_MINOR;
661 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
663 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
667 PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
668 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
669 resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
670 resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
671 bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
672 (resp->hwrm_fw_min_8b << 16) |
673 (resp->hwrm_fw_bld_8b << 8) |
674 resp->hwrm_fw_rsvd_8b;
675 PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
676 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
678 my_version = HWRM_VERSION_MAJOR << 16;
679 my_version |= HWRM_VERSION_MINOR << 8;
680 my_version |= HWRM_VERSION_UPDATE;
682 fw_version = resp->hwrm_intf_maj_8b << 16;
683 fw_version |= resp->hwrm_intf_min_8b << 8;
684 fw_version |= resp->hwrm_intf_upd_8b;
685 bp->hwrm_spec_code = fw_version;
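/*
 * Both values are packed so that versions compare numerically: the
 * firmware version above as (maj << 24) | (min << 16) | (bld << 8) | rsvd,
 * and the HWRM interface version here as (maj << 16) | (min << 8) | upd,
 * so e.g. interface version 1.9.1 packs to 0x10901 (HWRM_VERSION_1_9_1).
 */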
687 if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
688 PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
693 if (my_version != fw_version) {
694 PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
695 if (my_version < fw_version) {
697 "Firmware API version is newer than driver.\n");
699 "The driver may be missing features.\n");
702 "Firmware API version is older than driver.\n");
704 "Not all driver features may be functional.\n");
708 if (bp->max_req_len > rte_le_to_cpu_16(resp->max_req_win_len)) {
709 PMD_DRV_LOG(ERR, "Unsupported request length\n");
712 bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
713 max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
714 dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);
716 if (bp->max_resp_len != max_resp_len) {
717 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
718 bp->pdev->addr.domain, bp->pdev->addr.bus,
719 bp->pdev->addr.devid, bp->pdev->addr.function);
721 rte_free(bp->hwrm_cmd_resp_addr);
723 bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
724 if (bp->hwrm_cmd_resp_addr == NULL) {
728 rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
729 bp->hwrm_cmd_resp_dma_addr =
730 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
731 if (bp->hwrm_cmd_resp_dma_addr == 0) {
733 "Unable to map response buffer to physical memory.\n");
737 bp->max_resp_len = max_resp_len;
741 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
743 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
744 PMD_DRV_LOG(DEBUG, "Short command supported\n");
746 rte_free(bp->hwrm_short_cmd_req_addr);
748 bp->hwrm_short_cmd_req_addr = rte_malloc(type,
750 if (bp->hwrm_short_cmd_req_addr == NULL) {
754 rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
755 bp->hwrm_short_cmd_req_dma_addr =
756 rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
757 if (bp->hwrm_short_cmd_req_dma_addr == 0) {
758 rte_free(bp->hwrm_short_cmd_req_addr);
760 "Unable to map buffer to physical memory.\n");
765 bp->flags |= BNXT_FLAG_SHORT_CMD;
773 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
776 struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
777 struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
779 if (!(bp->flags & BNXT_FLAG_REGISTERED))
782 HWRM_PREP(req, FUNC_DRV_UNRGTR);
785 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
790 bp->flags &= ~BNXT_FLAG_REGISTERED;
795 static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
798 struct hwrm_port_phy_cfg_input req = {0};
799 struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
800 uint32_t enables = 0;
802 HWRM_PREP(req, PORT_PHY_CFG);
805 /* Setting fixed speed, but autoneg is on, so disable it */
806 if (bp->link_info.auto_mode && conf->link_speed) {
807 req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
808 PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
811 req.flags = rte_cpu_to_le_32(conf->phy_flags);
812 req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
813 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
815 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
816 * any auto mode, even "none".
818 if (!conf->link_speed) {
819 /* No speeds specified. Enable AutoNeg - all speeds */
821 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
823 /* AutoNeg - Advertise speeds specified. */
824 if (conf->auto_link_speed_mask &&
825 !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
827 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
828 req.auto_link_speed_mask =
829 conf->auto_link_speed_mask;
831 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
834 req.auto_duplex = conf->duplex;
835 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
836 req.auto_pause = conf->auto_pause;
837 req.force_pause = conf->force_pause;
838 /* Set force_pause if there is no auto or if there is a force */
839 if (req.auto_pause && !req.force_pause)
840 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
842 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
844 req.enables = rte_cpu_to_le_32(enables);
847 rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
848 PMD_DRV_LOG(INFO, "Force Link Down\n");
851 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
859 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
860 struct bnxt_link_info *link_info)
863 struct hwrm_port_phy_qcfg_input req = {0};
864 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
866 HWRM_PREP(req, PORT_PHY_QCFG);
868 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
872 link_info->phy_link_status = resp->link;
874 (link_info->phy_link_status ==
875 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
876 link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
877 link_info->duplex = resp->duplex_cfg;
878 link_info->pause = resp->pause;
879 link_info->auto_pause = resp->auto_pause;
880 link_info->force_pause = resp->force_pause;
881 link_info->auto_mode = resp->auto_mode;
882 link_info->phy_type = resp->phy_type;
883 link_info->media_type = resp->media_type;
885 link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
886 link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
887 link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
888 link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
889 link_info->phy_ver[0] = resp->phy_maj;
890 link_info->phy_ver[1] = resp->phy_min;
891 link_info->phy_ver[2] = resp->phy_bld;
895 PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
896 PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
897 PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
898 PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
899 PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
900 link_info->auto_link_speed_mask);
901 PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
902 link_info->force_link_speed);
907 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
910 struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
911 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
914 HWRM_PREP(req, QUEUE_QPORTCFG);
916 req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
917 /* HWRM Version >= 1.9.1 */
918 if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
920 HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
921 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
925 #define GET_QUEUE_INFO(x) \
926 bp->cos_queue[x].id = resp->queue_id##x; \
927 bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
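/*
 * GET_QUEUE_INFO() relies on token pasting, e.g. GET_QUEUE_INFO(0) expands
 * to reads of resp->queue_id0 and resp->queue_id0_service_profile.
 */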
940 if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
941 bp->tx_cosq_id = bp->cos_queue[0].id;
943 /* iterate and find the COSq profile to use for Tx */
944 for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
945 if (bp->cos_queue[i].profile ==
946 HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
947 bp->tx_cosq_id = bp->cos_queue[i].id;
952 PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);
957 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
958 struct bnxt_ring *ring,
959 uint32_t ring_type, uint32_t map_index,
960 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
963 uint32_t enables = 0;
964 struct hwrm_ring_alloc_input req = {.req_type = 0 };
965 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
967 HWRM_PREP(req, RING_ALLOC);
969 req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
970 req.fbo = rte_cpu_to_le_32(0);
971 /* Association of ring index with doorbell index */
972 req.logical_id = rte_cpu_to_le_16(map_index);
973 req.length = rte_cpu_to_le_32(ring->ring_size);
976 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
977 req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
979 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
980 req.ring_type = ring_type;
981 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
982 req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
983 if (stats_ctx_id != INVALID_STATS_CTX_ID)
985 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
987 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
988 req.ring_type = ring_type;
990 * TODO: Some HWRM versions crash with
991 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
993 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
996 PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
1001 req.enables = rte_cpu_to_le_32(enables);
1003 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1005 if (rc || resp->error_code) {
1006 if (rc == 0 && resp->error_code)
1007 rc = rte_le_to_cpu_16(resp->error_code);
1008 switch (ring_type) {
1009 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1011 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
1014 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1016 "hwrm_ring_alloc rx failed. rc:%d\n", rc);
1019 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1021 "hwrm_ring_alloc tx failed. rc:%d\n", rc);
1025 PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
1031 ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
1036 int bnxt_hwrm_ring_free(struct bnxt *bp,
1037 struct bnxt_ring *ring, uint32_t ring_type)
1040 struct hwrm_ring_free_input req = {.req_type = 0 };
1041 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
1043 HWRM_PREP(req, RING_FREE);
1045 req.ring_type = ring_type;
1046 req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
1048 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1050 if (rc || resp->error_code) {
1051 if (rc == 0 && resp->error_code)
1052 rc = rte_le_to_cpu_16(resp->error_code);
1055 switch (ring_type) {
1056 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1057 PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
1060 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1061 PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
1064 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1065 PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
1069 PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
1077 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1080 struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
1081 struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1083 HWRM_PREP(req, RING_GRP_ALLOC);
1085 req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
1086 req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
1087 req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
1088 req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
1090 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1092 HWRM_CHECK_RESULT();
1094 bp->grp_info[idx].fw_grp_id =
1095 rte_le_to_cpu_16(resp->ring_group_id);
1102 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
1105 struct hwrm_ring_grp_free_input req = {.req_type = 0 };
1106 struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
1108 HWRM_PREP(req, RING_GRP_FREE);
1110 req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
1112 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1114 HWRM_CHECK_RESULT();
1117 bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
1121 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1124 struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
1125 struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1127 if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
1130 HWRM_PREP(req, STAT_CTX_CLR_STATS);
1132 req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
1134 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1136 HWRM_CHECK_RESULT();
1142 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1143 unsigned int idx __rte_unused)
1146 struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1147 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1149 HWRM_PREP(req, STAT_CTX_ALLOC);
1151 req.update_period_ms = rte_cpu_to_le_32(0);
1153 req.stats_dma_addr =
1154 rte_cpu_to_le_64(cpr->hw_stats_map);
1156 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1158 HWRM_CHECK_RESULT();
1160 cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
1167 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1168 unsigned int idx __rte_unused)
1171 struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1172 struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1174 HWRM_PREP(req, STAT_CTX_FREE);
1176 req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
1178 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1180 HWRM_CHECK_RESULT();
1186 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1189 struct hwrm_vnic_alloc_input req = { 0 };
1190 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1192 /* map ring groups to this vnic */
1193 PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
1194 vnic->start_grp_id, vnic->end_grp_id);
1195 for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
1196 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1197 vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1198 vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1199 vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1200 vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
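/* The MRU is the MTU plus L2 overhead: Ethernet header, CRC and one VLAN tag. */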
1201 vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1202 ETHER_CRC_LEN + VLAN_TAG_SIZE;
1203 HWRM_PREP(req, VNIC_ALLOC);
1205 if (vnic->func_default)
1207 rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1208 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1210 HWRM_CHECK_RESULT();
1212 vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1214 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1218 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1219 struct bnxt_vnic_info *vnic,
1220 struct bnxt_plcmodes_cfg *pmode)
1223 struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1224 struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1226 HWRM_PREP(req, VNIC_PLCMODES_QCFG);
1228 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1230 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1232 HWRM_CHECK_RESULT();
1234 pmode->flags = rte_le_to_cpu_32(resp->flags);
1235 /* dflt_vnic bit doesn't exist in the _cfg command */
1236 pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1237 pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1238 pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1239 pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1246 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1247 struct bnxt_vnic_info *vnic,
1248 struct bnxt_plcmodes_cfg *pmode)
1251 struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1252 struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1254 HWRM_PREP(req, VNIC_PLCMODES_CFG);
1256 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1257 req.flags = rte_cpu_to_le_32(pmode->flags);
1258 req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1259 req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1260 req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1261 req.enables = rte_cpu_to_le_32(
1262 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1263 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1264 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1267 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1269 HWRM_CHECK_RESULT();
1275 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1278 struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1279 struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1280 uint32_t ctx_enable_flag = 0;
1281 struct bnxt_plcmodes_cfg pmodes;
1283 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1284 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1288 rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1292 HWRM_PREP(req, VNIC_CFG);
1294 /* Only RSS is supported for now; TBD: COS & LB */
1296 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
1297 if (vnic->lb_rule != 0xffff)
1298 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1299 if (vnic->cos_rule != 0xffff)
1300 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1301 if (vnic->rss_rule != 0xffff) {
1302 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1303 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1305 req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
1306 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1307 req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1308 req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1309 req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1310 req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1311 req.mru = rte_cpu_to_le_16(vnic->mru);
1312 if (vnic->func_default)
1314 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1315 if (vnic->vlan_strip)
1317 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1320 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1321 if (vnic->roce_dual)
1322 req.flags |= rte_cpu_to_le_32(
1323 HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1324 if (vnic->roce_only)
1325 req.flags |= rte_cpu_to_le_32(
1326 HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1327 if (vnic->rss_dflt_cr)
1328 req.flags |= rte_cpu_to_le_32(
1329 HWRM_VNIC_CFG_INPUT_FLAGS_RSS_DFLT_CR_MODE);
1331 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1333 HWRM_CHECK_RESULT();
1336 rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1341 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1345 struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1346 struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1348 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1349 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1352 HWRM_PREP(req, VNIC_QCFG);
1355 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1356 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1357 req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1359 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1361 HWRM_CHECK_RESULT();
1363 vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1364 vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1365 vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1366 vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1367 vnic->mru = rte_le_to_cpu_16(resp->mru);
1368 vnic->func_default = rte_le_to_cpu_32(
1369 resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1370 vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1371 HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1372 vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1373 HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1374 vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1375 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1376 vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1377 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1378 vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1379 HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1386 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1389 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1390 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1391 bp->hwrm_cmd_resp_addr;
1393 HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);
1395 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1397 HWRM_CHECK_RESULT();
1399 vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1401 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1406 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1409 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1410 struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1411 bp->hwrm_cmd_resp_addr;
1413 if (vnic->rss_rule == 0xffff) {
1414 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
1417 HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
1419 req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1421 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1423 HWRM_CHECK_RESULT();
1426 vnic->rss_rule = INVALID_HW_RING_ID;
1431 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1434 struct hwrm_vnic_free_input req = {.req_type = 0 };
1435 struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1437 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1438 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1442 HWRM_PREP(req, VNIC_FREE);
1444 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1446 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1448 HWRM_CHECK_RESULT();
1451 vnic->fw_vnic_id = INVALID_HW_RING_ID;
1455 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1456 struct bnxt_vnic_info *vnic)
1459 struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1460 struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1462 HWRM_PREP(req, VNIC_RSS_CFG);
1464 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1465 req.hash_mode_flags = vnic->hash_mode;
1467 req.ring_grp_tbl_addr =
1468 rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1469 req.hash_key_tbl_addr =
1470 rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1471 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1473 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1475 HWRM_CHECK_RESULT();
1481 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1482 struct bnxt_vnic_info *vnic)
1485 struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1486 struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1489 HWRM_PREP(req, VNIC_PLCMODES_CFG);
1491 req.flags = rte_cpu_to_le_32(
1492 HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1494 req.enables = rte_cpu_to_le_32(
1495 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1497 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1498 size -= RTE_PKTMBUF_HEADROOM;
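/*
 * A single Rx buffer can hold at most the mbuf data room minus headroom;
 * frames larger than this threshold use the jumbo (aggregation) placement
 * path instead.
 */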
1500 req.jumbo_thresh = rte_cpu_to_le_16(size);
1501 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1503 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1505 HWRM_CHECK_RESULT();
1511 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1512 struct bnxt_vnic_info *vnic, bool enable)
1515 struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1516 struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1518 HWRM_PREP(req, VNIC_TPA_CFG);
1521 req.enables = rte_cpu_to_le_32(
1522 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1523 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1524 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1525 req.flags = rte_cpu_to_le_32(
1526 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1527 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1528 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1529 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1530 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1531 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1532 req.max_agg_segs = rte_cpu_to_le_16(5);
1534 rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1535 req.min_agg_len = rte_cpu_to_le_32(512);
1537 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1539 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1541 HWRM_CHECK_RESULT();
1547 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1549 struct hwrm_func_cfg_input req = {0};
1550 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1553 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1554 req.enables = rte_cpu_to_le_32(
1555 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1556 memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1557 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1559 HWRM_PREP(req, FUNC_CFG);
1561 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1562 HWRM_CHECK_RESULT();
1565 bp->pf.vf_info[vf].random_mac = false;
1570 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1574 struct hwrm_func_qstats_input req = {.req_type = 0};
1575 struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1577 HWRM_PREP(req, FUNC_QSTATS);
1579 req.fid = rte_cpu_to_le_16(fid);
1581 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1583 HWRM_CHECK_RESULT();
1586 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1593 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1594 struct rte_eth_stats *stats)
1597 struct hwrm_func_qstats_input req = {.req_type = 0};
1598 struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1600 HWRM_PREP(req, FUNC_QSTATS);
1602 req.fid = rte_cpu_to_le_16(fid);
1604 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1606 HWRM_CHECK_RESULT();
1608 stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1609 stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1610 stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1611 stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1612 stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1613 stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1615 stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1616 stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1617 stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1618 stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1619 stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1620 stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1622 stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
1623 stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
1624 stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
1631 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1634 struct hwrm_func_clr_stats_input req = {.req_type = 0};
1635 struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1637 HWRM_PREP(req, FUNC_CLR_STATS);
1639 req.fid = rte_cpu_to_le_16(fid);
1641 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1643 HWRM_CHECK_RESULT();
1650 * HWRM utility functions
1653 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1658 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1659 struct bnxt_tx_queue *txq;
1660 struct bnxt_rx_queue *rxq;
1661 struct bnxt_cp_ring_info *cpr;
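/*
 * Completion rings are indexed Rx-first: i < rx_cp_nr_rings selects an Rx
 * queue, anything above maps to the Tx queue at (i - rx_cp_nr_rings).
 */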
1663 if (i >= bp->rx_cp_nr_rings) {
1664 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1667 rxq = bp->rx_queues[i];
1671 rc = bnxt_hwrm_stat_clear(bp, cpr);
1678 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1682 struct bnxt_cp_ring_info *cpr;
1684 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1686 if (i >= bp->rx_cp_nr_rings) {
1687 cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1689 cpr = bp->rx_queues[i]->cp_ring;
1690 bp->grp_info[i].fw_stats_ctx = -1;
1692 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1693 rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1694 cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1702 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1707 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1708 struct bnxt_tx_queue *txq;
1709 struct bnxt_rx_queue *rxq;
1710 struct bnxt_cp_ring_info *cpr;
1712 if (i >= bp->rx_cp_nr_rings) {
1713 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1716 rxq = bp->rx_queues[i];
1720 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1728 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1733 for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1735 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
1738 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1746 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1747 unsigned int idx __rte_unused)
1749 struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1751 bnxt_hwrm_ring_free(bp, cp_ring,
1752 HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1753 cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1754 memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1755 sizeof(*cpr->cp_desc_ring));
1756 cpr->cp_raw_cons = 0;
1759 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1764 for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1765 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1766 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1767 struct bnxt_ring *ring = txr->tx_ring_struct;
1768 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1769 unsigned int idx = bp->rx_cp_nr_rings + i + 1;
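/*
 * The +1 appears to reserve index 0 for the default completion ring,
 * which is freed last in this function.
 */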
1771 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1772 bnxt_hwrm_ring_free(bp, ring,
1773 HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1774 ring->fw_ring_id = INVALID_HW_RING_ID;
1775 memset(txr->tx_desc_ring, 0,
1776 txr->tx_ring_struct->ring_size *
1777 sizeof(*txr->tx_desc_ring));
1778 memset(txr->tx_buf_ring, 0,
1779 txr->tx_ring_struct->ring_size *
1780 sizeof(*txr->tx_buf_ring));
1784 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1785 bnxt_free_cp_ring(bp, cpr, idx);
1786 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1790 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1791 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1792 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1793 struct bnxt_ring *ring = rxr->rx_ring_struct;
1794 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1795 unsigned int idx = i + 1;
1797 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1798 bnxt_hwrm_ring_free(bp, ring,
1799 HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1800 ring->fw_ring_id = INVALID_HW_RING_ID;
1801 bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
1802 memset(rxr->rx_desc_ring, 0,
1803 rxr->rx_ring_struct->ring_size *
1804 sizeof(*rxr->rx_desc_ring));
1805 memset(rxr->rx_buf_ring, 0,
1806 rxr->rx_ring_struct->ring_size *
1807 sizeof(*rxr->rx_buf_ring));
1810 ring = rxr->ag_ring_struct;
1811 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1812 bnxt_hwrm_ring_free(bp, ring,
1813 HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1814 ring->fw_ring_id = INVALID_HW_RING_ID;
1815 memset(rxr->ag_buf_ring, 0,
1816 rxr->ag_ring_struct->ring_size *
1817 sizeof(*rxr->ag_buf_ring));
1819 bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID;
1821 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1822 bnxt_free_cp_ring(bp, cpr, idx);
1823 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
1824 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1828 /* Default completion ring */
1830 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1832 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1833 bnxt_free_cp_ring(bp, cpr, 0);
1834 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1841 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1846 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1847 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1854 void bnxt_free_hwrm_resources(struct bnxt *bp)
1856 /* Release HWRM command/response buffers */
1857 rte_free(bp->hwrm_cmd_resp_addr);
1858 rte_free(bp->hwrm_short_cmd_req_addr);
1859 bp->hwrm_cmd_resp_addr = NULL;
1860 bp->hwrm_short_cmd_req_addr = NULL;
1861 bp->hwrm_cmd_resp_dma_addr = 0;
1862 bp->hwrm_short_cmd_req_dma_addr = 0;
1865 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1867 struct rte_pci_device *pdev = bp->pdev;
1868 char type[RTE_MEMZONE_NAMESIZE];
1870 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
1871 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1872 bp->max_resp_len = HWRM_MAX_RESP_LEN;
1873 bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1874 rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1875 if (bp->hwrm_cmd_resp_addr == NULL)
1877 bp->hwrm_cmd_resp_dma_addr =
1878 rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
1879 if (bp->hwrm_cmd_resp_dma_addr == 0) {
1881 "unable to map response address to physical memory\n");
1884 rte_spinlock_init(&bp->hwrm_lock);
1889 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1891 struct bnxt_filter_info *filter;
1894 STAILQ_FOREACH(filter, &vnic->filter, next) {
1895 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1896 rc = bnxt_hwrm_clear_em_filter(bp, filter);
1897 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1898 rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1900 rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1908 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1910 struct bnxt_filter_info *filter;
1911 struct rte_flow *flow;
1914 STAILQ_FOREACH(flow, &vnic->flow_list, next) {
1915 filter = flow->filter;
1916 PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
1917 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1918 rc = bnxt_hwrm_clear_em_filter(bp, filter);
1919 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1920 rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1922 rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1924 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
1932 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1934 struct bnxt_filter_info *filter;
1937 STAILQ_FOREACH(filter, &vnic->filter, next) {
1938 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1939 rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
1941 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1942 rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
1945 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
1953 void bnxt_free_tunnel_ports(struct bnxt *bp)
1955 if (bp->vxlan_port_cnt)
1956 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
1957 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
1959 if (bp->geneve_port_cnt)
1960 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
1961 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
1962 bp->geneve_port = 0;
1965 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1969 if (bp->vnic_info == NULL)
1973 * Clean up VNICs in reverse order, to make sure the L2 filter
1974 * from vnic0 is last to be cleaned up.
1976 for (i = bp->nr_vnics - 1; i >= 0; i--) {
1977 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1979 bnxt_clear_hwrm_vnic_flows(bp, vnic);
1981 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1983 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1985 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
1987 bnxt_hwrm_vnic_free(bp, vnic);
1989 /* Ring resources */
1990 bnxt_free_all_hwrm_rings(bp);
1991 bnxt_free_all_hwrm_ring_grps(bp);
1992 bnxt_free_all_hwrm_stat_ctxs(bp);
1993 bnxt_free_tunnel_ports(bp);
1996 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1998 uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
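/* ETH_LINK_SPEED_AUTONEG is 0, so this tests that the FIXED bit is clear. */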
2000 if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2001 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2003 switch (conf_link_speed) {
2004 case ETH_LINK_SPEED_10M_HD:
2005 case ETH_LINK_SPEED_100M_HD:
2007 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2009 return hw_link_duplex;
2012 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2014 return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
2017 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2019 uint16_t eth_link_speed = 0;
2021 if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2022 return ETH_LINK_SPEED_AUTONEG;
2024 switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2025 case ETH_LINK_SPEED_100M:
2026 case ETH_LINK_SPEED_100M_HD:
2029 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2031 case ETH_LINK_SPEED_1G:
2033 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2035 case ETH_LINK_SPEED_2_5G:
2037 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2039 case ETH_LINK_SPEED_10G:
2041 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2043 case ETH_LINK_SPEED_20G:
2045 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2047 case ETH_LINK_SPEED_25G:
2049 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2051 case ETH_LINK_SPEED_40G:
2053 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2055 case ETH_LINK_SPEED_50G:
2057 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2059 case ETH_LINK_SPEED_100G:
2061 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2065 "Unsupported link speed %d; default to AUTO\n",
2069 return eth_link_speed;
2072 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2073 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2074 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2075 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
2077 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2081 if (link_speed == ETH_LINK_SPEED_AUTONEG)
2084 if (link_speed & ETH_LINK_SPEED_FIXED) {
2085 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
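/*
 * one_speed & (one_speed - 1) clears the lowest set bit; a non-zero
 * result means more than one speed was requested together with FIXED.
 */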
2087 if (one_speed & (one_speed - 1)) {
2089 "Invalid advertised speeds (%u) for port %u\n",
2090 link_speed, port_id);
2093 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2095 "Unsupported advertised speed (%u) for port %u\n",
2096 link_speed, port_id);
2100 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2102 "Unsupported advertised speeds (%u) for port %u\n",
2103 link_speed, port_id);
2111 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2115 if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2116 if (bp->link_info.support_speeds)
2117 return bp->link_info.support_speeds;
2118 link_speed = BNXT_SUPPORTED_SPEEDS;
2121 if (link_speed & ETH_LINK_SPEED_100M)
2122 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2123 if (link_speed & ETH_LINK_SPEED_100M_HD)
2124 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2125 if (link_speed & ETH_LINK_SPEED_1G)
2126 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2127 if (link_speed & ETH_LINK_SPEED_2_5G)
2128 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2129 if (link_speed & ETH_LINK_SPEED_10G)
2130 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2131 if (link_speed & ETH_LINK_SPEED_20G)
2132 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2133 if (link_speed & ETH_LINK_SPEED_25G)
2134 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2135 if (link_speed & ETH_LINK_SPEED_40G)
2136 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2137 if (link_speed & ETH_LINK_SPEED_50G)
2138 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2139 if (link_speed & ETH_LINK_SPEED_100G)
2140 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2144 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2146 uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2148 switch (hw_link_speed) {
2149 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2150 eth_link_speed = ETH_SPEED_NUM_100M;
2152 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2153 eth_link_speed = ETH_SPEED_NUM_1G;
2155 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2156 eth_link_speed = ETH_SPEED_NUM_2_5G;
2158 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2159 eth_link_speed = ETH_SPEED_NUM_10G;
2161 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2162 eth_link_speed = ETH_SPEED_NUM_20G;
2164 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2165 eth_link_speed = ETH_SPEED_NUM_25G;
2167 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2168 eth_link_speed = ETH_SPEED_NUM_40G;
2170 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2171 eth_link_speed = ETH_SPEED_NUM_50G;
2173 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2174 eth_link_speed = ETH_SPEED_NUM_100G;
2176 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2178 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2182 return eth_link_speed;
2185 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2187 uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2189 switch (hw_link_duplex) {
2190 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2191 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2193 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2195 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2196 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2199 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2203 return eth_link_duplex;
2206 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2209 struct bnxt_link_info *link_info = &bp->link_info;
2211 rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2214 "Get link config failed with rc %d\n", rc);
2217 if (link_info->link_speed)
2219 bnxt_parse_hw_link_speed(link_info->link_speed);
2221 link->link_speed = ETH_SPEED_NUM_NONE;
2222 link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2223 link->link_status = link_info->link_up;
2224 link->link_autoneg = link_info->auto_mode ==
2225 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2226 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2231 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2232 {
2233 int rc = 0;
2234 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2235 struct bnxt_link_info link_req;
2236 uint16_t speed, autoneg;
2238 if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2239 return 0;
2241 rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2242 bp->eth_dev->data->port_id);
2243 if (rc)
2244 goto error;
2246 memset(&link_req, 0, sizeof(link_req));
2247 link_req.link_up = link_up;
2251 autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2252 speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2253 link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2254 /* Autoneg can be done only when the FW allows */
2255 if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
2256 bp->link_info.force_link_speed)) {
2257 link_req.phy_flags |=
2258 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2259 link_req.auto_link_speed_mask =
2260 bnxt_parse_eth_link_speed_mask(bp,
2261 dev_conf->link_speeds);
2262 } else {
2263 if (bp->link_info.phy_type ==
2264 HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2265 bp->link_info.phy_type ==
2266 HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2267 bp->link_info.media_type ==
2268 HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2269 PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2270 return -EINVAL;
2271 }
2273 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2274 /* If user wants a particular speed try that first. */
2275 if (speed)
2276 link_req.link_speed = speed;
2277 else if (bp->link_info.force_link_speed)
2278 link_req.link_speed = bp->link_info.force_link_speed;
2279 else
2280 link_req.link_speed = bp->link_info.auto_link_speed;
2281 }
2282 link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2283 link_req.auto_pause = bp->link_info.auto_pause;
2284 link_req.force_pause = bp->link_info.force_pause;
2287 rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2288 if (rc) {
2289 PMD_DRV_LOG(ERR,
2290 "Set link config failed with rc %d\n", rc);
2291 }
2293 error:
2294 return rc;
2295 }
2298 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2299 {
2300 struct hwrm_func_qcfg_input req = {0};
2301 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2302 uint16_t flags;
2303 int rc = 0;
2305 HWRM_PREP(req, FUNC_QCFG);
2306 req.fid = rte_cpu_to_le_16(0xffff);
2308 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2310 HWRM_CHECK_RESULT();
2312 /* Hard-coded 0xfff mask: only the 12-bit VLAN ID is valid */
2313 bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2314 flags = rte_le_to_cpu_16(resp->flags);
2315 if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2316 bp->flags |= BNXT_FLAG_MULTI_HOST;
2318 switch (resp->port_partition_type) {
2319 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2320 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2321 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2322 /* FALLTHROUGH */
2323 bp->port_partition_type = resp->port_partition_type;
2324 break;
2325 default:
2326 bp->port_partition_type = 0;
2327 break;
2328 }
2330 HWRM_UNLOCK();
2332 return rc;
2333 }
2335 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2336 struct hwrm_func_qcaps_output *qcaps)
2337 {
2338 qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2339 memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2340 sizeof(qcaps->mac_address));
2341 qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2342 qcaps->max_rx_rings = fcfg->num_rx_rings;
2343 qcaps->max_tx_rings = fcfg->num_tx_rings;
2344 qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2345 qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2347 qcaps->first_vf_id = 0;
2348 qcaps->max_vnics = fcfg->num_vnics;
2349 qcaps->max_decap_records = 0;
2350 qcaps->max_encap_records = 0;
2351 qcaps->max_tx_wm_flows = 0;
2352 qcaps->max_tx_em_flows = 0;
2353 qcaps->max_rx_wm_flows = 0;
2354 qcaps->max_rx_em_flows = 0;
2355 qcaps->max_flow_id = 0;
2356 qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2357 qcaps->max_sp_tx_rings = 0;
2358 qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2359 }
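/*
 * Note: this helper is only a fallback for reserve_resources_from_vf()
 * below. If HWRM_FUNC_QCAPS fails for a VF, the values we *requested* in
 * the FUNC_CFG message are copied into the qcaps layout so the subsequent
 * resource accounting still sees sane numbers; fields the PMD does not
 * request are zeroed.
 */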
2361 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2362 {
2363 struct hwrm_func_cfg_input req = {0};
2364 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2365 int rc;
2367 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2368 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2369 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2370 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2371 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2372 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2373 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2374 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2375 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2376 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2377 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2378 req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2379 req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2380 ETHER_CRC_LEN + VLAN_TAG_SIZE *
2381 BNXT_NUM_VLANS);
2382 req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2383 req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2384 req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2385 req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2386 req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2387 req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2388 req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2389 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2390 req.fid = rte_cpu_to_le_16(0xffff);
2392 HWRM_PREP(req, FUNC_CFG);
2394 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2396 HWRM_CHECK_RESULT();
2398 HWRM_UNLOCK();
2400 return rc;
2401 }
2402 static void populate_vf_func_cfg_req(struct bnxt *bp,
2403 struct hwrm_func_cfg_input *req,
2404 int num_vfs)
2405 {
2406 req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2407 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2408 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2409 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2410 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2411 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2412 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2413 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2414 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2415 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2417 req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2418 ETHER_CRC_LEN + VLAN_TAG_SIZE *
2419 BNXT_NUM_VLANS);
2420 req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2421 ETHER_CRC_LEN + VLAN_TAG_SIZE *
2422 BNXT_NUM_VLANS);
2423 req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2424 (num_vfs + 1));
2425 req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2426 req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2427 (num_vfs + 1));
2428 req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2429 req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2430 req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2431 /* TODO: For now, do not support VMDq/RFS on VFs. */
2432 req->num_vnics = rte_cpu_to_le_16(1);
2433 req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2434 (num_vfs + 1));
2435 }
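/*
 * Worked example of the split above (illustrative numbers only): with
 * bp->max_tx_rings = 128 and num_vfs = 3, each of the PF and the three
 * VFs is offered 128 / (3 + 1) = 32 TX rings. Integer division means a
 * few rings may be left over; those are reclaimed for the PF later via
 * bnxt_hwrm_pf_func_cfg() and update_pf_resource_max().
 */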
2437 static void add_random_mac_if_needed(struct bnxt *bp,
2438 struct hwrm_func_cfg_input *cfg_req,
2439 int vf)
2440 {
2441 struct ether_addr mac;
2443 if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2444 return;
2446 if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2447 cfg_req->enables |=
2448 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2449 eth_random_addr(cfg_req->dflt_mac_addr);
2450 bp->pf.vf_info[vf].random_mac = true;
2451 } else {
2452 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2453 }
2454 }
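/*
 * Note: the random_mac flag recorded above lets later code tell a
 * PMD-generated VF MAC apart from one the host administrator assigned,
 * e.g. when deciding whether the VF may overwrite its own MAC address.
 */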
2456 static void reserve_resources_from_vf(struct bnxt *bp,
2457 struct hwrm_func_cfg_input *cfg_req,
2458 int vf)
2459 {
2460 struct hwrm_func_qcaps_input req = {0};
2461 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2462 int rc;
2464 /* Get the actual allocated values now */
2465 HWRM_PREP(req, FUNC_QCAPS);
2466 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2467 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2469 if (rc) {
2470 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
2471 copy_func_cfg_to_qcaps(cfg_req, resp);
2472 } else if (resp->error_code) {
2473 rc = rte_le_to_cpu_16(resp->error_code);
2474 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
2475 copy_func_cfg_to_qcaps(cfg_req, resp);
2476 }
2478 bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2479 bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2480 bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2481 bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2482 bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2483 bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2484 /*
2485 * TODO: While not supporting VMDq with VFs, max_vnics is always
2486 * forced to 1 in this case
2487 */
2488 //bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
2489 bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2490 HWRM_UNLOCK();
2491 }
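/*
 * Note: the subtractions above debit each VF's actual allocation from the
 * PF's running totals (bp->max_*), so once every VF is configured the PF
 * claims only what genuinely remains; update_pf_resource_max() below then
 * re-reads what the firmware really granted.
 */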
2494 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2495 {
2496 struct hwrm_func_qcfg_input req = {0};
2497 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2498 int rc;
2500 /* Query the default VLAN currently configured on the VF */
2501 HWRM_PREP(req, FUNC_QCFG);
2502 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2503 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2504 if (rc) {
2505 PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
2506 return rc;
2507 } else if (resp->error_code) {
2508 rc = rte_le_to_cpu_16(resp->error_code);
2509 PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
2510 return rc;
2511 }
2512 rc = rte_le_to_cpu_16(resp->vlan);
2514 HWRM_UNLOCK();
2516 return rc;
2517 }
2519 static int update_pf_resource_max(struct bnxt *bp)
2520 {
2521 struct hwrm_func_qcfg_input req = {0};
2522 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2523 int rc;
2525 /* And copy the allocated numbers into the pf struct */
2526 HWRM_PREP(req, FUNC_QCFG);
2527 req.fid = rte_cpu_to_le_16(0xffff);
2528 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2529 HWRM_CHECK_RESULT();
2531 /* Only TX ring value reflects actual allocation? TODO */
2532 bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2533 bp->pf.evb_mode = resp->evb_mode;
2534 HWRM_UNLOCK();
2536 return rc;
2537 }
2540 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2541 {
2542 int rc;
2544 if (!BNXT_PF(bp)) {
2545 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2546 return -EINVAL;
2547 }
2549 rc = bnxt_hwrm_func_qcaps(bp);
2550 if (rc)
2551 return rc;
2553 bp->pf.func_cfg_flags &=
2554 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2555 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2556 bp->pf.func_cfg_flags |=
2557 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2558 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2559 return rc;
2560 }
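/*
 * Sketch of the PF-only path (no SR-IOV): clearing STD_TX_RING_MODE and
 * reconfiguring with bp->max_tx_rings hands every TX ring to the PF. A
 * hypothetical probe-time caller (names assumed) would simply do:
 *
 *     if (pci_dev->max_vfs == 0)
 *         rc = bnxt_hwrm_allocate_pf_only(bp);
 *     else
 *         rc = bnxt_hwrm_allocate_vfs(bp, pci_dev->max_vfs);
 */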
2562 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2563 {
2564 struct hwrm_func_cfg_input req = {0};
2565 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2566 int i;
2567 size_t sz;
2568 int rc = 0;
2569 size_t req_buf_sz;
2571 if (!BNXT_PF(bp)) {
2572 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2573 return -EINVAL;
2574 }
2576 rc = bnxt_hwrm_func_qcaps(bp);
2578 if (rc)
2579 return rc;
2581 bp->pf.active_vfs = num_vfs;
2583 /*
2584 * First, configure the PF to only use one TX ring. This ensures that
2585 * there are enough rings for all VFs.
2586 *
2587 * If we don't do this, when we call func_alloc() later, we will lock
2588 * extra rings to the PF that won't be available during func_cfg() of
2589 * the VFs.
2590 *
2591 * This has been fixed with firmware versions above 20.6.54
2592 */
2593 bp->pf.func_cfg_flags &=
2594 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2595 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2596 bp->pf.func_cfg_flags |=
2597 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2598 rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2599 if (rc)
2600 return rc;
2602 /*
2603 * Now, create and register a buffer to hold forwarded VF requests
2604 */
2605 req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2606 bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2607 page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
2608 if (bp->pf.vf_req_buf == NULL) {
2609 PMD_DRV_LOG(ERR, "hwrm_func_buf_rgtr req buf alloc failed\n");
2610 return -ENOMEM;
2611 }
2612 for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2613 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2614 for (i = 0; i < num_vfs; i++)
2615 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2616 (i * HWRM_MAX_REQ_LEN);
2618 rc = bnxt_hwrm_func_buf_rgtr(bp);
2619 if (rc)
2620 goto error_free;
2622 populate_vf_func_cfg_req(bp, &req, num_vfs);
2624 bp->pf.active_vfs = 0;
2625 for (i = 0; i < num_vfs; i++) {
2626 add_random_mac_if_needed(bp, &req, i);
2628 HWRM_PREP(req, FUNC_CFG);
2629 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2630 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2631 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2633 /* Clear enable flag for next pass */
2634 req.enables &= ~rte_cpu_to_le_32(
2635 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2637 if (rc || resp->error_code) {
2638 PMD_DRV_LOG(ERR,
2639 "Failed to initialize VF %d\n", i);
2640 PMD_DRV_LOG(ERR,
2641 "Not all VFs available. (%d, %d)\n",
2642 rc, resp->error_code);
2643 HWRM_UNLOCK();
2644 break;
2645 }
2647 HWRM_UNLOCK();
2649 reserve_resources_from_vf(bp, &req, i);
2650 bp->pf.active_vfs++;
2651 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2652 }
2654 /*
2655 * Now configure the PF to use "the rest" of the resources.
2656 * We're using STD_TX_RING_MODE here, which will limit the TX
2657 * rings. This will allow QoS to function properly. Not setting this
2658 * will cause PF rings to break bandwidth settings.
2659 */
2660 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2661 if (rc)
2662 goto error_free;
2664 rc = update_pf_resource_max(bp);
2665 if (rc)
2666 goto error_free;
2668 return rc;
2670 error_free:
2671 bnxt_hwrm_func_buf_unrgtr(bp);
2672 return rc;
2673 }
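/*
 * Recap of the sequence above: (1) shrink the PF to one TX ring so the
 * pool covers every VF, (2) register the forwarded-request buffer,
 * (3) FUNC_CFG each VF and debit its resources from the PF totals,
 * (4) hand "the rest" back to the PF, and (5) re-read the PF's actual
 * allocation. Failures after buffer registration unwind via error_free.
 */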
2675 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2676 {
2677 struct hwrm_func_cfg_input req = {0};
2678 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2679 int rc;
2681 HWRM_PREP(req, FUNC_CFG);
2683 req.fid = rte_cpu_to_le_16(0xffff);
2684 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2685 req.evb_mode = bp->pf.evb_mode;
2687 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2688 HWRM_CHECK_RESULT();
2689 HWRM_UNLOCK();
2691 return rc;
2692 }
2694 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2695 uint8_t tunnel_type)
2696 {
2697 struct hwrm_tunnel_dst_port_alloc_input req = {0};
2698 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2699 int rc = 0;
2701 HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
2702 req.tunnel_type = tunnel_type;
2703 req.tunnel_dst_port_val = port;
2704 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2705 HWRM_CHECK_RESULT();
2707 switch (tunnel_type) {
2708 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2709 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2710 bp->vxlan_port = port;
2711 break;
2712 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2713 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2714 bp->geneve_port = port;
2715 break;
2716 default:
2717 break;
2718 }
2720 HWRM_UNLOCK();
2722 return rc;
2723 }
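/*
 * Usage sketch (hypothetical values): programming the IANA VXLAN port.
 * Note the asymmetry with bnxt_hwrm_tunnel_dst_port_free() below: the
 * alloc path passes the port value through untouched, so a caller that
 * wants network byte order on the wire swaps it first:
 *
 *     rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, rte_cpu_to_be_16(4789),
 *             HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN);
 */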
2725 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2726 uint8_t tunnel_type)
2727 {
2728 struct hwrm_tunnel_dst_port_free_input req = {0};
2729 struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2730 int rc = 0;
2732 HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
2734 req.tunnel_type = tunnel_type;
2735 req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2736 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2738 HWRM_CHECK_RESULT();
2739 HWRM_UNLOCK();
2741 return rc;
2742 }
2744 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2745 uint32_t flags)
2746 {
2747 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2748 struct hwrm_func_cfg_input req = {0};
2751 HWRM_PREP(req, FUNC_CFG);
2753 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2754 req.flags = rte_cpu_to_le_32(flags);
2755 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2757 HWRM_CHECK_RESULT();
2758 HWRM_UNLOCK();
2760 return rc;
2761 }
2763 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2764 {
2765 uint32_t *flag = flagp;
2767 vnic->flags = *flag;
2768 }
2770 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2771 {
2772 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2773 }
2775 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2776 {
2777 int rc = 0;
2778 struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2779 struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2781 HWRM_PREP(req, FUNC_BUF_RGTR);
2783 req.req_buf_num_pages = rte_cpu_to_le_16(1);
2784 req.req_buf_page_size = rte_cpu_to_le_16(
2785 page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2786 req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2787 req.req_buf_page_addr0 =
2788 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
2789 if (req.req_buf_page_addr0 == 0) {
2790 PMD_DRV_LOG(ERR,
2791 "unable to map buffer address to physical memory\n");
2792 return -ENOMEM;
2793 }
2795 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2797 HWRM_CHECK_RESULT();
2798 HWRM_UNLOCK();
2800 return rc;
2801 }
2803 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2804 {
2805 int rc = 0;
2806 struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2807 struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2809 HWRM_PREP(req, FUNC_BUF_UNRGTR);
2811 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2813 HWRM_CHECK_RESULT();
2814 HWRM_UNLOCK();
2816 return rc;
2817 }
2819 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2820 {
2821 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2822 struct hwrm_func_cfg_input req = {0};
2823 int rc;
2825 HWRM_PREP(req, FUNC_CFG);
2827 req.fid = rte_cpu_to_le_16(0xffff);
2828 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2829 req.enables = rte_cpu_to_le_32(
2830 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2831 req.async_event_cr = rte_cpu_to_le_16(
2832 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2833 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2835 HWRM_CHECK_RESULT();
2836 HWRM_UNLOCK();
2838 return rc;
2839 }
2841 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2842 {
2843 struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2844 struct hwrm_func_vf_cfg_input req = {0};
2845 int rc;
2847 HWRM_PREP(req, FUNC_VF_CFG);
2849 req.enables = rte_cpu_to_le_32(
2850 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2851 req.async_event_cr = rte_cpu_to_le_16(
2852 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2853 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2855 HWRM_CHECK_RESULT();
2856 HWRM_UNLOCK();
2858 return rc;
2859 }
2861 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2862 {
2863 struct hwrm_func_cfg_input req = {0};
2864 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2865 uint16_t dflt_vlan, fid;
2866 uint32_t func_cfg_flags;
2867 int rc = 0;
2869 HWRM_PREP(req, FUNC_CFG);
2871 if (is_vf) {
2872 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2873 fid = bp->pf.vf_info[vf].fid;
2874 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2875 } else {
2876 fid = rte_cpu_to_le_16(0xffff);
2877 func_cfg_flags = bp->pf.func_cfg_flags;
2878 dflt_vlan = bp->vlan;
2879 }
2881 req.flags = rte_cpu_to_le_32(func_cfg_flags);
2882 req.fid = rte_cpu_to_le_16(fid);
2883 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2884 req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2886 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2888 HWRM_CHECK_RESULT();
2889 HWRM_UNLOCK();
2891 return rc;
2892 }
2894 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2895 uint16_t max_bw, uint16_t enables)
2896 {
2897 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2898 struct hwrm_func_cfg_input req = {0};
2899 int rc = 0;
2901 HWRM_PREP(req, FUNC_CFG);
2903 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2904 req.enables |= rte_cpu_to_le_32(enables);
2905 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2906 req.max_bw = rte_cpu_to_le_32(max_bw);
2907 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2909 HWRM_CHECK_RESULT();
2910 HWRM_UNLOCK();
2912 return rc;
2913 }
2915 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2916 {
2917 struct hwrm_func_cfg_input req = {0};
2918 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2919 int rc = 0;
2921 HWRM_PREP(req, FUNC_CFG);
2923 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2924 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2925 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2926 req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
2928 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2930 HWRM_CHECK_RESULT();
2931 HWRM_UNLOCK();
2933 return rc;
2934 }
2936 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2937 void *encaped, size_t ec_size)
2938 {
2939 int rc = 0;
2940 struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2941 struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2943 if (ec_size > sizeof(req.encap_request))
2944 return -1;
2946 HWRM_PREP(req, REJECT_FWD_RESP);
2948 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2949 memcpy(req.encap_request, encaped, ec_size);
2951 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2953 HWRM_CHECK_RESULT();
2954 HWRM_UNLOCK();
2956 return rc;
2957 }
2959 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2960 struct ether_addr *mac)
2961 {
2962 struct hwrm_func_qcfg_input req = {0};
2963 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2964 int rc;
2966 HWRM_PREP(req, FUNC_QCFG);
2968 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2969 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2971 HWRM_CHECK_RESULT();
2973 memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2975 HWRM_UNLOCK();
2977 return rc;
2978 }
2980 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2981 void *encaped, size_t ec_size)
2982 {
2983 int rc = 0;
2984 struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2985 struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2987 if (ec_size > sizeof(req.encap_request))
2988 return -1;
2990 HWRM_PREP(req, EXEC_FWD_RESP);
2992 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2993 memcpy(req.encap_request, encaped, ec_size);
2995 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2997 HWRM_CHECK_RESULT();
2998 HWRM_UNLOCK();
3000 return rc;
3001 }
3003 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3004 struct rte_eth_stats *stats, uint8_t rx)
3005 {
3006 int rc = 0;
3007 struct hwrm_stat_ctx_query_input req = {.req_type = 0};
3008 struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
3010 HWRM_PREP(req, STAT_CTX_QUERY);
3012 req.stat_ctx_id = rte_cpu_to_le_32(cid);
3014 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3016 HWRM_CHECK_RESULT();
3018 if (rx) {
3019 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3020 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3021 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3022 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3023 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3024 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3025 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3026 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3027 } else {
3028 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3029 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3030 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3031 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3032 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3033 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3034 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
3035 }
3037 HWRM_UNLOCK();
3039 return rc;
3040 }
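/*
 * Note: one stat context maps to one queue, so this is called once per
 * queue with 'rx' selecting which half of the response is copied out.
 * A minimal sketch (hypothetical loop; assumes the completion ring info
 * holds the firmware stat context ID):
 *
 *     for (i = 0; i < bp->rx_cp_nr_rings; i++)
 *         rc = bnxt_hwrm_ctx_qstats(bp, cpr[i]->hw_stats_ctx_id, i,
 *                                   stats, 1);
 */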
3043 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3044 {
3045 struct hwrm_port_qstats_input req = {0};
3046 struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3047 struct bnxt_pf_info *pf = &bp->pf;
3048 int rc;
3050 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
3051 return 0;
3053 HWRM_PREP(req, PORT_QSTATS);
3055 req.port_id = rte_cpu_to_le_16(pf->port_id);
3056 req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3057 req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3058 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3060 HWRM_CHECK_RESULT();
3061 HWRM_UNLOCK();
3063 return rc;
3064 }
3066 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3067 {
3068 struct hwrm_port_clr_stats_input req = {0};
3069 struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3070 struct bnxt_pf_info *pf = &bp->pf;
3071 int rc;
3073 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
3074 return 0;
3076 HWRM_PREP(req, PORT_CLR_STATS);
3078 req.port_id = rte_cpu_to_le_16(pf->port_id);
3079 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3081 HWRM_CHECK_RESULT();
3082 HWRM_UNLOCK();
3084 return rc;
3085 }
3087 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3088 {
3089 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3090 struct hwrm_port_led_qcaps_input req = {0};
3091 int rc;
3096 HWRM_PREP(req, PORT_LED_QCAPS);
3097 req.port_id = bp->pf.port_id;
3098 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3100 HWRM_CHECK_RESULT();
3102 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3105 bp->num_leds = resp->num_leds;
3106 memcpy(bp->leds, &resp->led0_id,
3107 sizeof(bp->leds[0]) * bp->num_leds);
3108 for (i = 0; i < bp->num_leds; i++) {
3109 struct bnxt_led_info *led = &bp->leds[i];
3111 uint16_t caps = led->led_state_caps;
3113 if (!led->led_group_id ||
3114 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3115 bp->num_leds = 0;
3116 break;
3117 }
3118 }
3119 }
3121 HWRM_UNLOCK();
3123 return 0;
3124 }
3126 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3128 struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3129 struct hwrm_port_led_cfg_input req = {0};
3130 struct bnxt_led_cfg *led_cfg;
3131 uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3132 uint16_t duration = 0;
3133 int rc, i;
3135 if (!bp->num_leds || BNXT_VF(bp))
3136 return -EOPNOTSUPP;
3138 HWRM_PREP(req, PORT_LED_CFG);
3140 if (led_on) {
3141 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3142 duration = rte_cpu_to_le_16(500);
3143 }
3144 req.port_id = bp->pf.port_id;
3145 req.num_leds = bp->num_leds;
3146 led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3147 for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3148 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3149 led_cfg->led_id = bp->leds[i].led_id;
3150 led_cfg->led_state = led_state;
3151 led_cfg->led_blink_on = duration;
3152 led_cfg->led_blink_off = duration;
3153 led_cfg->led_group_id = bp->leds[i].led_group_id;
3154 }
3156 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3158 HWRM_CHECK_RESULT();
3159 HWRM_UNLOCK();
3161 return rc;
3162 }
3164 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3165 uint32_t *length)
3166 {
3167 int rc;
3168 struct hwrm_nvm_get_dir_info_input req = {0};
3169 struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3171 HWRM_PREP(req, NVM_GET_DIR_INFO);
3173 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3175 HWRM_CHECK_RESULT();
3179 *entries = rte_le_to_cpu_32(resp->entries);
3180 *length = rte_le_to_cpu_32(resp->entry_length);
3181 HWRM_UNLOCK();
3183 return rc;
3184 }
3185 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3186 {
3187 int rc;
3188 uint32_t dir_entries;
3189 uint32_t entry_length;
3190 uint8_t *buf;
3191 size_t buflen;
3192 rte_iova_t dma_handle;
3193 struct hwrm_nvm_get_dir_entries_input req = {0};
3194 struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3196 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3197 if (rc != 0)
3198 return rc;
3200 *data++ = dir_entries;
3201 *data++ = entry_length;
3202 len -= 2;
3203 memset(data, 0xff, len);
3205 buflen = dir_entries * entry_length;
3206 buf = rte_malloc("nvm_dir", buflen, 0);
3207 rte_mem_lock_page(buf);
3208 if (buf == NULL)
3209 return -ENOMEM;
3210 dma_handle = rte_mem_virt2iova(buf);
3211 if (dma_handle == 0) {
3212 PMD_DRV_LOG(ERR,
3213 "unable to map response address to physical memory\n");
3214 return -ENOMEM;
3215 }
3216 HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
3217 req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3218 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3220 HWRM_CHECK_RESULT();
3224 memcpy(data, buf, len > buflen ? buflen : len);
3225 rte_free(buf);
3226 HWRM_UNLOCK();
3228 return rc;
3229 }
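/*
 * Note on the output layout above: the first two bytes of 'data' receive
 * the directory entry count and the per-entry length (each truncated to a
 * byte by the uint8_t stores), and the raw directory entries follow,
 * clamped to the smaller of the caller's 'len' and the firmware's
 * directory size.
 */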
3231 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3232 uint32_t offset, uint32_t length,
3233 uint8_t *data)
3234 {
3235 int rc;
3236 uint8_t *buf;
3237 rte_iova_t dma_handle;
3238 struct hwrm_nvm_read_input req = {0};
3239 struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3241 buf = rte_malloc("nvm_item", length, 0);
3242 rte_mem_lock_page(buf);
3243 if (!buf)
3244 return -ENOMEM;
3246 dma_handle = rte_mem_virt2iova(buf);
3247 if (dma_handle == 0) {
3248 PMD_DRV_LOG(ERR,
3249 "unable to map response address to physical memory\n");
3250 return -ENOMEM;
3251 }
3252 HWRM_PREP(req, NVM_READ);
3253 req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3254 req.dir_idx = rte_cpu_to_le_16(index);
3255 req.offset = rte_cpu_to_le_32(offset);
3256 req.len = rte_cpu_to_le_32(length);
3257 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3258 HWRM_CHECK_RESULT();
3261 memcpy(data, buf, length);
3262 rte_free(buf);
3263 HWRM_UNLOCK();
3265 return rc;
3266 }
3267 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3268 {
3269 int rc;
3270 struct hwrm_nvm_erase_dir_entry_input req = {0};
3271 struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3273 HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
3274 req.dir_idx = rte_cpu_to_le_16(index);
3275 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3276 HWRM_CHECK_RESULT();
3277 HWRM_UNLOCK();
3279 return rc;
3280 }
3283 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3284 uint16_t dir_ordinal, uint16_t dir_ext,
3285 uint16_t dir_attr, const uint8_t *data,
3286 size_t data_len)
3287 {
3288 int rc;
3289 struct hwrm_nvm_write_input req = {0};
3290 struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3291 rte_iova_t dma_handle;
3292 uint8_t *buf;
3294 HWRM_PREP(req, NVM_WRITE);
3296 req.dir_type = rte_cpu_to_le_16(dir_type);
3297 req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3298 req.dir_ext = rte_cpu_to_le_16(dir_ext);
3299 req.dir_attr = rte_cpu_to_le_16(dir_attr);
3300 req.dir_data_length = rte_cpu_to_le_32(data_len);
3302 buf = rte_malloc("nvm_write", data_len, 0);
3303 rte_mem_lock_page(buf);
3307 dma_handle = rte_mem_virt2iova(buf);
3308 if (dma_handle == 0) {
3309 PMD_DRV_LOG(ERR,
3310 "unable to map response address to physical memory\n");
3311 return -ENOMEM;
3312 }
3313 memcpy(buf, data, data_len);
3314 req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3316 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3317 rte_free(buf);
3318 HWRM_CHECK_RESULT();
3319 HWRM_UNLOCK();
3321 return rc;
3322 }
3325 static void
3326 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3327 {
3328 uint32_t *count = cbdata;
3330 *count = *count + 1;
3331 }
3333 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3334 struct bnxt_vnic_info *vnic __rte_unused)
3335 {
3336 return 0;
3337 }
3339 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3340 {
3341 uint32_t count = 0;
3343 bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3344 &count, bnxt_vnic_count_hwrm_stub);
3346 return count;
3347 }
3349 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3350 uint16_t *vnic_ids)
3351 {
3352 struct hwrm_func_vf_vnic_ids_query_input req = {0};
3353 struct hwrm_func_vf_vnic_ids_query_output *resp =
3354 bp->hwrm_cmd_resp_addr;
3357 /* First query all VNIC ids */
3358 HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
3360 req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3361 req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3362 req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3364 if (req.vnic_id_tbl_addr == 0) {
3365 HWRM_UNLOCK();
3366 PMD_DRV_LOG(ERR,
3367 "unable to map VNIC ID table address to physical memory\n");
3368 return -ENOMEM;
3369 }
3370 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3372 if (rc) {
3373 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
3374 return -1;
3375 } else if (resp->error_code) {
3376 rc = rte_le_to_cpu_16(resp->error_code);
3378 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
3379 return -1;
3380 }
3381 rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3383 HWRM_UNLOCK();
3385 return rc;
3386 }
3388 /*
3389 * This function queries the VNIC IDs for a specified VF. It then calls
3390 * the vnic_cb to update the necessary field in vnic_info with cbdata.
3391 * Then it calls the hwrm_cb function to program this new vnic configuration.
3392 */
3393 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3394 void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3395 int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3396 {
3397 struct bnxt_vnic_info vnic;
3398 int rc = 0;
3399 int i, num_vnic_ids;
3400 uint16_t *vnic_ids;
3401 size_t vnic_id_sz;
3402 size_t sz;
3404 /* First query all VNIC ids */
3405 vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3406 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3407 RTE_CACHE_LINE_SIZE);
3408 if (vnic_ids == NULL) {
3409 rc = -ENOMEM;
3410 return rc;
3411 }
3412 for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3413 rte_mem_lock_page(((char *)vnic_ids) + sz);
3415 num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3417 if (num_vnic_ids < 0)
3418 return num_vnic_ids;
3420 /* Retrieve each VNIC, apply the callback, then reconfigure it */
3422 for (i = 0; i < num_vnic_ids; i++) {
3423 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3424 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3425 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3426 if (rc)
3427 break;
3428 if (vnic.mru <= 4) /* Indicates unallocated */
3429 continue;
3431 vnic_cb(&vnic, cbdata);
3433 rc = hwrm_cb(bp, &vnic);
3434 if (rc)
3435 break;
3436 }
3438 rte_free(vnic_ids);
3440 return rc;
3441 }
3443 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3444 bool on)
3445 {
3446 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3447 struct hwrm_func_cfg_input req = {0};
3450 HWRM_PREP(req, FUNC_CFG);
3452 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3453 req.enables |= rte_cpu_to_le_32(
3454 HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3455 req.vlan_antispoof_mode = on ?
3456 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3457 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3458 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3460 HWRM_CHECK_RESULT();
3461 HWRM_UNLOCK();
3463 return rc;
3464 }
3466 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3467 {
3468 struct bnxt_vnic_info vnic;
3469 uint16_t *vnic_ids;
3470 size_t vnic_id_sz;
3471 int num_vnic_ids, i;
3472 size_t sz;
3473 int rc;
3475 vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3476 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3477 RTE_CACHE_LINE_SIZE);
3478 if (vnic_ids == NULL) {
3479 rc = -ENOMEM;
3480 return rc;
3481 }
3483 for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3484 rte_mem_lock_page(((char *)vnic_ids) + sz);
3486 rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3487 if (rc <= 0)
3488 goto exit;
3489 num_vnic_ids = rc;
3491 /*
3492 * Loop through to find the default VNIC ID.
3493 * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3494 * by sending the hwrm_func_qcfg command to the firmware.
3495 */
3496 for (i = 0; i < num_vnic_ids; i++) {
3497 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3498 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3499 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3500 bp->pf.first_vf_id + vf);
3501 if (rc)
3502 goto exit;
3503 if (vnic.func_default) {
3504 rte_free(vnic_ids);
3505 return vnic.fw_vnic_id;
3506 }
3507 }
3508 /* Could not find a default VNIC. */
3509 PMD_DRV_LOG(ERR, "No default VNIC\n");
3510 exit:
3511 rte_free(vnic_ids);
3512 return rc;
3513 }
3515 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3516 uint16_t dst_id,
3517 struct bnxt_filter_info *filter)
3518 {
3519 int rc = 0;
3520 struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3521 struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3522 uint32_t enables = 0;
3524 if (filter->fw_em_filter_id != UINT64_MAX)
3525 bnxt_hwrm_clear_em_filter(bp, filter);
3527 HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
3529 req.flags = rte_cpu_to_le_32(filter->flags);
3531 enables = filter->enables |
3532 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3533 req.dst_id = rte_cpu_to_le_16(dst_id);
3535 if (filter->ip_addr_type) {
3536 req.ip_addr_type = filter->ip_addr_type;
3537 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3538 }
3539 if (enables &
3540 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3541 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3542 if (enables &
3543 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3544 memcpy(req.src_macaddr, filter->src_macaddr,
3545 ETHER_ADDR_LEN);
3546 if (enables &
3547 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3548 memcpy(req.dst_macaddr, filter->dst_macaddr,
3549 ETHER_ADDR_LEN);
3550 if (enables &
3551 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3552 req.ovlan_vid = filter->l2_ovlan;
3553 if (enables &
3554 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3555 req.ivlan_vid = filter->l2_ivlan;
3556 if (enables &
3557 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3558 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3559 if (enables &
3560 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3561 req.ip_protocol = filter->ip_protocol;
3562 if (enables &
3563 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3564 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3565 if (enables &
3566 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3567 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3568 if (enables &
3569 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3570 req.src_port = rte_cpu_to_be_16(filter->src_port);
3571 if (enables &
3572 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3573 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3574 if (enables &
3575 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3576 req.mirror_vnic_id = filter->mirror_vnic_id;
3578 req.enables = rte_cpu_to_le_32(enables);
3580 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3582 HWRM_CHECK_RESULT();
3584 filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3585 HWRM_UNLOCK();
3587 return rc;
3588 }
3590 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3593 struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3594 struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3596 if (filter->fw_em_filter_id == UINT64_MAX)
3597 return 0;
3599 PMD_DRV_LOG(ERR, "Clear EM filter\n");
3600 HWRM_PREP(req, CFA_EM_FLOW_FREE);
3602 req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3604 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3606 HWRM_CHECK_RESULT();
3607 HWRM_UNLOCK();
3609 filter->fw_em_filter_id = UINT64_MAX;
3610 filter->fw_l2_filter_id = UINT64_MAX;
3612 return 0;
3613 }
3615 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3616 uint16_t dst_id,
3617 struct bnxt_filter_info *filter)
3618 {
3619 int rc = 0;
3620 struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3621 struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3622 bp->hwrm_cmd_resp_addr;
3623 uint32_t enables = 0;
3625 if (filter->fw_ntuple_filter_id != UINT64_MAX)
3626 bnxt_hwrm_clear_ntuple_filter(bp, filter);
3628 HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
3630 req.flags = rte_cpu_to_le_32(filter->flags);
3632 enables = filter->enables |
3633 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3634 req.dst_id = rte_cpu_to_le_16(dst_id);
3637 if (filter->ip_addr_type) {
3638 req.ip_addr_type = filter->ip_addr_type;
3639 enables |=
3640 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3641 }
3642 if (enables &
3643 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3644 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3645 if (enables &
3646 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3647 memcpy(req.src_macaddr, filter->src_macaddr,
3648 ETHER_ADDR_LEN);
3649 //if (enables &
3650 //HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
3651 //memcpy(req.dst_macaddr, filter->dst_macaddr,
3652 //ETHER_ADDR_LEN);
3653 if (enables &
3654 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
3655 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3656 if (enables &
3657 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3658 req.ip_protocol = filter->ip_protocol;
3659 if (enables &
3660 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3661 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
3662 if (enables &
3663 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
3664 req.src_ipaddr_mask[0] =
3665 rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
3666 if (enables &
3667 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
3668 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
3669 if (enables &
3670 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
3671 req.dst_ipaddr_mask[0] =
3672 rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
3673 if (enables &
3674 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
3675 req.src_port = rte_cpu_to_le_16(filter->src_port);
3676 if (enables &
3677 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
3678 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
3679 if (enables &
3680 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
3681 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
3682 if (enables &
3683 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
3684 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
3685 if (enables &
3686 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3687 req.mirror_vnic_id = filter->mirror_vnic_id;
3689 req.enables = rte_cpu_to_le_32(enables);
3691 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3693 HWRM_CHECK_RESULT();
3695 filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3696 HWRM_UNLOCK();
3698 return rc;
3699 }
3701 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3702 struct bnxt_filter_info *filter)
3705 struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3706 struct hwrm_cfa_ntuple_filter_free_output *resp =
3707 bp->hwrm_cmd_resp_addr;
3709 if (filter->fw_ntuple_filter_id == UINT64_MAX)
3710 return 0;
3712 HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
3714 req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3716 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3718 HWRM_CHECK_RESULT();
3719 HWRM_UNLOCK();
3721 filter->fw_ntuple_filter_id = UINT64_MAX;
3722 filter->fw_l2_filter_id = UINT64_MAX;
3724 return 0;
3725 }
3727 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3728 {
3729 unsigned int rss_idx, fw_idx, i;
3731 if (vnic->rss_table && vnic->hash_type) {
3732 /*
3733 * Fill the RSS hash & redirection table with
3734 * ring group ids for all VNICs
3735 */
3736 for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
3737 rss_idx++, fw_idx++) {
3738 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
3739 fw_idx %= bp->rx_cp_nr_rings;
3740 if (vnic->fw_grp_ids[fw_idx] !=
3741 INVALID_HW_RING_ID)
3742 break;
3743 fw_idx++;
3744 }
3745 if (i == bp->rx_cp_nr_rings)
3746 return 0;
3747 vnic->rss_table[rss_idx] =
3748 vnic->fw_grp_ids[fw_idx];
3749 }
3750 return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
3751 }
3753 return 0;
3754 }
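/*
 * Example of the table fill above (illustrative): with four RX rings whose
 * group IDs are all valid, the HW_HASH_INDEX_SIZE slots are striped
 * 0,1,2,3,0,1,... by the fw_idx walk. Rings whose group ID is
 * INVALID_HW_RING_ID are skipped, and if no ring is valid the function
 * returns 0 rather than programming a stale redirection table.
 */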