/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#define HWRM_CMD_TIMEOUT		10000

struct bnxt_plcmodes_cfg {
	uint32_t	flags;
	uint16_t	jumbo_thresh;
	uint16_t	hds_offset;
	uint16_t	hds_threshold;
};

static int page_getenum(size_t size)
{
	if (size <= 1 << 4)
		return 4;
	if (size <= 1 << 12)
		return 12;
	if (size <= 1 << 13)
		return 13;
	if (size <= 1 << 16)
		return 16;
	if (size <= 1 << 21)
		return 21;
	if (size <= 1 << 22)
		return 22;
	if (size <= 1 << 30)
		return 30;
	PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
	return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
	return 1 << page_getenum(size);
}
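
/*
 * Informative example: page_getenum() maps a size to the log2 of the
 * smallest supported page size that can hold it, and page_roundup()
 * returns that page size itself. For instance, page_getenum(3000) == 12
 * and page_roundup(3000) == 4096 (1 << 12).
 */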
/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * HWRM command was failed by the ChiMP.
 */
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
				  uint32_t msg_len)
{
	unsigned int i;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;
	uint8_t *bar;
	uint8_t *valid;
	uint16_t max_req_len = bp->max_req_len;
	struct hwrm_short_input short_input = { 0 };

	if (bp->flags & BNXT_FLAG_SHORT_CMD) {
		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

		memset(short_cmd_req, 0, bp->max_req_len);
		memcpy(short_cmd_req, req, msg_len);

		short_input.req_type = rte_cpu_to_le_16(req->req_type);
		short_input.signature = rte_cpu_to_le_16(
					HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
		short_input.size = rte_cpu_to_le_16(msg_len);
		short_input.req_addr =
			rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

		data = (uint32_t *)&short_input;
		msg_len = sizeof(short_input);

		/* Sync memory write before updating doorbell */
		rte_wmb();

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}
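
	/*
	 * Informative note: in short-command mode (handled above) only the
	 * 16-byte hwrm_short_input descriptor is written through the
	 * channel; the firmware fetches the full request by DMA from
	 * hwrm_short_cmd_req_dma_addr instead.
	 */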
	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(*data, bar);
		data++;
	}

	/* Zero the rest of the request space */
	for (; i < max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(0, bar);
	}

	/* Ring channel doorbell */
	bar = (uint8_t *)bp->bar0 + 0x100;
	rte_write32(1, bar);

	/* Poll for the valid bit */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len */
		if (resp->resp_len && resp->resp_len <=
				bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)
				break;
		}
		rte_delay_us(600);
	}

	if (i >= HWRM_CMD_TIMEOUT) {
		PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
			    req->req_type);
		return -1;
	}

	return 0;
}
/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() returns from the calling function on failure and
 * releases the spinlock only on that error path. If the regular int return
 * codes are not used by the function, HWRM_CHECK_RESULT() should not be used
 * directly; rather it should be copied and modified to suit the function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type) do { \
	rte_spinlock_lock(&bp->hwrm_lock); \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
	req.cmpl_ring = rte_cpu_to_le_16(-1); \
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
	req.target_id = rte_cpu_to_le_16(0xffff); \
	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)
#define HWRM_CHECK_RESULT() do { \
	if (rc) { \
		PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		return rc; \
	} \
	if (resp->error_code) { \
		rc = rte_le_to_cpu_16(resp->error_code); \
		if (resp->resp_len >= 16) { \
			struct hwrm_err_output *tmp_hwrm_err_op = \
						(void *)resp; \
			PMD_DRV_LOG(ERR, \
				    "error %d:%d:%08x:%04x\n", \
				    rc, tmp_hwrm_err_op->cmd_err, \
				    rte_le_to_cpu_32( \
					tmp_hwrm_err_op->opaque_0), \
				    rte_le_to_cpu_16( \
					tmp_hwrm_err_op->opaque_1)); \
		} else { \
			PMD_DRV_LOG(ERR, "error %d\n", rc); \
		} \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		return rc; \
	} \
} while (0)

#define HWRM_UNLOCK()		rte_spinlock_unlock(&bp->hwrm_lock)
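
/*
 * Illustrative sketch only (not part of the driver): the canonical shape
 * of a command built from the three macros above, using a hypothetical
 * HWRM command named FOO.
 *
 *	int bnxt_hwrm_foo(struct bnxt *bp)
 *	{
 *		int rc = 0;
 *		struct hwrm_foo_input req = {.req_type = 0 };
 *		struct hwrm_foo_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *		HWRM_PREP(req, FOO);	// take hwrm_lock, prep header
 *		// ...fill request fields...
 *		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *		HWRM_CHECK_RESULT();	// returns (unlocked) on failure
 *		// ...read *resp while the lock is still held...
 *		HWRM_UNLOCK();		// release hwrm_lock
 *		return rc;
 *	}
 */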
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.mask = 0;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
				 struct bnxt_vnic_info *vnic,
				 uint16_t vlan_count,
				 struct bnxt_vlan_table_entry *vlan_table)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t mask = 0;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	/*
	 * FIXME: add a multicast flag once multicast adding options are
	 * supported.
	 */
	if (vnic->flags & BNXT_VNIC_INFO_BCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
	if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	if (vnic->flags & BNXT_VNIC_INFO_MCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
	if (vnic->mc_addr_cnt) {
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
		req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
		req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
	}
	if (vlan_table) {
		if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
			mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
		req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
			rte_mem_virt2iova(vlan_table));
		req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
	}
	req.mask = rte_cpu_to_le_32(mask);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
			uint16_t vlan_count,
			struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
	int rc = 0;
	struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
	struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
						bp->hwrm_cmd_resp_addr;

	/*
	 * Older HWRM versions did not support this command, and the
	 * set_rx_mask list was used for anti-spoof. In 1.8.0, the TX path
	 * configuration was removed from the set_rx_mask call, and this
	 * command was added.
	 *
	 * This command is also present from 1.7.8.11 and higher, as well as
	 * in 1.7.8.0 exactly.
	 */
	if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
		if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
			if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
					  11))
				return 0;
		}
	}
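
	/*
	 * Informative note: bp->fw_ver, set in bnxt_hwrm_ver_get(), packs the
	 * firmware version as (major << 24) | (minor << 16) | (build << 8) |
	 * reserved, so 1.8.0 is (1 << 24) | (8 << 16) and 1.7.8.11 is
	 * (1 << 24) | (7 << 16) | (8 << 8) | 11, which is exactly what the
	 * checks above encode.
	 */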
	HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
	req.fid = rte_cpu_to_le_16(fid);

	req.vlan_tag_mask_tbl_addr =
		rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
	req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
			      struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (filter->fw_l2_filter_id == UINT64_MAX)
		return 0;

	HWRM_PREP(req, CFA_L2_FILTER_FREE);

	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	filter->fw_l2_filter_id = UINT64_MAX;

	return 0;
}
int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
			    uint16_t dst_id,
			    struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	const struct rte_eth_vmdq_rx_conf *conf =
		&dev_conf->rx_adv_conf.vmdq_rx_conf;
	uint32_t enables = 0;
	uint16_t j = dst_id - 1;

	/* TODO: Is there a better way to add VLANs to each VNIC for VMDQ? */
	if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
	    conf->pool_map[j].pools & (1UL << j)) {
		PMD_DRV_LOG(DEBUG,
			    "Add vlan %u to vmdq pool %u\n",
			    conf->pool_map[j].vlan_id, j);

		filter->l2_ivlan = conf->pool_map[j].vlan_id;
		filter->enables |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
	}

	if (filter->fw_l2_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_l2_filter(bp, filter);

	HWRM_PREP(req, CFA_L2_FILTER_ALLOC);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
		  HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
		req.l2_ivlan = filter->l2_ivlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
		req.l2_ivlan_mask = filter->l2_ivlan_mask;
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
		req.src_id = rte_cpu_to_le_32(filter->src_id);
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
		req.src_type = filter->src_type;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
	struct hwrm_port_mac_cfg_input req = {.req_type = 0};
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	uint32_t flags = 0;
	int rc;

	if (!ptp)
		return 0;

	HWRM_PREP(req, PORT_MAC_CFG);

	if (ptp->rx_tstamp_en)
		flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
	else
		flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
	if (ptp->tx_tstamp_en)
		flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
	else
		flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
	req.flags = rte_cpu_to_le_32(flags);
	req.enables =
	rte_cpu_to_le_32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
	req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_UNLOCK();

	return rc;
}
static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
	struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

	/* if (bp->hwrm_spec_code < 0x10801 || ptp) TBD */
	if (ptp)
		return 0;

	HWRM_PREP(req, PORT_MAC_PTP_QCFG);

	req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	if (!(resp->flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS)) {
		HWRM_UNLOCK();
		return 0;
	}

	ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
	if (!ptp) {
		HWRM_UNLOCK();
		return -ENOMEM;
	}

	ptp->rx_regs[BNXT_PTP_RX_TS_L] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
	ptp->rx_regs[BNXT_PTP_RX_TS_H] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
	ptp->rx_regs[BNXT_PTP_RX_SEQ] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
	ptp->rx_regs[BNXT_PTP_RX_FIFO] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
	ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
	ptp->tx_regs[BNXT_PTP_TX_TS_L] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
	ptp->tx_regs[BNXT_PTP_TX_TS_H] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
	ptp->tx_regs[BNXT_PTP_TX_SEQ] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
	ptp->tx_regs[BNXT_PTP_TX_FIFO] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);

	ptp->bp = bp;
	bp->ptp_cfg = ptp;

	HWRM_UNLOCK();
	return 0;
}
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t new_max_vfs;
	uint32_t flags;
	int i;

	HWRM_PREP(req, FUNC_QCAPS);

	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	flags = rte_le_to_cpu_32(resp->flags);
	if (BNXT_PF(bp)) {
		bp->pf.port_id = resp->port_id;
		bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		new_max_vfs = bp->pdev->max_vfs;
		if (new_max_vfs != bp->pf.max_vfs) {
			if (bp->pf.vf_info)
				rte_free(bp->pf.vf_info);
			bp->pf.vf_info = rte_malloc("bnxt_vf_info",
			    sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
			bp->pf.max_vfs = new_max_vfs;
			for (i = 0; i < new_max_vfs; i++) {
				bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
				bp->pf.vf_info[i].vlan_table =
					rte_zmalloc("VF VLAN table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_table == NULL)
					PMD_DRV_LOG(ERR,
					"Fail to alloc VLAN table for VF %d\n",
					i);
				else
					rte_mem_lock_page(
						bp->pf.vf_info[i].vlan_table);
				bp->pf.vf_info[i].vlan_as_table =
					rte_zmalloc("VF VLAN AS table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_as_table == NULL)
					PMD_DRV_LOG(ERR,
					"Alloc VLAN AS table for VF %d fail\n",
					i);
				else
					rte_mem_lock_page(
					       bp->pf.vf_info[i].vlan_as_table);
				STAILQ_INIT(&bp->pf.vf_info[i].filter);
			}
		}
	}

	bp->fw_fid = rte_le_to_cpu_32(resp->fid);
	memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
	/* TODO: For now, do not support VMDq/RFS on VFs. */
	bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
	if (BNXT_PF(bp))
		bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
		bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
		PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
		HWRM_UNLOCK();
		bnxt_hwrm_ptp_qcfg(bp);
	} else {
		HWRM_UNLOCK();
	}

	return rc;
}
int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_RESET);
	req.enables = rte_cpu_to_le_32(0);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();
	return rc;
}
int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
	int rc;
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)
		return 0;

	HWRM_PREP(req, FUNC_DRV_RGTR);
	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

	if (BNXT_PF(bp)) {
		req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD);
		memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
		       RTE_MIN(sizeof(req.vf_req_fwd),
			       sizeof(bp->pf.vf_req_fwd)));
	}

	req.async_event_fwd[0] |=
		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
				 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
	req.async_event_fwd[1] |=
		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
				 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->flags |= BNXT_FLAG_REGISTERED;

	return rc;
}
int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_ver_get_input req = {.req_type = 0 };
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t my_version;
	uint32_t fw_version;
	uint16_t max_resp_len;
	char type[RTE_MEMZONE_NAMESIZE];
	uint32_t dev_caps_cfg;

	bp->max_req_len = HWRM_MAX_REQ_LEN;
	HWRM_PREP(req, VER_GET);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
		    resp->hwrm_intf_maj, resp->hwrm_intf_min,
		    resp->hwrm_intf_upd,
		    resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
	bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
		     (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
	PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
		    HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
		    HWRM_VERSION_UPDATE);

	my_version = HWRM_VERSION_MAJOR << 16;
	my_version |= HWRM_VERSION_MINOR << 8;
	my_version |= HWRM_VERSION_UPDATE;

	fw_version = resp->hwrm_intf_maj << 16;
	fw_version |= resp->hwrm_intf_min << 8;
	fw_version |= resp->hwrm_intf_upd;

	if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
		PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
		rc = -EINVAL;
		goto error;
	}

	if (my_version != fw_version) {
		PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
		if (my_version < fw_version) {
			PMD_DRV_LOG(INFO,
				"Firmware API version is newer than driver.\n");
			PMD_DRV_LOG(INFO,
				"The driver may be missing features.\n");
		} else {
			PMD_DRV_LOG(INFO,
				"Firmware API version is older than driver.\n");
			PMD_DRV_LOG(INFO,
				"Not all driver features may be functional.\n");
		}
	}

	if (bp->max_req_len > resp->max_req_win_len) {
		PMD_DRV_LOG(ERR, "Unsupported request length\n");
		rc = -EINVAL;
	}
	bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
	max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
	dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

	if (bp->max_resp_len != max_resp_len) {
		sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
			bp->pdev->addr.domain, bp->pdev->addr.bus,
			bp->pdev->addr.devid, bp->pdev->addr.function);

		rte_free(bp->hwrm_cmd_resp_addr);

		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
		if (bp->hwrm_cmd_resp_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
		bp->hwrm_cmd_resp_dma_addr =
			rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
		if (bp->hwrm_cmd_resp_dma_addr == 0) {
			PMD_DRV_LOG(ERR,
			"Unable to map response buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}
		bp->max_resp_len = max_resp_len;
	}

	if ((dev_caps_cfg &
	     HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg &
	     HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_INPUTUIRED)) {
		PMD_DRV_LOG(DEBUG, "Short command supported\n");

		rte_free(bp->hwrm_short_cmd_req_addr);

		bp->hwrm_short_cmd_req_addr = rte_malloc(type,
							 bp->max_req_len, 0);
		if (bp->hwrm_short_cmd_req_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
		bp->hwrm_short_cmd_req_dma_addr =
			rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
		if (bp->hwrm_short_cmd_req_dma_addr == 0) {
			rte_free(bp->hwrm_short_cmd_req_addr);
			PMD_DRV_LOG(ERR,
				"Unable to map buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}

		bp->flags |= BNXT_FLAG_SHORT_CMD;
	}

error:
	HWRM_UNLOCK();
	return rc;
}
int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
	int rc;
	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))
		return 0;

	HWRM_PREP(req, FUNC_DRV_UNRGTR);
	req.flags = flags;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->flags &= ~BNXT_FLAG_REGISTERED;

	return rc;
}
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
	int rc = 0;
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, PORT_PHY_CFG);

	if (conf->link_up) {
		/* Setting a fixed speed while autoneg is on, so disable it */
		if (bp->link_info.auto_mode && conf->link_speed) {
			req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
			PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
		}

		req.flags = rte_cpu_to_le_32(conf->phy_flags);
		req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (!conf->link_speed) {
			/* No speeds specified. Enable AutoNeg - all speeds */
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
		}
		/* AutoNeg - Advertise speeds specified. */
		if (conf->auto_link_speed_mask &&
		    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
			req.auto_link_speed_mask =
				conf->auto_link_speed_mask;
			enables |=
			HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
		}

		req.auto_duplex = conf->duplex;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		req.force_pause = conf->force_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause && !req.force_pause)
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		else
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

		req.enables = rte_cpu_to_le_32(enables);
	} else {
		req.flags =
		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
		PMD_DRV_LOG(INFO, "Force Link Down\n");
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
{
	int rc = 0;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_QCFG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	link_info->phy_link_status = resp->link;
	link_info->link_up =
		(link_info->phy_link_status ==
		 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
	link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
	link_info->duplex = resp->duplex_cfg;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;
	link_info->phy_type = resp->phy_type;
	link_info->media_type = resp->media_type;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;
	link_info->auto_link_speed_mask =
		rte_le_to_cpu_16(resp->auto_link_speed_mask);

	HWRM_UNLOCK();

	PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
	PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
	PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
	PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
	PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
		    link_info->auto_link_speed_mask);
	PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
		    link_info->force_link_speed);

	return rc;
}
int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, QUEUE_QPORTCFG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

	GET_QUEUE_INFO(0);
	GET_QUEUE_INFO(1);
	GET_QUEUE_INFO(2);
	GET_QUEUE_INFO(3);
	GET_QUEUE_INFO(4);
	GET_QUEUE_INFO(5);
	GET_QUEUE_INFO(6);
	GET_QUEUE_INFO(7);

	HWRM_UNLOCK();

	return rc;
}
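
/*
 * Informative note: GET_QUEUE_INFO() relies on token pasting, so
 * GET_QUEUE_INFO(0) in bnxt_hwrm_queue_qportcfg() expands to:
 *
 *	bp->cos_queue[0].id = resp->queue_id0;
 *	bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 *
 * i.e. one assignment pair per CoS queue reported by the firmware.
 */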
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
	int rc = 0;
	uint32_t enables = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);
	req.length = rte_cpu_to_le_32(ring->ring_size);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.queue_id = bp->cos_queue[0].id;
		/* FALLTHROUGH */
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		if (stats_ctx_id != INVALID_STATS_CTX_ID)
			enables |=
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
		break;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		break;
	default:
		PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
			    ring_type);
		HWRM_UNLOCK();
		return -1;
	}
	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			PMD_DRV_LOG(ERR,
				    "hwrm_ring_alloc cp failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			PMD_DRV_LOG(ERR,
				    "hwrm_ring_alloc rx failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			PMD_DRV_LOG(ERR,
				    "hwrm_ring_alloc tx failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		default:
			PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		}
	}

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
	HWRM_UNLOCK();
	return rc;
}
int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
{
	int rc;
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_FREE);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		HWRM_UNLOCK();

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
				    rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
				    rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
				    rc);
			return rc;
		default:
			PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
			return rc;
		}
	}
	HWRM_UNLOCK();
	return 0;
}
int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
	int rc = 0;
	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_ALLOC);

	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	bp->grp_info[idx].fw_grp_id =
		rte_le_to_cpu_16(resp->ring_group_id);

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
	int rc;
	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_FREE);

	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
	return rc;
}
int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	int rc = 0;
	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
		return rc;

	HWRM_PREP(req, STAT_CTX_CLR_STATS);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			     unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_ALLOC);

	req.update_period_ms = rte_cpu_to_le_32(0);

	req.stats_dma_addr =
		rte_cpu_to_le_64(cpr->hw_stats_map);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			    unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_FREE);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0, i, j;
	struct hwrm_vnic_alloc_input req = { 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
		    vnic->start_grp_id, vnic->end_grp_id);
	for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
	vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
	vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
		    ETHER_CRC_LEN + VLAN_TAG_SIZE;
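	/*
	 * Worked example: with the default MTU of 1500 the MRU computed above
	 * is 1500 + 14 (Ethernet header) + 4 (CRC) + 4 (VLAN tag) = 1522.
	 */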
	HWRM_PREP(req, VNIC_ALLOC);

	if (vnic->func_default)
		req.flags =
			rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
	HWRM_UNLOCK();
	PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
	return rc;
}
static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
					struct bnxt_vnic_info *vnic,
					struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_QCFG);

	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	pmode->flags = rte_le_to_cpu_32(resp->flags);
	/* dflt_vnic bit doesn't exist in the _cfg command */
	pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
	pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
	pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
	pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

	HWRM_UNLOCK();

	return rc;
}
static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
				       struct bnxt_vnic_info *vnic,
				       struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_CFG);

	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
	req.flags = rte_cpu_to_le_32(pmode->flags);
	req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
	req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
	req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
	req.enables = rte_cpu_to_le_32(
		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t ctx_enable_flag = 0;
	struct bnxt_plcmodes_cfg pmodes;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
	if (rc)
		return rc;

	HWRM_PREP(req, VNIC_CFG);

	/* Only RSS support for now TBD: COS & LB */
	req.enables =
		rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
	if (vnic->lb_rule != 0xffff)
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
	if (vnic->cos_rule != 0xffff)
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
	if (vnic->rss_rule != 0xffff) {
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
	}
	req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
	req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
	req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
	req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
	req.mru = rte_cpu_to_le_16(vnic->mru);
	if (vnic->func_default)
		req.flags |=
			rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
	if (vnic->vlan_strip)
		req.flags |=
		rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
	if (vnic->bd_stall)
		req.flags |=
		rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
	if (vnic->roce_dual)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
	if (vnic->roce_only)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
	if (vnic->rss_dflt_cr)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);

	return rc;
}
int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
			int16_t fw_vf_id)
{
	int rc = 0;
	struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
		return rc;
	}
	HWRM_PREP(req, VNIC_QCFG);

	req.enables =
		rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.vf_id = rte_cpu_to_le_16(fw_vf_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
	vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
	vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
	vnic->mru = rte_le_to_cpu_16(resp->mru);
	vnic->func_default = rte_le_to_cpu_32(
			resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
	vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
	vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
	vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
	vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
	vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
	HWRM_UNLOCK();
	PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);

	return rc;
}
int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
						bp->hwrm_cmd_resp_addr;

	if (vnic->rss_rule == 0xffff) {
		PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
		return rc;
	}
	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);

	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	vnic->rss_rule = INVALID_HW_RING_ID;

	return rc;
}
int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_free_input req = {.req_type = 0 };
	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	HWRM_PREP(req, VNIC_FREE);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	vnic->fw_vnic_id = INVALID_HW_RING_ID;
	return rc;
}
int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_CFG);

	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

	req.ring_grp_tbl_addr =
		rte_cpu_to_le_64(vnic->rss_table_dma_addr);
	req.hash_key_tbl_addr =
		rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
	req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
			       struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t size;

	HWRM_PREP(req, VNIC_PLCMODES_CFG);

	req.flags = rte_cpu_to_le_32(
			HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);

	req.enables = rte_cpu_to_le_32(
		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);

	size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
	size -= RTE_PKTMBUF_HEADROOM;

	req.jumbo_thresh = rte_cpu_to_le_16(size);
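	/*
	 * Worked example (assuming the common defaults): a data room of
	 * RTE_MBUF_DEFAULT_BUF_SIZE (2176) minus RTE_PKTMBUF_HEADROOM (128)
	 * sets the threshold above to 2048, so only packets that cannot fit
	 * in a single mbuf use the jumbo placement algorithm.
	 */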
	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic, bool enable)
{
	int rc = 0;
	struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_TPA_CFG);

	if (enable) {
		req.enables = rte_cpu_to_le_32(
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
		req.flags = rte_cpu_to_le_32(
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
		req.max_agg_segs = rte_cpu_to_le_16(5);
		req.max_aggs =
			rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
		req.min_agg_len = rte_cpu_to_le_32(512);
	}
	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);

	HWRM_PREP(req, FUNC_CFG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->pf.vf_info[vf].random_mac = false;

	return rc;
}
int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
				  uint64_t *dropped)
{
	int rc = 0;
	struct hwrm_func_qstats_input req = {.req_type = 0};
	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QSTATS);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	if (dropped)
		*dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
			  struct rte_eth_stats *stats)
{
	int rc = 0;
	struct hwrm_func_qstats_input req = {.req_type = 0};
	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QSTATS);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
	stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
	stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
	stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
	stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
	stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);

	stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
	stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
	stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
	stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
	stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
	stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);

	stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
	stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);

	stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
{
	int rc = 0;
	struct hwrm_func_clr_stats_input req = {.req_type = 0};
	struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_CLR_STATS);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
/*
 * HWRM utility functions
 */

int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_clear(bp, cpr);
		if (rc)
			return rc;
	}
	return 0;
}
int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	int rc;
	unsigned int i;
	struct bnxt_cp_ring_info *cpr;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		if (i >= bp->rx_cp_nr_rings) {
			cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
		} else {
			cpr = bp->rx_queues[i]->cp_ring;
			bp->grp_info[i].fw_stats_ctx = -1;
		}
		if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
			rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
			cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
			if (rc)
				return rc;
		}
	}
	return 0;
}
int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
		if (rc)
			return rc;
	}
	return rc;
}
int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
	unsigned int idx;
	int rc = 0;

	for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
		if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
			continue;

		rc = bnxt_hwrm_ring_grp_free(bp, idx);
		if (rc)
			return rc;
	}
	return rc;
}
static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			      unsigned int idx __rte_unused)
{
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

	bnxt_hwrm_ring_free(bp, cp_ring,
			    HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
			sizeof(*cpr->cp_desc_ring));
	cpr->cp_raw_cons = 0;
}
int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
	unsigned int i;

	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
		unsigned int idx = bp->rx_cp_nr_rings + i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_TX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			memset(txr->tx_desc_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_desc_ring));
			memset(txr->tx_buf_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_buf_ring));
			txr->tx_prod = 0;
			txr->tx_cons = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_free_cp_ring(bp, cpr, idx);
			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
		struct bnxt_ring *ring = rxr->rx_ring_struct;
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
		unsigned int idx = i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_RX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
			memset(rxr->rx_desc_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_desc_ring));
			memset(rxr->rx_buf_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_buf_ring));
			rxr->rx_prod = 0;
		}
		ring = rxr->ag_ring_struct;
		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_RX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			memset(rxr->ag_buf_ring, 0,
					rxr->ag_ring_struct->ring_size *
					sizeof(*rxr->ag_buf_ring));
		}
		bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID;
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_free_cp_ring(bp, cpr, idx);
			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	/* Default completion ring */
	{
		struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_free_cp_ring(bp, cpr, 0);
			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	return 0;
}
int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		rc = bnxt_hwrm_ring_grp_alloc(bp, i);
		if (rc)
			return rc;
	}
	return rc;
}
void bnxt_free_hwrm_resources(struct bnxt *bp)
{
	/* Release the rte_malloc'd HWRM command/response buffers */
	rte_free(bp->hwrm_cmd_resp_addr);
	rte_free(bp->hwrm_short_cmd_req_addr);
	bp->hwrm_cmd_resp_addr = NULL;
	bp->hwrm_short_cmd_req_addr = NULL;
	bp->hwrm_cmd_resp_dma_addr = 0;
	bp->hwrm_short_cmd_req_dma_addr = 0;
}
int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
	struct rte_pci_device *pdev = bp->pdev;
	char type[RTE_MEMZONE_NAMESIZE];

	sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	bp->max_resp_len = HWRM_MAX_RESP_LEN;
	bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
	if (bp->hwrm_cmd_resp_addr == NULL)
		return -ENOMEM;
	rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
	bp->hwrm_cmd_resp_dma_addr =
		rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
	if (bp->hwrm_cmd_resp_dma_addr == 0) {
		PMD_DRV_LOG(ERR,
			"unable to map response address to physical memory\n");
		return -ENOMEM;
	}
	rte_spinlock_init(&bp->hwrm_lock);

	return 0;
}
int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			rc = bnxt_hwrm_clear_em_filter(bp, filter);
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
		else
			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
	}
	return rc;
}
static int
bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	struct rte_flow *flow;
	int rc = 0;

	STAILQ_FOREACH(flow, &vnic->flow_list, next) {
		filter = flow->filter;
		PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			rc = bnxt_hwrm_clear_em_filter(bp, filter);
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
		else
			rc = bnxt_hwrm_clear_l2_filter(bp, filter);

		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);
	}
	return rc;
}
int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
						     filter);
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
							 filter);
		else
			rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
						     filter);
	}
	return rc;
}
void bnxt_free_tunnel_ports(struct bnxt *bp)
{
	if (bp->vxlan_port_cnt)
		bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
	bp->vxlan_port = 0;

	if (bp->geneve_port_cnt)
		bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
	bp->geneve_port = 0;
}
void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
	int i;

	if (bp->vnic_info == NULL)
		return;

	/*
	 * Cleanup VNICs in reverse order, to make sure the L2 filter
	 * from vnic0 is last to be cleaned up.
	 */
	for (i = bp->nr_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		bnxt_clear_hwrm_vnic_flows(bp, vnic);

		bnxt_clear_hwrm_vnic_filters(bp, vnic);

		bnxt_hwrm_vnic_ctx_free(bp, vnic);

		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);

		bnxt_hwrm_vnic_free(bp, vnic);
	}
	/* Ring resources */
	bnxt_free_all_hwrm_rings(bp);
	bnxt_free_all_hwrm_ring_grps(bp);
	bnxt_free_all_hwrm_stat_ctxs(bp);
	bnxt_free_tunnel_ports(bp);
}
static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
{
	uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	switch (conf_link_speed) {
	case ETH_LINK_SPEED_10M_HD:
	case ETH_LINK_SPEED_100M_HD:
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
	}
	return hw_link_duplex;
}
static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
{
	return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
}
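
/*
 * Informative note: in rte_ethdev link_speeds the ETH_LINK_SPEED_FIXED bit
 * selects a fixed speed, so a cleared bit requests autonegotiation.
 * ETH_LINK_SPEED_AUTONEG (0) therefore yields 1 (autoneg) here, and any
 * value with ETH_LINK_SPEED_FIXED set yields 0.
 */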
static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
{
	uint16_t eth_link_speed = 0;

	if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
		return ETH_LINK_SPEED_AUTONEG;

	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
	case ETH_LINK_SPEED_100M:
	case ETH_LINK_SPEED_100M_HD:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
		break;
	case ETH_LINK_SPEED_1G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
		break;
	case ETH_LINK_SPEED_2_5G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
		break;
	case ETH_LINK_SPEED_10G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
		break;
	case ETH_LINK_SPEED_20G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
		break;
	case ETH_LINK_SPEED_25G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
		break;
	case ETH_LINK_SPEED_40G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
		break;
	case ETH_LINK_SPEED_50G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
		break;
	case ETH_LINK_SPEED_100G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
		break;
	default:
		PMD_DRV_LOG(ERR,
			    "Unsupported link speed %d; default to AUTO\n",
			    conf_link_speed);
		break;
	}
	return eth_link_speed;
}
#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
		ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
		ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
		ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
{
	uint32_t one_speed;

	if (link_speed == ETH_LINK_SPEED_AUTONEG)
		return 0;

	if (link_speed & ETH_LINK_SPEED_FIXED) {
		one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
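
		/*
		 * The check below requires one_speed to name exactly one
		 * speed: a power of two shares no bits with itself minus one
		 * (e.g. 0b0100 & 0b0011 == 0), while a multi-speed value does
		 * (e.g. 0b0110 & 0b0101 != 0).
		 */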
		if (one_speed & (one_speed - 1)) {
			PMD_DRV_LOG(ERR,
				"Invalid advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
		if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
			PMD_DRV_LOG(ERR,
				"Unsupported advertised speed (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	} else {
		if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
			PMD_DRV_LOG(ERR,
				"Unsupported advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	}
	return 0;
}
static uint16_t
bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
{
	uint16_t ret = 0;

	if (link_speed == ETH_LINK_SPEED_AUTONEG) {
		if (bp->link_info.support_speeds)
			return bp->link_info.support_speeds;
		link_speed = BNXT_SUPPORTED_SPEEDS;
	}

	if (link_speed & ETH_LINK_SPEED_100M)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_100M_HD)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_1G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
	if (link_speed & ETH_LINK_SPEED_2_5G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
	if (link_speed & ETH_LINK_SPEED_10G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
	if (link_speed & ETH_LINK_SPEED_20G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
	if (link_speed & ETH_LINK_SPEED_25G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
	if (link_speed & ETH_LINK_SPEED_40G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
	if (link_speed & ETH_LINK_SPEED_50G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
	if (link_speed & ETH_LINK_SPEED_100G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
	return ret;
}
static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
{
	uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;

	switch (hw_link_speed) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
		eth_link_speed = ETH_SPEED_NUM_100M;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
		eth_link_speed = ETH_SPEED_NUM_1G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
		eth_link_speed = ETH_SPEED_NUM_2_5G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
		eth_link_speed = ETH_SPEED_NUM_10G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
		eth_link_speed = ETH_SPEED_NUM_20G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
		eth_link_speed = ETH_SPEED_NUM_25G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
		eth_link_speed = ETH_SPEED_NUM_40G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
		eth_link_speed = ETH_SPEED_NUM_50G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
		eth_link_speed = ETH_SPEED_NUM_100G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
	default:
		PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
			    hw_link_speed);
		break;
	}
	return eth_link_speed;
}
static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
{
	uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;

	switch (hw_link_duplex) {
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	default:
		PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
			    hw_link_duplex);
		break;
	}
	return eth_link_duplex;
}
int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "Get link config failed with rc %d\n", rc);
		goto exit;
	}
	if (link_info->link_speed)
		link->link_speed =
			bnxt_parse_hw_link_speed(link_info->link_speed);
	else
		link->link_speed = ETH_SPEED_NUM_NONE;
	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
	link->link_status = link_info->link_up;
	link->link_autoneg = link_info->auto_mode ==
		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
		ETH_LINK_FIXED : ETH_LINK_AUTONEG;
exit:
	return rc;
}
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
	int rc = 0;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_link_info link_req;
	uint16_t speed, autoneg;

	if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
		return 0;

	rc = bnxt_valid_link_speed(dev_conf->link_speeds,
			bp->eth_dev->data->port_id);
	if (rc)
		goto error;

	memset(&link_req, 0, sizeof(link_req));
	link_req.link_up = link_up;
	if (!link_up)
		goto port_phy_cfg;

	autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
	/* Autoneg can be done only when the FW allows */
	if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
			      bp->link_info.force_link_speed)) {
		link_req.phy_flags |=
			HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		link_req.auto_link_speed_mask =
			bnxt_parse_eth_link_speed_mask(bp,
						       dev_conf->link_speeds);
	} else {
		if (bp->link_info.phy_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
		    bp->link_info.phy_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
		    bp->link_info.media_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
			PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
			rc = -EINVAL;
			goto error;
		}

		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
		/* If the user wants a particular speed, try that first. */
		if (speed)
			link_req.link_speed = speed;
		else if (bp->link_info.force_link_speed)
			link_req.link_speed = bp->link_info.force_link_speed;
		else
			link_req.link_speed = bp->link_info.auto_link_speed;
	}
	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
	link_req.auto_pause = bp->link_info.auto_pause;
	link_req.force_pause = bp->link_info.force_pause;

port_phy_cfg:
	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
	if (rc) {
		PMD_DRV_LOG(ERR,
			    "Set link config failed with rc %d\n", rc);
	}

error:
	return rc;
}
int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t flags;
	int rc = 0;

	HWRM_PREP(req, FUNC_QCFG);
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	/* Hardcoded 0xfff VLAN ID mask */
	bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
	flags = rte_le_to_cpu_16(resp->flags);
	if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
		bp->flags |= BNXT_FLAG_MULTI_HOST;

	switch (resp->port_partition_type) {
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	default:
		bp->port_partition_type = 0;
		break;
	}

	HWRM_UNLOCK();

	return rc;
}
static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
				   struct hwrm_func_qcaps_output *qcaps)
{
	qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
	memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
	       sizeof(qcaps->mac_address));
	qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
	qcaps->max_rx_rings = fcfg->num_rx_rings;
	qcaps->max_tx_rings = fcfg->num_tx_rings;
	qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
	qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
	qcaps->first_vf_id = 0;
	qcaps->max_vnics = fcfg->num_vnics;
	qcaps->max_decap_records = 0;
	qcaps->max_encap_records = 0;
	qcaps->max_tx_wm_flows = 0;
	qcaps->max_tx_em_flows = 0;
	qcaps->max_rx_wm_flows = 0;
	qcaps->max_rx_em_flows = 0;
	qcaps->max_flow_id = 0;
	qcaps->max_mcast_filters = fcfg->num_mcast_filters;
	qcaps->max_sp_tx_rings = 0;
	qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
}
static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
			HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
	req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
	req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
	req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				   ETHER_CRC_LEN + VLAN_TAG_SIZE);
	req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
	req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
	req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
	req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
	req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
	req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
	req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
	req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
	req.fid = rte_cpu_to_le_16(0xffff);

	HWRM_PREP(req, FUNC_CFG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
static void populate_vf_func_cfg_req(struct bnxt *bp,
				     struct hwrm_func_cfg_input *req,
				     int num_vfs)
{
	req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
			HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);

	req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				    ETHER_CRC_LEN + VLAN_TAG_SIZE);
	req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				    ETHER_CRC_LEN + VLAN_TAG_SIZE);
	req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
						(num_vfs + 1));
	req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
	req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
					       (num_vfs + 1));
	req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
	req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
	req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
	/* TODO: For now, do not support VMDq/RFS on VFs. */
	req->num_vnics = rte_cpu_to_le_16(1);
	req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
						 (num_vfs + 1));
}
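/*
 * Worked example for the even split above: with bp->max_tx_rings == 64
 * and num_vfs == 7, each of the eight functions (the PF plus seven VFs)
 * is offered 64 / (7 + 1) == 8 TX rings; the integer division quietly
 * leaves any remainder unassigned.
 */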
static void add_random_mac_if_needed(struct bnxt *bp,
				     struct hwrm_func_cfg_input *cfg_req,
				     int vf)
{
	struct ether_addr mac;

	if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
		return;

	/* Spell out all six zero octets instead of relying on the string
	 * literal's implicit NUL terminator for the sixth compared byte.
	 */
	if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
		cfg_req->enables |= rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
		eth_random_addr(cfg_req->dflt_mac_addr);
		bp->pf.vf_info[vf].random_mac = true;
	} else {
		memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
	}
}
static void reserve_resources_from_vf(struct bnxt *bp,
				      struct hwrm_func_cfg_input *cfg_req,
				      int vf)
{
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	/* Get the actual allocated values now */
	HWRM_PREP(req, FUNC_QCAPS);
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc) {
		PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
		copy_func_cfg_to_qcaps(cfg_req, resp);
	} else if (resp->error_code) {
		rc = rte_le_to_cpu_16(resp->error_code);
		PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
		copy_func_cfg_to_qcaps(cfg_req, resp);
	}

	bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
	bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
	/*
	 * TODO: While not supporting VMDq with VFs, max_vnics is always
	 * forced to 1 in this case:
	 * bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
	 */
	bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);

	HWRM_UNLOCK();
}
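/*
 * Example of the accounting above: if the PF pool held 64 TX rings and
 * the firmware reports 8 rings granted to this VF, bp->max_tx_rings
 * drops to 56 before the next VF is configured, so each later VF can
 * only be offered what actually remains in the pool.
 */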
int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	/* Query the default VLAN currently programmed for this VF */
	HWRM_PREP(req, FUNC_QCFG);
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	if (rc) {
		PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
		rc = -1;
	} else if (resp->error_code) {
		rc = rte_le_to_cpu_16(resp->error_code);
		PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
		rc = -1;
	} else {
		rc = rte_le_to_cpu_16(resp->vlan);
	}
	/* Always drop the HWRM lock, including on the error paths */
	HWRM_UNLOCK();

	return rc;
}
static int update_pf_resource_max(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	/* And copy the allocated numbers into the pf struct */
	HWRM_PREP(req, FUNC_QCFG);
	req.fid = rte_cpu_to_le_16(0xffff);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();

	/* Only TX ring value reflects actual allocation? TODO */
	bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
	bp->pf.evb_mode = resp->evb_mode;

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
{
	int rc;

	if (!BNXT_PF(bp)) {
		PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
		return -EINVAL;
	}

	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc)
		return rc;

	bp->pf.func_cfg_flags &=
		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
	bp->pf.func_cfg_flags |=
		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
	return rc;
}
int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	size_t req_buf_sz;
	size_t sz;
	int rc = 0;
	int i;

	if (!BNXT_PF(bp)) {
		PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
		return -EINVAL;
	}

	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc)
		return rc;

	bp->pf.active_vfs = num_vfs;

	/*
	 * First, configure the PF to only use one TX ring.  This ensures that
	 * there are enough rings for all VFs.
	 *
	 * If we don't do this, when we call func_alloc() later, we will lock
	 * extra rings to the PF that won't be available during func_cfg() of
	 * the VFs.
	 *
	 * This has been fixed with firmware versions above 20.6.54
	 */
	bp->pf.func_cfg_flags &=
		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
	bp->pf.func_cfg_flags |=
		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
	rc = bnxt_hwrm_pf_func_cfg(bp, 1);
	if (rc)
		return rc;

	/* Now, create and register a buffer to hold forwarded VF requests */
	req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
	bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
		page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
	if (bp->pf.vf_req_buf == NULL) {
		rc = -ENOMEM;
		goto error_free;
	}
	for (sz = 0; sz < req_buf_sz; sz += getpagesize())
		rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
	for (i = 0; i < num_vfs; i++)
		bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
					(i * HWRM_MAX_REQ_LEN);

	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		goto error_free;

	populate_vf_func_cfg_req(bp, &req, num_vfs);

	bp->pf.active_vfs = 0;
	for (i = 0; i < num_vfs; i++) {
		add_random_mac_if_needed(bp, &req, i);

		HWRM_PREP(req, FUNC_CFG);
		req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
		req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

		/* Clear enable flag for next pass */
		req.enables &= ~rte_cpu_to_le_32(
				HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);

		if (rc || resp->error_code) {
			PMD_DRV_LOG(ERR,
				"Failed to initialize VF %d\n", i);
			PMD_DRV_LOG(ERR,
				"Not all VFs available. (%d, %d)\n",
				rc, resp->error_code);
			HWRM_UNLOCK();
			break;
		}

		HWRM_UNLOCK();

		reserve_resources_from_vf(bp, &req, i);
		bp->pf.active_vfs++;
		bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
	}

	/*
	 * Now configure the PF to use "the rest" of the resources.
	 * We're using STD_TX_RING_MODE here, which will limit the TX
	 * rings.  This will allow QoS to function properly.  Not setting this
	 * will cause PF rings to break bandwidth settings.
	 */
	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
	if (rc)
		goto error_free;

	rc = update_pf_resource_max(bp);
	if (rc)
		goto error_free;

	return rc;

error_free:
	bnxt_hwrm_func_buf_unrgtr(bp);
	return rc;
}
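/*
 * Layout of the VF request-forwarding buffer registered above: one
 * fixed-size slot per VF, carved out of a single allocation that is
 * page-locked as a whole.
 *
 *   vf_req_buf --> +----------------------+
 *                  | VF 0 request slot    |  HWRM_MAX_REQ_LEN bytes
 *                  +----------------------+
 *                  | VF 1 request slot    |  HWRM_MAX_REQ_LEN bytes
 *                  +----------------------+
 *                  | ...                  |
 *                  +----------------------+
 *                  | VF num_vfs-1 slot    |  HWRM_MAX_REQ_LEN bytes
 *                  +----------------------+
 */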
int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(req, FUNC_CFG);

	req.fid = rte_cpu_to_le_16(0xffff);
	req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
	req.evb_mode = bp->pf.evb_mode;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
				uint8_t tunnel_type)
{
	struct hwrm_tunnel_dst_port_alloc_input req = {0};
	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
	req.tunnel_type = tunnel_type;
	req.tunnel_dst_port_val = port;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();

	switch (tunnel_type) {
	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
		bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
		bp->vxlan_port = port;
		break;
	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
		bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
		bp->geneve_port = port;
		break;
	default:
		break;
	}

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
				uint8_t tunnel_type)
{
	struct hwrm_tunnel_dst_port_free_input req = {0};
	struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(req, TUNNEL_DST_PORT_FREE);

	req.tunnel_type = tunnel_type;
	req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
					uint32_t flags)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_CFG);

	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	req.flags = rte_cpu_to_le_32(flags);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
{
	uint32_t *flag = flagp;

	vnic->flags = *flag;
}

int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}
int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_BUF_RGTR);

	req.req_buf_num_pages = rte_cpu_to_le_16(1);
	req.req_buf_page_size = rte_cpu_to_le_16(
			 page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
	req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
	req.req_buf_page_addr[0] =
		rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
	if (req.req_buf_page_addr[0] == 0) {
		PMD_DRV_LOG(ERR,
			"unable to map buffer address to physical memory\n");
		/* Drop the lock taken by HWRM_PREP() before bailing out */
		HWRM_UNLOCK();
		return -ENOMEM;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_BUF_UNRGTR);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_CFG);

	req.fid = rte_cpu_to_le_16(0xffff);
	req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
	req.async_event_cr = rte_cpu_to_le_16(
			bp->def_cp_ring->cp_ring_struct->fw_ring_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
{
	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_vf_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_VF_CFG);

	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
	req.async_event_cr = rte_cpu_to_le_16(
			bp->def_cp_ring->cp_ring_struct->fw_ring_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t dflt_vlan, fid;
	uint32_t func_cfg_flags;
	int rc = 0;

	HWRM_PREP(req, FUNC_CFG);

	if (is_vf) {
		dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
		fid = bp->pf.vf_info[vf].fid;
		func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
	} else {
		fid = rte_cpu_to_le_16(0xffff);
		func_cfg_flags = bp->pf.func_cfg_flags;
		dflt_vlan = bp->vlan;
	}

	req.flags = rte_cpu_to_le_32(func_cfg_flags);
	req.fid = rte_cpu_to_le_16(fid);
	req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
	req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
			uint16_t max_bw, uint16_t enables)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_CFG);

	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	req.enables |= rte_cpu_to_le_32(enables);
	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
	req.max_bw = rte_cpu_to_le_32(max_bw);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(req, FUNC_CFG);

	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
	req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
			      void *encaped, size_t ec_size)
{
	int rc = 0;
	struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	if (ec_size > sizeof(req.encap_request))
		return -1;

	HWRM_PREP(req, REJECT_FWD_RESP);

	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
	memcpy(req.encap_request, encaped, ec_size);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
				       struct ether_addr *mac)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(req, FUNC_QCFG);

	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
			    void *encaped, size_t ec_size)
{
	int rc = 0;
	struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	if (ec_size > sizeof(req.encap_request))
		return -1;

	HWRM_PREP(req, EXEC_FWD_RESP);

	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
	memcpy(req.encap_request, encaped, ec_size);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
			 struct rte_eth_stats *stats, uint8_t rx)
{
	int rc = 0;
	struct hwrm_stat_ctx_query_input req = {.req_type = 0};
	struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_QUERY);

	req.stat_ctx_id = rte_cpu_to_le_32(cid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	if (rx) {
		stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
		stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
		stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
		stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
		stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
		stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
		stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
		stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
	} else {
		stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
		stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
		stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
		stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
		stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
		stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
		stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
	}

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_port_qstats(struct bnxt *bp)
{
	struct hwrm_port_qstats_input req = {0};
	struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_pf_info *pf = &bp->pf;
	int rc;

	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
		return 0;

	HWRM_PREP(req, PORT_QSTATS);

	req.port_id = rte_cpu_to_le_16(pf->port_id);
	req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
	req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
{
	struct hwrm_port_clr_stats_input req = {0};
	struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_pf_info *pf = &bp->pf;
	int rc;

	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
		return 0;

	HWRM_PREP(req, PORT_CLR_STATS);

	req.port_id = rte_cpu_to_le_16(pf->port_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
{
	struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_port_led_qcaps_input req = {0};
	int rc;

	if (BNXT_VF(bp))
		return 0;

	HWRM_PREP(req, PORT_LED_QCAPS);
	req.port_id = bp->pf.port_id;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
		unsigned int i;

		bp->num_leds = resp->num_leds;
		memcpy(bp->leds, &resp->led0_id,
			sizeof(bp->leds[0]) * bp->num_leds);
		for (i = 0; i < bp->num_leds; i++) {
			struct bnxt_led_info *led = &bp->leds[i];
			uint16_t caps = led->led_state_caps;

			/* Only usable if every LED has a group and can
			 * alternate-blink; otherwise disable LED control.
			 */
			if (!led->led_group_id ||
			    !BNXT_LED_ALT_BLINK_CAP(caps)) {
				bp->num_leds = 0;
				break;
			}
		}
	}

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
{
	struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_port_led_cfg_input req = {0};
	struct bnxt_led_cfg *led_cfg;
	uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
	uint16_t duration = 0;
	int rc, i;

	if (!bp->num_leds || BNXT_VF(bp))
		return -EOPNOTSUPP;

	HWRM_PREP(req, PORT_LED_CFG);

	if (led_on) {
		led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
		duration = rte_cpu_to_le_16(500);
	}
	req.port_id = bp->pf.port_id;
	req.num_leds = bp->num_leds;
	led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
	for (i = 0; i < bp->num_leds; i++, led_cfg++) {
		req.enables |= BNXT_LED_DFLT_ENABLES(i);
		led_cfg->led_id = bp->leds[i].led_id;
		led_cfg->led_state = led_state;
		led_cfg->led_blink_on = duration;
		led_cfg->led_blink_off = duration;
		led_cfg->led_group_id = bp->leds[i].led_group_id;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
			       uint32_t *length)
{
	int rc;
	struct hwrm_nvm_get_dir_info_input req = {0};
	struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, NVM_GET_DIR_INFO);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	*entries = rte_le_to_cpu_32(resp->entries);
	*length = rte_le_to_cpu_32(resp->entry_length);

	HWRM_UNLOCK();

	return rc;
}
int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
{
	int rc;
	uint32_t dir_entries;
	uint32_t entry_length;
	uint8_t *buf;
	size_t buflen;
	rte_iova_t dma_handle;
	struct hwrm_nvm_get_dir_entries_input req = {0};
	struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;

	rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
	if (rc != 0)
		return rc;

	*data++ = dir_entries;
	*data++ = entry_length;
	len -= 2;
	memset(data, 0xff, len);

	buflen = dir_entries * entry_length;
	buf = rte_malloc("nvm_dir", buflen, 0);
	if (buf == NULL)
		return -ENOMEM;
	rte_mem_lock_page(buf);

	dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == 0) {
		PMD_DRV_LOG(ERR,
			"unable to map response address to physical memory\n");
		/* Don't leak the DMA buffer on the error path */
		rte_free(buf);
		return -ENOMEM;
	}
	HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
	req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	memcpy(data, buf, len > buflen ? buflen : len);
	rte_free(buf);

	return rc;
}
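/*
 * Note on the buffer filled above: data[0] holds the directory entry
 * count and data[1] the per-entry length (each truncated to 8 bits),
 * followed by up to len - 2 bytes of raw directory entries copied from
 * the DMA buffer; any unused tail keeps the 0xff fill from memset().
 */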
int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
			     uint32_t offset, uint32_t length,
			     uint8_t *data)
{
	int rc;
	uint8_t *buf;
	rte_iova_t dma_handle;
	struct hwrm_nvm_read_input req = {0};
	struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;

	buf = rte_malloc("nvm_item", length, 0);
	if (!buf)
		return -ENOMEM;
	rte_mem_lock_page(buf);

	dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == 0) {
		PMD_DRV_LOG(ERR,
			"unable to map response address to physical memory\n");
		rte_free(buf);
		return -ENOMEM;
	}
	HWRM_PREP(req, NVM_READ);
	req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
	req.dir_idx = rte_cpu_to_le_16(index);
	req.offset = rte_cpu_to_le_32(offset);
	req.len = rte_cpu_to_le_32(length);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	memcpy(data, buf, length);
	rte_free(buf);

	return rc;
}
int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
{
	int rc;
	struct hwrm_nvm_erase_dir_entry_input req = {0};
	struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
	req.dir_idx = rte_cpu_to_le_16(index);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
			  uint16_t dir_ordinal, uint16_t dir_ext,
			  uint16_t dir_attr, const uint8_t *data,
			  size_t data_len)
{
	int rc;
	struct hwrm_nvm_write_input req = {0};
	struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
	rte_iova_t dma_handle;
	uint8_t *buf;

	HWRM_PREP(req, NVM_WRITE);

	req.dir_type = rte_cpu_to_le_16(dir_type);
	req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
	req.dir_ext = rte_cpu_to_le_16(dir_ext);
	req.dir_attr = rte_cpu_to_le_16(dir_attr);
	req.dir_data_length = rte_cpu_to_le_32(data_len);

	buf = rte_malloc("nvm_write", data_len, 0);
	if (!buf) {
		/* Release the lock taken by HWRM_PREP() on the error path */
		HWRM_UNLOCK();
		return -ENOMEM;
	}
	rte_mem_lock_page(buf);

	dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == 0) {
		PMD_DRV_LOG(ERR,
			"unable to map response address to physical memory\n");
		rte_free(buf);
		HWRM_UNLOCK();
		return -ENOMEM;
	}
	memcpy(buf, data, data_len);
	req.host_src_addr = rte_cpu_to_le_64(dma_handle);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	rte_free(buf);

	return rc;
}
static void
bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
{
	uint32_t *count = cbdata;

	*count = *count + 1;
}

static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
				     struct bnxt_vnic_info *vnic __rte_unused)
{
	return 0;
}

int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
{
	uint32_t count = 0;

	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
	    &count, bnxt_vnic_count_hwrm_stub);

	return count;
}
static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
					uint16_t *vnic_ids)
{
	struct hwrm_func_vf_vnic_ids_query_input req = {0};
	struct hwrm_func_vf_vnic_ids_query_output *resp =
						bp->hwrm_cmd_resp_addr;
	int rc;

	/* First query all VNIC ids */
	HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);

	req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
	req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
	req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));

	if (req.vnic_id_tbl_addr == 0) {
		HWRM_UNLOCK();
		PMD_DRV_LOG(ERR,
		"unable to map VNIC ID table address to physical memory\n");
		return -ENOMEM;
	}
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	if (rc) {
		HWRM_UNLOCK();
		PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
		return -1;
	} else if (resp->error_code) {
		rc = rte_le_to_cpu_16(resp->error_code);
		HWRM_UNLOCK();
		PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
		return -1;
	}
	rc = rte_le_to_cpu_32(resp->vnic_id_cnt);

	HWRM_UNLOCK();

	return rc;
}
/*
 * This function queries the VNIC IDs for a specified VF. It then calls
 * the vnic_cb to update the necessary field in vnic_info with cbdata.
 * Then it calls the hwrm_cb function to program this new vnic configuration.
 */
int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
	void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
	int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
{
	struct bnxt_vnic_info vnic;
	int rc = 0;
	int i, num_vnic_ids;
	uint16_t *vnic_ids;
	size_t vnic_id_sz;
	size_t sz;

	/* First query all VNIC ids */
	vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
	vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
			RTE_CACHE_LINE_SIZE);
	if (vnic_ids == NULL) {
		rc = -ENOMEM;
		return rc;
	}
	for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
		rte_mem_lock_page(((char *)vnic_ids) + sz);

	num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);

	if (num_vnic_ids < 0) {
		/* Don't leak the VNIC ID table on the error path */
		rte_free(vnic_ids);
		return num_vnic_ids;
	}

	/* Retrieve VNIC, update the callback-controlled field, then update */
	for (i = 0; i < num_vnic_ids; i++) {
		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
		if (rc)
			break;
		if (vnic.mru <= 4)	/* Indicates unallocated */
			continue;

		vnic_cb(&vnic, cbdata);

		rc = hwrm_cb(bp, &vnic);
		if (rc)
			break;
	}

	rte_free(vnic_ids);

	return rc;
}
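/*
 * Illustrative sketch (not part of the driver): one way a caller might
 * combine the query-and-config helper above with the callbacks defined
 * earlier in this file to clear the rx-mask flags on every VNIC of a
 * given VF.  The wrapper name and the BNXT_HWRM_EXAMPLES guard are ours.
 */
#ifdef BNXT_HWRM_EXAMPLES
static int example_vf_clear_rxmask(struct bnxt *bp, uint16_t vf)
{
	uint32_t flag = 0;	/* new vnic->flags value for every VNIC */

	/* vf_vnic_set_rxmask_cb() stores 'flag' into each vnic->flags;
	 * bnxt_set_rx_mask_no_vlan() then reprograms the rx mask.
	 */
	return bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
			vf_vnic_set_rxmask_cb, &flag,
			bnxt_set_rx_mask_no_vlan);
}
#endif /* BNXT_HWRM_EXAMPLES */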
int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
					      bool on)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_CFG);

	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
	req.vlan_antispoof_mode = on ?
		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
{
	struct bnxt_vnic_info vnic;
	uint16_t *vnic_ids;
	size_t vnic_id_sz;
	int num_vnic_ids, i;
	size_t sz;
	int rc;

	vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
	vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
			RTE_CACHE_LINE_SIZE);
	if (vnic_ids == NULL) {
		rc = -ENOMEM;
		return rc;
	}

	for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
		rte_mem_lock_page(((char *)vnic_ids) + sz);

	rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
	if (rc <= 0)
		goto exit;
	num_vnic_ids = rc;

	/*
	 * Loop through to find the default VNIC ID.
	 * TODO: The easier way would be to obtain the resp->dflt_vnic_id
	 * by sending the hwrm_func_qcfg command to the firmware.
	 */
	for (i = 0; i < num_vnic_ids; i++) {
		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
					bp->pf.first_vf_id + vf);
		if (rc)
			goto exit;
		if (vnic.func_default) {
			rte_free(vnic_ids);
			return vnic.fw_vnic_id;
		}
	}
	/* Could not find a default VNIC. */
	PMD_DRV_LOG(ERR, "No default VNIC\n");
exit:
	rte_free(vnic_ids);
	return rc;
}
int bnxt_hwrm_set_em_filter(struct bnxt *bp,
			 uint16_t dst_id,
			 struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	if (filter->fw_em_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_em_filter(bp, filter);

	HWRM_PREP(req, CFA_EM_FLOW_ALLOC);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
	      HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (filter->ip_addr_type) {
		req.ip_addr_type = filter->ip_addr_type;
		enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
	}
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
		memcpy(req.src_macaddr, filter->src_macaddr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
		memcpy(req.dst_macaddr, filter->dst_macaddr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
		req.ovlan_vid = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
		req.ivlan_vid = filter->l2_ivlan;
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
		req.ip_protocol = filter->ip_protocol;
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
		req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
		req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
		req.src_port = rte_cpu_to_be_16(filter->src_port);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
		req.dst_port = rte_cpu_to_be_16(filter->dst_port);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
		req.mirror_vnic_id = filter->mirror_vnic_id;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
	HWRM_UNLOCK();

	return rc;
}
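/*
 * Illustrative sketch (not part of the driver): programming an exact-
 * match filter that steers TCP traffic for one destination port to the
 * VNIC identified by dst_id, using only fields and enable bits that
 * appear above.  The helper name, the guard, and the assumption that
 * the caller pre-initialized filter->fw_em_filter_id to UINT64_MAX are
 * ours.
 */
#ifdef BNXT_HWRM_EXAMPLES
static int example_em_tcp_dst_port(struct bnxt *bp, uint16_t dst_id,
				   struct bnxt_filter_info *filter,
				   uint16_t tcp_dst_port)
{
	filter->ip_protocol = 6;	/* IPPROTO_TCP */
	filter->dst_port = tcp_dst_port;
	filter->enables =
		HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL |
		HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT;
	return bnxt_hwrm_set_em_filter(bp, dst_id, filter);
}
#endif /* BNXT_HWRM_EXAMPLES */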
int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
	struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (filter->fw_em_filter_id == UINT64_MAX)
		return 0;

	PMD_DRV_LOG(ERR, "Clear EM filter\n");
	HWRM_PREP(req, CFA_EM_FLOW_FREE);

	req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	filter->fw_em_filter_id = -1;
	filter->fw_l2_filter_id = -1;

	return 0;
}
int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
				uint16_t dst_id,
				struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_ntuple_filter_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	if (filter->fw_ntuple_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_ntuple_filter(bp, filter);

	HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (filter->ip_addr_type) {
		req.ip_addr_type = filter->ip_addr_type;
		enables |=
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
	}
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
		memcpy(req.src_macaddr, filter->src_macaddr,
		       ETHER_ADDR_LEN);
	/*
	 * Destination MAC matching is currently disabled:
	 * if (enables &
	 *     HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
	 *	memcpy(req.dst_macaddr, filter->dst_macaddr,
	 *	       ETHER_ADDR_LEN);
	 */
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
		req.ip_protocol = filter->ip_protocol;
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
		req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
		req.src_ipaddr_mask[0] =
			rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
		req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
	/* Keep the mask in the same (LE) byte order as the value above */
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
		req.dst_ipaddr_mask[0] =
			rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
		req.src_port = rte_cpu_to_le_16(filter->src_port);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
		req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
		req.dst_port = rte_cpu_to_le_16(filter->dst_port);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
		req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
		req.mirror_vnic_id = filter->mirror_vnic_id;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
				struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_ntuple_filter_free_output *resp =
						bp->hwrm_cmd_resp_addr;

	if (filter->fw_ntuple_filter_id == UINT64_MAX)
		return 0;

	HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);

	req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	filter->fw_ntuple_filter_id = -1;

	return 0;
}
int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	unsigned int rss_idx, fw_idx, i;

	if (vnic->rss_table && vnic->hash_type) {
		/*
		 * Fill the RSS hash & redirection table with
		 * ring group ids for all VNICs
		 */
		for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
			rss_idx++, fw_idx++) {
			for (i = 0; i < bp->rx_cp_nr_rings; i++) {
				fw_idx %= bp->rx_cp_nr_rings;
				if (vnic->fw_grp_ids[fw_idx] !=
						INVALID_HW_RING_ID)
					break;
				fw_idx++;
			}
			if (i == bp->rx_cp_nr_rings)
				return 0;
			vnic->rss_table[rss_idx] =
				vnic->fw_grp_ids[fw_idx];
		}
		return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
	}

	return 0;
}
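/*
 * Example of the redirection fill above: with four Rx rings where ring
 * group 2 is INVALID_HW_RING_ID, successive RSS table slots receive
 * groups 0, 1, 3, 0, 1, 3, ... so valid ring groups rotate round-robin
 * while unallocated ones are skipped.
 */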