/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#define HWRM_SPEC_CODE_1_8_3	0x10803
#define HWRM_VERSION_1_9_1	0x10901
#define HWRM_VERSION_1_9_2	0x10903
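
/*
 * HWRM spec codes pack the interface version as
 * (major << 16) | (minor << 8) | update, matching how bp->hwrm_spec_code
 * is assembled from the VER_GET response below.
 */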
struct bnxt_plcmodes_cfg {
	uint16_t	jumbo_thresh;
	uint16_t	hds_threshold;

static int page_getenum(size_t size)
	PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
	return sizeof(void *) * 8 - 1;

static int page_roundup(size_t size)
	return 1 << page_getenum(size);
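
/*
 * Worked example (assuming the usual power-of-two rounding behavior):
 * page_getenum(4096) yields 12 and page_roundup(6000) yields
 * 1 << 13 = 8192, i.e. sizes are rounded up to the next power-of-two
 * page size.
 */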
static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
	if (rmem->nr_pages > 1) {
		*pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
		*pg_dir = rte_cpu_to_le_64(rmem->dma_arr[0]);
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return 0 on success, -ETIMEDOUT (-110)
 * if the HWRM command times out, or a negative error code if the HWRM
 * command was failed by the FW.
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
				  uint32_t msg_len, bool use_kong_mb)
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t max_req_len = bp->max_req_len;
	struct hwrm_short_input short_input = { 0 };
	uint16_t bar_offset = use_kong_mb ?
		GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
	uint16_t mb_trigger_offset = use_kong_mb ?
		GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
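
	/*
	 * Requests normally go to the primary (ChiMP) mailbox; callers pass
	 * use_kong_mb to target the secondary (KonG) channel instead, e.g.
	 * for the tunneled HWRM_TF commands further below.
	 */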
	/* Do not send HWRM commands to firmware in error state */
	if (bp->flags & BNXT_FLAG_FATAL_ERROR)

	timeout = bp->hwrm_cmd_timeout;

	if (bp->flags & BNXT_FLAG_SHORT_CMD ||
	    msg_len > bp->max_req_len) {
		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

		memset(short_cmd_req, 0, bp->hwrm_max_ext_req_len);
		memcpy(short_cmd_req, req, msg_len);

		short_input.req_type = rte_cpu_to_le_16(req->req_type);
		short_input.signature = rte_cpu_to_le_16(
					HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
		short_input.size = rte_cpu_to_le_16(msg_len);
		short_input.req_addr =
			rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

		data = (uint32_t *)&short_input;
		msg_len = sizeof(short_input);

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
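
		/*
		 * In short-command mode only the 16-byte hwrm_short_input
		 * descriptor is written to the BAR below; the firmware
		 * fetches the full request from the DMA buffer referenced
		 * by short_input.req_addr.
		 */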
	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + bar_offset + i;
		rte_write32(*data, bar);

	/* Zero the rest of the request space */
	for (; i < max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + bar_offset + i;

	/* Ring channel doorbell */
	bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
	 * Make sure the channel doorbell ring command completes before
	 * reading the response, to avoid getting stale or invalid data.
	/* Poll for the valid bit */
	for (i = 0; i < timeout; i++) {
		/* Sanity check on the resp->resp_len */
		if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)

	/* Suppress VER_GET timeout messages during reset recovery */
	if (bp->flags & BNXT_FLAG_FW_RESET &&
	    rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)

	PMD_DRV_LOG(ERR, "Error(timeout) sending msg 0x%04x\n",
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() returns from the calling function on error, releasing
 * the spinlock only on that early-return path. If a function does not use
 * the regular int return codes, HWRM_CHECK_RESULT() should not be used
 * directly; instead it should be copied and modified to suit the function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
#define HWRM_PREP(req, type, kong) do { \
	rte_spinlock_lock(&bp->hwrm_lock); \
	if (bp->hwrm_cmd_resp_addr == NULL) { \
		rte_spinlock_unlock(&bp->hwrm_lock); \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	(req)->req_type = rte_cpu_to_le_16(type); \
	(req)->cmpl_ring = rte_cpu_to_le_16(-1); \
	(req)->seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) : \
		rte_cpu_to_le_16(bp->chimp_cmd_seq++); \
	(req)->target_id = rte_cpu_to_le_16(0xffff); \
	(req)->resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \

#define HWRM_CHECK_RESULT_SILENT() do { \
		rte_spinlock_unlock(&bp->hwrm_lock); \
	if (resp->error_code) { \
		rc = rte_le_to_cpu_16(resp->error_code); \
		rte_spinlock_unlock(&bp->hwrm_lock); \

#define HWRM_CHECK_RESULT() do { \
		PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
		else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
		else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
		else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
		else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
	if (resp->error_code) { \
		rc = rte_le_to_cpu_16(resp->error_code); \
		if (resp->resp_len >= 16) { \
			struct hwrm_err_output *tmp_hwrm_err_op = \
				"error %d:%d:%08x:%04x\n", \
				rc, tmp_hwrm_err_op->cmd_err, \
				tmp_hwrm_err_op->opaque_0), \
				tmp_hwrm_err_op->opaque_1)); \
			PMD_DRV_LOG(ERR, "error %d\n", rc); \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
		else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
		else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
		else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
		else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \

#define HWRM_UNLOCK() rte_spinlock_unlock(&bp->hwrm_lock)
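
/*
 * Illustrative sketch (not part of the driver): a typical HWRM command
 * pairs HWRM_PREP() with HWRM_CHECK_RESULT()/HWRM_UNLOCK(), reading the
 * response only while the lock is held:
 *
 *	struct hwrm_func_reset_input req = {.req_type = 0 };
 *	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
 *	int rc;
 *
 *	HWRM_PREP(&req, HWRM_FUNC_RESET, BNXT_USE_CHIMP_MB);
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 *	HWRM_CHECK_RESULT();	(returns and unlocks on error)
 *	... read fields from resp ...
 *	HWRM_UNLOCK();
 */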
int bnxt_hwrm_tf_message_direct(struct bnxt *bp,
	bool mailbox = BNXT_USE_CHIMP_MB;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;

		mailbox = BNXT_USE_KONG(bp);

	HWRM_PREP(req, msg_type, mailbox);

	rc = bnxt_hwrm_send_message(bp, req, msg_len, mailbox);

		memcpy(resp_msg, resp, resp_len);

int bnxt_hwrm_tf_message_tunneled(struct bnxt *bp,
				  uint32_t *tf_response_code,
				  uint32_t response_len)
	struct hwrm_cfa_tflib_input req = { .req_type = 0 };
	struct hwrm_cfa_tflib_output *resp = bp->hwrm_cmd_resp_addr;
	bool mailbox = BNXT_USE_CHIMP_MB;

	if (msg_len > sizeof(req.tf_req))

		mailbox = BNXT_USE_KONG(bp);

	HWRM_PREP(&req, HWRM_TF, mailbox);
	/* Build the request using the user-supplied request payload.
	 * The TLV request size is checked at build time against the HWRM
	 * request max size, thus no checking is required here.
	req.tf_type = tf_type;
	req.tf_subtype = tf_subtype;
	memcpy(req.tf_req, msg, msg_len);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), mailbox);
	/* Copy the response to the user-provided response buffer */
	if (response != NULL)
		/* Post-process the response data. We need to copy only
		 * the 'payload', as the HWRM data structure really is
		 * HWRM header + msg header + payload, and TFLIB
		 * only provided a payload placeholder.
		if (response_len != 0) {

	/* Extract the internal tflib response code */
	*tf_response_code = resp->tf_resp_code;

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
				 struct bnxt_vnic_info *vnic,
				 struct bnxt_vlan_table_entry *vlan_table)
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)

	HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	if (vnic->flags & BNXT_VNIC_INFO_BCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
	if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;

	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;

	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI) {
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	} else if (vnic->flags & BNXT_VNIC_INFO_MCAST) {
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
		req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
		req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);

		if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
			mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
		req.vlan_tag_tbl_addr =
			rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
		req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);

	req.mask = rte_cpu_to_le_32(mask);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
			struct bnxt_vlan_antispoof_table_entry *vlan_table)
	struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
	struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
		bp->hwrm_cmd_resp_addr;
	 * Older HWRM versions did not support this command, and the
	 * set_rx_mask list was used for anti-spoofing. In 1.8.0, the TX path
	 * configuration was removed from the set_rx_mask call, and this
	 * command was added.
	 *
	 * This command is also present from 1.7.8.11 and higher,
	if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
		if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
			if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
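
	/*
	 * bp->fw_ver packs the firmware version as
	 * (major << 24) | (minor << 16) | (build << 8) | reserved, so
	 * 1.8.0.0 is 0x01080000 and 1.7.8.11 is 0x0107080b.
	 */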
	HWRM_PREP(&req, HWRM_CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(fid);

	req.vlan_tag_mask_tbl_addr =
		rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
	req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
			      struct bnxt_filter_info *filter)
	struct bnxt_filter_info *l2_filter = filter;
	struct bnxt_vnic_info *vnic = NULL;
	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (filter->fw_l2_filter_id == UINT64_MAX)

	if (filter->matching_l2_fltr_ptr)
		l2_filter = filter->matching_l2_fltr_ptr;

	PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n",
		    filter, l2_filter, l2_filter->l2_ref_cnt);

	if (l2_filter->l2_ref_cnt == 0)

	if (l2_filter->l2_ref_cnt > 0)
		l2_filter->l2_ref_cnt--;

	if (l2_filter->l2_ref_cnt > 0)

	HWRM_PREP(&req, HWRM_CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);

	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	filter->fw_l2_filter_id = UINT64_MAX;
	if (l2_filter->l2_ref_cnt == 0) {
		vnic = l2_filter->vnic;
			STAILQ_REMOVE(&vnic->filter, l2_filter,
				      bnxt_filter_info, next);
			bnxt_free_filter(bp, l2_filter);

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
			    struct bnxt_filter_info *filter)
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	const struct rte_eth_vmdq_rx_conf *conf =
		&dev_conf->rx_adv_conf.vmdq_rx_conf;
	uint32_t enables = 0;
	uint16_t j = dst_id - 1;

	//TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
	if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
	    conf->pool_map[j].pools & (1UL << j)) {
			    "Add vlan %u to vmdq pool %u\n",
			    conf->pool_map[j].vlan_id, j);

		filter->l2_ivlan = conf->pool_map[j].vlan_id;
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;

	if (filter->fw_l2_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_l2_filter(bp, filter);

	HWRM_PREP(&req, HWRM_CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
		  HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr,
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
		req.l2_ivlan = filter->l2_ivlan;
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
		req.l2_ivlan_mask = filter->l2_ivlan_mask;
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
		req.src_id = rte_cpu_to_le_32(filter->src_id);
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
		req.src_type = filter->src_type;
	if (filter->pri_hint) {
		req.pri_hint = filter->pri_hint;
		req.l2_filter_id_hint =
			rte_cpu_to_le_64(filter->l2_filter_id_hint);

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
	filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
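
	/*
	 * L2 filters can be shared by multiple flows; the reference taken
	 * here is dropped in bnxt_hwrm_clear_l2_filter().
	 */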
	filter->l2_ref_cnt++;

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
	struct hwrm_port_mac_cfg_input req = {.req_type = 0};
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

	HWRM_PREP(&req, HWRM_PORT_MAC_CFG, BNXT_USE_CHIMP_MB);

		flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
			HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
	if (ptp->tx_tstamp_en)
		flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
			HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
	req.flags = rte_cpu_to_le_32(flags);
	req.enables = rte_cpu_to_le_32(
		HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
	req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
	struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
	struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

	HWRM_PREP(&req, HWRM_PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);

	req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	if (!BNXT_CHIP_THOR(bp) &&
	    !(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))

	if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS)
		bp->flags |= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS;

	ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);

	if (!BNXT_CHIP_THOR(bp)) {
		ptp->rx_regs[BNXT_PTP_RX_TS_L] =
			rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
		ptp->rx_regs[BNXT_PTP_RX_TS_H] =
			rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
		ptp->rx_regs[BNXT_PTP_RX_SEQ] =
			rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
		ptp->rx_regs[BNXT_PTP_RX_FIFO] =
			rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
		ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
			rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
		ptp->tx_regs[BNXT_PTP_TX_TS_L] =
			rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
		ptp->tx_regs[BNXT_PTP_TX_TS_H] =
			rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
		ptp->tx_regs[BNXT_PTP_TX_SEQ] =
			rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
		ptp->tx_regs[BNXT_PTP_TX_FIFO] =
			rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);

static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t new_max_vfs;

	HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	flags = rte_le_to_cpu_32(resp->flags);

	bp->pf.port_id = resp->port_id;
	bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
	bp->pf.total_vfs = rte_le_to_cpu_16(resp->max_vfs);
	new_max_vfs = bp->pdev->max_vfs;
	if (new_max_vfs != bp->pf.max_vfs) {
			rte_free(bp->pf.vf_info);
		bp->pf.vf_info = rte_malloc("bnxt_vf_info",
			sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
		bp->pf.max_vfs = new_max_vfs;
		for (i = 0; i < new_max_vfs; i++) {
			bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
			bp->pf.vf_info[i].vlan_table =
				rte_zmalloc("VF VLAN table",
			if (bp->pf.vf_info[i].vlan_table == NULL)
710 "Fail to alloc VLAN table for VF %d\n",
					bp->pf.vf_info[i].vlan_table);
			bp->pf.vf_info[i].vlan_as_table =
				rte_zmalloc("VF VLAN AS table",
			if (bp->pf.vf_info[i].vlan_as_table == NULL)
721 "Alloc VLAN AS table for VF %d fail\n",
					bp->pf.vf_info[i].vlan_as_table);
			STAILQ_INIT(&bp->pf.vf_info[i].filter);

	bp->fw_fid = rte_le_to_cpu_32(resp->fid);
	memcpy(bp->dflt_mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);
	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
	bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
	bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
	if (!BNXT_CHIP_THOR(bp))
		bp->max_l2_ctx += bp->max_rx_em_flows;
	/* TODO: For now, do not support VMDq/RFS on VFs. */
	bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	PMD_DRV_LOG(DEBUG, "Max l2_ctxs is %d, vnics is %d\n",
		    bp->max_l2_ctx, bp->max_vnics);
	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
		bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
		bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
		PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n");
		bnxt_hwrm_ptp_qcfg(bp);

	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
		bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;

	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
		bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
		PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");

	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
		bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;

	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE)
		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
	rc = __bnxt_hwrm_func_qcaps(bp);
	if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
		rc = bnxt_alloc_ctx_mem(bp);
		rc = bnxt_hwrm_func_resc_qcaps(bp);
			bp->flags |= BNXT_FLAG_NEW_RM;
	 * bnxt_hwrm_func_resc_qcaps can fail and cause an init failure,
	 * but the error can be ignored, so return success.

/* VNIC caps cover the capabilities of all VNICs, so no need to pass a vnic_id */
int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
	struct hwrm_vnic_qcaps_input req = {.req_type = 0 };
	struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_VNIC_QCAPS, BNXT_USE_CHIMP_MB);

	req.target_id = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	if (rte_le_to_cpu_32(resp->flags) &
	    HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP) {
		bp->vnic_cap_flags |= BNXT_VNIC_CAP_COS_CLASSIFY;
		PMD_DRV_LOG(INFO, "CoS assignment capability enabled\n");

	bp->max_tpa_v2 = rte_le_to_cpu_16(resp->max_aggs_supported);

int bnxt_hwrm_func_reset(struct bnxt *bp)
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_FUNC_RESET, BNXT_USE_CHIMP_MB);

	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)

	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
		flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;
	/* PFs and trusted VFs should indicate support for the Master
	 * capability on non-Stingray platforms.
	if ((BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) && !BNXT_STINGRAY(bp))
		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;

	HWRM_PREP(&req, HWRM_FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

		req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
		memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
		       RTE_MIN(sizeof(req.vf_req_fwd),
			       sizeof(bp->pf.vf_req_fwd)));
		 * PF can sniff HWRM API calls issued by a VF. This can be set
		 * up by the Linux driver and inherited by the DPDK PF driver.
		 * Clear this HWRM sniffer list in the FW because the DPDK PF
		 * driver does not support it.
		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE;

	req.flags = rte_cpu_to_le_32(flags);
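
	/*
	 * Each bit set in async_event_fwd[] below asks the firmware to
	 * forward the corresponding async completion event to this
	 * function.
	 */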
	req.async_event_fwd[0] |=
		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
				 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
				 ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
		req.async_event_fwd[0] |=
			rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
	req.async_event_fwd[1] |=
		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
				 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
		req.async_event_fwd[1] |=
			rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DBG_NOTIFICATION);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	flags = rte_le_to_cpu_32(resp->flags);
	if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;

	bp->flags |= BNXT_FLAG_REGISTERED;

int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
	if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))

	return bnxt_hwrm_func_reserve_vf_resc(bp, true);

int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_vf_cfg_input req = {0};

	HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);

	enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS |
		  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS |
		  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
		  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
		  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS;

	if (BNXT_HAS_RING_GRPS(bp)) {
		enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
		req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);

	req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
	req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
					    AGG_RING_MULTIPLIER);
	req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
	req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
					      BNXT_NUM_ASYNC_CPR(bp));
	req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
	if (bp->vf_resv_strategy ==
	    HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
		enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
			   HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
			   HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
		req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
		req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
		req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
	} else if (bp->vf_resv_strategy ==
		   HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MAXIMAL) {
		enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
		req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);

		flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
			HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
			HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
			HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
			HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
			HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;

	if (test && BNXT_HAS_RING_GRPS(bp))
		flags |= HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST;

	req.flags = rte_cpu_to_le_32(flags);
	req.enables |= rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT_SILENT();

int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
	struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_resource_qcaps_input req = {0};

	HWRM_PREP(&req, HWRM_FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT_SILENT();

	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	/* func_resource_qcaps does not return max_rx_em_flows.
	 * So use the value provided by func_qcaps.
	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
	if (!BNXT_CHIP_THOR(bp))
		bp->max_l2_ctx += bp->max_rx_em_flows;
	bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
	bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);
	bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
	if (bp->vf_resv_strategy >
	    HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
		bp->vf_resv_strategy =
		HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;

int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
	struct hwrm_ver_get_input req = {.req_type = 0 };
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t fw_version;
	uint16_t max_resp_len;
	char type[RTE_MEMZONE_NAMESIZE];
	uint32_t dev_caps_cfg;

	bp->max_req_len = HWRM_MAX_REQ_LEN;
	bp->hwrm_cmd_timeout = timeout;
	HWRM_PREP(&req, HWRM_VER_GET, BNXT_USE_CHIMP_MB);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	if (bp->flags & BNXT_FLAG_FW_RESET)
		HWRM_CHECK_RESULT_SILENT();
		HWRM_CHECK_RESULT();

	PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
		    resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
		    resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
		    resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
	bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
		     (resp->hwrm_fw_min_8b << 16) |
		     (resp->hwrm_fw_bld_8b << 8) |
		     resp->hwrm_fw_rsvd_8b;
	PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
		    HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

	fw_version = resp->hwrm_intf_maj_8b << 16;
	fw_version |= resp->hwrm_intf_min_8b << 8;
	fw_version |= resp->hwrm_intf_upd_8b;
	bp->hwrm_spec_code = fw_version;

	/* def_req_timeout value is in milliseconds */
	bp->hwrm_cmd_timeout = rte_le_to_cpu_16(resp->def_req_timeout);
	/* convert timeout to usec */
	bp->hwrm_cmd_timeout *= 1000;
	if (!bp->hwrm_cmd_timeout)
		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
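
	/*
	 * Example: a def_req_timeout of 500 ms becomes 500000 us here; a
	 * zero value falls back to DFLT_HWRM_CMD_TIMEOUT.
	 */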
	if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
		PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");

	if (bp->max_req_len > resp->max_req_win_len) {
		PMD_DRV_LOG(ERR, "Unsupported request length\n");
	bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
	bp->hwrm_max_ext_req_len = rte_le_to_cpu_16(resp->max_ext_req_len);
	if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
		bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;

	max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
	dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

	if (bp->max_resp_len != max_resp_len) {
		sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT,
			bp->pdev->addr.domain, bp->pdev->addr.bus,
			bp->pdev->addr.devid, bp->pdev->addr.function);

		rte_free(bp->hwrm_cmd_resp_addr);

		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
		if (bp->hwrm_cmd_resp_addr == NULL) {
		bp->hwrm_cmd_resp_dma_addr =
			rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
		if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
				"Unable to map response buffer to physical memory.\n");
		bp->max_resp_len = max_resp_len;

	     HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	     HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
		PMD_DRV_LOG(DEBUG, "Short command supported\n");
		bp->flags |= BNXT_FLAG_SHORT_CMD;

	if (((dev_caps_cfg &
	      HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	      HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) ||
	    bp->hwrm_max_ext_req_len > HWRM_MAX_REQ_LEN) {
		sprintf(type, "bnxt_hwrm_short_" PCI_PRI_FMT,
			bp->pdev->addr.domain, bp->pdev->addr.bus,
			bp->pdev->addr.devid, bp->pdev->addr.function);

		rte_free(bp->hwrm_short_cmd_req_addr);

		bp->hwrm_short_cmd_req_addr =
			rte_malloc(type, bp->hwrm_max_ext_req_len, 0);
		if (bp->hwrm_short_cmd_req_addr == NULL) {
		bp->hwrm_short_cmd_req_dma_addr =
			rte_malloc_virt2iova(bp->hwrm_short_cmd_req_addr);
		if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
			rte_free(bp->hwrm_short_cmd_req_addr);
				"Unable to map buffer to physical memory.\n");

	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
		bp->flags |= BNXT_FLAG_KONG_MB_EN;
		PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
		PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");
	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) {
		bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_MGMT;
		PMD_DRV_LOG(DEBUG, "FW supports advanced flow management\n");

	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED) {
		PMD_DRV_LOG(DEBUG, "FW supports advanced flow counters\n");
		bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_COUNTERS;

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))

	HWRM_PREP(&req, HWRM_FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(&req, HWRM_PORT_PHY_CFG, BNXT_USE_CHIMP_MB);

	if (conf->link_up) {
		/* Setting a fixed speed while autoneg is on, so disable autoneg */
		if (bp->link_info.auto_mode && conf->link_speed) {
			req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
			PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");

		req.flags = rte_cpu_to_le_32(conf->phy_flags);
		req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		if (!conf->link_speed) {
			/* No speeds specified. Enable AutoNeg - all speeds */
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
		/* AutoNeg - Advertise speeds specified. */
		if (conf->auto_link_speed_mask &&
		    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
			req.auto_link_speed_mask =
				conf->auto_link_speed_mask;
				HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;

		req.auto_duplex = conf->duplex;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		req.force_pause = conf->force_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause && !req.force_pause)
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

		req.enables = rte_cpu_to_le_32(enables);
			rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
		PMD_DRV_LOG(INFO, "Force Link Down\n");

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	link_info->phy_link_status = resp->link;
	link_info->link_up =
		(link_info->phy_link_status ==
		 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
	link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
	link_info->duplex = resp->duplex_cfg;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;
	link_info->phy_type = resp->phy_type;
	link_info->media_type = resp->media_type;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;

	PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
	PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
	PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
	PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
	PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
		    link_info->auto_link_speed_mask);
	PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
		    link_info->force_link_speed);

static bool bnxt_find_lossy_profile(struct bnxt *bp)
	for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
		if (bp->tx_cos_queue[i].profile ==
		    HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
			bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;

static void bnxt_find_first_valid_profile(struct bnxt *bp)
	for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
		if (bp->tx_cos_queue[i].profile !=
		    HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN &&
		    bp->tx_cos_queue[i].id !=
		    HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN) {
			bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;

	HWRM_PREP(&req, HWRM_QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);

	req.flags = rte_cpu_to_le_32(dir);
	/* Set DRV_QMAP_CAP only for HWRM version >= 1.9.1 and only when CoS
	 * classification is not required.
	 */
	if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1 &&
	    !(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
			HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
		GET_TX_QUEUE_INFO(0);
		GET_TX_QUEUE_INFO(1);
		GET_TX_QUEUE_INFO(2);
		GET_TX_QUEUE_INFO(3);
		GET_TX_QUEUE_INFO(4);
		GET_TX_QUEUE_INFO(5);
		GET_TX_QUEUE_INFO(6);
		GET_TX_QUEUE_INFO(7);
		GET_RX_QUEUE_INFO(0);
		GET_RX_QUEUE_INFO(1);
		GET_RX_QUEUE_INFO(2);
		GET_RX_QUEUE_INFO(3);
		GET_RX_QUEUE_INFO(4);
		GET_RX_QUEUE_INFO(5);
		GET_RX_QUEUE_INFO(6);
		GET_RX_QUEUE_INFO(7);

	if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX)

	if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
		bp->tx_cosq_id[0] = bp->tx_cos_queue[0].id;

	/* iterate and find the COSq profile to use for Tx */
	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
		for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
			if (bp->tx_cos_queue[i].id != 0xff)
				bp->tx_cosq_id[j++] =
					bp->tx_cos_queue[i].id;
		/* When CoS classification is disabled, for normal NIC
		 * operations, we should ideally use a LOSSY profile.
		 * If none is found, fall back to the first valid profile.
		if (!bnxt_find_lossy_profile(bp))
			bnxt_find_first_valid_profile(bp);

	bp->max_tc = resp->max_configurable_queues;
	bp->max_lltc = resp->max_configurable_lossless_queues;
	if (bp->max_tc > BNXT_MAX_QUEUE)
		bp->max_tc = BNXT_MAX_QUEUE;
	bp->max_q = bp->max_tc;

	if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
		dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX;

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id,
			 uint16_t tx_cosq_id)
	uint32_t enables = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct rte_mempool *mb_pool;
	uint16_t rx_buf_size;

	HWRM_PREP(&req, HWRM_RING_ALLOC, BNXT_USE_CHIMP_MB);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);
	req.length = rte_cpu_to_le_32(ring->ring_size);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.ring_type = ring_type;
		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
		req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
		req.queue_id = rte_cpu_to_le_16(tx_cosq_id);
		if (stats_ctx_id != INVALID_STATS_CTX_ID)
				HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
		req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
		if (BNXT_CHIP_THOR(bp)) {
			mb_pool = bp->rx_queues[0]->mb_pool;
			rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
				      RTE_PKTMBUF_HEADROOM;
			rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
			req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
				HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;
		if (stats_ctx_id != INVALID_STATS_CTX_ID)
				HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
		req.ring_type = ring_type;
		if (BNXT_HAS_NQ(bp)) {
			/* Association of cp ring with nq */
			req.nq_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
				HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID;
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
		req.ring_type = ring_type;
		req.page_size = BNXT_PAGE_SHFT;
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
		req.ring_type = ring_type;
		req.rx_ring_id = rte_cpu_to_le_16(ring->fw_rx_ring_id);

		mb_pool = bp->rx_queues[0]->mb_pool;
		rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
			      RTE_PKTMBUF_HEADROOM;
		rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
		req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);

		req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
		enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
			   HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
			   HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
		PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		switch (ring_type) {
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
				    "hwrm_ring_alloc cp failed. rc:%d\n", rc);
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
				    "hwrm_ring_alloc rx failed. rc:%d\n", rc);
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
				    "hwrm_ring_alloc rx agg failed. rc:%d\n",
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
				    "hwrm_ring_alloc tx failed. rc:%d\n", rc);
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
				    "hwrm_ring_alloc nq failed. rc:%d\n", rc);
			PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);

int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_RING_FREE, BNXT_USE_CHIMP_MB);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
		case HWRM_RING_FREE_INPUT_RING_TYPE_NQ:
				    "hwrm_ring_free nq failed. rc:%d\n", rc);
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
				    "hwrm_ring_free agg failed. rc:%d\n", rc);
			PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);

	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	bp->grp_info[idx].fw_grp_id = rte_le_to_cpu_16(resp->ring_group_id);

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_RING_GRP_FREE, BNXT_USE_CHIMP_MB);

	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)

	HWRM_PREP(&req, HWRM_STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);

	req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			     unsigned int idx __rte_unused)
	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);

	req.update_period_ms = rte_cpu_to_le_32(0);

	req.stats_dma_addr = rte_cpu_to_le_64(cpr->hw_stats_map);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id);

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			    unsigned int idx __rte_unused)
	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_STAT_CTX_FREE, BNXT_USE_CHIMP_MB);

	req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_vnic_alloc_input req = { 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	if (!BNXT_HAS_RING_GRPS(bp))
		goto skip_ring_grps;

	/* map ring groups to this vnic */
	PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
		    vnic->start_grp_id, vnic->end_grp_id);
	for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;

	vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
	vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;

	vnic->mru = BNXT_VNIC_MRU(bp->eth_dev->data->mtu);
	HWRM_PREP(&req, HWRM_VNIC_ALLOC, BNXT_USE_CHIMP_MB);

	if (vnic->func_default)
			rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);

	PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);

static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
					struct bnxt_vnic_info *vnic,
					struct bnxt_plcmodes_cfg *pmode)
	struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	pmode->flags = rte_le_to_cpu_32(resp->flags);
	/* dflt_vnic bit doesn't exist in the _cfg command */
	pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
	pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
	pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
	pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
				       struct bnxt_vnic_info *vnic,
				       struct bnxt_plcmodes_cfg *pmode)
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);

	HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.flags = rte_cpu_to_le_32(pmode->flags);
	req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
	req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
	req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
	req.enables = rte_cpu_to_le_32(
		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_plcmodes_cfg pmodes = { 0 };
	uint32_t ctx_enable_flag = 0;
	uint32_t enables = 0;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);

	rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);

	HWRM_PREP(&req, HWRM_VNIC_CFG, BNXT_USE_CHIMP_MB);

	if (BNXT_CHIP_THOR(bp)) {
		int dflt_rxq = vnic->start_grp_id;
		struct bnxt_rx_ring_info *rxr;
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_rx_queue *rxq;

		/*
		 * The first active receive ring is used as the VNIC
		 * default receive ring. If there are no active receive
		 * rings (all corresponding receive queues are stopped),
		 * the first receive ring is used.
		for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
			rxq = bp->eth_dev->data->rx_queues[i];
			if (rxq->rx_started) {

		rxq = bp->eth_dev->data->rx_queues[dflt_rxq];

		req.default_rx_ring_id =
			rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
		req.default_cmpl_ring_id =
			rte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id);
		enables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
			  HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID;
	/* Only RSS is supported for now; TBD: CoS & LB */
	enables = HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP;
	if (vnic->lb_rule != 0xffff)
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
	if (vnic->cos_rule != 0xffff)
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
	if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) {
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID;
		req.queue_id = rte_cpu_to_le_16(vnic->cos_queue_id);

	enables |= ctx_enable_flag;
	req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
	req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
	req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
	req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);

	req.enables = rte_cpu_to_le_32(enables);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.mru = rte_cpu_to_le_16(vnic->mru);
	/* Configure default VNIC only once. */
	if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {
			rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
		bp->flags |= BNXT_FLAG_DFLT_VNIC_SET;
	if (vnic->vlan_strip)
			rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
			rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
	if (vnic->roce_dual)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
	if (vnic->roce_only)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
	if (vnic->rss_dflt_cr)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);

int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
	struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
	HWRM_PREP(&req, HWRM_VNIC_QCFG, BNXT_USE_CHIMP_MB);

		rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.vf_id = rte_cpu_to_le_16(fw_vf_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
	vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
	vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
	vnic->mru = rte_le_to_cpu_16(resp->mru);
	vnic->func_default = rte_le_to_cpu_32(
			resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
	vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
	vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
	vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
	vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
	vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;

int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
			     struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
		bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();

	ctx_id = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
	if (!BNXT_HAS_RING_GRPS(bp))
		vnic->fw_grp_ids[ctx_idx] = ctx_id;
	else if (ctx_idx == 0)
		vnic->rss_rule = ctx_id;

int _bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
			     struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
		bp->hwrm_cmd_resp_addr;

	if (ctx_idx == (uint16_t)HWRM_NA_SIGNATURE) {
		PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);

	HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);

	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	if (BNXT_CHIP_THOR(bp)) {
		for (j = 0; j < vnic->num_lb_ctxts; j++) {
			rc = _bnxt_hwrm_vnic_ctx_free(bp,
						      vnic->fw_grp_ids[j]);
			vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
		vnic->num_lb_ctxts = 0;
		rc = _bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
		vnic->rss_rule = INVALID_HW_RING_ID;

int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_vnic_free_input req = {.req_type = 0 };
	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);

	HWRM_PREP(&req, HWRM_VNIC_FREE, BNXT_USE_CHIMP_MB);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	vnic->fw_vnic_id = INVALID_HW_RING_ID;
	/* Configure default VNIC again if necessary. */
	if (vnic->func_default && (bp->flags & BNXT_FLAG_DFLT_VNIC_SET))
		bp->flags &= ~BNXT_FLAG_DFLT_VNIC_SET;

bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	int nr_ctxs = vnic->num_lb_ctxts;
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
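
	/*
	 * On Thor the RSS table is programmed in chunks: each loop
	 * iteration below binds one ring-table-pair index to its own
	 * firmware RSS context.
	 */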
2089 for (i = 0; i < nr_ctxs; i++) {
2090 HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
2092 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2093 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
2094 req.hash_mode_flags = vnic->hash_mode;
2096 req.hash_key_tbl_addr =
2097 rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
2099 req.ring_grp_tbl_addr =
2100 rte_cpu_to_le_64(vnic->rss_table_dma_addr +
2101 i * HW_HASH_INDEX_SIZE);
2102 req.ring_table_pair_index = i;
2103 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
2105 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
2108 HWRM_CHECK_RESULT();
2115 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
2116 struct bnxt_vnic_info *vnic)
2119 struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
2120 struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2122 if (!vnic->rss_table)
2123 return 0;
2125 if (BNXT_CHIP_THOR(bp))
2126 return bnxt_hwrm_vnic_rss_cfg_thor(bp, vnic);
2128 HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
2130 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
2131 req.hash_mode_flags = vnic->hash_mode;
2133 req.ring_grp_tbl_addr =
2134 rte_cpu_to_le_64(vnic->rss_table_dma_addr);
2135 req.hash_key_tbl_addr =
2136 rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
2137 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
2138 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2140 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2142 HWRM_CHECK_RESULT();
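/*
 * Program jumbo placement for the VNIC. The threshold is derived from
 * the data room of the first Rx queue's mempool, less headroom, and
 * capped at BNXT_MAX_PKT_LEN; frames larger than the threshold use
 * jumbo (aggregation buffer) placement.
 */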
2148 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
2149 struct bnxt_vnic_info *vnic)
2152 struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
2153 struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2156 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2157 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
2161 HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
2163 req.flags = rte_cpu_to_le_32(
2164 HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
2166 req.enables = rte_cpu_to_le_32(
2167 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
2169 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
2170 size -= RTE_PKTMBUF_HEADROOM;
2171 size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
2173 req.jumbo_thresh = rte_cpu_to_le_16(size);
2174 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2176 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2178 HWRM_CHECK_RESULT();
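/*
 * Enable or disable TPA (hardware LRO) on the VNIC. On the enable
 * path, aggregation limits come from the BNXT_TPA_MAX_* chip limits
 * and a 512-byte minimum aggregation length is requested. Thor
 * requires TPA v2 support in the firmware.
 */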
2184 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
2185 struct bnxt_vnic_info *vnic, bool enable)
2188 struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
2189 struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2191 if (BNXT_CHIP_THOR(bp) && !bp->max_tpa_v2) {
2193 PMD_DRV_LOG(ERR, "No HW support for LRO\n");
2197 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2198 PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
2202 HWRM_PREP(&req, HWRM_VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
2205 req.enables = rte_cpu_to_le_32(
2206 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
2207 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
2208 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
2209 req.flags = rte_cpu_to_le_32(
2210 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
2211 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
2212 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
2213 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
2214 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
2215 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
2216 req.max_agg_segs = rte_cpu_to_le_16(BNXT_TPA_MAX_SEGS(bp));
2217 req.max_aggs = rte_cpu_to_le_16(BNXT_TPA_MAX_AGGS(bp));
2218 req.min_agg_len = rte_cpu_to_le_32(512);
2220 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2222 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2224 HWRM_CHECK_RESULT();
2230 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
2232 struct hwrm_func_cfg_input req = {0};
2233 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2236 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2237 req.enables = rte_cpu_to_le_32(
2238 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2239 memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
2240 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2242 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
2244 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2245 HWRM_CHECK_RESULT();
2248 bp->pf.vf_info[vf].random_mac = false;
2253 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
2257 struct hwrm_func_qstats_input req = {.req_type = 0};
2258 struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2260 HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2262 req.fid = rte_cpu_to_le_16(fid);
2264 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2266 HWRM_CHECK_RESULT();
2269 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
2276 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
2277 struct rte_eth_stats *stats,
2278 struct hwrm_func_qstats_output *func_qstats)
2281 struct hwrm_func_qstats_input req = {.req_type = 0};
2282 struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2284 HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2286 req.fid = rte_cpu_to_le_16(fid);
2288 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2290 HWRM_CHECK_RESULT();
2292 memcpy(func_qstats, resp,
2293 sizeof(struct hwrm_func_qstats_output));
2298 stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2299 stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2300 stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2301 stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2302 stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2303 stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2305 stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2306 stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2307 stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2308 stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2309 stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2310 stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2312 stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
2313 stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
2314 stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
2322 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
2325 struct hwrm_func_clr_stats_input req = {.req_type = 0};
2326 struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2328 HWRM_PREP(&req, HWRM_FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
2330 req.fid = rte_cpu_to_le_16(fid);
2332 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2334 HWRM_CHECK_RESULT();
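/*
 * Clear the statistics context of every completion ring. Indices in
 * [0, rx_cp_nr_rings) address Rx queues; the remainder address Tx
 * queues.
 */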
2340 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
2345 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2346 struct bnxt_tx_queue *txq;
2347 struct bnxt_rx_queue *rxq;
2348 struct bnxt_cp_ring_info *cpr;
2350 if (i >= bp->rx_cp_nr_rings) {
2351 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2354 rxq = bp->rx_queues[i];
2358 rc = bnxt_hwrm_stat_clear(bp, cpr);
2366 bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
2370 struct bnxt_cp_ring_info *cpr;
2372 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2374 if (i >= bp->rx_cp_nr_rings) {
2375 cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
2377 cpr = bp->rx_queues[i]->cp_ring;
2378 if (BNXT_HAS_RING_GRPS(bp))
2379 bp->grp_info[i].fw_stats_ctx = -1;
2381 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
2382 rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
2383 cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
2391 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
2396 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2397 struct bnxt_tx_queue *txq;
2398 struct bnxt_rx_queue *rxq;
2399 struct bnxt_cp_ring_info *cpr;
2401 if (i >= bp->rx_cp_nr_rings) {
2402 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2405 rxq = bp->rx_queues[i];
2409 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
2418 bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
2423 if (!BNXT_HAS_RING_GRPS(bp))
2426 for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
2428 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
2431 rc = bnxt_hwrm_ring_grp_free(bp, idx);
2439 void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2441 struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2443 bnxt_hwrm_ring_free(bp, cp_ring,
2444 HWRM_RING_FREE_INPUT_RING_TYPE_NQ);
2445 cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2446 memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2447 sizeof(*cpr->cp_desc_ring));
2448 cpr->cp_raw_cons = 0;
2452 void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2454 struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2456 bnxt_hwrm_ring_free(bp, cp_ring,
2457 HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
2458 cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2459 memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2460 sizeof(*cpr->cp_desc_ring));
2461 cpr->cp_raw_cons = 0;
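/*
 * Tear down all firmware rings of one Rx queue: the Rx ring, the
 * aggregation ring (freed as type RX_AGG on Thor, RX otherwise) and
 * finally the completion ring, invalidating the cached ring group IDs
 * along the way.
 */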
2465 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
2467 struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
2468 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
2469 struct bnxt_ring *ring = rxr->rx_ring_struct;
2470 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
2472 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2473 bnxt_hwrm_ring_free(bp, ring,
2474 HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2475 ring->fw_ring_id = INVALID_HW_RING_ID;
2476 if (BNXT_HAS_RING_GRPS(bp))
2477 bp->grp_info[queue_index].rx_fw_ring_id = INVALID_HW_RING_ID;
2480 ring = rxr->ag_ring_struct;
2481 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2482 bnxt_hwrm_ring_free(bp, ring,
2483 BNXT_CHIP_THOR(bp) ?
2484 HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
2485 HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2486 if (BNXT_HAS_RING_GRPS(bp))
2487 bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
2490 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
2491 bnxt_free_cp_ring(bp, cpr);
2493 if (BNXT_HAS_RING_GRPS(bp))
2494 bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
2498 bnxt_free_all_hwrm_rings(struct bnxt *bp)
2502 for (i = 0; i < bp->tx_cp_nr_rings; i++) {
2503 struct bnxt_tx_queue *txq = bp->tx_queues[i];
2504 struct bnxt_tx_ring_info *txr = txq->tx_ring;
2505 struct bnxt_ring *ring = txr->tx_ring_struct;
2506 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
2508 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2509 bnxt_hwrm_ring_free(bp, ring,
2510 HWRM_RING_FREE_INPUT_RING_TYPE_TX);
2511 ring->fw_ring_id = INVALID_HW_RING_ID;
2512 memset(txr->tx_desc_ring, 0,
2513 txr->tx_ring_struct->ring_size *
2514 sizeof(*txr->tx_desc_ring));
2515 memset(txr->tx_buf_ring, 0,
2516 txr->tx_ring_struct->ring_size *
2517 sizeof(*txr->tx_buf_ring));
2521 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
2522 bnxt_free_cp_ring(bp, cpr);
2523 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
2527 for (i = 0; i < bp->rx_cp_nr_rings; i++)
2528 bnxt_free_hwrm_rx_ring(bp, i);
2533 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
2538 if (!BNXT_HAS_RING_GRPS(bp))
2541 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
2542 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
2550 * HWRM utility functions
2553 void bnxt_free_hwrm_resources(struct bnxt *bp)
2555 /* Release HWRM command/response DMA buffers */
2556 rte_free(bp->hwrm_cmd_resp_addr);
2557 rte_free(bp->hwrm_short_cmd_req_addr);
2558 bp->hwrm_cmd_resp_addr = NULL;
2559 bp->hwrm_short_cmd_req_addr = NULL;
2560 bp->hwrm_cmd_resp_dma_addr = 0;
2561 bp->hwrm_short_cmd_req_dma_addr = 0;
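/*
 * Allocate the DMA-able buffer used for HWRM responses, resolve its
 * IOVA for the firmware, and initialize the lock that serializes HWRM
 * access for this device.
 */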
2564 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2566 struct rte_pci_device *pdev = bp->pdev;
2567 char type[RTE_MEMZONE_NAMESIZE];
2569 snprintf(type, sizeof(type), "bnxt_hwrm_" PCI_PRI_FMT, pdev->addr.domain,
2570 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
2571 bp->max_resp_len = HWRM_MAX_RESP_LEN;
2572 bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
2573 if (bp->hwrm_cmd_resp_addr == NULL)
2575 bp->hwrm_cmd_resp_dma_addr =
2576 rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
2577 if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
2579 "unable to map response address to physical memory\n");
2582 rte_spinlock_init(&bp->hwrm_lock);
2588 bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2590 struct bnxt_filter_info *filter;
2593 STAILQ_FOREACH(filter, &vnic->filter, next) {
2594 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2595 rc = bnxt_hwrm_clear_em_filter(bp, filter);
2596 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2597 rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2598 rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2599 STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
2600 bnxt_free_filter(bp, filter);
2606 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2608 struct bnxt_filter_info *filter;
2609 struct rte_flow *flow;
2612 while (!STAILQ_EMPTY(&vnic->flow_list)) {
2613 flow = STAILQ_FIRST(&vnic->flow_list);
2614 filter = flow->filter;
2615 PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
2616 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2617 rc = bnxt_hwrm_clear_em_filter(bp, filter);
2618 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2619 rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2620 rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2622 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
2628 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2630 struct bnxt_filter_info *filter;
2633 STAILQ_FOREACH(filter, &vnic->filter, next) {
2634 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2635 rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2637 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2638 rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2641 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2650 bnxt_free_tunnel_ports(struct bnxt *bp)
2652 if (bp->vxlan_port_cnt)
2653 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2654 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2656 if (bp->geneve_port_cnt)
2657 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2658 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2659 bp->geneve_port = 0;
2662 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2666 if (bp->vnic_info == NULL)
2670 * Cleanup VNICs in reverse order, to make sure the L2 filter
2671 * from vnic0 is last to be cleaned up.
2673 for (i = bp->max_vnics - 1; i >= 0; i--) {
2674 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2676 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
2679 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2681 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2683 bnxt_hwrm_vnic_ctx_free(bp, vnic);
2685 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2687 bnxt_hwrm_vnic_free(bp, vnic);
2689 rte_free(vnic->fw_grp_ids);
2691 /* Ring resources */
2692 bnxt_free_all_hwrm_rings(bp);
2693 bnxt_free_all_hwrm_ring_grps(bp);
2694 bnxt_free_all_hwrm_stat_ctxs(bp);
2695 bnxt_free_tunnel_ports(bp);
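/*
 * The following helpers translate DPDK ETH_LINK_SPEED_* configuration
 * into the corresponding HWRM_PORT_PHY_CFG duplex, speed and
 * speed-mask values, and back again for link reporting.
 */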
2698 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2700 uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2702 if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2703 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2705 switch (conf_link_speed) {
2706 case ETH_LINK_SPEED_10M_HD:
2707 case ETH_LINK_SPEED_100M_HD:
2709 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2711 return hw_link_duplex;
2714 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2716 return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
2719 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
2721 uint16_t eth_link_speed = 0;
2723 if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2724 return ETH_LINK_SPEED_AUTONEG;
2726 switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2727 case ETH_LINK_SPEED_100M:
2728 case ETH_LINK_SPEED_100M_HD:
2731 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2733 case ETH_LINK_SPEED_1G:
2735 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2737 case ETH_LINK_SPEED_2_5G:
2739 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2741 case ETH_LINK_SPEED_10G:
2743 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2745 case ETH_LINK_SPEED_20G:
2747 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2749 case ETH_LINK_SPEED_25G:
2751 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2753 case ETH_LINK_SPEED_40G:
2755 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2757 case ETH_LINK_SPEED_50G:
2759 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2761 case ETH_LINK_SPEED_100G:
2763 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2767 "Unsupported link speed %d; default to AUTO\n",
2771 return eth_link_speed;
2774 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2775 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2776 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2777 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)
2779 static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
2783 if (link_speed == ETH_LINK_SPEED_AUTONEG)
2784 return 0;
2786 if (link_speed & ETH_LINK_SPEED_FIXED) {
2787 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2789 if (one_speed & (one_speed - 1)) {
2791 "Invalid advertised speeds (%u) for port %u\n",
2792 link_speed, port_id);
2795 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
2797 "Unsupported advertised speed (%u) for port %u\n",
2798 link_speed, port_id);
2802 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
2804 "Unsupported advertised speeds (%u) for port %u\n",
2805 link_speed, port_id);
2813 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2817 if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2818 if (bp->link_info.support_speeds)
2819 return bp->link_info.support_speeds;
2820 link_speed = BNXT_SUPPORTED_SPEEDS;
2823 if (link_speed & ETH_LINK_SPEED_100M)
2824 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2825 if (link_speed & ETH_LINK_SPEED_100M_HD)
2826 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2827 if (link_speed & ETH_LINK_SPEED_1G)
2828 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2829 if (link_speed & ETH_LINK_SPEED_2_5G)
2830 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2831 if (link_speed & ETH_LINK_SPEED_10G)
2832 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2833 if (link_speed & ETH_LINK_SPEED_20G)
2834 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2835 if (link_speed & ETH_LINK_SPEED_25G)
2836 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2837 if (link_speed & ETH_LINK_SPEED_40G)
2838 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2839 if (link_speed & ETH_LINK_SPEED_50G)
2840 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2841 if (link_speed & ETH_LINK_SPEED_100G)
2842 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2846 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2848 uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2850 switch (hw_link_speed) {
2851 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2852 eth_link_speed = ETH_SPEED_NUM_100M;
2854 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2855 eth_link_speed = ETH_SPEED_NUM_1G;
2857 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2858 eth_link_speed = ETH_SPEED_NUM_2_5G;
2860 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2861 eth_link_speed = ETH_SPEED_NUM_10G;
2863 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2864 eth_link_speed = ETH_SPEED_NUM_20G;
2866 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2867 eth_link_speed = ETH_SPEED_NUM_25G;
2869 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2870 eth_link_speed = ETH_SPEED_NUM_40G;
2872 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2873 eth_link_speed = ETH_SPEED_NUM_50G;
2875 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2876 eth_link_speed = ETH_SPEED_NUM_100G;
2878 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2880 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2884 return eth_link_speed;
2887 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2889 uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2891 switch (hw_link_duplex) {
2892 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2893 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2895 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2897 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2898 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2901 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
2905 return eth_link_duplex;
2908 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2911 struct bnxt_link_info *link_info = &bp->link_info;
2913 rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2916 "Get link config failed with rc %d\n", rc);
2919 if (link_info->link_speed)
2921 bnxt_parse_hw_link_speed(link_info->link_speed);
2923 link->link_speed = ETH_SPEED_NUM_NONE;
2924 link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2925 link->link_status = link_info->link_up;
2926 link->link_autoneg = link_info->auto_mode ==
2927 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2928 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
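/*
 * Apply the user's link configuration: validate the requested speeds,
 * then either restart autoneg with the derived speed mask or force a
 * fixed speed. Only a single-function PF may change PHY settings.
 */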
2933 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2936 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2937 struct bnxt_link_info link_req;
2938 uint16_t speed, autoneg;
2940 if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2943 rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2944 bp->eth_dev->data->port_id);
2948 memset(&link_req, 0, sizeof(link_req));
2949 link_req.link_up = link_up;
2953 autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2954 if (BNXT_CHIP_THOR(bp) &&
2955 dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
2956 /* 40G is not supported as part of media auto detect.
2957 * The speed should be forced and autoneg disabled
2958 * to configure 40G speed.
2960 PMD_DRV_LOG(INFO, "Disabling autoneg for 40G\n");
2964 speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2965 link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2966 /* Autoneg can be enabled only when the FW allows it.
2967 * When the user configures a fixed speed of 40G and later changes
2968 * to any other speed, auto_link_speed/force_link_speed remains set
2969 * to 40G until the link comes up at the new speed.
2972 !(!BNXT_CHIP_THOR(bp) &&
2973 (bp->link_info.auto_link_speed ||
2974 bp->link_info.force_link_speed))) {
2975 link_req.phy_flags |=
2976 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2977 link_req.auto_link_speed_mask =
2978 bnxt_parse_eth_link_speed_mask(bp,
2979 dev_conf->link_speeds);
2981 if (bp->link_info.phy_type ==
2982 HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2983 bp->link_info.phy_type ==
2984 HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2985 bp->link_info.media_type ==
2986 HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2987 PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2991 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2992 /* If user wants a particular speed try that first. */
2994 link_req.link_speed = speed;
2995 else if (bp->link_info.force_link_speed)
2996 link_req.link_speed = bp->link_info.force_link_speed;
2998 link_req.link_speed = bp->link_info.auto_link_speed;
3000 link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
3001 link_req.auto_pause = bp->link_info.auto_pause;
3002 link_req.force_pause = bp->link_info.force_pause;
3005 rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
3008 "Set link config failed with rc %d\n", rc);
3016 int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
3018 struct hwrm_func_qcfg_input req = {0};
3019 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3022 bp->func_svif = BNXT_SVIF_INVALID;
3025 HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3026 req.fid = rte_cpu_to_le_16(0xffff);
3028 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3030 HWRM_CHECK_RESULT();
3032 /* Hardcoded 12-bit (0xfff) VLAN ID mask */
3033 bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
3035 svif_info = rte_le_to_cpu_16(resp->svif_info);
3036 if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)
3037 bp->func_svif = svif_info &
3038 HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
3040 flags = rte_le_to_cpu_16(resp->flags);
3041 if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
3042 bp->flags |= BNXT_FLAG_MULTI_HOST;
3045 !BNXT_VF_IS_TRUSTED(bp) &&
3046 (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
3047 bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
3048 PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
3049 } else if (BNXT_VF(bp) &&
3050 BNXT_VF_IS_TRUSTED(bp) &&
3051 !(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
3052 bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN;
3053 PMD_DRV_LOG(INFO, "Trusted VF cap disabled\n");
3057 *mtu = rte_le_to_cpu_16(resp->mtu);
3059 switch (resp->port_partition_type) {
3060 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
3061 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
3062 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
3064 bp->flags |= BNXT_FLAG_NPAR_PF;
3067 bp->flags &= ~BNXT_FLAG_NPAR_PF;
3076 int bnxt_hwrm_port_mac_qcfg(struct bnxt *bp)
3078 struct hwrm_port_mac_qcfg_input req = {0};
3079 struct hwrm_port_mac_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3080 uint16_t port_svif_info;
3083 bp->port_svif = BNXT_SVIF_INVALID;
3088 HWRM_PREP(&req, HWRM_PORT_MAC_QCFG, BNXT_USE_CHIMP_MB);
3090 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3092 HWRM_CHECK_RESULT();
3094 port_svif_info = rte_le_to_cpu_16(resp->port_svif_info);
3095 if (port_svif_info &
3096 HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_VALID)
3097 bp->port_svif = port_svif_info &
3098 HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_MASK;
3105 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
3106 struct hwrm_func_qcaps_output *qcaps)
3108 qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
3109 memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
3110 sizeof(qcaps->mac_address));
3111 qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
3112 qcaps->max_rx_rings = fcfg->num_rx_rings;
3113 qcaps->max_tx_rings = fcfg->num_tx_rings;
3114 qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
3115 qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
3117 qcaps->first_vf_id = 0;
3118 qcaps->max_vnics = fcfg->num_vnics;
3119 qcaps->max_decap_records = 0;
3120 qcaps->max_encap_records = 0;
3121 qcaps->max_tx_wm_flows = 0;
3122 qcaps->max_tx_em_flows = 0;
3123 qcaps->max_rx_wm_flows = 0;
3124 qcaps->max_rx_em_flows = 0;
3125 qcaps->max_flow_id = 0;
3126 qcaps->max_mcast_filters = fcfg->num_mcast_filters;
3127 qcaps->max_sp_tx_rings = 0;
3128 qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
3131 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
3133 struct hwrm_func_cfg_input req = {0};
3134 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3138 enables = HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
3139 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
3140 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
3141 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
3142 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
3143 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
3144 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
3145 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
3146 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS;
3148 if (BNXT_HAS_RING_GRPS(bp)) {
3149 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
3150 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
3151 } else if (BNXT_HAS_NQ(bp)) {
3152 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
3153 req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
3156 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
3157 req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
3158 req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
3159 req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
3160 req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
3161 req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
3162 req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
3163 req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
3164 req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
3165 req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
3166 req.fid = rte_cpu_to_le_16(0xffff);
3167 req.enables = rte_cpu_to_le_32(enables);
3169 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3171 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3173 HWRM_CHECK_RESULT();
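/*
 * Build the VF func_cfg request by dividing the PF's resources evenly
 * across the PF and its VFs (num_vfs + 1 shares). VNICs are pinned to
 * one per VF since VMDq/RFS is not supported on VFs.
 */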
3179 static void populate_vf_func_cfg_req(struct bnxt *bp,
3180 struct hwrm_func_cfg_input *req,
3183 req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
3184 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
3185 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
3186 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
3187 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
3188 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
3189 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
3190 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
3191 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
3192 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
3194 req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
3195 RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
3197 req->mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
3198 req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
3200 req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
3201 req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
3203 req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
3204 req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
3205 req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
3206 /* TODO: For now, do not support VMDq/RFS on VFs. */
3207 req->num_vnics = rte_cpu_to_le_16(1);
3208 req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
3212 static void add_random_mac_if_needed(struct bnxt *bp,
3213 struct hwrm_func_cfg_input *cfg_req,
3216 struct rte_ether_addr mac;
3218 if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
3221 if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00", 6) == 0) {
3223 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
3224 rte_eth_random_addr(cfg_req->dflt_mac_addr);
3225 bp->pf.vf_info[vf].random_mac = true;
3227 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes,
3228 RTE_ETHER_ADDR_LEN);
3232 static int reserve_resources_from_vf(struct bnxt *bp,
3233 struct hwrm_func_cfg_input *cfg_req,
3236 struct hwrm_func_qcaps_input req = {0};
3237 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3240 /* Get the actual allocated values now */
3241 HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
3242 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3243 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3246 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
3247 copy_func_cfg_to_qcaps(cfg_req, resp);
3248 } else if (resp->error_code) {
3249 rc = rte_le_to_cpu_16(resp->error_code);
3250 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
3251 copy_func_cfg_to_qcaps(cfg_req, resp);
3254 bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
3255 bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
3256 bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
3257 bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
3258 bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
3259 bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
3261 * TODO: Since VMDq is not supported with VFs, max_vnics is
3262 * always forced to 1 in this case.
3264 /* bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics); */
3265 bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
3272 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
3274 struct hwrm_func_qcfg_input req = {0};
3275 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3278 /* Query the VLAN currently configured for this VF */
3279 HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3280 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3281 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3282 HWRM_CHECK_RESULT();
3283 rc = rte_le_to_cpu_16(resp->vlan);
3290 static int update_pf_resource_max(struct bnxt *bp)
3292 struct hwrm_func_qcfg_input req = {0};
3293 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3296 /* And copy the allocated numbers into the pf struct */
3297 HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3298 req.fid = rte_cpu_to_le_16(0xffff);
3299 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3300 HWRM_CHECK_RESULT();
3302 /* Only TX ring value reflects actual allocation? TODO */
3303 bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
3304 bp->pf.evb_mode = resp->evb_mode;
3311 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
3316 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3320 rc = bnxt_hwrm_func_qcaps(bp);
3324 bp->pf.func_cfg_flags &=
3325 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3326 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3327 bp->pf.func_cfg_flags |=
3328 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
3329 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
3330 rc = __bnxt_hwrm_func_qcaps(bp);
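/*
 * Provision num_vfs VF functions: temporarily shrink the PF to one Tx
 * ring so enough rings remain for the VFs, register the VF request
 * forwarding buffer, run func_cfg on each VF while reserving its
 * resources from the PF totals, then reconfigure the PF with the
 * remainder.
 */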
3334 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
3336 struct hwrm_func_cfg_input req = {0};
3337 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3344 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3348 rc = bnxt_hwrm_func_qcaps(bp);
3353 bp->pf.active_vfs = num_vfs;
3356 * First, configure the PF to only use one TX ring. This ensures that
3357 * there are enough rings for all VFs.
3359 * If we don't do this, when we call func_alloc() later, we will lock
3360 * extra rings to the PF that won't be available during func_cfg() of
3361 * the VFs.
3363 * This has been fixed in firmware versions above 20.6.54.
3365 bp->pf.func_cfg_flags &=
3366 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3367 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3368 bp->pf.func_cfg_flags |=
3369 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
3370 rc = bnxt_hwrm_pf_func_cfg(bp, 1);
3375 * Now, create and register a buffer to hold forwarded VF requests
3377 req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
3378 bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
3379 page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
3380 if (bp->pf.vf_req_buf == NULL) {
3384 for (sz = 0; sz < req_buf_sz; sz += getpagesize())
3385 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
3386 for (i = 0; i < num_vfs; i++)
3387 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
3388 (i * HWRM_MAX_REQ_LEN);
3390 rc = bnxt_hwrm_func_buf_rgtr(bp);
3394 populate_vf_func_cfg_req(bp, &req, num_vfs);
3396 bp->pf.active_vfs = 0;
3397 for (i = 0; i < num_vfs; i++) {
3398 add_random_mac_if_needed(bp, &req, i);
3400 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3401 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
3402 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
3403 rc = bnxt_hwrm_send_message(bp,
3408 /* Clear enable flag for next pass */
3409 req.enables &= ~rte_cpu_to_le_32(
3410 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
3412 if (rc || resp->error_code) {
3414 "Failed to initizlie VF %d\n", i);
3416 "Not all VFs available. (%d, %d)\n",
3417 rc, resp->error_code);
3424 reserve_resources_from_vf(bp, &req, i);
3425 bp->pf.active_vfs++;
3426 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
3430 * Now configure the PF to use "the rest" of the resources.
3431 * We're using STD_TX_RING_MODE here, which limits the number of TX
3432 * rings but allows QoS to function properly. Without it, the PF
3433 * rings would not honor bandwidth settings.
3435 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
3439 rc = update_pf_resource_max(bp);
3446 bnxt_hwrm_func_buf_unrgtr(bp);
3450 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
3452 struct hwrm_func_cfg_input req = {0};
3453 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3456 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3458 req.fid = rte_cpu_to_le_16(0xffff);
3459 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
3460 req.evb_mode = bp->pf.evb_mode;
3462 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3463 HWRM_CHECK_RESULT();
3469 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
3470 uint8_t tunnel_type)
3472 struct hwrm_tunnel_dst_port_alloc_input req = {0};
3473 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3476 HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
3477 req.tunnel_type = tunnel_type;
3478 req.tunnel_dst_port_val = port;
3479 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3480 HWRM_CHECK_RESULT();
3482 switch (tunnel_type) {
3483 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
3484 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
3485 bp->vxlan_port = port;
3487 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
3488 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
3489 bp->geneve_port = port;
3500 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
3501 uint8_t tunnel_type)
3503 struct hwrm_tunnel_dst_port_free_input req = {0};
3504 struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
3507 HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
3509 req.tunnel_type = tunnel_type;
3510 req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
3511 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3513 HWRM_CHECK_RESULT();
3519 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
3522 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3523 struct hwrm_func_cfg_input req = {0};
3526 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3528 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3529 req.flags = rte_cpu_to_le_32(flags);
3530 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3532 HWRM_CHECK_RESULT();
3538 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
3540 uint32_t *flag = flagp;
3542 vnic->flags = *flag;
3545 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3547 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
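/*
 * Register the VF request forwarding buffer with the firmware so VF
 * HWRM commands can be forwarded to the PF driver. The page size is
 * derived with page_getenum() from active_vfs * HWRM_MAX_REQ_LEN.
 */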
3550 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
3553 struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
3554 struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
3556 HWRM_PREP(&req, HWRM_FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
3558 req.req_buf_num_pages = rte_cpu_to_le_16(1);
3559 req.req_buf_page_size = rte_cpu_to_le_16(
3560 page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
3561 req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
3562 req.req_buf_page_addr0 =
3563 rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf.vf_req_buf));
3564 if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
3566 "unable to map buffer address to physical memory\n");
3570 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3572 HWRM_CHECK_RESULT();
3578 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
3581 struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
3582 struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
3584 if (!(BNXT_PF(bp) && bp->pdev->max_vfs))
3587 HWRM_PREP(&req, HWRM_FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
3589 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3591 HWRM_CHECK_RESULT();
3597 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
3599 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3600 struct hwrm_func_cfg_input req = {0};
3603 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3605 req.fid = rte_cpu_to_le_16(0xffff);
3606 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
3607 req.enables = rte_cpu_to_le_32(
3608 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3609 req.async_event_cr = rte_cpu_to_le_16(
3610 bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3611 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3613 HWRM_CHECK_RESULT();
3619 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
3621 struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3622 struct hwrm_func_vf_cfg_input req = {0};
3625 HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
3627 req.enables = rte_cpu_to_le_32(
3628 HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3629 req.async_event_cr = rte_cpu_to_le_16(
3630 bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3631 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3633 HWRM_CHECK_RESULT();
3639 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
3641 struct hwrm_func_cfg_input req = {0};
3642 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3643 uint16_t dflt_vlan, fid;
3644 uint32_t func_cfg_flags;
3647 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3650 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
3651 fid = bp->pf.vf_info[vf].fid;
3652 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
3654 fid = rte_cpu_to_le_16(0xffff);
3655 func_cfg_flags = bp->pf.func_cfg_flags;
3656 dflt_vlan = bp->vlan;
3659 req.flags = rte_cpu_to_le_32(func_cfg_flags);
3660 req.fid = rte_cpu_to_le_16(fid);
3661 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3662 req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
3664 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3666 HWRM_CHECK_RESULT();
3672 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
3673 uint16_t max_bw, uint16_t enables)
3675 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3676 struct hwrm_func_cfg_input req = {0};
3679 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3681 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3682 req.enables |= rte_cpu_to_le_32(enables);
3683 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3684 req.max_bw = rte_cpu_to_le_32(max_bw);
3685 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3687 HWRM_CHECK_RESULT();
3693 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
3695 struct hwrm_func_cfg_input req = {0};
3696 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3699 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3701 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
3702 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3703 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3704 req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
3706 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3708 HWRM_CHECK_RESULT();
3714 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
3719 rc = bnxt_hwrm_func_cfg_def_cp(bp);
3721 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
3726 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
3727 void *encaped, size_t ec_size)
3730 struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
3731 struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3733 if (ec_size > sizeof(req.encap_request))
3736 HWRM_PREP(&req, HWRM_REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
3738 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3739 memcpy(req.encap_request, encaped, ec_size);
3741 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3743 HWRM_CHECK_RESULT();
3749 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
3750 struct rte_ether_addr *mac)
3752 struct hwrm_func_qcfg_input req = {0};
3753 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3756 HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3758 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3759 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3761 HWRM_CHECK_RESULT();
3763 memcpy(mac->addr_bytes, resp->mac_address, RTE_ETHER_ADDR_LEN);
3770 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
3771 void *encaped, size_t ec_size)
3774 struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
3775 struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3777 if (ec_size > sizeof(req.encap_request))
3780 HWRM_PREP(&req, HWRM_EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
3782 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3783 memcpy(req.encap_request, encaped, ec_size);
3785 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3787 HWRM_CHECK_RESULT();
3793 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3794 struct rte_eth_stats *stats, uint8_t rx)
3797 struct hwrm_stat_ctx_query_input req = {.req_type = 0};
3798 struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
3800 HWRM_PREP(&req, HWRM_STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
3802 req.stat_ctx_id = rte_cpu_to_le_32(cid);
3804 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3806 HWRM_CHECK_RESULT();
3809 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3810 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3811 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3812 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3813 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3814 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3815 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3816 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3818 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3819 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3820 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3821 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3822 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3823 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3831 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3833 struct hwrm_port_qstats_input req = {0};
3834 struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3835 struct bnxt_pf_info *pf = &bp->pf;
3838 HWRM_PREP(&req, HWRM_PORT_QSTATS, BNXT_USE_CHIMP_MB);
3840 req.port_id = rte_cpu_to_le_16(pf->port_id);
3841 req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3842 req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3843 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3845 HWRM_CHECK_RESULT();
3851 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3853 struct hwrm_port_clr_stats_input req = {0};
3854 struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3855 struct bnxt_pf_info *pf = &bp->pf;
3858 /* Not allowed on NS2 device, NPAR, MultiHost, VF */
3859 if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
3860 BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
3863 HWRM_PREP(&req, HWRM_PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
3865 req.port_id = rte_cpu_to_le_16(pf->port_id);
3866 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3868 HWRM_CHECK_RESULT();
3874 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3876 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3877 struct hwrm_port_led_qcaps_input req = {0};
3883 HWRM_PREP(&req, HWRM_PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
3884 req.port_id = bp->pf.port_id;
3885 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3887 HWRM_CHECK_RESULT();
3889 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3892 bp->num_leds = resp->num_leds;
3893 memcpy(bp->leds, &resp->led0_id,
3894 sizeof(bp->leds[0]) * bp->num_leds);
3895 for (i = 0; i < bp->num_leds; i++) {
3896 struct bnxt_led_info *led = &bp->leds[i];
3898 uint16_t caps = led->led_state_caps;
3900 if (!led->led_group_id ||
3901 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3913 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3915 struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3916 struct hwrm_port_led_cfg_input req = {0};
3917 struct bnxt_led_cfg *led_cfg;
3918 uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3919 uint16_t duration = 0;
3922 if (!bp->num_leds || BNXT_VF(bp))
3925 HWRM_PREP(&req, HWRM_PORT_LED_CFG, BNXT_USE_CHIMP_MB);
3928 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3929 duration = rte_cpu_to_le_16(500);
3931 req.port_id = bp->pf.port_id;
3932 req.num_leds = bp->num_leds;
3933 led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3934 for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3935 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3936 led_cfg->led_id = bp->leds[i].led_id;
3937 led_cfg->led_state = led_state;
3938 led_cfg->led_blink_on = duration;
3939 led_cfg->led_blink_off = duration;
3940 led_cfg->led_group_id = bp->leds[i].led_group_id;
3943 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3945 HWRM_CHECK_RESULT();
3951 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3955 struct hwrm_nvm_get_dir_info_input req = {0};
3956 struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3958 HWRM_PREP(&req, HWRM_NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
3960 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3962 HWRM_CHECK_RESULT();
3964 *entries = rte_le_to_cpu_32(resp->entries);
3965 *length = rte_le_to_cpu_32(resp->entry_length);
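/*
 * Read the NVM directory into a temporary DMA buffer and copy up to
 * len bytes to the caller. The first two output bytes carry the entry
 * count and entry length, each truncated to eight bits; the remaining
 * buffer is pre-filled with 0xff.
 */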
3971 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3974 uint32_t dir_entries;
3975 uint32_t entry_length;
3978 rte_iova_t dma_handle;
3979 struct hwrm_nvm_get_dir_entries_input req = {0};
3980 struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3982 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3986 *data++ = dir_entries;
3987 *data++ = entry_length;
3988 len -= 2;
3989 memset(data, 0xff, len);
3991 buflen = dir_entries * entry_length;
3992 buf = rte_malloc("nvm_dir", buflen, 0);
3995 dma_handle = rte_malloc_virt2iova(buf);
3996 if (dma_handle == RTE_BAD_IOVA) {
3998 "unable to map response address to physical memory\n");
4001 HWRM_PREP(&req, HWRM_NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
4002 req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
4003 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4006 memcpy(data, buf, len > buflen ? buflen : len);
4009 HWRM_CHECK_RESULT();
4015 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
4016 uint32_t offset, uint32_t length,
4021 rte_iova_t dma_handle;
4022 struct hwrm_nvm_read_input req = {0};
4023 struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
4025 buf = rte_malloc("nvm_item", length, 0);
4029 dma_handle = rte_malloc_virt2iova(buf);
4030 if (dma_handle == RTE_BAD_IOVA) {
4032 "unable to map response address to physical memory\n");
4035 HWRM_PREP(&req, HWRM_NVM_READ, BNXT_USE_CHIMP_MB);
4036 req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
4037 req.dir_idx = rte_cpu_to_le_16(index);
4038 req.offset = rte_cpu_to_le_32(offset);
4039 req.len = rte_cpu_to_le_32(length);
4040 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4042 memcpy(data, buf, length);
4045 HWRM_CHECK_RESULT();
4051 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
4054 struct hwrm_nvm_erase_dir_entry_input req = {0};
4055 struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
4057 HWRM_PREP(&req, HWRM_NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
4058 req.dir_idx = rte_cpu_to_le_16(index);
4059 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4060 HWRM_CHECK_RESULT();
4067 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
4068 uint16_t dir_ordinal, uint16_t dir_ext,
4069 uint16_t dir_attr, const uint8_t *data,
4073 struct hwrm_nvm_write_input req = {0};
4074 struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
4075 rte_iova_t dma_handle;
4078 buf = rte_malloc("nvm_write", data_len, 0);
4082 dma_handle = rte_malloc_virt2iova(buf);
4083 if (dma_handle == RTE_BAD_IOVA) {
4085 "unable to map response address to physical memory\n");
4088 memcpy(buf, data, data_len);
4090 HWRM_PREP(&req, HWRM_NVM_WRITE, BNXT_USE_CHIMP_MB);
4092 req.dir_type = rte_cpu_to_le_16(dir_type);
4093 req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
4094 req.dir_ext = rte_cpu_to_le_16(dir_ext);
4095 req.dir_attr = rte_cpu_to_le_16(dir_attr);
4096 req.dir_data_length = rte_cpu_to_le_32(data_len);
4097 req.host_src_addr = rte_cpu_to_le_64(dma_handle);
4099 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4102 HWRM_CHECK_RESULT();
4109 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
4111 uint32_t *count = cbdata;
4113 *count = *count + 1;
4116 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
4117 struct bnxt_vnic_info *vnic __rte_unused)
4122 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
4126 bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
4127 &count, bnxt_vnic_count_hwrm_stub);
4132 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
4135 struct hwrm_func_vf_vnic_ids_query_input req = {0};
4136 struct hwrm_func_vf_vnic_ids_query_output *resp =
4137 bp->hwrm_cmd_resp_addr;
4140 /* First query all VNIC ids */
4141 HWRM_PREP(&req, HWRM_FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
4143 req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
4144 req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
4145 req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(vnic_ids));
4147 if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
4150 "unable to map VNIC ID table address to physical memory\n");
4153 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4154 HWRM_CHECK_RESULT();
4155 rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
4163 * This function queries the VNIC IDs for a specified VF. It then calls
4164 * the vnic_cb to update the necessary field in vnic_info with cbdata.
4165 * Then it calls the hwrm_cb function to program this new vnic configuration.
4167 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
4168 void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
4169 int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
4171 struct bnxt_vnic_info vnic;
4173 int i, num_vnic_ids;
4178 /* First query all VNIC ids */
4179 vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
4180 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
4181 RTE_CACHE_LINE_SIZE);
4182 if (vnic_ids == NULL)
4185 for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
4186 rte_mem_lock_page(((char *)vnic_ids) + sz);
4188 num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
4190 if (num_vnic_ids < 0)
4191 return num_vnic_ids;
4193 /* Retrieve each VNIC, apply the caller's update, then reconfigure it */
4195 for (i = 0; i < num_vnic_ids; i++) {
4196 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
4197 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
4198 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
4201 if (vnic.mru <= 4) /* Indicates unallocated */
4204 vnic_cb(&vnic, cbdata);
4206 rc = hwrm_cb(bp, &vnic);
4216 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
4219 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4220 struct hwrm_func_cfg_input req = {0};
4223 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4225 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
4226 req.enables |= rte_cpu_to_le_32(
4227 HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
4228 req.vlan_antispoof_mode = on ?
4229 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
4230 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
4231 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4233 HWRM_CHECK_RESULT();
4239 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
4241 struct bnxt_vnic_info vnic;
4244 int num_vnic_ids, i;
4248 vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
4249 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
4250 RTE_CACHE_LINE_SIZE);
4251 if (vnic_ids == NULL)
4254 for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
4255 rte_mem_lock_page(((char *)vnic_ids) + sz);
4257 rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
4263 * Loop through to find the default VNIC ID.
4264 * TODO: The easier way would be to obtain the resp->dflt_vnic_id
4265 * by sending the hwrm_func_qcfg command to the firmware.
4267 for (i = 0; i < num_vnic_ids; i++) {
4268 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
4269 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
4270 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
4271 bp->pf.first_vf_id + vf);
4274 if (vnic.func_default) {
4276 return vnic.fw_vnic_id;
4279 /* Could not find a default VNIC. */
4280 PMD_DRV_LOG(ERR, "No default VNIC\n");
int bnxt_hwrm_set_em_filter(struct bnxt *bp,
			    uint16_t dst_id,
			    struct bnxt_filter_info *filter)
{
	struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;
	int rc = 0;

	if (filter->fw_em_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_em_filter(bp, filter);

	HWRM_PREP(&req, HWRM_CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
		  HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (filter->ip_addr_type) {
		req.ip_addr_type = filter->ip_addr_type;
		enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
	}
	if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
	if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
		memcpy(req.src_macaddr, filter->src_macaddr,
		       RTE_ETHER_ADDR_LEN);
	if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
		memcpy(req.dst_macaddr, filter->dst_macaddr,
		       RTE_ETHER_ADDR_LEN);
	if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
		req.ovlan_vid = filter->l2_ovlan;
	if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
		req.ivlan_vid = filter->l2_ivlan;
	if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
	if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
		req.ip_protocol = filter->ip_protocol;
	if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
		req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
	if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
		req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
	if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
		req.src_port = rte_cpu_to_be_16(filter->src_port);
	if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
		req.dst_port = rte_cpu_to_be_16(filter->dst_port);
	if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
		req.mirror_vnic_id = filter->mirror_vnic_id;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));

	HWRM_CHECK_RESULT();

	filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
{
	struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
	struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	if (filter->fw_em_filter_id == UINT64_MAX)
		return 0;

	HWRM_PREP(&req, HWRM_CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
	req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	filter->fw_em_filter_id = UINT64_MAX;
	filter->fw_l2_filter_id = UINT64_MAX;

	return 0;
}
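/*
 * Allocate an n-tuple flow in the CFA. Note that only index 0 of the IPv4
 * address and mask arrays is programmed here; on success the FW handle and
 * flow id are saved in the filter.
 */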
int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
				uint16_t dst_id,
				struct bnxt_filter_info *filter)
{
	struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_ntuple_filter_alloc_output *resp =
		bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;
	int rc = 0;

	if (filter->fw_ntuple_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_ntuple_filter(bp, filter);

	HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
		  HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (filter->ip_addr_type) {
		req.ip_addr_type = filter->ip_addr_type;
		enables |=
		    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
	}
	if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
	if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
		memcpy(req.src_macaddr, filter->src_macaddr,
		       RTE_ETHER_ADDR_LEN);
	if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
	if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
		req.ip_protocol = filter->ip_protocol;
	if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
		req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
		req.src_ipaddr_mask[0] =
			rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
	if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
		req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
		req.dst_ipaddr_mask[0] =
			rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
	if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
		req.src_port = rte_cpu_to_le_16(filter->src_port);
	if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
		req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
	if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
		req.dst_port = rte_cpu_to_le_16(filter->dst_port);
	if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
		req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
		req.mirror_vnic_id = filter->mirror_vnic_id;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
	filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
				  struct bnxt_filter_info *filter)
{
	struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_ntuple_filter_free_output *resp =
		bp->hwrm_cmd_resp_addr;
	int rc = 0;

	if (filter->fw_ntuple_filter_id == UINT64_MAX)
		return 0;

	HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);
	req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	filter->fw_ntuple_filter_id = UINT64_MAX;

	return 0;
}
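/*
 * On Thor the RSS indirection table is DMAed to the FW as (rx ring, cp ring)
 * fw ring id pairs, BNXT_RSS_ENTRIES_PER_CTX_THOR (64) pairs per RSS
 * context. Stopped rx queues are skipped so that hash buckets only point at
 * active rings.
 */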
static int
bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct bnxt_rx_queue **rxqs = bp->rx_queues;
	uint16_t *ring_tbl = vnic->rss_table;
	int nr_ctxs = vnic->num_lb_ctxts;
	int max_rings = bp->rx_nr_rings;
	int i, j, k, cnt;
	int rc = 0;

	for (i = 0, k = 0; i < nr_ctxs; i++) {
		struct bnxt_rx_ring_info *rxr;
		struct bnxt_cp_ring_info *cpr;

		HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);

		req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
		req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
		req.hash_mode_flags = vnic->hash_mode;

		req.ring_grp_tbl_addr =
			rte_cpu_to_le_64(vnic->rss_table_dma_addr +
					 i * BNXT_RSS_ENTRIES_PER_CTX_THOR *
					 2 * sizeof(*ring_tbl));
		req.hash_key_tbl_addr =
			rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);

		req.ring_table_pair_index = i;
		req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);

		for (j = 0; j < 64; j++) {
			uint16_t ring_id;

			/* Find next active ring. */
			for (cnt = 0; cnt < max_rings; cnt++) {
				if (rx_queue_state[k] !=
				    RTE_ETH_QUEUE_STATE_STOPPED)
					break;
				if (++k == max_rings)
					k = 0;
			}

			/* Return if no rings are active. */
			if (cnt == max_rings) {
				HWRM_UNLOCK();
				return 0;
			}

			/* Add rx/cp ring pair to RSS table. */
			rxr = rxqs[k]->rx_ring;
			cpr = rxqs[k]->cp_ring;

			ring_id = rxr->rx_ring_struct->fw_ring_id;
			*ring_tbl++ = rte_cpu_to_le_16(ring_id);
			ring_id = cpr->cp_ring_struct->fw_ring_id;
			*ring_tbl++ = rte_cpu_to_le_16(ring_id);

			if (++k == max_rings)
				k = 0;
		}
		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
					    BNXT_USE_CHIMP_MB);
		HWRM_CHECK_RESULT();
		HWRM_UNLOCK();
	}

	return rc;
}
int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	unsigned int rss_idx, fw_idx, i;

	if (!(vnic->rss_table && vnic->hash_type))
		return 0;

	if (BNXT_CHIP_THOR(bp))
		return bnxt_vnic_rss_configure_thor(bp, vnic);

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
		return 0;

	/*
	 * Fill the RSS hash & redirection table with
	 * ring group ids for all VNICs
	 */
	for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
	     rss_idx++, fw_idx++) {
		for (i = 0; i < bp->rx_cp_nr_rings; i++) {
			fw_idx %= bp->rx_cp_nr_rings;
			if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
				break;
			fw_idx++;
		}
		if (i == bp->rx_cp_nr_rings)
			return 0;
		vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
	}
	return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
}
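/*
 * Copy precomputed interrupt coalescing parameters from the driver's
 * bnxt_coal structure into a RING_CMPL_RING_CFG_AGGINT_PARAMS request.
 */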
static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
	uint16_t flags;

	req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);

	/* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
	req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);

	/* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
	req->num_cmpl_dma_aggr_during_int =
		rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);

	req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);

	/* min timer set to 1/2 of interrupt timer */
	req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);

	/* buf timer set to 1/4 of interrupt timer */
	req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);

	req->cmpl_aggr_dma_tmr_during_int =
		rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);

	flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
		HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
	req->flags = rte_cpu_to_le_16(flags);
}
static int bnxt_hwrm_set_coal_params_thor(struct bnxt *bp,
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req)
{
	struct hwrm_ring_aggint_qcaps_input req = {0};
	struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables;
	uint16_t flags;
	int rc;

	HWRM_PREP(&req, HWRM_RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();

	agg_req->num_cmpl_dma_aggr = resp->num_cmpl_dma_aggr_max;
	agg_req->cmpl_aggr_dma_tmr = resp->cmpl_aggr_dma_tmr_min;

	flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
		HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
	agg_req->flags = rte_cpu_to_le_16(flags);
	enables =
	 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR |
	 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR;
	agg_req->enables = rte_cpu_to_le_32(enables);

	HWRM_UNLOCK();
	return rc;
}
int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
			    struct bnxt_coal *coal, uint16_t ring_id)
{
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
		bp->hwrm_cmd_resp_addr;
	int rc;

	/* Ring coalesce parameters are set only on Thor and Stratus NICs */
	if (BNXT_CHIP_THOR(bp)) {
		if (bnxt_hwrm_set_coal_params_thor(bp, &req))
			return -1;
	} else if (bnxt_stratus_device(bp)) {
		bnxt_hwrm_set_coal_params(coal, &req);
	} else {
		return 0;
	}

	HWRM_PREP(&req,
		  HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
		  BNXT_USE_CHIMP_MB);
	req.ring_id = rte_cpu_to_le_16(ring_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();
	return 0;
}
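/*
 * Query the FW's backing-store (context memory) requirements on Thor-based
 * NICs and cache them in a freshly allocated bnxt_ctx_mem_info, including
 * one bnxt_ctx_pg_info per fast-path TQM ring plus one for the slow-path
 * ring.
 */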
#define BNXT_RTE_MEMZONE_FLAG (RTE_MEMZONE_1GB | RTE_MEMZONE_IOVA_CONTIG)
int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
{
	struct hwrm_func_backing_store_qcaps_input req = {0};
	struct hwrm_func_backing_store_qcaps_output *resp =
		bp->hwrm_cmd_resp_addr;
	struct bnxt_ctx_pg_info *ctx_pg;
	struct bnxt_ctx_mem_info *ctx;
	int total_alloc_len;
	int rc, i, tqm_rings;

	if (!BNXT_CHIP_THOR(bp) ||
	    bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
	    BNXT_VF(bp) ||
	    bp->ctx)
		return 0;

	HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT_SILENT();

	total_alloc_len = sizeof(*ctx);
	ctx = rte_zmalloc("bnxt_ctx_mem", total_alloc_len,
			  RTE_CACHE_LINE_SIZE);
	if (!ctx) {
		rc = -ENOMEM;
		goto ctx_err;
	}

	ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
	ctx->qp_min_qp1_entries =
		rte_le_to_cpu_16(resp->qp_min_qp1_entries);
	ctx->qp_max_l2_entries =
		rte_le_to_cpu_16(resp->qp_max_l2_entries);
	ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
	ctx->srq_max_l2_entries =
		rte_le_to_cpu_16(resp->srq_max_l2_entries);
	ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
	ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
	ctx->cq_max_l2_entries =
		rte_le_to_cpu_16(resp->cq_max_l2_entries);
	ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
	ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
	ctx->vnic_max_vnic_entries =
		rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
	ctx->vnic_max_ring_table_entries =
		rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
	ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
	ctx->stat_max_entries = rte_le_to_cpu_32(resp->stat_max_entries);
	ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
	ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
	ctx->tqm_min_entries_per_ring =
		rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
	ctx->tqm_max_entries_per_ring =
		rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
	ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
	if (!ctx->tqm_entries_multiple)
		ctx->tqm_entries_multiple = 1;
	ctx->mrav_max_entries = rte_le_to_cpu_32(resp->mrav_max_entries);
	ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
	ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
	ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
	ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;

	if (!ctx->tqm_fp_rings_count)
		ctx->tqm_fp_rings_count = bp->max_q;

	tqm_rings = ctx->tqm_fp_rings_count + 1;

	ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
			    sizeof(*ctx_pg) * tqm_rings,
			    RTE_CACHE_LINE_SIZE);
	if (!ctx_pg) {
		rc = -ENOMEM;
		goto ctx_err;
	}
	for (i = 0; i < tqm_rings; i++, ctx_pg++)
		ctx->tqm_mem[i] = ctx_pg;

	bp->ctx = ctx;
ctx_err:
	HWRM_UNLOCK();
	return rc;
}
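/*
 * Program the backing-store page tables into the FW. Each region selected
 * by an enables bit gets its entry count, entry size and page directory;
 * the nine TQM rings are walked with a left-shifting enable bit starting
 * from TQM_SP.
 */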
int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)
{
	struct hwrm_func_backing_store_cfg_input req = {0};
	struct hwrm_func_backing_store_cfg_output *resp =
		bp->hwrm_cmd_resp_addr;
	struct bnxt_ctx_mem_info *ctx = bp->ctx;
	struct bnxt_ctx_pg_info *ctx_pg;
	uint32_t *num_entries;
	uint64_t *pg_dir;
	uint8_t *pg_attr;
	uint32_t ena;
	int i, rc;

	if (!ctx)
		return 0;

	HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB);
	req.enables = rte_cpu_to_le_32(enables);

	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
		ctx_pg = &ctx->qp_mem;
		req.qp_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
		req.qp_num_qp1_entries =
			rte_cpu_to_le_16(ctx->qp_min_qp1_entries);
		req.qp_num_l2_entries =
			rte_cpu_to_le_16(ctx->qp_max_l2_entries);
		req.qp_entry_size = rte_cpu_to_le_16(ctx->qp_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.qpc_pg_size_qpc_lvl,
				      &req.qpc_page_dir);
	}

	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
		ctx_pg = &ctx->srq_mem;
		req.srq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
		req.srq_num_l2_entries =
			rte_cpu_to_le_16(ctx->srq_max_l2_entries);
		req.srq_entry_size = rte_cpu_to_le_16(ctx->srq_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.srq_pg_size_srq_lvl,
				      &req.srq_page_dir);
	}

	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
		ctx_pg = &ctx->cq_mem;
		req.cq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
		req.cq_num_l2_entries =
			rte_cpu_to_le_16(ctx->cq_max_l2_entries);
		req.cq_entry_size = rte_cpu_to_le_16(ctx->cq_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.cq_pg_size_cq_lvl,
				      &req.cq_page_dir);
	}

	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
		ctx_pg = &ctx->vnic_mem;
		req.vnic_num_vnic_entries =
			rte_cpu_to_le_16(ctx->vnic_max_vnic_entries);
		req.vnic_num_ring_table_entries =
			rte_cpu_to_le_16(ctx->vnic_max_ring_table_entries);
		req.vnic_entry_size = rte_cpu_to_le_16(ctx->vnic_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.vnic_pg_size_vnic_lvl,
				      &req.vnic_page_dir);
	}

	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
		ctx_pg = &ctx->stat_mem;
		req.stat_num_entries = rte_cpu_to_le_32(ctx->stat_max_entries);
		req.stat_entry_size = rte_cpu_to_le_16(ctx->stat_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.stat_pg_size_stat_lvl,
				      &req.stat_page_dir);
	}

	req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
	num_entries = &req.tqm_sp_num_entries;
	pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl;
	pg_dir = &req.tqm_sp_page_dir;
	ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
	for (i = 0; i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
		if (!(enables & ena))
			continue;

		ctx_pg = ctx->tqm_mem[i];
		*num_entries = rte_cpu_to_le_32(ctx_pg->entries);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
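/*
 * Trigger an extended port statistics DMA into the host buffers mapped at
 * init time and record how many bytes of rx/tx stats the FW actually
 * returned.
 */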
int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
{
	struct hwrm_port_qstats_ext_input req = {0};
	struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_pf_info *pf = &bp->pf;
	int rc;

	if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
	      bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
		return 0;

	HWRM_PREP(&req, HWRM_PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);

	req.port_id = rte_cpu_to_le_16(pf->port_id);
	if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
		req.tx_stat_host_addr =
			rte_cpu_to_le_64(bp->hw_tx_port_stats_ext_map);
		req.tx_stat_size =
			rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
	}
	if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
		req.rx_stat_host_addr =
			rte_cpu_to_le_64(bp->hw_rx_port_stats_ext_map);
		req.rx_stat_size =
			rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
	}
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	if (rc) {
		bp->fw_rx_port_stats_ext_size = 0;
		bp->fw_tx_port_stats_ext_size = 0;
	} else {
		bp->fw_rx_port_stats_ext_size =
			rte_le_to_cpu_16(resp->rx_stat_size);
		bp->fw_tx_port_stats_ext_size =
			rte_le_to_cpu_16(resp->tx_stat_size);
	}

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
static int
bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type)
{
	struct hwrm_cfa_redirect_tunnel_type_alloc_input req = {0};
	struct hwrm_cfa_redirect_tunnel_type_alloc_output *resp =
		bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC,
		  BNXT_USE_CHIMP_MB);
	req.tunnel_type = type;
	req.dest_fid = bp->fw_fid;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
static int
bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type)
{
	struct hwrm_cfa_redirect_tunnel_type_free_input req = {0};
	struct hwrm_cfa_redirect_tunnel_type_free_output *resp =
		bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE,
		  BNXT_USE_CHIMP_MB);
	req.tunnel_type = type;
	req.dest_fid = bp->fw_fid;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type)
{
	struct hwrm_cfa_redirect_query_tunnel_type_input req = {0};
	struct hwrm_cfa_redirect_query_tunnel_type_output *resp =
		bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(&req, HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE,
		  BNXT_USE_CHIMP_MB);
	req.src_fid = bp->fw_fid;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();

	if (type)
		*type = rte_le_to_cpu_32(resp->tunnel_mask);
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type,
				   uint16_t *dst_fid)
{
	struct hwrm_cfa_redirect_tunnel_type_info_input req = {0};
	struct hwrm_cfa_redirect_tunnel_type_info_output *resp =
		bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB);
	req.src_fid = bp->fw_fid;
	req.tunnel_type = tun_type;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();

	if (dst_fid)
		*dst_fid = rte_le_to_cpu_16(resp->dest_fid);

	PMD_DRV_LOG(DEBUG, "dst_fid: %x\n", resp->dest_fid);

	HWRM_UNLOCK();
	return rc;
}
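/*
 * Program the VF's current MAC address as its default MAC via FUNC_VF_CFG
 * and cache it in bp->dflt_mac_addr on success. No-op for PFs.
 */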
int bnxt_hwrm_set_mac(struct bnxt *bp)
{
	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_vf_cfg_input req = {0};
	int rc = 0;

	if (!BNXT_VF(bp))
		return 0;

	HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
	req.enables =
		rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	memcpy(bp->dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
	HWRM_UNLOCK();
	return rc;
}
int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
{
	struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_drv_if_change_input req = {0};
	uint32_t flags;
	int rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
		return 0;

	/* Do not issue FUNC_DRV_IF_CHANGE during reset recovery. If it is
	 * sent with the "up" flag cleared before FUNC_DRV_UNRGTR, the FW
	 * resets before FUNC_DRV_UNRGTR completes.
	 */
	if (!up && (bp->flags & BNXT_FLAG_FW_RESET))
		return 0;

	HWRM_PREP(&req, HWRM_FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB);
	if (up)
		req.flags =
		rte_cpu_to_le_32(HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	flags = rte_le_to_cpu_32(resp->flags);
	HWRM_UNLOCK();

	if (!up)
		return 0;

	if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE) {
		PMD_DRV_LOG(INFO, "FW reset happened while port was down\n");
		bp->flags |= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
	}
	return 0;
}
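/*
 * Query the FW's error recovery policy: who drives recovery (host or
 * co-processor), polling intervals (converted here from 100ms units to
 * milliseconds), FW status/heartbeat register locations and the register
 * writes needed to reset the chip. On any failure the recovery info is
 * released so recovery stays disabled.
 */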
int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
{
	struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_error_recovery_info *info = bp->recovery_info;
	struct hwrm_error_recovery_qcfg_input req = {0};
	uint32_t flags = 0;
	unsigned int i;
	int rc;

	/* Older FW does not have error recovery support */
	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
		return 0;

	HWRM_PREP(&req, HWRM_ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();

	flags = rte_le_to_cpu_32(resp->flags);
	if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST)
		info->flags |= BNXT_FLAG_ERROR_RECOVERY_HOST;
	else if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
		info->flags |= BNXT_FLAG_ERROR_RECOVERY_CO_CPU;

	if ((info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) &&
	    !(bp->flags & BNXT_FLAG_KONG_MB_EN)) {
		rc = -EINVAL;
		goto err;
	}

	/* FW returned values are in units of 100msec */
	info->driver_polling_freq =
		rte_le_to_cpu_32(resp->driver_polling_freq) * 100;
	info->master_func_wait_period =
		rte_le_to_cpu_32(resp->master_func_wait_period) * 100;
	info->normal_func_wait_period =
		rte_le_to_cpu_32(resp->normal_func_wait_period) * 100;
	info->master_func_wait_period_after_reset =
		rte_le_to_cpu_32(resp->master_func_wait_period_after_reset) *
		100;
	info->max_bailout_time_after_reset =
		rte_le_to_cpu_32(resp->max_bailout_time_after_reset) * 100;
	info->status_regs[BNXT_FW_STATUS_REG] =
		rte_le_to_cpu_32(resp->fw_health_status_reg);
	info->status_regs[BNXT_FW_HEARTBEAT_CNT_REG] =
		rte_le_to_cpu_32(resp->fw_heartbeat_reg);
	info->status_regs[BNXT_FW_RECOVERY_CNT_REG] =
		rte_le_to_cpu_32(resp->fw_reset_cnt_reg);
	info->status_regs[BNXT_FW_RESET_INPROG_REG] =
		rte_le_to_cpu_32(resp->reset_inprogress_reg);
	info->reg_array_cnt = rte_le_to_cpu_32(resp->reg_array_cnt);

	if (info->reg_array_cnt >= BNXT_NUM_RESET_REG) {
		rc = -EINVAL;
		goto err;
	}

	for (i = 0; i < info->reg_array_cnt; i++) {
		info->reset_reg[i] = rte_le_to_cpu_32(resp->reset_reg[i]);
		info->reset_reg_val[i] =
			rte_le_to_cpu_32(resp->reset_reg_val[i]);
		info->delay_after_reset[i] = resp->delay_after_reset[i];
	}
err:
	HWRM_UNLOCK();

	/* Map the FW status registers */
	if (!rc)
		rc = bnxt_map_fw_health_status_regs(bp);

	if (rc) {
		rte_free(bp->recovery_info);
		bp->recovery_info = NULL;
	}
	return rc;
}
int bnxt_hwrm_fw_reset(struct bnxt *bp)
{
	struct hwrm_fw_reset_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_fw_reset_input req = {0};
	int rc;

	if (!BNXT_PF(bp))
		return -EOPNOTSUPP;

	HWRM_PREP(&req, HWRM_FW_RESET, BNXT_USE_KONG(bp));
	req.embedded_proc_type =
		HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
	req.selfrst_status =
		HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
	req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
				    BNXT_USE_KONG(bp));
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();
	return rc;
}
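/*
 * Read a PTP timestamp from the FW; path selects the TX or RX packet
 * timestamp or the current PHC time. The 64-bit value is assembled from
 * two little-endian 32-bit words.
 */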
int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp)
{
	struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_port_ts_query_input req = {0};
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	uint32_t flags = 0;
	int rc;

	if (!ptp)
		return 0;

	HWRM_PREP(&req, HWRM_PORT_TS_QUERY, BNXT_USE_CHIMP_MB);
	switch (path) {
	case BNXT_PTP_FLAGS_PATH_TX:
		flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX;
		break;
	case BNXT_PTP_FLAGS_PATH_RX:
		flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX;
		break;
	case BNXT_PTP_FLAGS_CURRENT_TIME:
		flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_CURRENT_TIME;
		break;
	}
	req.flags = rte_cpu_to_le_32(flags);
	req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	if (timestamp) {
		*timestamp = rte_le_to_cpu_32(resp->ptp_msg_ts[0]);
		*timestamp |=
			(uint64_t)(rte_le_to_cpu_32(resp->ptp_msg_ts[1])) << 32;
	}
	HWRM_UNLOCK();
	return rc;
}
int bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(struct bnxt *bp)
{
	struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp =
		bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
	uint32_t flags = 0;
	int rc = 0;

	if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT))
		return rc;

	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
		PMD_DRV_LOG(DEBUG,
			    "Not a PF or trusted VF. Command not supported\n");
		return 0;
	}

	HWRM_PREP(&req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, BNXT_USE_KONG(bp));
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
	HWRM_CHECK_RESULT();
	flags = rte_le_to_cpu_32(resp->flags);
	HWRM_UNLOCK();

	if (flags & HWRM_CFA_ADV_FLOW_MGNT_QCAPS_L2_HDR_SRC_FILTER_EN) {
		bp->flow_flags |= BNXT_FLOW_FLAG_L2_HDR_SRC_FILTER_EN;
		PMD_DRV_LOG(INFO, "Source L2 header filtering enabled\n");
	}
	return rc;
}
int bnxt_hwrm_cfa_counter_qcaps(struct bnxt *bp, uint16_t *max_fc)
{
	struct hwrm_cfa_counter_qcaps_input req = {0};
	struct hwrm_cfa_counter_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
		PMD_DRV_LOG(DEBUG,
			    "Not a PF or trusted VF. Command not supported\n");
		return 0;
	}

	HWRM_PREP(&req, HWRM_CFA_COUNTER_QCAPS, BNXT_USE_KONG(bp));
	req.target_id = rte_cpu_to_le_16(bp->fw_fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
	HWRM_CHECK_RESULT();

	*max_fc = rte_le_to_cpu_16(resp->max_rx_fc);
	HWRM_UNLOCK();

	PMD_DRV_LOG(DEBUG, "max_fc = %d\n", *max_fc);
	return 0;
}
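/*
 * Register a block of flow-counter memory with the FW (a single 2MB page
 * addressed by dma_addr) and return the context id used to reference it in
 * subsequent CFA_COUNTER_* commands.
 */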
int bnxt_hwrm_ctx_rgtr(struct bnxt *bp, rte_iova_t dma_addr, uint16_t *ctx_id)
{
	struct hwrm_cfa_ctx_mem_rgtr_input req = {.req_type = 0 };
	struct hwrm_cfa_ctx_mem_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
		PMD_DRV_LOG(DEBUG,
			    "Not a PF or trusted VF. Command not supported\n");
		return 0;
	}

	HWRM_PREP(&req, HWRM_CFA_CTX_MEM_RGTR, BNXT_USE_KONG(bp));
	req.page_level = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_0;
	req.page_size = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_2M;
	req.page_dir = rte_cpu_to_le_64(dma_addr);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
	HWRM_CHECK_RESULT();

	if (ctx_id) {
		*ctx_id = rte_le_to_cpu_16(resp->ctx_id);
		PMD_DRV_LOG(DEBUG, "ctx_id = %d\n", *ctx_id);
	}
	HWRM_UNLOCK();
	return 0;
}
int bnxt_hwrm_ctx_unrgtr(struct bnxt *bp, uint16_t ctx_id)
{
	struct hwrm_cfa_ctx_mem_unrgtr_input req = {.req_type = 0 };
	struct hwrm_cfa_ctx_mem_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
		PMD_DRV_LOG(DEBUG,
			    "Not a PF or trusted VF. Command not supported\n");
		return 0;
	}

	HWRM_PREP(&req, HWRM_CFA_CTX_MEM_UNRGTR, BNXT_USE_KONG(bp));
	req.ctx_id = rte_cpu_to_le_16(ctx_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();
	return rc;
}
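/*
 * Enable or disable flow counters for one direction. The counters operate
 * in pull mode: the host DMAs them on demand via
 * bnxt_hwrm_cfa_counter_qstats() below.
 */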
int bnxt_hwrm_cfa_counter_cfg(struct bnxt *bp, enum bnxt_flow_dir dir,
			      uint16_t cntr, uint16_t ctx_id,
			      uint32_t num_entries, bool enable)
{
	struct hwrm_cfa_counter_cfg_input req = {0};
	struct hwrm_cfa_counter_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t flags = 0;
	int rc;

	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
		PMD_DRV_LOG(DEBUG,
			    "Not a PF or trusted VF. Command not supported\n");
		return 0;
	}

	HWRM_PREP(&req, HWRM_CFA_COUNTER_CFG, BNXT_USE_KONG(bp));

	req.target_id = rte_cpu_to_le_16(bp->fw_fid);
	req.counter_type = rte_cpu_to_le_16(cntr);
	flags = enable ? HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_ENABLE :
		HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_DISABLE;
	flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PULL;
	if (dir == BNXT_DIR_RX)
		flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_RX;
	else if (dir == BNXT_DIR_TX)
		flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_TX;
	req.flags = rte_cpu_to_le_16(flags);
	req.ctx_id = rte_cpu_to_le_16(ctx_id);
	req.num_entries = rte_cpu_to_le_32(num_entries);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return 0;
}
int bnxt_hwrm_cfa_counter_qstats(struct bnxt *bp,
				 enum bnxt_flow_dir dir,
				 uint16_t cntr,
				 uint16_t num_entries)
{
	struct hwrm_cfa_counter_qstats_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_counter_qstats_input req = {0};
	uint16_t flow_ctx_id = 0;
	uint16_t flags = 0;
	int rc = 0;

	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
		PMD_DRV_LOG(DEBUG,
			    "Not a PF or trusted VF. Command not supported\n");
		return 0;
	}

	if (dir == BNXT_DIR_RX) {
		flow_ctx_id = bp->rx_fc_in_tbl.ctx_id;
		flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_RX;
	} else if (dir == BNXT_DIR_TX) {
		flow_ctx_id = bp->tx_fc_in_tbl.ctx_id;
		flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_TX;
	}

	HWRM_PREP(&req, HWRM_CFA_COUNTER_QSTATS, BNXT_USE_KONG(bp));
	req.target_id = rte_cpu_to_le_16(bp->fw_fid);
	req.counter_type = rte_cpu_to_le_16(cntr);
	req.input_flow_ctx_id = rte_cpu_to_le_16(flow_ctx_id);
	req.num_entries = rte_cpu_to_le_16(num_entries);
	req.flags = rte_cpu_to_le_16(flags);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return 0;
}