1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2018 Broadcom
8 #include <rte_byteorder.h>
9 #include <rte_common.h>
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_memzone.h>
13 #include <rte_version.h>
17 #include "bnxt_filter.h"
18 #include "bnxt_hwrm.h"
21 #include "bnxt_ring.h"
24 #include "bnxt_vnic.h"
25 #include "hsi_struct_def_dpdk.h"
27 #define HWRM_SPEC_CODE_1_8_3 0x10803
28 #define HWRM_VERSION_1_9_1 0x10901
29 #define HWRM_VERSION_1_9_2 0x10903
31 struct bnxt_plcmodes_cfg {
33 uint16_t jumbo_thresh;
35 uint16_t hds_threshold;
38 static int page_getenum(size_t size)
54 PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
55 return sizeof(int) * 8 - 1;
58 static int page_roundup(size_t size)
60 return 1 << page_getenum(size);
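/*
 * Illustrative example (assuming log2 round-up semantics for page_getenum()):
 * for a 3000 byte ring, page_getenum(3000) returns 12 and page_roundup(3000)
 * returns 1 << 12 = 4096.
 */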
63 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
67 if (rmem->nr_pages > 1) {
69 *pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
71 *pg_dir = rte_cpu_to_le_64(rmem->dma_arr[0]);
76 * HWRM Functions (sent to HWRM)
77  * These are named bnxt_hwrm_*() and return 0 on success, -ETIMEDOUT (-110)
78  * if the HWRM command times out, or a negative error code if the FW
79  * failed the HWRM command.
82 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
83 uint32_t msg_len, bool use_kong_mb)
86 struct input *req = msg;
87 struct output *resp = bp->hwrm_cmd_resp_addr;
91 uint16_t max_req_len = bp->max_req_len;
92 struct hwrm_short_input short_input = { 0 };
93 uint16_t bar_offset = use_kong_mb ?
94 GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
95 uint16_t mb_trigger_offset = use_kong_mb ?
96 GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
99 /* Do not send HWRM commands to firmware in error state */
100 if (bp->flags & BNXT_FLAG_FATAL_ERROR)
103 timeout = bp->hwrm_cmd_timeout;
105 if (bp->flags & BNXT_FLAG_SHORT_CMD ||
106 msg_len > bp->max_req_len) {
107 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
109 memset(short_cmd_req, 0, bp->hwrm_max_ext_req_len);
110 memcpy(short_cmd_req, req, msg_len);
112 short_input.req_type = rte_cpu_to_le_16(req->req_type);
113 short_input.signature = rte_cpu_to_le_16(
114 HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
115 short_input.size = rte_cpu_to_le_16(msg_len);
116 short_input.req_addr =
117 rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);
119 data = (uint32_t *)&short_input;
120 msg_len = sizeof(short_input);
122 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
125 /* Write request msg to hwrm channel */
126 for (i = 0; i < msg_len; i += 4) {
127 bar = (uint8_t *)bp->bar0 + bar_offset + i;
128 rte_write32(*data, bar);
132 /* Zero the rest of the request space */
133 for (; i < max_req_len; i += 4) {
134 bar = (uint8_t *)bp->bar0 + bar_offset + i;
138 /* Ring channel doorbell */
139 bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
142 	 * Make sure the channel doorbell write has completed before
143 	 * reading the response to avoid getting stale or invalid
148 /* Poll for the valid bit */
149 for (i = 0; i < timeout; i++) {
150 /* Sanity check on the resp->resp_len */
152 if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
153 /* Last byte of resp contains the valid key */
154 valid = (uint8_t *)resp + resp->resp_len - 1;
155 if (*valid == HWRM_RESP_VALID_KEY)
162 /* Suppress VER_GET timeout messages during reset recovery */
163 if (bp->flags & BNXT_FLAG_FW_RESET &&
164 rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
168 "Error(timeout) sending msg 0x%04x, seq_id %d\n",
169 req->req_type, req->seq_id);
176  * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
177  * spinlock and does the initial request setup.
179  * HWRM_CHECK_RESULT() checks the response; on failure it releases the
180  * spinlock and returns the error from the calling function. If the calling
181  * function does not use the regular int return codes, HWRM_CHECK_RESULT()
182  * should not be used directly; copy and modify it to suit that function.
184  * HWRM_UNLOCK() must be called after all response processing is completed;
 * an illustrative usage sketch follows the macro definitions below.
186 #define HWRM_PREP(req, type, kong) do { \
187 rte_spinlock_lock(&bp->hwrm_lock); \
188 if (bp->hwrm_cmd_resp_addr == NULL) { \
189 rte_spinlock_unlock(&bp->hwrm_lock); \
192 memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
193 (req)->req_type = rte_cpu_to_le_16(type); \
194 (req)->cmpl_ring = rte_cpu_to_le_16(-1); \
195 (req)->seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
196 rte_cpu_to_le_16(bp->chimp_cmd_seq++); \
197 (req)->target_id = rte_cpu_to_le_16(0xffff); \
198 (req)->resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
201 #define HWRM_CHECK_RESULT_SILENT() do {\
203 rte_spinlock_unlock(&bp->hwrm_lock); \
206 if (resp->error_code) { \
207 rc = rte_le_to_cpu_16(resp->error_code); \
208 rte_spinlock_unlock(&bp->hwrm_lock); \
213 #define HWRM_CHECK_RESULT() do {\
215 PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
216 rte_spinlock_unlock(&bp->hwrm_lock); \
217 if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
219 else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
221 else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
223 else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
225 else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
231 if (resp->error_code) { \
232 rc = rte_le_to_cpu_16(resp->error_code); \
233 if (resp->resp_len >= 16) { \
234 struct hwrm_err_output *tmp_hwrm_err_op = \
237 "error %d:%d:%08x:%04x\n", \
238 rc, tmp_hwrm_err_op->cmd_err, \
240 tmp_hwrm_err_op->opaque_0), \
242 tmp_hwrm_err_op->opaque_1)); \
244 PMD_DRV_LOG(ERR, "error %d\n", rc); \
246 rte_spinlock_unlock(&bp->hwrm_lock); \
247 if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
249 else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
251 else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
253 else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
255 else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \
263 #define HWRM_UNLOCK() rte_spinlock_unlock(&bp->hwrm_lock)
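/*
 * Illustrative usage sketch of the HWRM_PREP()/HWRM_CHECK_RESULT()/
 * HWRM_UNLOCK() pattern described above. This is documentation only and is
 * not part of the driver: the function name is made up, while the request
 * and response types mirror bnxt_hwrm_func_reset() elsewhere in this file.
 */
#if 0	/* documentation-only example */
static int bnxt_hwrm_example_cmd(struct bnxt *bp)
{
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	/* Grabs hwrm_lock, zeroes the response buffer and fills the header */
	HWRM_PREP(&req, HWRM_FUNC_RESET, BNXT_USE_CHIMP_MB);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	/* On failure: logs the error, unlocks and returns a negative errno */
	HWRM_CHECK_RESULT();

	/* Read any needed fields from *resp while hwrm_lock is still held */

	HWRM_UNLOCK();

	return rc;
}
#endif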
265 int bnxt_hwrm_tf_message_direct(struct bnxt *bp,
274 bool mailbox = BNXT_USE_CHIMP_MB;
275 struct input *req = msg;
276 struct output *resp = bp->hwrm_cmd_resp_addr;
279 mailbox = BNXT_USE_KONG(bp);
281 HWRM_PREP(req, msg_type, mailbox);
283 rc = bnxt_hwrm_send_message(bp, req, msg_len, mailbox);
288 memcpy(resp_msg, resp, resp_len);
295 int bnxt_hwrm_tf_message_tunneled(struct bnxt *bp,
299 uint32_t *tf_response_code,
303 uint32_t response_len)
306 struct hwrm_cfa_tflib_input req = { .req_type = 0 };
307 struct hwrm_cfa_tflib_output *resp = bp->hwrm_cmd_resp_addr;
308 bool mailbox = BNXT_USE_CHIMP_MB;
310 if (msg_len > sizeof(req.tf_req))
314 mailbox = BNXT_USE_KONG(bp);
316 HWRM_PREP(&req, HWRM_TF, mailbox);
317 /* Build request using the user supplied request payload.
318 * TLV request size is checked at build time against HWRM
319 * request max size, thus no checking required.
321 req.tf_type = tf_type;
322 req.tf_subtype = tf_subtype;
323 memcpy(req.tf_req, msg, msg_len);
325 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), mailbox);
328 /* Copy the resp to user provided response buffer */
329 if (response != NULL)
330 		/* Post-process the response data. We need to copy only
331 		 * the 'payload', as the full HWRM data structure is
332 		 * HWRM header + msg header + payload, and TFLIB
333 		 * only provided a payload placeholder.
335 if (response_len != 0) {
341 /* Extract the internal tflib response code */
342 *tf_response_code = resp->tf_resp_code;
348 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
351 struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
352 struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
354 HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
355 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
358 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
366 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
367 struct bnxt_vnic_info *vnic,
369 struct bnxt_vlan_table_entry *vlan_table)
372 struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
373 struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
376 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
379 HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
380 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
382 if (vnic->flags & BNXT_VNIC_INFO_BCAST)
383 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
384 if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
385 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
387 if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
388 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
390 if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI) {
391 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
392 } else if (vnic->flags & BNXT_VNIC_INFO_MCAST) {
393 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
394 req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
395 req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
398 if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
399 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
400 req.vlan_tag_tbl_addr =
401 rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
402 req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
404 req.mask = rte_cpu_to_le_32(mask);
406 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
414 int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
416 struct bnxt_vlan_antispoof_table_entry *vlan_table)
419 struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
420 struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
421 bp->hwrm_cmd_resp_addr;
424 	 * Older HWRM versions did not support this command, and the set_rx_mask
425 	 * list was used for anti-spoofing. In 1.8.0, the TX path configuration
426 	 * was removed from the set_rx_mask call, and this command was added.
428 	 * This command is also present from 1.7.8.11 and higher,
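	 *
	 * (Illustrative note: bp->fw_ver packs the firmware version as
	 * maj << 24 | min << 16 | bld << 8 | rsvd, as assembled in
	 * bnxt_hwrm_ver_get(), so the 1.8.0 check below compares against
	 * (1 << 24) | (8 << 16) = 0x01080000.)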
431 if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
432 if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
433 if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
438 HWRM_PREP(&req, HWRM_CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
439 req.fid = rte_cpu_to_le_16(fid);
441 req.vlan_tag_mask_tbl_addr =
442 rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
443 req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
445 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
453 int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
454 struct bnxt_filter_info *filter)
457 struct bnxt_filter_info *l2_filter = filter;
458 struct bnxt_vnic_info *vnic = NULL;
459 struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
460 struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
462 if (filter->fw_l2_filter_id == UINT64_MAX)
465 if (filter->matching_l2_fltr_ptr)
466 l2_filter = filter->matching_l2_fltr_ptr;
468 PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n",
469 filter, l2_filter, l2_filter->l2_ref_cnt);
471 if (l2_filter->l2_ref_cnt == 0)
474 if (l2_filter->l2_ref_cnt > 0)
475 l2_filter->l2_ref_cnt--;
477 if (l2_filter->l2_ref_cnt > 0)
480 HWRM_PREP(&req, HWRM_CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);
482 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
484 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
489 filter->fw_l2_filter_id = UINT64_MAX;
490 if (l2_filter->l2_ref_cnt == 0) {
491 vnic = l2_filter->vnic;
493 STAILQ_REMOVE(&vnic->filter, l2_filter,
494 bnxt_filter_info, next);
495 bnxt_free_filter(bp, l2_filter);
502 int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
504 struct bnxt_filter_info *filter)
507 struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
508 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
509 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
510 const struct rte_eth_vmdq_rx_conf *conf =
511 &dev_conf->rx_adv_conf.vmdq_rx_conf;
512 uint32_t enables = 0;
513 uint16_t j = dst_id - 1;
515 	//TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ?
516 if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
517 conf->pool_map[j].pools & (1UL << j)) {
519 "Add vlan %u to vmdq pool %u\n",
520 conf->pool_map[j].vlan_id, j);
522 filter->l2_ivlan = conf->pool_map[j].vlan_id;
524 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
525 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
528 if (filter->fw_l2_filter_id != UINT64_MAX)
529 bnxt_hwrm_clear_l2_filter(bp, filter);
531 HWRM_PREP(&req, HWRM_CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
533 /* PMD does not support XDP and RoCE */
534 filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE |
535 HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_TRAFFIC_L2;
536 req.flags = rte_cpu_to_le_32(filter->flags);
538 enables = filter->enables |
539 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
540 req.dst_id = rte_cpu_to_le_16(dst_id);
543 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
544 memcpy(req.l2_addr, filter->l2_addr,
547 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
548 memcpy(req.l2_addr_mask, filter->l2_addr_mask,
551 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
552 req.l2_ovlan = filter->l2_ovlan;
554 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
555 req.l2_ivlan = filter->l2_ivlan;
557 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
558 req.l2_ovlan_mask = filter->l2_ovlan_mask;
560 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
561 req.l2_ivlan_mask = filter->l2_ivlan_mask;
562 if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
563 req.src_id = rte_cpu_to_le_32(filter->src_id);
564 if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
565 req.src_type = filter->src_type;
566 if (filter->pri_hint) {
567 req.pri_hint = filter->pri_hint;
568 req.l2_filter_id_hint =
569 rte_cpu_to_le_64(filter->l2_filter_id_hint);
572 req.enables = rte_cpu_to_le_32(enables);
574 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
578 filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
579 filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
582 filter->l2_ref_cnt++;
587 int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
589 struct hwrm_port_mac_cfg_input req = {.req_type = 0};
590 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
597 HWRM_PREP(&req, HWRM_PORT_MAC_CFG, BNXT_USE_CHIMP_MB);
600 flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
603 HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
604 if (ptp->tx_tstamp_en)
605 flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
608 HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
609 req.flags = rte_cpu_to_le_32(flags);
610 req.enables = rte_cpu_to_le_32
611 (HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
612 req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);
614 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
620 static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
623 struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
624 struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
625 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
630 HWRM_PREP(&req, HWRM_PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);
632 req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
634 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
638 if (!BNXT_CHIP_THOR(bp) &&
639 !(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
642 if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS)
643 bp->flags |= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS;
645 ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
649 if (!BNXT_CHIP_THOR(bp)) {
650 ptp->rx_regs[BNXT_PTP_RX_TS_L] =
651 rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
652 ptp->rx_regs[BNXT_PTP_RX_TS_H] =
653 rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
654 ptp->rx_regs[BNXT_PTP_RX_SEQ] =
655 rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
656 ptp->rx_regs[BNXT_PTP_RX_FIFO] =
657 rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
658 ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
659 rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
660 ptp->tx_regs[BNXT_PTP_TX_TS_L] =
661 rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
662 ptp->tx_regs[BNXT_PTP_TX_TS_H] =
663 rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
664 ptp->tx_regs[BNXT_PTP_TX_SEQ] =
665 rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
666 ptp->tx_regs[BNXT_PTP_TX_FIFO] =
667 rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
676 void bnxt_hwrm_free_vf_info(struct bnxt *bp)
680 for (i = 0; i < bp->pf->max_vfs; i++) {
681 rte_free(bp->pf->vf_info[i].vlan_table);
682 bp->pf->vf_info[i].vlan_table = NULL;
683 rte_free(bp->pf->vf_info[i].vlan_as_table);
684 bp->pf->vf_info[i].vlan_as_table = NULL;
686 rte_free(bp->pf->vf_info);
687 bp->pf->vf_info = NULL;
690 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
693 struct hwrm_func_qcaps_input req = {.req_type = 0 };
694 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
695 uint16_t new_max_vfs;
699 HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
701 req.fid = rte_cpu_to_le_16(0xffff);
703 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
707 bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
708 flags = rte_le_to_cpu_32(resp->flags);
710 bp->pf->port_id = resp->port_id;
711 bp->pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
712 bp->pf->total_vfs = rte_le_to_cpu_16(resp->max_vfs);
713 new_max_vfs = bp->pdev->max_vfs;
714 if (new_max_vfs != bp->pf->max_vfs) {
716 bnxt_hwrm_free_vf_info(bp);
717 bp->pf->vf_info = rte_zmalloc("bnxt_vf_info",
718 sizeof(bp->pf->vf_info[0]) * new_max_vfs, 0);
719 if (bp->pf->vf_info == NULL) {
720 PMD_DRV_LOG(ERR, "Alloc vf info fail\n");
723 bp->pf->max_vfs = new_max_vfs;
724 for (i = 0; i < new_max_vfs; i++) {
725 bp->pf->vf_info[i].fid =
726 bp->pf->first_vf_id + i;
727 bp->pf->vf_info[i].vlan_table =
728 rte_zmalloc("VF VLAN table",
731 if (bp->pf->vf_info[i].vlan_table == NULL)
733 "Fail to alloc VLAN table for VF %d\n",
737 bp->pf->vf_info[i].vlan_table);
738 bp->pf->vf_info[i].vlan_as_table =
739 rte_zmalloc("VF VLAN AS table",
742 if (bp->pf->vf_info[i].vlan_as_table == NULL)
744 "Alloc VLAN AS table for VF %d fail\n",
748 bp->pf->vf_info[i].vlan_as_table);
749 STAILQ_INIT(&bp->pf->vf_info[i].filter);
754 bp->fw_fid = rte_le_to_cpu_32(resp->fid);
755 if (!bnxt_check_zero_bytes(resp->mac_address, RTE_ETHER_ADDR_LEN)) {
756 bp->flags |= BNXT_FLAG_DFLT_MAC_SET;
757 memcpy(bp->mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);
759 bp->flags &= ~BNXT_FLAG_DFLT_MAC_SET;
761 bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
762 bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
763 bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
764 bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
765 bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
766 bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
767 bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
768 if (!BNXT_CHIP_THOR(bp) && !bp->pdev->max_vfs)
769 bp->max_l2_ctx += bp->max_rx_em_flows;
770 /* TODO: For now, do not support VMDq/RFS on VFs. */
775 bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
779 PMD_DRV_LOG(DEBUG, "Max l2_cntxts is %d vnics is %d\n",
780 bp->max_l2_ctx, bp->max_vnics);
781 bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
783 bp->pf->total_vnics = rte_le_to_cpu_16(resp->max_vnics);
784 if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
785 bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
786 PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n");
788 bnxt_hwrm_ptp_qcfg(bp);
792 if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
793 bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;
795 if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
796 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
797 PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");
800 if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
801 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
803 if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE)
804 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
806 if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
807 bp->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
814 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
818 rc = __bnxt_hwrm_func_qcaps(bp);
819 if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
820 rc = bnxt_alloc_ctx_mem(bp);
825 	 * bnxt_hwrm_func_resc_qcaps() can fail and would otherwise cause init
826 	 * to fail, but the error can safely be ignored, so return success.
828 rc = bnxt_hwrm_func_resc_qcaps(bp);
830 bp->flags |= BNXT_FLAG_NEW_RM;
836 /* VNIC caps apply to all VNICs, so there is no need to pass a vnic_id */
837 int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
841 struct hwrm_vnic_qcaps_input req = {.req_type = 0 };
842 struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
844 HWRM_PREP(&req, HWRM_VNIC_QCAPS, BNXT_USE_CHIMP_MB);
846 req.target_id = rte_cpu_to_le_16(0xffff);
848 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
852 flags = rte_le_to_cpu_32(resp->flags);
854 if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP) {
855 bp->vnic_cap_flags |= BNXT_VNIC_CAP_COS_CLASSIFY;
856 PMD_DRV_LOG(INFO, "CoS assignment capability enabled\n");
859 if (flags & HWRM_VNIC_QCAPS_OUTPUT_FLAGS_OUTERMOST_RSS_CAP)
860 bp->vnic_cap_flags |= BNXT_VNIC_CAP_OUTER_RSS;
862 bp->max_tpa_v2 = rte_le_to_cpu_16(resp->max_aggs_supported);
869 int bnxt_hwrm_func_reset(struct bnxt *bp)
872 struct hwrm_func_reset_input req = {.req_type = 0 };
873 struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
875 HWRM_PREP(&req, HWRM_FUNC_RESET, BNXT_USE_CHIMP_MB);
877 req.enables = rte_cpu_to_le_32(0);
879 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
887 int bnxt_hwrm_func_driver_register(struct bnxt *bp)
891 struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
892 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
894 if (bp->flags & BNXT_FLAG_REGISTERED)
897 if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
898 flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
899 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
900 flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;
902 	/* PFs and trusted VFs should indicate support for the
903 	 * Master capability on non-Stingray platforms
905 if ((BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) && !BNXT_STINGRAY(bp))
906 flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;
908 HWRM_PREP(&req, HWRM_FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
909 req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
910 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
911 req.ver_maj = RTE_VER_YEAR;
912 req.ver_min = RTE_VER_MONTH;
913 req.ver_upd = RTE_VER_MINOR;
916 req.enables |= rte_cpu_to_le_32(
917 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
918 memcpy(req.vf_req_fwd, bp->pf->vf_req_fwd,
919 RTE_MIN(sizeof(req.vf_req_fwd),
920 sizeof(bp->pf->vf_req_fwd)));
923 req.flags = rte_cpu_to_le_32(flags);
925 req.async_event_fwd[0] |=
926 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
927 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
928 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
929 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
930 ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
931 if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
932 req.async_event_fwd[0] |=
933 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
934 req.async_event_fwd[1] |=
935 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
936 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
938 req.async_event_fwd[1] |=
939 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DBG_NOTIFICATION);
941 if (BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))
942 req.async_event_fwd[1] |=
943 rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE);
945 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
949 flags = rte_le_to_cpu_32(resp->flags);
950 if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
951 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
955 bp->flags |= BNXT_FLAG_REGISTERED;
960 int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
962 if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))
965 return bnxt_hwrm_func_reserve_vf_resc(bp, true);
968 int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
973 struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
974 struct hwrm_func_vf_cfg_input req = {0};
976 HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
978 enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS |
979 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS |
980 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
981 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
982 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS;
984 if (BNXT_HAS_RING_GRPS(bp)) {
985 enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
986 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
989 req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
990 req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
991 AGG_RING_MULTIPLIER);
992 req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
993 req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
995 BNXT_NUM_ASYNC_CPR(bp));
996 req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
997 if (bp->vf_resv_strategy ==
998 HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
999 enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
1000 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
1001 HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
1002 req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
1003 req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
1004 req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
1005 } else if (bp->vf_resv_strategy ==
1006 HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MAXIMAL) {
1007 enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
1008 req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
1012 flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
1013 HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
1014 HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
1015 HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
1016 HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
1017 HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;
1019 if (test && BNXT_HAS_RING_GRPS(bp))
1020 flags |= HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST;
1022 req.flags = rte_cpu_to_le_32(flags);
1023 req.enables |= rte_cpu_to_le_32(enables);
1025 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1028 HWRM_CHECK_RESULT_SILENT();
1030 HWRM_CHECK_RESULT();
1036 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
1039 struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1040 struct hwrm_func_resource_qcaps_input req = {0};
1042 HWRM_PREP(&req, HWRM_FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
1043 req.fid = rte_cpu_to_le_16(0xffff);
1045 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1047 HWRM_CHECK_RESULT_SILENT();
1049 bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
1050 bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
1051 bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
1052 bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
1053 bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
1054 /* func_resource_qcaps does not return max_rx_em_flows.
1055 * So use the value provided by func_qcaps.
1057 bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
1058 if (!BNXT_CHIP_THOR(bp) && !bp->pdev->max_vfs)
1059 bp->max_l2_ctx += bp->max_rx_em_flows;
1060 bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
1061 bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
1062 bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);
1063 bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
1064 if (bp->vf_resv_strategy >
1065 HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
1066 bp->vf_resv_strategy =
1067 HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;
1073 int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
1076 struct hwrm_ver_get_input req = {.req_type = 0 };
1077 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
1078 uint32_t fw_version;
1079 uint16_t max_resp_len;
1080 char type[RTE_MEMZONE_NAMESIZE];
1081 uint32_t dev_caps_cfg;
1083 bp->max_req_len = HWRM_MAX_REQ_LEN;
1084 bp->hwrm_cmd_timeout = timeout;
1085 HWRM_PREP(&req, HWRM_VER_GET, BNXT_USE_CHIMP_MB);
1087 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
1088 req.hwrm_intf_min = HWRM_VERSION_MINOR;
1089 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
1091 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1093 if (bp->flags & BNXT_FLAG_FW_RESET)
1094 HWRM_CHECK_RESULT_SILENT();
1096 HWRM_CHECK_RESULT();
1098 PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
1099 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
1100 resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
1101 resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
1102 bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
1103 (resp->hwrm_fw_min_8b << 16) |
1104 (resp->hwrm_fw_bld_8b << 8) |
1105 resp->hwrm_fw_rsvd_8b;
1106 PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
1107 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
1109 fw_version = resp->hwrm_intf_maj_8b << 16;
1110 fw_version |= resp->hwrm_intf_min_8b << 8;
1111 fw_version |= resp->hwrm_intf_upd_8b;
1112 bp->hwrm_spec_code = fw_version;
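	/*
	 * Illustrative note: hwrm_spec_code uses the same packing as the
	 * HWRM_SPEC_CODE_1_8_3 and HWRM_VERSION_1_9_1 defines at the top of
	 * this file, e.g. interface version 1.9.1 packs to
	 * (1 << 16) | (9 << 8) | 1 = 0x10901.
	 */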
1114 /* def_req_timeout value is in milliseconds */
1115 bp->hwrm_cmd_timeout = rte_le_to_cpu_16(resp->def_req_timeout);
1116 /* convert timeout to usec */
1117 bp->hwrm_cmd_timeout *= 1000;
1118 if (!bp->hwrm_cmd_timeout)
1119 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
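	/*
	 * Illustrative example: a def_req_timeout of 500 ms becomes 500000
	 * usec here; a reported value of 0 falls back to DFLT_HWRM_CMD_TIMEOUT.
	 */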
1121 if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
1122 PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
1127 if (bp->max_req_len > resp->max_req_win_len) {
1128 PMD_DRV_LOG(ERR, "Unsupported request length\n");
1131 bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
1132 bp->hwrm_max_ext_req_len = rte_le_to_cpu_16(resp->max_ext_req_len);
1133 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
1134 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
1136 max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
1137 dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);
1139 if (bp->max_resp_len != max_resp_len) {
1140 sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT,
1141 bp->pdev->addr.domain, bp->pdev->addr.bus,
1142 bp->pdev->addr.devid, bp->pdev->addr.function);
1144 rte_free(bp->hwrm_cmd_resp_addr);
1146 bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
1147 if (bp->hwrm_cmd_resp_addr == NULL) {
1151 bp->hwrm_cmd_resp_dma_addr =
1152 rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
1153 if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
1155 "Unable to map response buffer to physical memory.\n");
1159 bp->max_resp_len = max_resp_len;
1163 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
1165 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
1166 PMD_DRV_LOG(DEBUG, "Short command supported\n");
1167 bp->flags |= BNXT_FLAG_SHORT_CMD;
1170 if (((dev_caps_cfg &
1171 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
1173 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) ||
1174 bp->hwrm_max_ext_req_len > HWRM_MAX_REQ_LEN) {
1175 sprintf(type, "bnxt_hwrm_short_" PCI_PRI_FMT,
1176 bp->pdev->addr.domain, bp->pdev->addr.bus,
1177 bp->pdev->addr.devid, bp->pdev->addr.function);
1179 rte_free(bp->hwrm_short_cmd_req_addr);
1181 bp->hwrm_short_cmd_req_addr =
1182 rte_malloc(type, bp->hwrm_max_ext_req_len, 0);
1183 if (bp->hwrm_short_cmd_req_addr == NULL) {
1187 bp->hwrm_short_cmd_req_dma_addr =
1188 rte_malloc_virt2iova(bp->hwrm_short_cmd_req_addr);
1189 if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
1190 rte_free(bp->hwrm_short_cmd_req_addr);
1192 "Unable to map buffer to physical memory.\n");
1198 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
1199 bp->flags |= BNXT_FLAG_KONG_MB_EN;
1200 PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
1203 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
1204 PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");
1206 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) {
1207 bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_MGMT;
1208 PMD_DRV_LOG(DEBUG, "FW supports advanced flow management\n");
1212 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED) {
1213 PMD_DRV_LOG(DEBUG, "FW supports advanced flow counters\n");
1214 bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_COUNTERS;
1223 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
1226 struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
1227 struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
1229 if (!(bp->flags & BNXT_FLAG_REGISTERED))
1232 HWRM_PREP(&req, HWRM_FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
1235 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1237 HWRM_CHECK_RESULT();
1243 static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
1246 struct hwrm_port_phy_cfg_input req = {0};
1247 struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1248 uint32_t enables = 0;
1250 HWRM_PREP(&req, HWRM_PORT_PHY_CFG, BNXT_USE_CHIMP_MB);
1252 if (conf->link_up) {
1253 		/* Setting a fixed speed, but autoneg is on, so disable it */
1254 if (bp->link_info->auto_mode && conf->link_speed) {
1255 req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
1256 PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
1259 req.flags = rte_cpu_to_le_32(conf->phy_flags);
1261 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
1262 * any auto mode, even "none".
1264 if (!conf->link_speed) {
1265 /* No speeds specified. Enable AutoNeg - all speeds */
1266 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
1268 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
1270 if (bp->link_info->link_signal_mode) {
1272 HWRM_PORT_PHY_CFG_IN_EN_FORCE_PAM4_LINK_SPEED;
1273 req.force_pam4_link_speed =
1274 rte_cpu_to_le_16(conf->link_speed);
1276 req.force_link_speed =
1277 rte_cpu_to_le_16(conf->link_speed);
1279 /* AutoNeg - Advertise speeds specified. */
1280 if (conf->auto_link_speed_mask &&
1281 !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
1283 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
1284 req.auto_link_speed_mask =
1285 conf->auto_link_speed_mask;
1286 if (conf->auto_pam4_link_speeds) {
1288 HWRM_PORT_PHY_CFG_IN_EN_AUTO_PAM4_LINK_SPD_MASK;
1289 req.auto_link_pam4_speed_mask =
1290 conf->auto_pam4_link_speeds;
1293 HWRM_PORT_PHY_CFG_IN_EN_AUTO_LINK_SPEED_MASK;
1296 if (conf->auto_link_speed &&
1297 !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE))
1299 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
1301 req.auto_duplex = conf->duplex;
1302 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
1303 req.auto_pause = conf->auto_pause;
1304 req.force_pause = conf->force_pause;
1305 /* Set force_pause if there is no auto or if there is a force */
1306 if (req.auto_pause && !req.force_pause)
1307 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
1309 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
1311 req.enables = rte_cpu_to_le_32(enables);
1314 rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
1315 PMD_DRV_LOG(INFO, "Force Link Down\n");
1318 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1320 HWRM_CHECK_RESULT();
1326 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
1327 struct bnxt_link_info *link_info)
1330 struct hwrm_port_phy_qcfg_input req = {0};
1331 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1333 HWRM_PREP(&req, HWRM_PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);
1335 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1337 HWRM_CHECK_RESULT();
1339 link_info->phy_link_status = resp->link;
1340 link_info->link_up =
1341 (link_info->phy_link_status ==
1342 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
1343 link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
1344 link_info->duplex = resp->duplex_cfg;
1345 link_info->pause = resp->pause;
1346 link_info->auto_pause = resp->auto_pause;
1347 link_info->force_pause = resp->force_pause;
1348 link_info->auto_mode = resp->auto_mode;
1349 link_info->phy_type = resp->phy_type;
1350 link_info->media_type = resp->media_type;
1352 link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
1353 link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
1354 link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
1355 link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
1356 link_info->phy_ver[0] = resp->phy_maj;
1357 link_info->phy_ver[1] = resp->phy_min;
1358 link_info->phy_ver[2] = resp->phy_bld;
1359 link_info->link_signal_mode =
1360 rte_le_to_cpu_16(resp->active_fec_signal_mode);
1361 link_info->force_pam4_link_speed =
1362 rte_le_to_cpu_16(resp->force_pam4_link_speed);
1363 link_info->support_pam4_speeds =
1364 rte_le_to_cpu_16(resp->support_pam4_speeds);
1365 link_info->auto_pam4_link_speeds =
1366 rte_le_to_cpu_16(resp->auto_pam4_link_speed_mask);
1369 PMD_DRV_LOG(DEBUG, "Link Speed:%d,Auto:%d:%x:%x,Support:%x,Force:%x\n",
1370 link_info->link_speed, link_info->auto_mode,
1371 link_info->auto_link_speed, link_info->auto_link_speed_mask,
1372 link_info->support_speeds, link_info->force_link_speed);
1376 int bnxt_hwrm_port_phy_qcaps(struct bnxt *bp)
1379 struct hwrm_port_phy_qcaps_input req = {0};
1380 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1381 struct bnxt_link_info *link_info = bp->link_info;
1383 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
1386 HWRM_PREP(&req, HWRM_PORT_PHY_QCAPS, BNXT_USE_CHIMP_MB);
1388 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1390 HWRM_CHECK_RESULT();
1392 bp->port_cnt = resp->port_cnt;
1393 if (resp->supported_speeds_auto_mode)
1394 link_info->support_auto_speeds =
1395 rte_le_to_cpu_16(resp->supported_speeds_auto_mode);
1396 if (resp->supported_pam4_speeds_auto_mode)
1397 link_info->support_pam4_auto_speeds =
1398 rte_le_to_cpu_16(resp->supported_pam4_speeds_auto_mode);
1405 static bool bnxt_find_lossy_profile(struct bnxt *bp)
1409 for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
1410 if (bp->tx_cos_queue[i].profile ==
1411 HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
1412 bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
1419 static void bnxt_find_first_valid_profile(struct bnxt *bp)
1423 for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
1424 if (bp->tx_cos_queue[i].profile !=
1425 HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN &&
1426 bp->tx_cos_queue[i].id !=
1427 HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN) {
1428 bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
1434 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
1437 struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
1438 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
1439 uint32_t dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
1443 HWRM_PREP(&req, HWRM_QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);
1445 req.flags = rte_cpu_to_le_32(dir);
1446 	/* Enable drv_qmap_cap only if HWRM version >= 1.9.1 and CoS classification is not required. */
1447 if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1 &&
1448 !(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
1450 HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
1451 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1453 HWRM_CHECK_RESULT();
1455 if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
1456 GET_TX_QUEUE_INFO(0);
1457 GET_TX_QUEUE_INFO(1);
1458 GET_TX_QUEUE_INFO(2);
1459 GET_TX_QUEUE_INFO(3);
1460 GET_TX_QUEUE_INFO(4);
1461 GET_TX_QUEUE_INFO(5);
1462 GET_TX_QUEUE_INFO(6);
1463 GET_TX_QUEUE_INFO(7);
1465 GET_RX_QUEUE_INFO(0);
1466 GET_RX_QUEUE_INFO(1);
1467 GET_RX_QUEUE_INFO(2);
1468 GET_RX_QUEUE_INFO(3);
1469 GET_RX_QUEUE_INFO(4);
1470 GET_RX_QUEUE_INFO(5);
1471 GET_RX_QUEUE_INFO(6);
1472 GET_RX_QUEUE_INFO(7);
1477 if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX)
1480 if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
1481 bp->tx_cosq_id[0] = bp->tx_cos_queue[0].id;
1485 /* iterate and find the COSq profile to use for Tx */
1486 if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
1487 for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
1488 if (bp->tx_cos_queue[i].id != 0xff)
1489 bp->tx_cosq_id[j++] =
1490 bp->tx_cos_queue[i].id;
1493 		/* When CoS classification is disabled, for normal NIC
1494 		 * operation we should ideally use the LOSSY profile.
1495 		 * If none is found, fall back to the first valid profile
1497 if (!bnxt_find_lossy_profile(bp))
1498 bnxt_find_first_valid_profile(bp);
1503 bp->max_tc = resp->max_configurable_queues;
1504 bp->max_lltc = resp->max_configurable_lossless_queues;
1505 if (bp->max_tc > BNXT_MAX_QUEUE)
1506 bp->max_tc = BNXT_MAX_QUEUE;
1507 bp->max_q = bp->max_tc;
1509 if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
1510 dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX;
1518 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
1519 struct bnxt_ring *ring,
1520 uint32_t ring_type, uint32_t map_index,
1521 uint32_t stats_ctx_id, uint32_t cmpl_ring_id,
1522 uint16_t tx_cosq_id)
1525 uint32_t enables = 0;
1526 struct hwrm_ring_alloc_input req = {.req_type = 0 };
1527 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1528 struct rte_mempool *mb_pool;
1529 uint16_t rx_buf_size;
1531 HWRM_PREP(&req, HWRM_RING_ALLOC, BNXT_USE_CHIMP_MB);
1533 req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
1534 req.fbo = rte_cpu_to_le_32(0);
1535 /* Association of ring index with doorbell index */
1536 req.logical_id = rte_cpu_to_le_16(map_index);
1537 req.length = rte_cpu_to_le_32(ring->ring_size);
1539 switch (ring_type) {
1540 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1541 req.ring_type = ring_type;
1542 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1543 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1544 req.queue_id = rte_cpu_to_le_16(tx_cosq_id);
1545 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1547 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1549 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1550 req.ring_type = ring_type;
1551 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1552 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1553 if (BNXT_CHIP_THOR(bp)) {
1554 mb_pool = bp->rx_queues[0]->mb_pool;
1555 rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1556 RTE_PKTMBUF_HEADROOM;
1557 rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
1558 req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1560 HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;
1562 if (stats_ctx_id != INVALID_STATS_CTX_ID)
1564 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1566 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1567 req.ring_type = ring_type;
1568 if (BNXT_HAS_NQ(bp)) {
1569 /* Association of cp ring with nq */
1570 req.nq_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
1572 HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID;
1574 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1576 case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1577 req.ring_type = ring_type;
1578 req.page_size = BNXT_PAGE_SHFT;
1579 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
1581 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1582 req.ring_type = ring_type;
1583 req.rx_ring_id = rte_cpu_to_le_16(ring->fw_rx_ring_id);
1585 mb_pool = bp->rx_queues[0]->mb_pool;
1586 rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1587 RTE_PKTMBUF_HEADROOM;
1588 rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
1589 req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
1591 req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
1592 enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
1593 HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
1594 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
1597 PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
1602 req.enables = rte_cpu_to_le_32(enables);
1604 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1606 if (rc || resp->error_code) {
1607 if (rc == 0 && resp->error_code)
1608 rc = rte_le_to_cpu_16(resp->error_code);
1609 switch (ring_type) {
1610 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
1612 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
1615 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
1617 "hwrm_ring_alloc rx failed. rc:%d\n", rc);
1620 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
1622 "hwrm_ring_alloc rx agg failed. rc:%d\n",
1626 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
1628 "hwrm_ring_alloc tx failed. rc:%d\n", rc);
1631 case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
1633 "hwrm_ring_alloc nq failed. rc:%d\n", rc);
1637 PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
1643 ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
1648 int bnxt_hwrm_ring_free(struct bnxt *bp,
1649 struct bnxt_ring *ring, uint32_t ring_type)
1652 struct hwrm_ring_free_input req = {.req_type = 0 };
1653 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
1655 HWRM_PREP(&req, HWRM_RING_FREE, BNXT_USE_CHIMP_MB);
1657 req.ring_type = ring_type;
1658 req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
1660 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1662 if (rc || resp->error_code) {
1663 if (rc == 0 && resp->error_code)
1664 rc = rte_le_to_cpu_16(resp->error_code);
1667 switch (ring_type) {
1668 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
1669 PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
1672 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
1673 PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
1676 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
1677 PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
1680 case HWRM_RING_FREE_INPUT_RING_TYPE_NQ:
1682 "hwrm_ring_free nq failed. rc:%d\n", rc);
1684 case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
1686 "hwrm_ring_free agg failed. rc:%d\n", rc);
1689 PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
1697 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
1700 struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
1701 struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1703 HWRM_PREP(&req, HWRM_RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);
1705 req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
1706 req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
1707 req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
1708 req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
1710 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1712 HWRM_CHECK_RESULT();
1714 bp->grp_info[idx].fw_grp_id = rte_le_to_cpu_16(resp->ring_group_id);
1721 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
1724 struct hwrm_ring_grp_free_input req = {.req_type = 0 };
1725 struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
1727 HWRM_PREP(&req, HWRM_RING_GRP_FREE, BNXT_USE_CHIMP_MB);
1729 req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
1731 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1733 HWRM_CHECK_RESULT();
1736 bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
1740 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1743 struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
1744 struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1746 if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
1749 HWRM_PREP(&req, HWRM_STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
1751 req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1753 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1755 HWRM_CHECK_RESULT();
1761 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1762 unsigned int idx __rte_unused)
1765 struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1766 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1768 HWRM_PREP(&req, HWRM_STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
1770 req.update_period_ms = rte_cpu_to_le_32(0);
1772 req.stats_dma_addr = rte_cpu_to_le_64(cpr->hw_stats_map);
1774 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1776 HWRM_CHECK_RESULT();
1778 cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id);
1785 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1786 unsigned int idx __rte_unused)
1789 struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1790 struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1792 HWRM_PREP(&req, HWRM_STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
1794 req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
1796 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1798 HWRM_CHECK_RESULT();
1804 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1807 struct hwrm_vnic_alloc_input req = { 0 };
1808 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1810 if (!BNXT_HAS_RING_GRPS(bp))
1811 goto skip_ring_grps;
1813 /* map ring groups to this vnic */
1814 PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
1815 vnic->start_grp_id, vnic->end_grp_id);
1816 for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
1817 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1819 vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1820 vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1821 vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1822 vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
1825 vnic->mru = BNXT_VNIC_MRU(bp->eth_dev->data->mtu);
1826 HWRM_PREP(&req, HWRM_VNIC_ALLOC, BNXT_USE_CHIMP_MB);
1828 if (vnic->func_default)
1830 rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
1831 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1833 HWRM_CHECK_RESULT();
1835 vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1837 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1841 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1842 struct bnxt_vnic_info *vnic,
1843 struct bnxt_plcmodes_cfg *pmode)
1846 struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1847 struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1849 HWRM_PREP(&req, HWRM_VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);
1851 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1853 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1855 HWRM_CHECK_RESULT();
1857 pmode->flags = rte_le_to_cpu_32(resp->flags);
1858 /* dflt_vnic bit doesn't exist in the _cfg command */
1859 pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1860 pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1861 pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1862 pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1869 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1870 struct bnxt_vnic_info *vnic,
1871 struct bnxt_plcmodes_cfg *pmode)
1874 struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1875 struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1877 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1878 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1882 HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
1884 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1885 req.flags = rte_cpu_to_le_32(pmode->flags);
1886 req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1887 req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1888 req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1889 req.enables = rte_cpu_to_le_32(
1890 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1891 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1892 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1895 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
1897 HWRM_CHECK_RESULT();
1903 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1906 struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1907 struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1908 struct bnxt_plcmodes_cfg pmodes = { 0 };
1909 uint32_t ctx_enable_flag = 0;
1910 uint32_t enables = 0;
1912 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1913 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
1917 rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1921 HWRM_PREP(&req, HWRM_VNIC_CFG, BNXT_USE_CHIMP_MB);
1923 if (BNXT_CHIP_THOR(bp)) {
1924 int dflt_rxq = vnic->start_grp_id;
1925 struct bnxt_rx_ring_info *rxr;
1926 struct bnxt_cp_ring_info *cpr;
1927 struct bnxt_rx_queue *rxq;
1931 * The first active receive ring is used as the VNIC
1932 * default receive ring. If there are no active receive
1933 * rings (all corresponding receive queues are stopped),
1934 * the first receive ring is used.
1936 for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
1937 rxq = bp->eth_dev->data->rx_queues[i];
1938 if (rxq->rx_started) {
1944 rxq = bp->eth_dev->data->rx_queues[dflt_rxq];
1948 req.default_rx_ring_id =
1949 rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
1950 req.default_cmpl_ring_id =
1951 rte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id);
1952 enables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
1953 HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID;
1957 		/* Only RSS is supported for now. TBD: COS & LB */
1958 enables = HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP;
1959 if (vnic->lb_rule != 0xffff)
1960 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1961 if (vnic->cos_rule != 0xffff)
1962 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1963 if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) {
1964 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1965 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1967 if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
1968 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID;
1969 req.queue_id = rte_cpu_to_le_16(vnic->cos_queue_id);
1972 enables |= ctx_enable_flag;
1973 req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1974 req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1975 req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1976 req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1979 req.enables = rte_cpu_to_le_32(enables);
1980 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1981 req.mru = rte_cpu_to_le_16(vnic->mru);
1982 /* Configure default VNIC only once. */
1983 if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {
1985 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1986 bp->flags |= BNXT_FLAG_DFLT_VNIC_SET;
1988 if (vnic->vlan_strip)
1990 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1993 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1994 if (vnic->roce_dual)
1995 req.flags |= rte_cpu_to_le_32(
1996 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1997 if (vnic->roce_only)
1998 req.flags |= rte_cpu_to_le_32(
1999 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
2000 if (vnic->rss_dflt_cr)
2001 req.flags |= rte_cpu_to_le_32(
2002 HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
2004 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2006 HWRM_CHECK_RESULT();
2009 rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
2014 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
2018 struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
2019 struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2021 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2022 PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
2025 HWRM_PREP(&req, HWRM_VNIC_QCFG, BNXT_USE_CHIMP_MB);
2028 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
2029 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2030 req.vf_id = rte_cpu_to_le_16(fw_vf_id);
2032 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2034 HWRM_CHECK_RESULT();
2036 vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
2037 vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
2038 vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
2039 vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
2040 vnic->mru = rte_le_to_cpu_16(resp->mru);
2041 vnic->func_default = rte_le_to_cpu_32(
2042 resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
2043 vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
2044 HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
2045 vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
2046 HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
2047 vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
2048 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
2049 vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
2050 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
2051 vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
2052 HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
2059 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
2060 struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
2064 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
2065 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
2066 bp->hwrm_cmd_resp_addr;
2068 HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
2070 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2071 HWRM_CHECK_RESULT();
2073 ctx_id = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
2074 if (!BNXT_HAS_RING_GRPS(bp))
2075 vnic->fw_grp_ids[ctx_idx] = ctx_id;
2076 else if (ctx_idx == 0)
2077 vnic->rss_rule = ctx_id;
2085 int _bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
2086 struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
2089 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
2090 struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
2091 bp->hwrm_cmd_resp_addr;
2093 if (ctx_idx == (uint16_t)HWRM_NA_SIGNATURE) {
2094 PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
2097 HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
2099 req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx);
2101 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2103 HWRM_CHECK_RESULT();
2109 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2113 if (BNXT_CHIP_THOR(bp)) {
2116 for (j = 0; j < vnic->num_lb_ctxts; j++) {
2117 rc = _bnxt_hwrm_vnic_ctx_free(bp,
2119 vnic->fw_grp_ids[j]);
2120 vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
2122 vnic->num_lb_ctxts = 0;
2124 rc = _bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
2125 vnic->rss_rule = INVALID_HW_RING_ID;
2131 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2134 struct hwrm_vnic_free_input req = {.req_type = 0 };
2135 struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
2137 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2138 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
2142 HWRM_PREP(&req, HWRM_VNIC_FREE, BNXT_USE_CHIMP_MB);
2144 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2146 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2148 HWRM_CHECK_RESULT();
2151 vnic->fw_vnic_id = INVALID_HW_RING_ID;
2152 /* Configure default VNIC again if necessary. */
2153 if (vnic->func_default && (bp->flags & BNXT_FLAG_DFLT_VNIC_SET))
2154 bp->flags &= ~BNXT_FLAG_DFLT_VNIC_SET;
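/*
 * Thor RSS programming issues one HWRM_VNIC_RSS_CFG request per allocated
 * context, each pointing at its own slice of the RSS table
 * (i * HW_HASH_INDEX_SIZE) and carrying its own ring_table_pair_index.
 */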
2160 bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2164 int nr_ctxs = vnic->num_lb_ctxts;
2165 struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
2166 struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2168 for (i = 0; i < nr_ctxs; i++) {
2169 HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
2171 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2172 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
2173 req.hash_mode_flags = vnic->hash_mode;
2175 req.hash_key_tbl_addr =
2176 rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
2178 req.ring_grp_tbl_addr =
2179 rte_cpu_to_le_64(vnic->rss_table_dma_addr +
2180 i * HW_HASH_INDEX_SIZE);
2181 req.ring_table_pair_index = i;
2182 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
2184 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
2187 HWRM_CHECK_RESULT();
2194 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
2195 struct bnxt_vnic_info *vnic)
2198 struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
2199 struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2201 if (!vnic->rss_table)
2204 if (BNXT_CHIP_THOR(bp))
2205 return bnxt_hwrm_vnic_rss_cfg_thor(bp, vnic);
2207 HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
2209 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
2210 req.hash_mode_flags = vnic->hash_mode;
2212 req.ring_grp_tbl_addr =
2213 rte_cpu_to_le_64(vnic->rss_table_dma_addr);
2214 req.hash_key_tbl_addr =
2215 rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
2216 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
2217 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2219 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2221 HWRM_CHECK_RESULT();
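/*
 * Configure jumbo placement for the VNIC: the jumbo threshold is derived
 * from the Rx mempool data room minus RTE_PKTMBUF_HEADROOM, capped at
 * BNXT_MAX_PKT_LEN.
 */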
2227 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
2228 struct bnxt_vnic_info *vnic)
2231 struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
2232 struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2235 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2236 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
2240 HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
2242 req.flags = rte_cpu_to_le_32(
2243 HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
2245 req.enables = rte_cpu_to_le_32(
2246 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
2248 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
2249 size -= RTE_PKTMBUF_HEADROOM;
2250 size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
2252 req.jumbo_thresh = rte_cpu_to_le_16(size);
2253 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2255 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2257 HWRM_CHECK_RESULT();
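/*
 * Enable or disable TPA (hardware LRO) on the VNIC. Thor parts without
 * max_tpa_v2 cannot aggregate; when enabling, the maximum aggregation
 * count/segments and a 512-byte minimum aggregation length are programmed.
 */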
2263 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
2264 struct bnxt_vnic_info *vnic, bool enable)
2267 struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
2268 struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2270 if (BNXT_CHIP_THOR(bp) && !bp->max_tpa_v2) {
2272 PMD_DRV_LOG(ERR, "No HW support for LRO\n");
2276 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2277 PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
2281 HWRM_PREP(&req, HWRM_VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
2284 req.enables = rte_cpu_to_le_32(
2285 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
2286 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
2287 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
2288 req.flags = rte_cpu_to_le_32(
2289 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
2290 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
2291 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
2292 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
2293 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
2294 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
2295 req.max_aggs = rte_cpu_to_le_16(BNXT_TPA_MAX_AGGS(bp));
2296 req.max_agg_segs = rte_cpu_to_le_16(BNXT_TPA_MAX_SEGS(bp));
2297 req.min_agg_len = rte_cpu_to_le_32(512);
2299 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2301 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2303 HWRM_CHECK_RESULT();
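/*
 * Program a VF's default MAC address via HWRM_FUNC_CFG and clear the
 * random_mac flag once firmware accepts the new address.
 */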
2309 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
2311 struct hwrm_func_cfg_input req = {0};
2312 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2315 req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
2316 req.enables = rte_cpu_to_le_32(
2317 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2318 memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
2319 req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
2321 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
2323 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2324 HWRM_CHECK_RESULT();
2327 bp->pf->vf_info[vf].random_mac = false;
2332 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
2336 struct hwrm_func_qstats_input req = {.req_type = 0};
2337 struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2339 HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2341 req.fid = rte_cpu_to_le_16(fid);
2343 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2345 HWRM_CHECK_RESULT();
2348 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
2355 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
2356 struct rte_eth_stats *stats,
2357 struct hwrm_func_qstats_output *func_qstats)
2360 struct hwrm_func_qstats_input req = {.req_type = 0};
2361 struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2363 HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2365 req.fid = rte_cpu_to_le_16(fid);
2367 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2369 HWRM_CHECK_RESULT();
2371 memcpy(func_qstats, resp,
2372 sizeof(struct hwrm_func_qstats_output));
2377 stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2378 stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2379 stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2380 stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2381 stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2382 stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2384 stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2385 stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2386 stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2387 stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2388 stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2389 stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2391 stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
2392 stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
2393 stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
2401 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
2404 struct hwrm_func_clr_stats_input req = {.req_type = 0};
2405 struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2407 HWRM_PREP(&req, HWRM_FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
2409 req.fid = rte_cpu_to_le_16(fid);
2411 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2413 HWRM_CHECK_RESULT();
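/*
 * Iterate over all Rx and Tx completion rings (Tx queues follow the Rx
 * queues in the loop index) and ask firmware to clear each ring's
 * statistics context.
 */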
2419 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
2424 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2425 struct bnxt_tx_queue *txq;
2426 struct bnxt_rx_queue *rxq;
2427 struct bnxt_cp_ring_info *cpr;
2429 if (i >= bp->rx_cp_nr_rings) {
2430 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2433 rxq = bp->rx_queues[i];
2437 rc = bnxt_hwrm_stat_clear(bp, cpr);
2445 bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
2449 struct bnxt_cp_ring_info *cpr;
2451 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2453 if (i >= bp->rx_cp_nr_rings) {
2454 cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
2456 cpr = bp->rx_queues[i]->cp_ring;
2457 if (BNXT_HAS_RING_GRPS(bp))
2458 bp->grp_info[i].fw_stats_ctx = -1;
2460 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
2461 rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
2462 cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
2470 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
2475 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2476 struct bnxt_tx_queue *txq;
2477 struct bnxt_rx_queue *rxq;
2478 struct bnxt_cp_ring_info *cpr;
2480 if (i >= bp->rx_cp_nr_rings) {
2481 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2484 rxq = bp->rx_queues[i];
2488 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
2497 bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
2502 if (!BNXT_HAS_RING_GRPS(bp))
2505 for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
2507 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
2510 rc = bnxt_hwrm_ring_grp_free(bp, idx);
2518 void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2520 struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2522 bnxt_hwrm_ring_free(bp, cp_ring,
2523 HWRM_RING_FREE_INPUT_RING_TYPE_NQ);
2524 cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2525 memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2526 sizeof(*cpr->cp_desc_ring));
2527 cpr->cp_raw_cons = 0;
2531 void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2533 struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2535 bnxt_hwrm_ring_free(bp, cp_ring,
2536 HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
2537 cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2538 memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2539 sizeof(*cpr->cp_desc_ring));
2540 cpr->cp_raw_cons = 0;
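/*
 * Free one Rx queue's rings: the Rx ring, its aggregation ring (typed
 * RX_AGG on Thor) and the completion ring, invalidating the cached
 * firmware ring ids in grp_info along the way.
 */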
2544 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
2546 struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
2547 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
2548 struct bnxt_ring *ring = rxr->rx_ring_struct;
2549 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
2551 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2552 bnxt_hwrm_ring_free(bp, ring,
2553 HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2554 ring->fw_ring_id = INVALID_HW_RING_ID;
2555 if (BNXT_HAS_RING_GRPS(bp))
2556 bp->grp_info[queue_index].rx_fw_ring_id =
2559 ring = rxr->ag_ring_struct;
2560 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2561 bnxt_hwrm_ring_free(bp, ring,
2562 BNXT_CHIP_THOR(bp) ?
2563 HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
2564 HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2565 if (BNXT_HAS_RING_GRPS(bp))
2566 bp->grp_info[queue_index].ag_fw_ring_id =
2569 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
2570 bnxt_free_cp_ring(bp, cpr);
2572 if (BNXT_HAS_RING_GRPS(bp))
2573 bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
2577 bnxt_free_all_hwrm_rings(struct bnxt *bp)
2581 for (i = 0; i < bp->tx_cp_nr_rings; i++) {
2582 struct bnxt_tx_queue *txq = bp->tx_queues[i];
2583 struct bnxt_tx_ring_info *txr = txq->tx_ring;
2584 struct bnxt_ring *ring = txr->tx_ring_struct;
2585 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
2587 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2588 bnxt_hwrm_ring_free(bp, ring,
2589 HWRM_RING_FREE_INPUT_RING_TYPE_TX);
2590 ring->fw_ring_id = INVALID_HW_RING_ID;
2591 memset(txr->tx_desc_ring, 0,
2592 txr->tx_ring_struct->ring_size *
2593 sizeof(*txr->tx_desc_ring));
2594 memset(txr->tx_buf_ring, 0,
2595 txr->tx_ring_struct->ring_size *
2596 sizeof(*txr->tx_buf_ring));
2600 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
2601 bnxt_free_cp_ring(bp, cpr);
2602 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
2606 for (i = 0; i < bp->rx_cp_nr_rings; i++)
2607 bnxt_free_hwrm_rx_ring(bp, i);
2612 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
2617 if (!BNXT_HAS_RING_GRPS(bp))
2620 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
2621 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
2629 * HWRM utility functions
2632 void bnxt_free_hwrm_resources(struct bnxt *bp)
2634 /* Free the rte_malloc'd HWRM command response and short command buffers */
2635 rte_free(bp->hwrm_cmd_resp_addr);
2636 rte_free(bp->hwrm_short_cmd_req_addr);
2637 bp->hwrm_cmd_resp_addr = NULL;
2638 bp->hwrm_short_cmd_req_addr = NULL;
2639 bp->hwrm_cmd_resp_dma_addr = 0;
2640 bp->hwrm_short_cmd_req_dma_addr = 0;
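/*
 * Allocate the DMA-able buffer used for HWRM responses, record its IOVA for
 * firmware, and initialize the spinlock that serializes HWRM access.
 */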
2643 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2645 struct rte_pci_device *pdev = bp->pdev;
2646 char type[RTE_MEMZONE_NAMESIZE];
2648 sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT, pdev->addr.domain,
2649 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
2650 bp->max_resp_len = HWRM_MAX_RESP_LEN;
2651 bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
2652 if (bp->hwrm_cmd_resp_addr == NULL)
2654 bp->hwrm_cmd_resp_dma_addr =
2655 rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
2656 if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
2658 "unable to map response address to physical memory\n");
2661 rte_spinlock_init(&bp->hwrm_lock);
2667 bnxt_clear_one_vnic_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
2671 if (filter->filter_type == HWRM_CFA_EM_FILTER) {
2672 rc = bnxt_hwrm_clear_em_filter(bp, filter);
2675 } else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
2676 rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2681 rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2686 bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2688 struct bnxt_filter_info *filter;
2691 STAILQ_FOREACH(filter, &vnic->filter, next) {
2692 rc = bnxt_clear_one_vnic_filter(bp, filter);
2693 STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
2694 bnxt_free_filter(bp, filter);
2700 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2702 struct bnxt_filter_info *filter;
2703 struct rte_flow *flow;
2706 while (!STAILQ_EMPTY(&vnic->flow_list)) {
2707 flow = STAILQ_FIRST(&vnic->flow_list);
2708 filter = flow->filter;
2709 PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
2710 rc = bnxt_clear_one_vnic_filter(bp, filter);
2712 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
2718 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2720 struct bnxt_filter_info *filter;
2723 STAILQ_FOREACH(filter, &vnic->filter, next) {
2724 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2725 rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2727 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2728 rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2731 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2740 bnxt_free_tunnel_ports(struct bnxt *bp)
2742 if (bp->vxlan_port_cnt)
2743 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2744 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2746 if (bp->geneve_port_cnt)
2747 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2748 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2749 bp->geneve_port = 0;
2752 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2756 if (bp->vnic_info == NULL)
2760 * Cleanup VNICs in reverse order, to make sure the L2 filter
2761 * from vnic0 is last to be cleaned up.
2763 for (i = bp->max_vnics - 1; i >= 0; i--) {
2764 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2766 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
2769 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2771 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2773 bnxt_hwrm_vnic_ctx_free(bp, vnic);
2775 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2777 bnxt_hwrm_vnic_free(bp, vnic);
2779 rte_free(vnic->fw_grp_ids);
2781 /* Ring resources */
2782 bnxt_free_all_hwrm_rings(bp);
2783 bnxt_free_all_hwrm_ring_grps(bp);
2784 bnxt_free_all_hwrm_stat_ctxs(bp);
2785 bnxt_free_tunnel_ports(bp);
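/*
 * The link helpers below translate between the rte_ethdev ETH_LINK_SPEED_*
 * and duplex encodings and the HWRM PORT_PHY_CFG/QCFG encodings used by
 * firmware, in both directions.
 */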
2788 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2790 uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2792 if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2793 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2795 switch (conf_link_speed) {
2796 case ETH_LINK_SPEED_10M_HD:
2797 case ETH_LINK_SPEED_100M_HD:
2799 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2801 return hw_link_duplex;
2804 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2809 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
2812 uint16_t eth_link_speed = 0;
2814 if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2815 return ETH_LINK_SPEED_AUTONEG;
2817 switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2818 case ETH_LINK_SPEED_100M:
2819 case ETH_LINK_SPEED_100M_HD:
2822 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2824 case ETH_LINK_SPEED_1G:
2826 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2828 case ETH_LINK_SPEED_2_5G:
2830 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2832 case ETH_LINK_SPEED_10G:
2834 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2836 case ETH_LINK_SPEED_20G:
2838 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2840 case ETH_LINK_SPEED_25G:
2842 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2844 case ETH_LINK_SPEED_40G:
2846 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2848 case ETH_LINK_SPEED_50G:
2849 eth_link_speed = pam4_link ?
2850 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB :
2851 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2853 case ETH_LINK_SPEED_100G:
2854 eth_link_speed = pam4_link ?
2855 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB :
2856 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2858 case ETH_LINK_SPEED_200G:
2860 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
2864 "Unsupported link speed %d; default to AUTO\n",
2868 return eth_link_speed;
2871 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2872 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2873 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2874 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | \
2875 ETH_LINK_SPEED_100G | ETH_LINK_SPEED_200G)
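/*
 * Validate the speeds requested in dev_conf.link_speeds against the port's
 * capabilities; a fixed-speed request must select exactly one supported
 * speed (checked with the one_speed & (one_speed - 1) power-of-two test).
 */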
2877 static int bnxt_validate_link_speed(struct bnxt *bp)
2879 uint32_t link_speed = bp->eth_dev->data->dev_conf.link_speeds;
2880 uint16_t port_id = bp->eth_dev->data->port_id;
2881 uint32_t link_speed_capa;
2884 if (link_speed == ETH_LINK_SPEED_AUTONEG)
2887 link_speed_capa = bnxt_get_speed_capabilities(bp);
2889 if (link_speed & ETH_LINK_SPEED_FIXED) {
2890 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2892 if (one_speed & (one_speed - 1)) {
2894 "Invalid advertised speeds (%u) for port %u\n",
2895 link_speed, port_id);
2898 if ((one_speed & link_speed_capa) != one_speed) {
2900 "Unsupported advertised speed (%u) for port %u\n",
2901 link_speed, port_id);
2905 if (!(link_speed & link_speed_capa)) {
2907 "Unsupported advertised speeds (%u) for port %u\n",
2908 link_speed, port_id);
2916 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2920 if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2921 if (bp->link_info->support_speeds)
2922 return bp->link_info->support_speeds;
2923 link_speed = BNXT_SUPPORTED_SPEEDS;
2926 if (link_speed & ETH_LINK_SPEED_100M)
2927 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2928 if (link_speed & ETH_LINK_SPEED_100M_HD)
2929 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2930 if (link_speed & ETH_LINK_SPEED_1G)
2931 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2932 if (link_speed & ETH_LINK_SPEED_2_5G)
2933 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2934 if (link_speed & ETH_LINK_SPEED_10G)
2935 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2936 if (link_speed & ETH_LINK_SPEED_20G)
2937 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2938 if (link_speed & ETH_LINK_SPEED_25G)
2939 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2940 if (link_speed & ETH_LINK_SPEED_40G)
2941 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2942 if (link_speed & ETH_LINK_SPEED_50G)
2943 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2944 if (link_speed & ETH_LINK_SPEED_100G)
2945 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2946 if (link_speed & ETH_LINK_SPEED_200G)
2947 ret |= HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
2951 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2953 uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2955 switch (hw_link_speed) {
2956 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2957 eth_link_speed = ETH_SPEED_NUM_100M;
2959 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2960 eth_link_speed = ETH_SPEED_NUM_1G;
2962 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2963 eth_link_speed = ETH_SPEED_NUM_2_5G;
2965 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2966 eth_link_speed = ETH_SPEED_NUM_10G;
2968 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2969 eth_link_speed = ETH_SPEED_NUM_20G;
2971 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2972 eth_link_speed = ETH_SPEED_NUM_25G;
2974 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2975 eth_link_speed = ETH_SPEED_NUM_40G;
2977 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2978 eth_link_speed = ETH_SPEED_NUM_50G;
2980 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2981 eth_link_speed = ETH_SPEED_NUM_100G;
2983 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
2984 eth_link_speed = ETH_SPEED_NUM_200G;
2986 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2988 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2992 return eth_link_speed;
2995 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2997 uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2999 switch (hw_link_duplex) {
3000 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
3001 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
3003 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
3005 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
3006 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
3009 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
3013 return eth_link_duplex;
3016 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
3019 struct bnxt_link_info *link_info = bp->link_info;
3021 rc = bnxt_hwrm_port_phy_qcaps(bp);
3023 PMD_DRV_LOG(ERR, "Get link config failed with rc %d\n", rc);
3025 rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
3027 PMD_DRV_LOG(ERR, "Get link config failed with rc %d\n", rc);
3031 if (link_info->link_speed)
3033 bnxt_parse_hw_link_speed(link_info->link_speed);
3035 link->link_speed = ETH_SPEED_NUM_NONE;
3036 link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
3037 link->link_status = link_info->link_up;
3038 link->link_autoneg = link_info->auto_mode ==
3039 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
3040 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
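/*
 * Push the PMD link configuration to firmware: either restart autoneg with
 * an advertised-speed mask, or force a single speed (PAM4 variants where
 * applicable), then issue HWRM_PORT_PHY_CFG.
 */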
3045 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
3048 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
3049 struct bnxt_link_info link_req;
3050 uint16_t speed, autoneg;
3052 if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
3055 rc = bnxt_validate_link_speed(bp);
3059 memset(&link_req, 0, sizeof(link_req));
3060 link_req.link_up = link_up;
3064 autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
3065 if (BNXT_CHIP_THOR(bp) &&
3066 dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
3067 /* 40G is not supported as part of media auto detect.
3068 * The speed should be forced and autoneg disabled
3069 * to configure 40G speed.
3071 PMD_DRV_LOG(INFO, "Disabling autoneg for 40G\n");
3075 speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds,
3076 bp->link_info->link_signal_mode);
3077 link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
3078 /* Autoneg can be done only when the FW allows.
3079 * When user configures fixed speed of 40G and later changes to
3080 * any other speed, auto_link_speed/force_link_speed is still set
3081 * to 40G until link comes up at new speed.
3084 !(!BNXT_CHIP_THOR(bp) &&
3085 (bp->link_info->auto_link_speed ||
3086 bp->link_info->force_link_speed))) {
3087 link_req.phy_flags |=
3088 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
3089 link_req.auto_link_speed_mask =
3090 bnxt_parse_eth_link_speed_mask(bp,
3091 dev_conf->link_speeds);
3093 if (bp->link_info->phy_type ==
3094 HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
3095 bp->link_info->phy_type ==
3096 HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
3097 bp->link_info->media_type ==
3098 HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
3099 PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
3103 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
3104 /* If user wants a particular speed try that first. */
3106 link_req.link_speed = speed;
3107 else if (bp->link_info->force_pam4_link_speed)
3108 link_req.link_speed =
3109 bp->link_info->force_pam4_link_speed;
3110 else if (bp->link_info->auto_pam4_link_speeds)
3111 link_req.link_speed =
3112 bp->link_info->auto_pam4_link_speeds;
3113 else if (bp->link_info->support_pam4_speeds)
3114 link_req.link_speed =
3115 bp->link_info->support_pam4_speeds;
3116 else if (bp->link_info->force_link_speed)
3117 link_req.link_speed = bp->link_info->force_link_speed;
3119 link_req.link_speed = bp->link_info->auto_link_speed;
3121 link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
3122 link_req.auto_pause = bp->link_info->auto_pause;
3123 link_req.force_pause = bp->link_info->force_pause;
3126 rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
3129 "Set link config failed with rc %d\n", rc);
3137 int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
3139 struct hwrm_func_qcfg_input req = {0};
3140 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3143 bp->func_svif = BNXT_SVIF_INVALID;
3146 HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3147 req.fid = rte_cpu_to_le_16(0xffff);
3149 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3151 HWRM_CHECK_RESULT();
3153 /* Hard-coded 0xfff VLAN ID mask */
3154 bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
3156 svif_info = rte_le_to_cpu_16(resp->svif_info);
3157 if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)
3158 bp->func_svif = svif_info &
3159 HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
3161 flags = rte_le_to_cpu_16(resp->flags);
3162 if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
3163 bp->flags |= BNXT_FLAG_MULTI_HOST;
3166 !BNXT_VF_IS_TRUSTED(bp) &&
3167 (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
3168 bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
3169 PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
3170 } else if (BNXT_VF(bp) &&
3171 BNXT_VF_IS_TRUSTED(bp) &&
3172 !(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
3173 bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN;
3174 PMD_DRV_LOG(INFO, "Trusted VF cap disabled\n");
3178 *mtu = rte_le_to_cpu_16(resp->mtu);
3180 switch (resp->port_partition_type) {
3181 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
3182 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
3183 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
3185 bp->flags |= BNXT_FLAG_NPAR_PF;
3188 bp->flags &= ~BNXT_FLAG_NPAR_PF;
3197 int bnxt_hwrm_parent_pf_qcfg(struct bnxt *bp)
3199 struct hwrm_func_qcfg_input req = {0};
3200 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3203 if (!BNXT_VF_IS_TRUSTED(bp))
3209 bp->parent->fid = BNXT_PF_FID_INVALID;
3211 HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3213 req.fid = rte_cpu_to_le_16(0xfffe); /* Request parent PF information. */
3215 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3217 HWRM_CHECK_RESULT();
3219 memcpy(bp->parent->mac_addr, resp->mac_address, RTE_ETHER_ADDR_LEN);
3220 bp->parent->vnic = rte_le_to_cpu_16(resp->dflt_vnic_id);
3221 bp->parent->fid = rte_le_to_cpu_16(resp->fid);
3222 bp->parent->port_id = rte_le_to_cpu_16(resp->port_id);
3224 /* FIXME: Temporary workaround - remove when firmware issue is fixed. */
3225 if (bp->parent->vnic == 0) {
3226 PMD_DRV_LOG(ERR, "Error: parent VNIC unavailable.\n");
3227 /* Use hard-coded values appropriate for current Wh+ fw. */
3228 if (bp->parent->fid == 2)
3229 bp->parent->vnic = 0x100;
3231 bp->parent->vnic = 1;
3239 int bnxt_hwrm_get_dflt_vnic_svif(struct bnxt *bp, uint16_t fid,
3240 uint16_t *vnic_id, uint16_t *svif)
3242 struct hwrm_func_qcfg_input req = {0};
3243 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3247 HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3248 req.fid = rte_cpu_to_le_16(fid);
3250 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3252 HWRM_CHECK_RESULT();
3255 *vnic_id = rte_le_to_cpu_16(resp->dflt_vnic_id);
3257 svif_info = rte_le_to_cpu_16(resp->svif_info);
3258 if (svif && (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID))
3259 *svif = svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
3266 int bnxt_hwrm_port_mac_qcfg(struct bnxt *bp)
3268 struct hwrm_port_mac_qcfg_input req = {0};
3269 struct hwrm_port_mac_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3270 uint16_t port_svif_info;
3273 bp->port_svif = BNXT_SVIF_INVALID;
3275 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
3278 HWRM_PREP(&req, HWRM_PORT_MAC_QCFG, BNXT_USE_CHIMP_MB);
3280 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3282 HWRM_CHECK_RESULT_SILENT();
3284 port_svif_info = rte_le_to_cpu_16(resp->port_svif_info);
3285 if (port_svif_info &
3286 HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_VALID)
3287 bp->port_svif = port_svif_info &
3288 HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_MASK;
3295 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp,
3296 struct bnxt_pf_resource_info *pf_resc)
3298 struct hwrm_func_cfg_input req = {0};
3299 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3303 enables = HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
3304 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
3305 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
3306 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
3307 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
3308 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
3309 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
3310 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
3311 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS;
3313 if (BNXT_HAS_RING_GRPS(bp)) {
3314 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
3315 req.num_hw_ring_grps =
3316 rte_cpu_to_le_16(pf_resc->num_hw_ring_grps);
3317 } else if (BNXT_HAS_NQ(bp)) {
3318 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
3319 req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
3322 req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
3323 req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
3324 req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
3325 req.num_rsscos_ctxs = rte_cpu_to_le_16(pf_resc->num_rsscos_ctxs);
3326 req.num_stat_ctxs = rte_cpu_to_le_16(pf_resc->num_stat_ctxs);
3327 req.num_cmpl_rings = rte_cpu_to_le_16(pf_resc->num_cp_rings);
3328 req.num_tx_rings = rte_cpu_to_le_16(pf_resc->num_tx_rings);
3329 req.num_rx_rings = rte_cpu_to_le_16(pf_resc->num_rx_rings);
3330 req.num_l2_ctxs = rte_cpu_to_le_16(pf_resc->num_l2_ctxs);
3331 req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
3332 req.fid = rte_cpu_to_le_16(0xffff);
3333 req.enables = rte_cpu_to_le_32(enables);
3335 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3337 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3339 HWRM_CHECK_RESULT();
3345 /* min values are the guaranteed resources and max values are subject
3346 * to availability. The strategy for now is to keep both min & max values the same. */
3350 bnxt_fill_vf_func_cfg_req_new(struct bnxt *bp,
3351 struct hwrm_func_vf_resource_cfg_input *req,
3354 req->max_rsscos_ctx = rte_cpu_to_le_16(bp->max_rsscos_ctx /
3356 req->min_rsscos_ctx = req->max_rsscos_ctx;
3357 req->max_stat_ctx = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
3358 req->min_stat_ctx = req->max_stat_ctx;
3359 req->max_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
3361 req->min_cmpl_rings = req->max_cmpl_rings;
3362 req->max_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
3363 req->min_tx_rings = req->max_tx_rings;
3364 req->max_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
3365 req->min_rx_rings = req->max_rx_rings;
3366 req->max_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
3367 req->min_l2_ctxs = req->max_l2_ctxs;
3368 /* TODO: For now, do not support VMDq/RFS on VFs. */
3369 req->max_vnics = rte_cpu_to_le_16(1);
3370 req->min_vnics = req->max_vnics;
3371 req->max_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
3373 req->min_hw_ring_grps = req->max_hw_ring_grps;
3375 rte_cpu_to_le_16(HWRM_FUNC_VF_RESOURCE_CFG_INPUT_FLAGS_MIN_GUARANTEED);
3379 bnxt_fill_vf_func_cfg_req_old(struct bnxt *bp,
3380 struct hwrm_func_cfg_input *req,
3383 req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
3384 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
3385 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
3386 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
3387 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
3388 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
3389 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
3390 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
3391 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
3392 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
3394 req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
3395 RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
3397 req->mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
3398 req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
3400 req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
3401 req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
3403 req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
3404 req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
3405 req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
3406 /* TODO: For now, do not support VMDq/RFS on VFs. */
3407 req->num_vnics = rte_cpu_to_le_16(1);
3408 req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
3412 /* Update the port wide resource values based on how many resources
3413 * got allocated to the VF.
3415 static int bnxt_update_max_resources(struct bnxt *bp,
3418 struct hwrm_func_qcfg_input req = {0};
3419 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3422 /* Get the actual allocated values now */
3423 HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3424 req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3425 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3426 HWRM_CHECK_RESULT();
3428 bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
3429 bp->max_stat_ctx -= rte_le_to_cpu_16(resp->alloc_stat_ctx);
3430 bp->max_cp_rings -= rte_le_to_cpu_16(resp->alloc_cmpl_rings);
3431 bp->max_tx_rings -= rte_le_to_cpu_16(resp->alloc_tx_rings);
3432 bp->max_rx_rings -= rte_le_to_cpu_16(resp->alloc_rx_rings);
3433 bp->max_l2_ctx -= rte_le_to_cpu_16(resp->alloc_l2_ctx);
3434 bp->max_ring_grps -= rte_le_to_cpu_16(resp->alloc_hw_ring_grps);
3441 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
3443 struct hwrm_func_qcfg_input req = {0};
3444 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3447 /* Query the VF's currently configured default VLAN */
3448 HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3449 req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3450 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3451 HWRM_CHECK_RESULT();
3452 rc = rte_le_to_cpu_16(resp->vlan);
3459 static int bnxt_query_pf_resources(struct bnxt *bp,
3460 struct bnxt_pf_resource_info *pf_resc)
3462 struct hwrm_func_qcfg_input req = {0};
3463 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3466 /* Copy the allocated resource counts into pf_resc */
3467 HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3468 req.fid = rte_cpu_to_le_16(0xffff);
3469 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3470 HWRM_CHECK_RESULT();
3472 pf_resc->num_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
3473 pf_resc->num_rsscos_ctxs = rte_le_to_cpu_16(resp->alloc_rsscos_ctx);
3474 pf_resc->num_stat_ctxs = rte_le_to_cpu_16(resp->alloc_stat_ctx);
3475 pf_resc->num_cp_rings = rte_le_to_cpu_16(resp->alloc_cmpl_rings);
3476 pf_resc->num_rx_rings = rte_le_to_cpu_16(resp->alloc_rx_rings);
3477 pf_resc->num_l2_ctxs = rte_le_to_cpu_16(resp->alloc_l2_ctx);
3478 pf_resc->num_hw_ring_grps = rte_le_to_cpu_32(resp->alloc_hw_ring_grps);
3479 bp->pf->evb_mode = resp->evb_mode;
3487 bnxt_calculate_pf_resources(struct bnxt *bp,
3488 struct bnxt_pf_resource_info *pf_resc,
3492 pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx;
3493 pf_resc->num_stat_ctxs = bp->max_stat_ctx;
3494 pf_resc->num_cp_rings = bp->max_cp_rings;
3495 pf_resc->num_tx_rings = bp->max_tx_rings;
3496 pf_resc->num_rx_rings = bp->max_rx_rings;
3497 pf_resc->num_l2_ctxs = bp->max_l2_ctx;
3498 pf_resc->num_hw_ring_grps = bp->max_ring_grps;
3503 pf_resc->num_rsscos_ctxs = bp->max_rsscos_ctx / (num_vfs + 1) +
3504 bp->max_rsscos_ctx % (num_vfs + 1);
3505 pf_resc->num_stat_ctxs = bp->max_stat_ctx / (num_vfs + 1) +
3506 bp->max_stat_ctx % (num_vfs + 1);
3507 pf_resc->num_cp_rings = bp->max_cp_rings / (num_vfs + 1) +
3508 bp->max_cp_rings % (num_vfs + 1);
3509 pf_resc->num_tx_rings = bp->max_tx_rings / (num_vfs + 1) +
3510 bp->max_tx_rings % (num_vfs + 1);
3511 pf_resc->num_rx_rings = bp->max_rx_rings / (num_vfs + 1) +
3512 bp->max_rx_rings % (num_vfs + 1);
3513 pf_resc->num_l2_ctxs = bp->max_l2_ctx / (num_vfs + 1) +
3514 bp->max_l2_ctx % (num_vfs + 1);
3515 pf_resc->num_hw_ring_grps = bp->max_ring_grps / (num_vfs + 1) +
3516 bp->max_ring_grps % (num_vfs + 1);
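/*
 * Illustrative split (numbers are an example, not from this file): with
 * max_tx_rings = 100 and num_vfs = 7 there are 8 functions, each sized for
 * 100 / 8 = 12 Tx rings, and the PF additionally keeps the remainder
 * 100 % 8 = 4 for a total of 16.
 */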
3519 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
3521 struct bnxt_pf_resource_info pf_resc = { 0 };
3525 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3529 rc = bnxt_hwrm_func_qcaps(bp);
3533 bnxt_calculate_pf_resources(bp, &pf_resc, 0);
3535 bp->pf->func_cfg_flags &=
3536 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3537 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3538 bp->pf->func_cfg_flags |=
3539 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
3540 rc = bnxt_hwrm_pf_func_cfg(bp, &pf_resc);
3541 rc = __bnxt_hwrm_func_qcaps(bp);
3546 bnxt_configure_vf_req_buf(struct bnxt *bp, int num_vfs)
3548 size_t req_buf_sz, sz;
3551 req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
3552 bp->pf->vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
3553 page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
3554 if (bp->pf->vf_req_buf == NULL) {
3558 for (sz = 0; sz < req_buf_sz; sz += getpagesize())
3559 rte_mem_lock_page(((char *)bp->pf->vf_req_buf) + sz);
3561 for (i = 0; i < num_vfs; i++)
3562 bp->pf->vf_info[i].req_buf = ((char *)bp->pf->vf_req_buf) +
3563 (i * HWRM_MAX_REQ_LEN);
3565 rc = bnxt_hwrm_func_buf_rgtr(bp, num_vfs);
3567 rte_free(bp->pf->vf_req_buf);
3573 bnxt_process_vf_resc_config_new(struct bnxt *bp, int num_vfs)
3575 struct hwrm_func_vf_resource_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3576 struct hwrm_func_vf_resource_cfg_input req = {0};
3579 bnxt_fill_vf_func_cfg_req_new(bp, &req, num_vfs);
3580 bp->pf->active_vfs = 0;
3581 for (i = 0; i < num_vfs; i++) {
3582 HWRM_PREP(&req, HWRM_FUNC_VF_RESOURCE_CFG, BNXT_USE_CHIMP_MB);
3583 req.vf_id = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
3584 rc = bnxt_hwrm_send_message(bp,
3588 if (rc || resp->error_code) {
3590 "Failed to initialize VF %d\n", i);
3592 "Not all VFs available. (%d, %d)\n",
3593 rc, resp->error_code);
3596 /* If the first VF configuration itself fails,
3597 * unregister the vf_fwd_request buffer.
3600 bnxt_hwrm_func_buf_unrgtr(bp);
3605 /* Update the max resource values based on the resource values
3606 * allocated to the VF.
3608 bnxt_update_max_resources(bp, i);
3609 bp->pf->active_vfs++;
3610 bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
3617 bnxt_process_vf_resc_config_old(struct bnxt *bp, int num_vfs)
3619 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3620 struct hwrm_func_cfg_input req = {0};
3623 bnxt_fill_vf_func_cfg_req_old(bp, &req, num_vfs);
3625 bp->pf->active_vfs = 0;
3626 for (i = 0; i < num_vfs; i++) {
3627 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3628 req.flags = rte_cpu_to_le_32(bp->pf->vf_info[i].func_cfg_flags);
3629 req.fid = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
3630 rc = bnxt_hwrm_send_message(bp,
3635 /* Clear enable flag for next pass */
3636 req.enables &= ~rte_cpu_to_le_32(
3637 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
3639 if (rc || resp->error_code) {
3641 "Failed to initialize VF %d\n", i);
3643 "Not all VFs available. (%d, %d)\n",
3644 rc, resp->error_code);
3647 /* If the first VF configuration itself fails,
3648 * unregister the vf_fwd_request buffer.
3651 bnxt_hwrm_func_buf_unrgtr(bp);
3657 /* Update the max resource values based on the resource values
3658 * allocated to the VF.
3660 bnxt_update_max_resources(bp, i);
3661 bp->pf->active_vfs++;
3662 bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
3669 bnxt_configure_vf_resources(struct bnxt *bp, int num_vfs)
3671 if (bp->flags & BNXT_FLAG_NEW_RM)
3672 bnxt_process_vf_resc_config_new(bp, num_vfs);
3674 bnxt_process_vf_resc_config_old(bp, num_vfs);
3678 bnxt_update_pf_resources(struct bnxt *bp,
3679 struct bnxt_pf_resource_info *pf_resc)
3681 bp->max_rsscos_ctx = pf_resc->num_rsscos_ctxs;
3682 bp->max_stat_ctx = pf_resc->num_stat_ctxs;
3683 bp->max_cp_rings = pf_resc->num_cp_rings;
3684 bp->max_tx_rings = pf_resc->num_tx_rings;
3685 bp->max_rx_rings = pf_resc->num_rx_rings;
3686 bp->max_ring_grps = pf_resc->num_hw_ring_grps;
3690 bnxt_configure_pf_resources(struct bnxt *bp,
3691 struct bnxt_pf_resource_info *pf_resc)
3694 * We're using STD_TX_RING_MODE here which will limit the TX
3695 * rings. This will allow QoS to function properly. Not setting this
3696 * will cause PF rings to break bandwidth settings.
3698 bp->pf->func_cfg_flags &=
3699 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3700 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3701 bp->pf->func_cfg_flags |=
3702 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
3703 return bnxt_hwrm_pf_func_cfg(bp, pf_resc);
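/*
 * SR-IOV enable sequence: size the PF, apply and re-query the PF resources,
 * register the VF request-forwarding buffer, configure each VF (new or old
 * resource manager flow) and finally shrink the PF maxima to what remains.
 */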
3706 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
3708 struct bnxt_pf_resource_info pf_resc = { 0 };
3712 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3716 rc = bnxt_hwrm_func_qcaps(bp);
3720 bnxt_calculate_pf_resources(bp, &pf_resc, num_vfs);
3722 rc = bnxt_configure_pf_resources(bp, &pf_resc);
3726 rc = bnxt_query_pf_resources(bp, &pf_resc);
3731 * Now, create and register a buffer to hold forwarded VF requests
3733 rc = bnxt_configure_vf_req_buf(bp, num_vfs);
3737 bnxt_configure_vf_resources(bp, num_vfs);
3739 bnxt_update_pf_resources(bp, &pf_resc);
3744 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
3746 struct hwrm_func_cfg_input req = {0};
3747 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3750 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3752 req.fid = rte_cpu_to_le_16(0xffff);
3753 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
3754 req.evb_mode = bp->pf->evb_mode;
3756 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3757 HWRM_CHECK_RESULT();
3763 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
3764 uint8_t tunnel_type)
3766 struct hwrm_tunnel_dst_port_alloc_input req = {0};
3767 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3770 HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
3771 req.tunnel_type = tunnel_type;
3772 req.tunnel_dst_port_val = rte_cpu_to_be_16(port);
3773 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3774 HWRM_CHECK_RESULT();
3776 switch (tunnel_type) {
3777 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
3778 bp->vxlan_fw_dst_port_id =
3779 rte_le_to_cpu_16(resp->tunnel_dst_port_id);
3780 bp->vxlan_port = port;
3782 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
3783 bp->geneve_fw_dst_port_id =
3784 rte_le_to_cpu_16(resp->tunnel_dst_port_id);
3785 bp->geneve_port = port;
3796 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
3797 uint8_t tunnel_type)
3799 struct hwrm_tunnel_dst_port_free_input req = {0};
3800 struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
3803 HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
3805 req.tunnel_type = tunnel_type;
3806 req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
3807 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3809 HWRM_CHECK_RESULT();
3815 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
3818 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3819 struct hwrm_func_cfg_input req = {0};
3822 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3824 req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3825 req.flags = rte_cpu_to_le_32(flags);
3826 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3828 HWRM_CHECK_RESULT();
3834 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
3836 uint32_t *flag = flagp;
3838 vnic->flags = *flag;
3841 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3843 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
3846 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp, int num_vfs)
3848 struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
3849 struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
3852 HWRM_PREP(&req, HWRM_FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
3854 req.req_buf_num_pages = rte_cpu_to_le_16(1);
3855 req.req_buf_page_size =
3856 rte_cpu_to_le_16(page_getenum(num_vfs * HWRM_MAX_REQ_LEN));
3857 req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
3858 req.req_buf_page_addr0 =
3859 rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf->vf_req_buf));
3860 if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
3862 "unable to map buffer address to physical memory\n");
3867 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3869 HWRM_CHECK_RESULT();
3875 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
3878 struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
3879 struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
3881 if (!(BNXT_PF(bp) && bp->pdev->max_vfs))
3884 HWRM_PREP(&req, HWRM_FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
3886 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3888 HWRM_CHECK_RESULT();
3894 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
3896 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3897 struct hwrm_func_cfg_input req = {0};
3900 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3902 req.fid = rte_cpu_to_le_16(0xffff);
3903 req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
3904 req.enables = rte_cpu_to_le_32(
3905 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3906 req.async_event_cr = rte_cpu_to_le_16(
3907 bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3908 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3910 HWRM_CHECK_RESULT();
3916 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
3918 struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3919 struct hwrm_func_vf_cfg_input req = {0};
3922 HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
3924 req.enables = rte_cpu_to_le_32(
3925 HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3926 req.async_event_cr = rte_cpu_to_le_16(
3927 bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3928 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3930 HWRM_CHECK_RESULT();
3936 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
3938 struct hwrm_func_cfg_input req = {0};
3939 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3940 uint16_t dflt_vlan, fid;
3941 uint32_t func_cfg_flags;
3944 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3947 dflt_vlan = bp->pf->vf_info[vf].dflt_vlan;
3948 fid = bp->pf->vf_info[vf].fid;
3949 func_cfg_flags = bp->pf->vf_info[vf].func_cfg_flags;
3951 fid = rte_cpu_to_le_16(0xffff);
3952 func_cfg_flags = bp->pf->func_cfg_flags;
3953 dflt_vlan = bp->vlan;
3956 req.flags = rte_cpu_to_le_32(func_cfg_flags);
3957 req.fid = rte_cpu_to_le_16(fid);
3958 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3959 req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
3961 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3963 HWRM_CHECK_RESULT();
3969 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
3970 uint16_t max_bw, uint16_t enables)
3972 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3973 struct hwrm_func_cfg_input req = {0};
3976 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3978 req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3979 req.enables |= rte_cpu_to_le_32(enables);
3980 req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
3981 req.max_bw = rte_cpu_to_le_32(max_bw);
3982 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3984 HWRM_CHECK_RESULT();
3990 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
3992 struct hwrm_func_cfg_input req = {0};
3993 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3996 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3998 req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
3999 req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4000 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
4001 req.dflt_vlan = rte_cpu_to_le_16(bp->pf->vf_info[vf].dflt_vlan);
4003 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4005 HWRM_CHECK_RESULT();
4011 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
4016 rc = bnxt_hwrm_func_cfg_def_cp(bp);
4018 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
4023 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
4024 void *encaped, size_t ec_size)
4027 struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
4028 struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
4030 if (ec_size > sizeof(req.encap_request))
4033 HWRM_PREP(&req, HWRM_REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
4035 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
4036 memcpy(req.encap_request, encaped, ec_size);
4038 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4040 HWRM_CHECK_RESULT();
4046 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
4047 struct rte_ether_addr *mac)
4049 struct hwrm_func_qcfg_input req = {0};
4050 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4053 HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
4055 req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4056 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4058 HWRM_CHECK_RESULT();
4060 memcpy(mac->addr_bytes, resp->mac_address, RTE_ETHER_ADDR_LEN);
4067 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
4068 void *encaped, size_t ec_size)
4071 struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
4072 struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
4074 if (ec_size > sizeof(req.encap_request))
4077 HWRM_PREP(&req, HWRM_EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
4079 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
4080 memcpy(req.encap_request, encaped, ec_size);
4082 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4084 HWRM_CHECK_RESULT();
4090 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
4091 struct rte_eth_stats *stats, uint8_t rx)
4094 struct hwrm_stat_ctx_query_input req = {.req_type = 0};
4095 struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
4097 HWRM_PREP(&req, HWRM_STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
4099 req.stat_ctx_id = rte_cpu_to_le_32(cid);
4101 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4103 HWRM_CHECK_RESULT();
4106 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
4107 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
4108 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
4109 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
4110 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
4111 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
4112 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_discard_pkts);
4113 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_error_pkts);
4115 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
4116 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
4117 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
4118 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
4119 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
4120 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
4128 int bnxt_hwrm_port_qstats(struct bnxt *bp)
4130 struct hwrm_port_qstats_input req = {0};
4131 struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
4132 struct bnxt_pf_info *pf = bp->pf;
4135 HWRM_PREP(&req, HWRM_PORT_QSTATS, BNXT_USE_CHIMP_MB);
4137 req.port_id = rte_cpu_to_le_16(pf->port_id);
4138 req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
4139 req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
4140 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4142 HWRM_CHECK_RESULT();
4148 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
4150 struct hwrm_port_clr_stats_input req = {0};
4151 struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
4152 struct bnxt_pf_info *pf = bp->pf;
4155 /* Not allowed on NS2 device, NPAR, MultiHost, VF */
4156 if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
4157 BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
4160 HWRM_PREP(&req, HWRM_PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
4162 req.port_id = rte_cpu_to_le_16(pf->port_id);
4163 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4165 HWRM_CHECK_RESULT();
4171 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
4173 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4174 struct hwrm_port_led_qcaps_input req = {0};
4180 HWRM_PREP(&req, HWRM_PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
4181 req.port_id = bp->pf->port_id;
4182 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4184 HWRM_CHECK_RESULT();
4186 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
4189 bp->leds->num_leds = resp->num_leds;
4190 memcpy(bp->leds, &resp->led0_id,
4191 sizeof(bp->leds[0]) * bp->leds->num_leds);
4192 for (i = 0; i < bp->leds->num_leds; i++) {
4193 struct bnxt_led_info *led = &bp->leds[i];
4195 uint16_t caps = led->led_state_caps;
4197 if (!led->led_group_id ||
4198 !BNXT_LED_ALT_BLINK_CAP(caps)) {
4199 bp->leds->num_leds = 0;
4210 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
4212 struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4213 struct hwrm_port_led_cfg_input req = {0};
4214 struct bnxt_led_cfg *led_cfg;
4215 uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
4216 uint16_t duration = 0;
4219 if (!bp->leds->num_leds || BNXT_VF(bp))
4222 HWRM_PREP(&req, HWRM_PORT_LED_CFG, BNXT_USE_CHIMP_MB);
4225 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
4226 duration = rte_cpu_to_le_16(500);
4228 req.port_id = bp->pf->port_id;
4229 req.num_leds = bp->leds->num_leds;
4230 led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
4231 for (i = 0; i < bp->leds->num_leds; i++, led_cfg++) {
4232 req.enables |= BNXT_LED_DFLT_ENABLES(i);
4233 led_cfg->led_id = bp->leds[i].led_id;
4234 led_cfg->led_state = led_state;
4235 led_cfg->led_blink_on = duration;
4236 led_cfg->led_blink_off = duration;
4237 led_cfg->led_group_id = bp->leds[i].led_group_id;
4240 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4242 HWRM_CHECK_RESULT();
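/*
 * NVM access helpers: directory info/entries, item reads and writes use
 * rte_malloc'd bounce buffers whose IOVAs are passed to firmware in
 * host_dest_addr/host_src_addr.
 */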
4248 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
4252 struct hwrm_nvm_get_dir_info_input req = {0};
4253 struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
4255 HWRM_PREP(&req, HWRM_NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
4257 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4259 HWRM_CHECK_RESULT();
4261 *entries = rte_le_to_cpu_32(resp->entries);
4262 *length = rte_le_to_cpu_32(resp->entry_length);
4268 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
4271 uint32_t dir_entries;
4272 uint32_t entry_length;
4275 rte_iova_t dma_handle;
4276 struct hwrm_nvm_get_dir_entries_input req = {0};
4277 struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
4279 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
4283 *data++ = dir_entries;
4284 *data++ = entry_length;
4286 memset(data, 0xff, len);
4288 buflen = dir_entries * entry_length;
4289 buf = rte_malloc("nvm_dir", buflen, 0);
4292 dma_handle = rte_malloc_virt2iova(buf);
4293 if (dma_handle == RTE_BAD_IOVA) {
4295 "unable to map response address to physical memory\n");
4298 HWRM_PREP(&req, HWRM_NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
4299 req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
4300 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4303 memcpy(data, buf, len > buflen ? buflen : len);
4306 HWRM_CHECK_RESULT();
4312 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
4313 uint32_t offset, uint32_t length,
4318 rte_iova_t dma_handle;
4319 struct hwrm_nvm_read_input req = {0};
4320 struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
4322 buf = rte_malloc("nvm_item", length, 0);
4326 dma_handle = rte_malloc_virt2iova(buf);
4327 if (dma_handle == RTE_BAD_IOVA) {
4329 "unable to map response address to physical memory\n");
4332 HWRM_PREP(&req, HWRM_NVM_READ, BNXT_USE_CHIMP_MB);
4333 req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
4334 req.dir_idx = rte_cpu_to_le_16(index);
4335 req.offset = rte_cpu_to_le_32(offset);
4336 req.len = rte_cpu_to_le_32(length);
4337 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4339 memcpy(data, buf, length);
4342 HWRM_CHECK_RESULT();
4348 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
4351 struct hwrm_nvm_erase_dir_entry_input req = {0};
4352 struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
4354 HWRM_PREP(&req, HWRM_NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
4355 req.dir_idx = rte_cpu_to_le_16(index);
4356 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4357 HWRM_CHECK_RESULT();
4364 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
4365 uint16_t dir_ordinal, uint16_t dir_ext,
4366 uint16_t dir_attr, const uint8_t *data,
4370 struct hwrm_nvm_write_input req = {0};
4371 struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
4372 rte_iova_t dma_handle;
4375 buf = rte_malloc("nvm_write", data_len, 0);
4379 dma_handle = rte_malloc_virt2iova(buf);
4380 if (dma_handle == RTE_BAD_IOVA) {
4382 "unable to map response address to physical memory\n");
4385 memcpy(buf, data, data_len);
4387 HWRM_PREP(&req, HWRM_NVM_WRITE, BNXT_USE_CHIMP_MB);
4389 req.dir_type = rte_cpu_to_le_16(dir_type);
4390 req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
4391 req.dir_ext = rte_cpu_to_le_16(dir_ext);
4392 req.dir_attr = rte_cpu_to_le_16(dir_attr);
4393 req.dir_data_length = rte_cpu_to_le_32(data_len);
4394 req.host_src_addr = rte_cpu_to_le_64(dma_handle);
4396 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4399 HWRM_CHECK_RESULT();
4406 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
4408 uint32_t *count = cbdata;
4410 *count = *count + 1;
4413 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
4414 struct bnxt_vnic_info *vnic __rte_unused)
4419 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
4423 bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
4424 &count, bnxt_vnic_count_hwrm_stub);
4429 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
4432 struct hwrm_func_vf_vnic_ids_query_input req = {0};
4433 struct hwrm_func_vf_vnic_ids_query_output *resp =
4434 bp->hwrm_cmd_resp_addr;
4437 /* First query all VNIC ids */
4438 HWRM_PREP(&req, HWRM_FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);
4440 req.vf_id = rte_cpu_to_le_16(bp->pf->first_vf_id + vf);
4441 req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf->total_vnics);
4442 req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(vnic_ids));
4444 if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
4447 "unable to map VNIC ID table address to physical memory\n");
4450 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4451 HWRM_CHECK_RESULT();
4452 rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
4460 * This function queries the VNIC IDs for a specified VF. It then calls
4461 * the vnic_cb to update the necessary field in vnic_info with cbdata.
4462 * Then it calls the hwrm_cb function to program this new vnic configuration.
4464 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
4465 void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
4466 int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
4468 struct bnxt_vnic_info vnic;
4470 int i, num_vnic_ids;
4475 /* First query all VNIC ids */
4476 vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
4477 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
4478 RTE_CACHE_LINE_SIZE);
4479 if (vnic_ids == NULL)
4482 for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
4483 rte_mem_lock_page(((char *)vnic_ids) + sz);
4485 num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
4487 if (num_vnic_ids < 0)
4488 return num_vnic_ids;
4490 /* Retrieve VNIC, update bd_stall then update */
4492 for (i = 0; i < num_vnic_ids; i++) {
4493 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
4494 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
4495 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf->first_vf_id + vf);
4498 if (vnic.mru <= 4) /* Indicates unallocated */
4501 vnic_cb(&vnic, cbdata);
4503 rc = hwrm_cb(bp, &vnic);
4513 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
4516 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4517 struct hwrm_func_cfg_input req = {0};
4520 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
4522 req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
4523 req.enables |= rte_cpu_to_le_32(
4524 HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
4525 req.vlan_antispoof_mode = on ?
4526 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
4527 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
4528 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4530 HWRM_CHECK_RESULT();
4536 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
4538 struct bnxt_vnic_info vnic;
4541 int num_vnic_ids, i;
4545 vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
4546 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
4547 RTE_CACHE_LINE_SIZE);
4548 if (vnic_ids == NULL)
4551 for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
4552 rte_mem_lock_page(((char *)vnic_ids) + sz);
4554 rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
4560 * Loop through to find the default VNIC ID.
4561 * TODO: The easier way would be to obtain the resp->dflt_vnic_id
4562 * by sending the hwrm_func_qcfg command to the firmware.
4564 for (i = 0; i < num_vnic_ids; i++) {
4565 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
4566 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
4567 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
4568 bp->pf->first_vf_id + vf);
4571 if (vnic.func_default) {
4573 return vnic.fw_vnic_id;
4576 /* Could not find a default VNIC. */
4577 PMD_DRV_LOG(ERR, "No default VNIC\n");
4583 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
4585 struct bnxt_filter_info *filter)
4588 struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
4589 struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4590 uint32_t enables = 0;
4592 if (filter->fw_em_filter_id != UINT64_MAX)
4593 bnxt_hwrm_clear_em_filter(bp, filter);
4595 HWRM_PREP(&req, HWRM_CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));
4597 req.flags = rte_cpu_to_le_32(filter->flags);
4599 enables = filter->enables |
4600 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
4601 req.dst_id = rte_cpu_to_le_16(dst_id);
4603 if (filter->ip_addr_type) {
4604 req.ip_addr_type = filter->ip_addr_type;
4605 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4608 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4609 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4611 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4612 memcpy(req.src_macaddr, filter->src_macaddr,
4613 RTE_ETHER_ADDR_LEN);
4615 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
4616 memcpy(req.dst_macaddr, filter->dst_macaddr,
4617 RTE_ETHER_ADDR_LEN);
4619 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
4620 req.ovlan_vid = filter->l2_ovlan;
4622 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
4623 req.ivlan_vid = filter->l2_ivlan;
4625 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
4626 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4628 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4629 req.ip_protocol = filter->ip_protocol;
4631 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4632 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
4634 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
4635 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
4637 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
4638 req.src_port = rte_cpu_to_be_16(filter->src_port);
4640 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
4641 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
4643 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4644 req.mirror_vnic_id = filter->mirror_vnic_id;
4646 req.enables = rte_cpu_to_le_32(enables);
4648 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4650 HWRM_CHECK_RESULT();
4652 filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
4658 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
4661 struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
4662 struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
4664 if (filter->fw_em_filter_id == UINT64_MAX)
4667 HWRM_PREP(&req, HWRM_CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));
4669 req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
4671 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
4673 HWRM_CHECK_RESULT();
4676 filter->fw_em_filter_id = UINT64_MAX;
4677 filter->fw_l2_filter_id = UINT64_MAX;
4682 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
4684 struct bnxt_filter_info *filter)
4687 struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
4688 struct hwrm_cfa_ntuple_filter_alloc_output *resp =
4689 bp->hwrm_cmd_resp_addr;
4690 uint32_t enables = 0;
4692 if (filter->fw_ntuple_filter_id != UINT64_MAX)
4693 bnxt_hwrm_clear_ntuple_filter(bp, filter);
4695 HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
4697 req.flags = rte_cpu_to_le_32(filter->flags);
4699 enables = filter->enables |
4700 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
4701 req.dst_id = rte_cpu_to_le_16(dst_id);
4703 if (filter->ip_addr_type) {
4704 req.ip_addr_type = filter->ip_addr_type;
4706 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
4709 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
4710 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
4712 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
4713 memcpy(req.src_macaddr, filter->src_macaddr,
4714 RTE_ETHER_ADDR_LEN);
4716 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
4717 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
4719 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
4720 req.ip_protocol = filter->ip_protocol;
4722 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
4723 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
4725 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
4726 req.src_ipaddr_mask[0] =
4727 rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
4729 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
4730 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
4732 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
4733 req.dst_ipaddr_mask[0] =
4734 rte_cpu_to_be_32(filter->dst_ipaddr_mask[0]);
4736 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
4737 req.src_port = rte_cpu_to_le_16(filter->src_port);
4739 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
4740 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
4742 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
4743 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
4745 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
4746 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
4748 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
4749 req.mirror_vnic_id = filter->mirror_vnic_id;
4751 req.enables = rte_cpu_to_le_32(enables);
4753 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4755 HWRM_CHECK_RESULT();
4757 filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
4758 filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
4764 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
4765 struct bnxt_filter_info *filter)
4768 struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
4769 struct hwrm_cfa_ntuple_filter_free_output *resp =
4770 bp->hwrm_cmd_resp_addr;
4772 if (filter->fw_ntuple_filter_id == UINT64_MAX)
4775 HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);
4777 req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
4779 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4781 HWRM_CHECK_RESULT();
4784 filter->fw_ntuple_filter_id = UINT64_MAX;
4790 bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4792 struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4793 uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
4794 struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
4795 struct bnxt_rx_queue **rxqs = bp->rx_queues;
4796 uint16_t *ring_tbl = vnic->rss_table;
4797 int nr_ctxs = vnic->num_lb_ctxts;
4798 int max_rings = bp->rx_nr_rings;
4802 for (i = 0, k = 0; i < nr_ctxs; i++) {
4803 struct bnxt_rx_ring_info *rxr;
4804 struct bnxt_cp_ring_info *cpr;
4806 HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
4808 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
4809 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
4810 req.hash_mode_flags = vnic->hash_mode;
4812 req.ring_grp_tbl_addr =
4813 rte_cpu_to_le_64(vnic->rss_table_dma_addr +
4814 i * BNXT_RSS_ENTRIES_PER_CTX_THOR *
4815 2 * sizeof(*ring_tbl));
4816 req.hash_key_tbl_addr =
4817 rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
4819 req.ring_table_pair_index = i;
4820 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
4822 for (j = 0; j < 64; j++) {
4825 /* Find next active ring. */
4826 for (cnt = 0; cnt < max_rings; cnt++) {
4827 if (rx_queue_state[k] !=
4828 RTE_ETH_QUEUE_STATE_STOPPED)
4830 if (++k == max_rings)
4834 /* Return if no rings are active. */
4835 if (cnt == max_rings) {
4840 /* Add rx/cp ring pair to RSS table. */
4841 rxr = rxqs[k]->rx_ring;
4842 cpr = rxqs[k]->cp_ring;
4844 ring_id = rxr->rx_ring_struct->fw_ring_id;
4845 *ring_tbl++ = rte_cpu_to_le_16(ring_id);
4846 ring_id = cpr->cp_ring_struct->fw_ring_id;
4847 *ring_tbl++ = rte_cpu_to_le_16(ring_id);
4849 if (++k == max_rings)
4852 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
4855 HWRM_CHECK_RESULT();
4862 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
4864 unsigned int rss_idx, fw_idx, i;
4866 if (!(vnic->rss_table && vnic->hash_type))
4869 if (BNXT_CHIP_THOR(bp))
4870 return bnxt_vnic_rss_configure_thor(bp, vnic);
4872 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4875 if (vnic->rss_table && vnic->hash_type) {
4877 * Fill the RSS hash & redirection table with
4878 * ring group ids for all VNICs
4880 for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
4881 rss_idx++, fw_idx++) {
4882 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
4883 fw_idx %= bp->rx_cp_nr_rings;
4884 if (vnic->fw_grp_ids[fw_idx] !=
4889 if (i == bp->rx_cp_nr_rings)
4891 vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
4893 return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
4899 static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
4900 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
4904 req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);
4906 /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */
4907 req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);
4909 /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */
4910 req->num_cmpl_dma_aggr_during_int =
4911 rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);
4913 req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);
4915 /* min timer set to 1/2 of interrupt timer */
4916 req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);
4918 /* buf timer set to 1/4 of interrupt timer */
4919 req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);
4921 req->cmpl_aggr_dma_tmr_during_int =
4922 rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);
4924 flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
4925 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
4926 req->flags = rte_cpu_to_le_16(flags);
4929 static int bnxt_hwrm_set_coal_params_thor(struct bnxt *bp,
4930 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req)
4932 struct hwrm_ring_aggint_qcaps_input req = {0};
4933 struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4938 HWRM_PREP(&req, HWRM_RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB);
4939 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4940 HWRM_CHECK_RESULT();
4942 agg_req->num_cmpl_dma_aggr = resp->num_cmpl_dma_aggr_max;
4943 agg_req->cmpl_aggr_dma_tmr = resp->cmpl_aggr_dma_tmr_min;
4945 flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
4946 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
4947 agg_req->flags = rte_cpu_to_le_16(flags);
4949 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR |
4950 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR;
4951 agg_req->enables = rte_cpu_to_le_32(enables);
4957 int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
4958 struct bnxt_coal *coal, uint16_t ring_id)
4960 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
4961 struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
4962 bp->hwrm_cmd_resp_addr;
4965 /* Set ring coalesce parameters only for 100G NICs */
4966 if (BNXT_CHIP_THOR(bp)) {
4967 if (bnxt_hwrm_set_coal_params_thor(bp, &req))
4969 } else if (bnxt_stratus_device(bp)) {
4970 bnxt_hwrm_set_coal_params(coal, &req);
4976 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
4978 req.ring_id = rte_cpu_to_le_16(ring_id);
4979 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4980 HWRM_CHECK_RESULT();
4985 #define BNXT_RTE_MEMZONE_FLAG (RTE_MEMZONE_1GB | RTE_MEMZONE_IOVA_CONTIG)
4986 int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
4988 struct hwrm_func_backing_store_qcaps_input req = {0};
4989 struct hwrm_func_backing_store_qcaps_output *resp =
4990 bp->hwrm_cmd_resp_addr;
4991 struct bnxt_ctx_pg_info *ctx_pg;
4992 struct bnxt_ctx_mem_info *ctx;
4993 int total_alloc_len;
4994 int rc, i, tqm_rings;
4996 if (!BNXT_CHIP_THOR(bp) ||
4997 bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
5002 HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB);
5003 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5004 HWRM_CHECK_RESULT_SILENT();
5006 total_alloc_len = sizeof(*ctx);
5007 ctx = rte_zmalloc("bnxt_ctx_mem", total_alloc_len,
5008 RTE_CACHE_LINE_SIZE);
5014 ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
5015 ctx->qp_min_qp1_entries =
5016 rte_le_to_cpu_16(resp->qp_min_qp1_entries);
5017 ctx->qp_max_l2_entries =
5018 rte_le_to_cpu_16(resp->qp_max_l2_entries);
5019 ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
5020 ctx->srq_max_l2_entries =
5021 rte_le_to_cpu_16(resp->srq_max_l2_entries);
5022 ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
5023 ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
5024 ctx->cq_max_l2_entries =
5025 rte_le_to_cpu_16(resp->cq_max_l2_entries);
5026 ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
5027 ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
5028 ctx->vnic_max_vnic_entries =
5029 rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
5030 ctx->vnic_max_ring_table_entries =
5031 rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
5032 ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
5033 ctx->stat_max_entries =
5034 rte_le_to_cpu_32(resp->stat_max_entries);
5035 ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
5036 ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
5037 ctx->tqm_min_entries_per_ring =
5038 rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
5039 ctx->tqm_max_entries_per_ring =
5040 rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
5041 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
5042 if (!ctx->tqm_entries_multiple)
5043 ctx->tqm_entries_multiple = 1;
5044 ctx->mrav_max_entries =
5045 rte_le_to_cpu_32(resp->mrav_max_entries);
5046 ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
5047 ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
5048 ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
5049 ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
5051 if (!ctx->tqm_fp_rings_count)
5052 ctx->tqm_fp_rings_count = bp->max_q;
5054 tqm_rings = ctx->tqm_fp_rings_count + 1;
5056 ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
5057 sizeof(*ctx_pg) * tqm_rings,
5058 RTE_CACHE_LINE_SIZE);
5063 for (i = 0; i < tqm_rings; i++, ctx_pg++)
5064 ctx->tqm_mem[i] = ctx_pg;
5072 int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)
5074 struct hwrm_func_backing_store_cfg_input req = {0};
5075 struct hwrm_func_backing_store_cfg_output *resp =
5076 bp->hwrm_cmd_resp_addr;
5077 struct bnxt_ctx_mem_info *ctx = bp->ctx;
5078 struct bnxt_ctx_pg_info *ctx_pg;
5079 uint32_t *num_entries;
5088 HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB);
5089 req.enables = rte_cpu_to_le_32(enables);
5091 if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
5092 ctx_pg = &ctx->qp_mem;
5093 req.qp_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5094 req.qp_num_qp1_entries =
5095 rte_cpu_to_le_16(ctx->qp_min_qp1_entries);
5096 req.qp_num_l2_entries =
5097 rte_cpu_to_le_16(ctx->qp_max_l2_entries);
5098 req.qp_entry_size = rte_cpu_to_le_16(ctx->qp_entry_size);
5099 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5100 &req.qpc_pg_size_qpc_lvl,
5104 if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
5105 ctx_pg = &ctx->srq_mem;
5106 req.srq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5107 req.srq_num_l2_entries =
5108 rte_cpu_to_le_16(ctx->srq_max_l2_entries);
5109 req.srq_entry_size = rte_cpu_to_le_16(ctx->srq_entry_size);
5110 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5111 &req.srq_pg_size_srq_lvl,
5115 if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
5116 ctx_pg = &ctx->cq_mem;
5117 req.cq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
5118 req.cq_num_l2_entries =
5119 rte_cpu_to_le_16(ctx->cq_max_l2_entries);
5120 req.cq_entry_size = rte_cpu_to_le_16(ctx->cq_entry_size);
5121 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5122 &req.cq_pg_size_cq_lvl,
5126 if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
5127 ctx_pg = &ctx->vnic_mem;
5128 req.vnic_num_vnic_entries =
5129 rte_cpu_to_le_16(ctx->vnic_max_vnic_entries);
5130 req.vnic_num_ring_table_entries =
5131 rte_cpu_to_le_16(ctx->vnic_max_ring_table_entries);
5132 req.vnic_entry_size = rte_cpu_to_le_16(ctx->vnic_entry_size);
5133 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5134 &req.vnic_pg_size_vnic_lvl,
5135 &req.vnic_page_dir);
5138 if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
5139 ctx_pg = &ctx->stat_mem;
5140 req.stat_num_entries = rte_cpu_to_le_16(ctx->stat_max_entries);
5141 req.stat_entry_size = rte_cpu_to_le_16(ctx->stat_entry_size);
5142 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5143 &req.stat_pg_size_stat_lvl,
5144 &req.stat_page_dir);
5147 req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
5148 num_entries = &req.tqm_sp_num_entries;
5149 pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl;
5150 pg_dir = &req.tqm_sp_page_dir;
5151 ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
5152 for (i = 0; i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
5153 if (!(enables & ena))
5156 req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
5158 ctx_pg = ctx->tqm_mem[i];
5159 *num_entries = rte_cpu_to_le_16(ctx_pg->entries);
5160 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
5163 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5164 HWRM_CHECK_RESULT();
5170 int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
5172 struct hwrm_port_qstats_ext_input req = {0};
5173 struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
5174 struct bnxt_pf_info *pf = bp->pf;
5177 if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
5178 bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
5181 HWRM_PREP(&req, HWRM_PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);
5183 req.port_id = rte_cpu_to_le_16(pf->port_id);
5184 if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
5185 req.tx_stat_host_addr =
5186 rte_cpu_to_le_64(bp->hw_tx_port_stats_ext_map);
5188 rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
5190 if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
5191 req.rx_stat_host_addr =
5192 rte_cpu_to_le_64(bp->hw_rx_port_stats_ext_map);
5194 rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
5196 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5199 bp->fw_rx_port_stats_ext_size = 0;
5200 bp->fw_tx_port_stats_ext_size = 0;
5202 bp->fw_rx_port_stats_ext_size =
5203 rte_le_to_cpu_16(resp->rx_stat_size);
5204 bp->fw_tx_port_stats_ext_size =
5205 rte_le_to_cpu_16(resp->tx_stat_size);
5208 HWRM_CHECK_RESULT();
5215 bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type)
5217 struct hwrm_cfa_redirect_tunnel_type_alloc_input req = {0};
5218 struct hwrm_cfa_redirect_tunnel_type_alloc_output *resp =
5219 bp->hwrm_cmd_resp_addr;
5222 HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_CHIMP_MB);
5223 req.tunnel_type = type;
5224 req.dest_fid = bp->fw_fid;
5225 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5226 HWRM_CHECK_RESULT();
5234 bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type)
5236 struct hwrm_cfa_redirect_tunnel_type_free_input req = {0};
5237 struct hwrm_cfa_redirect_tunnel_type_free_output *resp =
5238 bp->hwrm_cmd_resp_addr;
5241 HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_CHIMP_MB);
5242 req.tunnel_type = type;
5243 req.dest_fid = bp->fw_fid;
5244 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5245 HWRM_CHECK_RESULT();
5252 int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type)
5254 struct hwrm_cfa_redirect_query_tunnel_type_input req = {0};
5255 struct hwrm_cfa_redirect_query_tunnel_type_output *resp =
5256 bp->hwrm_cmd_resp_addr;
5259 HWRM_PREP(&req, HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_CHIMP_MB);
5260 req.src_fid = bp->fw_fid;
5261 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5262 HWRM_CHECK_RESULT();
5265 *type = rte_le_to_cpu_32(resp->tunnel_mask);
5272 int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type,
5275 struct hwrm_cfa_redirect_tunnel_type_info_input req = {0};
5276 struct hwrm_cfa_redirect_tunnel_type_info_output *resp =
5277 bp->hwrm_cmd_resp_addr;
5280 HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB);
5281 req.src_fid = bp->fw_fid;
5282 req.tunnel_type = tun_type;
5283 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5284 HWRM_CHECK_RESULT();
5287 *dst_fid = rte_le_to_cpu_16(resp->dest_fid);
5289 PMD_DRV_LOG(DEBUG, "dst_fid: %x\n", resp->dest_fid);
5296 int bnxt_hwrm_set_mac(struct bnxt *bp)
5298 struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5299 struct hwrm_func_vf_cfg_input req = {0};
5305 HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
5308 rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
5309 memcpy(req.dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);
5311 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5313 HWRM_CHECK_RESULT();
5320 int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
5322 struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
5323 struct hwrm_func_drv_if_change_input req = {0};
5327 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
5330 /* Do not issue FUNC_DRV_IF_CHANGE during reset recovery.
5331 * If we issue FUNC_DRV_IF_CHANGE with flags down before
5332 * FUNC_DRV_UNRGTR, FW resets before FUNC_DRV_UNRGTR
5334 if (!up && (bp->flags & BNXT_FLAG_FW_RESET))
5337 HWRM_PREP(&req, HWRM_FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB);
5341 rte_cpu_to_le_32(HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP);
5343 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5345 HWRM_CHECK_RESULT();
5346 flags = rte_le_to_cpu_32(resp->flags);
5352 if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE) {
5353 PMD_DRV_LOG(INFO, "FW reset happened while port was down\n");
5354 bp->flags |= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
5360 int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
5362 struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5363 struct bnxt_error_recovery_info *info = bp->recovery_info;
5364 struct hwrm_error_recovery_qcfg_input req = {0};
5369 /* Older FW does not have error recovery support */
5370 if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
5373 HWRM_PREP(&req, HWRM_ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB);
5375 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5377 HWRM_CHECK_RESULT();
5379 flags = rte_le_to_cpu_32(resp->flags);
5380 if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST)
5381 info->flags |= BNXT_FLAG_ERROR_RECOVERY_HOST;
5382 else if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
5383 info->flags |= BNXT_FLAG_ERROR_RECOVERY_CO_CPU;
5385 if ((info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) &&
5386 !(bp->flags & BNXT_FLAG_KONG_MB_EN)) {
5391 /* FW returned values are in units of 100msec */
5392 info->driver_polling_freq =
5393 rte_le_to_cpu_32(resp->driver_polling_freq) * 100;
5394 info->master_func_wait_period =
5395 rte_le_to_cpu_32(resp->master_func_wait_period) * 100;
5396 info->normal_func_wait_period =
5397 rte_le_to_cpu_32(resp->normal_func_wait_period) * 100;
5398 info->master_func_wait_period_after_reset =
5399 rte_le_to_cpu_32(resp->master_func_wait_period_after_reset) * 100;
5400 info->max_bailout_time_after_reset =
5401 rte_le_to_cpu_32(resp->max_bailout_time_after_reset) * 100;
5402 info->status_regs[BNXT_FW_STATUS_REG] =
5403 rte_le_to_cpu_32(resp->fw_health_status_reg);
5404 info->status_regs[BNXT_FW_HEARTBEAT_CNT_REG] =
5405 rte_le_to_cpu_32(resp->fw_heartbeat_reg);
5406 info->status_regs[BNXT_FW_RECOVERY_CNT_REG] =
5407 rte_le_to_cpu_32(resp->fw_reset_cnt_reg);
5408 info->status_regs[BNXT_FW_RESET_INPROG_REG] =
5409 rte_le_to_cpu_32(resp->reset_inprogress_reg);
5410 info->reg_array_cnt =
5411 rte_le_to_cpu_32(resp->reg_array_cnt);
5413 if (info->reg_array_cnt >= BNXT_NUM_RESET_REG) {
5418 for (i = 0; i < info->reg_array_cnt; i++) {
5419 info->reset_reg[i] =
5420 rte_le_to_cpu_32(resp->reset_reg[i]);
5421 info->reset_reg_val[i] =
5422 rte_le_to_cpu_32(resp->reset_reg_val[i]);
5423 info->delay_after_reset[i] =
5424 resp->delay_after_reset[i];
5429 /* Map the FW status registers */
5431 rc = bnxt_map_fw_health_status_regs(bp);
5434 rte_free(bp->recovery_info);
5435 bp->recovery_info = NULL;
5440 int bnxt_hwrm_fw_reset(struct bnxt *bp)
5442 struct hwrm_fw_reset_output *resp = bp->hwrm_cmd_resp_addr;
5443 struct hwrm_fw_reset_input req = {0};
5449 HWRM_PREP(&req, HWRM_FW_RESET, BNXT_USE_KONG(bp));
5451 req.embedded_proc_type =
5452 HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
5453 req.selfrst_status =
5454 HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
5455 req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;
5457 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
5460 HWRM_CHECK_RESULT();
5466 int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp)
5468 struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr;
5469 struct hwrm_port_ts_query_input req = {0};
5470 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
5477 HWRM_PREP(&req, HWRM_PORT_TS_QUERY, BNXT_USE_CHIMP_MB);
5480 case BNXT_PTP_FLAGS_PATH_TX:
5481 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX;
5483 case BNXT_PTP_FLAGS_PATH_RX:
5484 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX;
5486 case BNXT_PTP_FLAGS_CURRENT_TIME:
5487 flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_CURRENT_TIME;
5491 req.flags = rte_cpu_to_le_32(flags);
5492 req.port_id = rte_cpu_to_le_16(bp->pf->port_id);
5494 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5496 HWRM_CHECK_RESULT();
5499 *timestamp = rte_le_to_cpu_32(resp->ptp_msg_ts[0]);
5501 (uint64_t)(rte_le_to_cpu_32(resp->ptp_msg_ts[1])) << 32;
5508 int bnxt_hwrm_cfa_counter_qcaps(struct bnxt *bp, uint16_t *max_fc)
5512 struct hwrm_cfa_counter_qcaps_input req = {0};
5513 struct hwrm_cfa_counter_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5515 if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5517 "Not a PF or trusted VF. Command not supported\n");
5521 HWRM_PREP(&req, HWRM_CFA_COUNTER_QCAPS, BNXT_USE_KONG(bp));
5522 req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5523 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5525 HWRM_CHECK_RESULT();
5527 *max_fc = rte_le_to_cpu_16(resp->max_rx_fc);
5533 int bnxt_hwrm_ctx_rgtr(struct bnxt *bp, rte_iova_t dma_addr, uint16_t *ctx_id)
5536 struct hwrm_cfa_ctx_mem_rgtr_input req = {.req_type = 0 };
5537 struct hwrm_cfa_ctx_mem_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
5539 if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5541 "Not a PF or trusted VF. Command not supported\n");
5545 HWRM_PREP(&req, HWRM_CFA_CTX_MEM_RGTR, BNXT_USE_KONG(bp));
5547 req.page_level = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_0;
5548 req.page_size = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_2M;
5549 req.page_dir = rte_cpu_to_le_64(dma_addr);
5551 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5553 HWRM_CHECK_RESULT();
5555 *ctx_id = rte_le_to_cpu_16(resp->ctx_id);
5556 PMD_DRV_LOG(DEBUG, "ctx_id = %d\n", *ctx_id);
5563 int bnxt_hwrm_ctx_unrgtr(struct bnxt *bp, uint16_t ctx_id)
5566 struct hwrm_cfa_ctx_mem_unrgtr_input req = {.req_type = 0 };
5567 struct hwrm_cfa_ctx_mem_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
5569 if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5571 "Not a PF or trusted VF. Command not supported\n");
5575 HWRM_PREP(&req, HWRM_CFA_CTX_MEM_UNRGTR, BNXT_USE_KONG(bp));
5577 req.ctx_id = rte_cpu_to_le_16(ctx_id);
5579 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5581 HWRM_CHECK_RESULT();
5587 int bnxt_hwrm_cfa_counter_cfg(struct bnxt *bp, enum bnxt_flow_dir dir,
5588 uint16_t cntr, uint16_t ctx_id,
5589 uint32_t num_entries, bool enable)
5591 struct hwrm_cfa_counter_cfg_input req = {0};
5592 struct hwrm_cfa_counter_cfg_output *resp = bp->hwrm_cmd_resp_addr;
5596 if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5598 "Not a PF or trusted VF. Command not supported\n");
5602 HWRM_PREP(&req, HWRM_CFA_COUNTER_CFG, BNXT_USE_KONG(bp));
5604 req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5605 req.counter_type = rte_cpu_to_le_16(cntr);
5606 flags = enable ? HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_ENABLE :
5607 HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_DISABLE;
5608 flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PULL;
5609 if (dir == BNXT_DIR_RX)
5610 flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_RX;
5611 else if (dir == BNXT_DIR_TX)
5612 flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_TX;
5613 req.flags = rte_cpu_to_le_16(flags);
5614 req.ctx_id = rte_cpu_to_le_16(ctx_id);
5615 req.num_entries = rte_cpu_to_le_32(num_entries);
5617 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5618 HWRM_CHECK_RESULT();
5624 int bnxt_hwrm_cfa_counter_qstats(struct bnxt *bp,
5625 enum bnxt_flow_dir dir,
5627 uint16_t num_entries)
5629 struct hwrm_cfa_counter_qstats_output *resp = bp->hwrm_cmd_resp_addr;
5630 struct hwrm_cfa_counter_qstats_input req = {0};
5631 uint16_t flow_ctx_id = 0;
5635 if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5637 "Not a PF or trusted VF. Command not supported\n");
5641 if (dir == BNXT_DIR_RX) {
5642 flow_ctx_id = bp->flow_stat->rx_fc_in_tbl.ctx_id;
5643 flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_RX;
5644 } else if (dir == BNXT_DIR_TX) {
5645 flow_ctx_id = bp->flow_stat->tx_fc_in_tbl.ctx_id;
5646 flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_TX;
5649 HWRM_PREP(&req, HWRM_CFA_COUNTER_QSTATS, BNXT_USE_KONG(bp));
5650 req.target_id = rte_cpu_to_le_16(bp->fw_fid);
5651 req.counter_type = rte_cpu_to_le_16(cntr);
5652 req.input_flow_ctx_id = rte_cpu_to_le_16(flow_ctx_id);
5653 req.num_entries = rte_cpu_to_le_16(num_entries);
5654 req.flags = rte_cpu_to_le_16(flags);
5655 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
5657 HWRM_CHECK_RESULT();
5663 int bnxt_hwrm_cfa_vfr_alloc(struct bnxt *bp, uint16_t vf_idx)
5665 struct hwrm_cfa_vfr_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5666 struct hwrm_cfa_vfr_alloc_input req = {0};
5669 if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5671 "Not a PF or trusted VF. Command not supported\n");
5675 HWRM_PREP(&req, HWRM_CFA_VFR_ALLOC, BNXT_USE_CHIMP_MB);
5676 req.vf_id = rte_cpu_to_le_16(vf_idx);
5677 snprintf(req.vfr_name, sizeof(req.vfr_name), "%svfr%d",
5678 bp->eth_dev->data->name, vf_idx);
5680 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5681 HWRM_CHECK_RESULT();
5684 PMD_DRV_LOG(DEBUG, "VFR %d allocated\n", vf_idx);
5688 int bnxt_hwrm_cfa_vfr_free(struct bnxt *bp, uint16_t vf_idx)
5690 struct hwrm_cfa_vfr_free_output *resp = bp->hwrm_cmd_resp_addr;
5691 struct hwrm_cfa_vfr_free_input req = {0};
5694 if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5696 "Not a PF or trusted VF. Command not supported\n");
5700 HWRM_PREP(&req, HWRM_CFA_VFR_FREE, BNXT_USE_CHIMP_MB);
5701 req.vf_id = rte_cpu_to_le_16(vf_idx);
5702 snprintf(req.vfr_name, sizeof(req.vfr_name), "%svfr%d",
5703 bp->eth_dev->data->name, vf_idx);
5705 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5706 HWRM_CHECK_RESULT();
5708 PMD_DRV_LOG(DEBUG, "VFR %d freed\n", vf_idx);
5712 int bnxt_hwrm_first_vf_id_query(struct bnxt *bp, uint16_t fid,
5713 uint16_t *first_vf_id)
5716 struct hwrm_func_qcaps_input req = {.req_type = 0 };
5717 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5719 HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
5721 req.fid = rte_cpu_to_le_16(fid);
5723 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5725 HWRM_CHECK_RESULT();
5728 *first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
5735 int bnxt_hwrm_cfa_pair_alloc(struct bnxt *bp, struct bnxt_representor *rep_bp)
5737 struct hwrm_cfa_pair_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5738 struct hwrm_cfa_pair_alloc_input req = {0};
5741 if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5743 "Not a PF or trusted VF. Command not supported\n");
5747 HWRM_PREP(&req, HWRM_CFA_PAIR_ALLOC, BNXT_USE_CHIMP_MB);
5748 req.pair_mode = HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW;
5749 snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
5750 bp->eth_dev->data->name, rep_bp->vf_id);
5752 req.pf_b_id = rte_cpu_to_le_32(rep_bp->rep_based_pf);
5753 req.vf_b_id = rte_cpu_to_le_16(rep_bp->vf_id);
5754 req.vf_a_id = rte_cpu_to_le_16(bp->fw_fid);
5755 req.host_b_id = 1; /* TBD - Confirm if this is OK */
5757 req.enables |= rep_bp->flags & BNXT_REP_Q_R2F_VALID ?
5758 HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_AB_VALID : 0;
5759 req.enables |= rep_bp->flags & BNXT_REP_Q_F2R_VALID ?
5760 HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_BA_VALID : 0;
5761 req.enables |= rep_bp->flags & BNXT_REP_FC_R2F_VALID ?
5762 HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_AB_VALID : 0;
5763 req.enables |= rep_bp->flags & BNXT_REP_FC_F2R_VALID ?
5764 HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_BA_VALID : 0;
5766 req.q_ab = rep_bp->rep_q_r2f;
5767 req.q_ba = rep_bp->rep_q_f2r;
5768 req.fc_ab = rep_bp->rep_fc_r2f;
5769 req.fc_ba = rep_bp->rep_fc_f2r;
5771 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5772 HWRM_CHECK_RESULT();
5775 PMD_DRV_LOG(DEBUG, "%s %d allocated\n",
5776 BNXT_REP_PF(rep_bp) ? "PFR" : "VFR", rep_bp->vf_id);
5780 int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, struct bnxt_representor *rep_bp)
5782 struct hwrm_cfa_pair_free_output *resp = bp->hwrm_cmd_resp_addr;
5783 struct hwrm_cfa_pair_free_input req = {0};
5786 if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
5788 "Not a PF or trusted VF. Command not supported\n");
5792 HWRM_PREP(&req, HWRM_CFA_PAIR_FREE, BNXT_USE_CHIMP_MB);
5793 snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
5794 bp->eth_dev->data->name, rep_bp->vf_id);
5795 req.pf_b_id = rte_cpu_to_le_32(rep_bp->rep_based_pf);
5796 req.vf_id = rte_cpu_to_le_16(rep_bp->vf_id);
5797 req.pair_mode = HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW;
5799 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
5800 HWRM_CHECK_RESULT();
5802 PMD_DRV_LOG(DEBUG, "%s %d freed\n", BNXT_REP_PF(rep_bp) ? "PFR" : "VFR",