/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#define HWRM_SPEC_CODE_1_8_3	0x10803
#define HWRM_VERSION_1_9_1	0x10901
#define HWRM_VERSION_1_9_2	0x10903 /* note: value encodes spec 1.9.3 (maj << 16 | min << 8 | upd) */
struct bnxt_plcmodes_cfg {
	uint32_t	flags;
	uint16_t	jumbo_thresh;
	uint16_t	hds_offset;
	uint16_t	hds_threshold;
};
static int page_getenum(size_t size)

	PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
	return sizeof(int) * 8 - 1;
}

static int page_roundup(size_t size)
{
	return 1 << page_getenum(size);
}
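
/*
 * Illustrative note (assuming the elided body of page_getenum() returns the
 * exponent of the smallest supported power-of-two page that fits "size"):
 * page_roundup(3000) evaluates to 1 << 12 == 4096.
 */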
static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,

	if (rmem->nr_pages > 1) {

		*pg_dir = rte_cpu_to_le_64(rmem->pg_tbl_map);
	} else {
		*pg_dir = rte_cpu_to_le_64(rmem->dma_arr[0]);
	}
/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return 0 on success, -ETIMEDOUT (-110)
 * if the HWRM command times out, or a negative error code if the HWRM
 * command is failed by the FW.
 */
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
				  uint32_t msg_len, bool use_kong_mb)

	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;

	uint16_t max_req_len = bp->max_req_len;
	struct hwrm_short_input short_input = { 0 };
	uint16_t bar_offset = use_kong_mb ?
		GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
	uint16_t mb_trigger_offset = use_kong_mb ?
		GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
	/* Do not send HWRM commands to firmware in error state */
	if (bp->flags & BNXT_FLAG_FATAL_ERROR)

	timeout = bp->hwrm_cmd_timeout;

	if (bp->flags & BNXT_FLAG_SHORT_CMD ||
	    msg_len > bp->max_req_len) {
		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

		memset(short_cmd_req, 0, bp->hwrm_max_ext_req_len);
		memcpy(short_cmd_req, req, msg_len);

		short_input.req_type = rte_cpu_to_le_16(req->req_type);
		short_input.signature = rte_cpu_to_le_16(
				HWRM_SHORT_INPUT_SIGNATURE_SHORT_CMD);
		short_input.size = rte_cpu_to_le_16(msg_len);
		short_input.req_addr =
			rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

		data = (uint32_t *)&short_input;
		msg_len = sizeof(short_input);

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}
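
	/*
	 * At this point "data"/"msg_len" describe either the full request or,
	 * on the short-command path above, just the 16-byte hwrm_short_input
	 * descriptor; in the short case the firmware fetches the full request
	 * from host memory via short_input.req_addr.
	 */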
	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + bar_offset + i;
		rte_write32(*data, bar);

	/* Zero the rest of the request space */
	for (; i < max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + bar_offset + i;

	/* Ring channel doorbell */
	bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
	/*
	 * Make sure the channel doorbell ring command completes before
	 * reading the response, to avoid getting stale or invalid data.
	 */
	/* Poll for the valid bit */
	for (i = 0; i < timeout; i++) {
		/* Sanity check on the resp->resp_len */

		if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)

	/* Suppress VER_GET timeout messages during reset recovery */
	if (bp->flags & BNXT_FLAG_FW_RESET &&
	    rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)

	PMD_DRV_LOG(ERR,
		    "Error(timeout) sending msg 0x%04x, seq_id %d\n",
		    req->req_type, req->seq_id);
/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() returns from the calling function on failure and
 * releases the spinlock only on that early-return path. If the regular int
 * return codes are not used by the function, HWRM_CHECK_RESULT() should not
 * be used directly; rather it should be copied and modified to suit the
 * function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type, kong) do { \
	rte_spinlock_lock(&bp->hwrm_lock); \
	if (bp->hwrm_cmd_resp_addr == NULL) { \
		rte_spinlock_unlock(&bp->hwrm_lock); \

	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	(req)->req_type = rte_cpu_to_le_16(type); \
	(req)->cmpl_ring = rte_cpu_to_le_16(-1); \
	(req)->seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) : \
		rte_cpu_to_le_16(bp->chimp_cmd_seq++); \
	(req)->target_id = rte_cpu_to_le_16(0xffff); \
	(req)->resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
#define HWRM_CHECK_RESULT_SILENT() do {\

		rte_spinlock_unlock(&bp->hwrm_lock); \

	if (resp->error_code) { \
		rc = rte_le_to_cpu_16(resp->error_code); \
		rte_spinlock_unlock(&bp->hwrm_lock); \
#define HWRM_CHECK_RESULT() do {\

		PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \

		else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \

		else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \

		else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \

		else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \

	if (resp->error_code) { \
		rc = rte_le_to_cpu_16(resp->error_code); \
		if (resp->resp_len >= 16) { \
			struct hwrm_err_output *tmp_hwrm_err_op = \

				    "error %d:%d:%08x:%04x\n", \
				    rc, tmp_hwrm_err_op->cmd_err, \

				    tmp_hwrm_err_op->opaque_0), \

				    tmp_hwrm_err_op->opaque_1)); \

			PMD_DRV_LOG(ERR, "error %d\n", rc); \

		rte_spinlock_unlock(&bp->hwrm_lock); \
		if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \

		else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \

		else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \

		else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \

		else if (rc == HWRM_ERR_CODE_HOT_RESET_PROGRESS) \

#define HWRM_UNLOCK()		rte_spinlock_unlock(&bp->hwrm_lock)
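
/*
 * Usage sketch (illustrative only, compiled out): the canonical shape of a
 * command helper built from the macros above, mirroring e.g.
 * bnxt_hwrm_func_reset() below. Any real command follows this sequence:
 * HWRM_PREP() -> bnxt_hwrm_send_message() -> HWRM_CHECK_RESULT() ->
 * read response fields -> HWRM_UNLOCK().
 */
#ifdef BNXT_HWRM_USAGE_SKETCH
static int bnxt_hwrm_example_cmd(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	/* Grabs bp->hwrm_lock, zeroes the response buffer and fills the
	 * common request header (type, seq_id, resp_addr, ...).
	 */
	HWRM_PREP(&req, HWRM_FUNC_RESET, BNXT_USE_CHIMP_MB);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	/* Early-returns (releasing the lock) on timeout or FW error. */
	HWRM_CHECK_RESULT();

	/* ... read fields from *resp while still holding the lock ... */

	HWRM_UNLOCK();

	return rc;
}
#endif /* BNXT_HWRM_USAGE_SKETCH */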
int bnxt_hwrm_tf_message_direct(struct bnxt *bp,

	bool mailbox = BNXT_USE_CHIMP_MB;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;

		mailbox = BNXT_USE_KONG(bp);

	HWRM_PREP(req, msg_type, mailbox);

	rc = bnxt_hwrm_send_message(bp, req, msg_len, mailbox);

		memcpy(resp_msg, resp, resp_len);
int bnxt_hwrm_tf_message_tunneled(struct bnxt *bp,

				  uint32_t *tf_response_code,

				  uint32_t response_len)

	struct hwrm_cfa_tflib_input req = { .req_type = 0 };
	struct hwrm_cfa_tflib_output *resp = bp->hwrm_cmd_resp_addr;
	bool mailbox = BNXT_USE_CHIMP_MB;

	if (msg_len > sizeof(req.tf_req))

		mailbox = BNXT_USE_KONG(bp);

	HWRM_PREP(&req, HWRM_TF, mailbox);
	/* Build the request using the user-supplied request payload.
	 * The TLV request size is checked at build time against the HWRM
	 * request max size, thus no checking is required here.
	 */
	req.tf_type = tf_type;
	req.tf_subtype = tf_subtype;
	memcpy(req.tf_req, msg, msg_len);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), mailbox);

	/* Copy the resp to the user-provided response buffer */
	if (response != NULL)
		/* Post-process the response data. We need to copy only
		 * the 'payload', as the HWRM data structure really is
		 * HWRM header + msg header + payload and TFLIB
		 * only provided a payload placeholder.
		 */
		if (response_len != 0) {

	/* Extract the internal tflib response code */
	*tf_response_code = resp->tf_resp_code;
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)

	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
				 struct bnxt_vnic_info *vnic,

				 struct bnxt_vlan_table_entry *vlan_table)

	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)

	HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	if (vnic->flags & BNXT_VNIC_INFO_BCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
	if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;

	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;

	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI) {
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	} else if (vnic->flags & BNXT_VNIC_INFO_MCAST) {
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
		req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
		req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
	}

		if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
			mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
		req.vlan_tag_tbl_addr =
			rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
		req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);

	req.mask = rte_cpu_to_le_32(mask);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
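
/*
 * Usage sketch (illustrative only, compiled out): a caller enables
 * promiscuous mode by setting the VNIC flag and re-issuing the rx-mask
 * command; no multicast/VLAN tables are needed for that case.
 */
#ifdef BNXT_HWRM_USAGE_SKETCH
static int bnxt_example_enable_promisc(struct bnxt *bp,
				       struct bnxt_vnic_info *vnic)
{
	vnic->flags |= BNXT_VNIC_INFO_PROMISC;
	return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}
#endif /* BNXT_HWRM_USAGE_SKETCH */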
int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,

			struct bnxt_vlan_antispoof_table_entry *vlan_table)

	struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
	struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
				bp->hwrm_cmd_resp_addr;

	/*
	 * Older HWRM versions did not support this command, and the
	 * set_rx_mask list was used for anti-spoof. In 1.8.0, the TX path
	 * configuration was removed from the set_rx_mask call, and this
	 * command was added. (bp->fw_ver packs the FW version as
	 * (maj << 24) | (min << 16) | (bld << 8) | rsvd, so the checks below
	 * compare against 1.8.0, 1.7.8.0 and 1.7.8.11.)
	 *
	 * This command is also present from 1.7.8.11 and higher,
	 * as well as in 1.7.8.0 (hence the exact-match check below).
	 */
	if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
		if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
			if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
	HWRM_PREP(&req, HWRM_CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(fid);

	req.vlan_tag_mask_tbl_addr =
		rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
	req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
			      struct bnxt_filter_info *filter)

	struct bnxt_filter_info *l2_filter = filter;
	struct bnxt_vnic_info *vnic = NULL;
	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (filter->fw_l2_filter_id == UINT64_MAX)

	if (filter->matching_l2_fltr_ptr)
		l2_filter = filter->matching_l2_fltr_ptr;

	PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n",
		    filter, l2_filter, l2_filter->l2_ref_cnt);
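
	/*
	 * L2 filters can be shared between flows: drop one reference here
	 * and free the filter in firmware only when the last reference
	 * goes away.
	 */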
	if (l2_filter->l2_ref_cnt == 0)

	if (l2_filter->l2_ref_cnt > 0)
		l2_filter->l2_ref_cnt--;

	if (l2_filter->l2_ref_cnt > 0)

	HWRM_PREP(&req, HWRM_CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);

	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	filter->fw_l2_filter_id = UINT64_MAX;
	if (l2_filter->l2_ref_cnt == 0) {
		vnic = l2_filter->vnic;

			STAILQ_REMOVE(&vnic->filter, l2_filter,
				      bnxt_filter_info, next);
			bnxt_free_filter(bp, l2_filter);
int bnxt_hwrm_set_l2_filter(struct bnxt *bp,

			    struct bnxt_filter_info *filter)

	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	const struct rte_eth_vmdq_rx_conf *conf =
		&dev_conf->rx_adv_conf.vmdq_rx_conf;
	uint32_t enables = 0;
	uint16_t j = dst_id - 1;

	/* TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ? */
	if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
	    conf->pool_map[j].pools & (1UL << j)) {

			    "Add vlan %u to vmdq pool %u\n",
			    conf->pool_map[j].vlan_id, j);

		filter->l2_ivlan = conf->pool_map[j].vlan_id;

			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
	}

	if (filter->fw_l2_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_l2_filter(bp, filter);

	HWRM_PREP(&req, HWRM_CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
	/* PMD does not support XDP and RoCE */
	filter->flags |= HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_XDP_DISABLE |
		HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_TRAFFIC_L2;
	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr,

	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask,

	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;

	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
		req.l2_ivlan = filter->l2_ivlan;

	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;

	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
		req.l2_ivlan_mask = filter->l2_ivlan_mask;
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
		req.src_id = rte_cpu_to_le_32(filter->src_id);
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
		req.src_type = filter->src_type;
	if (filter->pri_hint) {
		req.pri_hint = filter->pri_hint;
		req.l2_filter_id_hint =
			rte_cpu_to_le_64(filter->l2_filter_id_hint);
	}

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
	filter->flow_id = rte_le_to_cpu_32(resp->flow_id);

	filter->l2_ref_cnt++;
int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
	struct hwrm_port_mac_cfg_input req = {.req_type = 0};
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

	HWRM_PREP(&req, HWRM_PORT_MAC_CFG, BNXT_USE_CHIMP_MB);

		flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;

			HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
	if (ptp->tx_tstamp_en)
		flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;

			HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
	req.flags = rte_cpu_to_le_32(flags);
	req.enables = rte_cpu_to_le_32
		(HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
	req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)

	struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
	struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

	HWRM_PREP(&req, HWRM_PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);

	req.port_id = rte_cpu_to_le_16(bp->pf->port_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	if (!BNXT_CHIP_THOR(bp) &&
	    !(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))

	if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS)
		bp->flags |= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS;

	ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);

	if (!BNXT_CHIP_THOR(bp)) {
		ptp->rx_regs[BNXT_PTP_RX_TS_L] =
			rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
		ptp->rx_regs[BNXT_PTP_RX_TS_H] =
			rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
		ptp->rx_regs[BNXT_PTP_RX_SEQ] =
			rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
		ptp->rx_regs[BNXT_PTP_RX_FIFO] =
			rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
		ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
			rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
		ptp->tx_regs[BNXT_PTP_TX_TS_L] =
			rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
		ptp->tx_regs[BNXT_PTP_TX_TS_H] =
			rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
		ptp->tx_regs[BNXT_PTP_TX_SEQ] =
			rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
		ptp->tx_regs[BNXT_PTP_TX_FIFO] =
			rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
void bnxt_hwrm_free_vf_info(struct bnxt *bp)

	for (i = 0; i < bp->pf->max_vfs; i++) {
		rte_free(bp->pf->vf_info[i].vlan_table);
		bp->pf->vf_info[i].vlan_table = NULL;
		rte_free(bp->pf->vf_info[i].vlan_as_table);
		bp->pf->vf_info[i].vlan_as_table = NULL;
	}
	rte_free(bp->pf->vf_info);
	bp->pf->vf_info = NULL;
}
static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)

	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t new_max_vfs;

	HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	flags = rte_le_to_cpu_32(resp->flags);

		bp->pf->port_id = resp->port_id;
		bp->pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		bp->pf->total_vfs = rte_le_to_cpu_16(resp->max_vfs);
		new_max_vfs = bp->pdev->max_vfs;
		if (new_max_vfs != bp->pf->max_vfs) {

				bnxt_hwrm_free_vf_info(bp);
			bp->pf->vf_info = rte_zmalloc("bnxt_vf_info",
				sizeof(bp->pf->vf_info[0]) * new_max_vfs, 0);
			if (bp->pf->vf_info == NULL) {
				PMD_DRV_LOG(ERR, "Failed to alloc vf info\n");

			bp->pf->max_vfs = new_max_vfs;
			for (i = 0; i < new_max_vfs; i++) {
				bp->pf->vf_info[i].fid =
					bp->pf->first_vf_id + i;
				bp->pf->vf_info[i].vlan_table =
					rte_zmalloc("VF VLAN table",

				if (bp->pf->vf_info[i].vlan_table == NULL)

					"Failed to alloc VLAN table for VF %d\n",

						bp->pf->vf_info[i].vlan_table);
				bp->pf->vf_info[i].vlan_as_table =
					rte_zmalloc("VF VLAN AS table",

				if (bp->pf->vf_info[i].vlan_as_table == NULL)

					"Failed to alloc VLAN AS table for VF %d\n",

						bp->pf->vf_info[i].vlan_as_table);
				STAILQ_INIT(&bp->pf->vf_info[i].filter);
	bp->fw_fid = rte_le_to_cpu_32(resp->fid);
	if (!bnxt_check_zero_bytes(resp->mac_address, RTE_ETHER_ADDR_LEN)) {
		bp->flags |= BNXT_FLAG_DFLT_MAC_SET;
		memcpy(bp->mac_addr, &resp->mac_address, RTE_ETHER_ADDR_LEN);

		bp->flags &= ~BNXT_FLAG_DFLT_MAC_SET;

	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
	bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
	bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
	if (!BNXT_CHIP_THOR(bp))
		bp->max_l2_ctx += bp->max_rx_em_flows;
	/* TODO: For now, do not support VMDq/RFS on VFs. */

			bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);

	PMD_DRV_LOG(DEBUG, "Max l2_cntxts is %d vnics is %d\n",
		    bp->max_l2_ctx, bp->max_vnics);
	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);

		bp->pf->total_vnics = rte_le_to_cpu_16(resp->max_vnics);
	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
		bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
		PMD_DRV_LOG(DEBUG, "PTP SUPPORTED\n");

		bnxt_hwrm_ptp_qcfg(bp);
	}

	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
		bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;

	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
		bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
		PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");
	}

	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
		bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;

	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE)
		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
int bnxt_hwrm_func_qcaps(struct bnxt *bp)

	rc = __bnxt_hwrm_func_qcaps(bp);
	if (!rc && bp->hwrm_spec_code >= HWRM_SPEC_CODE_1_8_3) {
		rc = bnxt_alloc_ctx_mem(bp);

		rc = bnxt_hwrm_func_resc_qcaps(bp);

			bp->flags |= BNXT_FLAG_NEW_RM;

		/*
		 * bnxt_hwrm_func_resc_qcaps() can fail and cause an init
		 * failure, but that error can be ignored, so return success.
		 */
/* VNIC capabilities apply to all VNICs, so there is no need to pass a vnic_id. */
int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)

	struct hwrm_vnic_qcaps_input req = {.req_type = 0 };
	struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_VNIC_QCAPS, BNXT_USE_CHIMP_MB);

	req.target_id = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	if (rte_le_to_cpu_32(resp->flags) &
	    HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP) {
		bp->vnic_cap_flags |= BNXT_VNIC_CAP_COS_CLASSIFY;
		PMD_DRV_LOG(INFO, "CoS assignment capability enabled\n");
	}

	bp->max_tpa_v2 = rte_le_to_cpu_16(resp->max_aggs_supported);
int bnxt_hwrm_func_reset(struct bnxt *bp)

	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_FUNC_RESET, BNXT_USE_CHIMP_MB);

	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
int bnxt_hwrm_func_driver_register(struct bnxt *bp)

	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)

	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
		flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;

	/* PFs and trusted VFs should indicate support for the Master
	 * capability on non-Stingray platforms.
	 */
	if ((BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) && !BNXT_STINGRAY(bp))
		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;

	HWRM_PREP(&req, HWRM_FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

		req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
		memcpy(req.vf_req_fwd, bp->pf->vf_req_fwd,
		       RTE_MIN(sizeof(req.vf_req_fwd),
			       sizeof(bp->pf->vf_req_fwd)));

		/*
		 * A PF can sniff the HWRM API issued by a VF. This can be set
		 * up by the Linux driver and inherited by the DPDK PF driver.
		 * Clear this HWRM sniffer list in FW because the DPDK PF
		 * driver does not support it.
		 */
		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE;
	req.flags = rte_cpu_to_le_32(flags);

	req.async_event_fwd[0] |=
		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
				 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
				 ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
		req.async_event_fwd[0] |=
			rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
	req.async_event_fwd[1] |=
		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
				 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);

		req.async_event_fwd[1] |=
			rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DBG_NOTIFICATION);

	if (BNXT_VF_IS_TRUSTED(bp))
		req.async_event_fwd[1] |=
			rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	flags = rte_le_to_cpu_32(resp->flags);
	if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
		bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;

	bp->flags |= BNXT_FLAG_REGISTERED;
int bnxt_hwrm_check_vf_rings(struct bnxt *bp)
{
	if (!(BNXT_VF(bp) && (bp->flags & BNXT_FLAG_NEW_RM)))

	return bnxt_hwrm_func_reserve_vf_resc(bp, true);
}

int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)

	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_vf_cfg_input req = {0};

	HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);

	enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
		  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
		  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS  |
		  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
		  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS;

	if (BNXT_HAS_RING_GRPS(bp)) {
		enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
		req.num_hw_ring_grps = rte_cpu_to_le_16(bp->rx_nr_rings);
	}

	req.num_tx_rings = rte_cpu_to_le_16(bp->tx_nr_rings);
	req.num_rx_rings = rte_cpu_to_le_16(bp->rx_nr_rings *
					    AGG_RING_MULTIPLIER);
	req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
	req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +

					      BNXT_NUM_ASYNC_CPR(bp));
	req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
	if (bp->vf_resv_strategy ==
	    HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
		enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_VNICS |
			   HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS |
			   HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
		req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
		req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
		req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
	} else if (bp->vf_resv_strategy ==
		   HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MAXIMAL) {
		enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
		req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
	}

		flags = HWRM_FUNC_VF_CFG_INPUT_FLAGS_TX_ASSETS_TEST |
			HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST |
			HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST |
			HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST |
			HWRM_FUNC_VF_CFG_INPUT_FLAGS_STAT_CTX_ASSETS_TEST |
			HWRM_FUNC_VF_CFG_INPUT_FLAGS_VNIC_ASSETS_TEST;

	if (test && BNXT_HAS_RING_GRPS(bp))
		flags |= HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST;

	req.flags = rte_cpu_to_le_32(flags);
	req.enables |= rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

		HWRM_CHECK_RESULT_SILENT();

		HWRM_CHECK_RESULT();
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)

	struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_resource_qcaps_input req = {0};

	HWRM_PREP(&req, HWRM_FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT_SILENT();

	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	/* func_resource_qcaps does not return max_rx_em_flows.
	 * So use the value provided by func_qcaps.
	 */
	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
	if (!BNXT_CHIP_THOR(bp))
		bp->max_l2_ctx += bp->max_rx_em_flows;
	bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);

	bp->max_nq_rings = rte_le_to_cpu_16(resp->max_msix);
	bp->vf_resv_strategy = rte_le_to_cpu_16(resp->vf_reservation_strategy);
	if (bp->vf_resv_strategy >
	    HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC)
		bp->vf_resv_strategy =
		HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL;
int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)

	struct hwrm_ver_get_input req = {.req_type = 0 };
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t fw_version;
	uint16_t max_resp_len;
	char type[RTE_MEMZONE_NAMESIZE];
	uint32_t dev_caps_cfg;

	bp->max_req_len = HWRM_MAX_REQ_LEN;
	bp->hwrm_cmd_timeout = timeout;
	HWRM_PREP(&req, HWRM_VER_GET, BNXT_USE_CHIMP_MB);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	if (bp->flags & BNXT_FLAG_FW_RESET)
		HWRM_CHECK_RESULT_SILENT();
	else
		HWRM_CHECK_RESULT();

	PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
		    resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
		    resp->hwrm_intf_upd_8b, resp->hwrm_fw_maj_8b,
		    resp->hwrm_fw_min_8b, resp->hwrm_fw_bld_8b);
	bp->fw_ver = (resp->hwrm_fw_maj_8b << 24) |
		     (resp->hwrm_fw_min_8b << 16) |
		     (resp->hwrm_fw_bld_8b << 8) |
		     resp->hwrm_fw_rsvd_8b;
	PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
		    HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

	fw_version = resp->hwrm_intf_maj_8b << 16;
	fw_version |= resp->hwrm_intf_min_8b << 8;
	fw_version |= resp->hwrm_intf_upd_8b;
	bp->hwrm_spec_code = fw_version;
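	/*
	 * hwrm_spec_code thus packs maj.min.upd as 0xMMmmuu; e.g. interface
	 * version 1.9.1 becomes 0x10901, matching HWRM_VERSION_1_9_1 above.
	 */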
	/* def_req_timeout value is in milliseconds */
	bp->hwrm_cmd_timeout = rte_le_to_cpu_16(resp->def_req_timeout);
	/* convert timeout to usec */
	bp->hwrm_cmd_timeout *= 1000;
	if (!bp->hwrm_cmd_timeout)
		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;

	if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
		PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");

	if (bp->max_req_len > resp->max_req_win_len) {
		PMD_DRV_LOG(ERR, "Unsupported request length\n");

	bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
	bp->hwrm_max_ext_req_len = rte_le_to_cpu_16(resp->max_ext_req_len);
	if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
		bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;

	max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
	dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

	if (bp->max_resp_len != max_resp_len) {
		sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT,
			bp->pdev->addr.domain, bp->pdev->addr.bus,
			bp->pdev->addr.devid, bp->pdev->addr.function);

		rte_free(bp->hwrm_cmd_resp_addr);

		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
		if (bp->hwrm_cmd_resp_addr == NULL) {

		bp->hwrm_cmd_resp_dma_addr =
			rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
		if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
			PMD_DRV_LOG(ERR,
				    "Unable to map response buffer to physical memory.\n");

		bp->max_resp_len = max_resp_len;
	if ((dev_caps_cfg &
	     HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg &
	     HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
		PMD_DRV_LOG(DEBUG, "Short command supported\n");
		bp->flags |= BNXT_FLAG_SHORT_CMD;
	}

	if (((dev_caps_cfg &
	      HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	     (dev_caps_cfg &
	      HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) ||
	    bp->hwrm_max_ext_req_len > HWRM_MAX_REQ_LEN) {
		sprintf(type, "bnxt_hwrm_short_" PCI_PRI_FMT,
			bp->pdev->addr.domain, bp->pdev->addr.bus,
			bp->pdev->addr.devid, bp->pdev->addr.function);

		rte_free(bp->hwrm_short_cmd_req_addr);

		bp->hwrm_short_cmd_req_addr =
			rte_malloc(type, bp->hwrm_max_ext_req_len, 0);
		if (bp->hwrm_short_cmd_req_addr == NULL) {

		bp->hwrm_short_cmd_req_dma_addr =
			rte_malloc_virt2iova(bp->hwrm_short_cmd_req_addr);
		if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
			rte_free(bp->hwrm_short_cmd_req_addr);
			PMD_DRV_LOG(ERR,
				    "Unable to map buffer to physical memory.\n");

	if (dev_caps_cfg &
	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) {
		bp->flags |= BNXT_FLAG_KONG_MB_EN;
		PMD_DRV_LOG(DEBUG, "Kong mailbox channel enabled\n");
	}

	if (dev_caps_cfg &
	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
		PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");

	if (dev_caps_cfg &
	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) {
		bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_MGMT;
		PMD_DRV_LOG(DEBUG, "FW supports advanced flow management\n");
	}

	if (dev_caps_cfg &
	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED) {
		PMD_DRV_LOG(DEBUG, "FW supports advanced flow counters\n");
		bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_COUNTERS;
	}
int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)

	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))

	HWRM_PREP(&req, HWRM_FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)

	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(&req, HWRM_PORT_PHY_CFG, BNXT_USE_CHIMP_MB);

	if (conf->link_up) {
		/* Setting a fixed speed while autoneg is on, so disable autoneg */
		if (bp->link_info->auto_mode && conf->link_speed) {
			req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
			PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
		}

		req.flags = rte_cpu_to_le_32(conf->phy_flags);
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (!conf->link_speed) {
			/* No speeds specified. Enable AutoNeg - all speeds */
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;

				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;

		if (bp->link_info->link_signal_mode) {

				HWRM_PORT_PHY_CFG_IN_EN_FORCE_PAM4_LINK_SPEED;
			req.force_pam4_link_speed =
				rte_cpu_to_le_16(conf->link_speed);

			req.force_link_speed =
				rte_cpu_to_le_16(conf->link_speed);

		/* AutoNeg - Advertise the speeds specified. */
		if (conf->auto_link_speed_mask &&
		    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {

				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
			req.auto_link_speed_mask =
				conf->auto_link_speed_mask;
			if (conf->auto_pam4_link_speeds) {

					HWRM_PORT_PHY_CFG_IN_EN_AUTO_PAM4_LINK_SPD_MASK;
				req.auto_link_pam4_speed_mask =
					conf->auto_pam4_link_speeds;

					HWRM_PORT_PHY_CFG_IN_EN_AUTO_LINK_SPEED_MASK;

		if (conf->auto_link_speed &&
		    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE))

				HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;

		req.auto_duplex = conf->duplex;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		req.force_pause = conf->force_pause;
		/* Set force_pause if there is no auto-pause or if a force is requested */
		if (req.auto_pause && !req.force_pause)
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;

			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

		req.enables = rte_cpu_to_le_32(enables);

		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
		PMD_DRV_LOG(INFO, "Force Link Down\n");

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)

	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	link_info->phy_link_status = resp->link;
	link_info->link_up =
		(link_info->phy_link_status ==
		 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
	link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
	link_info->duplex = resp->duplex_cfg;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;
	link_info->phy_type = resp->phy_type;
	link_info->media_type = resp->media_type;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;
	link_info->link_signal_mode = rte_le_to_cpu_16(resp->link_signal_mode);
	link_info->force_pam4_link_speed =
		rte_le_to_cpu_16(resp->force_pam4_link_speed);
	link_info->support_pam4_speeds =
		rte_le_to_cpu_16(resp->support_pam4_speeds);
	link_info->auto_pam4_link_speeds =
		rte_le_to_cpu_16(resp->auto_pam4_link_speed_mask);

	PMD_DRV_LOG(DEBUG, "Link Speed:%d,Auto:%d:%x:%x,Support:%x,Force:%x\n",
		    link_info->link_speed, link_info->auto_mode,
		    link_info->auto_link_speed, link_info->auto_link_speed_mask,
		    link_info->support_speeds, link_info->force_link_speed);
int bnxt_hwrm_port_phy_qcaps(struct bnxt *bp)

	struct hwrm_port_phy_qcaps_input req = {0};
	struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_link_info *link_info = bp->link_info;

	if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))

	HWRM_PREP(&req, HWRM_PORT_PHY_QCAPS, BNXT_USE_CHIMP_MB);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	bp->port_cnt = resp->port_cnt;
	if (resp->supported_speeds_auto_mode)
		link_info->support_auto_speeds =
			rte_le_to_cpu_16(resp->supported_speeds_auto_mode);
	if (resp->supported_pam4_speeds_auto_mode)
		link_info->support_pam4_auto_speeds =
			rte_le_to_cpu_16(resp->supported_pam4_speeds_auto_mode);
static bool bnxt_find_lossy_profile(struct bnxt *bp)

	for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
		if (bp->tx_cos_queue[i].profile ==
		    HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
			bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;

static void bnxt_find_first_valid_profile(struct bnxt *bp)

	/* The queue id shares the 0xff "unknown"/invalid sentinel with the
	 * service profile, so both fields are compared against
	 * HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN here.
	 */
	for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
		if (bp->tx_cos_queue[i].profile !=
		    HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN &&
		    bp->tx_cos_queue[i].id !=
		    HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN) {
			bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)

	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;

	HWRM_PREP(&req, HWRM_QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);

	req.flags = rte_cpu_to_le_32(dir);
	/* HWRM version >= 1.9.1: set DRV_QMAP_CAP only if CoS classification
	 * is not required.
	 */
	if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1 &&
	    !(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))

			HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
		GET_TX_QUEUE_INFO(0);
		GET_TX_QUEUE_INFO(1);
		GET_TX_QUEUE_INFO(2);
		GET_TX_QUEUE_INFO(3);
		GET_TX_QUEUE_INFO(4);
		GET_TX_QUEUE_INFO(5);
		GET_TX_QUEUE_INFO(6);
		GET_TX_QUEUE_INFO(7);

		GET_RX_QUEUE_INFO(0);
		GET_RX_QUEUE_INFO(1);
		GET_RX_QUEUE_INFO(2);
		GET_RX_QUEUE_INFO(3);
		GET_RX_QUEUE_INFO(4);
		GET_RX_QUEUE_INFO(5);
		GET_RX_QUEUE_INFO(6);
		GET_RX_QUEUE_INFO(7);

	if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX)

	if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
		bp->tx_cosq_id[0] = bp->tx_cos_queue[0].id;

		/* iterate and find the CoS queue profile to use for Tx */
		if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
			for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
				if (bp->tx_cos_queue[i].id != 0xff)
					bp->tx_cosq_id[j++] =
						bp->tx_cos_queue[i].id;
			}

			/* When CoS classification is disabled, for normal NIC
			 * operations, ideally we should look to use LOSSY.
			 * If not found, fall back to the first valid profile.
			 */
			if (!bnxt_find_lossy_profile(bp))
				bnxt_find_first_valid_profile(bp);

	bp->max_tc = resp->max_configurable_queues;
	bp->max_lltc = resp->max_configurable_lossless_queues;
	if (bp->max_tc > BNXT_MAX_QUEUE)
		bp->max_tc = BNXT_MAX_QUEUE;
	bp->max_q = bp->max_tc;
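
	/* The TX pass is done; switch direction and query the RX path next. */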
	if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
		dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX;
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id,
			 uint16_t tx_cosq_id)

	uint32_t enables = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct rte_mempool *mb_pool;
	uint16_t rx_buf_size;

	HWRM_PREP(&req, HWRM_RING_ALLOC, BNXT_USE_CHIMP_MB);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);
	req.length = rte_cpu_to_le_32(ring->ring_size);
	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.ring_type = ring_type;
		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
		req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
		req.queue_id = rte_cpu_to_le_16(tx_cosq_id);
		if (stats_ctx_id != INVALID_STATS_CTX_ID)

				HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;

	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
		req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
		if (BNXT_CHIP_THOR(bp)) {
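			/*
			 * Advertise the usable receive buffer size: the mbuf
			 * data room less headroom, capped at the maximum
			 * supported packet length.
			 */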
			mb_pool = bp->rx_queues[0]->mb_pool;
			rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
				      RTE_PKTMBUF_HEADROOM;
			rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
			req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);

				HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;
		}
		if (stats_ctx_id != INVALID_STATS_CTX_ID)

				HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
		req.ring_type = ring_type;
		if (BNXT_HAS_NQ(bp)) {
			/* Association of cp ring with nq */
			req.nq_ring_id = rte_cpu_to_le_16(cmpl_ring_id);

				HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID;
		}
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;

	case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
		req.ring_type = ring_type;
		req.page_size = BNXT_PAGE_SHFT;
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;

	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
		req.ring_type = ring_type;
		req.rx_ring_id = rte_cpu_to_le_16(ring->fw_rx_ring_id);

		mb_pool = bp->rx_queues[0]->mb_pool;
		rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
			      RTE_PKTMBUF_HEADROOM;
		rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
		req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);

		req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
		enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
			   HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
			   HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;

		PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",

	req.enables = rte_cpu_to_le_32(enables);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		switch (ring_type) {
		case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
			PMD_DRV_LOG(ERR,
				    "hwrm_ring_alloc cp failed. rc:%d\n", rc);

		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
			PMD_DRV_LOG(ERR,
				    "hwrm_ring_alloc rx failed. rc:%d\n", rc);

		case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
			PMD_DRV_LOG(ERR,
				    "hwrm_ring_alloc rx agg failed. rc:%d\n",
				    rc);

		case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
			PMD_DRV_LOG(ERR,
				    "hwrm_ring_alloc tx failed. rc:%d\n", rc);

		case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
			PMD_DRV_LOG(ERR,
				    "hwrm_ring_alloc nq failed. rc:%d\n", rc);

		default:
			PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)

	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_RING_FREE, BNXT_USE_CHIMP_MB);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",

		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",

		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",

		case HWRM_RING_FREE_INPUT_RING_TYPE_NQ:
			PMD_DRV_LOG(ERR,
				    "hwrm_ring_free nq failed. rc:%d\n", rc);

		case HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG:
			PMD_DRV_LOG(ERR,
				    "hwrm_ring_free agg failed. rc:%d\n", rc);

		default:
			PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)

	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);

	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	bp->grp_info[idx].fw_grp_id = rte_le_to_cpu_16(resp->ring_group_id);

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)

	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_RING_GRP_FREE, BNXT_USE_CHIMP_MB);

	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)

	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)

	HWRM_PREP(&req, HWRM_STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);

	req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			     unsigned int idx __rte_unused)

	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);

	req.update_period_ms = rte_cpu_to_le_32(0);

	req.stats_dma_addr = rte_cpu_to_le_64(cpr->hw_stats_map);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	cpr->hw_stats_ctx_id = rte_le_to_cpu_32(resp->stat_ctx_id);

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			    unsigned int idx __rte_unused)

	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_STAT_CTX_FREE, BNXT_USE_CHIMP_MB);

	req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)

	struct hwrm_vnic_alloc_input req = { 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	if (!BNXT_HAS_RING_GRPS(bp))
		goto skip_ring_grps;

	/* map ring groups to this vnic */
	PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
		    vnic->start_grp_id, vnic->end_grp_id);
	for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;

	vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
	vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;

skip_ring_grps:
	vnic->mru = BNXT_VNIC_MRU(bp->eth_dev->data->mtu);
	HWRM_PREP(&req, HWRM_VNIC_ALLOC, BNXT_USE_CHIMP_MB);

	if (vnic->func_default)

		rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);

	PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
					struct bnxt_vnic_info *vnic,
					struct bnxt_plcmodes_cfg *pmode)

	struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	pmode->flags = rte_le_to_cpu_32(resp->flags);
	/* dflt_vnic bit doesn't exist in the _cfg command */
	pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
	pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
	pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
	pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
				       struct bnxt_vnic_info *vnic,
				       struct bnxt_plcmodes_cfg *pmode)

	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);

	HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.flags = rte_cpu_to_le_32(pmode->flags);
	req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
	req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
	req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
	req.enables = rte_cpu_to_le_32(
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
	);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)

	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_plcmodes_cfg pmodes = { 0 };
	uint32_t ctx_enable_flag = 0;
	uint32_t enables = 0;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);

	/* Save the current placement-mode settings; they are restored after
	 * the VNIC has been reconfigured below.
	 */
	rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
	HWRM_PREP(&req, HWRM_VNIC_CFG, BNXT_USE_CHIMP_MB);

	if (BNXT_CHIP_THOR(bp)) {
		int dflt_rxq = vnic->start_grp_id;
		struct bnxt_rx_ring_info *rxr;
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_rx_queue *rxq;

		/*
		 * The first active receive ring is used as the VNIC
		 * default receive ring. If there are no active receive
		 * rings (all corresponding receive queues are stopped),
		 * the first receive ring is used.
		 */
		for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
			rxq = bp->eth_dev->data->rx_queues[i];
			if (rxq->rx_started) {

		rxq = bp->eth_dev->data->rx_queues[dflt_rxq];

		req.default_rx_ring_id =
			rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
		req.default_cmpl_ring_id =
			rte_cpu_to_le_16(cpr->cp_ring_struct->fw_ring_id);
		enables = HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_RX_RING_ID |
			  HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID;
	/* Only RSS is supported for now; TBD: CoS & LB */
	enables = HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP;
	if (vnic->lb_rule != 0xffff)
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
	if (vnic->cos_rule != 0xffff)
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
	if (vnic->rss_rule != (uint16_t)HWRM_NA_SIGNATURE) {
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
	}
	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID;
		req.queue_id = rte_cpu_to_le_16(vnic->cos_queue_id);
	}

	enables |= ctx_enable_flag;
	req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
	req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
	req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
	req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
	req.enables = rte_cpu_to_le_32(enables);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.mru = rte_cpu_to_le_16(vnic->mru);
	/* Configure default VNIC only once. */
	if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {

			rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
		bp->flags |= BNXT_FLAG_DFLT_VNIC_SET;
	}
	if (vnic->vlan_strip)

			rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);

			rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
	/* Note: the VNIC_QCFG output flag values are used below; they appear
	 * to mirror the corresponding VNIC_CFG input flag bits.
	 */
	if (vnic->roce_dual)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
	if (vnic->roce_only)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
	if (vnic->rss_dflt_cr)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,

	struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);

	HWRM_PREP(&req, HWRM_VNIC_QCFG, BNXT_USE_CHIMP_MB);

		rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.vf_id = rte_cpu_to_le_16(fw_vf_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
	vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
	vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
	vnic->mru = rte_le_to_cpu_16(resp->mru);
	vnic->func_default = rte_le_to_cpu_32(
			resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
	vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
	vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
	vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
	vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
	vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
			     struct bnxt_vnic_info *vnic, uint16_t ctx_idx)

	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();

	ctx_id = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
	if (!BNXT_HAS_RING_GRPS(bp))
		vnic->fw_grp_ids[ctx_idx] = ctx_id;
	else if (ctx_idx == 0)
		vnic->rss_rule = ctx_id;
int _bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
			     struct bnxt_vnic_info *vnic, uint16_t ctx_idx)

	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
						bp->hwrm_cmd_resp_addr;

	if (ctx_idx == (uint16_t)HWRM_NA_SIGNATURE) {
		PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);

	HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);

	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)

	if (BNXT_CHIP_THOR(bp)) {

		for (j = 0; j < vnic->num_lb_ctxts; j++) {
			rc = _bnxt_hwrm_vnic_ctx_free(bp,

						      vnic->fw_grp_ids[j]);
			vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
		}
		vnic->num_lb_ctxts = 0;

		rc = _bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
		vnic->rss_rule = INVALID_HW_RING_ID;
2133 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2136 struct hwrm_vnic_free_input req = {.req_type = 0 };
2137 struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
2139 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2140 PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
2144 HWRM_PREP(&req, HWRM_VNIC_FREE, BNXT_USE_CHIMP_MB);
2146 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2148 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2150 HWRM_CHECK_RESULT();
2153 vnic->fw_vnic_id = INVALID_HW_RING_ID;
2154 /* Configure default VNIC again if necessary. */
2155 if (vnic->func_default && (bp->flags & BNXT_FLAG_DFLT_VNIC_SET))
2156 bp->flags &= ~BNXT_FLAG_DFLT_VNIC_SET;
2162 bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2166 int nr_ctxs = vnic->num_lb_ctxts;
2167 struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
2168 struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2170 for (i = 0; i < nr_ctxs; i++) {
2171 HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
2173 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2174 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
2175 req.hash_mode_flags = vnic->hash_mode;
2177 req.hash_key_tbl_addr =
2178 rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
2180 req.ring_grp_tbl_addr =
2181 rte_cpu_to_le_64(vnic->rss_table_dma_addr +
2182 i * HW_HASH_INDEX_SIZE);
2183 req.ring_table_pair_index = i;
2184 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);
2186 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
2189 HWRM_CHECK_RESULT();
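
/*
 * Editorial note: the loop above slices one shared DMA allocation.
 * Context i points ring_grp_tbl_addr at byte offset
 * i * HW_HASH_INDEX_SIZE within rss_table_dma_addr, so each
 * ring-table-pair context programs its own window of the RSS
 * indirection table.
 */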
2196 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
2197 struct bnxt_vnic_info *vnic)
2200 struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
2201 struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2203 if (!vnic->rss_table)
2206 if (BNXT_CHIP_THOR(bp))
2207 return bnxt_hwrm_vnic_rss_cfg_thor(bp, vnic);
2209 HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
2211 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
2212 req.hash_mode_flags = vnic->hash_mode;
2214 req.ring_grp_tbl_addr =
2215 rte_cpu_to_le_64(vnic->rss_table_dma_addr);
2216 req.hash_key_tbl_addr =
2217 rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
2218 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
2219 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2221 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2223 HWRM_CHECK_RESULT();
2229 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
2230 struct bnxt_vnic_info *vnic)
2233 struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
2234 struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2237 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2238 PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
2242 HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
2244 req.flags = rte_cpu_to_le_32(
2245 HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
2247 req.enables = rte_cpu_to_le_32(
2248 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
2250 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
2251 size -= RTE_PKTMBUF_HEADROOM;
2252 size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
2254 req.jumbo_thresh = rte_cpu_to_le_16(size);
2255 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2257 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2259 HWRM_CHECK_RESULT();
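
/*
 * Worked example (editorial, assuming a common 2048-byte mbuf data room
 * and the default 128-byte RTE_PKTMBUF_HEADROOM): the threshold above
 * becomes RTE_MIN(BNXT_MAX_PKT_LEN, 2048 - 128), i.e. at most 1920
 * bytes, so any frame larger than one mbuf's usable space is placed
 * via the jumbo/aggregation path.
 */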
2265 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
2266 struct bnxt_vnic_info *vnic, bool enable)
2269 struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
2270 struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2272 if (BNXT_CHIP_THOR(bp) && !bp->max_tpa_v2) {
2274 PMD_DRV_LOG(ERR, "No HW support for LRO\n");
2278 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
2279 PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
2283 HWRM_PREP(&req, HWRM_VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
2286 req.enables = rte_cpu_to_le_32(
2287 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
2288 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
2289 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
2290 req.flags = rte_cpu_to_le_32(
2291 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
2292 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
2293 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
2294 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
2295 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
2296 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
2297 req.max_aggs = rte_cpu_to_le_16(BNXT_TPA_MAX_AGGS(bp));
2298 req.max_agg_segs = rte_cpu_to_le_16(BNXT_TPA_MAX_SEGS(bp));
2299 req.min_agg_len = rte_cpu_to_le_32(512);
2301 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
2303 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2305 HWRM_CHECK_RESULT();
2311 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
2313 struct hwrm_func_cfg_input req = {0};
2314 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2317 req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
2318 req.enables = rte_cpu_to_le_32(
2319 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2320 memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
2321 req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
2323 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
2325 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2326 HWRM_CHECK_RESULT();
2329 bp->pf->vf_info[vf].random_mac = false;
2334 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
2338 struct hwrm_func_qstats_input req = {.req_type = 0};
2339 struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2341 HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2343 req.fid = rte_cpu_to_le_16(fid);
2345 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2347 HWRM_CHECK_RESULT();
2350 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
2357 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
2358 struct rte_eth_stats *stats,
2359 struct hwrm_func_qstats_output *func_qstats)
2362 struct hwrm_func_qstats_input req = {.req_type = 0};
2363 struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2365 HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
2367 req.fid = rte_cpu_to_le_16(fid);
2369 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2371 HWRM_CHECK_RESULT();
2373 memcpy(func_qstats, resp,
2374 sizeof(struct hwrm_func_qstats_output));
2379 stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2380 stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2381 stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2382 stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2383 stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2384 stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2386 stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2387 stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2388 stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2389 stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2390 stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2391 stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2393 stats->imissed = rte_le_to_cpu_64(resp->rx_discard_pkts);
2394 stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
2395 stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
2403 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
2406 struct hwrm_func_clr_stats_input req = {.req_type = 0};
2407 struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2409 HWRM_PREP(&req, HWRM_FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
2411 req.fid = rte_cpu_to_le_16(fid);
2413 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
2415 HWRM_CHECK_RESULT();
2421 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
2426 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2427 struct bnxt_tx_queue *txq;
2428 struct bnxt_rx_queue *rxq;
2429 struct bnxt_cp_ring_info *cpr;
2431 if (i >= bp->rx_cp_nr_rings) {
2432 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2435 rxq = bp->rx_queues[i];
2439 rc = bnxt_hwrm_stat_clear(bp, cpr);
2447 bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
2451 struct bnxt_cp_ring_info *cpr;
2453 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2455 if (i >= bp->rx_cp_nr_rings) {
2456 cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
2458 cpr = bp->rx_queues[i]->cp_ring;
2459 if (BNXT_HAS_RING_GRPS(bp))
2460 bp->grp_info[i].fw_stats_ctx = -1;
2462 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
2463 rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
2464 cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
2472 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
2477 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
2478 struct bnxt_tx_queue *txq;
2479 struct bnxt_rx_queue *rxq;
2480 struct bnxt_cp_ring_info *cpr;
2482 if (i >= bp->rx_cp_nr_rings) {
2483 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
2486 rxq = bp->rx_queues[i];
2490 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
2499 bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
2504 if (!BNXT_HAS_RING_GRPS(bp))
2507 for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
2509 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
2512 rc = bnxt_hwrm_ring_grp_free(bp, idx);
2520 void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2522 struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2524 bnxt_hwrm_ring_free(bp, cp_ring,
2525 HWRM_RING_FREE_INPUT_RING_TYPE_NQ);
2526 cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2527 memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2528 sizeof(*cpr->cp_desc_ring));
2529 cpr->cp_raw_cons = 0;
2533 void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2535 struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
2537 bnxt_hwrm_ring_free(bp, cp_ring,
2538 HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
2539 cp_ring->fw_ring_id = INVALID_HW_RING_ID;
2540 memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
2541 sizeof(*cpr->cp_desc_ring));
2542 cpr->cp_raw_cons = 0;
2546 void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
2548 struct bnxt_rx_queue *rxq = bp->rx_queues[queue_index];
2549 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
2550 struct bnxt_ring *ring = rxr->rx_ring_struct;
2551 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
2553 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2554 bnxt_hwrm_ring_free(bp, ring,
2555 HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2556 ring->fw_ring_id = INVALID_HW_RING_ID;
2557 if (BNXT_HAS_RING_GRPS(bp))
2558 bp->grp_info[queue_index].rx_fw_ring_id =
2561 ring = rxr->ag_ring_struct;
2562 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2563 bnxt_hwrm_ring_free(bp, ring,
2564 BNXT_CHIP_THOR(bp) ?
2565 HWRM_RING_FREE_INPUT_RING_TYPE_RX_AGG :
2566 HWRM_RING_FREE_INPUT_RING_TYPE_RX);
2567 if (BNXT_HAS_RING_GRPS(bp))
2568 bp->grp_info[queue_index].ag_fw_ring_id =
2571 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
2572 bnxt_free_cp_ring(bp, cpr);
2574 if (BNXT_HAS_RING_GRPS(bp))
2575 bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
2579 bnxt_free_all_hwrm_rings(struct bnxt *bp)
2583 for (i = 0; i < bp->tx_cp_nr_rings; i++) {
2584 struct bnxt_tx_queue *txq = bp->tx_queues[i];
2585 struct bnxt_tx_ring_info *txr = txq->tx_ring;
2586 struct bnxt_ring *ring = txr->tx_ring_struct;
2587 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
2589 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
2590 bnxt_hwrm_ring_free(bp, ring,
2591 HWRM_RING_FREE_INPUT_RING_TYPE_TX);
2592 ring->fw_ring_id = INVALID_HW_RING_ID;
2593 memset(txr->tx_desc_ring, 0,
2594 txr->tx_ring_struct->ring_size *
2595 sizeof(*txr->tx_desc_ring));
2596 memset(txr->tx_buf_ring, 0,
2597 txr->tx_ring_struct->ring_size *
2598 sizeof(*txr->tx_buf_ring));
2602 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
2603 bnxt_free_cp_ring(bp, cpr);
2604 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
2608 for (i = 0; i < bp->rx_cp_nr_rings; i++)
2609 bnxt_free_hwrm_rx_ring(bp, i);
2614 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
2619 if (!BNXT_HAS_RING_GRPS(bp))
2622 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
2623 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
2631 * HWRM utility functions
2634 void bnxt_free_hwrm_resources(struct bnxt *bp)
2636 /* Release rte_malloc'd HWRM request/response buffers. */
2637 rte_free(bp->hwrm_cmd_resp_addr);
2638 rte_free(bp->hwrm_short_cmd_req_addr);
2639 bp->hwrm_cmd_resp_addr = NULL;
2640 bp->hwrm_short_cmd_req_addr = NULL;
2641 bp->hwrm_cmd_resp_dma_addr = 0;
2642 bp->hwrm_short_cmd_req_dma_addr = 0;
2645 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2647 struct rte_pci_device *pdev = bp->pdev;
2648 char type[RTE_MEMZONE_NAMESIZE];
2650 sprintf(type, "bnxt_hwrm_" PCI_PRI_FMT, pdev->addr.domain,
2651 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
2652 bp->max_resp_len = HWRM_MAX_RESP_LEN;
2653 bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
2654 if (bp->hwrm_cmd_resp_addr == NULL)
2656 bp->hwrm_cmd_resp_dma_addr =
2657 rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
2658 if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
2660 "unable to map response address to physical memory\n");
2663 rte_spinlock_init(&bp->hwrm_lock);
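
/*
 * Minimal sketch of the allocate-then-map pattern used above (editorial;
 * the helper name is invented): rte_malloc() provides the buffer and
 * rte_malloc_virt2iova() resolves the IOVA that is handed to firmware,
 * with RTE_BAD_IOVA signalling an unmappable buffer.
 */
static void *example_alloc_fw_buf(const char *name, size_t len,
				  rte_iova_t *iova)
{
	void *buf = rte_malloc(name, len, 0);

	if (buf == NULL)
		return NULL;

	*iova = rte_malloc_virt2iova(buf);
	if (*iova == RTE_BAD_IOVA) {
		rte_free(buf);
		return NULL;
	}

	return buf;
}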
2669 bnxt_clear_one_vnic_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
2673 if (filter->filter_type == HWRM_CFA_EM_FILTER) {
2674 rc = bnxt_hwrm_clear_em_filter(bp, filter);
2677 } else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
2678 rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
2683 rc = bnxt_hwrm_clear_l2_filter(bp, filter);
2688 bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2690 struct bnxt_filter_info *filter;
2693 STAILQ_FOREACH(filter, &vnic->filter, next) {
2694 rc = bnxt_clear_one_vnic_filter(bp, filter);
2695 STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
2696 bnxt_free_filter(bp, filter);
2702 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2704 struct bnxt_filter_info *filter;
2705 struct rte_flow *flow;
2708 while (!STAILQ_EMPTY(&vnic->flow_list)) {
2709 flow = STAILQ_FIRST(&vnic->flow_list);
2710 filter = flow->filter;
2711 PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
2712 rc = bnxt_clear_one_vnic_filter(bp, filter);
2714 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
2720 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2722 struct bnxt_filter_info *filter;
2725 STAILQ_FOREACH(filter, &vnic->filter, next) {
2726 if (filter->filter_type == HWRM_CFA_EM_FILTER)
2727 rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
2729 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
2730 rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
2733 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
2742 bnxt_free_tunnel_ports(struct bnxt *bp)
2744 if (bp->vxlan_port_cnt)
2745 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
2746 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
2748 if (bp->geneve_port_cnt)
2749 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
2750 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
2751 bp->geneve_port = 0;
2754 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
2758 if (bp->vnic_info == NULL)
2762 * Cleanup VNICs in reverse order, to make sure the L2 filter
2763 * from vnic0 is last to be cleaned up.
2765 for (i = bp->max_vnics - 1; i >= 0; i--) {
2766 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2768 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
2771 bnxt_clear_hwrm_vnic_flows(bp, vnic);
2773 bnxt_clear_hwrm_vnic_filters(bp, vnic);
2775 bnxt_hwrm_vnic_ctx_free(bp, vnic);
2777 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
2779 bnxt_hwrm_vnic_free(bp, vnic);
2781 rte_free(vnic->fw_grp_ids);
2783 /* Ring resources */
2784 bnxt_free_all_hwrm_rings(bp);
2785 bnxt_free_all_hwrm_ring_grps(bp);
2786 bnxt_free_all_hwrm_stat_ctxs(bp);
2787 bnxt_free_tunnel_ports(bp);
2790 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
2792 uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2794 if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
2795 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
2797 switch (conf_link_speed) {
2798 case ETH_LINK_SPEED_10M_HD:
2799 case ETH_LINK_SPEED_100M_HD:
2801 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
2803 return hw_link_duplex;
2806 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
2811 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed,
2814 uint16_t eth_link_speed = 0;
2816 if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
2817 return ETH_LINK_SPEED_AUTONEG;
2819 switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
2820 case ETH_LINK_SPEED_100M:
2821 case ETH_LINK_SPEED_100M_HD:
2824 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
2826 case ETH_LINK_SPEED_1G:
2828 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
2830 case ETH_LINK_SPEED_2_5G:
2832 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
2834 case ETH_LINK_SPEED_10G:
2836 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
2838 case ETH_LINK_SPEED_20G:
2840 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
2842 case ETH_LINK_SPEED_25G:
2844 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
2846 case ETH_LINK_SPEED_40G:
2848 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
2850 case ETH_LINK_SPEED_50G:
2851 eth_link_speed = pam4_link ?
2852 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB :
2853 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
2855 case ETH_LINK_SPEED_100G:
2856 eth_link_speed = pam4_link ?
2857 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB :
2858 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
2860 case ETH_LINK_SPEED_200G:
2862 HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
2866 "Unsupported link speed %d; default to AUTO\n",
2870 return eth_link_speed;
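
/*
 * Editorial note: 50G and faster links can be signalled with either NRZ
 * or PAM4 encoding; the pam4_link argument above selects the PAM4
 * force-speed constants for 50G/100G, and 200G is PAM4-only.
 */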
2873 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
2874 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
2875 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
2876 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | \
2877 ETH_LINK_SPEED_100G | ETH_LINK_SPEED_200G)
2879 static int bnxt_validate_link_speed(struct bnxt *bp)
2881 uint32_t link_speed = bp->eth_dev->data->dev_conf.link_speeds;
2882 uint16_t port_id = bp->eth_dev->data->port_id;
2883 uint32_t link_speed_capa;
2886 if (link_speed == ETH_LINK_SPEED_AUTONEG)
2889 link_speed_capa = bnxt_get_speed_capabilities(bp);
2891 if (link_speed & ETH_LINK_SPEED_FIXED) {
2892 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
2894 if (one_speed & (one_speed - 1)) {
2896 "Invalid advertised speeds (%u) for port %u\n",
2897 link_speed, port_id);
2900 if ((one_speed & link_speed_capa) != one_speed) {
2902 "Unsupported advertised speed (%u) for port %u\n",
2903 link_speed, port_id);
2907 if (!(link_speed & link_speed_capa)) {
2909 "Unsupported advertised speeds (%u) for port %u\n",
2910 link_speed, port_id);
2918 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
2922 if (link_speed == ETH_LINK_SPEED_AUTONEG) {
2923 if (bp->link_info->support_speeds)
2924 return bp->link_info->support_speeds;
2925 link_speed = BNXT_SUPPORTED_SPEEDS;
2928 if (link_speed & ETH_LINK_SPEED_100M)
2929 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2930 if (link_speed & ETH_LINK_SPEED_100M_HD)
2931 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
2932 if (link_speed & ETH_LINK_SPEED_1G)
2933 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
2934 if (link_speed & ETH_LINK_SPEED_2_5G)
2935 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
2936 if (link_speed & ETH_LINK_SPEED_10G)
2937 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
2938 if (link_speed & ETH_LINK_SPEED_20G)
2939 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
2940 if (link_speed & ETH_LINK_SPEED_25G)
2941 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
2942 if (link_speed & ETH_LINK_SPEED_40G)
2943 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
2944 if (link_speed & ETH_LINK_SPEED_50G)
2945 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
2946 if (link_speed & ETH_LINK_SPEED_100G)
2947 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
2948 if (link_speed & ETH_LINK_SPEED_200G)
2949 ret |= HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
2953 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
2955 uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
2957 switch (hw_link_speed) {
2958 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
2959 eth_link_speed = ETH_SPEED_NUM_100M;
2961 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
2962 eth_link_speed = ETH_SPEED_NUM_1G;
2964 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
2965 eth_link_speed = ETH_SPEED_NUM_2_5G;
2967 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2968 eth_link_speed = ETH_SPEED_NUM_10G;
2970 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2971 eth_link_speed = ETH_SPEED_NUM_20G;
2973 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2974 eth_link_speed = ETH_SPEED_NUM_25G;
2976 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2977 eth_link_speed = ETH_SPEED_NUM_40G;
2979 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2980 eth_link_speed = ETH_SPEED_NUM_50G;
2982 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
2983 eth_link_speed = ETH_SPEED_NUM_100G;
2985 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
2986 eth_link_speed = ETH_SPEED_NUM_200G;
2988 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2990 PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
2994 return eth_link_speed;
2997 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2999 uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
3001 switch (hw_link_duplex) {
3002 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
3003 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
3005 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
3007 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
3008 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
3011 PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
3015 return eth_link_duplex;
3018 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
3021 struct bnxt_link_info *link_info = bp->link_info;
3023 rc = bnxt_hwrm_port_phy_qcaps(bp);
3025 PMD_DRV_LOG(ERR, "Get link config failed with rc %d\n", rc);
3027 rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
3029 PMD_DRV_LOG(ERR, "Get link config failed with rc %d\n", rc);
3033 if (link_info->link_speed)
3035 bnxt_parse_hw_link_speed(link_info->link_speed);
3037 link->link_speed = ETH_SPEED_NUM_NONE;
3038 link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
3039 link->link_status = link_info->link_up;
3040 link->link_autoneg = link_info->auto_mode ==
3041 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
3042 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
3047 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
3050 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
3051 struct bnxt_link_info link_req;
3052 uint16_t speed, autoneg;
3054 if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
3057 rc = bnxt_validate_link_speed(bp);
3061 memset(&link_req, 0, sizeof(link_req));
3062 link_req.link_up = link_up;
3066 autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
3067 if (BNXT_CHIP_THOR(bp) &&
3068 dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
3069 /* 40G is not supported as part of media auto detect.
3070 * The speed should be forced and autoneg disabled
3071 * to configure 40G speed.
3073 PMD_DRV_LOG(INFO, "Disabling autoneg for 40G\n");
3077 speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds,
3078 bp->link_info->link_signal_mode);
3079 link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
3080 /* Autoneg can be done only when the FW allows.
3081 * When user configures fixed speed of 40G and later changes to
3082 * any other speed, auto_link_speed/force_link_speed is still set
3083 * to 40G until link comes up at new speed.
3086 !(!BNXT_CHIP_THOR(bp) &&
3087 (bp->link_info->auto_link_speed ||
3088 bp->link_info->force_link_speed))) {
3089 link_req.phy_flags |=
3090 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
3091 link_req.auto_link_speed_mask =
3092 bnxt_parse_eth_link_speed_mask(bp,
3093 dev_conf->link_speeds);
3095 if (bp->link_info->phy_type ==
3096 HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
3097 bp->link_info->phy_type ==
3098 HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
3099 bp->link_info->media_type ==
3100 HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
3101 PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
3105 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
3106 /* If user wants a particular speed try that first. */
3108 link_req.link_speed = speed;
3109 else if (bp->link_info->force_pam4_link_speed)
3110 link_req.link_speed =
3111 bp->link_info->force_pam4_link_speed;
3112 else if (bp->link_info->auto_pam4_link_speeds)
3113 link_req.link_speed =
3114 bp->link_info->auto_pam4_link_speeds;
3115 else if (bp->link_info->support_pam4_speeds)
3116 link_req.link_speed =
3117 bp->link_info->support_pam4_speeds;
3118 else if (bp->link_info->force_link_speed)
3119 link_req.link_speed = bp->link_info->force_link_speed;
3121 link_req.link_speed = bp->link_info->auto_link_speed;
3123 link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
3124 link_req.auto_pause = bp->link_info->auto_pause;
3125 link_req.force_pause = bp->link_info->force_pause;
3128 rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
3131 "Set link config failed with rc %d\n", rc);
3139 int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
3141 struct hwrm_func_qcfg_input req = {0};
3142 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3145 bp->func_svif = BNXT_SVIF_INVALID;
3148 HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3149 req.fid = rte_cpu_to_le_16(0xffff);
3151 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3153 HWRM_CHECK_RESULT();
3155 /* Hard-coded 12-bit VLAN ID mask (0xfff). */
3156 bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
3158 svif_info = rte_le_to_cpu_16(resp->svif_info);
3159 if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)
3160 bp->func_svif = svif_info &
3161 HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
3163 flags = rte_le_to_cpu_16(resp->flags);
3164 if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
3165 bp->flags |= BNXT_FLAG_MULTI_HOST;
3168 !BNXT_VF_IS_TRUSTED(bp) &&
3169 (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
3170 bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
3171 PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
3172 } else if (BNXT_VF(bp) &&
3173 BNXT_VF_IS_TRUSTED(bp) &&
3174 !(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
3175 bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN;
3176 PMD_DRV_LOG(INFO, "Trusted VF cap disabled\n");
3180 *mtu = rte_le_to_cpu_16(resp->mtu);
3182 switch (resp->port_partition_type) {
3183 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
3184 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
3185 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
3187 bp->flags |= BNXT_FLAG_NPAR_PF;
3190 bp->flags &= ~BNXT_FLAG_NPAR_PF;
3199 int bnxt_hwrm_parent_pf_qcfg(struct bnxt *bp)
3201 struct hwrm_func_qcfg_input req = {0};
3202 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3205 if (!BNXT_VF_IS_TRUSTED(bp))
3211 bp->parent->fid = BNXT_PF_FID_INVALID;
3213 HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3215 req.fid = rte_cpu_to_le_16(0xfffe); /* Request parent PF information. */
3217 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3219 HWRM_CHECK_RESULT();
3221 memcpy(bp->parent->mac_addr, resp->mac_address, RTE_ETHER_ADDR_LEN);
3222 bp->parent->vnic = rte_le_to_cpu_16(resp->dflt_vnic_id);
3223 bp->parent->fid = rte_le_to_cpu_16(resp->fid);
3224 bp->parent->port_id = rte_le_to_cpu_16(resp->port_id);
3226 /* FIXME: Temporary workaround - remove when firmware issue is fixed. */
3227 if (bp->parent->vnic == 0) {
3228 PMD_DRV_LOG(ERR, "Error: parent VNIC unavailable.\n");
3229 /* Use hard-coded values appropriate for current Wh+ fw. */
3230 if (bp->parent->fid == 2)
3231 bp->parent->vnic = 0x100;
3233 bp->parent->vnic = 1;
3241 int bnxt_hwrm_get_dflt_vnic_svif(struct bnxt *bp, uint16_t fid,
3242 uint16_t *vnic_id, uint16_t *svif)
3244 struct hwrm_func_qcfg_input req = {0};
3245 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3249 HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3250 req.fid = rte_cpu_to_le_16(fid);
3252 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3254 HWRM_CHECK_RESULT();
3257 *vnic_id = rte_le_to_cpu_16(resp->dflt_vnic_id);
3259 svif_info = rte_le_to_cpu_16(resp->svif_info);
3260 if (svif && (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID))
3261 *svif = svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
3268 int bnxt_hwrm_port_mac_qcfg(struct bnxt *bp)
3270 struct hwrm_port_mac_qcfg_input req = {0};
3271 struct hwrm_port_mac_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3272 uint16_t port_svif_info;
3275 bp->port_svif = BNXT_SVIF_INVALID;
3277 if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
3280 HWRM_PREP(&req, HWRM_PORT_MAC_QCFG, BNXT_USE_CHIMP_MB);
3282 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3284 HWRM_CHECK_RESULT_SILENT();
3286 port_svif_info = rte_le_to_cpu_16(resp->port_svif_info);
3287 if (port_svif_info &
3288 HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_VALID)
3289 bp->port_svif = port_svif_info &
3290 HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_MASK;
3297 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
3298 struct hwrm_func_qcaps_output *qcaps)
3300 qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
3301 memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
3302 sizeof(qcaps->mac_address));
3303 qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
3304 qcaps->max_rx_rings = fcfg->num_rx_rings;
3305 qcaps->max_tx_rings = fcfg->num_tx_rings;
3306 qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
3307 qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
3309 qcaps->first_vf_id = 0;
3310 qcaps->max_vnics = fcfg->num_vnics;
3311 qcaps->max_decap_records = 0;
3312 qcaps->max_encap_records = 0;
3313 qcaps->max_tx_wm_flows = 0;
3314 qcaps->max_tx_em_flows = 0;
3315 qcaps->max_rx_wm_flows = 0;
3316 qcaps->max_rx_em_flows = 0;
3317 qcaps->max_flow_id = 0;
3318 qcaps->max_mcast_filters = fcfg->num_mcast_filters;
3319 qcaps->max_sp_tx_rings = 0;
3320 qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
3323 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
3325 struct hwrm_func_cfg_input req = {0};
3326 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3330 enables = HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
3331 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
3332 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
3333 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
3334 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
3335 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
3336 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
3337 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
3338 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS;
3340 if (BNXT_HAS_RING_GRPS(bp)) {
3341 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS;
3342 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
3343 } else if (BNXT_HAS_NQ(bp)) {
3344 enables |= HWRM_FUNC_CFG_INPUT_ENABLES_NUM_MSIX;
3345 req.num_msix = rte_cpu_to_le_16(bp->max_nq_rings);
3348 req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
3349 req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
3350 req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
3351 req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
3352 req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
3353 req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
3354 req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
3355 req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
3356 req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
3357 req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
3358 req.fid = rte_cpu_to_le_16(0xffff);
3359 req.enables = rte_cpu_to_le_32(enables);
3361 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3363 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3365 HWRM_CHECK_RESULT();
3371 static void populate_vf_func_cfg_req(struct bnxt *bp,
3372 struct hwrm_func_cfg_input *req,
3375 req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
3376 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
3377 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
3378 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
3379 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
3380 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
3381 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
3382 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
3383 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
3384 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
3386 req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
3387 RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
3389 req->mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
3390 req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
3392 req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
3393 req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
3395 req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
3396 req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
3397 req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
3398 /* TODO: For now, do not support VMDq/RFS on VFs. */
3399 req->num_vnics = rte_cpu_to_le_16(1);
3400 req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
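
/*
 * Worked example (editorial): with bp->max_tx_rings = 64 and
 * num_vfs = 7, each VF (and the PF itself) is offered
 * 64 / (7 + 1) = 8 TX rings; the same even (num_vfs + 1) split is
 * applied to the other resource counts above.
 */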
3404 static void add_random_mac_if_needed(struct bnxt *bp,
3405 struct hwrm_func_cfg_input *cfg_req,
3408 struct rte_ether_addr mac;
3410 if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
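	/* The literal below is five explicit NULs plus the string
	 * terminator, i.e. six zero bytes, matching an all-zero MAC.
	 */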
3413 if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00", 6) == 0) {
3415 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
3416 rte_eth_random_addr(cfg_req->dflt_mac_addr);
3417 bp->pf->vf_info[vf].random_mac = true;
3419 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes,
3420 RTE_ETHER_ADDR_LEN);
3424 static int reserve_resources_from_vf(struct bnxt *bp,
3425 struct hwrm_func_cfg_input *cfg_req,
3428 struct hwrm_func_qcaps_input req = {0};
3429 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3432 /* Get the actual allocated values now */
3433 HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
3434 req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3435 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3438 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
3439 copy_func_cfg_to_qcaps(cfg_req, resp);
3440 } else if (resp->error_code) {
3441 rc = rte_le_to_cpu_16(resp->error_code);
3442 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
3443 copy_func_cfg_to_qcaps(cfg_req, resp);
3446 bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
3447 bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
3448 bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
3449 bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
3450 bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
3451 bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
3453 * TODO: While VMDq is not supported on VFs, max_vnics is always
3454 * forced to 1 in this case.
3456 /* bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics); */
3457 bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
3464 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
3466 struct hwrm_func_qcfg_input req = {0};
3467 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3470 /* Query the VF's current default VLAN. */
3471 HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3472 req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3473 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3474 HWRM_CHECK_RESULT();
3475 rc = rte_le_to_cpu_16(resp->vlan);
3482 static int update_pf_resource_max(struct bnxt *bp)
3484 struct hwrm_func_qcfg_input req = {0};
3485 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3488 /* And copy the allocated numbers into the pf struct */
3489 HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3490 req.fid = rte_cpu_to_le_16(0xffff);
3491 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3492 HWRM_CHECK_RESULT();
3494 /* Only TX ring value reflects actual allocation? TODO */
3495 bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
3496 bp->pf->evb_mode = resp->evb_mode;
3503 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
3508 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3512 rc = bnxt_hwrm_func_qcaps(bp);
3516 bp->pf->func_cfg_flags &=
3517 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3518 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3519 bp->pf->func_cfg_flags |=
3520 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
3521 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
3522 rc = __bnxt_hwrm_func_qcaps(bp);
3526 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
3528 struct hwrm_func_cfg_input req = {0};
3529 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3536 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
3540 rc = bnxt_hwrm_func_qcaps(bp);
3545 bp->pf->active_vfs = num_vfs;
3548 * First, configure the PF to only use one TX ring. This ensures that
3549 * there are enough rings for all VFs.
3551 * If we don't do this, when we call func_alloc() later, we will lock
3552 * extra rings to the PF that won't be available during func_cfg() of
3555 * This has been fixed with firmware versions above 20.6.54
3557 bp->pf->func_cfg_flags &=
3558 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
3559 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
3560 bp->pf->func_cfg_flags |=
3561 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
3562 rc = bnxt_hwrm_pf_func_cfg(bp, 1);
3567 * Now, create and register a buffer to hold forwarded VF requests
3569 req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
3570 bp->pf->vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
3571 page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
3572 if (bp->pf->vf_req_buf == NULL) {
3576 for (sz = 0; sz < req_buf_sz; sz += getpagesize())
3577 rte_mem_lock_page(((char *)bp->pf->vf_req_buf) + sz);
3578 for (i = 0; i < num_vfs; i++)
3579 bp->pf->vf_info[i].req_buf = ((char *)bp->pf->vf_req_buf) +
3580 (i * HWRM_MAX_REQ_LEN);
3582 rc = bnxt_hwrm_func_buf_rgtr(bp);
3586 populate_vf_func_cfg_req(bp, &req, num_vfs);
3588 bp->pf->active_vfs = 0;
3589 for (i = 0; i < num_vfs; i++) {
3590 add_random_mac_if_needed(bp, &req, i);
3592 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3593 req.flags = rte_cpu_to_le_32(bp->pf->vf_info[i].func_cfg_flags);
3594 req.fid = rte_cpu_to_le_16(bp->pf->vf_info[i].fid);
3595 rc = bnxt_hwrm_send_message(bp,
3600 /* Clear enable flag for next pass */
3601 req.enables &= ~rte_cpu_to_le_32(
3602 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
3604 if (rc || resp->error_code) {
3606 "Failed to initizlie VF %d\n", i);
3608 "Not all VFs available. (%d, %d)\n",
3609 rc, resp->error_code);
3616 reserve_resources_from_vf(bp, &req, i);
3617 bp->pf->active_vfs++;
3618 bnxt_hwrm_func_clr_stats(bp, bp->pf->vf_info[i].fid);
3622 * Now configure the PF to use "the rest" of the resources.
3623 * STD_TX_RING_MODE is used here even though it limits the number of
3624 * TX rings: it lets QoS function properly, whereas leaving it unset
3625 * would cause the PF rings to break bandwidth settings.
3627 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
3631 rc = update_pf_resource_max(bp);
3638 bnxt_hwrm_func_buf_unrgtr(bp);
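
/*
 * Editorial summary of the sequence above: shrink the PF to one TX ring,
 * register the forwarded-request buffer, func_cfg each VF (reserving its
 * resources out of the PF maxima via FUNC_QCAPS), then re-expand the PF
 * with the remaining rings and refresh the PF resource maxima.
 */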
3642 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
3644 struct hwrm_func_cfg_input req = {0};
3645 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3648 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3650 req.fid = rte_cpu_to_le_16(0xffff);
3651 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
3652 req.evb_mode = bp->pf->evb_mode;
3654 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3655 HWRM_CHECK_RESULT();
3661 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
3662 uint8_t tunnel_type)
3664 struct hwrm_tunnel_dst_port_alloc_input req = {0};
3665 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3668 HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB);
3669 req.tunnel_type = tunnel_type;
3670 req.tunnel_dst_port_val = rte_cpu_to_be_16(port);
3671 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3672 HWRM_CHECK_RESULT();
3674 switch (tunnel_type) {
3675 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
3676 bp->vxlan_fw_dst_port_id =
3677 rte_le_to_cpu_16(resp->tunnel_dst_port_id);
3678 bp->vxlan_port = port;
3680 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
3681 bp->geneve_fw_dst_port_id =
3682 rte_le_to_cpu_16(resp->tunnel_dst_port_id);
3683 bp->geneve_port = port;
3694 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
3695 uint8_t tunnel_type)
3697 struct hwrm_tunnel_dst_port_free_input req = {0};
3698 struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
3701 HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB);
3703 req.tunnel_type = tunnel_type;
3704 req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
3705 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3707 HWRM_CHECK_RESULT();
3713 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
3716 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3717 struct hwrm_func_cfg_input req = {0};
3720 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3722 req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3723 req.flags = rte_cpu_to_le_32(flags);
3724 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3726 HWRM_CHECK_RESULT();
3732 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
3734 uint32_t *flag = flagp;
3736 vnic->flags = *flag;
3739 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3741 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
3744 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
3747 struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
3748 struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
3750 HWRM_PREP(&req, HWRM_FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB);
3752 req.req_buf_num_pages = rte_cpu_to_le_16(1);
3753 req.req_buf_page_size = rte_cpu_to_le_16(
3754 page_getenum(bp->pf->active_vfs * HWRM_MAX_REQ_LEN));
3755 req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
3756 req.req_buf_page_addr0 =
3757 rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf->vf_req_buf));
3758 if (req.req_buf_page_addr0 == RTE_BAD_IOVA) {
3760 "unable to map buffer address to physical memory\n");
3764 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3766 HWRM_CHECK_RESULT();
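
/*
 * Worked example (editorial, assuming HWRM_MAX_REQ_LEN is 128 bytes and
 * page_getenum() rounds up to a power of two): with 4 active VFs the
 * request buffer spans 4 * 128 = 512 bytes, so req_buf_page_size is
 * encoded as page_getenum(512) = 9, i.e. 2^9-byte pages.
 */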
3772 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
3775 struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
3776 struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
3778 if (!(BNXT_PF(bp) && bp->pdev->max_vfs))
3781 HWRM_PREP(&req, HWRM_FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB);
3783 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3785 HWRM_CHECK_RESULT();
3791 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
3793 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3794 struct hwrm_func_cfg_input req = {0};
3797 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3799 req.fid = rte_cpu_to_le_16(0xffff);
3800 req.flags = rte_cpu_to_le_32(bp->pf->func_cfg_flags);
3801 req.enables = rte_cpu_to_le_32(
3802 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3803 req.async_event_cr = rte_cpu_to_le_16(
3804 bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3805 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3807 HWRM_CHECK_RESULT();
3813 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
3815 struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3816 struct hwrm_func_vf_cfg_input req = {0};
3819 HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
3821 req.enables = rte_cpu_to_le_32(
3822 HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
3823 req.async_event_cr = rte_cpu_to_le_16(
3824 bp->async_cp_ring->cp_ring_struct->fw_ring_id);
3825 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3827 HWRM_CHECK_RESULT();
3833 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
3835 struct hwrm_func_cfg_input req = {0};
3836 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3837 uint16_t dflt_vlan, fid;
3838 uint32_t func_cfg_flags;
3841 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3844 dflt_vlan = bp->pf->vf_info[vf].dflt_vlan;
3845 fid = bp->pf->vf_info[vf].fid;
3846 func_cfg_flags = bp->pf->vf_info[vf].func_cfg_flags;
3848 fid = rte_cpu_to_le_16(0xffff);
3849 func_cfg_flags = bp->pf->func_cfg_flags;
3850 dflt_vlan = bp->vlan;
3853 req.flags = rte_cpu_to_le_32(func_cfg_flags);
3854 req.fid = rte_cpu_to_le_16(fid);
3855 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3856 req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
3858 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3860 HWRM_CHECK_RESULT();
3866 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
3867 uint16_t max_bw, uint16_t enables)
3869 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3870 struct hwrm_func_cfg_input req = {0};
3873 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3875 req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3876 req.enables |= rte_cpu_to_le_32(enables);
3877 req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
3878 req.max_bw = rte_cpu_to_le_32(max_bw);
3879 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3881 HWRM_CHECK_RESULT();
3887 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
3889 struct hwrm_func_cfg_input req = {0};
3890 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3893 HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
3895 req.flags = rte_cpu_to_le_32(bp->pf->vf_info[vf].func_cfg_flags);
3896 req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3897 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
3898 req.dflt_vlan = rte_cpu_to_le_16(bp->pf->vf_info[vf].dflt_vlan);
3900 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3902 HWRM_CHECK_RESULT();
3908 int bnxt_hwrm_set_async_event_cr(struct bnxt *bp)
3913 rc = bnxt_hwrm_func_cfg_def_cp(bp);
3915 rc = bnxt_hwrm_vf_func_cfg_def_cp(bp);
3920 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
3921 void *encaped, size_t ec_size)
3924 struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
3925 struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3927 if (ec_size > sizeof(req.encap_request))
3930 HWRM_PREP(&req, HWRM_REJECT_FWD_RESP, BNXT_USE_CHIMP_MB);
3932 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3933 memcpy(req.encap_request, encaped, ec_size);
3935 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3937 HWRM_CHECK_RESULT();
3943 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
3944 struct rte_ether_addr *mac)
3946 struct hwrm_func_qcfg_input req = {0};
3947 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
3950 HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
3952 req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
3953 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3955 HWRM_CHECK_RESULT();
3957 memcpy(mac->addr_bytes, resp->mac_address, RTE_ETHER_ADDR_LEN);
3964 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
3965 void *encaped, size_t ec_size)
3968 struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
3969 struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
3971 if (ec_size > sizeof(req.encap_request))
3974 HWRM_PREP(&req, HWRM_EXEC_FWD_RESP, BNXT_USE_CHIMP_MB);
3976 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
3977 memcpy(req.encap_request, encaped, ec_size);
3979 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
3981 HWRM_CHECK_RESULT();
3987 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
3988 struct rte_eth_stats *stats, uint8_t rx)
3991 struct hwrm_stat_ctx_query_input req = {.req_type = 0};
3992 struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
3994 HWRM_PREP(&req, HWRM_STAT_CTX_QUERY, BNXT_USE_CHIMP_MB);
3996 req.stat_ctx_id = rte_cpu_to_le_32(cid);
3998 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4000 HWRM_CHECK_RESULT();
4003 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
4004 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
4005 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
4006 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
4007 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
4008 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
4009 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
4010 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
4012 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
4013 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
4014 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
4015 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
4016 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
4017 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
4025 int bnxt_hwrm_port_qstats(struct bnxt *bp)
4027 struct hwrm_port_qstats_input req = {0};
4028 struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
4029 struct bnxt_pf_info *pf = bp->pf;
4032 HWRM_PREP(&req, HWRM_PORT_QSTATS, BNXT_USE_CHIMP_MB);
4034 req.port_id = rte_cpu_to_le_16(pf->port_id);
4035 req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
4036 req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
4037 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4039 HWRM_CHECK_RESULT();
4045 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
4047 struct hwrm_port_clr_stats_input req = {0};
4048 struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
4049 struct bnxt_pf_info *pf = bp->pf;
4052 /* Not allowed on NS2 device, NPAR, MultiHost, VF */
4053 if (!(bp->flags & BNXT_FLAG_PORT_STATS) || BNXT_VF(bp) ||
4054 BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp))
4057 HWRM_PREP(&req, HWRM_PORT_CLR_STATS, BNXT_USE_CHIMP_MB);
4059 req.port_id = rte_cpu_to_le_16(pf->port_id);
4060 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4062 HWRM_CHECK_RESULT();
4068 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
4070 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
4071 struct hwrm_port_led_qcaps_input req = {0};
4077 HWRM_PREP(&req, HWRM_PORT_LED_QCAPS, BNXT_USE_CHIMP_MB);
4078 req.port_id = bp->pf->port_id;
4079 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4081 HWRM_CHECK_RESULT();
4083 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
4086 bp->leds->num_leds = resp->num_leds;
4087 memcpy(bp->leds, &resp->led0_id,
4088 sizeof(bp->leds[0]) * bp->leds->num_leds);
4089 for (i = 0; i < bp->leds->num_leds; i++) {
4090 struct bnxt_led_info *led = &bp->leds[i];
4092 uint16_t caps = led->led_state_caps;
4094 if (!led->led_group_id ||
4095 !BNXT_LED_ALT_BLINK_CAP(caps)) {
4096 bp->leds->num_leds = 0;
4107 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
4109 struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
4110 struct hwrm_port_led_cfg_input req = {0};
4111 struct bnxt_led_cfg *led_cfg;
4112 uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
4113 uint16_t duration = 0;
4116 if (!bp->leds->num_leds || BNXT_VF(bp))
4119 HWRM_PREP(&req, HWRM_PORT_LED_CFG, BNXT_USE_CHIMP_MB);
4122 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
4123 duration = rte_cpu_to_le_16(500);
4125 req.port_id = bp->pf->port_id;
4126 req.num_leds = bp->leds->num_leds;
4127 led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
4128 for (i = 0; i < bp->leds->num_leds; i++, led_cfg++) {
4129 req.enables |= BNXT_LED_DFLT_ENABLES(i);
4130 led_cfg->led_id = bp->leds[i].led_id;
4131 led_cfg->led_state = led_state;
4132 led_cfg->led_blink_on = duration;
4133 led_cfg->led_blink_off = duration;
4134 led_cfg->led_group_id = bp->leds[i].led_group_id;
4137 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4139 HWRM_CHECK_RESULT();
4145 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
4149 struct hwrm_nvm_get_dir_info_input req = {0};
4150 struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
4152 HWRM_PREP(&req, HWRM_NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB);
4154 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4156 HWRM_CHECK_RESULT();
4158 *entries = rte_le_to_cpu_32(resp->entries);
4159 *length = rte_le_to_cpu_32(resp->entry_length);
4165 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
4168 uint32_t dir_entries;
4169 uint32_t entry_length;
4172 rte_iova_t dma_handle;
4173 struct hwrm_nvm_get_dir_entries_input req = {0};
4174 struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
4176 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
4180 *data++ = dir_entries;
4181 *data++ = entry_length;
4183 memset(data, 0xff, len);
4185 buflen = dir_entries * entry_length;
4186 buf = rte_malloc("nvm_dir", buflen, 0);
4189 dma_handle = rte_malloc_virt2iova(buf);
4190 if (dma_handle == RTE_BAD_IOVA) {
4192 "unable to map response address to physical memory\n");
4195 HWRM_PREP(&req, HWRM_NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB);
4196 req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
4197 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4200 memcpy(data, buf, len > buflen ? buflen : len);
4203 HWRM_CHECK_RESULT();
4209 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
4210 uint32_t offset, uint32_t length,
4215 rte_iova_t dma_handle;
4216 struct hwrm_nvm_read_input req = {0};
4217 struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
4219 buf = rte_malloc("nvm_item", length, 0);
4223 dma_handle = rte_malloc_virt2iova(buf);
4224 if (dma_handle == RTE_BAD_IOVA) {
4226 "unable to map response address to physical memory\n");
4229 HWRM_PREP(&req, HWRM_NVM_READ, BNXT_USE_CHIMP_MB);
4230 req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
4231 req.dir_idx = rte_cpu_to_le_16(index);
4232 req.offset = rte_cpu_to_le_32(offset);
4233 req.len = rte_cpu_to_le_32(length);
4234 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4236 memcpy(data, buf, length);
4239 HWRM_CHECK_RESULT();
4245 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
4248 struct hwrm_nvm_erase_dir_entry_input req = {0};
4249 struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
4251 HWRM_PREP(&req, HWRM_NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB);
4252 req.dir_idx = rte_cpu_to_le_16(index);
4253 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
4254 HWRM_CHECK_RESULT();

int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
			  uint16_t dir_ordinal, uint16_t dir_ext,
			  uint16_t dir_attr, const uint8_t *data,
			  size_t data_len)
{
	int rc;
	struct hwrm_nvm_write_input req = {0};
	struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
	rte_iova_t dma_handle;
	uint8_t *buf;

	buf = rte_malloc("nvm_write", data_len, 0);
	if (!buf)
		return -ENOMEM;

	dma_handle = rte_malloc_virt2iova(buf);
	if (dma_handle == RTE_BAD_IOVA) {
		rte_free(buf);
		PMD_DRV_LOG(ERR,
			"unable to map response address to physical memory\n");
		return -ENOMEM;
	}
	memcpy(buf, data, data_len);

	HWRM_PREP(&req, HWRM_NVM_WRITE, BNXT_USE_CHIMP_MB);

	req.dir_type = rte_cpu_to_le_16(dir_type);
	req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
	req.dir_ext = rte_cpu_to_le_16(dir_ext);
	req.dir_attr = rte_cpu_to_le_16(dir_attr);
	req.dir_data_length = rte_cpu_to_le_32(data_len);
	req.host_src_addr = rte_cpu_to_le_64(dma_handle);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	rte_free(buf);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

static void
bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
{
	uint32_t *count = cbdata;

	*count = *count + 1;
}

static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
				     struct bnxt_vnic_info *vnic __rte_unused)
{
	return 0;
}

int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
{
	uint32_t count = 0;

	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
						&count,
						bnxt_vnic_count_hwrm_stub);

	return count;
}

static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
					uint16_t *vnic_ids)
{
	struct hwrm_func_vf_vnic_ids_query_input req = {0};
	struct hwrm_func_vf_vnic_ids_query_output *resp =
						bp->hwrm_cmd_resp_addr;
	int rc;

	/* First query all VNIC ids */
	HWRM_PREP(&req, HWRM_FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB);

	req.vf_id = rte_cpu_to_le_16(bp->pf->first_vf_id + vf);
	req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf->total_vnics);
	req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(vnic_ids));

	if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) {
		HWRM_UNLOCK();
		PMD_DRV_LOG(ERR,
		"unable to map VNIC ID table address to physical memory\n");
		return -ENOMEM;
	}
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	rc = rte_le_to_cpu_32(resp->vnic_id_cnt);

	HWRM_UNLOCK();

	return rc;
}

/*
 * This function queries the VNIC IDs for a specified VF. It then calls
 * the vnic_cb to update the necessary field in vnic_info with cbdata.
 * Then it calls the hwrm_cb function to program this new vnic configuration.
 */
int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
	void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
	int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
{
	struct bnxt_vnic_info vnic;
	int rc = 0;
	int i, num_vnic_ids;
	uint16_t *vnic_ids;
	size_t vnic_id_sz;
	size_t sz;

	/* First query all VNIC ids */
	vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
	vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
			      RTE_CACHE_LINE_SIZE);
	if (vnic_ids == NULL)
		return -ENOMEM;

	for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
		rte_mem_lock_page(((char *)vnic_ids) + sz);

	num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);

	if (num_vnic_ids < 0) {
		rte_free(vnic_ids);
		return num_vnic_ids;
	}

	/* Retrieve each VNIC, let vnic_cb update it, then reprogram it. */
	for (i = 0; i < num_vnic_ids; i++) {
		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf->first_vf_id + vf);
		if (rc)
			break;
		if (vnic.mru <= 4)	/* Indicates unallocated */
			continue;

		vnic_cb(&vnic, cbdata);

		rc = hwrm_cb(bp, &vnic);
		if (rc)
			break;
	}

	rte_free(vnic_ids);

	return rc;
}
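
/*
 * bnxt_vf_vnic_count() above is the simplest user of this helper: it
 * passes bnxt_vnic_count() as vnic_cb and a no-op hwrm_cb.  A sketch of
 * a hypothetical caller that flips a per-VNIC flag and reprograms it
 * (assuming bnxt_hwrm_vnic_cfg() as the reprogramming routine):
 *
 *	static void set_bd_stall(struct bnxt_vnic_info *vnic, void *cbdata)
 *	{
 *		vnic->bd_stall = *(bool *)cbdata;
 *	}
 *
 *	bool on = true;
 *
 *	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, set_bd_stall, &on,
 *						bnxt_hwrm_vnic_cfg);
 */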

int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
					      bool on)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int rc;

	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(bp->pf->vf_info[vf].fid);
	req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
	req.vlan_antispoof_mode = on ?
		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
{
	struct bnxt_vnic_info vnic;
	uint16_t *vnic_ids;
	size_t vnic_id_sz;
	int num_vnic_ids, i;
	size_t sz;
	int rc;

	vnic_id_sz = bp->pf->total_vnics * sizeof(*vnic_ids);
	vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
			      RTE_CACHE_LINE_SIZE);
	if (vnic_ids == NULL)
		return -ENOMEM;

	for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
		rte_mem_lock_page(((char *)vnic_ids) + sz);

	rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
	if (rc <= 0)
		goto exit;
	num_vnic_ids = rc;

	/*
	 * Loop through to find the default VNIC ID.
	 * TODO: The easier way would be to obtain the resp->dflt_vnic_id
	 * by sending the hwrm_func_qcfg command to the firmware.
	 */
	for (i = 0; i < num_vnic_ids; i++) {
		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
					 bp->pf->first_vf_id + vf);
		if (rc)
			goto exit;
		if (vnic.func_default) {
			rte_free(vnic_ids);
			return vnic.fw_vnic_id;
		}
	}
	/* Could not find a default VNIC. */
	PMD_DRV_LOG(ERR, "No default VNIC\n");
exit:
	rte_free(vnic_ids);
	return rc;
}
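
/*
 * Note on mailbox selection: the exact-match (EM) flow commands below are
 * prepared with BNXT_USE_KONG(bp), so on adapters where the auxiliary
 * KONG processor is enabled they are written to the KONG channel rather
 * than the default CHIMP one (see the bar_offset selection in
 * bnxt_hwrm_send_message()).
 */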

int bnxt_hwrm_set_em_filter(struct bnxt *bp,
			    uint16_t dst_id,
			    struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	if (filter->fw_em_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_em_filter(bp, filter);

	HWRM_PREP(&req, HWRM_CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp));

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
		  HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (filter->ip_addr_type) {
		req.ip_addr_type = filter->ip_addr_type;
		enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
	}
	if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
	if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
		memcpy(req.src_macaddr, filter->src_macaddr,
		       RTE_ETHER_ADDR_LEN);
	if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
		memcpy(req.dst_macaddr, filter->dst_macaddr,
		       RTE_ETHER_ADDR_LEN);
	if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
		req.ovlan_vid = filter->l2_ovlan;
	if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
		req.ivlan_vid = filter->l2_ivlan;
	if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
	if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
		req.ip_protocol = filter->ip_protocol;
	if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
		req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
	if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
		req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
	if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
		req.src_port = rte_cpu_to_be_16(filter->src_port);
	if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
		req.dst_port = rte_cpu_to_be_16(filter->dst_port);
	if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
		req.mirror_vnic_id = filter->mirror_vnic_id;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));

	HWRM_CHECK_RESULT();

	filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
	struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (filter->fw_em_filter_id == UINT64_MAX)
		return 0;

	HWRM_PREP(&req, HWRM_CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp));

	req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	filter->fw_em_filter_id = UINT64_MAX;
	filter->fw_l2_filter_id = UINT64_MAX;

	return 0;
}

int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
				uint16_t dst_id,
				struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_ntuple_filter_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	if (filter->fw_ntuple_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_ntuple_filter(bp, filter);

	HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
		  HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (filter->ip_addr_type) {
		req.ip_addr_type = filter->ip_addr_type;
		enables |=
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
	}
	if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
	if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
		memcpy(req.src_macaddr, filter->src_macaddr,
		       RTE_ETHER_ADDR_LEN);
	if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
	if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
		req.ip_protocol = filter->ip_protocol;
	if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
		req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
		req.src_ipaddr_mask[0] =
			rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
	if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
		req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
		req.dst_ipaddr_mask[0] =
			rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
	if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
		req.src_port = rte_cpu_to_le_16(filter->src_port);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
		req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
	if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
		req.dst_port = rte_cpu_to_le_16(filter->dst_port);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
		req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
		req.mirror_vnic_id = filter->mirror_vnic_id;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
	filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
				  struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_ntuple_filter_free_output *resp =
						bp->hwrm_cmd_resp_addr;

	if (filter->fw_ntuple_filter_id == UINT64_MAX)
		return 0;

	HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB);

	req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	filter->fw_ntuple_filter_id = UINT64_MAX;

	return 0;
}
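
/*
 * On Thor the RSS indirection table is programmed one ring-table-pair
 * context at a time: each context holds 64 slots and every slot is a
 * pair of 16-bit firmware ring IDs (Rx ring, then its completion ring).
 * That is why the loop below writes ring_tbl twice per slot and offsets
 * the table DMA address by BNXT_RSS_ENTRIES_PER_CTX_THOR * 2 *
 * sizeof(*ring_tbl) for every context.
 */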
static int
bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint8_t *rx_queue_state = bp->eth_dev->data->rx_queue_state;
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct bnxt_rx_queue **rxqs = bp->rx_queues;
	uint16_t *ring_tbl = vnic->rss_table;
	int nr_ctxs = vnic->num_lb_ctxts;
	int max_rings = bp->rx_nr_rings;
	int i, j, k, cnt;
	int rc = 0;

	for (i = 0, k = 0; i < nr_ctxs; i++) {
		struct bnxt_rx_ring_info *rxr;
		struct bnxt_cp_ring_info *cpr;

		HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);

		req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
		req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
		req.hash_mode_flags = vnic->hash_mode;

		req.ring_grp_tbl_addr =
		    rte_cpu_to_le_64(vnic->rss_table_dma_addr +
				     i * BNXT_RSS_ENTRIES_PER_CTX_THOR *
				     2 * sizeof(*ring_tbl));
		req.hash_key_tbl_addr =
		    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);

		req.ring_table_pair_index = i;
		req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_grp_ids[i]);

		for (j = 0; j < 64; j++) {
			uint16_t ring_id;

			/* Find next active ring. */
			for (cnt = 0; cnt < max_rings; cnt++) {
				if (rx_queue_state[k] !=
				    RTE_ETH_QUEUE_STATE_STOPPED)
					break;
				if (++k == max_rings)
					k = 0;
			}

			/* Return if no rings are active. */
			if (cnt == max_rings) {
				HWRM_UNLOCK();
				return 0;
			}

			/* Add rx/cp ring pair to RSS table. */
			rxr = rxqs[k]->rx_ring;
			cpr = rxqs[k]->cp_ring;

			ring_id = rxr->rx_ring_struct->fw_ring_id;
			*ring_tbl++ = rte_cpu_to_le_16(ring_id);
			ring_id = cpr->cp_ring_struct->fw_ring_id;
			*ring_tbl++ = rte_cpu_to_le_16(ring_id);

			if (++k == max_rings)
				k = 0;
		}
		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
					    BNXT_USE_CHIMP_MB);

		HWRM_CHECK_RESULT();
		HWRM_UNLOCK();
	}

	return rc;
}

int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	unsigned int rss_idx, fw_idx, i;

	if (!(vnic->rss_table && vnic->hash_type))
		return 0;

	if (BNXT_CHIP_THOR(bp))
		return bnxt_vnic_rss_configure_thor(bp, vnic);

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
		return 0;

	/*
	 * Fill the RSS hash & redirection table with
	 * ring group ids for all VNICs
	 */
	for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
	     rss_idx++, fw_idx++) {
		for (i = 0; i < bp->rx_cp_nr_rings; i++) {
			fw_idx %= bp->rx_cp_nr_rings;
			if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
				break;
			fw_idx++;
		}
		if (i == bp->rx_cp_nr_rings)
			return 0;
		vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx];
	}
	return bnxt_hwrm_vnic_rss_cfg(bp, vnic);
}

static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
	uint16_t flags;

	req->num_cmpl_aggr_int = rte_cpu_to_le_16(hw_coal->num_cmpl_aggr_int);

	/* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
	req->num_cmpl_dma_aggr = rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr);

	/* This is a 6-bit value and must not be 0, or we'll get non-stop IRQs */
	req->num_cmpl_dma_aggr_during_int =
		rte_cpu_to_le_16(hw_coal->num_cmpl_dma_aggr_during_int);

	req->int_lat_tmr_max = rte_cpu_to_le_16(hw_coal->int_lat_tmr_max);

	/* min timer set to 1/2 of interrupt timer */
	req->int_lat_tmr_min = rte_cpu_to_le_16(hw_coal->int_lat_tmr_min);

	/* buf timer set to 1/4 of interrupt timer */
	req->cmpl_aggr_dma_tmr = rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr);

	req->cmpl_aggr_dma_tmr_during_int =
		rte_cpu_to_le_16(hw_coal->cmpl_aggr_dma_tmr_during_int);

	flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
		HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
	req->flags = rte_cpu_to_le_16(flags);
}
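
/*
 * Illustrative example (hypothetical values, not tuned defaults): per the
 * comments above, a bnxt_coal instance is expected to keep int_lat_tmr_min
 * at half of int_lat_tmr_max and cmpl_aggr_dma_tmr at a quarter of it:
 *
 *	struct bnxt_coal coal = {
 *		.num_cmpl_aggr_int = 1,
 *		.num_cmpl_dma_aggr = 1,			// 6-bit, nonzero
 *		.num_cmpl_dma_aggr_during_int = 1,	// 6-bit, nonzero
 *		.int_lat_tmr_max = 64,
 *		.int_lat_tmr_min = 32,			// max / 2
 *		.cmpl_aggr_dma_tmr = 16,		// max / 4
 *		.cmpl_aggr_dma_tmr_during_int = 16,
 *	};
 */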

static int bnxt_hwrm_set_coal_params_thor(struct bnxt *bp,
		struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *agg_req)
{
	struct hwrm_ring_aggint_qcaps_input req = {0};
	struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables;
	uint16_t flags;
	int rc;

	HWRM_PREP(&req, HWRM_RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();

	agg_req->num_cmpl_dma_aggr = resp->num_cmpl_dma_aggr_max;
	agg_req->cmpl_aggr_dma_tmr = resp->cmpl_aggr_dma_tmr_min;

	flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET |
		HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
	agg_req->flags = rte_cpu_to_le_16(flags);
	enables =
	 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_CMPL_AGGR_DMA_TMR |
	 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR;
	agg_req->enables = rte_cpu_to_le_32(enables);

	HWRM_UNLOCK();
	return rc;
}

int bnxt_hwrm_set_ring_coal(struct bnxt *bp,
			    struct bnxt_coal *coal, uint16_t ring_id)
{
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_output *resp =
						bp->hwrm_cmd_resp_addr;
	int rc;

	/* Set ring coalesce parameters only for 100G NICs */
	if (BNXT_CHIP_THOR(bp)) {
		if (bnxt_hwrm_set_coal_params_thor(bp, &req))
			return -1;
	} else if (bnxt_stratus_device(bp)) {
		bnxt_hwrm_set_coal_params(coal, &req);
	} else {
		return 0;
	}

	HWRM_PREP(&req,
		  HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
		  BNXT_USE_CHIMP_MB);
	req.ring_id = rte_cpu_to_le_16(ring_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();
	return rc;
}

#define BNXT_RTE_MEMZONE_FLAG	(RTE_MEMZONE_1GB | RTE_MEMZONE_IOVA_CONTIG)
int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
{
	struct hwrm_func_backing_store_qcaps_input req = {0};
	struct hwrm_func_backing_store_qcaps_output *resp =
		bp->hwrm_cmd_resp_addr;
	struct bnxt_ctx_pg_info *ctx_pg;
	struct bnxt_ctx_mem_info *ctx;
	int total_alloc_len;
	int rc, i, tqm_rings;

	if (!BNXT_CHIP_THOR(bp) ||
	    bp->hwrm_spec_code < HWRM_VERSION_1_9_2 ||
	    BNXT_VF(bp) ||
	    bp->ctx)
		return 0;

	HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT_SILENT();

	total_alloc_len = sizeof(*ctx);
	ctx = rte_zmalloc("bnxt_ctx_mem", total_alloc_len,
			  RTE_CACHE_LINE_SIZE);
	if (!ctx) {
		rc = -ENOMEM;
		goto ctx_err;
	}

	ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries);
	ctx->qp_min_qp1_entries =
		rte_le_to_cpu_16(resp->qp_min_qp1_entries);
	ctx->qp_max_l2_entries =
		rte_le_to_cpu_16(resp->qp_max_l2_entries);
	ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size);
	ctx->srq_max_l2_entries =
		rte_le_to_cpu_16(resp->srq_max_l2_entries);
	ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries);
	ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size);
	ctx->cq_max_l2_entries =
		rte_le_to_cpu_16(resp->cq_max_l2_entries);
	ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries);
	ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size);
	ctx->vnic_max_vnic_entries =
		rte_le_to_cpu_16(resp->vnic_max_vnic_entries);
	ctx->vnic_max_ring_table_entries =
		rte_le_to_cpu_16(resp->vnic_max_ring_table_entries);
	ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size);
	ctx->stat_max_entries =
		rte_le_to_cpu_32(resp->stat_max_entries);
	ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size);
	ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size);
	ctx->tqm_min_entries_per_ring =
		rte_le_to_cpu_32(resp->tqm_min_entries_per_ring);
	ctx->tqm_max_entries_per_ring =
		rte_le_to_cpu_32(resp->tqm_max_entries_per_ring);
	ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
	if (!ctx->tqm_entries_multiple)
		ctx->tqm_entries_multiple = 1;
	ctx->mrav_max_entries =
		rte_le_to_cpu_32(resp->mrav_max_entries);
	ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size);
	ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size);
	ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries);
	ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;

	if (!ctx->tqm_fp_rings_count)
		ctx->tqm_fp_rings_count = bp->max_q;

	tqm_rings = ctx->tqm_fp_rings_count + 1;

	ctx_pg = rte_malloc("bnxt_ctx_pg_mem",
			    sizeof(*ctx_pg) * tqm_rings,
			    RTE_CACHE_LINE_SIZE);
	if (!ctx_pg) {
		rc = -ENOMEM;
		goto ctx_err;
	}
	for (i = 0; i < tqm_rings; i++, ctx_pg++)
		ctx->tqm_mem[i] = ctx_pg;

	bp->ctx = ctx;
ctx_err:
	HWRM_UNLOCK();
	return rc;
}
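
/*
 * Layout note: ctx->tqm_mem[] is backed by the single array allocated
 * above with tqm_fp_rings_count + 1 elements.  Slot 0 is used for the
 * slow-path TQM ring and slots 1..tqm_fp_rings_count for the fast-path
 * rings, matching the tqm_sp and tqm_ring0..7 request fields walked by
 * bnxt_hwrm_func_backing_store_cfg() below.
 */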
int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables)
{
	struct hwrm_func_backing_store_cfg_input req = {0};
	struct hwrm_func_backing_store_cfg_output *resp =
		bp->hwrm_cmd_resp_addr;
	struct bnxt_ctx_mem_info *ctx = bp->ctx;
	struct bnxt_ctx_pg_info *ctx_pg;
	uint32_t *num_entries;
	uint64_t *pg_dir;
	uint8_t *pg_attr;
	uint32_t ena;
	int i, rc;

	if (!ctx)
		return 0;

	HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB);
	req.enables = rte_cpu_to_le_32(enables);

	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
		ctx_pg = &ctx->qp_mem;
		req.qp_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
		req.qp_num_qp1_entries =
			rte_cpu_to_le_16(ctx->qp_min_qp1_entries);
		req.qp_num_l2_entries =
			rte_cpu_to_le_16(ctx->qp_max_l2_entries);
		req.qp_entry_size = rte_cpu_to_le_16(ctx->qp_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.qpc_pg_size_qpc_lvl,
				      &req.qpc_page_dir);
	}

	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
		ctx_pg = &ctx->srq_mem;
		req.srq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
		req.srq_num_l2_entries =
			rte_cpu_to_le_16(ctx->srq_max_l2_entries);
		req.srq_entry_size = rte_cpu_to_le_16(ctx->srq_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.srq_pg_size_srq_lvl,
				      &req.srq_page_dir);
	}

	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
		ctx_pg = &ctx->cq_mem;
		req.cq_num_entries = rte_cpu_to_le_32(ctx_pg->entries);
		req.cq_num_l2_entries =
			rte_cpu_to_le_16(ctx->cq_max_l2_entries);
		req.cq_entry_size = rte_cpu_to_le_16(ctx->cq_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.cq_pg_size_cq_lvl,
				      &req.cq_page_dir);
	}

	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
		ctx_pg = &ctx->vnic_mem;
		req.vnic_num_vnic_entries =
			rte_cpu_to_le_16(ctx->vnic_max_vnic_entries);
		req.vnic_num_ring_table_entries =
			rte_cpu_to_le_16(ctx->vnic_max_ring_table_entries);
		req.vnic_entry_size = rte_cpu_to_le_16(ctx->vnic_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.vnic_pg_size_vnic_lvl,
				      &req.vnic_page_dir);
	}

	if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
		ctx_pg = &ctx->stat_mem;
		req.stat_num_entries = rte_cpu_to_le_16(ctx->stat_max_entries);
		req.stat_entry_size = rte_cpu_to_le_16(ctx->stat_entry_size);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
				      &req.stat_pg_size_stat_lvl,
				      &req.stat_page_dir);
	}

	req.tqm_entry_size = rte_cpu_to_le_16(ctx->tqm_entry_size);
	num_entries = &req.tqm_sp_num_entries;
	pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl;
	pg_dir = &req.tqm_sp_page_dir;
	ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
	for (i = 0; i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
		if (!(enables & ena))
			continue;

		ctx_pg = ctx->tqm_mem[i];
		*num_entries = rte_cpu_to_le_32(ctx_pg->entries);
		bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_ext_port_qstats(struct bnxt *bp)
{
	struct hwrm_port_qstats_ext_input req = {0};
	struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_pf_info *pf = bp->pf;
	int rc;

	if (!(bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS ||
	      bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS))
		return 0;

	HWRM_PREP(&req, HWRM_PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB);

	req.port_id = rte_cpu_to_le_16(pf->port_id);
	if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) {
		req.tx_stat_host_addr =
			rte_cpu_to_le_64(bp->hw_tx_port_stats_ext_map);
		req.tx_stat_size =
			rte_cpu_to_le_16(sizeof(struct tx_port_stats_ext));
	}
	if (bp->flags & BNXT_FLAG_EXT_RX_PORT_STATS) {
		req.rx_stat_host_addr =
			rte_cpu_to_le_64(bp->hw_rx_port_stats_ext_map);
		req.rx_stat_size =
			rte_cpu_to_le_16(sizeof(struct rx_port_stats_ext));
	}
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	if (rc) {
		bp->fw_rx_port_stats_ext_size = 0;
		bp->fw_tx_port_stats_ext_size = 0;
	} else {
		bp->fw_rx_port_stats_ext_size =
			rte_le_to_cpu_16(resp->rx_stat_size);
		bp->fw_tx_port_stats_ext_size =
			rte_le_to_cpu_16(resp->tx_stat_size);
	}

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
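
/*
 * The firmware echoes back how many bytes of extended statistics it
 * actually filled in; older firmware may return less than the host's
 * sizeof(struct rx_port_stats_ext) or sizeof(struct tx_port_stats_ext),
 * so consumers must trust bp->fw_rx_port_stats_ext_size and
 * bp->fw_tx_port_stats_ext_size rather than the host structure sizes.
 */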
int
bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type)
{
	struct hwrm_cfa_redirect_tunnel_type_alloc_input req = {0};
	struct hwrm_cfa_redirect_tunnel_type_alloc_output *resp =
		bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_CHIMP_MB);
	req.tunnel_type = type;
	req.dest_fid = bp->fw_fid;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();

	HWRM_UNLOCK();

	return rc;
}

int
bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type)
{
	struct hwrm_cfa_redirect_tunnel_type_free_input req = {0};
	struct hwrm_cfa_redirect_tunnel_type_free_output *resp =
		bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_CHIMP_MB);
	req.tunnel_type = type;
	req.dest_fid = bp->fw_fid;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type)
{
	struct hwrm_cfa_redirect_query_tunnel_type_input req = {0};
	struct hwrm_cfa_redirect_query_tunnel_type_output *resp =
		bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(&req, HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_CHIMP_MB);
	req.src_fid = bp->fw_fid;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();

	if (type)
		*type = rte_le_to_cpu_32(resp->tunnel_mask);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type,
				   uint16_t *dst_fid)
{
	struct hwrm_cfa_redirect_tunnel_type_info_input req = {0};
	struct hwrm_cfa_redirect_tunnel_type_info_output *resp =
		bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB);
	req.src_fid = bp->fw_fid;
	req.tunnel_type = tun_type;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();

	if (dst_fid)
		*dst_fid = rte_le_to_cpu_16(resp->dest_fid);

	PMD_DRV_LOG(DEBUG, "dst_fid: %x\n", resp->dest_fid);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_set_mac(struct bnxt *bp)
{
	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_vf_cfg_input req = {0};
	int rc = 0;

	if (!BNXT_VF(bp))
		return 0;

	HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);

	req.enables =
		rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, bp->mac_addr, RTE_ETHER_ADDR_LEN);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
{
	struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_drv_if_change_input req = {0};
	uint32_t flags;
	int rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
		return 0;

	/* Do not issue FUNC_DRV_IF_CHANGE during reset recovery.
	 * If we issue FUNC_DRV_IF_CHANGE with flags down before
	 * FUNC_DRV_UNRGTR, the FW resets before FUNC_DRV_UNRGTR is processed.
	 */
	if (!up && (bp->flags & BNXT_FLAG_FW_RESET))
		return 0;

	HWRM_PREP(&req, HWRM_FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB);

	if (up)
		req.flags =
		rte_cpu_to_le_32(HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();
	flags = rte_le_to_cpu_32(resp->flags);
	HWRM_UNLOCK();

	if (!up)
		return 0;

	if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE) {
		PMD_DRV_LOG(INFO, "FW reset happened while port was down\n");
		bp->flags |= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE;
	}

	return 0;
}

int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
{
	struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_error_recovery_info *info = bp->recovery_info;
	struct hwrm_error_recovery_qcfg_input req = {0};
	uint32_t flags = 0;
	unsigned int i;
	int rc;

	/* Older FW does not have error recovery support */
	if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
		return 0;

	HWRM_PREP(&req, HWRM_ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	flags = rte_le_to_cpu_32(resp->flags);
	if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST)
		info->flags |= BNXT_FLAG_ERROR_RECOVERY_HOST;
	else if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
		info->flags |= BNXT_FLAG_ERROR_RECOVERY_CO_CPU;

	if ((info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) &&
	    !(bp->flags & BNXT_FLAG_KONG_MB_EN)) {
		rc = -EINVAL;
		goto err;
	}

	/* FW returned values are in units of 100msec */
	info->driver_polling_freq =
		rte_le_to_cpu_32(resp->driver_polling_freq) * 100;
	info->master_func_wait_period =
		rte_le_to_cpu_32(resp->master_func_wait_period) * 100;
	info->normal_func_wait_period =
		rte_le_to_cpu_32(resp->normal_func_wait_period) * 100;
	info->master_func_wait_period_after_reset =
		rte_le_to_cpu_32(resp->master_func_wait_period_after_reset) * 100;
	info->max_bailout_time_after_reset =
		rte_le_to_cpu_32(resp->max_bailout_time_after_reset) * 100;
	info->status_regs[BNXT_FW_STATUS_REG] =
		rte_le_to_cpu_32(resp->fw_health_status_reg);
	info->status_regs[BNXT_FW_HEARTBEAT_CNT_REG] =
		rte_le_to_cpu_32(resp->fw_heartbeat_reg);
	info->status_regs[BNXT_FW_RECOVERY_CNT_REG] =
		rte_le_to_cpu_32(resp->fw_reset_cnt_reg);
	info->status_regs[BNXT_FW_RESET_INPROG_REG] =
		rte_le_to_cpu_32(resp->reset_inprogress_reg);
	info->reg_array_cnt =
		rte_le_to_cpu_32(resp->reg_array_cnt);

	if (info->reg_array_cnt >= BNXT_NUM_RESET_REG) {
		rc = -EINVAL;
		goto err;
	}

	for (i = 0; i < info->reg_array_cnt; i++) {
		info->reset_reg[i] =
			rte_le_to_cpu_32(resp->reset_reg[i]);
		info->reset_reg_val[i] =
			rte_le_to_cpu_32(resp->reset_reg_val[i]);
		info->delay_after_reset[i] =
			resp->delay_after_reset[i];
	}
err:
	HWRM_UNLOCK();

	/* Map the FW status registers */
	if (!rc)
		rc = bnxt_map_fw_health_status_regs(bp);

	if (rc) {
		rte_free(bp->recovery_info);
		bp->recovery_info = NULL;
	}
	return rc;
}
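
/*
 * Hedged summary of how the values queried above are consumed: a health
 * monitor is expected to poll info->status_regs[BNXT_FW_HEARTBEAT_CNT_REG]
 * roughly every info->driver_polling_freq milliseconds, treat a stalled
 * heartbeat or an incremented BNXT_FW_RECOVERY_CNT_REG value as a reset
 * signal, and then walk info->reset_reg[] / reset_reg_val[] with the
 * per-step delay_after_reset[] pauses to bring the device back.
 */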
int bnxt_hwrm_fw_reset(struct bnxt *bp)
{
	struct hwrm_fw_reset_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_fw_reset_input req = {0};
	int rc;

	if (!BNXT_PF(bp))
		return -EOPNOTSUPP;

	HWRM_PREP(&req, HWRM_FW_RESET, BNXT_USE_KONG(bp));

	req.embedded_proc_type =
		HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
	req.selfrst_status =
		HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
	req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req),
				    BNXT_USE_KONG(bp));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp)
{
	struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_port_ts_query_input req = {0};
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	uint32_t flags = 0;
	int rc;

	if (!ptp)
		return 0;

	HWRM_PREP(&req, HWRM_PORT_TS_QUERY, BNXT_USE_CHIMP_MB);

	switch (path) {
	case BNXT_PTP_FLAGS_PATH_TX:
		flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX;
		break;
	case BNXT_PTP_FLAGS_PATH_RX:
		flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX;
		break;
	case BNXT_PTP_FLAGS_CURRENT_TIME:
		flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_CURRENT_TIME;
		break;
	}

	req.flags = rte_cpu_to_le_32(flags);
	req.port_id = rte_cpu_to_le_16(bp->pf->port_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	if (timestamp) {
		/* Assemble the 64-bit timestamp from two LE 32-bit words. */
		*timestamp = rte_le_to_cpu_32(resp->ptp_msg_ts[0]);
		*timestamp |=
			(uint64_t)(rte_le_to_cpu_32(resp->ptp_msg_ts[1])) << 32;
	}
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_cfa_counter_qcaps(struct bnxt *bp, uint16_t *max_fc)
{
	int rc = 0;
	struct hwrm_cfa_counter_qcaps_input req = {0};
	struct hwrm_cfa_counter_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
		PMD_DRV_LOG(DEBUG,
			    "Not a PF or trusted VF. Command not supported\n");
		return 0;
	}

	HWRM_PREP(&req, HWRM_CFA_COUNTER_QCAPS, BNXT_USE_KONG(bp));
	req.target_id = rte_cpu_to_le_16(bp->fw_fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));

	HWRM_CHECK_RESULT();
	if (max_fc)
		*max_fc = rte_le_to_cpu_16(resp->max_rx_fc);
	HWRM_UNLOCK();

	return rc;
}
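
/*
 * The CFA flow-counter helpers below are used together, roughly in this
 * order (sketch; assumes a caller-provided DMA table at tbl_dma_addr and
 * a counter type cntr_type):
 *
 *	uint16_t max_fc, ctx_id;
 *
 *	bnxt_hwrm_cfa_counter_qcaps(bp, &max_fc);
 *	bnxt_hwrm_ctx_rgtr(bp, tbl_dma_addr, &ctx_id);
 *	bnxt_hwrm_cfa_counter_cfg(bp, BNXT_DIR_RX, cntr_type, ctx_id,
 *				  max_fc, true);
 *	// ... periodically pull counters into the table ...
 *	bnxt_hwrm_cfa_counter_qstats(bp, BNXT_DIR_RX, cntr_type, max_fc);
 *	bnxt_hwrm_ctx_unrgtr(bp, ctx_id);
 */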
int bnxt_hwrm_ctx_rgtr(struct bnxt *bp, rte_iova_t dma_addr, uint16_t *ctx_id)
{
	int rc = 0;
	struct hwrm_cfa_ctx_mem_rgtr_input req = {.req_type = 0 };
	struct hwrm_cfa_ctx_mem_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
		PMD_DRV_LOG(DEBUG,
			    "Not a PF or trusted VF. Command not supported\n");
		return 0;
	}

	HWRM_PREP(&req, HWRM_CFA_CTX_MEM_RGTR, BNXT_USE_KONG(bp));

	req.page_level = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_0;
	req.page_size = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_2M;
	req.page_dir = rte_cpu_to_le_64(dma_addr);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));

	HWRM_CHECK_RESULT();
	if (ctx_id) {
		*ctx_id = rte_le_to_cpu_16(resp->ctx_id);
		PMD_DRV_LOG(DEBUG, "ctx_id = %d\n", *ctx_id);
	}
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_ctx_unrgtr(struct bnxt *bp, uint16_t ctx_id)
{
	int rc = 0;
	struct hwrm_cfa_ctx_mem_unrgtr_input req = {.req_type = 0 };
	struct hwrm_cfa_ctx_mem_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
		PMD_DRV_LOG(DEBUG,
			    "Not a PF or trusted VF. Command not supported\n");
		return 0;
	}

	HWRM_PREP(&req, HWRM_CFA_CTX_MEM_UNRGTR, BNXT_USE_KONG(bp));

	req.ctx_id = rte_cpu_to_le_16(ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_cfa_counter_cfg(struct bnxt *bp, enum bnxt_flow_dir dir,
			      uint16_t cntr, uint16_t ctx_id,
			      uint32_t num_entries, bool enable)
{
	struct hwrm_cfa_counter_cfg_input req = {0};
	struct hwrm_cfa_counter_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t flags = 0;
	int rc;

	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
		PMD_DRV_LOG(DEBUG,
			    "Not a PF or trusted VF. Command not supported\n");
		return 0;
	}

	HWRM_PREP(&req, HWRM_CFA_COUNTER_CFG, BNXT_USE_KONG(bp));

	req.target_id = rte_cpu_to_le_16(bp->fw_fid);
	req.counter_type = rte_cpu_to_le_16(cntr);
	flags = enable ? HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_ENABLE :
		HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_DISABLE;
	flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PULL;
	if (dir == BNXT_DIR_RX)
		flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_RX;
	else if (dir == BNXT_DIR_TX)
		flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_TX;
	req.flags = rte_cpu_to_le_16(flags);
	req.ctx_id = rte_cpu_to_le_16(ctx_id);
	req.num_entries = rte_cpu_to_le_32(num_entries);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_cfa_counter_qstats(struct bnxt *bp,
				 enum bnxt_flow_dir dir,
				 uint16_t cntr,
				 uint16_t num_entries)
{
	struct hwrm_cfa_counter_qstats_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_counter_qstats_input req = {0};
	uint16_t flow_ctx_id = 0;
	uint16_t flags = 0;
	int rc = 0;

	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
		PMD_DRV_LOG(DEBUG,
			    "Not a PF or trusted VF. Command not supported\n");
		return 0;
	}

	if (dir == BNXT_DIR_RX) {
		flow_ctx_id = bp->flow_stat->rx_fc_in_tbl.ctx_id;
		flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_RX;
	} else if (dir == BNXT_DIR_TX) {
		flow_ctx_id = bp->flow_stat->tx_fc_in_tbl.ctx_id;
		flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_TX;
	}

	HWRM_PREP(&req, HWRM_CFA_COUNTER_QSTATS, BNXT_USE_KONG(bp));
	req.target_id = rte_cpu_to_le_16(bp->fw_fid);
	req.counter_type = rte_cpu_to_le_16(cntr);
	req.input_flow_ctx_id = rte_cpu_to_le_16(flow_ctx_id);
	req.num_entries = rte_cpu_to_le_16(num_entries);
	req.flags = rte_cpu_to_le_16(flags);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_cfa_vfr_alloc(struct bnxt *bp, uint16_t vf_idx)
{
	struct hwrm_cfa_vfr_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_vfr_alloc_input req = {0};
	int rc;

	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
		PMD_DRV_LOG(DEBUG,
			    "Not a PF or trusted VF. Command not supported\n");
		return 0;
	}

	HWRM_PREP(&req, HWRM_CFA_VFR_ALLOC, BNXT_USE_CHIMP_MB);
	req.vf_id = rte_cpu_to_le_16(vf_idx);
	snprintf(req.vfr_name, sizeof(req.vfr_name), "%svfr%d",
		 bp->eth_dev->data->name, vf_idx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();

	HWRM_UNLOCK();
	PMD_DRV_LOG(DEBUG, "VFR %d allocated\n", vf_idx);
	return rc;
}

int bnxt_hwrm_cfa_vfr_free(struct bnxt *bp, uint16_t vf_idx)
{
	struct hwrm_cfa_vfr_free_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_vfr_free_input req = {0};
	int rc;

	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
		PMD_DRV_LOG(DEBUG,
			    "Not a PF or trusted VF. Command not supported\n");
		return 0;
	}

	HWRM_PREP(&req, HWRM_CFA_VFR_FREE, BNXT_USE_CHIMP_MB);
	req.vf_id = rte_cpu_to_le_16(vf_idx);
	snprintf(req.vfr_name, sizeof(req.vfr_name), "%svfr%d",
		 bp->eth_dev->data->name, vf_idx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();
	PMD_DRV_LOG(DEBUG, "VFR %d freed\n", vf_idx);
	return rc;
}

int bnxt_hwrm_first_vf_id_query(struct bnxt *bp, uint16_t fid,
				uint16_t *first_vf_id)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();

	if (first_vf_id)
		*first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_cfa_pair_alloc(struct bnxt *bp, struct bnxt_representor *rep_bp)
{
	struct hwrm_cfa_pair_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_pair_alloc_input req = {0};
	int rc;

	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
		PMD_DRV_LOG(DEBUG,
			    "Not a PF or trusted VF. Command not supported\n");
		return 0;
	}

	HWRM_PREP(&req, HWRM_CFA_PAIR_ALLOC, BNXT_USE_CHIMP_MB);
	req.pair_mode = HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW;
	snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
		 bp->eth_dev->data->name, rep_bp->vf_id);

	req.pf_b_id = rte_cpu_to_le_32(rep_bp->rep_based_pf);
	req.vf_b_id = rte_cpu_to_le_16(rep_bp->vf_id);
	req.vf_a_id = rte_cpu_to_le_16(bp->fw_fid);
	req.host_b_id = 1; /* TBD - Confirm if this is OK */

	req.enables |= rep_bp->flags & BNXT_REP_Q_R2F_VALID ?
		       HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_AB_VALID : 0;
	req.enables |= rep_bp->flags & BNXT_REP_Q_F2R_VALID ?
		       HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_Q_BA_VALID : 0;
	req.enables |= rep_bp->flags & BNXT_REP_FC_R2F_VALID ?
		       HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_AB_VALID : 0;
	req.enables |= rep_bp->flags & BNXT_REP_FC_F2R_VALID ?
		       HWRM_CFA_PAIR_ALLOC_INPUT_ENABLES_FC_BA_VALID : 0;

	req.q_ab = rep_bp->rep_q_r2f;
	req.q_ba = rep_bp->rep_q_f2r;
	req.fc_ab = rep_bp->rep_fc_r2f;
	req.fc_ba = rep_bp->rep_fc_f2r;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();

	HWRM_UNLOCK();
	PMD_DRV_LOG(DEBUG, "%s %d allocated\n",
		    BNXT_REP_PF(rep_bp) ? "PFR" : "VFR", rep_bp->vf_id);
	return rc;
}

int bnxt_hwrm_cfa_pair_free(struct bnxt *bp, struct bnxt_representor *rep_bp)
{
	struct hwrm_cfa_pair_free_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_pair_free_input req = {0};
	int rc;

	if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) {
		PMD_DRV_LOG(DEBUG,
			    "Not a PF or trusted VF. Command not supported\n");
		return 0;
	}

	HWRM_PREP(&req, HWRM_CFA_PAIR_FREE, BNXT_USE_CHIMP_MB);
	snprintf(req.pair_name, sizeof(req.pair_name), "%svfr%d",
		 bp->eth_dev->data->name, rep_bp->vf_id);
	req.pf_b_id = rte_cpu_to_le_32(rep_bp->rep_based_pf);
	req.vf_id = rte_cpu_to_le_16(rep_bp->vf_id);
	req.pair_mode = HWRM_CFA_PAIR_FREE_INPUT_PAIR_MODE_REP2FN_TRUFLOW;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();
	PMD_DRV_LOG(DEBUG, "%s %d freed\n", BNXT_REP_PF(rep_bp) ? "PFR" : "VFR",
		    rep_bp->vf_id);
	return rc;
}