/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>
#include <rte_io.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#define HWRM_CMD_TIMEOUT		10000
struct bnxt_plcmodes_cfg {
	uint32_t	flags;
	uint16_t	jumbo_thresh;
	uint16_t	hds_offset;
	uint16_t	hds_threshold;
};
static int page_getenum(size_t size)
{
	if (size <= 1 << 4)
		return 4;
	if (size <= 1 << 12)
		return 12;
	if (size <= 1 << 13)
		return 13;
	if (size <= 1 << 16)
		return 16;
	if (size <= 1 << 21)
		return 21;
	if (size <= 1 << 22)
		return 22;
	if (size <= 1 << 30)
		return 30;
	RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
	return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
	return 1 << page_getenum(size);
}
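
/*
 * Worked example for the two helpers above: page_getenum(3000) returns 12,
 * the smallest supported enum whose size (1 << 12 = 4096) fits the request,
 * so page_roundup(3000) returns 4096.
 */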
/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * command was failed by the ChiMP.
 */
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
					uint32_t msg_len)
{
	unsigned int i;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;
	uint8_t *bar;
	uint8_t *valid;
	uint16_t max_req_len = bp->max_req_len;
	struct hwrm_short_input short_input = { 0 };

	if (bp->flags & BNXT_FLAG_SHORT_CMD) {
		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

		memset(short_cmd_req, 0, bp->max_req_len);
		memcpy(short_cmd_req, req, msg_len);

		short_input.req_type = rte_cpu_to_le_16(req->req_type);
		short_input.signature = rte_cpu_to_le_16(
					HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
		short_input.size = rte_cpu_to_le_16(msg_len);
		short_input.req_addr =
			rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

		data = (uint32_t *)&short_input;
		msg_len = sizeof(short_input);

		/* Sync memory write before updating doorbell */
		rte_wmb();

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}

	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(*data, bar);
		data++;
	}

	/* Zero the rest of the request space */
	for (; i < max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(0, bar);
	}

	/* Ring channel doorbell */
	bar = (uint8_t *)bp->bar0 + 0x100;
	rte_write32(1, bar);

	/* Poll for the valid bit */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len */
		rte_rmb();
		if (resp->resp_len && resp->resp_len <=
				bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)
				break;
		}
		rte_delay_us(600);
	}

	if (i >= HWRM_CMD_TIMEOUT) {
		RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
			req->req_type);
		return -1;
	}
	return 0;
}
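
/*
 * Timing note for the poll loop above: with HWRM_CMD_TIMEOUT at 10000
 * iterations and the 600us pause per iteration, a command is given
 * roughly 6 seconds to post a valid response before it is failed.
 */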
/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock, and does initial processing.
 *
 * HWRM_CHECK_RESULT() checks the HWRM status and, on failure, logs the
 * error, releases the spinlock, and returns the error code. If the
 * regular int return codes are not used by the function, HWRM_CHECK_RESULT()
 * should not be used directly; rather it should be copied and modified to
 * suit the function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type) do { \
	rte_spinlock_lock(&bp->hwrm_lock); \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
	req.cmpl_ring = rte_cpu_to_le_16(-1); \
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
	req.target_id = rte_cpu_to_le_16(0xffff); \
	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)
#define HWRM_CHECK_RESULT() do {\
	if (rc) { \
		RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
			__func__, rc); \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		return rc; \
	} \
	if (resp->error_code) { \
		rc = rte_le_to_cpu_16(resp->error_code); \
		if (resp->resp_len >= 16) { \
			struct hwrm_err_output *tmp_hwrm_err_op = \
						(void *)resp; \
			RTE_LOG(ERR, PMD, \
				"%s error %d:%d:%08x:%04x\n", \
				__func__, \
				rc, tmp_hwrm_err_op->cmd_err, \
				rte_le_to_cpu_32(\
					tmp_hwrm_err_op->opaque_0), \
				rte_le_to_cpu_16(\
					tmp_hwrm_err_op->opaque_1)); \
		} else { \
			RTE_LOG(ERR, PMD, \
				"%s error %d\n", __func__, rc); \
		} \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		return rc; \
	} \
} while (0)
#define HWRM_UNLOCK()		rte_spinlock_unlock(&bp->hwrm_lock)
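
/*
 * Sketch of the canonical call sequence built from the macros above; the
 * 'xxx'/'XXX' command name and the req/resp types are placeholders, and
 * every bnxt_hwrm_*() function below follows this shape:
 *
 *	int rc = 0;
 *	struct hwrm_xxx_input req = {.req_type = 0 };
 *	struct hwrm_xxx_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *	HWRM_PREP(req, XXX);		// take hwrm_lock, fill common header
 *	// ...set command-specific req fields...
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT();		// on error: logs, unlocks, returns rc
 *	// ...read resp fields while the lock is still held...
 *	HWRM_UNLOCK();
 *	return rc;
 */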
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.mask = 0;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
				 struct bnxt_vnic_info *vnic,
				 uint16_t vlan_count,
				 struct bnxt_vlan_table_entry *vlan_table)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t mask = 0;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	/* FIXME add multicast flag, when multicast adding options is supported
	 * by ethtool.
	 */
	if (vnic->flags & BNXT_VNIC_INFO_BCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
	if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	if (vnic->flags & BNXT_VNIC_INFO_MCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
	if (vnic->mc_addr_cnt) {
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
		req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
		req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
	}
	if (vlan_table) {
		if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
			mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
		req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
			rte_mem_virt2iova(vlan_table));
		req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
	}
	req.mask = rte_cpu_to_le_32(mask);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
			uint16_t vlan_count,
			struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
	int rc = 0;
	struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
	struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
						bp->hwrm_cmd_resp_addr;

	/*
	 * Older HWRM versions did not support this command, and the
	 * set_rx_mask list was used for anti-spoof. In 1.8.0, the TX path
	 * configuration was removed from the set_rx_mask call, and this
	 * command was added.
	 *
	 * This command is also present from 1.7.8.11 and higher.
	 */
	if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
		if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
			if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
					(11)))
				return 0;
		}
	}
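	/*
	 * Example of the packed format compared above: bnxt_hwrm_ver_get()
	 * stores bp->fw_ver as (maj << 24) | (min << 16) | (bld << 8) | rsvd,
	 * so ((1 << 24) | (8 << 16)) is firmware 1.8.0.0 and
	 * ((1 << 24) | (7 << 16) | (8 << 8) | 11) is firmware 1.7.8.11.
	 */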
	HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
	req.fid = rte_cpu_to_le_16(fid);

	req.vlan_tag_mask_tbl_addr =
		rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
	req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
			      struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (filter->fw_l2_filter_id == UINT64_MAX)
		return 0;

	HWRM_PREP(req, CFA_L2_FILTER_FREE);

	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	filter->fw_l2_filter_id = -1;

	return 0;
}
int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
			    uint16_t dst_id,
			    struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	const struct rte_eth_vmdq_rx_conf *conf =
		&dev_conf->rx_adv_conf.vmdq_rx_conf;
	uint32_t enables = 0;
	uint16_t j = dst_id - 1;

	//TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
	if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
	    conf->pool_map[j].pools & (1UL << j)) {
		RTE_LOG(DEBUG, PMD,
			"Add vlan %u to vmdq pool %u\n",
			conf->pool_map[j].vlan_id, j);

		filter->l2_ivlan = conf->pool_map[j].vlan_id;
		filter->enables |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
	}

	if (filter->fw_l2_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_l2_filter(bp, filter);

	HWRM_PREP(req, CFA_L2_FILTER_ALLOC);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
		req.l2_ivlan = filter->l2_ivlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
		req.l2_ivlan_mask = filter->l2_ivlan_mask;
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
		req.src_id = rte_cpu_to_le_32(filter->src_id);
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
		req.src_type = filter->src_type;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
{
	struct hwrm_port_mac_cfg_input req = {.req_type = 0};
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
	uint32_t flags = 0;
	int rc;

	if (!ptp)
		return 0;

	HWRM_PREP(req, PORT_MAC_CFG);

	if (ptp->rx_filter)
		flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
	else
		flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
	if (ptp->tx_tstamp_en)
		flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
	else
		flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
	req.flags = rte_cpu_to_le_32(flags);
	req.enables =
	rte_cpu_to_le_32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
	req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_UNLOCK();

	return rc;
}
static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
	struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

	/* if (bp->hwrm_spec_code < 0x10801 || ptp) TBD */
	if (ptp)
		return 0;

	HWRM_PREP(req, PORT_MAC_PTP_QCFG);

	req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	if (!(resp->flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS)) {
		HWRM_UNLOCK();
		return 0;
	}

	ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
	if (!ptp) {
		HWRM_UNLOCK();
		return -ENOMEM;
	}

	ptp->rx_regs[BNXT_PTP_RX_TS_L] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
	ptp->rx_regs[BNXT_PTP_RX_TS_H] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
	ptp->rx_regs[BNXT_PTP_RX_SEQ] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
	ptp->rx_regs[BNXT_PTP_RX_FIFO] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
	ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
	ptp->tx_regs[BNXT_PTP_TX_TS_L] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
	ptp->tx_regs[BNXT_PTP_TX_TS_H] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
	ptp->tx_regs[BNXT_PTP_TX_SEQ] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
	ptp->tx_regs[BNXT_PTP_TX_FIFO] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);

	ptp->bp = bp;
	bp->ptp_cfg = ptp;

	HWRM_UNLOCK();

	return 0;
}
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t new_max_vfs;
	uint32_t flags;
	int i;

	HWRM_PREP(req, FUNC_QCAPS);

	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	flags = rte_le_to_cpu_32(resp->flags);
	if (BNXT_PF(bp)) {
		bp->pf.port_id = resp->port_id;
		bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		new_max_vfs = bp->pdev->max_vfs;
		if (new_max_vfs != bp->pf.max_vfs) {
			if (bp->pf.vf_info)
				rte_free(bp->pf.vf_info);
			bp->pf.vf_info = rte_malloc("bnxt_vf_info",
			    sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
			bp->pf.max_vfs = new_max_vfs;
			for (i = 0; i < new_max_vfs; i++) {
				bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
				bp->pf.vf_info[i].vlan_table =
					rte_zmalloc("VF VLAN table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_table == NULL)
					RTE_LOG(ERR, PMD,
					"Fail to alloc VLAN table for VF %d\n",
					i);
				else
					rte_mem_lock_page(
						bp->pf.vf_info[i].vlan_table);
				bp->pf.vf_info[i].vlan_as_table =
					rte_zmalloc("VF VLAN AS table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_as_table == NULL)
					RTE_LOG(ERR, PMD,
					"Alloc VLAN AS table for VF %d fail\n",
					i);
				else
					rte_mem_lock_page(
					    bp->pf.vf_info[i].vlan_as_table);
				STAILQ_INIT(&bp->pf.vf_info[i].filter);
			}
		}
	}

	bp->fw_fid = rte_le_to_cpu_32(resp->fid);
	memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
	/* TODO: For now, do not support VMDq/RFS on VFs. */
	if (BNXT_PF(bp)) {
		if (bp->pf.max_vfs)
			bp->max_vnics = 1;
		else
			bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	} else {
		bp->max_vnics = 1;
	}
	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
	if (BNXT_PF(bp)) {
		bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
		if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
			bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
			RTE_LOG(INFO, PMD, "PTP SUPPORTED\n");
			/* Release the lock before issuing a nested command */
			HWRM_UNLOCK();
			bnxt_hwrm_ptp_qcfg(bp);
			return rc;
		}
	}

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_RESET);

	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
	int rc;
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)
		return 0;

	HWRM_PREP(req, FUNC_DRV_RGTR);
	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

	if (BNXT_PF(bp)) {
		req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
		memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
		       RTE_MIN(sizeof(req.vf_req_fwd),
			       sizeof(bp->pf.vf_req_fwd)));
	}

	req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */
	//memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->flags |= BNXT_FLAG_REGISTERED;

	return rc;
}
int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_ver_get_input req = {.req_type = 0 };
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t my_version;
	uint32_t fw_version;
	uint16_t max_resp_len;
	char type[RTE_MEMZONE_NAMESIZE];
	uint32_t dev_caps_cfg;

	bp->max_req_len = HWRM_MAX_REQ_LEN;
	HWRM_PREP(req, VER_GET);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
		resp->hwrm_intf_maj, resp->hwrm_intf_min,
		resp->hwrm_intf_upd,
		resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
	bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
			(resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
	RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

	my_version = HWRM_VERSION_MAJOR << 16;
	my_version |= HWRM_VERSION_MINOR << 8;
	my_version |= HWRM_VERSION_UPDATE;

	fw_version = resp->hwrm_intf_maj << 16;
	fw_version |= resp->hwrm_intf_min << 8;
	fw_version |= resp->hwrm_intf_upd;
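	/*
	 * Example: interface version 1.7.8 packs to 0x010708, so the
	 * driver/firmware comparisons below reduce to plain integer
	 * compares.
	 */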
	if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
		RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
		rc = -EINVAL;
		goto error;
	}

	if (my_version != fw_version) {
		RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
		if (my_version < fw_version) {
			RTE_LOG(INFO, PMD,
				"Firmware API version is newer than driver.\n");
			RTE_LOG(INFO, PMD,
				"The driver may be missing features.\n");
		} else {
			RTE_LOG(INFO, PMD,
				"Firmware API version is older than driver.\n");
			RTE_LOG(INFO, PMD,
				"Not all driver features may be functional.\n");
		}
	}

	if (bp->max_req_len > resp->max_req_win_len) {
		RTE_LOG(ERR, PMD, "Unsupported request length\n");
		rc = -EINVAL;
	}
	bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
	max_resp_len = resp->max_resp_len;
	dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

	if (bp->max_resp_len != max_resp_len) {
		sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
			bp->pdev->addr.domain, bp->pdev->addr.bus,
			bp->pdev->addr.devid, bp->pdev->addr.function);

		rte_free(bp->hwrm_cmd_resp_addr);

		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
		if (bp->hwrm_cmd_resp_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
		bp->hwrm_cmd_resp_dma_addr =
			rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
		if (bp->hwrm_cmd_resp_dma_addr == 0) {
			RTE_LOG(ERR, PMD,
			"Unable to map response buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}
		bp->max_resp_len = max_resp_len;
	}

	if ((dev_caps_cfg &
		HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg &
		HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
		RTE_LOG(DEBUG, PMD, "Short command supported\n");

		rte_free(bp->hwrm_short_cmd_req_addr);

		bp->hwrm_short_cmd_req_addr = rte_malloc(type,
							 bp->max_req_len, 0);
		if (bp->hwrm_short_cmd_req_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
		bp->hwrm_short_cmd_req_dma_addr =
			rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
		if (bp->hwrm_short_cmd_req_dma_addr == 0) {
			rte_free(bp->hwrm_short_cmd_req_addr);
			RTE_LOG(ERR, PMD,
				"Unable to map buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}

		bp->flags |= BNXT_FLAG_SHORT_CMD;
	}

error:
	HWRM_UNLOCK();
	return rc;
}
int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
	int rc;
	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))
		return 0;

	HWRM_PREP(req, FUNC_DRV_UNRGTR);
	req.flags = flags;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->flags &= ~BNXT_FLAG_REGISTERED;

	return rc;
}
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
	int rc = 0;
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, PORT_PHY_CFG);

	if (conf->link_up) {
		/* Setting Fixed Speed. But AutoNeg is ON, So disable it */
		if (bp->link_info.auto_mode && conf->link_speed) {
			req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
			RTE_LOG(DEBUG, PMD, "Disabling AutoNeg\n");
		}

		req.flags = rte_cpu_to_le_32(conf->phy_flags);
		req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (!conf->link_speed) {
			/* No speeds specified. Enable AutoNeg - all speeds */
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
		}
		/* AutoNeg - Advertise speeds specified. */
		if (conf->auto_link_speed_mask) {
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
			req.auto_link_speed_mask =
				conf->auto_link_speed_mask;
			enables |=
			HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
		}

		req.auto_duplex = conf->duplex;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		req.force_pause = conf->force_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause && !req.force_pause)
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		else
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

		req.enables = rte_cpu_to_le_32(enables);
	} else {
		req.flags =
		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
		RTE_LOG(INFO, PMD, "Force Link Down\n");
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
{
	int rc = 0;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_QCFG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	link_info->phy_link_status = resp->link;
	link_info->link_up =
		(link_info->phy_link_status ==
		 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
	link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
	link_info->duplex = resp->duplex_cfg;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;
	link_info->phy_type = resp->phy_type;
	link_info->media_type = resp->media_type;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, QUEUE_QPORTCFG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

	GET_QUEUE_INFO(0);
	GET_QUEUE_INFO(1);
	GET_QUEUE_INFO(2);
	GET_QUEUE_INFO(3);
	GET_QUEUE_INFO(4);
	GET_QUEUE_INFO(5);
	GET_QUEUE_INFO(6);
	GET_QUEUE_INFO(7);

	HWRM_UNLOCK();

	return rc;
}
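
/*
 * For reference, GET_QUEUE_INFO(0) above expands via token pasting to:
 *	bp->cos_queue[0].id = resp->queue_id0;
 *	bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 */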
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
	int rc = 0;
	uint32_t enables = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);
	req.length = rte_cpu_to_le_32(ring->ring_size);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.queue_id = bp->cos_queue[0].id;
		/* FALLTHROUGH */
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		if (stats_ctx_id != INVALID_STATS_CTX_ID)
			enables |=
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
		break;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		break;
	default:
		RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
			ring_type);
		HWRM_UNLOCK();
		return -1;
	}
	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		}
	}

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
	HWRM_UNLOCK();
	return rc;
}
int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
{
	int rc;
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_FREE);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		HWRM_UNLOCK();

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
				rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
			return rc;
		}
	}
	HWRM_UNLOCK();
	return 0;
}
int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
	int rc = 0;
	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_ALLOC);

	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	bp->grp_info[idx].fw_grp_id =
		rte_le_to_cpu_16(resp->ring_group_id);

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
	int rc;
	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_FREE);

	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
	return rc;
}
int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	int rc = 0;
	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
		return rc;

	HWRM_PREP(req, STAT_CTX_CLR_STATS);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			     unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_ALLOC);

	req.update_period_ms = rte_cpu_to_le_32(0);

	req.stats_dma_addr =
		rte_cpu_to_le_64(cpr->hw_stats_map);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

	HWRM_UNLOCK();
	bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

	return rc;
}
int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			    unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_FREE);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0, i, j;
	struct hwrm_vnic_alloc_input req = { 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
		vnic->start_grp_id, vnic->end_grp_id);
	for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
	vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
	vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				ETHER_CRC_LEN + VLAN_TAG_SIZE;
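	/*
	 * Example: with the default 1500-byte MTU this programs an MRU of
	 * 1500 + 14 (ETHER_HDR_LEN) + 4 (ETHER_CRC_LEN) + 4 (VLAN_TAG_SIZE)
	 * = 1522 bytes.
	 */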
	HWRM_PREP(req, VNIC_ALLOC);

	if (vnic->func_default)
		req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
	HWRM_UNLOCK();
	RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
	return rc;
}
static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
					struct bnxt_vnic_info *vnic,
					struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_QCFG);

	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	pmode->flags = rte_le_to_cpu_32(resp->flags);
	/* dflt_vnic bit doesn't exist in the _cfg command */
	pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
	pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
	pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
	pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

	HWRM_UNLOCK();

	return rc;
}
static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
				       struct bnxt_vnic_info *vnic,
				       struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_CFG);

	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
	req.flags = rte_cpu_to_le_32(pmode->flags);
	req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
	req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
	req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
	req.enables = rte_cpu_to_le_32(
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
	);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t ctx_enable_flag = 0;
	struct bnxt_plcmodes_cfg pmodes;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
	if (rc)
		return rc;

	HWRM_PREP(req, VNIC_CFG);

	/* Only RSS support for now TBD: COS & LB */
	req.enables =
		rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
	if (vnic->lb_rule != 0xffff)
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
	if (vnic->cos_rule != 0xffff)
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
	if (vnic->rss_rule != 0xffff) {
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
	}
	req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
	req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
	req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
	req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
	req.mru = rte_cpu_to_le_16(vnic->mru);
	if (vnic->func_default)
		req.flags |=
			rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
	if (vnic->vlan_strip)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
	if (vnic->bd_stall)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
	if (vnic->roce_dual)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
	if (vnic->roce_only)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
	if (vnic->rss_dflt_cr)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);

	return rc;
}
int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
			int16_t fw_vf_id)
{
	int rc = 0;
	struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
		return rc;
	}
	HWRM_PREP(req, VNIC_QCFG);

	req.enables =
		rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.vf_id = rte_cpu_to_le_16(fw_vf_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
	vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
	vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
	vnic->mru = rte_le_to_cpu_16(resp->mru);
	vnic->func_default = rte_le_to_cpu_32(
			resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
	vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
	vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
	vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
	vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
	vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
	HWRM_UNLOCK();
	RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);

	return rc;
}
int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
						bp->hwrm_cmd_resp_addr;

	if (vnic->rss_rule == 0xffff) {
		RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
		return rc;
	}
	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);

	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	vnic->rss_rule = INVALID_HW_RING_ID;

	return rc;
}
int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_free_input req = {.req_type = 0 };
	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	HWRM_PREP(req, VNIC_FREE);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	vnic->fw_vnic_id = INVALID_HW_RING_ID;
	return rc;
}
int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_CFG);

	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

	req.ring_grp_tbl_addr =
		rte_cpu_to_le_64(vnic->rss_table_dma_addr);
	req.hash_key_tbl_addr =
		rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
	req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
			       struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t size;

	HWRM_PREP(req, VNIC_PLCMODES_CFG);

	req.flags = rte_cpu_to_le_32(
			HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);

	req.enables = rte_cpu_to_le_32(
		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);

	size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
	size -= RTE_PKTMBUF_HEADROOM;

	req.jumbo_thresh = rte_cpu_to_le_16(size);
	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic, bool enable)
{
	int rc = 0;
	struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_TPA_CFG);

	if (enable) {
		req.enables = rte_cpu_to_le_32(
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
		req.flags = rte_cpu_to_le_32(
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
		req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
		req.max_agg_segs = rte_cpu_to_le_16(5);
		req.max_aggs =
			rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
		req.min_agg_len = rte_cpu_to_le_32(512);
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);

	HWRM_PREP(req, FUNC_CFG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->pf.vf_info[vf].random_mac = false;

	return rc;
}
int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
				  uint64_t *dropped)
{
	int rc = 0;
	struct hwrm_func_qstats_input req = {.req_type = 0};
	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QSTATS);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	if (dropped)
		*dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
			  struct rte_eth_stats *stats)
{
	int rc = 0;
	struct hwrm_func_qstats_input req = {.req_type = 0};
	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QSTATS);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
	stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
	stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
	stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
	stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
	stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);

	stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
	stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
	stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
	stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
	stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
	stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);

	stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
	stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);

	stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
{
	int rc = 0;
	struct hwrm_func_clr_stats_input req = {.req_type = 0};
	struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_CLR_STATS);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
/*
 * HWRM utility functions
 */
int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_clear(bp, cpr);
		if (rc)
			return rc;
	}
	return 0;
}
int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	int rc;
	unsigned int i;
	struct bnxt_cp_ring_info *cpr;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {

		if (i >= bp->rx_cp_nr_rings)
			cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
		else
			cpr = bp->rx_queues[i]->cp_ring;
		if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
			rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
			cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
			/*
			 * TODO. Need a better way to reset grp_info.stats_ctx
			 * for Rx rings only. stats_ctx is not saved for Tx
			 * in grp_info.
			 */
			bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
			if (rc)
				return rc;
		}
	}
	return 0;
}
int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
		if (rc)
			return rc;
	}
	return rc;
}
int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
	uint16_t idx;
	int rc = 0;

	for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {

		if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
			continue;

		rc = bnxt_hwrm_ring_grp_free(bp, idx);

		if (rc)
			return rc;
	}
	return rc;
}
static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			      unsigned int idx __rte_unused)
{
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

	bnxt_hwrm_ring_free(bp, cp_ring,
			HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
	bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
			sizeof(*cpr->cp_desc_ring));
	cpr->cp_raw_cons = 0;
}
int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
		unsigned int idx = bp->rx_cp_nr_rings + i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_TX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			memset(txr->tx_desc_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_desc_ring));
			memset(txr->tx_buf_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_buf_ring));
			txr->tx_prod = 0;
			txr->tx_cons = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_free_cp_ring(bp, cpr, idx);
			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
		struct bnxt_ring *ring = rxr->rx_ring_struct;
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
		unsigned int idx = i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_RX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
			memset(rxr->rx_desc_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_desc_ring));
			memset(rxr->rx_buf_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_buf_ring));
			rxr->rx_prod = 0;
			memset(rxr->ag_buf_ring, 0,
					rxr->ag_ring_struct->ring_size *
					sizeof(*rxr->ag_buf_ring));
			rxr->ag_prod = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_free_cp_ring(bp, cpr, idx);
			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	/* Default completion ring */
	{
		struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_free_cp_ring(bp, cpr, 0);
			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	return rc;
}
int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
{
	uint16_t i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		rc = bnxt_hwrm_ring_grp_alloc(bp, i);
		if (rc)
			return rc;
	}
	return rc;
}
void bnxt_free_hwrm_resources(struct bnxt *bp)
{
	/* Release memzone */
	rte_free(bp->hwrm_cmd_resp_addr);
	rte_free(bp->hwrm_short_cmd_req_addr);
	bp->hwrm_cmd_resp_addr = NULL;
	bp->hwrm_short_cmd_req_addr = NULL;
	bp->hwrm_cmd_resp_dma_addr = 0;
	bp->hwrm_short_cmd_req_dma_addr = 0;
}
int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
	struct rte_pci_device *pdev = bp->pdev;
	char type[RTE_MEMZONE_NAMESIZE];

	sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	bp->max_resp_len = HWRM_MAX_RESP_LEN;
	bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
	if (bp->hwrm_cmd_resp_addr == NULL)
		return -ENOMEM;
	rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
	bp->hwrm_cmd_resp_dma_addr =
		rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
	if (bp->hwrm_cmd_resp_dma_addr == 0) {
		RTE_LOG(ERR, PMD,
			"unable to map response address to physical memory\n");
		return -ENOMEM;
	}
	rte_spinlock_init(&bp->hwrm_lock);

	return 0;
}
int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			rc = bnxt_hwrm_clear_em_filter(bp, filter);
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
		else
			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
	}
	return rc;
}
static int
bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	struct rte_flow *flow;
	int rc = 0;

	STAILQ_FOREACH(flow, &vnic->flow_list, next) {
		filter = flow->filter;
		RTE_LOG(ERR, PMD, "filter type %d\n", filter->filter_type);
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			rc = bnxt_hwrm_clear_em_filter(bp, filter);
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
		else
			rc = bnxt_hwrm_clear_l2_filter(bp, filter);

		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);
	}
	return rc;
}
int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
						     filter);
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
							 filter);
		else
			rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
						     filter);
		if (rc)
			break;
	}
	return rc;
}
void bnxt_free_tunnel_ports(struct bnxt *bp)
{
	if (bp->vxlan_port_cnt)
		bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
	bp->vxlan_port = 0;
	if (bp->geneve_port_cnt)
		bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
	bp->geneve_port = 0;
}
void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
	int i;

	if (bp->vnic_info == NULL)
		return;

	/*
	 * Cleanup VNICs in reverse order, to make sure the L2 filter
	 * from vnic0 is last to be cleaned up.
	 */
	for (i = bp->nr_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		bnxt_clear_hwrm_vnic_flows(bp, vnic);

		bnxt_clear_hwrm_vnic_filters(bp, vnic);

		bnxt_hwrm_vnic_ctx_free(bp, vnic);

		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);

		bnxt_hwrm_vnic_free(bp, vnic);
	}
	/* Ring resources */
	bnxt_free_all_hwrm_rings(bp);
	bnxt_free_all_hwrm_ring_grps(bp);
	bnxt_free_all_hwrm_stat_ctxs(bp);
	bnxt_free_tunnel_ports(bp);
}
static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
{
	uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	switch (conf_link_speed) {
	case ETH_LINK_SPEED_10M_HD:
	case ETH_LINK_SPEED_100M_HD:
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
	}
	return hw_link_duplex;
}
static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
{
	return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
}
static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
{
	uint16_t eth_link_speed = 0;

	if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
		return ETH_LINK_SPEED_AUTONEG;

	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
	case ETH_LINK_SPEED_100M:
	case ETH_LINK_SPEED_100M_HD:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
		break;
	case ETH_LINK_SPEED_1G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
		break;
	case ETH_LINK_SPEED_2_5G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
		break;
	case ETH_LINK_SPEED_10G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
		break;
	case ETH_LINK_SPEED_20G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
		break;
	case ETH_LINK_SPEED_25G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
		break;
	case ETH_LINK_SPEED_40G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
		break;
	case ETH_LINK_SPEED_50G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
		break;
	default:
		RTE_LOG(ERR, PMD,
			"Unsupported link speed %d; default to AUTO\n",
			conf_link_speed);
		break;
	}
	return eth_link_speed;
}
#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
		ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
		ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
		ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
{
	uint32_t one_speed;

	if (link_speed == ETH_LINK_SPEED_AUTONEG)
		return 0;

	if (link_speed & ETH_LINK_SPEED_FIXED) {
		one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;

		if (one_speed & (one_speed - 1)) {
			RTE_LOG(ERR, PMD,
				"Invalid advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
		if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
			RTE_LOG(ERR, PMD,
				"Unsupported advertised speed (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	} else {
		if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
			RTE_LOG(ERR, PMD,
				"Unsupported advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	}
	return 0;
}
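
/*
 * The (one_speed & (one_speed - 1)) test above rejects masks with more
 * than one bit set: e.g. 0x10 & 0x0f == 0 (exactly one speed), while
 * 0x18 & 0x17 == 0x10 != 0 (two speeds requested for a fixed link).
 */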
static uint16_t
bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
{
	uint16_t ret = 0;

	if (link_speed == ETH_LINK_SPEED_AUTONEG) {
		if (bp->link_info.support_speeds)
			return bp->link_info.support_speeds;
		link_speed = BNXT_SUPPORTED_SPEEDS;
	}

	if (link_speed & ETH_LINK_SPEED_100M)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_100M_HD)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_1G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
	if (link_speed & ETH_LINK_SPEED_2_5G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
	if (link_speed & ETH_LINK_SPEED_10G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
	if (link_speed & ETH_LINK_SPEED_20G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
	if (link_speed & ETH_LINK_SPEED_25G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
	if (link_speed & ETH_LINK_SPEED_40G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
	if (link_speed & ETH_LINK_SPEED_50G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
	return ret;
}
static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
{
	uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;

	switch (hw_link_speed) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
		eth_link_speed = ETH_SPEED_NUM_100M;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
		eth_link_speed = ETH_SPEED_NUM_1G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
		eth_link_speed = ETH_SPEED_NUM_2_5G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
		eth_link_speed = ETH_SPEED_NUM_10G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
		eth_link_speed = ETH_SPEED_NUM_20G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
		eth_link_speed = ETH_SPEED_NUM_25G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
		eth_link_speed = ETH_SPEED_NUM_40G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
		eth_link_speed = ETH_SPEED_NUM_50G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
	default:
		RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
			hw_link_speed);
		break;
	}
	return eth_link_speed;
}
static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
{
	uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;

	switch (hw_link_duplex) {
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	default:
		RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
			hw_link_duplex);
		break;
	}
	return eth_link_duplex;
}
int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Get link config failed with rc %d\n", rc);
		goto exit;
	}
	if (link_info->link_speed)
		link->link_speed =
			bnxt_parse_hw_link_speed(link_info->link_speed);
	else
		link->link_speed = ETH_SPEED_NUM_NONE;
	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
	link->link_status = link_info->link_up;
	link->link_autoneg = link_info->auto_mode ==
		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
		ETH_LINK_FIXED : ETH_LINK_AUTONEG;
exit:
	return rc;
}
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
	int rc = 0;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_link_info link_req;
	uint16_t speed, autoneg;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
		return 0;

	rc = bnxt_valid_link_speed(dev_conf->link_speeds,
			bp->eth_dev->data->port_id);
	if (rc)
		goto error;

	memset(&link_req, 0, sizeof(link_req));
	link_req.link_up = link_up;
	if (!link_up)
		goto port_phy_cfg;

	autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
	if (autoneg == 1) {
		link_req.phy_flags |=
				HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		link_req.auto_link_speed_mask =
			bnxt_parse_eth_link_speed_mask(bp,
						       dev_conf->link_speeds);
	} else {
		if (bp->link_info.phy_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
		    bp->link_info.phy_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
		    bp->link_info.media_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
			RTE_LOG(ERR, PMD, "10GBase-T devices must autoneg\n");
			return -EINVAL;
		}

		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
		link_req.link_speed = speed;
	}
	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
	link_req.auto_pause = bp->link_info.auto_pause;
	link_req.force_pause = bp->link_info.force_pause;

port_phy_cfg:
	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Set link config failed with rc %d\n", rc);
	}

error:
	return rc;
}
int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(req, FUNC_QCFG);
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	/* Hard Coded.. 0xfff VLAN ID mask */
	bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;

	switch (resp->port_partition_type) {
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	default:
		bp->port_partition_type = 0;
		break;
	}

	return rc;
}
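/*
 * Used as a fallback by reserve_resources_from_vf(): if FUNC_QCAPS fails,
 * assume the VF holds exactly what the preceding FUNC_CFG asked for.
 */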
static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
				   struct hwrm_func_qcaps_output *qcaps)
{
	qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
	memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
	       sizeof(qcaps->mac_address));
	qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
	qcaps->max_rx_rings = fcfg->num_rx_rings;
	qcaps->max_tx_rings = fcfg->num_tx_rings;
	qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
	qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
	qcaps->first_vf_id = 0;
	qcaps->max_vnics = fcfg->num_vnics;
	qcaps->max_decap_records = 0;
	qcaps->max_encap_records = 0;
	qcaps->max_tx_wm_flows = 0;
	qcaps->max_tx_em_flows = 0;
	qcaps->max_rx_wm_flows = 0;
	qcaps->max_rx_em_flows = 0;
	qcaps->max_flow_id = 0;
	qcaps->max_mcast_filters = fcfg->num_mcast_filters;
	qcaps->max_sp_tx_rings = 0;
	qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
}
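/*
 * Program the PF with its MTU/MRU and ring/context limits.  tx_rings is a
 * separate argument so callers can deliberately under-allocate TX rings
 * (see bnxt_hwrm_allocate_vfs() below).
 */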
static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
			HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
	req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
	req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
	req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				   ETHER_CRC_LEN + VLAN_TAG_SIZE);
	req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
	req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
	req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
	req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
	req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
	req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
	req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
	req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
	req.fid = rte_cpu_to_le_16(0xffff);

	HWRM_PREP(req, FUNC_CFG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	return rc;
}
static void populate_vf_func_cfg_req(struct bnxt *bp,
				     struct hwrm_func_cfg_input *req,
				     int num_vfs)
{
	req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
			HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);

	req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				    ETHER_CRC_LEN + VLAN_TAG_SIZE);
	req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				    ETHER_CRC_LEN + VLAN_TAG_SIZE);
	req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
						(num_vfs + 1));
	req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
	req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
					       (num_vfs + 1));
	req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
	req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
	req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
	/* TODO: For now, do not support VMDq/RFS on VFs. */
	req->num_vnics = rte_cpu_to_le_16(1);
	req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
						 (num_vfs + 1));
}
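/*
 * If the VF has no firmware-assigned MAC (all zero), generate a random one
 * and note that in vf_info; otherwise keep the existing default MAC.
 */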
static void add_random_mac_if_needed(struct bnxt *bp,
				     struct hwrm_func_cfg_input *cfg_req,
				     int vf)
{
	struct ether_addr mac;

	if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
		return;

	/* All-zero MAC: five explicit zero bytes plus the literal's NUL */
	if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00", 6) == 0) {
		cfg_req->enables |=
		rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
		eth_random_addr(cfg_req->dflt_mac_addr);
		bp->pf.vf_info[vf].random_mac = true;
	} else {
		memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
	}
}
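/*
 * Query what the firmware actually granted this VF and subtract those
 * amounts from the PF's resource pool, so the next VF is configured out of
 * what remains.
 */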
static void reserve_resources_from_vf(struct bnxt *bp,
				      struct hwrm_func_cfg_input *cfg_req,
				      int vf)
{
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	/* Get the actual allocated values now */
	HWRM_PREP(req, FUNC_QCAPS);
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	if (rc) {
		RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
		copy_func_cfg_to_qcaps(cfg_req, resp);
	} else if (resp->error_code) {
		rc = rte_le_to_cpu_16(resp->error_code);
		RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
		copy_func_cfg_to_qcaps(cfg_req, resp);
	}

	bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
	bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
	/*
	 * TODO: While not supporting VMDq with VFs, max_vnics is always
	 * forced to 1 in this case
	 */
	//bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
	bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
}
int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	/* Query the VF's currently configured default VLAN */
	HWRM_PREP(req, FUNC_QCFG);
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	if (rc) {
		RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
		return -1;
	} else if (resp->error_code) {
		rc = rte_le_to_cpu_16(resp->error_code);
		RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
		return -1;
	}
	rc = rte_le_to_cpu_16(resp->vlan);

	return rc;
}
static int update_pf_resource_max(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	/* And copy the allocated numbers into the pf struct */
	HWRM_PREP(req, FUNC_QCFG);
	req.fid = rte_cpu_to_le_16(0xffff);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();

	/* Only TX ring value reflects actual allocation? TODO */
	bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
	bp->pf.evb_mode = resp->evb_mode;

	return rc;
}
int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
{
	int rc;

	if (!BNXT_PF(bp)) {
		RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
		return -1;
	}

	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc)
		return rc;

	bp->pf.func_cfg_flags &=
		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
	bp->pf.func_cfg_flags |=
		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
	return rc;
}
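/*
 * SR-IOV path.  The sequence is:
 *   1. shrink the PF to a single TX ring so enough rings remain for VFs,
 *   2. register a buffer for forwarded VF HWRM requests,
 *   3. FUNC_CFG each VF with an even share of the resources, reserving the
 *      granted amounts out of the PF pool as we go,
 *   4. hand whatever is left back to the PF.
 */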
int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	size_t sz, req_buf_sz;
	int rc = 0;
	int i;

	if (!BNXT_PF(bp)) {
		RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
		return -1;
	}

	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc)
		return rc;

	bp->pf.active_vfs = num_vfs;

	/*
	 * First, configure the PF to only use one TX ring.  This ensures that
	 * there are enough rings for all VFs.
	 *
	 * If we don't do this, when we call func_alloc() later, we will lock
	 * extra rings to the PF that won't be available during func_cfg() of
	 * the VFs.
	 *
	 * This has been fixed with firmware versions above 20.6.54
	 */
	bp->pf.func_cfg_flags &=
		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
	bp->pf.func_cfg_flags |=
		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
	rc = bnxt_hwrm_pf_func_cfg(bp, 1);
	if (rc)
		return rc;

	/* Now, create and register a buffer to hold forwarded VF requests */
	req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
	bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
		page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
	if (bp->pf.vf_req_buf == NULL) {
		rc = -ENOMEM;
		goto error_free;
	}
	for (sz = 0; sz < req_buf_sz; sz += getpagesize())
		rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
	for (i = 0; i < num_vfs; i++)
		bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
					(i * HWRM_MAX_REQ_LEN);

	rc = bnxt_hwrm_func_buf_rgtr(bp);
	if (rc)
		goto error_free;

	populate_vf_func_cfg_req(bp, &req, num_vfs);

	bp->pf.active_vfs = 0;
	for (i = 0; i < num_vfs; i++) {
		add_random_mac_if_needed(bp, &req, i);

		HWRM_PREP(req, FUNC_CFG);
		req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
		req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

		/* Clear enable flag for next pass */
		req.enables &= ~rte_cpu_to_le_32(
				HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);

		if (rc || resp->error_code) {
			RTE_LOG(ERR, PMD,
				"Failed to initialize VF %d\n", i);
			RTE_LOG(ERR, PMD,
				"Not all VFs available. (%d, %d)\n",
				rc, resp->error_code);
			break;
		}

		reserve_resources_from_vf(bp, &req, i);
		bp->pf.active_vfs++;
		bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
	}

	/*
	 * Now configure the PF to use "the rest" of the resources.
	 * We're using STD_TX_RING_MODE here though which will limit the TX
	 * rings.  This will allow QoS to function properly.  Not setting this
	 * will cause PF rings to break bandwidth settings.
	 */
	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
	if (rc)
		goto error_free;

	rc = update_pf_resource_max(bp);
	if (rc)
		goto error_free;

	return rc;

error_free:
	bnxt_hwrm_func_buf_unrgtr(bp);
	return rc;
}
int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(req, FUNC_CFG);

	req.fid = rte_cpu_to_le_16(0xffff);
	req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
	req.evb_mode = bp->pf.evb_mode;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();

	return rc;
}
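/*
 * Tell the firmware which UDP destination port carries VXLAN or Geneve
 * traffic.  The firmware returns a port ID which is cached for the matching
 * free call.
 */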
int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
				    uint8_t tunnel_type)
{
	struct hwrm_tunnel_dst_port_alloc_input req = {0};
	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
	req.tunnel_type = tunnel_type;
	req.tunnel_dst_port_val = port;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();

	switch (tunnel_type) {
	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
		bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
		bp->vxlan_port = port;
		break;
	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
		bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
		bp->geneve_port = port;
		break;
	default:
		break;
	}
	return rc;
}
int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
				   uint8_t tunnel_type)
{
	struct hwrm_tunnel_dst_port_free_input req = {0};
	struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(req, TUNNEL_DST_PORT_FREE);

	req.tunnel_type = tunnel_type;
	req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	return rc;
}
int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
				    uint32_t flags)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_CFG);

	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	req.flags = rte_cpu_to_le_32(flags);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	return rc;
}
void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
{
	uint32_t *flag = flagp;

	vnic->flags = *flag;
}

int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
}
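/*
 * Register the forwarded-request buffer with the firmware: a single
 * physically contiguous region providing HWRM_MAX_REQ_LEN bytes per VF.
 */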
int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_BUF_RGTR);

	req.req_buf_num_pages = rte_cpu_to_le_16(1);
	req.req_buf_page_size = rte_cpu_to_le_16(
			 page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
	req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
	req.req_buf_page_addr[0] =
		rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
	if (req.req_buf_page_addr[0] == 0) {
		RTE_LOG(ERR, PMD,
			"unable to map buffer address to physical memory\n");
		return -ENOMEM;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	return rc;
}
int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_BUF_UNRGTR);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();

	return rc;
}
int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_CFG);

	req.fid = rte_cpu_to_le_16(0xffff);
	req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
	req.async_event_cr = rte_cpu_to_le_16(
			bp->def_cp_ring->cp_ring_struct->fw_ring_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	return rc;
}
int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
{
	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_vf_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_VF_CFG);

	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
	req.async_event_cr = rte_cpu_to_le_16(
			bp->def_cp_ring->cp_ring_struct->fw_ring_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	return rc;
}
int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t dflt_vlan, fid;
	uint32_t func_cfg_flags;
	int rc = 0;

	HWRM_PREP(req, FUNC_CFG);

	if (is_vf) {
		dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
		fid = bp->pf.vf_info[vf].fid;
		func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
	} else {
		fid = 0xffff;	/* converted once below; avoid a double swap */
		func_cfg_flags = bp->pf.func_cfg_flags;
		dflt_vlan = bp->vlan;
	}

	req.flags = rte_cpu_to_le_32(func_cfg_flags);
	req.fid = rte_cpu_to_le_16(fid);
	req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
	req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	return rc;
}
int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
			  uint16_t max_bw, uint16_t enables)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_CFG);

	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	req.enables |= rte_cpu_to_le_32(enables);
	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
	req.max_bw = rte_cpu_to_le_32(max_bw);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	return rc;
}
int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(req, FUNC_CFG);

	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
	req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	return rc;
}
int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
			      void *encaped, size_t ec_size)
{
	int rc = 0;
	struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	if (ec_size > sizeof(req.encap_request))
		return -1;

	HWRM_PREP(req, REJECT_FWD_RESP);

	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
	memcpy(req.encap_request, encaped, ec_size);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	return rc;
}
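/*
 * REJECT_FWD_RESP above and EXEC_FWD_RESP below hand a forwarded VF request
 * back to the firmware, either refusing it or asking the firmware to execute
 * it on the VF's behalf.
 */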
int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
				       struct ether_addr *mac)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	HWRM_PREP(req, FUNC_QCFG);

	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);

	return rc;
}
int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
			    void *encaped, size_t ec_size)
{
	int rc = 0;
	struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	if (ec_size > sizeof(req.encap_request))
		return -1;

	HWRM_PREP(req, EXEC_FWD_RESP);

	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
	memcpy(req.encap_request, encaped, ec_size);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	return rc;
}
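/*
 * Read one statistics context and fold the counters into the per-queue
 * fields of rte_eth_stats; the rx flag selects the RX or TX half of the
 * response.
 */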
int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
			 struct rte_eth_stats *stats, uint8_t rx)
{
	int rc = 0;
	struct hwrm_stat_ctx_query_input req = {.req_type = 0};
	struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_QUERY);

	req.stat_ctx_id = rte_cpu_to_le_32(cid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	if (rx) {
		stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
		stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
		stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
		stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
		stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
		stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
		stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
		stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
	} else {
		stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
		stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
		stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
		stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
		stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
		stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
		stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
	}

	return rc;
}
int bnxt_hwrm_port_qstats(struct bnxt *bp)
{
	struct hwrm_port_qstats_input req = {0};
	struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_pf_info *pf = &bp->pf;
	int rc;

	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
		return 0;

	HWRM_PREP(req, PORT_QSTATS);

	req.port_id = rte_cpu_to_le_16(pf->port_id);
	req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
	req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	return rc;
}
int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
{
	struct hwrm_port_clr_stats_input req = {0};
	struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_pf_info *pf = &bp->pf;
	int rc;

	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
		return 0;

	HWRM_PREP(req, PORT_CLR_STATS);

	req.port_id = rte_cpu_to_le_16(pf->port_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	return rc;
}
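/*
 * LED identification support: PORT_LED_QCAPS discovers the LEDs once;
 * blinking is only kept enabled if every LED reports a group ID and supports
 * alternating blink.
 */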
int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
{
	struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_port_led_qcaps_input req = {0};
	int rc;

	if (BNXT_VF(bp))
		return -EINVAL;

	HWRM_PREP(req, PORT_LED_QCAPS);
	req.port_id = bp->pf.port_id;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
		unsigned int i;

		bp->num_leds = resp->num_leds;
		memcpy(bp->leds, &resp->led0_id,
			sizeof(bp->leds[0]) * bp->num_leds);
		for (i = 0; i < bp->num_leds; i++) {
			struct bnxt_led_info *led = &bp->leds[i];
			uint16_t caps = led->led_state_caps;

			if (!led->led_group_id ||
			    !BNXT_LED_ALT_BLINK_CAP(caps)) {
				bp->num_leds = 0;
				break;
			}
		}
	}
	return rc;
}
int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
{
	struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_port_led_cfg_input req = {0};
	struct bnxt_led_cfg *led_cfg;
	uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
	uint16_t duration = 0;
	int rc, i;

	if (!bp->num_leds || BNXT_VF(bp))
		return -EOPNOTSUPP;

	HWRM_PREP(req, PORT_LED_CFG);

	if (led_on) {
		led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
		duration = rte_cpu_to_le_16(500);
	}
	req.port_id = bp->pf.port_id;
	req.num_leds = bp->num_leds;
	led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
	for (i = 0; i < bp->num_leds; i++, led_cfg++) {
		req.enables |= BNXT_LED_DFLT_ENABLES(i);
		led_cfg->led_id = bp->leds[i].led_id;
		led_cfg->led_state = led_state;
		led_cfg->led_blink_on = duration;
		led_cfg->led_blink_off = duration;
		led_cfg->led_group_id = bp->leds[i].led_group_id;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	return rc;
}
int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
			       uint32_t *length)
{
	int rc;
	struct hwrm_nvm_get_dir_info_input req = {0};
	struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, NVM_GET_DIR_INFO);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	if (!rc) {
		*entries = rte_le_to_cpu_32(resp->entries);
		*length = rte_le_to_cpu_32(resp->entry_length);
	}
	return rc;
}
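/*
 * Copy the NVM directory to the caller's buffer.  The first two bytes carry
 * the (truncated) entry count and entry length; the table itself is DMAed
 * into a locked bounce buffer and copied out, truncated to len if needed.
 */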
int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
{
	int rc;
	uint32_t dir_entries;
	uint32_t entry_length;
	uint8_t *buf;
	size_t buflen;
	rte_iova_t dma_handle;
	struct hwrm_nvm_get_dir_entries_input req = {0};
	struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;

	rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
	if (rc != 0)
		return rc;
	*data++ = dir_entries;
	*data++ = entry_length;
	len -= 2;
	memset(data, 0xff, len);

	buflen = dir_entries * entry_length;
	buf = rte_malloc("nvm_dir", buflen, 0);
	if (buf == NULL)
		return -ENOMEM;
	rte_mem_lock_page(buf);
	dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == 0) {
		RTE_LOG(ERR, PMD,
			"unable to map response address to physical memory\n");
		rte_free(buf);
		return -ENOMEM;
	}
	HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
	req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	memcpy(data, buf, len > buflen ? buflen : len);
	rte_free(buf);
	return rc;
}
int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
			     uint32_t offset, uint32_t length,
			     uint8_t *data)
{
	int rc;
	uint8_t *buf;
	rte_iova_t dma_handle;
	struct hwrm_nvm_read_input req = {0};
	struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;

	buf = rte_malloc("nvm_item", length, 0);
	if (!buf)
		return -ENOMEM;
	rte_mem_lock_page(buf);

	dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == 0) {
		RTE_LOG(ERR, PMD,
			"unable to map response address to physical memory\n");
		rte_free(buf);
		return -ENOMEM;
	}
	HWRM_PREP(req, NVM_READ);
	req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
	req.dir_idx = rte_cpu_to_le_16(index);
	req.offset = rte_cpu_to_le_32(offset);
	req.len = rte_cpu_to_le_32(length);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();
	memcpy(data, buf, length);
	rte_free(buf);
	return rc;
}
int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
{
	int rc;
	struct hwrm_nvm_erase_dir_entry_input req = {0};
	struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
	req.dir_idx = rte_cpu_to_le_16(index);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();

	return rc;
}
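/*
 * Write (flash) one NVM directory entry.  The payload is staged in a locked
 * bounce buffer so the firmware can DMA it from host memory.
 */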
int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
			  uint16_t dir_ordinal, uint16_t dir_ext,
			  uint16_t dir_attr, const uint8_t *data,
			  size_t data_len)
{
	int rc;
	struct hwrm_nvm_write_input req = {0};
	struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
	rte_iova_t dma_handle;
	uint8_t *buf;

	HWRM_PREP(req, NVM_WRITE);

	req.dir_type = rte_cpu_to_le_16(dir_type);
	req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
	req.dir_ext = rte_cpu_to_le_16(dir_ext);
	req.dir_attr = rte_cpu_to_le_16(dir_attr);
	req.dir_data_length = rte_cpu_to_le_32(data_len);

	buf = rte_malloc("nvm_write", data_len, 0);
	if (!buf)
		return -ENOMEM;
	rte_mem_lock_page(buf);

	dma_handle = rte_mem_virt2iova(buf);
	if (dma_handle == 0) {
		RTE_LOG(ERR, PMD,
			"unable to map response address to physical memory\n");
		rte_free(buf);
		return -ENOMEM;
	}
	memcpy(buf, data, data_len);
	req.host_src_addr = rte_cpu_to_le_64(dma_handle);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	rte_free(buf);
	return rc;
}
static void
bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
{
	uint32_t *count = cbdata;
	*count = *count + 1;
}

static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
				     struct bnxt_vnic_info *vnic __rte_unused)
{
	return 0;
}

int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
{
	uint32_t count = 0;

	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
	    &count, bnxt_vnic_count_hwrm_stub);
	return count;
}
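/*
 * Ask the firmware for the list of VNIC IDs owned by a VF.  vnic_ids must be
 * a locked buffer with room for bp->pf.total_vnics entries; returns the
 * number of IDs written, or a negative value on failure.
 */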
static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
					uint16_t *vnic_ids)
{
	struct hwrm_func_vf_vnic_ids_query_input req = {0};
	struct hwrm_func_vf_vnic_ids_query_output *resp =
						bp->hwrm_cmd_resp_addr;
	int rc;

	/* First query all VNIC ids */
	HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);

	req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
	req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
	req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));

	if (req.vnic_id_tbl_addr == 0) {
		RTE_LOG(ERR, PMD,
		"unable to map VNIC ID table address to physical memory\n");
		return -ENOMEM;
	}
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	if (rc) {
		RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n",
			rc);
		return -1;
	} else if (resp->error_code) {
		rc = rte_le_to_cpu_16(resp->error_code);
		RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
		return -1;
	}
	rc = rte_le_to_cpu_32(resp->vnic_id_cnt);

	return rc;
}
/*
 * This function queries the VNIC IDs for a specified VF. It then calls
 * the vnic_cb to update the necessary field in vnic_info with cbdata.
 * Then it calls the hwrm_cb function to program this new vnic configuration.
 */
int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
	void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
	int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
{
	struct bnxt_vnic_info vnic;
	int rc = 0;
	int i, num_vnic_ids;
	uint16_t *vnic_ids;
	size_t vnic_id_sz;
	size_t sz;

	/* First query all VNIC ids */
	vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
	vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
			RTE_CACHE_LINE_SIZE);
	if (vnic_ids == NULL)
		return -ENOMEM;
	for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
		rte_mem_lock_page(((char *)vnic_ids) + sz);

	num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
	if (num_vnic_ids < 0) {
		rte_free(vnic_ids);
		return num_vnic_ids;
	}

	/* Retrieve each VNIC, apply the callback, then reprogram it */
	for (i = 0; i < num_vnic_ids; i++) {
		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
		if (rc)
			break;
		if (vnic.mru <= 4)	/* Indicates unallocated */
			continue;
		vnic_cb(&vnic, cbdata);
		rc = hwrm_cb(bp, &vnic);
		if (rc)
			break;
	}

	rte_free(vnic_ids);
	return rc;
}
int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
					      bool on)
{
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};
	int rc;

	HWRM_PREP(req, FUNC_CFG);

	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
	req.vlan_antispoof_mode = on ?
		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	return rc;
}
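/*
 * Find a VF's default VNIC by querying all of its VNIC IDs and probing each
 * one until func_default is set.
 */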
int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
{
	struct bnxt_vnic_info vnic;
	int rc = 0;
	int num_vnic_ids, i;
	uint16_t *vnic_ids;
	size_t vnic_id_sz;
	size_t sz;

	vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
	vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
			RTE_CACHE_LINE_SIZE);
	if (vnic_ids == NULL)
		return -ENOMEM;
	for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
		rte_mem_lock_page(((char *)vnic_ids) + sz);

	rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
	if (rc <= 0)
		goto exit;
	num_vnic_ids = rc;

	/*
	 * Loop through to find the default VNIC ID.
	 * TODO: The easier way would be to obtain the resp->dflt_vnic_id
	 * by sending the hwrm_func_qcfg command to the firmware.
	 */
	for (i = 0; i < num_vnic_ids; i++) {
		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
					bp->pf.first_vf_id + vf);
		if (rc)
			goto exit;
		if (vnic.func_default) {
			rte_free(vnic_ids);
			return vnic.fw_vnic_id;
		}
	}
	/* Could not find a default VNIC. */
	RTE_LOG(ERR, PMD, "No default VNIC\n");
exit:
	rte_free(vnic_ids);
	return -1;
}
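/*
 * Program an exact-match (EM) flow.  Any previously programmed EM filter
 * handle is freed first; each optional match field is copied into the
 * request only when its enable bit is set.
 */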
int bnxt_hwrm_set_em_filter(struct bnxt *bp,
			uint16_t dst_id,
			struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	if (filter->fw_em_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_em_filter(bp, filter);

	HWRM_PREP(req, CFA_EM_FLOW_ALLOC);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
	      HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (filter->ip_addr_type) {
		req.ip_addr_type = filter->ip_addr_type;
		enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
	}
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
		memcpy(req.src_macaddr, filter->src_macaddr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
		memcpy(req.dst_macaddr, filter->dst_macaddr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
		req.ovlan_vid = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
		req.ivlan_vid = filter->l2_ivlan;
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
		req.ip_protocol = filter->ip_protocol;
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
		req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
		req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
		req.src_port = rte_cpu_to_be_16(filter->src_port);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
		req.dst_port = rte_cpu_to_be_16(filter->dst_port);
	if (enables &
	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
		req.mirror_vnic_id = filter->mirror_vnic_id;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);

	return rc;
}
int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
	struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (filter->fw_em_filter_id == UINT64_MAX)
		return 0;

	RTE_LOG(DEBUG, PMD, "Clear EM filter\n");
	HWRM_PREP(req, CFA_EM_FLOW_FREE);

	req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	filter->fw_em_filter_id = -1;
	filter->fw_l2_filter_id = -1;

	return 0;
}
int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
			 uint16_t dst_id,
			 struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_ntuple_filter_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	if (filter->fw_ntuple_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_ntuple_filter(bp, filter);

	HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (filter->ip_addr_type) {
		req.ip_addr_type = filter->ip_addr_type;
		enables |=
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
	}
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
		memcpy(req.src_macaddr, filter->src_macaddr,
		       ETHER_ADDR_LEN);
	//if (enables &
	    //HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
		//memcpy(req.dst_macaddr, filter->dst_macaddr,
		       //ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
		req.ip_protocol = filter->ip_protocol;
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
		req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
		req.src_ipaddr_mask[0] =
			rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
		req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
		req.dst_ipaddr_mask[0] =
			rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
		req.src_port = rte_cpu_to_le_16(filter->src_port);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
		req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
		req.dst_port = rte_cpu_to_le_16(filter->dst_port);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
		req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
	if (enables &
	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
		req.mirror_vnic_id = filter->mirror_vnic_id;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);

	return rc;
}
int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
				  struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_ntuple_filter_free_output *resp =
						bp->hwrm_cmd_resp_addr;

	if (filter->fw_ntuple_filter_id == UINT64_MAX)
		return 0;

	HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);

	req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	filter->fw_ntuple_filter_id = -1;
	filter->fw_l2_filter_id = -1;

	return 0;
}