4 * Copyright(c) Broadcom Limited.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Broadcom Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 #include <rte_byteorder.h>
37 #include <rte_common.h>
38 #include <rte_cycles.h>
39 #include <rte_malloc.h>
40 #include <rte_memzone.h>
41 #include <rte_version.h>
45 #include "bnxt_filter.h"
46 #include "bnxt_hwrm.h"
49 #include "bnxt_ring.h"
52 #include "bnxt_vnic.h"
53 #include "hsi_struct_def_dpdk.h"
57 #define HWRM_CMD_TIMEOUT 10000
/*
 * Snapshot of a VNIC's buffer placement-mode settings, used to save and
 * restore the plcmodes configuration around a VNIC_CFG call (see
 * bnxt_hwrm_vnic_plcmodes_qcfg()/bnxt_hwrm_vnic_plcmodes_cfg()).
 * NOTE(review): this copy of the file elides lines (the embedded original
 * line numbers jump), so fields such as flags/hds_offset are not visible.
 */
59 struct bnxt_plcmodes_cfg {
61 uint16_t jumbo_thresh;
63 uint16_t hds_threshold;
/*
 * Return log2 of the smallest supported page size >= @size.
 * Only the error path is visible in this copy: for an out-of-range size,
 * log an error and fall back to the highest representable bit index
 * (sizeof(void *) * 8 - 1). NOTE(review): the per-size cases (original
 * lines 67-81) are elided here.
 */
66 static int page_getenum(size_t size)
82 RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
83 return sizeof(void *) * 8 - 1;
/*
 * Round @size up to the next supported page size (1 << page_getenum(size)).
 * NOTE(review): the surrounding braces are elided in this copy.
 */
86 static int page_roundup(size_t size)
88 return 1 << page_getenum(size);
92 * HWRM Functions (sent to HWRM)
93 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
94 * fails (ie: a timeout), and a positive non-zero HWRM error code if the HWRM
95 * command was failed by the ChiMP.
/*
 * Low-level transmit of one HWRM command to the firmware (ChiMP):
 * copies the request into BAR0 doorbell space 32 bits at a time, zero-fills
 * the remainder of the request window, rings the doorbell at BAR0+0x100,
 * then polls the DMA'd response buffer for the "valid" key byte.
 * Caller must hold bp->hwrm_lock (taken by HWRM_PREP).
 * NOTE(review): many source lines are elided in this copy (e.g. the local
 * declarations of i/bar/valid, the rte_io_wmb() barrier, the doorbell
 * write, the poll delay, and the closing braces/return paths).
 */
98 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
102 struct input *req = msg;
103 struct output *resp = bp->hwrm_cmd_resp_addr;
104 uint32_t *data = msg;
107 uint16_t max_req_len = bp->max_req_len;
108 struct hwrm_short_input short_input = { 0 };
/*
 * Short-command mode: the full request is DMA'd from host memory and only
 * a small hwrm_short_input descriptor (type/signature/size/req_addr) is
 * written through BAR0 instead of the whole message.
 */
110 if (bp->flags & BNXT_FLAG_SHORT_CMD) {
111 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
113 memset(short_cmd_req, 0, bp->max_req_len);
114 memcpy(short_cmd_req, req, msg_len);
116 short_input.req_type = rte_cpu_to_le_16(req->req_type);
117 short_input.signature = rte_cpu_to_le_16(
118 HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
119 short_input.size = rte_cpu_to_le_16(msg_len);
120 short_input.req_addr =
121 rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);
/* From here on, "data"/"msg_len" describe the short descriptor instead. */
123 data = (uint32_t *)&short_input;
124 msg_len = sizeof(short_input);
126 /* Sync memory write before updating doorbell */
129 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
132 /* Write request msg to hwrm channel */
133 for (i = 0; i < msg_len; i += 4) {
134 bar = (uint8_t *)bp->bar0 + i;
135 rte_write32(*data, bar);
139 /* Zero the rest of the request space */
140 for (; i < max_req_len; i += 4) {
141 bar = (uint8_t *)bp->bar0 + i;
145 /* Ring channel doorbell */
146 bar = (uint8_t *)bp->bar0 + 0x100;
149 /* Poll for the valid bit */
150 for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
151 /* Sanity check on the resp->resp_len */
153 if (resp->resp_len && resp->resp_len <=
155 /* Last byte of resp contains the valid key */
156 valid = (uint8_t *)resp + resp->resp_len - 1;
157 if (*valid == HWRM_RESP_VALID_KEY)
/* Timed out waiting for the firmware to mark the response valid. */
163 if (i >= HWRM_CMD_TIMEOUT) {
164 RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
175 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
176 * spinlock, and does initial processing.
178 * HWRM_CHECK_RESULT() returns errors on failure and may not be used. It
179 * releases the spinlock only if it returns. If the regular int return codes
180 * are not used by the function, HWRM_CHECK_RESULT() should not be used
181 * directly, rather it should be copied and modified to suit the function.
183 * HWRM_UNLOCK() must be called after all response processing is completed.
/*
 * HWRM_PREP(): acquire the HWRM spinlock (released later by HWRM_UNLOCK),
 * clear the shared response buffer, and fill the common request header
 * (req_type, no completion ring, incrementing sequence id, broadcast
 * target, DMA address of the response buffer).
 * NOTE(review): the macro's closing "} while (0)" (original lines 193-194)
 * is elided in this copy. Comments are kept outside the macro body because
 * a standalone comment line would break the backslash continuation.
 */
185 #define HWRM_PREP(req, type) do { \
186 rte_spinlock_lock(&bp->hwrm_lock); \
187 memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
188 req.req_type = rte_cpu_to_le_16(HWRM_##type); \
189 req.cmpl_ring = rte_cpu_to_le_16(-1); \
190 req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
191 req.target_id = rte_cpu_to_le_16(0xffff); \
192 req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
/*
 * HWRM_CHECK_RESULT(): examine the send rc and the firmware error_code in
 * the response; on failure, log (with the extended cmd_err/opaque details
 * when the response is large enough), drop the HWRM spinlock and return
 * the error from the enclosing function. Expects locals "rc" and "resp"
 * in scope.
 * NOTE(review): several macro lines (the rc<0 test, error-code remapping,
 * return statements and the closing "} while (0)") are elided in this
 * copy; comments stay outside the body to preserve line continuations.
 */
195 #define HWRM_CHECK_RESULT() do {\
197 RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
199 rte_spinlock_unlock(&bp->hwrm_lock); \
202 if (resp->error_code) { \
203 rc = rte_le_to_cpu_16(resp->error_code); \
204 if (resp->resp_len >= 16) { \
205 struct hwrm_err_output *tmp_hwrm_err_op = \
208 "%s error %d:%d:%08x:%04x\n", \
210 rc, tmp_hwrm_err_op->cmd_err, \
212 tmp_hwrm_err_op->opaque_0), \
214 tmp_hwrm_err_op->opaque_1)); \
218 "%s error %d\n", __func__, rc); \
220 rte_spinlock_unlock(&bp->hwrm_lock); \
/* HWRM_UNLOCK(): release the spinlock taken by HWRM_PREP; must be called
 * after all response-buffer processing is finished (the buffer is shared).
 */
225 #define HWRM_UNLOCK() rte_spinlock_unlock(&bp->hwrm_lock)
/*
 * Clear the L2 RX mask for @vnic by issuing CFA_L2_SET_RX_MASK with an
 * (elided in this copy) zero mask. NOTE(review): the "req.mask = 0",
 * HWRM_CHECK_RESULT/HWRM_UNLOCK and return lines are not visible here.
 */
227 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
230 struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
231 struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
233 HWRM_PREP(req, CFA_L2_SET_RX_MASK);
234 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
237 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
/*
 * Program the VNIC's L2 RX mask (broadcast/untagged/promisc/all-mcast/
 * mcast) from vnic->flags, plus the multicast MAC table and, when a VLAN
 * table is supplied, the VLAN filter table.
 * NOTE(review): the vlan_count guard around the VLAN-table lines and the
 * HWRM_CHECK_RESULT/HWRM_UNLOCK/return tail are elided in this copy.
 */
245 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
246 struct bnxt_vnic_info *vnic,
248 struct bnxt_vlan_table_entry *vlan_table)
251 struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
252 struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
255 HWRM_PREP(req, CFA_L2_SET_RX_MASK);
256 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
258 /* FIXME add multicast flag, when multicast adding options is supported
/* Translate driver VNIC flags into the HWRM rx-mask bit set. */
261 if (vnic->flags & BNXT_VNIC_INFO_BCAST)
262 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
263 if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
264 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
265 if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
266 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
267 if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
268 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
269 if (vnic->flags & BNXT_VNIC_INFO_MCAST)
270 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
/* A non-empty multicast address list implies the MCAST mask bit. */
271 if (vnic->mc_addr_cnt) {
272 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
273 req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
274 req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
/* With a VLAN table and no VLAN_NONVLAN, restrict to VLAN-only traffic. */
277 if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
278 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
279 req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
280 rte_mem_virt2phy(vlan_table));
281 req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
283 req.mask = rte_cpu_to_le_32(mask);
285 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
/*
 * Configure the VLAN anti-spoof table for function @fid.
 * The nested fw_ver checks gate the command on firmware support:
 * available from 1.8.0, and also backported to 1.7.8.11+ — firmware
 * older than that silently skips (the early-return lines are elided in
 * this copy, as are the HWRM_CHECK_RESULT/HWRM_UNLOCK/return tail).
 */
293 int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
295 struct bnxt_vlan_antispoof_table_entry *vlan_table)
298 struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
299 struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
300 bp->hwrm_cmd_resp_addr;
303 * Older HWRM versions did not support this command, and the set_rx_mask
304 * list was used for anti-spoof. In 1.8.0, the TX path configuration was
305 * removed from set_rx_mask call, and this command was added.
307 * This command is also present from 1.7.8.11 and higher,
/* fw_ver is packed as (maj << 24) | (min << 16) | (bld << 8) | rsvd. */
310 if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
311 if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
312 if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
317 HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
318 req.fid = rte_cpu_to_le_16(fid);
320 req.vlan_tag_mask_tbl_addr =
321 rte_cpu_to_le_64(rte_mem_virt2phy(vlan_table));
322 req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
324 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
/*
 * Free the firmware L2 filter referenced by @filter. A fw_l2_filter_id of
 * UINT64_MAX means "no filter allocated" and the call is a no-op (the
 * early "return 0" line is elided in this copy). On success the cached id
 * is reset to -1 (== UINT64_MAX) so a later clear is idempotent.
 */
332 int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
333 struct bnxt_filter_info *filter)
336 struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
337 struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
339 if (filter->fw_l2_filter_id == UINT64_MAX)
342 HWRM_PREP(req, CFA_L2_FILTER_FREE);
344 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
346 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
/* Mark the slot free; -1 assigned to uint64_t yields UINT64_MAX. */
351 filter->fw_l2_filter_id = -1;
/*
 * Allocate a firmware L2 filter for @filter, targeting @dst_id. Any
 * previously-allocated filter is freed first. Each optional field
 * (address, masks, ovlan, src) is copied into the request only when its
 * corresponding bit is set in "enables"; DST_ID is always enabled.
 * On success the returned l2_filter_id is cached in the filter.
 * NOTE(review): several "if (enables & ...)" guard lines and the
 * HWRM_CHECK_RESULT/HWRM_UNLOCK/return tail are elided in this copy.
 */
356 int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
358 struct bnxt_filter_info *filter)
361 struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
362 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
363 uint32_t enables = 0;
365 if (filter->fw_l2_filter_id != UINT64_MAX)
366 bnxt_hwrm_clear_l2_filter(bp, filter);
368 HWRM_PREP(req, CFA_L2_FILTER_ALLOC);
370 req.flags = rte_cpu_to_le_32(filter->flags);
372 enables = filter->enables |
373 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
374 req.dst_id = rte_cpu_to_le_16(dst_id);
377 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
378 memcpy(req.l2_addr, filter->l2_addr,
381 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
382 memcpy(req.l2_addr_mask, filter->l2_addr_mask,
385 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
386 req.l2_ovlan = filter->l2_ovlan;
388 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
389 req.l2_ovlan_mask = filter->l2_ovlan_mask;
390 if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
391 req.src_id = rte_cpu_to_le_32(filter->src_id);
392 if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
393 req.src_type = filter->src_type;
395 req.enables = rte_cpu_to_le_32(enables);
397 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
/* Cache the firmware handle for later CFA_L2_FILTER_FREE. */
401 filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
/*
 * Query function capabilities (FUNC_QCAPS) and cache the resource limits
 * (ring groups, rings, contexts, VNICs, stats contexts) in @bp. On a PF,
 * also (re)allocate the per-VF info array with its VLAN and VLAN
 * anti-spoof tables when the VF count changed.
 * NOTE(review): this copy elides the PF/VF branching, the rte_malloc
 * failure checks, the rte_mem_lock_page() calls whose argument lines
 * remain at original 446/457, and loop/brace closers — the per-VF
 * allocation sizes and error unwinding are not fully visible here.
 */
407 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
410 struct hwrm_func_qcaps_input req = {.req_type = 0 };
411 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
412 uint16_t new_max_vfs;
415 HWRM_PREP(req, FUNC_QCAPS);
417 req.fid = rte_cpu_to_le_16(0xffff);
419 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
423 bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
425 bp->pf.port_id = resp->port_id;
426 bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
/* Rebuild the VF info array only when the VF count actually changed. */
427 new_max_vfs = bp->pdev->max_vfs;
428 if (new_max_vfs != bp->pf.max_vfs) {
430 rte_free(bp->pf.vf_info);
431 bp->pf.vf_info = rte_malloc("bnxt_vf_info",
432 sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
433 bp->pf.max_vfs = new_max_vfs;
434 for (i = 0; i < new_max_vfs; i++) {
435 bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
436 bp->pf.vf_info[i].vlan_table =
437 rte_zmalloc("VF VLAN table",
440 if (bp->pf.vf_info[i].vlan_table == NULL)
442 "Fail to alloc VLAN table for VF %d\n",
446 bp->pf.vf_info[i].vlan_table);
447 bp->pf.vf_info[i].vlan_as_table =
448 rte_zmalloc("VF VLAN AS table",
451 if (bp->pf.vf_info[i].vlan_as_table == NULL)
453 "Alloc VLAN AS table for VF %d fail\n",
457 bp->pf.vf_info[i].vlan_as_table);
458 STAILQ_INIT(&bp->pf.vf_info[i].filter);
/* Cache the advertised resource maxima for later ring/VNIC sizing. */
463 bp->fw_fid = rte_le_to_cpu_32(resp->fid);
464 memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
465 bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
466 bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
467 bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
468 bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
469 bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
470 /* TODO: For now, do not support VMDq/RFS on VFs. */
475 bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
479 bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
481 bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
/*
 * Issue FUNC_RESET with no optional fields enabled, resetting this
 * function's state in firmware. NOTE(review): the HWRM_CHECK_RESULT/
 * HWRM_UNLOCK/return tail is elided in this copy.
 */
487 int bnxt_hwrm_func_reset(struct bnxt *bp)
490 struct hwrm_func_reset_input req = {.req_type = 0 };
491 struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
493 HWRM_PREP(req, FUNC_RESET);
495 req.enables = rte_cpu_to_le_32(0);
497 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
/*
 * Register this driver with firmware (FUNC_DRV_RGTR): advertise the DPDK
 * version and request async-event forwarding; on a PF additionally
 * forward VF requests. Idempotent via the BNXT_FLAG_REGISTERED flag.
 * NOTE(review): the BNXT_PF() guard around the VF_INPUT_FWD lines and
 * the HWRM_CHECK_RESULT/HWRM_UNLOCK/return tail are elided in this copy.
 */
505 int bnxt_hwrm_func_driver_register(struct bnxt *bp)
508 struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
509 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
511 if (bp->flags & BNXT_FLAG_REGISTERED)
514 HWRM_PREP(req, FUNC_DRV_RGTR);
515 req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
516 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
517 req.ver_maj = RTE_VER_YEAR;
518 req.ver_min = RTE_VER_MONTH;
519 req.ver_upd = RTE_VER_MINOR;
522 req.enables |= rte_cpu_to_le_32(
523 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD);
524 memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
525 RTE_MIN(sizeof(req.vf_req_fwd),
526 sizeof(bp->pf.vf_req_fwd)));
/*
 * NOTE(review): the memset below overwrites the |= on the previous line,
 * making it dead code (the whole bitmap is set to 0xff anyway). Probably
 * the |= predates the memset — confirm against upstream and drop one.
 */
529 req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1); /* TODO: Use MACRO */
530 memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
532 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
537 bp->flags |= BNXT_FLAG_REGISTERED;
/*
 * Query firmware/HWRM versions (VER_GET), verify interface compatibility,
 * and size/allocate the DMA-able response buffer (and, when the firmware
 * requires the short-command protocol, the short-command request buffer).
 * Caches bp->fw_ver packed as (maj<<24)|(min<<16)|(bld<<8)|rsvd.
 * NOTE(review): this copy elides many lines — "int rc", error unwinding,
 * "goto error"/"rc = -EBUSY" style paths, rte_malloc sizes, and the
 * closing braces — so the exact error flow is not fully visible.
 */
542 int bnxt_hwrm_ver_get(struct bnxt *bp)
545 struct hwrm_ver_get_input req = {.req_type = 0 };
546 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
549 uint16_t max_resp_len;
550 char type[RTE_MEMZONE_NAMESIZE];
551 uint32_t dev_caps_cfg;
/* Start from the spec maximum until firmware reports its real limit. */
553 bp->max_req_len = HWRM_MAX_REQ_LEN;
554 HWRM_PREP(req, VER_GET);
556 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
557 req.hwrm_intf_min = HWRM_VERSION_MINOR;
558 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
560 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
564 RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
565 resp->hwrm_intf_maj, resp->hwrm_intf_min,
567 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
568 bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
569 (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
570 RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
571 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
/* Pack driver and firmware interface versions as maj<<16|min<<8|upd. */
573 my_version = HWRM_VERSION_MAJOR << 16;
574 my_version |= HWRM_VERSION_MINOR << 8;
575 my_version |= HWRM_VERSION_UPDATE;
577 fw_version = resp->hwrm_intf_maj << 16;
578 fw_version |= resp->hwrm_intf_min << 8;
579 fw_version |= resp->hwrm_intf_upd;
/* A major-version mismatch is fatal; minor skews only warn below. */
581 if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
582 RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
587 if (my_version != fw_version) {
588 RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
589 if (my_version < fw_version) {
591 "Firmware API version is newer than driver.\n");
593 "The driver may be missing features.\n");
596 "Firmware API version is older than driver.\n");
598 "Not all driver features may be functional.\n");
602 if (bp->max_req_len > resp->max_req_win_len) {
603 RTE_LOG(ERR, PMD, "Unsupported request length\n");
606 bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
607 max_resp_len = resp->max_resp_len;
608 dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);
/* Reallocate the response buffer if firmware wants a different size.
 * NOTE(review): sprintf into a fixed buffer — snprintf would be safer;
 * confirm RTE_MEMZONE_NAMESIZE always fits this format before changing.
 */
610 if (bp->max_resp_len != max_resp_len) {
611 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
612 bp->pdev->addr.domain, bp->pdev->addr.bus,
613 bp->pdev->addr.devid, bp->pdev->addr.function);
615 rte_free(bp->hwrm_cmd_resp_addr);
617 bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
618 if (bp->hwrm_cmd_resp_addr == NULL) {
622 rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
623 bp->hwrm_cmd_resp_dma_addr =
624 rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
625 if (bp->hwrm_cmd_resp_dma_addr == 0) {
627 "Unable to map response buffer to physical memory.\n");
631 bp->max_resp_len = max_resp_len;
/* NOTE(review): "..._SHORT_CMD_INPUTUIRED" below looks like a garbled
 * copy of HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED ("REQ"
 * mangled to "INPUT") — verify against hsi_struct_def_dpdk.h.
 */
635 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
637 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_INPUTUIRED)) {
638 RTE_LOG(DEBUG, PMD, "Short command supported\n");
640 rte_free(bp->hwrm_short_cmd_req_addr);
642 bp->hwrm_short_cmd_req_addr = rte_malloc(type,
644 if (bp->hwrm_short_cmd_req_addr == NULL) {
648 rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
649 bp->hwrm_short_cmd_req_dma_addr =
650 rte_mem_virt2phy(bp->hwrm_short_cmd_req_addr);
651 if (bp->hwrm_short_cmd_req_dma_addr == 0) {
652 rte_free(bp->hwrm_short_cmd_req_addr);
654 "Unable to map buffer to physical memory.\n");
659 bp->flags |= BNXT_FLAG_SHORT_CMD;
/*
 * Unregister the driver (FUNC_DRV_UNRGTR); no-op if never registered.
 * Clears BNXT_FLAG_REGISTERED on success. NOTE(review): the line using
 * @flags (req.flags assignment, original 677-678) and the
 * HWRM_CHECK_RESULT/HWRM_UNLOCK/return tail are elided in this copy.
 */
667 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
670 struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
671 struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
673 if (!(bp->flags & BNXT_FLAG_REGISTERED))
676 HWRM_PREP(req, FUNC_DRV_UNRGTR);
679 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
684 bp->flags &= ~BNXT_FLAG_REGISTERED;
/*
 * Program the PHY (PORT_PHY_CFG) from @conf: either a forced link speed,
 * or auto-negotiation (mode/speed-mask/auto-speed) when conf->link_speed
 * is zero, plus duplex and pause settings. A separate branch (partially
 * elided here) forces the link down instead.
 * NOTE(review): several guard lines (e.g. the "else" paths, the
 * link-up/link-down outer condition) and the HWRM_CHECK_RESULT/
 * HWRM_UNLOCK/return tail are elided in this copy.
 */
689 static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
692 struct hwrm_port_phy_cfg_input req = {0};
693 struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
694 uint32_t enables = 0;
695 uint32_t link_speed_mask =
696 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
698 HWRM_PREP(req, PORT_PHY_CFG);
701 req.flags = rte_cpu_to_le_32(conf->phy_flags);
702 req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
704 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
705 * any auto mode, even "none".
/* link_speed == 0 means "autoneg": select mode, mask or fixed speed. */
707 if (!conf->link_speed) {
708 req.auto_mode = conf->auto_mode;
709 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
710 if (conf->auto_mode ==
711 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK) {
712 req.auto_link_speed_mask =
713 conf->auto_link_speed_mask;
714 enables |= link_speed_mask;
716 if (bp->link_info.auto_link_speed) {
717 req.auto_link_speed =
718 bp->link_info.auto_link_speed;
720 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
723 req.auto_duplex = conf->duplex;
724 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
725 req.auto_pause = conf->auto_pause;
726 req.force_pause = conf->force_pause;
727 /* Set force_pause if there is no auto or if there is a force */
728 if (req.auto_pause && !req.force_pause)
729 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
731 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
733 req.enables = rte_cpu_to_le_32(enables);
/* Force-link-down path (the enclosing else is elided in this copy). */
736 rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
737 RTE_LOG(INFO, PMD, "Force Link Down\n");
740 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
/*
 * Query PHY state (PORT_PHY_QCFG) and copy the fields the driver tracks
 * (link status/speed, duplex, pause, autoneg settings, supported speeds,
 * PHY firmware version) into @link_info.
 * NOTE(review): some assignment lines (e.g. link_info->link_up LHS on
 * original 762, phy_ver[] tail) and the HWRM_CHECK_RESULT/HWRM_UNLOCK/
 * return tail are elided in this copy.
 */
748 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
749 struct bnxt_link_info *link_info)
752 struct hwrm_port_phy_qcfg_input req = {0};
753 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
755 HWRM_PREP(req, PORT_PHY_QCFG);
757 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
761 link_info->phy_link_status = resp->link;
763 (link_info->phy_link_status ==
764 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
765 link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
766 link_info->duplex = resp->duplex;
767 link_info->pause = resp->pause;
768 link_info->auto_pause = resp->auto_pause;
769 link_info->force_pause = resp->force_pause;
770 link_info->auto_mode = resp->auto_mode;
772 link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
773 link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
774 link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
775 link_info->phy_ver[0] = resp->phy_maj;
776 link_info->phy_ver[1] = resp->phy_min;
777 link_info->phy_ver[2] = resp->phy_bld;
/*
 * Query CoS queue configuration (QUEUE_QPORTCFG) and record each queue's
 * id and service profile into bp->cos_queue[] via the token-pasting
 * GET_QUEUE_INFO(x) helper. NOTE(review): the GET_QUEUE_INFO invocations
 * for queues 0..7, the #undef, and the HWRM_CHECK_RESULT/HWRM_UNLOCK/
 * return tail (original lines 799-813) are elided in this copy.
 */
784 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
787 struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
788 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
790 HWRM_PREP(req, QUEUE_QPORTCFG);
792 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
796 #define GET_QUEUE_INFO(x) \
797 bp->cos_queue[x].id = resp->queue_id##x; \
798 bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
/*
 * Allocate a firmware ring (RING_ALLOC) of @ring_type (TX, RX, or L2
 * completion), binding it to @stats_ctx_id / @cmpl_ring_id as the type
 * requires, and store the returned id in ring->fw_ring_id.
 * On failure, log per ring type and return a per-type error (the specific
 * return statements are elided in this copy, along with the "switch
 * (ring_type)" opener, several case-tail lines and closing braces).
 */
814 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
815 struct bnxt_ring *ring,
816 uint32_t ring_type, uint32_t map_index,
817 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
820 uint32_t enables = 0;
821 struct hwrm_ring_alloc_input req = {.req_type = 0 };
822 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
824 HWRM_PREP(req, RING_ALLOC);
826 req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
827 req.fbo = rte_cpu_to_le_32(0);
828 /* Association of ring index with doorbell index */
829 req.logical_id = rte_cpu_to_le_16(map_index);
830 req.length = rte_cpu_to_le_32(ring->ring_size);
/* Per-type request fields (the switch statement itself is elided). */
833 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
834 req.queue_id = bp->cos_queue[0].id;
836 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
837 req.ring_type = ring_type;
838 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
839 req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
840 if (stats_ctx_id != INVALID_STATS_CTX_ID)
842 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
844 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
845 req.ring_type = ring_type;
847 * TODO: Some HWRM versions crash with
848 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
850 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
853 RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
858 req.enables = rte_cpu_to_le_32(enables);
860 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
/* Hand-rolled error check (this path cannot use HWRM_CHECK_RESULT
 * because it needs per-ring-type error handling). */
862 if (rc || resp->error_code) {
863 if (rc == 0 && resp->error_code)
864 rc = rte_le_to_cpu_16(resp->error_code);
866 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
868 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
871 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
873 "hwrm_ring_alloc rx failed. rc:%d\n", rc);
876 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
878 "hwrm_ring_alloc tx failed. rc:%d\n", rc);
882 RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
888 ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
/*
 * Free a firmware ring (RING_FREE) by its cached fw_ring_id. Mirrors
 * bnxt_hwrm_ring_alloc(): hand-rolled error handling with a per-ring-type
 * log message. NOTE(review): the "switch (ring_type)" opener, the return
 * statements and closing braces are elided in this copy.
 */
893 int bnxt_hwrm_ring_free(struct bnxt *bp,
894 struct bnxt_ring *ring, uint32_t ring_type)
897 struct hwrm_ring_free_input req = {.req_type = 0 };
898 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
900 HWRM_PREP(req, RING_FREE);
902 req.ring_type = ring_type;
903 req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
905 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
907 if (rc || resp->error_code) {
908 if (rc == 0 && resp->error_code)
909 rc = rte_le_to_cpu_16(resp->error_code);
913 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
914 RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
917 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
918 RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
921 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
922 RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
926 RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
/*
 * Allocate a firmware ring group (RING_GRP_ALLOC) binding the completion,
 * RX, aggregation rings and stats context cached in bp->grp_info[idx];
 * store the returned group id back into grp_info[idx].fw_grp_id.
 * NOTE(review): the HWRM_CHECK_RESULT/HWRM_UNLOCK/return tail is elided.
 */
934 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
937 struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
938 struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
940 HWRM_PREP(req, RING_GRP_ALLOC);
942 req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
943 req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
944 req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
945 req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
947 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
951 bp->grp_info[idx].fw_grp_id =
952 rte_le_to_cpu_16(resp->ring_group_id);
/*
 * Free the firmware ring group cached at bp->grp_info[idx] and reset the
 * cached id to INVALID_HW_RING_ID so a repeat free is harmless.
 * NOTE(review): the HWRM_CHECK_RESULT/HWRM_UNLOCK/return tail is elided.
 */
959 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
962 struct hwrm_ring_grp_free_input req = {.req_type = 0 };
963 struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
965 HWRM_PREP(req, RING_GRP_FREE);
967 req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
969 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
974 bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
/*
 * Zero the hardware statistics of @cpr's stats context
 * (STAT_CTX_CLR_STATS); no-op when no context is allocated
 * (hw_stats_ctx_id == HWRM_NA_SIGNATURE — the early return is elided).
 * NOTE(review): the HWRM_CHECK_RESULT/HWRM_UNLOCK/return tail is elided.
 */
978 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
981 struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
982 struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
984 if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
987 HWRM_PREP(req, STAT_CTX_CLR_STATS);
989 req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
991 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
/*
 * Allocate a firmware statistics context (STAT_CTX_ALLOC) DMA-ing into
 * cpr->hw_stats_map (update period 0 = on-demand); cache the returned id
 * in cpr and mirror it into bp->grp_info[idx].fw_stats_ctx.
 * NOTE(review): @idx is marked __rte_unused yet used at original line
 * 1020 — one of the two is stale; confirm against upstream.
 */
999 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1000 unsigned int idx __rte_unused)
1003 struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
1004 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1006 HWRM_PREP(req, STAT_CTX_ALLOC);
1008 req.update_period_ms = rte_cpu_to_le_32(0);
1010 req.stats_dma_addr =
1011 rte_cpu_to_le_64(cpr->hw_stats_map);
1013 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1015 HWRM_CHECK_RESULT();
1017 cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
1020 bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
/*
 * Free @cpr's firmware statistics context (STAT_CTX_FREE).
 * NOTE(review): the HWRM_UNLOCK and return lines are elided in this copy.
 */
1025 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1026 unsigned int idx __rte_unused)
1029 struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
1030 struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
1032 HWRM_PREP(req, STAT_CTX_FREE);
1034 req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
1036 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1038 HWRM_CHECK_RESULT();
/*
 * Allocate a firmware VNIC (VNIC_ALLOC): first map the VNIC's ring-group
 * range to firmware group ids and initialize its rule handles/MRU, then
 * issue the command (with FLAGS_DEFAULT for the default VNIC) and cache
 * the returned vnic_id. NOTE(review): the declarations of rc/i/j and the
 * return line are elided in this copy.
 */
1044 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1047 struct hwrm_vnic_alloc_input req = { 0 };
1048 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1050 /* map ring groups to this vnic */
1051 RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
1052 vnic->start_grp_id, vnic->end_grp_id);
1053 for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
1054 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1055 vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
/* No RSS/CoS/LB rules yet: initialize all three handles to "none". */
1056 vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1057 vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1058 vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
/* MRU = MTU plus L2 header, CRC and one VLAN tag. */
1059 vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1060 ETHER_CRC_LEN + VLAN_TAG_SIZE;
1061 HWRM_PREP(req, VNIC_ALLOC);
1063 if (vnic->func_default)
1064 req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
1065 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1067 HWRM_CHECK_RESULT();
1069 vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1071 RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
/*
 * Read the VNIC's current placement-mode settings (VNIC_PLCMODES_QCFG)
 * into @pmode so bnxt_hwrm_vnic_cfg() can restore them afterwards. The
 * DFLT_VNIC flag is stripped because it has no counterpart in the _CFG
 * request. NOTE(review): the HWRM_UNLOCK/return tail is elided.
 */
1075 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1076 struct bnxt_vnic_info *vnic,
1077 struct bnxt_plcmodes_cfg *pmode)
1080 struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1081 struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1083 HWRM_PREP(req, VNIC_PLCMODES_QCFG);
1085 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1087 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1089 HWRM_CHECK_RESULT();
1091 pmode->flags = rte_le_to_cpu_32(resp->flags);
1092 /* dflt_vnic bit doesn't exist in the _cfg command */
1093 pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1094 pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1095 pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1096 pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
/*
 * Write back placement-mode settings captured by
 * bnxt_hwrm_vnic_plcmodes_qcfg() (VNIC_PLCMODES_CFG), enabling the
 * jumbo-threshold, HDS-offset and HDS-threshold fields.
 * NOTE(review): the HWRM_UNLOCK/return tail is elided in this copy.
 */
1103 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1104 struct bnxt_vnic_info *vnic,
1105 struct bnxt_plcmodes_cfg *pmode)
1108 struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1109 struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1111 HWRM_PREP(req, VNIC_PLCMODES_CFG);
1113 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1114 req.flags = rte_cpu_to_le_32(pmode->flags);
1115 req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1116 req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1117 req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1118 req.enables = rte_cpu_to_le_32(
1119 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1120 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1121 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1124 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1126 HWRM_CHECK_RESULT();
/*
 * Configure the VNIC (VNIC_CFG): ring group, RSS/CoS/LB rule handles,
 * MRU and flag bits, then restore the placement modes saved beforehand
 * (VNIC_CFG resets them). No-op for an unallocated VNIC.
 * NOTE(review): the "return rc" after plcmodes_qcfg failure, the bd_stall
 * guard line and closing braces are elided in this copy. Also note the
 * ROCE/RSS_DFLT_CR flag constants use the *_QCFG_OUTPUT_* names in a
 * _CFG input — matches upstream at this era, but worth confirming the
 * bit positions agree in hsi_struct_def_dpdk.h.
 */
1132 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1135 struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1136 struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1137 uint32_t ctx_enable_flag = 0;
1138 struct bnxt_plcmodes_cfg pmodes;
1140 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1141 RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
/* Save placement modes; VNIC_CFG clobbers them, restored at the end. */
1145 rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1149 HWRM_PREP(req, VNIC_CFG);
1151 /* Only RSS support for now TBD: COS & LB */
1153 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
/* Enable each optional rule only when a real handle (!= 0xffff) exists. */
1154 if (vnic->lb_rule != 0xffff)
1155 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1156 if (vnic->cos_rule != 0xffff)
1157 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1158 if (vnic->rss_rule != 0xffff) {
1159 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
1160 ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1162 req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
1163 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1164 req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1165 req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1166 req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1167 req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1168 req.mru = rte_cpu_to_le_16(vnic->mru);
1169 if (vnic->func_default)
1171 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1172 if (vnic->vlan_strip)
1174 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1177 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1178 if (vnic->roce_dual)
1179 req.flags |= rte_cpu_to_le_32(
1180 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1181 if (vnic->roce_only)
1182 req.flags |= rte_cpu_to_le_32(
1183 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1184 if (vnic->rss_dflt_cr)
1185 req.flags |= rte_cpu_to_le_32(
1186 HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
1188 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1190 HWRM_CHECK_RESULT();
/* Put the saved placement modes back now that VNIC_CFG succeeded. */
1193 rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
/*
 * Query a VNIC's configuration (VNIC_QCFG) on behalf of VF @fw_vf_id and
 * decode it into @vnic: ring group, rule handles, MRU, and each boolean
 * mode flag. No-op for an unallocated VNIC (the early return is elided).
 * NOTE(review): the HWRM_UNLOCK/return tail is elided in this copy.
 */
1198 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1202 struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1203 struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1205 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1206 RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1209 HWRM_PREP(req, VNIC_QCFG);
1212 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1213 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1214 req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1216 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1218 HWRM_CHECK_RESULT();
1220 vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1221 vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1222 vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1223 vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1224 vnic->mru = rte_le_to_cpu_16(resp->mru);
/* Unpack each mode bit from the response flags word. */
1225 vnic->func_default = rte_le_to_cpu_32(
1226 resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1227 vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1228 HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1229 vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1230 HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1231 vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1232 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1233 vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1234 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1235 vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1236 HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
/*
 * Allocate an RSS/CoS/LB context (VNIC_RSS_COS_LB_CTX_ALLOC) and store
 * its id as the VNIC's RSS rule handle.
 * NOTE(review): the HWRM_UNLOCK and return lines are elided in this copy.
 */
1243 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1246 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1247 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1248 bp->hwrm_cmd_resp_addr;
1250 HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);
1252 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1254 HWRM_CHECK_RESULT();
1256 vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1258 RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
/* Free the VNIC's RSS/CoS/LB context in firmware and invalidate rss_rule.
 * Skips the call (debug log) when rss_rule is already 0xffff.
 * NOTE(review): the early-exit compares against literal 0xffff while the
 * post-free store uses INVALID_HW_RING_ID — presumably the same value;
 * confirm and unify. */
1263 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1266 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1267 struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1268 bp->hwrm_cmd_resp_addr;
1270 if (vnic->rss_rule == 0xffff) {
1271 RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
1274 HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);
1276 req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1278 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1280 HWRM_CHECK_RESULT();
1283 vnic->rss_rule = INVALID_HW_RING_ID;
/* Free the VNIC in firmware (HWRM VNIC_FREE) and invalidate its id.
 * No-op (debug log) when the id is already invalid. */
1288 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1291 struct hwrm_vnic_free_input req = {.req_type = 0 };
1292 struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1294 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1295 RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1299 HWRM_PREP(req, VNIC_FREE);
1301 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1303 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1305 HWRM_CHECK_RESULT();
1308 vnic->fw_vnic_id = INVALID_HW_RING_ID;
/* Program the VNIC's RSS configuration: hash type, DMA addresses of the
 * ring-group table and hash key, and the RSS context id. */
1312 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1313 struct bnxt_vnic_info *vnic)
1316 struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1317 struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1319 HWRM_PREP(req, VNIC_RSS_CFG);
1321 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1323 req.ring_grp_tbl_addr =
1324 rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1325 req.hash_key_tbl_addr =
1326 rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1327 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1329 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1331 HWRM_CHECK_RESULT();
/* Configure jumbo placement for the VNIC. The jumbo threshold is derived
 * from the data room of rx queue 0's mbuf pool minus the headroom.
 * NOTE(review): assumes bp->rx_queues[0] exists when this is called —
 * confirm against callers. */
1337 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1338 struct bnxt_vnic_info *vnic)
1341 struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1342 struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1345 HWRM_PREP(req, VNIC_PLCMODES_CFG);
1347 req.flags = rte_cpu_to_le_32(
1348 HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1350 req.enables = rte_cpu_to_le_32(
1351 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1353 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1354 size -= RTE_PKTMBUF_HEADROOM;
1356 req.jumbo_thresh = rte_cpu_to_le_16(size);
1357 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1359 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1361 HWRM_CHECK_RESULT();
/* Configure TPA (LRO aggregation) on the VNIC. The flags/limits shown
 * here are only set on the enable path; the 'enable' parameter presumably
 * gates them via an elided conditional — confirm against the full source. */
1367 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1368 struct bnxt_vnic_info *vnic, bool enable)
1371 struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1372 struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1374 HWRM_PREP(req, VNIC_TPA_CFG);
1377 req.enables = rte_cpu_to_le_32(
1378 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1379 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1380 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1381 req.flags = rte_cpu_to_le_32(
1382 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1383 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1384 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1385 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1386 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1387 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1388 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
/* Aggregation limits: 5 segments per aggregation, firmware max contexts,
 * 512-byte minimum aggregated length. */
1389 req.max_agg_segs = rte_cpu_to_le_16(5);
1391 rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1392 req.min_agg_len = rte_cpu_to_le_32(512);
1395 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1397 HWRM_CHECK_RESULT();
/* Set a VF's default MAC address via FUNC_CFG and clear its random_mac
 * flag on success. */
1403 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1405 struct hwrm_func_cfg_input req = {0};
1406 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1409 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1410 req.enables = rte_cpu_to_le_32(
1411 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1412 memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1413 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
/* NOTE(review): HWRM_PREP runs after the fields above are filled, unlike
 * most other commands in this file which PREP first — confirm HWRM_PREP
 * does not zero/overwrite the request here. */
1415 HWRM_PREP(req, FUNC_CFG);
1417 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1418 HWRM_CHECK_RESULT();
1421 bp->pf.vf_info[vf].random_mac = false;
/* Query function stats for 'fid' and return only the TX drop packet count
 * through *dropped. */
1426 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1430 struct hwrm_func_qstats_input req = {.req_type = 0};
1431 struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1433 HWRM_PREP(req, FUNC_QSTATS);
1435 req.fid = rte_cpu_to_le_16(fid);
1437 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1439 HWRM_CHECK_RESULT();
1442 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
/* Query function stats for 'fid' and fold unicast/multicast/broadcast
 * packet and byte counters into the rte_eth_stats structure, plus RX/TX
 * errors and RX drops (reported as imissed). */
1449 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1450 struct rte_eth_stats *stats)
1453 struct hwrm_func_qstats_input req = {.req_type = 0};
1454 struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1456 HWRM_PREP(req, FUNC_QSTATS);
1458 req.fid = rte_cpu_to_le_16(fid);
1460 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1462 HWRM_CHECK_RESULT();
1464 stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1465 stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1466 stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1467 stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1468 stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1469 stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1471 stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1472 stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1473 stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1474 stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1475 stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1476 stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1478 stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
1479 stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);
1481 stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
/* Clear the firmware-maintained statistics of function 'fid'. */
1488 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1491 struct hwrm_func_clr_stats_input req = {.req_type = 0};
1492 struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1494 HWRM_PREP(req, FUNC_CLR_STATS);
1496 req.fid = rte_cpu_to_le_16(fid);
1498 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1500 HWRM_CHECK_RESULT();
1507 * HWRM utility functions
/* Iterate every RX then TX completion ring and clear its stats context.
 * Index layout: [0, rx_cp_nr_rings) are RX queues, the rest map to TX
 * queues at (i - rx_cp_nr_rings). */
1510 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1515 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1516 struct bnxt_tx_queue *txq;
1517 struct bnxt_rx_queue *rxq;
1518 struct bnxt_cp_ring_info *cpr;
1520 if (i >= bp->rx_cp_nr_rings) {
1521 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1524 rxq = bp->rx_queues[i];
1528 rc = bnxt_hwrm_stat_clear(bp, cpr);
/* Free every allocated stats context (RX rings first, then TX) and reset
 * both the cp-ring's id and the group-info copy to HWRM_NA_SIGNATURE. */
1535 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1539 struct bnxt_cp_ring_info *cpr;
1541 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1543 if (i >= bp->rx_cp_nr_rings)
1544 cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1546 cpr = bp->rx_queues[i]->cp_ring;
1547 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1548 rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1549 cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
/* Original TODO retained: grp_info should only be reset for RX rings. */
1551 * TODO. Need a better way to reset grp_info.stats_ctx
1552 * for Rx rings only. stats_ctx is not saved for Tx
1555 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
/* Allocate a stats context for every RX and TX completion ring, using the
 * same RX-then-TX index layout as the clear/free walkers above it. */
1563 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1568 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1569 struct bnxt_tx_queue *txq;
1570 struct bnxt_rx_queue *rxq;
1571 struct bnxt_cp_ring_info *cpr;
1573 if (i >= bp->rx_cp_nr_rings) {
1574 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1577 rxq = bp->rx_queues[i];
1581 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
/* Free every valid HWRM ring group (one per RX completion ring). */
1589 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1594 for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1596 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
1599 rc = bnxt_hwrm_ring_grp_free(bp, idx);
/* Free one completion ring in firmware, invalidate its ids (including the
 * grp_info copy at 'idx'), zero the descriptor memory, and reset the raw
 * consumer index.
 * NOTE(review): 'idx' is marked __rte_unused yet indexes grp_info below. */
1607 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1608 unsigned int idx __rte_unused)
1610 struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1612 bnxt_hwrm_ring_free(bp, cp_ring,
1613 HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1614 cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1615 bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
1616 memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1617 sizeof(*cpr->cp_desc_ring));
1618 cpr->cp_raw_cons = 0;
/* Tear down all firmware rings: every TX ring + its completion ring, then
 * every RX ring (+ aggregation buffer ring) + its completion ring, and
 * finally the default completion ring. Ring memory is zeroed and all
 * firmware ids are invalidated as each ring is freed. */
1621 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1626 for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1627 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1628 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1629 struct bnxt_ring *ring = txr->tx_ring_struct;
1630 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
/* TX group indices follow the RX groups (+1 for the default ring). */
1631 unsigned int idx = bp->rx_cp_nr_rings + i + 1;
1633 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1634 bnxt_hwrm_ring_free(bp, ring,
1635 HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1636 ring->fw_ring_id = INVALID_HW_RING_ID;
1637 memset(txr->tx_desc_ring, 0,
1638 txr->tx_ring_struct->ring_size *
1639 sizeof(*txr->tx_desc_ring));
1640 memset(txr->tx_buf_ring, 0,
1641 txr->tx_ring_struct->ring_size *
1642 sizeof(*txr->tx_buf_ring));
1646 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1647 bnxt_free_cp_ring(bp, cpr, idx);
1648 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1652 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1653 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1654 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1655 struct bnxt_ring *ring = rxr->rx_ring_struct;
1656 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1657 unsigned int idx = i + 1;
1659 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1660 bnxt_hwrm_ring_free(bp, ring,
1661 HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1662 ring->fw_ring_id = INVALID_HW_RING_ID;
1663 bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
1664 memset(rxr->rx_desc_ring, 0,
1665 rxr->rx_ring_struct->ring_size *
1666 sizeof(*rxr->rx_desc_ring));
1667 memset(rxr->rx_buf_ring, 0,
1668 rxr->rx_ring_struct->ring_size *
1669 sizeof(*rxr->rx_buf_ring));
/* Aggregation-buffer ring (used for jumbo/LRO) is cleared as well. */
1671 memset(rxr->ag_buf_ring, 0,
1672 rxr->ag_ring_struct->ring_size *
1673 sizeof(*rxr->ag_buf_ring));
1676 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1677 bnxt_free_cp_ring(bp, cpr, idx);
/* NOTE(review): clears grp_info[i] here but grp_info[idx] (= i + 1)
 * above — confirm which index is intended. */
1678 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
1679 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1683 /* Default completion ring */
1685 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1687 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1688 bnxt_free_cp_ring(bp, cpr, 0);
1689 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
/* Allocate one HWRM ring group per RX completion ring. */
1696 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1701 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1702 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
/* Release the HWRM response and short-command buffers and clear both the
 * virtual and DMA addresses so stale pointers cannot be reused.
 * NOTE(review): the "Release memzone" comment is stale — these buffers are
 * rte_malloc'd, not memzones. */
1709 void bnxt_free_hwrm_resources(struct bnxt *bp)
1711 /* Release memzone */
1712 rte_free(bp->hwrm_cmd_resp_addr);
1713 rte_free(bp->hwrm_short_cmd_req_addr);
1714 bp->hwrm_cmd_resp_addr = NULL;
1715 bp->hwrm_short_cmd_req_addr = NULL;
1716 bp->hwrm_cmd_resp_dma_addr = 0;
1717 bp->hwrm_short_cmd_req_dma_addr = 0;
/* Allocate and pin the HWRM command/response buffer, resolve its physical
 * address for DMA, and initialize the per-device HWRM lock.
 * NOTE(review): sprintf into type[RTE_MEMZONE_NAMESIZE] is unbounded —
 * consider snprintf; also rte_mem_lock_page is called before the NULL
 * check on the allocation. */
1720 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1722 struct rte_pci_device *pdev = bp->pdev;
1723 char type[RTE_MEMZONE_NAMESIZE];
1725 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
1726 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1727 bp->max_resp_len = HWRM_MAX_RESP_LEN;
1728 bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1729 rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1730 if (bp->hwrm_cmd_resp_addr == NULL)
1732 bp->hwrm_cmd_resp_dma_addr =
1733 rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
1734 if (bp->hwrm_cmd_resp_dma_addr == 0) {
1736 "unable to map response address to physical memory\n");
1739 rte_spinlock_init(&bp->hwrm_lock);
/* Clear every filter attached to the VNIC, dispatching on filter type:
 * exact-match, n-tuple, or (default) L2. */
1744 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1746 struct bnxt_filter_info *filter;
1749 STAILQ_FOREACH(filter, &vnic->filter, next) {
1750 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1751 rc = bnxt_hwrm_clear_em_filter(bp, filter);
1752 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1753 rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1755 rc = bnxt_hwrm_clear_l2_filter(bp, filter);
/* Clear the hardware filter behind each rte_flow on the VNIC and unlink
 * the flow from the VNIC's flow list.
 * NOTE(review): logs the filter type at ERR level — looks like leftover
 * debug output; consider DEBUG. Removing list entries while iterating
 * with STAILQ_FOREACH depends on elided code for safety — confirm. */
1763 bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1765 struct bnxt_filter_info *filter;
1766 struct rte_flow *flow;
1769 STAILQ_FOREACH(flow, &vnic->flow_list, next) {
1770 filter = flow->filter;
1771 RTE_LOG(ERR, PMD, "filter type %d\n", filter->filter_type);
1772 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1773 rc = bnxt_hwrm_clear_em_filter(bp, filter);
1774 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1775 rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
1777 rc = bnxt_hwrm_clear_l2_filter(bp, filter);
1779 STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
/* (Re)program every filter attached to the VNIC into hardware, again
 * dispatching on exact-match / n-tuple / L2 filter type. */
1787 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1789 struct bnxt_filter_info *filter;
1792 STAILQ_FOREACH(filter, &vnic->filter, next) {
1793 if (filter->filter_type == HWRM_CFA_EM_FILTER)
1794 rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
1796 else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
1797 rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
1800 rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
/* Free any allocated VXLAN and GENEVE tunnel destination ports in
 * firmware; clears the cached GENEVE port number afterwards. */
1808 void bnxt_free_tunnel_ports(struct bnxt *bp)
1810 if (bp->vxlan_port_cnt)
1811 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
1812 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
1814 if (bp->geneve_port_cnt)
1815 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
1816 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
1817 bp->geneve_port = 0;
/* Full HWRM teardown: per-VNIC flows/filters/contexts/TPA/VNIC in reverse
 * order (so vnic0's L2 filter goes last), then rings, ring groups, stats
 * contexts and tunnel ports. No-op if no VNICs were allocated. */
1820 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1824 if (bp->vnic_info == NULL)
1828 * Cleanup VNICs in reverse order, to make sure the L2 filter
1829 * from vnic0 is last to be cleaned up.
1831 for (i = bp->nr_vnics - 1; i >= 0; i--) {
1832 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1834 bnxt_clear_hwrm_vnic_flows(bp, vnic);
1836 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1838 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1840 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
1842 bnxt_hwrm_vnic_free(bp, vnic);
1844 /* Ring resources */
1845 bnxt_free_all_hwrm_rings(bp);
1846 bnxt_free_all_hwrm_ring_grps(bp);
1847 bnxt_free_all_hwrm_stat_ctxs(bp);
1848 bnxt_free_tunnel_ports(bp);
/* Map ethdev link-speed config bits to an HWRM auto-duplex value:
 * half duplex only for the explicit 10M/100M half-duplex settings,
 * BOTH for autoneg and everything else. */
1851 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1853 uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1855 if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1856 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1858 switch (conf_link_speed) {
1859 case ETH_LINK_SPEED_10M_HD:
1860 case ETH_LINK_SPEED_100M_HD:
1861 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1863 return hw_link_duplex;
/* Map a single ethdev link speed to the corresponding HWRM speed code.
 * Returns ETH_LINK_SPEED_AUTONEG (0) for autoneg; unsupported speeds log
 * a warning and fall back to AUTO.
 * NOTE(review): some cases use AUTO_LINK_SPEED_* codes and others
 * FORCE_LINK_SPEED_* — the numeric values are presumably identical in the
 * HSI; confirm against hsi_struct_def_dpdk.h. */
1866 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
1868 uint16_t eth_link_speed = 0;
1870 if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
1871 return ETH_LINK_SPEED_AUTONEG;
1873 switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
1874 case ETH_LINK_SPEED_100M:
1875 case ETH_LINK_SPEED_100M_HD:
1877 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
1879 case ETH_LINK_SPEED_1G:
1881 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
1883 case ETH_LINK_SPEED_2_5G:
1885 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
1887 case ETH_LINK_SPEED_10G:
1889 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
1891 case ETH_LINK_SPEED_20G:
1893 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
1895 case ETH_LINK_SPEED_25G:
1897 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
1899 case ETH_LINK_SPEED_40G:
1901 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
1903 case ETH_LINK_SPEED_50G:
1905 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
1909 "Unsupported link speed %d; default to AUTO\n",
1913 return eth_link_speed;
/* Bitmask of every ethdev link speed this PMD can advertise/force. */
1916 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
1917 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
1918 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
1919 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
/* Validate a user-requested link speed: autoneg is always valid; a FIXED
 * request must name exactly one supported speed (power-of-two check); an
 * advertised mask must intersect the supported set. */
1921 static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
1925 if (link_speed == ETH_LINK_SPEED_AUTONEG)
1928 if (link_speed & ETH_LINK_SPEED_FIXED) {
1929 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
/* x & (x - 1) != 0 means more than one bit set — ambiguous fixed speed. */
1931 if (one_speed & (one_speed - 1)) {
1933 "Invalid advertised speeds (%u) for port %u\n",
1934 link_speed, port_id);
1937 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
1939 "Unsupported advertised speed (%u) for port %u\n",
1940 link_speed, port_id);
1944 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
1946 "Unsupported advertised speeds (%u) for port %u\n",
1947 link_speed, port_id);
/* Translate an ethdev advertised-speed bitmask to the HWRM auto-speed
 * mask. Autoneg uses the PHY's reported support_speeds when available,
 * else the full driver-supported set. */
1955 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
1959 if (link_speed == ETH_LINK_SPEED_AUTONEG) {
1960 if (bp->link_info.support_speeds)
1961 return bp->link_info.support_speeds;
1962 link_speed = BNXT_SUPPORTED_SPEEDS;
1965 if (link_speed & ETH_LINK_SPEED_100M)
1966 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
/* 100M half-duplex shares the 100MB mask bit with full duplex. */
1967 if (link_speed & ETH_LINK_SPEED_100M_HD)
1968 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1969 if (link_speed & ETH_LINK_SPEED_1G)
1970 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
1971 if (link_speed & ETH_LINK_SPEED_2_5G)
1972 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
1973 if (link_speed & ETH_LINK_SPEED_10G)
1974 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
1975 if (link_speed & ETH_LINK_SPEED_20G)
1976 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
1977 if (link_speed & ETH_LINK_SPEED_25G)
1978 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
1979 if (link_speed & ETH_LINK_SPEED_40G)
1980 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
1981 if (link_speed & ETH_LINK_SPEED_50G)
1982 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
/* Translate an HWRM-reported link speed code back to an ETH_SPEED_NUM_*
 * value; unknown codes (including the explicitly-listed 2GB one) log an
 * error and yield ETH_SPEED_NUM_NONE. */
1986 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
1988 uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
1990 switch (hw_link_speed) {
1991 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
1992 eth_link_speed = ETH_SPEED_NUM_100M;
1994 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
1995 eth_link_speed = ETH_SPEED_NUM_1G;
1997 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
1998 eth_link_speed = ETH_SPEED_NUM_2_5G;
2000 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
2001 eth_link_speed = ETH_SPEED_NUM_10G;
2003 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
2004 eth_link_speed = ETH_SPEED_NUM_20G;
2006 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
2007 eth_link_speed = ETH_SPEED_NUM_25G;
2009 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
2010 eth_link_speed = ETH_SPEED_NUM_40G;
2012 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
2013 eth_link_speed = ETH_SPEED_NUM_50G;
/* 2GB has no ethdev equivalent; falls through to the error path. */
2015 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
2017 RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
2021 return eth_link_speed;
/* Translate an HWRM duplex code to ETH_LINK_{FULL,HALF}_DUPLEX; unknown
 * codes log an error and default to full duplex. */
2024 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
2026 uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2028 switch (hw_link_duplex) {
2029 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
2030 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
2031 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
2033 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
2034 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
2037 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
2041 return eth_link_duplex;
/* Query PHY state from firmware and fill a rte_eth_link: speed, duplex,
 * link status, and whether autoneg is in effect (auto_mode != NONE). */
2044 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
2047 struct bnxt_link_info *link_info = &bp->link_info;
2049 rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
2052 "Get link config failed with rc %d\n", rc);
2055 if (link_info->link_speed)
2057 bnxt_parse_hw_link_speed(link_info->link_speed);
2059 link->link_speed = ETH_SPEED_NUM_NONE;
2060 link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
2061 link->link_status = link_info->link_up;
2062 link->link_autoneg = link_info->auto_mode ==
2063 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
2064 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
/* Apply the port's link configuration from dev_conf: validates the
 * requested speeds, then builds a phy-cfg request for either autoneg
 * (speed mask + restart-autoneg) or a forced single speed, carrying over
 * the current pause settings. Skipped entirely on NPAR PFs and VFs. */
2069 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2072 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2073 struct bnxt_link_info link_req;
2076 if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
2079 rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2080 bp->eth_dev->data->port_id);
2084 memset(&link_req, 0, sizeof(link_req));
2085 link_req.link_up = link_up;
/* speed == 0 (autoneg) selects the mask-based branch below. */
2089 speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2090 link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2092 link_req.phy_flags |=
2093 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2094 link_req.auto_mode =
2095 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
2096 link_req.auto_link_speed_mask =
2097 bnxt_parse_eth_link_speed_mask(bp,
2098 dev_conf->link_speeds);
2100 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2101 link_req.link_speed = speed;
2102 RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
2104 link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2105 link_req.auto_pause = bp->link_info.auto_pause;
2106 link_req.force_pause = bp->link_info.force_pause;
2109 rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2112 "Set link config failed with rc %d\n", rc);
/* Query this function's own config (fid 0xffff = self): caches the VLAN
 * id (12-bit mask) and the NPAR port-partition type if one is reported. */
2120 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2122 struct hwrm_func_qcfg_input req = {0};
2123 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2126 HWRM_PREP(req, FUNC_QCFG);
2127 req.fid = rte_cpu_to_le_16(0xffff);
2129 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2131 HWRM_CHECK_RESULT();
2133 /* Hard Coded.. 0xfff VLAN ID mask */
2134 bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2136 switch (resp->port_partition_type) {
2137 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2138 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2139 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2140 bp->port_partition_type = resp->port_partition_type;
2143 bp->port_partition_type = 0;
/* Synthesize a qcaps response from a func_cfg request — used as a
 * fallback when querying a VF's actual capabilities fails, so resource
 * accounting can proceed with the values we tried to assign. Fields the
 * cfg request does not carry are zeroed. */
2152 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2153 struct hwrm_func_qcaps_output *qcaps)
2155 qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2156 memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2157 sizeof(qcaps->mac_address));
2158 qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2159 qcaps->max_rx_rings = fcfg->num_rx_rings;
2160 qcaps->max_tx_rings = fcfg->num_tx_rings;
2161 qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2162 qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2164 qcaps->first_vf_id = 0;
2165 qcaps->max_vnics = fcfg->num_vnics;
2166 qcaps->max_decap_records = 0;
2167 qcaps->max_encap_records = 0;
2168 qcaps->max_tx_wm_flows = 0;
2169 qcaps->max_tx_em_flows = 0;
2170 qcaps->max_rx_wm_flows = 0;
2171 qcaps->max_rx_em_flows = 0;
2172 qcaps->max_flow_id = 0;
2173 qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2174 qcaps->max_sp_tx_rings = 0;
2175 qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
/* Configure the PF's own resources via FUNC_CFG: MTU/MRU and all ring,
 * context and VNIC maxima, with only the TX ring count parameterized
 * (callers pass 1 during VF provisioning, max_tx_rings otherwise). */
2178 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2180 struct hwrm_func_cfg_input req = {0};
2181 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2184 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2185 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2186 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2187 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2188 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2189 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2190 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2191 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2192 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2193 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2194 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2195 req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
/* MRU covers configured MTU plus L2 header, CRC and one VLAN tag. */
2196 req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2197 ETHER_CRC_LEN + VLAN_TAG_SIZE);
2198 req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2199 req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2200 req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2201 req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2202 req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2203 req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2204 req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2205 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2206 req.fid = rte_cpu_to_le_16(0xffff);
2208 HWRM_PREP(req, FUNC_CFG);
2210 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2212 HWRM_CHECK_RESULT();
/* Fill a FUNC_CFG request template for VFs: resources are the PF maxima
 * divided evenly across (num_vfs + 1) functions (the +1 reserves the
 * PF's own share); VNICs are pinned to 1 (no VMDq/RFS on VFs yet). */
2218 static void populate_vf_func_cfg_req(struct bnxt *bp,
2219 struct hwrm_func_cfg_input *req,
2222 req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2223 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2224 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2225 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2226 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2227 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2228 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2229 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2230 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2231 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2233 req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2234 ETHER_CRC_LEN + VLAN_TAG_SIZE);
2235 req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2236 ETHER_CRC_LEN + VLAN_TAG_SIZE);
2237 req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2239 req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2240 req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2242 req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2243 req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2244 req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2245 /* TODO: For now, do not support VMDq/RFS on VFs. */
2246 req->num_vnics = rte_cpu_to_le_16(1);
2247 req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
/* If the VF's firmware-default MAC is all zeros, generate a random one
 * (and remember it was random); otherwise copy the real default into the
 * cfg request.
 * NOTE(review): the "\x00..." literal holds 5 explicit zero bytes plus
 * the implicit NUL — exactly the 6 compared; fragile but correct. */
2251 static void add_random_mac_if_needed(struct bnxt *bp,
2252 struct hwrm_func_cfg_input *cfg_req,
2255 struct ether_addr mac;
2257 if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2260 if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00", 6) == 0) {
2262 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2263 eth_random_addr(cfg_req->dflt_mac_addr);
2264 bp->pf.vf_info[vf].random_mac = true;
2266 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
/* After configuring a VF, query its actually-granted resources (FUNC_QCAPS
 * on its fid) and subtract them from the PF's remaining maxima. On query
 * failure, fall back to assuming the cfg request values were granted.
 * max_vnics is deliberately not adjusted while VFs are fixed at 1 VNIC. */
2270 static void reserve_resources_from_vf(struct bnxt *bp,
2271 struct hwrm_func_cfg_input *cfg_req,
2274 struct hwrm_func_qcaps_input req = {0};
2275 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2278 /* Get the actual allocated values now */
2279 HWRM_PREP(req, FUNC_QCAPS);
2280 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2281 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2284 RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
2285 copy_func_cfg_to_qcaps(cfg_req, resp);
2286 } else if (resp->error_code) {
2287 rc = rte_le_to_cpu_16(resp->error_code);
2288 RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
2289 copy_func_cfg_to_qcaps(cfg_req, resp);
2292 bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2293 bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2294 bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2295 bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2296 bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2297 bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2299 * TODO: While not supporting VMDq with VFs, max_vnics is always
2300 * forced to 1 in this case
2302 //bp->max_vnics -= rte_le_to_cpu_16(esp->max_vnics);
2303 bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
/* Query a VF's current VLAN via FUNC_QCFG on its fid. On success the
 * return value is the VLAN id itself (rc reused as the result); failures
 * are logged. */
2308 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2310 struct hwrm_func_qcfg_input req = {0};
2311 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
/* NOTE(review): comment below ("zero MAC address") does not match what
 * this function reads (the VLAN field) — looks copy-pasted. */
2314 /* Check for zero MAC address */
2315 HWRM_PREP(req, FUNC_QCFG);
2316 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2317 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2319 RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
2321 } else if (resp->error_code) {
2322 rc = rte_le_to_cpu_16(resp->error_code);
2323 RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
2326 rc = rte_le_to_cpu_16(resp->vlan);
/* Re-read the PF's own config after VF provisioning and update the
 * driver's view of allocated resources (TX rings and EVB mode; the
 * original TODO notes only TX rings reflect the actual allocation). */
2333 static int update_pf_resource_max(struct bnxt *bp)
2335 struct hwrm_func_qcfg_input req = {0};
2336 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2339 /* And copy the allocated numbers into the pf struct */
2340 HWRM_PREP(req, FUNC_QCFG);
2341 req.fid = rte_cpu_to_le_16(0xffff);
2342 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2343 HWRM_CHECK_RESULT();
2345 /* Only TX ring value reflects actual allocation? TODO */
2346 bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2347 bp->pf.evb_mode = resp->evb_mode;
/* PF-only resource allocation (no VFs): refresh qcaps, force standard
 * TX-ring mode off, and claim max_tx_rings for the PF. Errors out when
 * run on a VF.
 * NOTE(review): "allcoate" typo in the log string. */
2354 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2359 RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
2363 rc = bnxt_hwrm_func_qcaps(bp);
2367 bp->pf.func_cfg_flags &=
2368 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2369 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2370 bp->pf.func_cfg_flags |=
2371 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2372 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
/* Provision num_vfs virtual functions:
 *  1. shrink the PF to 1 TX ring so rings remain for VFs (firmware
 *     workaround, fixed in fw > 20.6.54 per original comment);
 *  2. allocate + page-lock a buffer for forwarded VF HWRM requests and
 *     register it;
 *  3. FUNC_CFG each VF (random MAC if its default is zero), subtracting
 *     granted resources from the PF totals and clearing VF stats;
 *  4. reconfigure the PF with the remainder in STD_TX_RING mode and
 *     refresh the PF resource maxima.
 * NOTE(review): log strings contain typos "allcoate" and "initizlie". */
2376 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2378 struct hwrm_func_cfg_input req = {0};
2379 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2386 RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
2390 rc = bnxt_hwrm_func_qcaps(bp);
2395 bp->pf.active_vfs = num_vfs;
2398 * First, configure the PF to only use one TX ring. This ensures that
2399 * there are enough rings for all VFs.
2401 * If we don't do this, when we call func_alloc() later, we will lock
2402 * extra rings to the PF that won't be available during func_cfg() of
2405 * This has been fixed with firmware versions above 20.6.54
2407 bp->pf.func_cfg_flags &=
2408 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2409 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2410 bp->pf.func_cfg_flags |=
2411 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2412 rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2417 * Now, create and register a buffer to hold forwarded VF requests
2419 req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2420 bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2421 page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
2422 if (bp->pf.vf_req_buf == NULL) {
/* Touch every page so it is resident before handing it to firmware. */
2426 for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2427 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2428 for (i = 0; i < num_vfs; i++)
2429 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2430 (i * HWRM_MAX_REQ_LEN);
2432 rc = bnxt_hwrm_func_buf_rgtr(bp);
2436 populate_vf_func_cfg_req(bp, &req, num_vfs);
2438 bp->pf.active_vfs = 0;
2439 for (i = 0; i < num_vfs; i++) {
2440 add_random_mac_if_needed(bp, &req, i);
2442 HWRM_PREP(req, FUNC_CFG);
2443 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2444 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2445 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2447 /* Clear enable flag for next pass */
2448 req.enables &= ~rte_cpu_to_le_32(
2449 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2451 if (rc || resp->error_code) {
2453 "Failed to initizlie VF %d\n", i);
2455 "Not all VFs available. (%d, %d)\n",
2456 rc, resp->error_code);
2463 reserve_resources_from_vf(bp, &req, i);
2464 bp->pf.active_vfs++;
2465 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2469 * Now configure the PF to use "the rest" of the resources
2470 * We're using STD_TX_RING_MODE here though which will limit the TX
2471 * rings. This will allow QoS to function properly. Not setting this
2472 * will cause PF rings to break bandwidth settings.
2474 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2478 rc = update_pf_resource_max(bp);
2485 bnxt_hwrm_func_buf_unrgtr(bp);
/* Push the PF's cached EVB (edge virtual bridging) mode to firmware. */
2489 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2491 struct hwrm_func_cfg_input req = {0};
2492 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2495 HWRM_PREP(req, FUNC_CFG);
2497 req.fid = rte_cpu_to_le_16(0xffff);
2498 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2499 req.evb_mode = bp->pf.evb_mode;
2501 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2502 HWRM_CHECK_RESULT();
/*
 * Register a tunnel (VXLAN/Geneve) UDP destination port with firmware and
 * cache both the firmware-assigned port id and the raw port in 'bp'.
 * NOTE(review): resp->tunnel_dst_port_id is cached without rte_le_to_cpu
 * conversion — the cached value stays in firmware byte order; confirm the
 * consumers (e.g. tunnel_dst_port_free path) expect that.
 */
2508 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2509 				uint8_t tunnel_type)
2511 	struct hwrm_tunnel_dst_port_alloc_input req = {0};
2512 	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2515 	HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
2516 	req.tunnel_type = tunnel_type;
2517 	req.tunnel_dst_port_val = port;
2518 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2519 	HWRM_CHECK_RESULT();
/* Cache per-tunnel-type state; the 'break' lines fall in the listing gaps. */
2521 	switch (tunnel_type) {
2522 	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2523 		bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2524 		bp->vxlan_port = port;
2526 	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2527 		bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2528 		bp->geneve_port = port;
/*
 * Release a previously allocated tunnel destination port in firmware.
 * NOTE(review): the request field is the firmware-assigned port *id*
 * (see tunnel_dst_port_alloc's resp->tunnel_dst_port_id), yet callers
 * pass a port value that is byte-swapped here with rte_cpu_to_be_16().
 * Verify against the HWRM spec that the FW-assigned id — not the raw
 * UDP port — is what must be sent; upstream bnxt later reworked this.
 */
2539 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2540 				uint8_t tunnel_type)
2542 	struct hwrm_tunnel_dst_port_free_input req = {0};
2543 	struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2546 	HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
2548 	req.tunnel_type = tunnel_type;
2549 	req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2550 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2552 	HWRM_CHECK_RESULT();
/*
 * Send FUNC_CFG for a VF with only the given flags set (no enables),
 * e.g. to force a resource re-evaluation for that VF.
 * 'vf' indexes bp->pf.vf_info without a visible bounds check — callers
 * are presumed to validate it.
 */
2558 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2561 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2562 	struct hwrm_func_cfg_input req = {0};
2565 	HWRM_PREP(req, FUNC_CFG);
2567 	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2568 	req.flags = rte_cpu_to_le_32(flags);
2569 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2571 	HWRM_CHECK_RESULT();
/*
 * Callback for bnxt_hwrm_func_vf_vnic_query_and_config(): overwrite the
 * VNIC's flags with the uint32_t pointed to by 'flagp' (the cbdata).
 */
2577 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2579 	uint32_t *flag = flagp;
2581 	vnic->flags = *flag;
/* Thin wrapper: program the VNIC rx mask with no VLAN filter table. */
2584 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2586 	return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
/*
 * Register the PF's VF-request forwarding buffer with firmware so VF HWRM
 * requests can be forwarded to the PF driver.
 * NOTE(review): num_pages is hard-coded to 1 while the page size is derived
 * from bp->pf.active_vfs * HWRM_MAX_REQ_LEN — active_vfs must already hold
 * the VF count at this point (the caller sequence in this excerpt resets
 * active_vfs *after* calling this); confirm the ordering.
 */
2589 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2592 	struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2593 	struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2595 	HWRM_PREP(req, FUNC_BUF_RGTR);
2597 	req.req_buf_num_pages = rte_cpu_to_le_16(1);
2598 	req.req_buf_page_size = rte_cpu_to_le_16(
2599 			page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2600 	req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2601 	req.req_buf_page_addr[0] =
/* Physical address is mandatory for DMA; bail out if the VA cannot map. */
2602 		rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
2603 	if (req.req_buf_page_addr[0] == 0) {
2605 			"unable to map buffer address to physical memory\n");
2609 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2611 	HWRM_CHECK_RESULT();
/* Unregister the VF-request forwarding buffer from firmware. */
2617 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2620 	struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2621 	struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2623 	HWRM_PREP(req, FUNC_BUF_UNRGTR);
2625 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2627 	HWRM_CHECK_RESULT();
/*
 * Point the PF's async event notifications at the default completion
 * ring (FUNC_CFG with ASYNC_EVENT_CR enabled, fid 0xffff = self).
 * Requires bp->def_cp_ring to be set up before this is called.
 */
2633 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2635 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2636 	struct hwrm_func_cfg_input req = {0};
2639 	HWRM_PREP(req, FUNC_CFG);
2641 	req.fid = rte_cpu_to_le_16(0xffff);
2642 	req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2643 	req.enables = rte_cpu_to_le_32(
2644 			HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2645 	req.async_event_cr = rte_cpu_to_le_16(
2646 			bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2647 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2649 	HWRM_CHECK_RESULT();
/*
 * VF-side counterpart of bnxt_hwrm_func_cfg_def_cp(): bind async events
 * to the default completion ring via FUNC_VF_CFG.
 * NOTE(review): the enable constant is the FUNC_CFG one reused in a
 * FUNC_VF_CFG request — presumably the bit values coincide; confirm
 * against hsi_struct_def_dpdk.h (HWRM_FUNC_VF_CFG_INPUT_ENABLES_*).
 */
2655 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2657 	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2658 	struct hwrm_func_vf_cfg_input req = {0};
2661 	HWRM_PREP(req, FUNC_VF_CFG);
2663 	req.enables = rte_cpu_to_le_32(
2664 			HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2665 	req.async_event_cr = rte_cpu_to_le_16(
2666 			bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2667 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2669 	HWRM_CHECK_RESULT();
/*
 * Set the default VLAN either for a VF (is_vf true: use vf_info[vf]) or
 * for the PF itself (fid 0xffff, bp->vlan).
 * NOTE(review): in the PF branch 'fid' is assigned rte_cpu_to_le_16(0xffff)
 * and later swapped again into req.fid — a double swap. It is harmless only
 * because 0xffff is byte-swap-invariant; assigning plain 0xffff would be
 * cleaner and safe on big-endian hosts.
 */
2675 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2677 	struct hwrm_func_cfg_input req = {0};
2678 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2679 	uint16_t dflt_vlan, fid;
2680 	uint32_t func_cfg_flags;
2683 	HWRM_PREP(req, FUNC_CFG);
/* VF branch: per-VF fid/flags/vlan (the if/else lines sit in listing gaps). */
2686 		dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2687 		fid = bp->pf.vf_info[vf].fid;
2688 		func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
/* PF branch: address self and use the PF's configured VLAN. */
2690 		fid = rte_cpu_to_le_16(0xffff);
2691 		func_cfg_flags = bp->pf.func_cfg_flags;
2692 		dflt_vlan = bp->vlan;
2695 	req.flags = rte_cpu_to_le_32(func_cfg_flags);
2696 	req.fid = rte_cpu_to_le_16(fid);
2697 	req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2698 	req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2700 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2702 	HWRM_CHECK_RESULT();
/*
 * Configure a VF's maximum bandwidth. 'enables' selects which bandwidth
 * field(s) the firmware should apply.
 * NOTE(review): max_bw is a uint16_t parameter widened into the 32-bit
 * req.max_bw field — upstream later widened the parameter itself; confirm
 * callers never need more than 16 bits of bandwidth units.
 */
2708 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2709 			uint16_t max_bw, uint16_t enables)
2711 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2712 	struct hwrm_func_cfg_input req = {0};
2715 	HWRM_PREP(req, FUNC_CFG);
2717 	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2718 	req.enables |= rte_cpu_to_le_32(enables);
2719 	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2720 	req.max_bw = rte_cpu_to_le_32(max_bw);
2721 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2723 	HWRM_CHECK_RESULT();
/*
 * Push a VF's stored default VLAN (bp->pf.vf_info[vf].dflt_vlan) to
 * firmware via FUNC_CFG with the DFLT_VLAN enable bit.
 */
2729 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2731 	struct hwrm_func_cfg_input req = {0};
2732 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2735 	HWRM_PREP(req, FUNC_CFG);
2737 	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2738 	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2739 	req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2740 	req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
2742 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2744 	HWRM_CHECK_RESULT();
/*
 * Reject a forwarded VF HWRM request: echo the encapsulated request back
 * to firmware marked as rejected. The size guard prevents overrunning the
 * fixed encap_request array; the rejected-path return falls in a gap.
 */
2750 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2751 			      void *encaped, size_t ec_size)
2754 	struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2755 	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2757 	if (ec_size > sizeof(req.encap_request))
2760 	HWRM_PREP(req, REJECT_FWD_RESP);
2762 	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2763 	memcpy(req.encap_request, encaped, ec_size);
2765 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2767 	HWRM_CHECK_RESULT();
/*
 * Query a VF's function config and copy its default MAC address into
 * 'mac'. The MAC bytes are copied verbatim (no endianness concern for a
 * byte array).
 */
2773 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2774 				       struct ether_addr *mac)
2776 	struct hwrm_func_qcfg_input req = {0};
2777 	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2780 	HWRM_PREP(req, FUNC_QCFG);
2782 	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2783 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2785 	HWRM_CHECK_RESULT();
2787 	memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
/*
 * Execute a forwarded VF HWRM request on its behalf: wrap the original
 * request in EXEC_FWD_RESP and send it to firmware. Mirrors
 * bnxt_hwrm_reject_fwd_resp() including the encap size guard.
 */
2794 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2795 			    void *encaped, size_t ec_size)
2798 	struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2799 	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2801 	if (ec_size > sizeof(req.encap_request))
2804 	HWRM_PREP(req, EXEC_FWD_RESP);
2806 	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2807 	memcpy(req.encap_request, encaped, ec_size);
2809 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2811 	HWRM_CHECK_RESULT();
/*
 * Query one stat context (cid) and fold its counters into the per-queue
 * slot 'idx' of 'stats'. The 'rx' flag selects whether the context feeds
 * the RX (q_ipackets/q_ibytes/q_errors) or TX (q_opackets/q_obytes,
 * tx errors added into q_errors) columns; the if/else lines themselves
 * fall in the listing gaps.
 */
2817 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
2818 			 struct rte_eth_stats *stats, uint8_t rx)
2821 	struct hwrm_stat_ctx_query_input req = {.req_type = 0};
2822 	struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
2824 	HWRM_PREP(req, STAT_CTX_QUERY);
2826 	req.stat_ctx_id = rte_cpu_to_le_32(cid);
2828 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2830 	HWRM_CHECK_RESULT();
/* RX direction: unicast + multicast + broadcast aggregated per queue. */
2833 		stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2834 		stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2835 		stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2836 		stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2837 		stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2838 		stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2839 		stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
2840 		stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
/* TX direction: same aggregation; tx errors accumulate into q_errors. */
2842 		stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2843 		stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2844 		stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2845 		stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2846 		stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2847 		stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2848 		stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
/*
 * Ask firmware to DMA port-level statistics into the pre-mapped host
 * buffers (hw_tx/rx_port_stats_map). No-op unless BNXT_FLAG_PORT_STATS
 * was negotiated.
 */
2857 int bnxt_hwrm_port_qstats(struct bnxt *bp)
2859 	struct hwrm_port_qstats_input req = {0};
2860 	struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2861 	struct bnxt_pf_info *pf = &bp->pf;
2864 	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2867 	HWRM_PREP(req, PORT_QSTATS);
2869 	req.port_id = rte_cpu_to_le_16(pf->port_id);
2870 	req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
2871 	req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
2872 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2874 	HWRM_CHECK_RESULT();
/*
 * Clear firmware's port-level statistics counters. Gated on
 * BNXT_FLAG_PORT_STATS like bnxt_hwrm_port_qstats().
 */
2880 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
2882 	struct hwrm_port_clr_stats_input req = {0};
2883 	struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2884 	struct bnxt_pf_info *pf = &bp->pf;
2887 	if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2890 	HWRM_PREP(req, PORT_CLR_STATS);
2892 	req.port_id = rte_cpu_to_le_16(pf->port_id);
2893 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2895 	HWRM_CHECK_RESULT();
/*
 * Query the port's LED capabilities and cache them in bp->leds /
 * bp->num_leds. LEDs lacking a group id or alt-blink capability are
 * rejected (the reset/return in that branch falls in a listing gap).
 * NOTE(review): req.port_id is assigned without rte_cpu_to_le_16(),
 * unlike the other port functions here — harmless only on LE hosts
 * or for port ids < 256; verify.
 */
2901 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
2903 	struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2904 	struct hwrm_port_led_qcaps_input req = {0};
2910 	HWRM_PREP(req, PORT_LED_QCAPS);
2911 	req.port_id = bp->pf.port_id;
2912 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2914 	HWRM_CHECK_RESULT();
2916 	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
2919 		bp->num_leds = resp->num_leds;
/* led0_id..ledN fields are laid out contiguously, so one memcpy suffices. */
2920 		memcpy(bp->leds, &resp->led0_id,
2921 			sizeof(bp->leds[0]) * bp->num_leds);
2922 		for (i = 0; i < bp->num_leds; i++) {
2923 			struct bnxt_led_info *led = &bp->leds[i];
2925 			uint16_t caps = led->led_state_caps;
2927 			if (!led->led_group_id ||
2928 			    !BNXT_LED_ALT_BLINK_CAP(caps)) {
/*
 * Turn the port identification LEDs on (alternating blink at 500ms) or
 * back to their default state. Skipped when no LEDs were discovered or
 * on a VF.
 * NOTE(review): 'duration' is pre-converted with rte_cpu_to_le_16 and then
 * stored into the led_blink_on/off fields — consistent only if those fields
 * are raw LE; and req.enables accumulates BNXT_LED_DFLT_ENABLES(i) without
 * a visible cpu_to_le conversion. Verify both on big-endian.
 */
2940 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
2942 	struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2943 	struct hwrm_port_led_cfg_input req = {0};
2944 	struct bnxt_led_cfg *led_cfg;
2945 	uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
2946 	uint16_t duration = 0;
2949 	if (!bp->num_leds || BNXT_VF(bp))
2952 	HWRM_PREP(req, PORT_LED_CFG);
/* led_on branch (the 'if' line sits in a listing gap): 500ms alt-blink. */
2955 		led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
2956 		duration = rte_cpu_to_le_16(500);
2958 	req.port_id = bp->pf.port_id;
2959 	req.num_leds = bp->num_leds;
2960 	led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
2961 	for (i = 0; i < bp->num_leds; i++, led_cfg++) {
2962 		req.enables |= BNXT_LED_DFLT_ENABLES(i);
2963 		led_cfg->led_id = bp->leds[i].led_id;
2964 		led_cfg->led_state = led_state;
2965 		led_cfg->led_blink_on = duration;
2966 		led_cfg->led_blink_off = duration;
2967 		led_cfg->led_group_id = bp->leds[i].led_group_id;
2970 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2972 	HWRM_CHECK_RESULT();
/*
 * Fetch the NVRAM directory geometry: number of directory entries and
 * the size of each entry, returned through the out-parameters.
 */
2978 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
2982 	struct hwrm_nvm_get_dir_info_input req = {0};
2983 	struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
2985 	HWRM_PREP(req, NVM_GET_DIR_INFO);
2987 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2989 	HWRM_CHECK_RESULT();
2993 	*entries = rte_le_to_cpu_32(resp->entries);
2994 	*length = rte_le_to_cpu_32(resp->entry_length);
/*
 * Read the full NVRAM directory into 'data' (first two bytes carry the
 * entry count and entry length, then the raw directory, truncated to
 * 'len'). Firmware DMAs into a locked bounce buffer which is then copied
 * out.
 * NOTE(review): '*data++ = dir_entries;' stores a uint32_t count through a
 * uint8_t pointer — only the low byte survives; verify callers expect a
 * byte-sized count. Also rte_mem_lock_page(buf) runs before the (gap-
 * hidden) NULL check on buf; upstream later reordered this.
 */
2999 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3002 	uint32_t dir_entries;
3003 	uint32_t entry_length;
3006 	phys_addr_t dma_handle;
3007 	struct hwrm_nvm_get_dir_entries_input req = {0};
3008 	struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3010 	rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3014 	*data++ = dir_entries;
3015 	*data++ = entry_length;
3017 	memset(data, 0xff, len);
3019 	buflen = dir_entries * entry_length;
3020 	buf = rte_malloc("nvm_dir", buflen, 0);
3021 	rte_mem_lock_page(buf);
3024 	dma_handle = rte_mem_virt2phy(buf);
3025 	if (dma_handle == 0) {
3027 			"unable to map response address to physical memory\n");
3030 	HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
3031 	req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3032 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3034 	HWRM_CHECK_RESULT();
/* Copy no more than the caller's buffer can hold. */
3038 	memcpy(data, buf, len > buflen ? buflen : len);
/*
 * Read 'length' bytes from NVRAM directory entry 'index' starting at
 * 'offset', via a locked DMA bounce buffer, then copy into 'data'.
 * NOTE(review): as in bnxt_get_nvram_directory(), rte_mem_lock_page(buf)
 * precedes the NULL check hidden in the listing gap; upstream reordered.
 */
3045 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3046 			     uint32_t offset, uint32_t length,
3051 	phys_addr_t dma_handle;
3052 	struct hwrm_nvm_read_input req = {0};
3053 	struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3055 	buf = rte_malloc("nvm_item", length, 0);
3056 	rte_mem_lock_page(buf);
3060 	dma_handle = rte_mem_virt2phy(buf);
3061 	if (dma_handle == 0) {
3063 			"unable to map response address to physical memory\n");
3066 	HWRM_PREP(req, NVM_READ);
3067 	req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3068 	req.dir_idx = rte_cpu_to_le_16(index);
3069 	req.offset = rte_cpu_to_le_32(offset);
3070 	req.len = rte_cpu_to_le_32(length);
3071 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3072 	HWRM_CHECK_RESULT();
3075 	memcpy(data, buf, length);
/* Erase one NVRAM directory entry by directory index. */
3081 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3084 	struct hwrm_nvm_erase_dir_entry_input req = {0};
3085 	struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3087 	HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
3088 	req.dir_idx = rte_cpu_to_le_16(index);
3089 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3090 	HWRM_CHECK_RESULT();
/*
 * Write a data blob into NVRAM (NVM_WRITE): the item is addressed by
 * dir_type/dir_ordinal/dir_ext/dir_attr and DMAed from a locked bounce
 * buffer.
 * NOTE(review): buf's NULL check and its rte_free() on the exit paths fall
 * in the listing gaps — confirm the full file frees the bounce buffer on
 * both success and the virt2phy failure path.
 */
3097 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3098 			  uint16_t dir_ordinal, uint16_t dir_ext,
3099 			  uint16_t dir_attr, const uint8_t *data,
3103 	struct hwrm_nvm_write_input req = {0};
3104 	struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3105 	phys_addr_t dma_handle;
3108 	HWRM_PREP(req, NVM_WRITE);
3110 	req.dir_type = rte_cpu_to_le_16(dir_type);
3111 	req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3112 	req.dir_ext = rte_cpu_to_le_16(dir_ext);
3113 	req.dir_attr = rte_cpu_to_le_16(dir_attr);
3114 	req.dir_data_length = rte_cpu_to_le_32(data_len);
3116 	buf = rte_malloc("nvm_write", data_len, 0);
3117 	rte_mem_lock_page(buf);
3121 	dma_handle = rte_mem_virt2phy(buf);
3122 	if (dma_handle == 0) {
3124 			"unable to map response address to physical memory\n");
3127 	memcpy(buf, data, data_len);
3128 	req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3130 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3132 	HWRM_CHECK_RESULT();
/*
 * vnic_cb for bnxt_vf_vnic_count(): increments the uint32_t counter
 * passed as cbdata once per VNIC; the vnic itself is unused.
 */
3140 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3142 	uint32_t *count = cbdata;
3144 	*count = *count + 1;
/* No-op hwrm_cb used when counting VNICs: nothing to reprogram. */
3147 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3148 				     struct bnxt_vnic_info *vnic __rte_unused)
/*
 * Count the VNICs currently allocated to VF 'vf' by walking them with
 * the counting callback and a stub reprogram step.
 */
3153 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3157 	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3158 	    &count, bnxt_vnic_count_hwrm_stub);
/*
 * Ask firmware for the list of VNIC ids owned by VF 'vf'; firmware DMAs
 * the ids into the caller-provided (page-locked) vnic_ids table.
 * Returns the number of ids on success, negative on failure.
 */
3163 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3166 	struct hwrm_func_vf_vnic_ids_query_input req = {0};
3167 	struct hwrm_func_vf_vnic_ids_query_output *resp =
3168 						bp->hwrm_cmd_resp_addr;
3171 	/* First query all VNIC ids */
3172 	HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
/* VF ids are firmware-global: offset by the PF's first_vf_id. */
3174 	req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3175 	req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3176 	req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));
3178 	if (req.vnic_id_tbl_addr == 0) {
3181 		"unable to map VNIC ID table address to physical memory\n");
3184 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3187 		RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
3189 	} else if (resp->error_code) {
3190 		rc = rte_le_to_cpu_16(resp->error_code);
3192 		RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
/* Success: repurpose rc to carry the returned id count. */
3195 	rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3203 * This function queries the VNIC IDs for a specified VF. It then calls
3204 * the vnic_cb to update the necessary field in vnic_info with cbdata.
3205 * Then it calls the hwrm_cb function to program this new vnic configuration.
/*
 * For each VNIC owned by VF 'vf': query its current config, let vnic_cb
 * mutate the in-memory copy using cbdata, then let hwrm_cb push the new
 * config to firmware. The vnic_ids table is page-locked so firmware can
 * DMA into it.
 */
3207 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3208 	void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3209 	int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3211 	struct bnxt_vnic_info vnic;
3213 	int i, num_vnic_ids;
3218 	/* First query all VNIC ids */
3219 	vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3220 	vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3221 			RTE_CACHE_LINE_SIZE);
3222 	if (vnic_ids == NULL) {
/* Touch every page so rte_mem_virt2phy() yields valid mappings for DMA. */
3226 	for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3227 		rte_mem_lock_page(((char *)vnic_ids) + sz);
3229 	num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
/* NOTE(review): early return here leaks vnic_ids unless a free sits in
 * the listing gap (line 3230/3233) — confirm against the full file. */
3231 	if (num_vnic_ids < 0)
3232 		return num_vnic_ids;
3234 	/* Retrieve VNIC, update bd_stall then update */
3236 	for (i = 0; i < num_vnic_ids; i++) {
3237 		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3238 		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3239 		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
/* mru <= 4 is the driver's heuristic for "VNIC slot not allocated". */
3242 		if (vnic.mru <= 4)	/* Indicates unallocated */
3245 		vnic_cb(&vnic, cbdata);
3247 		rc = hwrm_cb(bp, &vnic);
/*
 * Enable or disable VLAN anti-spoof checking for VF 'vf': VALIDATE_VLAN
 * when on, NOCHECK when off, via FUNC_CFG with the VLAN_ANTISPOOF_MODE
 * enable bit.
 */
3257 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3260 	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3261 	struct hwrm_func_cfg_input req = {0};
3264 	HWRM_PREP(req, FUNC_CFG);
3266 	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3267 	req.enables |= rte_cpu_to_le_32(
3268 			HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3269 	req.vlan_antispoof_mode = on ?
3270 		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3271 		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3272 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3274 	HWRM_CHECK_RESULT();
/*
 * Find and return the default VNIC id of VF 'vf' by querying all of the
 * VF's VNICs and looking for the one flagged func_default. Returns a
 * negative/error value when none is found (the error return falls in a
 * listing gap).
 * NOTE(review): the query result is assigned to 'rc' (line 3300) but the
 * loop below bounds on 'num_vnic_ids', which is never assigned in the
 * visible code — this looks like it should read
 * 'num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(...)'; confirm against the
 * full file / upstream fix.
 */
3280 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3282 	struct bnxt_vnic_info vnic;
3285 	int num_vnic_ids, i;
3289 	vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3290 	vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3291 			RTE_CACHE_LINE_SIZE);
3292 	if (vnic_ids == NULL) {
3297 	for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3298 		rte_mem_lock_page(((char *)vnic_ids) + sz);
3300 	rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3306 	 * Loop through to find the default VNIC ID.
3307 	 * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3308 	 * by sending the hwrm_func_qcfg command to the firmware.
3310 	for (i = 0; i < num_vnic_ids; i++) {
3311 		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3312 		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3313 		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3314 					bp->pf.first_vf_id + vf);
3317 		if (vnic.func_default) {
3319 			return vnic.fw_vnic_id;
3322 	/* Could not find a default VNIC. */
3323 	RTE_LOG(ERR, PMD, "No default VNIC\n");
/*
 * Program an exact-match (EM) flow into the CFA, steering matches to
 * 'dst_id'. Any previously programmed EM filter for this bnxt_filter_info
 * is freed first. Each optional match field is copied only when its
 * enable bit is present (the 'if (enables &' halves of those conditions
 * fall in the listing gaps).
 * Match fields use network byte order (rte_cpu_to_be_*) per the CFA EM
 * key layout; ids/handles use little-endian like all HWRM fields.
 */
3329 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3331 			 struct bnxt_filter_info *filter)
3334 	struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3335 	struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3336 	uint32_t enables = 0;
3338 	if (filter->fw_em_filter_id != UINT64_MAX)
3339 		bnxt_hwrm_clear_em_filter(bp, filter);
3341 	HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
3343 	req.flags = rte_cpu_to_le_32(filter->flags);
3345 	enables = filter->enables |
3346 	      HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3347 	req.dst_id = rte_cpu_to_le_16(dst_id);
3349 	if (filter->ip_addr_type) {
3350 		req.ip_addr_type = filter->ip_addr_type;
3351 		enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3354 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3355 		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3357 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3358 		memcpy(req.src_macaddr, filter->src_macaddr,
3361 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3362 		memcpy(req.dst_macaddr, filter->dst_macaddr,
3365 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3366 		req.ovlan_vid = filter->l2_ovlan;
3368 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3369 		req.ivlan_vid = filter->l2_ivlan;
3371 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3372 		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3374 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3375 		req.ip_protocol = filter->ip_protocol;
3377 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3378 		req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3380 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3381 		req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3383 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3384 		req.src_port = rte_cpu_to_be_16(filter->src_port);
3386 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3387 		req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3389 	    HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3390 		req.mirror_vnic_id = filter->mirror_vnic_id;
3392 	req.enables = rte_cpu_to_le_32(enables);
3394 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3396 	HWRM_CHECK_RESULT();
/* Remember the firmware handle so the filter can be freed later. */
3398 	filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
/*
 * Free a previously programmed EM flow in firmware; no-op if none is
 * recorded. Both cached filter handles are invalidated afterwards.
 * NOTE(review): the "Clear EM filter" message is logged at ERR level on
 * the normal path — looks like leftover debug output; should be DEBUG.
 */
3404 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3407 	struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3408 	struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3410 	if (filter->fw_em_filter_id == UINT64_MAX)
3413 	RTE_LOG(ERR, PMD, "Clear EM filter\n");
3414 	HWRM_PREP(req, CFA_EM_FLOW_FREE);
3416 	req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3418 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3420 	HWRM_CHECK_RESULT();
/* UINT64_MAX (-1) marks "no filter programmed". */
3423 	filter->fw_em_filter_id = -1;
3424 	filter->fw_l2_filter_id = -1;
/*
 * Program an n-tuple flow into the CFA, steering matches to 'dst_id'.
 * Mirrors bnxt_hwrm_set_em_filter(): free any previous filter, build the
 * enables mask, copy only the enabled match fields (the 'if (enables &'
 * halves of the conditions fall in the listing gaps), send, and cache the
 * returned handle.
 * NOTE(review): endianness inconsistency — src/dst addresses and
 * src_ipaddr_mask use rte_cpu_to_le_32, but dst_ipaddr_mask (line 3486)
 * uses rte_cpu_to_be_32. One of these is wrong on big-endian hosts;
 * upstream settled on le_32 for all four. Also note the dst_macaddr
 * handling is commented out (lines 3464-3465).
 */
3429 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3431 			 struct bnxt_filter_info *filter)
3434 	struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3435 	struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3436 						bp->hwrm_cmd_resp_addr;
3437 	uint32_t enables = 0;
3439 	if (filter->fw_ntuple_filter_id != UINT64_MAX)
3440 		bnxt_hwrm_clear_ntuple_filter(bp, filter);
3442 	HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
3444 	req.flags = rte_cpu_to_le_32(filter->flags);
3446 	enables = filter->enables |
3447 	      HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3448 	req.dst_id = rte_cpu_to_le_16(dst_id);
3451 	if (filter->ip_addr_type) {
3452 		req.ip_addr_type = filter->ip_addr_type;
3454 		HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3457 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3458 		req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3460 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3461 		memcpy(req.src_macaddr, filter->src_macaddr,
3464 	    //HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
3465 		//memcpy(req.dst_macaddr, filter->dst_macaddr,
3468 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
3469 		req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3471 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3472 		req.ip_protocol = filter->ip_protocol;
3474 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3475 		req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
3477 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
3478 		req.src_ipaddr_mask[0] =
3479 			rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
3481 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
3482 		req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
3484 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
/* NOTE(review): be_32 here vs le_32 for every sibling field — see header. */
3485 		req.dst_ipaddr_mask[0] =
3486 			rte_cpu_to_be_32(filter->dst_ipaddr_mask[0]);
3488 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
3489 		req.src_port = rte_cpu_to_le_16(filter->src_port);
3491 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
3492 		req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
3494 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
3495 		req.dst_port = rte_cpu_to_le_16(filter->dst_port);
3497 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
3498 		req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
3500 	    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3501 		req.mirror_vnic_id = filter->mirror_vnic_id;
3503 	req.enables = rte_cpu_to_le_32(enables);
3505 	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3507 	HWRM_CHECK_RESULT();
/* Remember the firmware handle so the filter can be freed later. */
3509 	filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3515 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3516 struct bnxt_filter_info *filter)
3519 struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3520 struct hwrm_cfa_ntuple_filter_free_output *resp =
3521 bp->hwrm_cmd_resp_addr;
3523 if (filter->fw_ntuple_filter_id == UINT64_MAX)
3526 HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
3528 req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3530 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3532 HWRM_CHECK_RESULT();
3535 filter->fw_ntuple_filter_id = -1;
3536 filter->fw_l2_filter_id = -1;