/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#define HWRM_CMD_TIMEOUT		10000

struct bnxt_plcmodes_cfg {
	uint32_t	flags;
	uint16_t	jumbo_thresh;
	uint16_t	hds_offset;
	uint16_t	hds_threshold;
};
static int page_getenum(size_t size)
{
	if (size <= 1 << 4)
		return 4;
	if (size <= 1 << 12)
		return 12;
	if (size <= 1 << 13)
		return 13;
	if (size <= 1 << 16)
		return 16;
	if (size <= 1 << 21)
		return 21;
	if (size <= 1 << 22)
		return 22;
	if (size <= 1 << 30)
		return 30;
	RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
	return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
	return 1 << page_getenum(size);
}
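
/*
 * Worked example: page_getenum(5000) returns 13 and page_roundup(5000)
 * returns 8192, since 4096 = 2^12 < 5000 <= 2^13 = 8192; the supported
 * exponents mirror the page sizes the HWRM interface accepts.
 */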

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e., a timeout), and a positive non-zero HWRM error code if the
 * ChiMP failed the HWRM command.
 */
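
/*
 * Callers typically handle that convention as follows (an illustrative
 * sketch; handle_timeout()/handle_fw_error() are placeholders, not
 * functions in this driver):
 *
 *	rc = bnxt_hwrm_func_reset(bp);
 *	if (rc < 0)
 *		handle_timeout();	rc == -1, no response arrived
 *	else if (rc > 0)
 *		handle_fw_error(rc);	rc is the HWRM error code
 */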

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
					uint32_t msg_len)
{
	unsigned int i;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;
	uint8_t *bar;
	uint8_t *valid;
	uint16_t max_req_len = bp->max_req_len;
	struct hwrm_short_input short_input = { 0 };

	if (bp->flags & BNXT_FLAG_SHORT_CMD) {
		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

		memset(short_cmd_req, 0, bp->max_req_len);
		memcpy(short_cmd_req, req, msg_len);

		short_input.req_type = rte_cpu_to_le_16(req->req_type);
		short_input.signature = rte_cpu_to_le_16(
					HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
		short_input.size = rte_cpu_to_le_16(msg_len);
		short_input.req_addr =
			rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

		data = (uint32_t *)&short_input;
		msg_len = sizeof(short_input);
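
		/*
		 * From here on, only the 16-byte short_input descriptor is
		 * written to BAR0; the firmware fetches the full request by
		 * DMA from hwrm_short_cmd_req_dma_addr.
		 */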

		/* Sync memory write before updating doorbell */
		rte_wmb();

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}

	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(*data, bar);
		data++;
	}

	/* Zero the rest of the request space */
	for (; i < max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(0, bar);
	}

	/* Ring channel doorbell */
	bar = (uint8_t *)bp->bar0 + 0x100;
	rte_write32(1, bar);

	/* Poll for the valid bit */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len */
		rte_rmb();
		if (resp->resp_len && resp->resp_len <=
				bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)
				break;
		}
		rte_delay_us(600);
	}

	if (i >= HWRM_CMD_TIMEOUT) {
		RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
			req->req_type);
		goto err_ret;
	}
	return 0;

err_ret:
	return -1;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() checks for failures and, on failure, releases the
 * spinlock and returns the error code from the calling function. It may
 * therefore only be used in functions that return the regular int error
 * codes; otherwise it should not be used directly, but rather copied and
 * modified to suit the function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
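
/*
 * The resulting canonical shape of each command wrapper below is (sketch;
 * "xyz"/XYZ stands in for a real command name such as FUNC_RESET):
 *
 *	struct hwrm_xyz_input req = {.req_type = 0 };
 *	struct hwrm_xyz_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *	HWRM_PREP(req, XYZ);
 *	req.field = rte_cpu_to_le_16(host_value);
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT();
 *	host_value = rte_le_to_cpu_16(resp->field);
 *	HWRM_UNLOCK();
 *	return rc;
 */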

#define HWRM_PREP(req, type) do { \
	rte_spinlock_lock(&bp->hwrm_lock); \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
	req.cmpl_ring = rte_cpu_to_le_16(-1); \
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
	req.target_id = rte_cpu_to_le_16(0xffff); \
	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT() do {\
	if (rc) { \
		RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
			__func__, rc); \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		return rc; \
	} \
	if (resp->error_code) { \
		rc = rte_le_to_cpu_16(resp->error_code); \
		if (resp->resp_len >= 16) { \
			struct hwrm_err_output *tmp_hwrm_err_op = \
						(void *)resp; \
			RTE_LOG(ERR, PMD, \
				"%s error %d:%d:%08x:%04x\n", \
				__func__, \
				rc, tmp_hwrm_err_op->cmd_err, \
				rte_le_to_cpu_32(\
					tmp_hwrm_err_op->opaque_0), \
				rte_le_to_cpu_16(\
					tmp_hwrm_err_op->opaque_1)); \
		} else { \
			RTE_LOG(ERR, PMD, \
				"%s error %d\n", __func__, rc); \
		} \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		return rc; \
	} \
} while (0)

#define HWRM_UNLOCK()	rte_spinlock_unlock(&bp->hwrm_lock)

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.mask = 0;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
				 struct bnxt_vnic_info *vnic,
				 uint16_t vlan_count,
				 struct bnxt_vlan_table_entry *vlan_table)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t mask = 0;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	/* FIXME add multicast flag, when multicast adding options is supported
	 * by ethtool.
	 */
	if (vnic->flags & BNXT_VNIC_INFO_BCAST)
		mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
	if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	if (vnic->flags & BNXT_VNIC_INFO_MCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
	if (vnic->mc_addr_cnt) {
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
		req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
		req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
	}
	if (vlan_table) {
		if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
			mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
		req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
			rte_mem_virt2phy(vlan_table));
		req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
	}
	req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
				    mask);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
			uint16_t vlan_count,
			struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
	int rc = 0;
	struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
	struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
						bp->hwrm_cmd_resp_addr;

	/*
	 * Older HWRM versions did not support this command, and the
	 * set_rx_mask list was used for anti-spoof. In 1.8.0, the TX path
	 * configuration was removed from the set_rx_mask call, and this
	 * command was added.
	 *
	 * This command is also present in 1.7.8.11 and higher.
	 */
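	/*
	 * Worked example of the packed format used in the checks below:
	 * 1.7.8.11 packs to (1 << 24) | (7 << 16) | (8 << 8) | 11 ==
	 * 0x0107080b, so plain integer comparisons order firmware versions.
	 */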
	if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
		if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
			if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
					  (11)))
				return 0;
		}
	}
	HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
	req.fid = rte_cpu_to_le_16(fid);

	req.vlan_tag_mask_tbl_addr =
		rte_cpu_to_le_64(rte_mem_virt2phy(vlan_table));
	req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_clear_filter(struct bnxt *bp,
			   struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (filter->fw_l2_filter_id == UINT64_MAX)
		return 0;

	HWRM_PREP(req, CFA_L2_FILTER_FREE);

	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	filter->fw_l2_filter_id = -1;

	return 0;
}

int bnxt_hwrm_set_filter(struct bnxt *bp,
			 uint16_t dst_id,
			 struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	if (filter->fw_l2_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_filter(bp, filter);

	HWRM_PREP(req, CFA_L2_FILTER_ALLOC);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
		req.src_id = rte_cpu_to_le_32(filter->src_id);
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
		req.src_type = filter->src_type;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t new_max_vfs;
	int i;

	HWRM_PREP(req, FUNC_QCAPS);

	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	if (BNXT_PF(bp)) {
		bp->pf.port_id = resp->port_id;
		bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		new_max_vfs = bp->pdev->max_vfs;
		if (new_max_vfs != bp->pf.max_vfs) {
			if (bp->pf.vf_info)
				rte_free(bp->pf.vf_info);
			bp->pf.vf_info = rte_malloc("bnxt_vf_info",
			    sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
			bp->pf.max_vfs = new_max_vfs;
			for (i = 0; i < new_max_vfs; i++) {
				bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
				bp->pf.vf_info[i].vlan_table =
					rte_zmalloc("VF VLAN table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_table == NULL)
					RTE_LOG(ERR, PMD,
					"Fail to alloc VLAN table for VF %d\n",
					i);
				else
					rte_mem_lock_page(
						bp->pf.vf_info[i].vlan_table);
				bp->pf.vf_info[i].vlan_as_table =
					rte_zmalloc("VF VLAN AS table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_as_table == NULL)
					RTE_LOG(ERR, PMD,
					"Alloc VLAN AS table for VF %d fail\n",
					i);
				else
					rte_mem_lock_page(
					      bp->pf.vf_info[i].vlan_as_table);
				STAILQ_INIT(&bp->pf.vf_info[i].filter);
			}
		}
	}

	bp->fw_fid = rte_le_to_cpu_32(resp->fid);
	memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
	/* TODO: For now, do not support VMDq/RFS on VFs. */
	if (BNXT_PF(bp)) {
		if (bp->pf.max_vfs)
			bp->max_vnics = 1;
		else
			bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	} else {
		bp->max_vnics = 1;
	}
	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
	if (BNXT_PF(bp))
		bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_RESET);

	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
	int rc;
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)
		return 0;

	HWRM_PREP(req, FUNC_DRV_RGTR);
	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

	if (BNXT_PF(bp)) {
		req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
		memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
		       RTE_MIN(sizeof(req.vf_req_fwd),
			       sizeof(bp->pf.vf_req_fwd)));
	}

	/*
	 * Forward all async events by default; the explicit bit set below
	 * must come after the memset or it would be overwritten.
	 * TODO: Use MACRO
	 */
	memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
	req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	bp->flags |= BNXT_FLAG_REGISTERED;

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_ver_get_input req = {.req_type = 0 };
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t my_version;
	uint32_t fw_version;
	uint16_t max_resp_len;
	char type[RTE_MEMZONE_NAMESIZE];
	uint32_t dev_caps_cfg;

	bp->max_req_len = HWRM_MAX_REQ_LEN;
	HWRM_PREP(req, VER_GET);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
		resp->hwrm_intf_maj, resp->hwrm_intf_min,
		resp->hwrm_intf_upd,
		resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
	bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
			(resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
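	/*
	 * Packing example: firmware 20.6.112.0 yields
	 * (20 << 24) | (6 << 16) | (112 << 8) | 0 == 0x14067000.
	 */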
	RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

	my_version = HWRM_VERSION_MAJOR << 16;
	my_version |= HWRM_VERSION_MINOR << 8;
	my_version |= HWRM_VERSION_UPDATE;

	fw_version = resp->hwrm_intf_maj << 16;
	fw_version |= resp->hwrm_intf_min << 8;
	fw_version |= resp->hwrm_intf_upd;

	if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
		RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
		rc = -EINVAL;
		goto error;
	}

	if (my_version != fw_version) {
		RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
		if (my_version < fw_version) {
			RTE_LOG(INFO, PMD,
				"Firmware API version is newer than driver.\n");
			RTE_LOG(INFO, PMD,
				"The driver may be missing features.\n");
		} else {
			RTE_LOG(INFO, PMD,
				"Firmware API version is older than driver.\n");
			RTE_LOG(INFO, PMD,
				"Not all driver features may be functional.\n");
		}
	}

	if (bp->max_req_len > resp->max_req_win_len) {
		RTE_LOG(ERR, PMD, "Unsupported request length\n");
		rc = -EINVAL;
	}
	bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
	max_resp_len = resp->max_resp_len;
	dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

	if (bp->max_resp_len != max_resp_len) {
		sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
			bp->pdev->addr.domain, bp->pdev->addr.bus,
			bp->pdev->addr.devid, bp->pdev->addr.function);

		rte_free(bp->hwrm_cmd_resp_addr);

		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
		if (bp->hwrm_cmd_resp_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
		bp->hwrm_cmd_resp_dma_addr =
			rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
		if (bp->hwrm_cmd_resp_dma_addr == 0) {
			RTE_LOG(ERR, PMD,
			"Unable to map response buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}
		bp->max_resp_len = max_resp_len;
	}

	if ((dev_caps_cfg &
		HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg &
		HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
		RTE_LOG(DEBUG, PMD, "Short command supported\n");

		rte_free(bp->hwrm_short_cmd_req_addr);

		bp->hwrm_short_cmd_req_addr = rte_malloc(type,
							 bp->max_req_len, 0);
		if (bp->hwrm_short_cmd_req_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
		bp->hwrm_short_cmd_req_dma_addr =
			rte_mem_virt2phy(bp->hwrm_short_cmd_req_addr);
		if (bp->hwrm_short_cmd_req_dma_addr == 0) {
			rte_free(bp->hwrm_short_cmd_req_addr);
			RTE_LOG(ERR, PMD,
				"Unable to map buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}

		bp->flags |= BNXT_FLAG_SHORT_CMD;
	}

error:
	HWRM_UNLOCK();
	return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
	int rc;
	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))
		return 0;

	HWRM_PREP(req, FUNC_DRV_UNRGTR);
	req.flags = flags;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->flags &= ~BNXT_FLAG_REGISTERED;

	return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
	int rc = 0;
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;
	uint32_t link_speed_mask =
		HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;

	HWRM_PREP(req, PORT_PHY_CFG);

	if (conf->link_up) {
		req.flags = rte_cpu_to_le_32(conf->phy_flags);
		req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (!conf->link_speed) {
			req.auto_mode = conf->auto_mode;
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
			if (conf->auto_mode ==
			    HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK) {
				req.auto_link_speed_mask =
					conf->auto_link_speed_mask;
				enables |= link_speed_mask;
			}
			if (bp->link_info.auto_link_speed) {
				req.auto_link_speed =
					bp->link_info.auto_link_speed;
				enables |=
				HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
			}
		}
		req.auto_duplex = conf->duplex;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		req.force_pause = conf->force_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause && !req.force_pause)
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		else
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

		req.enables = rte_cpu_to_le_32(enables);
	} else {
		req.flags =
		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
		RTE_LOG(INFO, PMD, "Force Link Down\n");
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
{
	int rc = 0;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_QCFG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	link_info->phy_link_status = resp->link;
	link_info->link_up =
		(link_info->phy_link_status ==
		 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
	link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
	link_info->duplex = resp->duplex;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, QUEUE_QPORTCFG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
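	/*
	 * Expands per CoS queue index: GET_QUEUE_INFO(0), for example,
	 * copies resp->queue_id0 and resp->queue_id0_service_profile into
	 * bp->cos_queue[0].
	 */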
	GET_QUEUE_INFO(0);
	GET_QUEUE_INFO(1);
	GET_QUEUE_INFO(2);
	GET_QUEUE_INFO(3);
	GET_QUEUE_INFO(4);
	GET_QUEUE_INFO(5);
	GET_QUEUE_INFO(6);
	GET_QUEUE_INFO(7);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
	int rc = 0;
	uint32_t enables = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);
	req.length = rte_cpu_to_le_32(ring->ring_size);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.queue_id = bp->cos_queue[0].id;
		/* FALLTHROUGH */
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		if (stats_ctx_id != INVALID_STATS_CTX_ID)
			enables |=
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
		break;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		break;
	default:
		RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
			ring_type);
		HWRM_UNLOCK();
		return -1;
	}
	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		}
	}

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
	HWRM_UNLOCK();
	return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
{
	int rc;
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_FREE);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		HWRM_UNLOCK();

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
				rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
			return rc;
		}
	}
	HWRM_UNLOCK();
	return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
	int rc = 0;
	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_ALLOC);

	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	bp->grp_info[idx].fw_grp_id =
	    rte_le_to_cpu_16(resp->ring_group_id);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
	int rc;
	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_FREE);

	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
	return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	int rc = 0;
	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
		return rc;

	HWRM_PREP(req, STAT_CTX_CLR_STATS);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
				unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_ALLOC);

	req.update_period_ms = rte_cpu_to_le_32(0);

	req.stats_dma_addr =
	    rte_cpu_to_le_64(cpr->hw_stats_map);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
				unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_FREE);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0, i, j;
	struct hwrm_vnic_alloc_input req = { 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
		vnic->start_grp_id, vnic->end_grp_id);
	for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
	vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
	vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				ETHER_CRC_LEN + VLAN_TAG_SIZE;
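	/*
	 * Example: with the default 1500-byte MTU the MRU becomes
	 * 1500 + 14 (Ethernet header) + 4 (CRC) + 4 (VLAN tag) = 1522.
	 */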
	HWRM_PREP(req, VNIC_ALLOC);

	if (vnic->func_default)
		req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
	HWRM_UNLOCK();
	RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
	return rc;
}

static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
					struct bnxt_vnic_info *vnic,
					struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_QCFG);

	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	pmode->flags = rte_le_to_cpu_32(resp->flags);
	/* dflt_vnic bit doesn't exist in the _cfg command */
	pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
	pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
	pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
	pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

	HWRM_UNLOCK();

	return rc;
}

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
				       struct bnxt_vnic_info *vnic,
				       struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_CFG);

	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
	req.flags = rte_cpu_to_le_32(pmode->flags);
	req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
	req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
	req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
	req.enables = rte_cpu_to_le_32(
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
	);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
	struct bnxt_plcmodes_cfg pmodes;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
	if (rc)
		return rc;

	HWRM_PREP(req, VNIC_CFG);

	/* Only RSS support for now TBD: COS & LB */
	req.enables =
	    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
			     HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
	if (vnic->lb_rule != 0xffff)
		ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
	if (vnic->cos_rule != 0xffff)
		ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
	if (vnic->rss_rule != 0xffff)
		ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
	req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
	req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
	req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
	req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
	req.mru = rte_cpu_to_le_16(vnic->mru);
	if (vnic->func_default)
		req.flags |=
			rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
	if (vnic->vlan_strip)
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
	if (vnic->bd_stall)
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
	if (vnic->roce_dual)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
	if (vnic->roce_only)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
	if (vnic->rss_dflt_cr)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);

	return rc;
}

int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
		int16_t fw_vf_id)
{
	int rc = 0;
	struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
		return rc;
	}
	HWRM_PREP(req, VNIC_QCFG);

	req.enables =
		rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.vf_id = rte_cpu_to_le_16(fw_vf_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
	vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
	vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
	vnic->mru = rte_le_to_cpu_16(resp->mru);
	vnic->func_default = rte_le_to_cpu_32(
			resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
	vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
	vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
	vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
	vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
	vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
	HWRM_UNLOCK();
	RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);

	return rc;
}

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
						bp->hwrm_cmd_resp_addr;

	if (vnic->rss_rule == 0xffff) {
		RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
		return rc;
	}
	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);

	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	vnic->rss_rule = INVALID_HW_RING_ID;

	return rc;
}

int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_free_input req = {.req_type = 0 };
	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	HWRM_PREP(req, VNIC_FREE);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	vnic->fw_vnic_id = INVALID_HW_RING_ID;
	return rc;
}

int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_CFG);

	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

	req.ring_grp_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_table_dma_addr);
	req.hash_key_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
	req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
			       struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t size;

	HWRM_PREP(req, VNIC_PLCMODES_CFG);

	req.flags = rte_cpu_to_le_32(
			HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);

	req.enables = rte_cpu_to_le_32(
		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);

	size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
	size -= RTE_PKTMBUF_HEADROOM;

	req.jumbo_thresh = rte_cpu_to_le_16(size);
	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic, bool enable)
{
	int rc = 0;
	struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_TPA_CFG);

	if (enable) {
		req.enables = rte_cpu_to_le_32(
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
		req.flags = rte_cpu_to_le_32(
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
		req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
		req.max_agg_segs = rte_cpu_to_le_16(5);
		req.max_aggs =
			rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
		req.min_agg_len = rte_cpu_to_le_32(512);
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);

	HWRM_PREP(req, FUNC_CFG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->pf.vf_info[vf].random_mac = false;

	return rc;
}

int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
				  uint64_t *dropped)
{
	int rc = 0;
	struct hwrm_func_qstats_input req = {.req_type = 0};
	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QSTATS);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	if (dropped)
		*dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
			  struct rte_eth_stats *stats)
{
	int rc = 0;
	struct hwrm_func_qstats_input req = {.req_type = 0};
	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QSTATS);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
	stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
	stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
	stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
	stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
	stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);

	stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
	stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
	stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
	stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
	stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
	stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);

	stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
	stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);

	stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
{
	int rc = 0;
	struct hwrm_func_clr_stats_input req = {.req_type = 0};
	struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_CLR_STATS);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

/*
 * HWRM utility functions
 */

int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_clear(bp, cpr);
		if (rc)
			return rc;
	}
	return 0;
}

int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	int rc;
	unsigned int i;
	struct bnxt_cp_ring_info *cpr;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {

		if (i >= bp->rx_cp_nr_rings)
			cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
		else
			cpr = bp->rx_queues[i]->cp_ring;
		if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
			rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
			cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
			/*
			 * TODO. Need a better way to reset grp_info.stats_ctx
			 * for Rx rings only. stats_ctx is not saved for Tx
			 * rings.
			 */
			bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
			if (rc)
				return rc;
		}
	}
	return 0;
}

int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);

		if (rc)
			return rc;
	}
	return rc;
}

int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
	uint16_t idx;
	uint32_t rc = 0;

	for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {

		if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
			RTE_LOG(ERR, PMD,
				"Attempt to free invalid ring group %d\n",
				idx);
			continue;
		}

		rc = bnxt_hwrm_ring_grp_free(bp, idx);

		if (rc)
			return rc;
	}
	return rc;
}

static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
				unsigned int idx __rte_unused)
{
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

	bnxt_hwrm_ring_free(bp, cp_ring,
			HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
	bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
			sizeof(*cpr->cp_desc_ring));
	cpr->cp_raw_cons = 0;
}

int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
		unsigned int idx = bp->rx_cp_nr_rings + i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_TX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			memset(txr->tx_desc_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_desc_ring));
			memset(txr->tx_buf_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_buf_ring));
			txr->tx_prod = 0;
			txr->tx_cons = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_free_cp_ring(bp, cpr, idx);
			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
		struct bnxt_ring *ring = rxr->rx_ring_struct;
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
		unsigned int idx = i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_RX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
			memset(rxr->rx_desc_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_desc_ring));
			memset(rxr->rx_buf_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_buf_ring));
			rxr->rx_prod = 0;
			memset(rxr->ag_buf_ring, 0,
					rxr->ag_ring_struct->ring_size *
					sizeof(*rxr->ag_buf_ring));
			rxr->ag_prod = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_free_cp_ring(bp, cpr, idx);
			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	/* Default completion ring */
	{
		struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_free_cp_ring(bp, cpr, 0);
			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	return rc;
}

int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
{
	uint16_t i;
	uint32_t rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		rc = bnxt_hwrm_ring_grp_alloc(bp, i);
		if (rc)
			return rc;
	}
	return rc;
}

void bnxt_free_hwrm_resources(struct bnxt *bp)
{
	/* Release memzone */
	rte_free(bp->hwrm_cmd_resp_addr);
	rte_free(bp->hwrm_short_cmd_req_addr);
	bp->hwrm_cmd_resp_addr = NULL;
	bp->hwrm_short_cmd_req_addr = NULL;
	bp->hwrm_cmd_resp_dma_addr = 0;
	bp->hwrm_short_cmd_req_dma_addr = 0;
}

int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
	struct rte_pci_device *pdev = bp->pdev;
	char type[RTE_MEMZONE_NAMESIZE];

	sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	bp->max_resp_len = HWRM_MAX_RESP_LEN;
	bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
	rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
	if (bp->hwrm_cmd_resp_addr == NULL)
		return -ENOMEM;
	bp->hwrm_cmd_resp_dma_addr =
		rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
	if (bp->hwrm_cmd_resp_dma_addr == 0) {
		RTE_LOG(ERR, PMD,
			"unable to map response address to physical memory\n");
		return -ENOMEM;
	}
	rte_spinlock_init(&bp->hwrm_lock);

	return 0;
}

int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		rc = bnxt_hwrm_clear_filter(bp, filter);
		if (rc)
			break;
	}
	return rc;
}

int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
		if (rc)
			break;
	}
	return rc;
}

void bnxt_free_tunnel_ports(struct bnxt *bp)
{
	if (bp->vxlan_port_cnt)
		bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
	bp->vxlan_port = 0;
	if (bp->geneve_port_cnt)
		bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
	bp->geneve_port = 0;
}

void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
	struct bnxt_vnic_info *vnic;
	unsigned int i;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];
	if (BNXT_PF(bp))
		bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);

	/* VNIC resources */
	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		bnxt_clear_hwrm_vnic_filters(bp, vnic);

		bnxt_hwrm_vnic_ctx_free(bp, vnic);

		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);

		bnxt_hwrm_vnic_free(bp, vnic);
	}
	/* Ring resources */
	bnxt_free_all_hwrm_rings(bp);
	bnxt_free_all_hwrm_ring_grps(bp);
	bnxt_free_all_hwrm_stat_ctxs(bp);
	bnxt_free_tunnel_ports(bp);
}

static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
{
	uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	switch (conf_link_speed) {
	case ETH_LINK_SPEED_10M_HD:
	case ETH_LINK_SPEED_100M_HD:
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
	}
	return hw_link_duplex;
}

static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
{
	uint16_t eth_link_speed = 0;

	if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
		return ETH_LINK_SPEED_AUTONEG;

	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
	case ETH_LINK_SPEED_100M:
	case ETH_LINK_SPEED_100M_HD:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
		break;
	case ETH_LINK_SPEED_1G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
		break;
	case ETH_LINK_SPEED_2_5G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
		break;
	case ETH_LINK_SPEED_10G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
		break;
	case ETH_LINK_SPEED_20G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
		break;
	case ETH_LINK_SPEED_25G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
		break;
	case ETH_LINK_SPEED_40G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
		break;
	case ETH_LINK_SPEED_50G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
		break;
	default:
		RTE_LOG(ERR, PMD,
			"Unsupported link speed %d; default to AUTO\n",
			conf_link_speed);
		break;
	}
	return eth_link_speed;
}

#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
		ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
		ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
		ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)

static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
{
	uint32_t one_speed;

	if (link_speed == ETH_LINK_SPEED_AUTONEG)
		return 0;

	if (link_speed & ETH_LINK_SPEED_FIXED) {
		one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
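		/*
		 * x & (x - 1) clears the lowest set bit, so the test below is
		 * non-zero exactly when more than one speed bit survives;
		 * e.g. 0x6 & 0x5 == 0x4 flags a two-speed mask as invalid.
		 */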
		if (one_speed & (one_speed - 1)) {
			RTE_LOG(ERR, PMD,
				"Invalid advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
		if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
			RTE_LOG(ERR, PMD,
				"Unsupported advertised speed (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	} else {
		if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
			RTE_LOG(ERR, PMD,
				"Unsupported advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	}
	return 0;
}

static uint16_t
bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
{
	uint16_t ret = 0;

	if (link_speed == ETH_LINK_SPEED_AUTONEG) {
		if (bp->link_info.support_speeds)
			return bp->link_info.support_speeds;
		link_speed = BNXT_SUPPORTED_SPEEDS;
	}

	if (link_speed & ETH_LINK_SPEED_100M)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_100M_HD)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_1G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
	if (link_speed & ETH_LINK_SPEED_2_5G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
	if (link_speed & ETH_LINK_SPEED_10G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
	if (link_speed & ETH_LINK_SPEED_20G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
	if (link_speed & ETH_LINK_SPEED_25G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
	if (link_speed & ETH_LINK_SPEED_40G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
	if (link_speed & ETH_LINK_SPEED_50G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
	return ret;
}

static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
{
	uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;

	switch (hw_link_speed) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
		eth_link_speed = ETH_SPEED_NUM_100M;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
		eth_link_speed = ETH_SPEED_NUM_1G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
		eth_link_speed = ETH_SPEED_NUM_2_5G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
		eth_link_speed = ETH_SPEED_NUM_10G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
		eth_link_speed = ETH_SPEED_NUM_20G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
		eth_link_speed = ETH_SPEED_NUM_25G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
		eth_link_speed = ETH_SPEED_NUM_40G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
		eth_link_speed = ETH_SPEED_NUM_50G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
	default:
		RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
			hw_link_speed);
		break;
	}
	return eth_link_speed;
}

static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
{
	uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;

	switch (hw_link_duplex) {
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	default:
		RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
			hw_link_duplex);
		break;
	}
	return eth_link_duplex;
}

int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Get link config failed with rc %d\n", rc);
		goto exit;
	}
	if (link_info->link_speed)
		link->link_speed =
			bnxt_parse_hw_link_speed(link_info->link_speed);
	else
		link->link_speed = ETH_SPEED_NUM_NONE;
	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
	link->link_status = link_info->link_up;
	link->link_autoneg = link_info->auto_mode ==
		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
		ETH_LINK_FIXED : ETH_LINK_AUTONEG;
exit:
	return rc;
}

int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
	int rc = 0;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_link_info link_req;
	uint16_t speed;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
		return 0;

	rc = bnxt_valid_link_speed(dev_conf->link_speeds,
			bp->eth_dev->data->port_id);
	if (rc)
		goto error;

	memset(&link_req, 0, sizeof(link_req));
	link_req.link_up = link_up;
	if (!link_up)
		goto port_phy_cfg;

	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
	if (speed == 0) {
		link_req.phy_flags |=
				HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		link_req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
		link_req.auto_link_speed_mask =
			bnxt_parse_eth_link_speed_mask(bp,
						       dev_conf->link_speeds);
	} else {
		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
		link_req.link_speed = speed;
		RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
	}
	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
	link_req.auto_pause = bp->link_info.auto_pause;
	link_req.force_pause = bp->link_info.force_pause;

port_phy_cfg:
	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Set link config failed with rc %d\n", rc);
	}

error:
	return rc;
}

int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(req, FUNC_QCFG);
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	/* Hard Coded.. 0xfff VLAN ID mask */
	bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
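	/*
	 * The 0xfff mask keeps the 12-bit VLAN ID and drops the PCP/DEI
	 * bits of the 16-bit TCI; e.g. TCI 0xa00a yields VLAN ID 0x00a.
	 */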

	switch (resp->port_partition_type) {
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	default:
		bp->port_partition_type = 0;
		break;
	}

	HWRM_UNLOCK();

	return rc;
}

static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
				   struct hwrm_func_qcaps_output *qcaps)
{
	qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
	memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
	       sizeof(qcaps->mac_address));
	qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
	qcaps->max_rx_rings = fcfg->num_rx_rings;
	qcaps->max_tx_rings = fcfg->num_tx_rings;
	qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
	qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
	qcaps->max_vfs = 0;
	qcaps->first_vf_id = 0;
	qcaps->max_vnics = fcfg->num_vnics;
	qcaps->max_decap_records = 0;
	qcaps->max_encap_records = 0;
	qcaps->max_tx_wm_flows = 0;
	qcaps->max_tx_em_flows = 0;
	qcaps->max_rx_wm_flows = 0;
	qcaps->max_rx_em_flows = 0;
	qcaps->max_flow_id = 0;
	qcaps->max_mcast_filters = fcfg->num_mcast_filters;
	qcaps->max_sp_tx_rings = 0;
	qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
}

static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
			HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
	req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
	req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
	req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				   ETHER_CRC_LEN + VLAN_TAG_SIZE);
	req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
	req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
	req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
	req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
	req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
	req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
	req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
	req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
	req.fid = rte_cpu_to_le_16(0xffff);

	HWRM_PREP(req, FUNC_CFG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

static void populate_vf_func_cfg_req(struct bnxt *bp,
				     struct hwrm_func_cfg_input *req,
				     int num_vfs)
{
	req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
			HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
			HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);

	req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				    ETHER_CRC_LEN + VLAN_TAG_SIZE);
	req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				    ETHER_CRC_LEN + VLAN_TAG_SIZE);
	req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
						(num_vfs + 1));
	req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
	req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
						(num_vfs + 1));
	req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
	req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
	req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
	/* TODO: For now, do not support VMDq/RFS on VFs. */
	req->num_vnics = rte_cpu_to_le_16(1);
	req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
						 (num_vfs + 1));
}
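
/*
 * Division example: the pool is split evenly across the PF and its VFs,
 * hence the (num_vfs + 1) divisor; with bp->max_tx_rings == 128 and
 * num_vfs == 7, each of the eight functions gets 128 / 8 == 16 TX rings.
 */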

static void add_random_mac_if_needed(struct bnxt *bp,
				     struct hwrm_func_cfg_input *cfg_req,
				     int vf)
	struct ether_addr mac;

	if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))

	if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
		cfg_req->enables |= rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
		eth_random_addr(cfg_req->dflt_mac_addr);
		bp->pf.vf_info[vf].random_mac = true;
	memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
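
/*
 * Note: eth_random_addr() (rte_ether.h) generates a random, locally
 * administered unicast address, so a VF whose firmware-default MAC
 * reads back as all zeroes still comes up with a usable one. The
 * random_mac flag is recorded, presumably so the driver can later tell
 * addresses it invented apart from firmware-assigned ones.
 */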

static void reserve_resources_from_vf(struct bnxt *bp,
				      struct hwrm_func_cfg_input *cfg_req,
				      int vf)
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	/* Get the actual allocated values now */
	HWRM_PREP(req, FUNC_QCAPS);
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
		RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
		copy_func_cfg_to_qcaps(cfg_req, resp);
	} else if (resp->error_code) {
		rc = rte_le_to_cpu_16(resp->error_code);
		RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
		copy_func_cfg_to_qcaps(cfg_req, resp);

	bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
	bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
	/*
	 * TODO: VMDq is not supported with VFs, so max_vnics is always
	 * forced to 1 in that case; do not deduct the VF's VNICs here.
	 */
	//bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
	bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
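
/*
 * Illustration (hypothetical numbers): if the VF's FUNC_QCAPS response
 * reports max_tx_rings = 8 and max_cmpl_rings = 8, the PF pool above
 * shrinks by exactly that much (bp->max_tx_rings -= 8, bp->max_cp_rings
 * -= 8), so each subsequent VF is provisioned out of what remains.
 */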

int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	/* Read back the VF's currently configured VLAN */
	HWRM_PREP(req, FUNC_QCFG);
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
		RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
	} else if (resp->error_code) {
		rc = rte_le_to_cpu_16(resp->error_code);
		RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
		rc = rte_le_to_cpu_16(resp->vlan);

static int update_pf_resource_max(struct bnxt *bp)
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	/* And copy the allocated numbers into the pf struct */
	HWRM_PREP(req, FUNC_QCFG);
	req.fid = rte_cpu_to_le_16(0xffff);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();

	/*
	 * Only the TX ring value reflects the actual allocation.
	 * TODO: check whether the other resources do as well.
	 */
	bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
	bp->pf.evb_mode = resp->evb_mode;

int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
		RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");

	rc = bnxt_hwrm_func_qcaps(bp);

	bp->pf.func_cfg_flags &=
		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
	bp->pf.func_cfg_flags |=
		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);

int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;

		RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");

	rc = bnxt_hwrm_func_qcaps(bp);

	bp->pf.active_vfs = num_vfs;

	/*
	 * First, configure the PF to use only one TX ring. This ensures
	 * that there are enough rings for all VFs.
	 *
	 * If we don't do this, when we call func_alloc() later, we will lock
	 * extra rings to the PF that won't be available during func_cfg() of
	 * the VFs.
	 *
	 * This has been fixed in firmware versions above 20.6.54.
	 */
	bp->pf.func_cfg_flags &=
		~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
		  HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
	bp->pf.func_cfg_flags |=
		HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
	rc = bnxt_hwrm_pf_func_cfg(bp, 1);

	/*
	 * Now, create and register a buffer to hold forwarded VF requests
	 */
	req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
	bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
				       page_roundup(req_buf_sz));
	if (bp->pf.vf_req_buf == NULL) {

	for (sz = 0; sz < req_buf_sz; sz += getpagesize())
		rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
	for (i = 0; i < num_vfs; i++)
		bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
					    (i * HWRM_MAX_REQ_LEN);
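
	/*
	 * Layout sketch of the forwarding buffer built above (the 128-byte
	 * slot size is an assumption for illustration only):
	 *
	 *	vf_req_buf: [VF0: 128 B][VF1: 128 B][VF2: 128 B]...
	 *
	 * so with four VFs, VF2's slot starts at byte offset 2 * 128 = 256.
	 * Every page is touched via rte_mem_lock_page() up front so the
	 * firmware can deposit forwarded requests into it at any time.
	 */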

	rc = bnxt_hwrm_func_buf_rgtr(bp);

	populate_vf_func_cfg_req(bp, &req, num_vfs);

	bp->pf.active_vfs = 0;
	for (i = 0; i < num_vfs; i++) {
		add_random_mac_if_needed(bp, &req, i);

		HWRM_PREP(req, FUNC_CFG);
		req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
		req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

		/* Clear the DFLT_MAC_ADDR enable for the next pass */
		req.enables &= ~rte_cpu_to_le_32(
				HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);

		if (rc || resp->error_code) {
			RTE_LOG(ERR, PMD,
				"Failed to initialize VF %d\n", i);
			RTE_LOG(ERR, PMD,
				"Not all VFs available. (%d, %d)\n",
				rc, resp->error_code);

		reserve_resources_from_vf(bp, &req, i);
		bp->pf.active_vfs++;
		bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);

	/*
	 * Now configure the PF to use the remaining resources.
	 * STD_TX_RING_MODE is still set here, which limits the number of TX
	 * rings so that QoS can function properly; without it, the PF rings
	 * would break the bandwidth settings.
	 */
	rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);

	rc = update_pf_resource_max(bp);

	bnxt_hwrm_func_buf_unrgtr(bp);
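
/*
 * Rough shape of the provisioning flow above, for a hypothetical PF
 * probe requesting four VFs:
 *
 *	bnxt_hwrm_allocate_vfs(bp, 4);
 *		1. shrink the PF to a single TX ring (STD_TX_RING_MODE)
 *		2. allocate and register the VF request-forwarding buffer
 *		3. FUNC_CFG each VF, then deduct its actual allocation
 *		   from the PF pool via reserve_resources_from_vf()
 *		4. re-expand the PF onto whatever resources remain
 *
 * On a mid-loop failure the VFs configured so far stay active, and the
 * forwarding buffer is unregistered on the error path.
 */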

int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_CFG);

	req.fid = rte_cpu_to_le_16(0xffff);
	req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
	req.evb_mode = bp->pf.evb_mode;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();

int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
				    uint8_t tunnel_type)
	struct hwrm_tunnel_dst_port_alloc_input req = {0};
	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
	req.tunnel_type = tunnel_type;
	req.tunnel_dst_port_val = port;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();

	switch (tunnel_type) {
	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
		bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
		bp->vxlan_port = port;
	case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
		bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
		bp->geneve_port = port;

int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
				   uint8_t tunnel_type)
	struct hwrm_tunnel_dst_port_free_input req = {0};
	struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, TUNNEL_DST_PORT_FREE);

	req.tunnel_type = tunnel_type;
	req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
				    uint32_t flags)
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};

	HWRM_PREP(req, FUNC_CFG);

	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	req.flags = rte_cpu_to_le_32(flags);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
	uint32_t *flag = flagp;

	vnic->flags = *flag;

int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);

int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
	struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_BUF_RGTR);

	req.req_buf_num_pages = rte_cpu_to_le_16(1);
	req.req_buf_page_size = rte_cpu_to_le_16(
			page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
	req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
	req.req_buf_page_addr[0] =
		rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
	if (req.req_buf_page_addr[0] == 0) {
		RTE_LOG(ERR, PMD,
			"unable to map buffer address to physical memory\n");

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
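
/*
 * Sizing sketch for the registration above (hypothetical values,
 * assuming HWRM_MAX_REQ_LEN is 128 bytes): with 8 active VFs the
 * forwarding buffer spans 8 * 128 = 1024 bytes. page_getenum() encodes
 * the covering power-of-two size as its log2 (so that
 * 1 << page_getenum(size) >= size), and that exponent is what
 * req_buf_page_size carries to the firmware, describing the whole
 * buffer as a single page (req_buf_num_pages == 1).
 */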

int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
	struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_BUF_UNRGTR);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};

	HWRM_PREP(req, FUNC_CFG);

	req.fid = rte_cpu_to_le_16(0xffff);
	req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
	req.async_event_cr = rte_cpu_to_le_16(
			bp->def_cp_ring->cp_ring_struct->fw_ring_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_vf_cfg_input req = {0};

	HWRM_PREP(req, FUNC_VF_CFG);

	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
	req.async_event_cr = rte_cpu_to_le_16(
			bp->def_cp_ring->cp_ring_struct->fw_ring_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t dflt_vlan, fid;
	uint32_t func_cfg_flags;

	HWRM_PREP(req, FUNC_CFG);

		dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
		fid = bp->pf.vf_info[vf].fid;
		func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
		fid = 0xffff;	/* byte-swapped once, below */
		func_cfg_flags = bp->pf.func_cfg_flags;
		dflt_vlan = bp->vlan;

	req.flags = rte_cpu_to_le_32(func_cfg_flags);
	req.fid = rte_cpu_to_le_16(fid);
	req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
	req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
			  uint16_t max_bw, uint16_t enables)
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};

	HWRM_PREP(req, FUNC_CFG);

	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	req.enables |= rte_cpu_to_le_32(enables);
	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
	req.max_bw = rte_cpu_to_le_32(max_bw);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_CFG);

	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
	req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
			      void *encaped, size_t ec_size)
	struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
	struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	if (ec_size > sizeof(req.encap_request))

	HWRM_PREP(req, REJECT_FWD_RESP);

	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
	memcpy(req.encap_request, encaped, ec_size);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
				       struct ether_addr *mac)
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QCFG);

	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);

int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
			    void *encaped, size_t ec_size)
	struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	if (ec_size > sizeof(req.encap_request))

	HWRM_PREP(req, EXEC_FWD_RESP);

	req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
	memcpy(req.encap_request, encaped, ec_size);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
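
/*
 * Sketch of how the two forwarding helpers pair up (hypothetical PF
 * handler for a request a VF forwarded to us; every name other than the
 * two helpers is illustrative):
 *
 *	if (vf_request_is_allowed)
 *		bnxt_hwrm_exec_fwd_resp(bp, fw_target_id, fwd_req, req_len);
 *	else
 *		bnxt_hwrm_reject_fwd_resp(bp, fw_target_id, fwd_req, req_len);
 *
 * Either way the encapsulated request is handed back to the firmware,
 * which completes it toward the VF with success or failure respectively.
 */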

int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
			 struct rte_eth_stats *stats)
	struct hwrm_stat_ctx_query_input req = {.req_type = 0};
	struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_QUERY);

	req.stat_ctx_id = rte_cpu_to_le_32(cid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
	stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
	stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
	stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
	stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
	stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);

	stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
	stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
	stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
	stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
	stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
	stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);

	stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
	stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
	stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);

int bnxt_hwrm_port_qstats(struct bnxt *bp)
	struct hwrm_port_qstats_input req = {0};
	struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_pf_info *pf = &bp->pf;

	if (!(bp->flags & BNXT_FLAG_PORT_STATS))

	HWRM_PREP(req, PORT_QSTATS);

	req.port_id = rte_cpu_to_le_16(pf->port_id);
	req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
	req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
	struct hwrm_port_clr_stats_input req = {0};
	struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_pf_info *pf = &bp->pf;

	if (!(bp->flags & BNXT_FLAG_PORT_STATS))

	HWRM_PREP(req, PORT_CLR_STATS);

	req.port_id = rte_cpu_to_le_16(pf->port_id);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
	struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_port_led_qcaps_input req = {0};

	HWRM_PREP(req, PORT_LED_QCAPS);
	req.port_id = bp->pf.port_id;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {

		bp->num_leds = resp->num_leds;
		memcpy(bp->leds, &resp->led0_id,
		       sizeof(bp->leds[0]) * bp->num_leds);
		for (i = 0; i < bp->num_leds; i++) {
			struct bnxt_led_info *led = &bp->leds[i];

			uint16_t caps = led->led_state_caps;

			if (!led->led_group_id ||
			    !BNXT_LED_ALT_BLINK_CAP(caps)) {

int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
	struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_port_led_cfg_input req = {0};
	struct bnxt_led_cfg *led_cfg;
	uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
	uint16_t duration = 0;

	if (!bp->num_leds || BNXT_VF(bp))

	HWRM_PREP(req, PORT_LED_CFG);

		led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
		duration = rte_cpu_to_le_16(500);
	req.port_id = bp->pf.port_id;
	req.num_leds = bp->num_leds;
	led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
	for (i = 0; i < bp->num_leds; i++, led_cfg++) {
		req.enables |= BNXT_LED_DFLT_ENABLES(i);
		led_cfg->led_id = bp->leds[i].led_id;
		led_cfg->led_state = led_state;
		led_cfg->led_blink_on = duration;
		led_cfg->led_blink_off = duration;
		led_cfg->led_group_id = bp->leds[i].led_group_id;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
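
/*
 * Usage sketch (hypothetical caller, e.g. an ethdev LED control op):
 * with led_on set, every reported LED is switched to BLINKALT with
 * symmetric on/off blink durations of 500 (assumed to be milliseconds);
 * with led_on clear, the LEDs fall back to their firmware-default state:
 *
 *	bnxt_hwrm_port_led_cfg(bp, true);	start identify-blink
 *	bnxt_hwrm_port_led_cfg(bp, false);	restore defaults
 */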

static void
bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
	uint32_t *count = cbdata;

	*count = *count + 1;

static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
				     struct bnxt_vnic_info *vnic __rte_unused)

int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
						&count,
						bnxt_vnic_count_hwrm_stub);

static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
					uint16_t *vnic_ids)
	struct hwrm_func_vf_vnic_ids_query_input req = {0};
	struct hwrm_func_vf_vnic_ids_query_output *resp =
						bp->hwrm_cmd_resp_addr;

	/* First query all VNIC ids */
	HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);

	req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
	req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
	req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));

	if (req.vnic_id_tbl_addr == 0) {
		RTE_LOG(ERR, PMD,
			"unable to map VNIC ID table address to physical memory\n");

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
		RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
	} else if (resp->error_code) {
		rc = rte_le_to_cpu_16(resp->error_code);
		RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);

	rc = rte_le_to_cpu_32(resp->vnic_id_cnt);

/*
 * This function queries the VNIC IDs for a specified VF. It then calls
 * the vnic_cb to update the necessary field in vnic_info with cbdata.
 * Then it calls the hwrm_cb function to program this new vnic configuration.
 */
int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
	void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
	int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
	struct bnxt_vnic_info vnic;
	int i, num_vnic_ids;

	/* First query all VNIC ids */
	vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
	vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
			      RTE_CACHE_LINE_SIZE);
	if (vnic_ids == NULL) {

	for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
		rte_mem_lock_page(((char *)vnic_ids) + sz);

	num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);

	if (num_vnic_ids < 0)
		return num_vnic_ids;

	/*
	 * For each VNIC: query its current configuration, let vnic_cb
	 * adjust it using cbdata, then program the result via hwrm_cb.
	 */
	for (i = 0; i < num_vnic_ids; i++) {
		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);

		if (vnic.mru <= 4)	/* Indicates unallocated */

		vnic_cb(&vnic, cbdata);

		rc = hwrm_cb(bp, &vnic);
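
/*
 * The counting pair above (bnxt_vnic_count + bnxt_vnic_count_hwrm_stub)
 * is the simplest use of this helper: the per-VNIC callback tallies and
 * the HWRM callback deliberately programs nothing. A configuring pair,
 * built only from helpers visible in this file, would look like this
 * sketch (flags value is illustrative):
 *
 *	uint32_t flags = 0;	new rx-mask flags for every VF VNIC
 *
 *	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
 *			vf_vnic_set_rxmask_cb, &flags,
 *			bnxt_set_rx_mask_no_vlan);
 */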

int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
					      bool on)
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_cfg_input req = {0};

	HWRM_PREP(req, FUNC_CFG);

	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
	req.vlan_antispoof_mode = on ?
		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
		HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
	struct bnxt_vnic_info vnic;
	int num_vnic_ids, i;

	vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
	vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
			      RTE_CACHE_LINE_SIZE);
	if (vnic_ids == NULL) {

	for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
		rte_mem_lock_page(((char *)vnic_ids) + sz);

	rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);

	/*
	 * Loop through to find the default VNIC ID.
	 * TODO: The easier way would be to obtain the resp->dflt_vnic_id
	 * by sending the hwrm_func_qcfg command to the firmware.
	 */
	for (i = 0; i < num_vnic_ids; i++) {
		memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
		vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
		rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
					 bp->pf.first_vf_id + vf);
		if (vnic.func_default) {
			return vnic.fw_vnic_id;

	/* Could not find a default VNIC. */
	RTE_LOG(ERR, PMD, "No default VNIC\n");