4 * Copyright(c) Broadcom Limited.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Broadcom Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 #include <rte_byteorder.h>
39 #include <rte_common.h>
40 #include <rte_cycles.h>
41 #include <rte_malloc.h>
42 #include <rte_memzone.h>
43 #include <rte_version.h>
47 #include "bnxt_filter.h"
48 #include "bnxt_hwrm.h"
51 #include "bnxt_ring.h"
54 #include "bnxt_vnic.h"
55 #include "hsi_struct_def_dpdk.h"
59 #define HWRM_CMD_TIMEOUT 2000
61 struct bnxt_plcmodes_cfg {
63 uint16_t jumbo_thresh;
65 uint16_t hds_threshold;
68 static int page_getenum(size_t size)
84 RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
85 return sizeof(void *) * 8 - 1;
88 static int page_roundup(size_t size)
90 return 1 << page_getenum(size);
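/*
 * Illustrative sketch: page_roundup() rounds a size up to the granule
 * selected by page_getenum(), so e.g. a 3000-byte request area would be
 * rounded up to 4096 bytes (assuming a 4KB granule is among the sizes
 * page_getenum() supports).
 */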
94 * HWRM Functions (sent to HWRM)
95 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
96 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
97 * command was rejected by the ChiMP firmware.
100 static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
104 struct input *req = msg;
105 struct output *resp = bp->hwrm_cmd_resp_addr;
106 uint32_t *data = msg;
109 uint16_t max_req_len = bp->max_req_len;
110 struct hwrm_short_input short_input = { 0 };
112 if (bp->flags & BNXT_FLAG_SHORT_CMD) {
113 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
115 memset(short_cmd_req, 0, bp->max_req_len);
116 memcpy(short_cmd_req, req, msg_len);
118 short_input.req_type = rte_cpu_to_le_16(req->req_type);
119 short_input.signature = rte_cpu_to_le_16(
120 HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
121 short_input.size = rte_cpu_to_le_16(msg_len);
122 short_input.req_addr =
123 rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);
125 data = (uint32_t *)&short_input;
126 msg_len = sizeof(short_input);
128 /* Sync memory write before updating doorbell */
131 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
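/*
 * Short command path: the full request stays in the DMA buffer prepared
 * above and only the small hwrm_short_input descriptor (type, signature,
 * size, request DMA address) is written to BAR 0 below.
 */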
134 /* Write request msg to hwrm channel */
135 for (i = 0; i < msg_len; i += 4) {
136 bar = (uint8_t *)bp->bar0 + i;
137 rte_write32(*data, bar);
141 /* Zero the rest of the request space */
142 for (; i < max_req_len; i += 4) {
143 bar = (uint8_t *)bp->bar0 + i;
147 /* Ring channel doorbell */
148 bar = (uint8_t *)bp->bar0 + 0x100;
151 /* Poll for the valid bit */
152 for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
153 /* Sanity check on the resp->resp_len */
155 if (resp->resp_len && resp->resp_len <=
157 /* Last byte of resp contains the valid key */
158 valid = (uint8_t *)resp + resp->resp_len - 1;
159 if (*valid == HWRM_RESP_VALID_KEY)
165 if (i >= HWRM_CMD_TIMEOUT) {
166 RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
176 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
180 rte_spinlock_lock(&bp->hwrm_lock);
181 rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
182 rte_spinlock_unlock(&bp->hwrm_lock);
186 #define HWRM_PREP(req, type, cr, resp) \
187 memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
188 req.req_type = rte_cpu_to_le_16(HWRM_##type); \
189 req.cmpl_ring = rte_cpu_to_le_16(cr); \
190 req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
191 req.target_id = rte_cpu_to_le_16(0xffff); \
192 req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)
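/*
 * HWRM_PREP() above zeroes the shared response buffer and fills the common
 * request header (type, completion ring, sequence id, target and response
 * DMA address); callers then set the command-specific fields before
 * bnxt_hwrm_send_message() and typically check the outcome with
 * HWRM_CHECK_RESULT.
 */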
194 #define HWRM_CHECK_RESULT \
197 RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
201 if (resp->error_code) { \
202 rc = rte_le_to_cpu_16(resp->error_code); \
203 if (resp->resp_len >= 16) { \
204 struct hwrm_err_output *tmp_hwrm_err_op = \
207 "%s error %d:%d:%08x:%04x\n", \
209 rc, tmp_hwrm_err_op->cmd_err, \
211 tmp_hwrm_err_op->opaque_0), \
213 tmp_hwrm_err_op->opaque_1)); \
217 "%s error %d\n", __func__, rc); \
223 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
226 struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
227 struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
229 HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
230 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
233 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
240 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
241 struct bnxt_vnic_info *vnic,
243 struct bnxt_vlan_table_entry *vlan_table)
246 struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
247 struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
250 HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
251 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
253 /* FIXME: add the multicast flag once multicast address add options are supported
256 if (vnic->flags & BNXT_VNIC_INFO_BCAST)
257 mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
258 if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
259 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
260 if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
261 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
262 if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
263 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
264 if (vnic->flags & BNXT_VNIC_INFO_MCAST)
265 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
266 if (vnic->mc_addr_cnt) {
267 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
268 req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
269 req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
271 if (vlan_count && vlan_table) {
272 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
273 req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
274 rte_mem_virt2phy(vlan_table));
275 req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
277 req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
280 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
287 int bnxt_hwrm_clear_filter(struct bnxt *bp,
288 struct bnxt_filter_info *filter)
291 struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
292 struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
294 HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);
296 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
298 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
302 filter->fw_l2_filter_id = -1;
307 int bnxt_hwrm_set_filter(struct bnxt *bp,
309 struct bnxt_filter_info *filter)
312 struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
313 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
314 uint32_t enables = 0;
316 HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);
318 req.flags = rte_cpu_to_le_32(filter->flags);
320 enables = filter->enables |
321 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
322 req.dst_id = rte_cpu_to_le_16(dst_id);
325 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
326 memcpy(req.l2_addr, filter->l2_addr,
329 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
330 memcpy(req.l2_addr_mask, filter->l2_addr_mask,
333 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
334 req.l2_ovlan = filter->l2_ovlan;
336 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
337 req.l2_ovlan_mask = filter->l2_ovlan_mask;
338 if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
339 req.src_id = rte_cpu_to_le_32(filter->src_id);
340 if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
341 req.src_type = filter->src_type;
343 req.enables = rte_cpu_to_le_32(enables);
345 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
349 filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
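/*
 * Keep the firmware-assigned handle: bnxt_hwrm_clear_filter() passes this
 * fw_l2_filter_id back in CFA_L2_FILTER_FREE to release the filter.
 */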
354 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
357 struct hwrm_func_qcaps_input req = {.req_type = 0 };
358 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
359 uint16_t new_max_vfs;
362 HWRM_PREP(req, FUNC_QCAPS, -1, resp);
364 req.fid = rte_cpu_to_le_16(0xffff);
366 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
370 bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
372 bp->pf.port_id = resp->port_id;
373 bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
374 new_max_vfs = bp->pdev->max_vfs;
375 if (new_max_vfs != bp->pf.max_vfs) {
377 rte_free(bp->pf.vf_info);
378 bp->pf.vf_info = rte_malloc("bnxt_vf_info",
379 sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
380 bp->pf.max_vfs = new_max_vfs;
381 for (i = 0; i < new_max_vfs; i++) {
382 bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
383 bp->pf.vf_info[i].vlan_table =
384 rte_zmalloc("VF VLAN table",
387 if (bp->pf.vf_info[i].vlan_table == NULL)
389 "Fail to alloc VLAN table for VF %d\n",
393 bp->pf.vf_info[i].vlan_table);
394 STAILQ_INIT(&bp->pf.vf_info[i].filter);
399 bp->fw_fid = rte_le_to_cpu_32(resp->fid);
400 memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
401 bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
402 bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
403 bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
404 bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
405 bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
406 /* TODO: For now, do not support VMDq/RFS on VFs. */
411 bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
415 bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
417 bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
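/*
 * The max_* limits cached above bound the later ring, VNIC and stats
 * context allocations (see bnxt_hwrm_pf_func_cfg() and
 * populate_vf_func_cfg_req(), which carve these maxima up).
 */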
422 int bnxt_hwrm_func_reset(struct bnxt *bp)
425 struct hwrm_func_reset_input req = {.req_type = 0 };
426 struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
428 HWRM_PREP(req, FUNC_RESET, -1, resp);
430 req.enables = rte_cpu_to_le_32(0);
432 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
439 int bnxt_hwrm_func_driver_register(struct bnxt *bp)
442 struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
443 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
445 if (bp->flags & BNXT_FLAG_REGISTERED)
448 HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
449 req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
450 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
451 req.ver_maj = RTE_VER_YEAR;
452 req.ver_min = RTE_VER_MONTH;
453 req.ver_upd = RTE_VER_MINOR;
456 req.enables |= rte_cpu_to_le_32(
457 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
458 memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
459 RTE_MIN(sizeof(req.vf_req_fwd),
460 sizeof(bp->pf.vf_req_fwd)));
463 req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1); /* TODO: Use MACRO */
464 memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
466 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
470 bp->flags |= BNXT_FLAG_REGISTERED;
475 int bnxt_hwrm_ver_get(struct bnxt *bp)
478 struct hwrm_ver_get_input req = {.req_type = 0 };
479 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
482 uint16_t max_resp_len;
483 char type[RTE_MEMZONE_NAMESIZE];
484 uint32_t dev_caps_cfg;
486 bp->max_req_len = HWRM_MAX_REQ_LEN;
487 HWRM_PREP(req, VER_GET, -1, resp);
489 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
490 req.hwrm_intf_min = HWRM_VERSION_MINOR;
491 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
494 * Hold the lock since we may be adjusting the response pointers.
496 rte_spinlock_lock(&bp->hwrm_lock);
497 rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));
501 RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
502 resp->hwrm_intf_maj, resp->hwrm_intf_min,
504 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
505 bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
506 (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
507 RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
508 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
510 my_version = HWRM_VERSION_MAJOR << 16;
511 my_version |= HWRM_VERSION_MINOR << 8;
512 my_version |= HWRM_VERSION_UPDATE;
514 fw_version = resp->hwrm_intf_maj << 16;
515 fw_version |= resp->hwrm_intf_min << 8;
516 fw_version |= resp->hwrm_intf_upd;
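/*
 * Worked example: HWRM interface version 1.7.7 packs to 0x010707, so the
 * plain numeric comparisons below order driver and firmware versions
 * correctly.
 */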
518 if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
519 RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
524 if (my_version != fw_version) {
525 RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
526 if (my_version < fw_version) {
528 "Firmware API version is newer than driver.\n");
530 "The driver may be missing features.\n");
533 "Firmware API version is older than driver.\n");
535 "Not all driver features may be functional.\n");
539 if (bp->max_req_len > resp->max_req_win_len) {
540 RTE_LOG(ERR, PMD, "Unsupported request length\n");
543 bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
544 max_resp_len = resp->max_resp_len;
545 dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);
547 if (bp->max_resp_len != max_resp_len) {
548 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
549 bp->pdev->addr.domain, bp->pdev->addr.bus,
550 bp->pdev->addr.devid, bp->pdev->addr.function);
552 rte_free(bp->hwrm_cmd_resp_addr);
554 bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
555 if (bp->hwrm_cmd_resp_addr == NULL) {
559 rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
560 bp->hwrm_cmd_resp_dma_addr =
561 rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
562 if (bp->hwrm_cmd_resp_dma_addr == 0) {
564 "Unable to map response buffer to physical memory.\n");
568 bp->max_resp_len = max_resp_len;
572 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
574 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
575 RTE_LOG(DEBUG, PMD, "Short command supported\n");
577 rte_free(bp->hwrm_short_cmd_req_addr);
579 bp->hwrm_short_cmd_req_addr = rte_malloc(type,
581 if (bp->hwrm_short_cmd_req_addr == NULL) {
585 rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
586 bp->hwrm_short_cmd_req_dma_addr =
587 rte_mem_virt2phy(bp->hwrm_short_cmd_req_addr);
588 if (bp->hwrm_short_cmd_req_dma_addr == 0) {
589 rte_free(bp->hwrm_short_cmd_req_addr);
591 "Unable to map buffer to physical memory.\n");
596 bp->flags |= BNXT_FLAG_SHORT_CMD;
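/*
 * With BNXT_FLAG_SHORT_CMD set, bnxt_hwrm_send_message_locked() switches to
 * the short request format: it copies the request into the buffer allocated
 * above and writes only the hwrm_short_input descriptor to BAR 0.
 */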
600 rte_spinlock_unlock(&bp->hwrm_lock);
604 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
607 struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
608 struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
610 if (!(bp->flags & BNXT_FLAG_REGISTERED))
613 HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
616 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
620 bp->flags &= ~BNXT_FLAG_REGISTERED;
625 static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
628 struct hwrm_port_phy_cfg_input req = {0};
629 struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
630 uint32_t enables = 0;
632 HWRM_PREP(req, PORT_PHY_CFG, -1, resp);
635 req.flags = rte_cpu_to_le_32(conf->phy_flags);
636 req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
638 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
639 * any auto mode, even "none".
641 if (!conf->link_speed) {
642 req.auto_mode |= conf->auto_mode;
643 enables = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
644 req.auto_link_speed_mask = conf->auto_link_speed_mask;
646 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
647 req.auto_link_speed = bp->link_info.auto_link_speed;
649 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
651 req.auto_duplex = conf->duplex;
652 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
653 req.auto_pause = conf->auto_pause;
654 req.force_pause = conf->force_pause;
655 /* Set force_pause if there is no auto or if there is a force */
656 if (req.auto_pause && !req.force_pause)
657 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
659 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
661 req.enables = rte_cpu_to_le_32(enables);
664 rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
665 RTE_LOG(INFO, PMD, "Force Link Down\n");
668 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
675 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
676 struct bnxt_link_info *link_info)
679 struct hwrm_port_phy_qcfg_input req = {0};
680 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
682 HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);
684 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
688 link_info->phy_link_status = resp->link;
689 if (link_info->phy_link_status == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) {
690 link_info->link_up = 1;
691 link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
693 link_info->link_up = 0;
694 link_info->link_speed = 0;
696 link_info->duplex = resp->duplex;
697 link_info->pause = resp->pause;
698 link_info->auto_pause = resp->auto_pause;
699 link_info->force_pause = resp->force_pause;
700 link_info->auto_mode = resp->auto_mode;
702 link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
703 link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
704 link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
705 link_info->phy_ver[0] = resp->phy_maj;
706 link_info->phy_ver[1] = resp->phy_min;
707 link_info->phy_ver[2] = resp->phy_bld;
712 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
715 struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
716 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
718 HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);
720 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
724 #define GET_QUEUE_INFO(x) \
725 bp->cos_queue[x].id = resp->queue_id##x; \
726 bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
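/*
 * GET_QUEUE_INFO(x) above token-pastes the index into the response field
 * names, e.g. GET_QUEUE_INFO(0) reads resp->queue_id0 and
 * resp->queue_id0_service_profile into bp->cos_queue[0].
 */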
740 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
741 struct bnxt_ring *ring,
742 uint32_t ring_type, uint32_t map_index,
743 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
746 uint32_t enables = 0;
747 struct hwrm_ring_alloc_input req = {.req_type = 0 };
748 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
750 HWRM_PREP(req, RING_ALLOC, -1, resp);
752 req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
753 req.fbo = rte_cpu_to_le_32(0);
754 /* Association of ring index with doorbell index */
755 req.logical_id = rte_cpu_to_le_16(map_index);
756 req.length = rte_cpu_to_le_32(ring->ring_size);
759 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
760 req.queue_id = bp->cos_queue[0].id;
762 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
763 req.ring_type = ring_type;
764 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
765 req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
766 if (stats_ctx_id != INVALID_STATS_CTX_ID)
768 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
770 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
771 req.ring_type = ring_type;
773 * TODO: Some HWRM versions crash with
774 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
776 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
779 RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
783 req.enables = rte_cpu_to_le_32(enables);
785 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
787 if (rc || resp->error_code) {
788 if (rc == 0 && resp->error_code)
789 rc = rte_le_to_cpu_16(resp->error_code);
791 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
793 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
795 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
797 "hwrm_ring_alloc rx failed. rc:%d\n", rc);
799 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
801 "hwrm_ring_alloc tx failed. rc:%d\n", rc);
804 RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
809 ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
813 int bnxt_hwrm_ring_free(struct bnxt *bp,
814 struct bnxt_ring *ring, uint32_t ring_type)
817 struct hwrm_ring_free_input req = {.req_type = 0 };
818 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
820 HWRM_PREP(req, RING_FREE, -1, resp);
822 req.ring_type = ring_type;
823 req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
825 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
827 if (rc || resp->error_code) {
828 if (rc == 0 && resp->error_code)
829 rc = rte_le_to_cpu_16(resp->error_code);
832 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
833 RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
836 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
837 RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
840 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
841 RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
845 RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
852 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
855 struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
856 struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
858 HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);
860 req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
861 req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
862 req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
863 req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
865 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
869 bp->grp_info[idx].fw_grp_id =
870 rte_le_to_cpu_16(resp->ring_group_id);
875 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
878 struct hwrm_ring_grp_free_input req = {.req_type = 0 };
879 struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
881 HWRM_PREP(req, RING_GRP_FREE, -1, resp);
883 req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
885 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
889 bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
893 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
896 struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
897 struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
899 if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
902 HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
904 req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
906 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
913 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
914 unsigned int idx __rte_unused)
917 struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
918 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
920 HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);
922 req.update_period_ms = rte_cpu_to_le_32(0);
925 rte_cpu_to_le_64(cpr->hw_stats_map);
927 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
931 cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
936 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
937 unsigned int idx __rte_unused)
940 struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
941 struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
943 HWRM_PREP(req, STAT_CTX_FREE, -1, resp);
945 req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
947 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
954 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
957 struct hwrm_vnic_alloc_input req = { 0 };
958 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
960 /* map ring groups to this vnic */
961 RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
962 vnic->start_grp_id, vnic->end_grp_id);
963 for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
964 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
965 vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
966 vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
967 vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
968 vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
969 vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
970 ETHER_CRC_LEN + VLAN_TAG_SIZE;
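/*
 * Example with the standard 1500-byte MTU: 1500 + 14 (Ethernet header) +
 * 4 (CRC) + 4 (VLAN tag) = 1522-byte MRU programmed into the VNIC.
 */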
971 HWRM_PREP(req, VNIC_ALLOC, -1, resp);
973 if (vnic->func_default)
974 req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
975 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
979 vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
983 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
984 struct bnxt_vnic_info *vnic,
985 struct bnxt_plcmodes_cfg *pmode)
988 struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
989 struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
991 HWRM_PREP(req, VNIC_PLCMODES_QCFG, -1, resp);
993 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
995 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
999 pmode->flags = rte_le_to_cpu_32(resp->flags);
1000 /* dflt_vnic bit doesn't exist in the _cfg command */
1001 pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1002 pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1003 pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1004 pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1009 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1010 struct bnxt_vnic_info *vnic,
1011 struct bnxt_plcmodes_cfg *pmode)
1014 struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1015 struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1017 HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);
1019 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1020 req.flags = rte_cpu_to_le_32(pmode->flags);
1021 req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1022 req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1023 req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1024 req.enables = rte_cpu_to_le_32(
1025 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1026 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1027 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1030 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1037 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1040 struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1041 struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1042 uint32_t ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1043 struct bnxt_plcmodes_cfg pmodes;
1045 rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1049 HWRM_PREP(req, VNIC_CFG, -1, resp);
1051 /* Only RSS support for now TBD: COS & LB */
1053 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
1054 HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
1055 if (vnic->lb_rule != 0xffff)
1056 ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1057 if (vnic->cos_rule != 0xffff)
1058 ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1059 if (vnic->rss_rule != 0xffff)
1060 ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1061 req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
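/*
 * Note: the assignments above overwrite ctx_enable_flag rather than OR into
 * it, so only one of the LB/COS/RSS enable bits is set here; when several
 * rules are valid the RSS check runs last and wins.
 */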
1062 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1063 req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1064 req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1065 req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1066 req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1067 req.mru = rte_cpu_to_le_16(vnic->mru);
1068 if (vnic->func_default)
1070 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1071 if (vnic->vlan_strip)
1073 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1076 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1077 if (vnic->roce_dual)
1078 req.flags |= rte_cpu_to_le_32(
1079 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1080 if (vnic->roce_only)
1081 req.flags |= rte_cpu_to_le_32(
1082 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1083 if (vnic->rss_dflt_cr)
1084 req.flags |= rte_cpu_to_le_32(
1085 HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
1087 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1091 rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1096 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1100 struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1101 struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1103 HWRM_PREP(req, VNIC_QCFG, -1, resp);
1106 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1107 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1108 req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1110 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1114 vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1115 vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1116 vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1117 vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1118 vnic->mru = rte_le_to_cpu_16(resp->mru);
1119 vnic->func_default = rte_le_to_cpu_32(
1120 resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1121 vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1122 HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1123 vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1124 HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1125 vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1126 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1127 vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1128 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1129 vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1130 HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1135 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1138 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1139 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1140 bp->hwrm_cmd_resp_addr;
1142 HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);
1144 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1148 vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1153 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1156 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1157 struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1158 bp->hwrm_cmd_resp_addr;
1160 HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);
1162 req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1164 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1168 vnic->rss_rule = INVALID_HW_RING_ID;
1173 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1176 struct hwrm_vnic_free_input req = {.req_type = 0 };
1177 struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1179 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
1182 HWRM_PREP(req, VNIC_FREE, -1, resp);
1184 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1186 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1190 vnic->fw_vnic_id = INVALID_HW_RING_ID;
1194 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1195 struct bnxt_vnic_info *vnic)
1198 struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1199 struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1201 HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);
1203 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1205 req.ring_grp_tbl_addr =
1206 rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1207 req.hash_key_tbl_addr =
1208 rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1209 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1211 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1218 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1219 struct bnxt_vnic_info *vnic)
1222 struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1223 struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1226 HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);
1228 req.flags = rte_cpu_to_le_32(
1229 HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1231 req.enables = rte_cpu_to_le_32(
1232 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1234 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1235 size -= RTE_PKTMBUF_HEADROOM;
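/*
 * Illustrative example: a 2048-byte mbuf data room minus the default
 * 128-byte RTE_PKTMBUF_HEADROOM gives a 1920-byte jumbo threshold.
 */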
1237 req.jumbo_thresh = rte_cpu_to_le_16(size);
1238 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1240 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1247 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1248 struct bnxt_vnic_info *vnic, bool enable)
1251 struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1252 struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1254 HWRM_PREP(req, VNIC_TPA_CFG, -1, resp);
1257 req.enables = rte_cpu_to_le_32(
1258 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1259 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1260 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1261 req.flags = rte_cpu_to_le_32(
1262 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1263 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1264 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1265 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1266 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1267 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1268 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1269 req.max_agg_segs = rte_cpu_to_le_16(5);
1271 rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1272 req.min_agg_len = rte_cpu_to_le_32(512);
1275 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1282 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1284 struct hwrm_func_cfg_input req = {0};
1285 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1288 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1289 req.enables = rte_cpu_to_le_32(
1290 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1291 memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1292 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1294 HWRM_PREP(req, FUNC_CFG, -1, resp);
1296 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1299 bp->pf.vf_info[vf].random_mac = false;
1304 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1308 struct hwrm_func_qstats_input req = {.req_type = 0};
1309 struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1311 HWRM_PREP(req, FUNC_QSTATS, -1, resp);
1313 req.fid = rte_cpu_to_le_16(fid);
1315 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1320 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1325 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1326 struct rte_eth_stats *stats)
1329 struct hwrm_func_qstats_input req = {.req_type = 0};
1330 struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1332 HWRM_PREP(req, FUNC_QSTATS, -1, resp);
1334 req.fid = rte_cpu_to_le_16(fid);
1336 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1340 stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1341 stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1342 stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1343 stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1344 stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1345 stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1347 stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1348 stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1349 stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1350 stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1351 stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1352 stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1354 stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
1355 stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);
1357 stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
1362 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1365 struct hwrm_func_clr_stats_input req = {.req_type = 0};
1366 struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1368 HWRM_PREP(req, FUNC_CLR_STATS, -1, resp);
1370 req.fid = rte_cpu_to_le_16(fid);
1372 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1380 * HWRM utility functions
1383 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1388 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1389 struct bnxt_tx_queue *txq;
1390 struct bnxt_rx_queue *rxq;
1391 struct bnxt_cp_ring_info *cpr;
1393 if (i >= bp->rx_cp_nr_rings) {
1394 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1397 rxq = bp->rx_queues[i];
1401 rc = bnxt_hwrm_stat_clear(bp, cpr);
1408 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1412 struct bnxt_cp_ring_info *cpr;
1414 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1416 if (i >= bp->rx_cp_nr_rings)
1417 cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1419 cpr = bp->rx_queues[i]->cp_ring;
1420 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1421 rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1422 cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1424 * TODO. Need a better way to reset grp_info.stats_ctx
1425 * for Rx rings only. stats_ctx is not saved for Tx
1428 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
1436 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1441 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1442 struct bnxt_tx_queue *txq;
1443 struct bnxt_rx_queue *rxq;
1444 struct bnxt_cp_ring_info *cpr;
1446 if (i >= bp->rx_cp_nr_rings) {
1447 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1450 rxq = bp->rx_queues[i];
1454 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1462 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1467 for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1469 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
1471 "Attempt to free invalid ring group %d\n",
1476 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1484 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1485 unsigned int idx __rte_unused)
1487 struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1489 bnxt_hwrm_ring_free(bp, cp_ring,
1490 HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1491 cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1492 bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
1493 memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1494 sizeof(*cpr->cp_desc_ring));
1495 cpr->cp_raw_cons = 0;
1498 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1503 for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1504 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1505 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1506 struct bnxt_ring *ring = txr->tx_ring_struct;
1507 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1508 unsigned int idx = bp->rx_cp_nr_rings + i + 1;
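/*
 * Index convention used for grp_info here: entry 0 is the default
 * completion ring, entries 1..rx_cp_nr_rings are the Rx rings, and the Tx
 * completion rings follow after those.
 */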
1510 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1511 bnxt_hwrm_ring_free(bp, ring,
1512 HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1513 ring->fw_ring_id = INVALID_HW_RING_ID;
1514 memset(txr->tx_desc_ring, 0,
1515 txr->tx_ring_struct->ring_size *
1516 sizeof(*txr->tx_desc_ring));
1517 memset(txr->tx_buf_ring, 0,
1518 txr->tx_ring_struct->ring_size *
1519 sizeof(*txr->tx_buf_ring));
1523 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1524 bnxt_free_cp_ring(bp, cpr, idx);
1525 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1529 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1530 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1531 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1532 struct bnxt_ring *ring = rxr->rx_ring_struct;
1533 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1534 unsigned int idx = i + 1;
1536 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1537 bnxt_hwrm_ring_free(bp, ring,
1538 HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1539 ring->fw_ring_id = INVALID_HW_RING_ID;
1540 bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
1541 memset(rxr->rx_desc_ring, 0,
1542 rxr->rx_ring_struct->ring_size *
1543 sizeof(*rxr->rx_desc_ring));
1544 memset(rxr->rx_buf_ring, 0,
1545 rxr->rx_ring_struct->ring_size *
1546 sizeof(*rxr->rx_buf_ring));
1548 memset(rxr->ag_buf_ring, 0,
1549 rxr->ag_ring_struct->ring_size *
1550 sizeof(*rxr->ag_buf_ring));
1553 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1554 bnxt_free_cp_ring(bp, cpr, idx);
1555 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
1556 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1560 /* Default completion ring */
1562 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1564 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1565 bnxt_free_cp_ring(bp, cpr, 0);
1566 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1573 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1578 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1579 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1586 void bnxt_free_hwrm_resources(struct bnxt *bp)
1588 /* Release HWRM command/response buffers */
1589 rte_free(bp->hwrm_cmd_resp_addr);
1590 rte_free(bp->hwrm_short_cmd_req_addr);
1591 bp->hwrm_cmd_resp_addr = NULL;
1592 bp->hwrm_short_cmd_req_addr = NULL;
1593 bp->hwrm_cmd_resp_dma_addr = 0;
1594 bp->hwrm_short_cmd_req_dma_addr = 0;
1597 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1599 struct rte_pci_device *pdev = bp->pdev;
1600 char type[RTE_MEMZONE_NAMESIZE];
1602 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
1603 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1604 bp->max_resp_len = HWRM_MAX_RESP_LEN;
1605 bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1606 rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1607 if (bp->hwrm_cmd_resp_addr == NULL)
1609 bp->hwrm_cmd_resp_dma_addr =
1610 rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
1611 if (bp->hwrm_cmd_resp_dma_addr == 0) {
1613 "unable to map response address to physical memory\n");
1616 rte_spinlock_init(&bp->hwrm_lock);
1621 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1623 struct bnxt_filter_info *filter;
1626 STAILQ_FOREACH(filter, &vnic->filter, next) {
1627 rc = bnxt_hwrm_clear_filter(bp, filter);
1634 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1636 struct bnxt_filter_info *filter;
1639 STAILQ_FOREACH(filter, &vnic->filter, next) {
1640 rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
1647 void bnxt_free_tunnel_ports(struct bnxt *bp)
1649 if (bp->vxlan_port_cnt)
1650 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
1651 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
1653 if (bp->geneve_port_cnt)
1654 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
1655 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
1656 bp->geneve_port = 0;
1659 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1661 struct bnxt_vnic_info *vnic;
1664 if (bp->vnic_info == NULL)
1667 vnic = &bp->vnic_info[0];
1669 bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
1671 /* VNIC resources */
1672 for (i = 0; i < bp->nr_vnics; i++) {
1673 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1675 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1677 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1679 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
1681 bnxt_hwrm_vnic_free(bp, vnic);
1683 /* Ring resources */
1684 bnxt_free_all_hwrm_rings(bp);
1685 bnxt_free_all_hwrm_ring_grps(bp);
1686 bnxt_free_all_hwrm_stat_ctxs(bp);
1687 bnxt_free_tunnel_ports(bp);
1690 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1692 uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1694 if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1695 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1697 switch (conf_link_speed) {
1698 case ETH_LINK_SPEED_10M_HD:
1699 case ETH_LINK_SPEED_100M_HD:
1700 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1702 return hw_link_duplex;
1705 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
1707 uint16_t eth_link_speed = 0;
1709 if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
1710 return ETH_LINK_SPEED_AUTONEG;
1712 switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
1713 case ETH_LINK_SPEED_100M:
1714 case ETH_LINK_SPEED_100M_HD:
1716 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
1718 case ETH_LINK_SPEED_1G:
1720 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
1722 case ETH_LINK_SPEED_2_5G:
1724 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
1726 case ETH_LINK_SPEED_10G:
1728 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
1730 case ETH_LINK_SPEED_20G:
1732 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
1734 case ETH_LINK_SPEED_25G:
1736 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
1738 case ETH_LINK_SPEED_40G:
1740 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
1742 case ETH_LINK_SPEED_50G:
1744 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
1748 "Unsupported link speed %d; default to AUTO\n",
1752 return eth_link_speed;
1755 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
1756 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
1757 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
1758 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
1760 static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
1764 if (link_speed == ETH_LINK_SPEED_AUTONEG)
1767 if (link_speed & ETH_LINK_SPEED_FIXED) {
1768 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
1770 if (one_speed & (one_speed - 1)) {
1772 "Invalid advertised speeds (%u) for port %u\n",
1773 link_speed, port_id);
1776 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
1778 "Unsupported advertised speed (%u) for port %u\n",
1779 link_speed, port_id);
1783 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
1785 "Unsupported advertised speeds (%u) for port %u\n",
1786 link_speed, port_id);
1793 static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
1797 if (link_speed == ETH_LINK_SPEED_AUTONEG)
1798 link_speed = BNXT_SUPPORTED_SPEEDS;
1800 if (link_speed & ETH_LINK_SPEED_100M)
1801 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1802 if (link_speed & ETH_LINK_SPEED_100M_HD)
1803 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1804 if (link_speed & ETH_LINK_SPEED_1G)
1805 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
1806 if (link_speed & ETH_LINK_SPEED_2_5G)
1807 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
1808 if (link_speed & ETH_LINK_SPEED_10G)
1809 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
1810 if (link_speed & ETH_LINK_SPEED_20G)
1811 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
1812 if (link_speed & ETH_LINK_SPEED_25G)
1813 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
1814 if (link_speed & ETH_LINK_SPEED_40G)
1815 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
1816 if (link_speed & ETH_LINK_SPEED_50G)
1817 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
1821 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
1823 uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
1825 switch (hw_link_speed) {
1826 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
1827 eth_link_speed = ETH_SPEED_NUM_100M;
1829 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
1830 eth_link_speed = ETH_SPEED_NUM_1G;
1832 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
1833 eth_link_speed = ETH_SPEED_NUM_2_5G;
1835 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
1836 eth_link_speed = ETH_SPEED_NUM_10G;
1838 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
1839 eth_link_speed = ETH_SPEED_NUM_20G;
1841 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
1842 eth_link_speed = ETH_SPEED_NUM_25G;
1844 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
1845 eth_link_speed = ETH_SPEED_NUM_40G;
1847 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
1848 eth_link_speed = ETH_SPEED_NUM_50G;
1850 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
1852 RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
1856 return eth_link_speed;
1859 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
1861 uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1863 switch (hw_link_duplex) {
1864 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
1865 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
1866 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1868 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
1869 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
1872 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
1876 return eth_link_duplex;
1879 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
1882 struct bnxt_link_info *link_info = &bp->link_info;
1884 rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
1887 "Get link config failed with rc %d\n", rc);
1890 if (link_info->link_up)
1892 bnxt_parse_hw_link_speed(link_info->link_speed);
1894 link->link_speed = ETH_SPEED_NUM_NONE;
1895 link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
1896 link->link_status = link_info->link_up;
1897 link->link_autoneg = link_info->auto_mode ==
1898 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
1899 ETH_LINK_SPEED_FIXED : ETH_LINK_SPEED_AUTONEG;
1904 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
1907 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1908 struct bnxt_link_info link_req;
1911 if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
1914 rc = bnxt_valid_link_speed(dev_conf->link_speeds,
1915 bp->eth_dev->data->port_id);
1919 memset(&link_req, 0, sizeof(link_req));
1920 link_req.link_up = link_up;
1924 speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
1925 link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
1927 link_req.phy_flags |=
1928 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
1929 link_req.auto_mode =
1930 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
1931 link_req.auto_link_speed_mask =
1932 bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
1934 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
1935 link_req.link_speed = speed;
1936 RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
1938 link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
1939 link_req.auto_pause = bp->link_info.auto_pause;
1940 link_req.force_pause = bp->link_info.force_pause;
1943 rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
1946 "Set link config failed with rc %d\n", rc);
1949 rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
1955 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
1957 struct hwrm_func_qcfg_input req = {0};
1958 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1961 HWRM_PREP(req, FUNC_QCFG, -1, resp);
1962 req.fid = rte_cpu_to_le_16(0xffff);
1964 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1968 /* Hard-coded 0xfff VLAN ID mask */
1969 bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
1971 switch (resp->port_partition_type) {
1972 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
1973 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
1974 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
1975 bp->port_partition_type = resp->port_partition_type;
1978 bp->port_partition_type = 0;
1985 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
1986 struct hwrm_func_qcaps_output *qcaps)
1988 qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
1989 memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
1990 sizeof(qcaps->mac_address));
1991 qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
1992 qcaps->max_rx_rings = fcfg->num_rx_rings;
1993 qcaps->max_tx_rings = fcfg->num_tx_rings;
1994 qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
1995 qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
1997 qcaps->first_vf_id = 0;
1998 qcaps->max_vnics = fcfg->num_vnics;
1999 qcaps->max_decap_records = 0;
2000 qcaps->max_encap_records = 0;
2001 qcaps->max_tx_wm_flows = 0;
2002 qcaps->max_tx_em_flows = 0;
2003 qcaps->max_rx_wm_flows = 0;
2004 qcaps->max_rx_em_flows = 0;
2005 qcaps->max_flow_id = 0;
2006 qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2007 qcaps->max_sp_tx_rings = 0;
2008 qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2011 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2013 struct hwrm_func_cfg_input req = {0};
2014 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2017 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2018 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2019 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2020 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2021 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2022 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2023 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2024 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2025 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2026 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2027 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2028 req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2029 req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2030 ETHER_CRC_LEN + VLAN_TAG_SIZE);
2031 req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2032 req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2033 req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2034 req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2035 req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2036 req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2037 req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2038 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2039 req.fid = rte_cpu_to_le_16(0xffff);
2041 HWRM_PREP(req, FUNC_CFG, -1, resp);
2043 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2049 static void populate_vf_func_cfg_req(struct bnxt *bp,
2050 struct hwrm_func_cfg_input *req,
2053 req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2054 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2055 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2056 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2057 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2058 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2059 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2060 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2061 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2062 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2064 req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2065 ETHER_CRC_LEN + VLAN_TAG_SIZE);
2066 req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2067 ETHER_CRC_LEN + VLAN_TAG_SIZE);
2068 req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2070 req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2071 req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2073 req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2074 req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2075 req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
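/*
 * Each remaining resource is shared evenly across the PF and its VFs
 * (num_vfs + 1 ways); e.g. with 4 VFs every function is offered roughly a
 * fifth of each device maximum (illustrative).
 */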
2076 /* TODO: For now, do not support VMDq/RFS on VFs. */
2077 req->num_vnics = rte_cpu_to_le_16(1);
2078 req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2082 static void add_random_mac_if_needed(struct bnxt *bp,
2083 struct hwrm_func_cfg_input *cfg_req,
2086 struct ether_addr mac;
2088 if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2091 if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00", 6) == 0) {
2093 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2094 eth_random_addr(cfg_req->dflt_mac_addr);
2095 bp->pf.vf_info[vf].random_mac = true;
2097 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2101 static void reserve_resources_from_vf(struct bnxt *bp,
2102 struct hwrm_func_cfg_input *cfg_req,
2105 struct hwrm_func_qcaps_input req = {0};
2106 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2109 /* Get the actual allocated values now */
2110 HWRM_PREP(req, FUNC_QCAPS, -1, resp);
2111 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2112 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2115 RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
2116 copy_func_cfg_to_qcaps(cfg_req, resp);
2117 } else if (resp->error_code) {
2118 rc = rte_le_to_cpu_16(resp->error_code);
2119 RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
2120 copy_func_cfg_to_qcaps(cfg_req, resp);
2123 bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2124 bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2125 bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2126 bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2127 bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2128 bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2130 * TODO: While not supporting VMDq with VFs, max_vnics is always
2131 * forced to 1 in this case
2133 //bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
2134 bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
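/*
 * Deduct what this VF actually received from the PF's cached maxima so that
 * later PF configuration does not oversubscribe the device.
 */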
2137 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2139 struct hwrm_func_qcfg_input req = {0};
2140 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2143 /* Query the VF's currently configured VLAN */
2144 HWRM_PREP(req, FUNC_QCFG, -1, resp);
2145 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2146 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2148 RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
2150 } else if (resp->error_code) {
2151 rc = rte_le_to_cpu_16(resp->error_code);
2152 RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
2155 return rte_le_to_cpu_16(resp->vlan);
2158 static int update_pf_resource_max(struct bnxt *bp)
2160 struct hwrm_func_qcfg_input req = {0};
2161 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2164 /* And copy the allocated numbers into the pf struct */
2165 HWRM_PREP(req, FUNC_QCFG, -1, resp);
2166 req.fid = rte_cpu_to_le_16(0xffff);
2167 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2170 /* Only TX ring value reflects actual allocation? TODO */
2171 bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2172 bp->pf.evb_mode = resp->evb_mode;
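/*
 * Used when no VFs are being created: refresh the PF's capabilities and
 * reconfigure it with STD_TX_RING_MODE disabled so the PF keeps all of its
 * TX rings.
 */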
2177 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2182 RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
2186 rc = bnxt_hwrm_func_qcaps(bp);
2190 bp->pf.func_cfg_flags &=
2191 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2192 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2193 bp->pf.func_cfg_flags |=
2194 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2195 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
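/*
 * Provision num_vfs VFs: shrink the PF to a single TX ring, register the
 * buffer used for forwarded VF HWRM requests, configure each VF via
 * FUNC_CFG and reserve its resources, then hand the remaining resources
 * back to the PF and record the final PF allocation.
 */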
2199 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2201 struct hwrm_func_cfg_input req = {0};
2202 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2209 RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
2213 rc = bnxt_hwrm_func_qcaps(bp);
2218 bp->pf.active_vfs = num_vfs;
2221 * First, configure the PF to only use one TX ring. This ensures that
2222 * there are enough rings for all VFs.
2224 * If we don't do this, when we call func_alloc() later, we will lock
2225 * extra rings to the PF that won't be available during func_cfg() of the VF.
2228 * This has been fixed with firmware versions above 20.6.54.
2230 bp->pf.func_cfg_flags &=
2231 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2232 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2233 bp->pf.func_cfg_flags |=
2234 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2235 rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2240 * Now, create and register a buffer to hold forwarded VF requests
2242 req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
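/*
 * The buffer is aligned to a page_roundup() boundary and locked into memory
 * page by page because bnxt_hwrm_func_buf_rgtr() below registers it with the
 * firmware as a single physically contiguous region; each VF owns one
 * HWRM_MAX_REQ_LEN slice of it.
 */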
2243 bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2244 page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
2245 if (bp->pf.vf_req_buf == NULL) {
2249 for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2250 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2251 for (i = 0; i < num_vfs; i++)
2252 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2253 (i * HWRM_MAX_REQ_LEN);
2255 rc = bnxt_hwrm_func_buf_rgtr(bp);
2259 populate_vf_func_cfg_req(bp, &req, num_vfs);
2261 bp->pf.active_vfs = 0;
2262 for (i = 0; i < num_vfs; i++) {
2263 add_random_mac_if_needed(bp, &req, i);
2265 HWRM_PREP(req, FUNC_CFG, -1, resp);
2266 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2267 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2268 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2270 /* Clear enable flag for next pass */
2271 req.enables &= ~rte_cpu_to_le_32(
2272 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2274 if (rc || resp->error_code) {
2276 "Failed to initizlie VF %d\n", i);
2278 "Not all VFs available. (%d, %d)\n",
2279 rc, resp->error_code);
2283 reserve_resources_from_vf(bp, &req, i);
2284 bp->pf.active_vfs++;
2288 * Now configure the PF to use "the rest" of the resources.
2289 * Note that STD_TX_RING_MODE is used here, which limits the number of
2290 * TX rings the PF can use so that QoS can function properly. Without it,
2291 * the PF rings would break the bandwidth settings.
2293 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2297 rc = update_pf_resource_max(bp);
2304 bnxt_hwrm_func_buf_unrgtr(bp);
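/* Program the PF's Edge Virtual Bridging (EVB) mode stored in bp->pf.evb_mode. */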
2308 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2310 struct hwrm_func_cfg_input req = {0};
2311 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2314 HWRM_PREP(req, FUNC_CFG, -1, resp);
2316 req.fid = rte_cpu_to_le_16(0xffff);
2317 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2318 req.evb_mode = bp->pf.evb_mode;
2320 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
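/*
 * Allocate a tunnel destination UDP port in the firmware (VXLAN or Geneve)
 * and cache the returned firmware port id and the port number in bp.
 */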
2326 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2327 uint8_t tunnel_type)
2329 struct hwrm_tunnel_dst_port_alloc_input req = {0};
2330 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2333 HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, -1, resp);
2334 req.tunnel_type = tunnel_type;
2335 req.tunnel_dst_port_val = port;
2336 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2339 switch (tunnel_type) {
2340 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2341 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2342 bp->vxlan_port = port;
2344 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2345 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2346 bp->geneve_port = port;
2354 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2355 uint8_t tunnel_type)
2357 struct hwrm_tunnel_dst_port_free_input req = {0};
2358 struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2361 HWRM_PREP(req, TUNNEL_DST_PORT_FREE, -1, resp);
2362 req.tunnel_type = tunnel_type;
2363 req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2364 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2370 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2373 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2374 struct hwrm_func_cfg_input req = {0};
2377 HWRM_PREP(req, FUNC_CFG, -1, resp);
2378 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2379 req.flags = rte_cpu_to_le_32(flags);
2380 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
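/*
 * Callback for bnxt_hwrm_func_vf_vnic_query_and_config(): overwrite the
 * VNIC's flags with the value passed through flagp. For example
 * (illustrative only), a caller could combine it with the
 * bnxt_set_rx_mask_no_vlan() helper below:
 *
 *	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, vf_vnic_set_rxmask_cb,
 *						&flag, bnxt_set_rx_mask_no_vlan);
 */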
2386 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2388 uint32_t *flag = flagp;
2390 vnic->flags = *flag;
2393 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2395 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
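/*
 * Register the forwarded-request buffer with the firmware. The buffer is
 * described as a single physically contiguous region whose page size covers
 * active_vfs * HWRM_MAX_REQ_LEN, so that HWRM requests issued by VFs can be
 * forwarded into it for the PF to service.
 */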
2398 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2401 struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2402 struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2404 HWRM_PREP(req, FUNC_BUF_RGTR, -1, resp);
2406 req.req_buf_num_pages = rte_cpu_to_le_16(1);
2407 req.req_buf_page_size = rte_cpu_to_le_16(
2408 page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2409 req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2410 req.req_buf_page_addr[0] =
2411 rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
2412 if (req.req_buf_page_addr[0] == 0) {
2414 "unable to map buffer address to physical memory\n");
2418 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2425 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2428 struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2429 struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2431 HWRM_PREP(req, FUNC_BUF_UNRGTR, -1, resp);
2433 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
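/*
 * Point firmware asynchronous event notifications at the PF's default
 * completion ring. The FUNC_VF_CFG variant below does the same for a VF.
 */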
2440 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2442 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2443 struct hwrm_func_cfg_input req = {0};
2446 HWRM_PREP(req, FUNC_CFG, -1, resp);
2447 req.fid = rte_cpu_to_le_16(0xffff);
2448 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2449 req.enables = rte_cpu_to_le_32(
2450 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2451 req.async_event_cr = rte_cpu_to_le_16(
2452 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2453 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2459 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2461 struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2462 struct hwrm_func_vf_cfg_input req = {0};
2465 HWRM_PREP(req, FUNC_VF_CFG, -1, resp);
2466 req.enables = rte_cpu_to_le_32(
2467 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2468 req.async_event_cr = rte_cpu_to_le_16(
2469 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2470 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
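/*
 * Program a default VLAN: for the given VF when is_vf is set (using the
 * per-VF fid and func_cfg_flags), otherwise for the PF itself (FID 0xffff).
 */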
2476 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2478 struct hwrm_func_cfg_input req = {0};
2479 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2480 uint16_t dflt_vlan, fid;
2481 uint32_t func_cfg_flags;
2484 HWRM_PREP(req, FUNC_CFG, -1, resp);
2487 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2488 fid = bp->pf.vf_info[vf].fid;
2489 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2491 fid = rte_cpu_to_le_16(0xffff);
2492 func_cfg_flags = bp->pf.func_cfg_flags;
2493 dflt_vlan = bp->vlan;
2496 req.flags = rte_cpu_to_le_32(func_cfg_flags);
2497 req.fid = rte_cpu_to_le_16(fid);
2498 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2499 req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2501 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2507 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2508 uint16_t max_bw, uint16_t enables)
2510 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2511 struct hwrm_func_cfg_input req = {0};
2514 HWRM_PREP(req, FUNC_CFG, -1, resp);
2515 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2516 req.enables |= rte_cpu_to_le_32(enables);
2517 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2518 req.max_bw = rte_cpu_to_le_32(max_bw);
2519 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2525 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2527 struct hwrm_func_cfg_input req = {0};
2528 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2531 HWRM_PREP(req, FUNC_CFG, -1, resp);
2532 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2533 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2534 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2535 req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
2537 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
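/*
 * Used while servicing a forwarded VF command: hand the encapsulated request
 * back to the firmware so it can return a rejection to the requesting
 * function (target_id).
 */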
2543 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2544 void *encaped, size_t ec_size)
2547 struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2548 struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2550 if (ec_size > sizeof(req.encap_request))
2553 HWRM_PREP(req, REJECT_FWD_RESP, -1, resp);
2555 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2556 memcpy(req.encap_request, encaped, ec_size);
2558 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
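/* Read the default MAC address currently assigned to the given VF. */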
2565 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2566 struct ether_addr *mac)
2568 struct hwrm_func_qcfg_input req = {0};
2569 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2572 HWRM_PREP(req, FUNC_QCFG, -1, resp);
2573 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2574 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2578 memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
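/*
 * Counterpart of bnxt_hwrm_reject_fwd_resp(): ask the firmware to execute
 * the encapsulated, forwarded VF request on the VF's behalf.
 */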
2582 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2583 void *encaped, size_t ec_size)
2586 struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2587 struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2589 if (ec_size > sizeof(req.encap_request))
2592 HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
2594 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2595 memcpy(req.encap_request, encaped, ec_size);
2597 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
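/*
 * Query one statistics context and accumulate its unicast, multicast and
 * broadcast packet/byte counters, plus error and drop counts, into the
 * per-queue fields of rte_eth_stats at index idx.
 */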
2604 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
2605 struct rte_eth_stats *stats)
2608 struct hwrm_stat_ctx_query_input req = {.req_type = 0};
2609 struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
2611 HWRM_PREP(req, STAT_CTX_QUERY, -1, resp);
2613 req.stat_ctx_id = rte_cpu_to_le_32(cid);
2615 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2619 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2620 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2621 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2622 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2623 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2624 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2626 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2627 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2628 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2629 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2630 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2631 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2633 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
2634 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
2635 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
2640 int bnxt_hwrm_port_qstats(struct bnxt *bp)
2642 struct hwrm_port_qstats_input req = {0};
2643 struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2644 struct bnxt_pf_info *pf = &bp->pf;
2647 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2650 HWRM_PREP(req, PORT_QSTATS, -1, resp);
2651 req.port_id = rte_cpu_to_le_16(pf->port_id);
2652 req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
2653 req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
2654 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2659 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
2661 struct hwrm_port_clr_stats_input req = {0};
2662 struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2663 struct bnxt_pf_info *pf = &bp->pf;
2666 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2669 HWRM_PREP(req, PORT_CLR_STATS, -1, resp);
2670 req.port_id = rte_cpu_to_le_16(pf->port_id);
2671 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
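/*
 * Query the port's LED capabilities and cache them in bp->leds. The loop
 * below validates that each reported LED has a group id and supports
 * alternate blinking.
 */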
2676 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
2678 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2679 struct hwrm_port_led_qcaps_input req = {0};
2685 HWRM_PREP(req, PORT_LED_QCAPS, -1, resp);
2686 req.port_id = bp->pf.port_id;
2687 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2690 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
2693 bp->num_leds = resp->num_leds;
2694 memcpy(bp->leds, &resp->led0_id,
2695 sizeof(bp->leds[0]) * bp->num_leds);
2696 for (i = 0; i < bp->num_leds; i++) {
2697 struct bnxt_led_info *led = &bp->leds[i];
2699 uint16_t caps = led->led_state_caps;
2701 if (!led->led_group_id ||
2702 !BNXT_LED_ALT_BLINK_CAP(caps)) {
2711 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
2713 struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2714 struct hwrm_port_led_cfg_input req = {0};
2715 struct bnxt_led_cfg *led_cfg;
2716 uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
2717 uint16_t duration = 0;
2720 if (!bp->num_leds || BNXT_VF(bp))
2723 HWRM_PREP(req, PORT_LED_CFG, -1, resp);
2725 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
2726 duration = rte_cpu_to_le_16(500);
2728 req.port_id = bp->pf.port_id;
2729 req.num_leds = bp->num_leds;
2730 led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
2731 for (i = 0; i < bp->num_leds; i++, led_cfg++) {
2732 req.enables |= BNXT_LED_DFLT_ENABLES(i);
2733 led_cfg->led_id = bp->leds[i].led_id;
2734 led_cfg->led_state = led_state;
2735 led_cfg->led_blink_on = duration;
2736 led_cfg->led_blink_off = duration;
2737 led_cfg->led_group_id = bp->leds[i].led_group_id;
2740 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
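/*
 * bnxt_vnic_count() is a per-VNIC callback that simply increments a counter,
 * and bnxt_vnic_count_hwrm_stub() programs nothing. bnxt_vf_vnic_count()
 * combines them with bnxt_hwrm_func_vf_vnic_query_and_config() to count the
 * VNICs currently owned by a VF.
 */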
2747 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
2749 uint32_t *count = cbdata;
2751 *count = *count + 1;
2754 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
2755 struct bnxt_vnic_info *vnic __rte_unused)
2760 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
2764 bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
2765 &count, bnxt_vnic_count_hwrm_stub);
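/*
 * Query the firmware for the VNIC IDs owned by the given VF. The IDs are
 * written into the caller-supplied (page-locked) vnic_ids table and the
 * number of valid entries is returned, or an error code on failure.
 */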
2770 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
2773 struct hwrm_func_vf_vnic_ids_query_input req = {0};
2774 struct hwrm_func_vf_vnic_ids_query_output *resp =
2775 bp->hwrm_cmd_resp_addr;
2778 /* First query all VNIC ids */
2779 HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, -1, resp);
2781 req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
2782 req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
2783 req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));
2785 if (req.vnic_id_tbl_addr == 0) {
2787 "unable to map VNIC ID table address to physical memory\n");
2790 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2792 RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
2794 } else if (resp->error_code) {
2795 rc = rte_le_to_cpu_16(resp->error_code);
2796 RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
2800 return rte_le_to_cpu_32(resp->vnic_id_cnt);
2804 * This function queries the VNIC IDs for the specified VF, calls vnic_cb
2805 * to update the relevant vnic_info fields using cbdata, and then calls
2806 * hwrm_cb to program the updated VNIC configuration into the firmware.
2808 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
2809 void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
2810 int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
2812 struct bnxt_vnic_info vnic;
2814 int i, num_vnic_ids;
2819 /* First query all VNIC ids */
2820 vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
2821 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
2822 RTE_CACHE_LINE_SIZE);
2823 if (vnic_ids == NULL) {
2827 for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
2828 rte_mem_lock_page(((char *)vnic_ids) + sz);
2830 num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
2832 if (num_vnic_ids < 0)
2833 return num_vnic_ids;
2835 /* Retrieve each VNIC, apply vnic_cb to it, then reprogram it via hwrm_cb */
2837 for (i = 0; i < num_vnic_ids; i++) {
2838 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
2839 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
2840 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
2843 if (vnic.mru <= 4) /* Indicates unallocated */
2846 vnic_cb(&vnic, cbdata);
2848 rc = hwrm_cb(bp, &vnic);
2858 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
2861 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2862 struct hwrm_func_cfg_input req = {0};
2865 HWRM_PREP(req, FUNC_CFG, -1, resp);
2866 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2867 req.enables |= rte_cpu_to_le_32(
2868 HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
2869 req.vlan_antispoof_mode = on ?
2870 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
2871 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
2872 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
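/*
 * Find the VF's default VNIC: query all of its VNIC IDs, then QCFG each one
 * until the VNIC marked func_default is found and return its fw_vnic_id.
 */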
2878 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
2880 struct bnxt_vnic_info vnic;
2883 int num_vnic_ids, i;
2887 vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
2888 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
2889 RTE_CACHE_LINE_SIZE);
2890 if (vnic_ids == NULL) {
2895 for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
2896 rte_mem_lock_page(((char *)vnic_ids) + sz);
2898 rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
2904 * Loop through to find the default VNIC ID.
2905 * TODO: The easier way would be to obtain the resp->dflt_vnic_id
2906 * by sending the hwrm_func_qcfg command to the firmware.
2908 for (i = 0; i < num_vnic_ids; i++) {
2909 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
2910 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
2911 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
2912 bp->pf.first_vf_id + vf);
2915 if (vnic.func_default) {
2917 return vnic.fw_vnic_id;
2920 /* Could not find a default VNIC. */
2921 RTE_LOG(ERR, PMD, "No default VNIC\n");