4 * Copyright(c) Broadcom Limited.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Broadcom Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 #include <rte_byteorder.h>
37 #include <rte_common.h>
38 #include <rte_cycles.h>
39 #include <rte_malloc.h>
40 #include <rte_memzone.h>
41 #include <rte_version.h>
45 #include "bnxt_filter.h"
46 #include "bnxt_hwrm.h"
49 #include "bnxt_ring.h"
52 #include "bnxt_vnic.h"
53 #include "hsi_struct_def_dpdk.h"
57 #define HWRM_CMD_TIMEOUT 2000
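/*
 * Upper bound on the number of polling attempts made while waiting for the
 * firmware to set the response valid byte; see bnxt_hwrm_send_message_locked().
 */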
59 struct bnxt_plcmodes_cfg {
61 uint16_t jumbo_thresh;
63 uint16_t hds_threshold;
66 static int page_getenum(size_t size)
82 RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
83 return sizeof(void *) * 8 - 1;
86 static int page_roundup(size_t size)
88 return 1 << page_getenum(size);
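/*
 * page_roundup() returns the smallest power of two that covers 'size'
 * (assuming the elided page_getenum() body maps a size to the matching
 * exponent), e.g. page_roundup(3000) would yield 4096.
 */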
92 * HWRM Functions (sent to HWRM)
93 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
94 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the HWRM
95 * command is rejected by the ChiMP firmware.
98 static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
102 struct input *req = msg;
103 struct output *resp = bp->hwrm_cmd_resp_addr;
104 uint32_t *data = msg;
107 uint16_t max_req_len = bp->max_req_len;
108 struct hwrm_short_input short_input = { 0 };
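/*
 * Short command mode: the full request is copied into a dedicated DMA buffer
 * and only a small hwrm_short_input descriptor (type, size and the buffer's
 * DMA address) is written through BAR0 below.
 */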
110 if (bp->flags & BNXT_FLAG_SHORT_CMD) {
111 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
113 memset(short_cmd_req, 0, bp->max_req_len);
114 memcpy(short_cmd_req, req, msg_len);
116 short_input.req_type = rte_cpu_to_le_16(req->req_type);
117 short_input.signature = rte_cpu_to_le_16(
118 HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
119 short_input.size = rte_cpu_to_le_16(msg_len);
120 short_input.req_addr =
121 rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);
123 data = (uint32_t *)&short_input;
124 msg_len = sizeof(short_input);
126 /* Sync memory write before updating doorbell */
129 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
132 /* Write request msg to hwrm channel */
133 for (i = 0; i < msg_len; i += 4) {
134 bar = (uint8_t *)bp->bar0 + i;
135 rte_write32(*data, bar);
139 /* Zero the rest of the request space */
140 for (; i < max_req_len; i += 4) {
141 bar = (uint8_t *)bp->bar0 + i;
145 /* Ring channel doorbell */
146 bar = (uint8_t *)bp->bar0 + 0x100;
149 /* Poll for the valid bit */
150 for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
151 /* Sanity check on the resp->resp_len */
153 if (resp->resp_len && resp->resp_len <=
155 /* Last byte of resp contains the valid key */
156 valid = (uint8_t *)resp + resp->resp_len - 1;
157 if (*valid == HWRM_RESP_VALID_KEY)
163 if (i >= HWRM_CMD_TIMEOUT) {
164 RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
174 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
178 rte_spinlock_lock(&bp->hwrm_lock);
179 rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
180 rte_spinlock_unlock(&bp->hwrm_lock);
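/*
 * HWRM_PREP() clears the shared response buffer and fills in the common
 * request header: command type, completion ring, sequence id, target and the
 * DMA address the firmware should write the response to.
 */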
184 #define HWRM_PREP(req, type, cr, resp) \
185 memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
186 req.req_type = rte_cpu_to_le_16(HWRM_##type); \
187 req.cmpl_ring = rte_cpu_to_le_16(cr); \
188 req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
189 req.target_id = rte_cpu_to_le_16(0xffff); \
190 req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)
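/*
 * HWRM_CHECK_RESULT logs a failure when the message could not be sent or when
 * the firmware returned a non-zero error_code (including the detailed
 * hwrm_err_output fields when the response is long enough to carry them) and,
 * in the elided lines, returns that error to the caller.
 */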
192 #define HWRM_CHECK_RESULT \
195 RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
199 if (resp->error_code) { \
200 rc = rte_le_to_cpu_16(resp->error_code); \
201 if (resp->resp_len >= 16) { \
202 struct hwrm_err_output *tmp_hwrm_err_op = \
205 "%s error %d:%d:%08x:%04x\n", \
207 rc, tmp_hwrm_err_op->cmd_err, \
209 tmp_hwrm_err_op->opaque_0), \
211 tmp_hwrm_err_op->opaque_1)); \
215 "%s error %d\n", __func__, rc); \
221 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
224 struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
225 struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
227 HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
228 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
231 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
238 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
239 struct bnxt_vnic_info *vnic,
241 struct bnxt_vlan_table_entry *vlan_table)
244 struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
245 struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
248 HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
249 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
251 /* FIXME: add the multicast flag once adding multicast addresses is supported
254 if (vnic->flags & BNXT_VNIC_INFO_BCAST)
255 mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
256 if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
257 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
258 if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
259 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
260 if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
261 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
262 if (vnic->flags & BNXT_VNIC_INFO_MCAST)
263 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
264 if (vnic->mc_addr_cnt) {
265 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
266 req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
267 req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
269 if (vlan_count && vlan_table) {
270 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
271 req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
272 rte_mem_virt2phy(vlan_table));
273 req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
275 req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
278 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
285 int bnxt_hwrm_clear_filter(struct bnxt *bp,
286 struct bnxt_filter_info *filter)
289 struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
290 struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
292 HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);
294 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
296 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
300 filter->fw_l2_filter_id = -1;
305 int bnxt_hwrm_set_filter(struct bnxt *bp,
307 struct bnxt_filter_info *filter)
310 struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
311 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
312 uint32_t enables = 0;
314 HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);
316 req.flags = rte_cpu_to_le_32(filter->flags);
318 enables = filter->enables |
319 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
320 req.dst_id = rte_cpu_to_le_16(dst_id);
323 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
324 memcpy(req.l2_addr, filter->l2_addr,
327 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
328 memcpy(req.l2_addr_mask, filter->l2_addr_mask,
331 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
332 req.l2_ovlan = filter->l2_ovlan;
334 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
335 req.l2_ovlan_mask = filter->l2_ovlan_mask;
336 if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
337 req.src_id = rte_cpu_to_le_32(filter->src_id);
338 if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
339 req.src_type = filter->src_type;
341 req.enables = rte_cpu_to_le_32(enables);
343 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
347 filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
352 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
355 struct hwrm_func_qcaps_input req = {.req_type = 0 };
356 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
357 uint16_t new_max_vfs;
360 HWRM_PREP(req, FUNC_QCAPS, -1, resp);
362 req.fid = rte_cpu_to_le_16(0xffff);
364 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
368 bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
370 bp->pf.port_id = resp->port_id;
371 bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
372 new_max_vfs = bp->pdev->max_vfs;
373 if (new_max_vfs != bp->pf.max_vfs) {
375 rte_free(bp->pf.vf_info);
376 bp->pf.vf_info = rte_malloc("bnxt_vf_info",
377 sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
378 bp->pf.max_vfs = new_max_vfs;
379 for (i = 0; i < new_max_vfs; i++) {
380 bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
381 bp->pf.vf_info[i].vlan_table =
382 rte_zmalloc("VF VLAN table",
385 if (bp->pf.vf_info[i].vlan_table == NULL)
387 "Fail to alloc VLAN table for VF %d\n",
391 bp->pf.vf_info[i].vlan_table);
392 STAILQ_INIT(&bp->pf.vf_info[i].filter);
397 bp->fw_fid = rte_le_to_cpu_32(resp->fid);
398 memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
399 bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
400 bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
401 bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
402 bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
403 bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
404 /* TODO: For now, do not support VMDq/RFS on VFs. */
409 bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
413 bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
415 bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
420 int bnxt_hwrm_func_reset(struct bnxt *bp)
423 struct hwrm_func_reset_input req = {.req_type = 0 };
424 struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
426 HWRM_PREP(req, FUNC_RESET, -1, resp);
428 req.enables = rte_cpu_to_le_32(0);
430 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
437 int bnxt_hwrm_func_driver_register(struct bnxt *bp)
440 struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
441 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
443 if (bp->flags & BNXT_FLAG_REGISTERED)
446 HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
447 req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
448 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
449 req.ver_maj = RTE_VER_YEAR;
450 req.ver_min = RTE_VER_MONTH;
451 req.ver_upd = RTE_VER_MINOR;
454 req.enables |= rte_cpu_to_le_32(
455 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
456 memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
457 RTE_MIN(sizeof(req.vf_req_fwd),
458 sizeof(bp->pf.vf_req_fwd)));
461 req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1); /* TODO: Use MACRO */
462 memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
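/* The memset above requests forwarding of all async events and supersedes the
 * single-bit enable on the previous line.
 */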
464 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
468 bp->flags |= BNXT_FLAG_REGISTERED;
473 int bnxt_hwrm_ver_get(struct bnxt *bp)
476 struct hwrm_ver_get_input req = {.req_type = 0 };
477 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
480 uint16_t max_resp_len;
481 char type[RTE_MEMZONE_NAMESIZE];
482 uint32_t dev_caps_cfg;
484 bp->max_req_len = HWRM_MAX_REQ_LEN;
485 HWRM_PREP(req, VER_GET, -1, resp);
487 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
488 req.hwrm_intf_min = HWRM_VERSION_MINOR;
489 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
492 * Hold the lock since we may be adjusting the response pointers.
494 rte_spinlock_lock(&bp->hwrm_lock);
495 rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));
499 RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
500 resp->hwrm_intf_maj, resp->hwrm_intf_min,
502 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
503 bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
504 (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
505 RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
506 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
508 my_version = HWRM_VERSION_MAJOR << 16;
509 my_version |= HWRM_VERSION_MINOR << 8;
510 my_version |= HWRM_VERSION_UPDATE;
512 fw_version = resp->hwrm_intf_maj << 16;
513 fw_version |= resp->hwrm_intf_min << 8;
514 fw_version |= resp->hwrm_intf_upd;
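/*
 * Both versions are packed as (major << 16) | (minor << 8) | update so the
 * driver and firmware HWRM interface versions can be compared numerically.
 */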
516 if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
517 RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
522 if (my_version != fw_version) {
523 RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
524 if (my_version < fw_version) {
526 "Firmware API version is newer than driver.\n");
528 "The driver may be missing features.\n");
531 "Firmware API version is older than driver.\n");
533 "Not all driver features may be functional.\n");
537 if (bp->max_req_len > resp->max_req_win_len) {
538 RTE_LOG(ERR, PMD, "Unsupported request length\n");
541 bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
542 max_resp_len = resp->max_resp_len;
543 dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);
545 if (bp->max_resp_len != max_resp_len) {
546 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
547 bp->pdev->addr.domain, bp->pdev->addr.bus,
548 bp->pdev->addr.devid, bp->pdev->addr.function);
550 rte_free(bp->hwrm_cmd_resp_addr);
552 bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
553 if (bp->hwrm_cmd_resp_addr == NULL) {
557 rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
558 bp->hwrm_cmd_resp_dma_addr =
559 rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
560 if (bp->hwrm_cmd_resp_dma_addr == 0) {
562 "Unable to map response buffer to physical memory.\n");
566 bp->max_resp_len = max_resp_len;
570 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
572 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
573 RTE_LOG(DEBUG, PMD, "Short command supported\n");
575 rte_free(bp->hwrm_short_cmd_req_addr);
577 bp->hwrm_short_cmd_req_addr = rte_malloc(type,
579 if (bp->hwrm_short_cmd_req_addr == NULL) {
583 rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
584 bp->hwrm_short_cmd_req_dma_addr =
585 rte_mem_virt2phy(bp->hwrm_short_cmd_req_addr);
586 if (bp->hwrm_short_cmd_req_dma_addr == 0) {
587 rte_free(bp->hwrm_short_cmd_req_addr);
589 "Unable to map buffer to physical memory.\n");
594 bp->flags |= BNXT_FLAG_SHORT_CMD;
598 rte_spinlock_unlock(&bp->hwrm_lock);
602 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
605 struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
606 struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
608 if (!(bp->flags & BNXT_FLAG_REGISTERED))
611 HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
614 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
618 bp->flags &= ~BNXT_FLAG_REGISTERED;
623 static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
626 struct hwrm_port_phy_cfg_input req = {0};
627 struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
628 uint32_t enables = 0;
629 uint32_t link_speed_mask =
630 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
632 HWRM_PREP(req, PORT_PHY_CFG, -1, resp);
635 req.flags = rte_cpu_to_le_32(conf->phy_flags);
636 req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
638 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
639 * any auto mode, even "none".
641 if (!conf->link_speed) {
642 req.auto_mode = conf->auto_mode;
643 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
644 if (conf->auto_mode ==
645 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK) {
646 req.auto_link_speed_mask =
647 conf->auto_link_speed_mask;
648 enables |= link_speed_mask;
650 if (bp->link_info.auto_link_speed) {
651 req.auto_link_speed =
652 bp->link_info.auto_link_speed;
654 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
657 req.auto_duplex = conf->duplex;
658 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
659 req.auto_pause = conf->auto_pause;
660 req.force_pause = conf->force_pause;
661 /* Set force_pause if there is no auto or if there is a force */
662 if (req.auto_pause && !req.force_pause)
663 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
665 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
667 req.enables = rte_cpu_to_le_32(enables);
670 rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
671 RTE_LOG(INFO, PMD, "Force Link Down\n");
674 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
681 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
682 struct bnxt_link_info *link_info)
685 struct hwrm_port_phy_qcfg_input req = {0};
686 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
688 HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);
690 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
694 link_info->phy_link_status = resp->link;
696 (link_info->phy_link_status ==
697 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
698 link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
699 link_info->duplex = resp->duplex;
700 link_info->pause = resp->pause;
701 link_info->auto_pause = resp->auto_pause;
702 link_info->force_pause = resp->force_pause;
703 link_info->auto_mode = resp->auto_mode;
705 link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
706 link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
707 link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
708 link_info->phy_ver[0] = resp->phy_maj;
709 link_info->phy_ver[1] = resp->phy_min;
710 link_info->phy_ver[2] = resp->phy_bld;
715 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
718 struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
719 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
721 HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);
723 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
727 #define GET_QUEUE_INFO(x) \
728 bp->cos_queue[x].id = resp->queue_id##x; \
729 bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
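/*
 * GET_QUEUE_INFO(x) pastes its argument into the response field names
 * (resp->queue_id##x and resp->queue_id##x##_service_profile) and copies them
 * into bp->cos_queue[x]; the elided code below presumably invokes it once per
 * CoS queue returned by QUEUE_QPORTCFG.
 */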
743 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
744 struct bnxt_ring *ring,
745 uint32_t ring_type, uint32_t map_index,
746 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
749 uint32_t enables = 0;
750 struct hwrm_ring_alloc_input req = {.req_type = 0 };
751 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
753 HWRM_PREP(req, RING_ALLOC, -1, resp);
755 req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
756 req.fbo = rte_cpu_to_le_32(0);
757 /* Association of ring index with doorbell index */
758 req.logical_id = rte_cpu_to_le_16(map_index);
759 req.length = rte_cpu_to_le_32(ring->ring_size);
762 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
763 req.queue_id = bp->cos_queue[0].id;
765 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
766 req.ring_type = ring_type;
767 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
768 req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
769 if (stats_ctx_id != INVALID_STATS_CTX_ID)
771 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
773 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
774 req.ring_type = ring_type;
776 * TODO: Some HWRM versions crash with
777 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
779 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
782 RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
786 req.enables = rte_cpu_to_le_32(enables);
788 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
790 if (rc || resp->error_code) {
791 if (rc == 0 && resp->error_code)
792 rc = rte_le_to_cpu_16(resp->error_code);
794 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
796 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
798 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
800 "hwrm_ring_alloc rx failed. rc:%d\n", rc);
802 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
804 "hwrm_ring_alloc tx failed. rc:%d\n", rc);
807 RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
812 ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
816 int bnxt_hwrm_ring_free(struct bnxt *bp,
817 struct bnxt_ring *ring, uint32_t ring_type)
820 struct hwrm_ring_free_input req = {.req_type = 0 };
821 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
823 HWRM_PREP(req, RING_FREE, -1, resp);
825 req.ring_type = ring_type;
826 req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
828 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
830 if (rc || resp->error_code) {
831 if (rc == 0 && resp->error_code)
832 rc = rte_le_to_cpu_16(resp->error_code);
835 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
836 RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
839 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
840 RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
843 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
844 RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
848 RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
855 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
858 struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
859 struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
861 HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);
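/* cr/rr/ar/sc carry the completion, Rx, aggregation and statistics-context
 * ring IDs that make up this ring group.
 */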
863 req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
864 req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
865 req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
866 req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
868 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
872 bp->grp_info[idx].fw_grp_id =
873 rte_le_to_cpu_16(resp->ring_group_id);
878 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
881 struct hwrm_ring_grp_free_input req = {.req_type = 0 };
882 struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
884 HWRM_PREP(req, RING_GRP_FREE, -1, resp);
886 req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
888 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
892 bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
896 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
899 struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
900 struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
902 if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
905 HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
907 req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
909 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
916 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
917 unsigned int idx __rte_unused)
920 struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
921 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
923 HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);
925 req.update_period_ms = rte_cpu_to_le_32(0);
928 rte_cpu_to_le_64(cpr->hw_stats_map);
930 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
934 cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
939 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
940 unsigned int idx __rte_unused)
943 struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
944 struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
946 HWRM_PREP(req, STAT_CTX_FREE, -1, resp);
948 req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
950 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
957 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
960 struct hwrm_vnic_alloc_input req = { 0 };
961 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
963 /* map ring groups to this vnic */
964 RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
965 vnic->start_grp_id, vnic->end_grp_id);
966 for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
967 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
968 vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
969 vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
970 vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
971 vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
972 vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
973 ETHER_CRC_LEN + VLAN_TAG_SIZE;
974 HWRM_PREP(req, VNIC_ALLOC, -1, resp);
976 if (vnic->func_default)
977 req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
978 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
982 vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
983 RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
987 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
988 struct bnxt_vnic_info *vnic,
989 struct bnxt_plcmodes_cfg *pmode)
992 struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
993 struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
995 HWRM_PREP(req, VNIC_PLCMODES_QCFG, -1, resp);
997 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
999 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1003 pmode->flags = rte_le_to_cpu_32(resp->flags);
1004 /* dflt_vnic bit doesn't exist in the _cfg command */
1005 pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1006 pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1007 pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1008 pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1013 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1014 struct bnxt_vnic_info *vnic,
1015 struct bnxt_plcmodes_cfg *pmode)
1018 struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1019 struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1021 HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);
1023 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1024 req.flags = rte_cpu_to_le_32(pmode->flags);
1025 req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1026 req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1027 req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1028 req.enables = rte_cpu_to_le_32(
1029 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1030 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1031 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1034 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1041 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1044 struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1045 struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1046 uint32_t ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1047 struct bnxt_plcmodes_cfg pmodes;
1049 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1050 RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
1054 rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1058 HWRM_PREP(req, VNIC_CFG, -1, resp);
1060 /* Only RSS is supported for now; TBD: COS & LB */
1062 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
1063 HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
1064 if (vnic->lb_rule != 0xffff)
1065 ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1066 if (vnic->cos_rule != 0xffff)
1067 ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1068 if (vnic->rss_rule != 0xffff)
1069 ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1070 req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
1071 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1072 req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1073 req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1074 req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1075 req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1076 req.mru = rte_cpu_to_le_16(vnic->mru);
1077 if (vnic->func_default)
1079 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1080 if (vnic->vlan_strip)
1082 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1085 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1086 if (vnic->roce_dual)
1087 req.flags |= rte_cpu_to_le_32(
1088 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1089 if (vnic->roce_only)
1090 req.flags |= rte_cpu_to_le_32(
1091 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1092 if (vnic->rss_dflt_cr)
1093 req.flags |= rte_cpu_to_le_32(
1094 HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
1096 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1100 rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1105 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1109 struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1110 struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1112 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1113 RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1116 HWRM_PREP(req, VNIC_QCFG, -1, resp);
1119 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1120 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1121 req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1123 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1127 vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1128 vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1129 vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1130 vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1131 vnic->mru = rte_le_to_cpu_16(resp->mru);
1132 vnic->func_default = rte_le_to_cpu_32(
1133 resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1134 vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1135 HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1136 vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1137 HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1138 vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1139 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1140 vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1141 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1142 vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1143 HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1148 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1151 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1152 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1153 bp->hwrm_cmd_resp_addr;
1155 HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);
1157 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1161 vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1162 RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
1167 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1170 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1171 struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1172 bp->hwrm_cmd_resp_addr;
1174 if (vnic->rss_rule == 0xffff) {
1175 RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
1178 HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);
1180 req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1182 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1186 vnic->rss_rule = INVALID_HW_RING_ID;
1191 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1194 struct hwrm_vnic_free_input req = {.req_type = 0 };
1195 struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1197 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1198 RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1202 HWRM_PREP(req, VNIC_FREE, -1, resp);
1204 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1206 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1210 vnic->fw_vnic_id = INVALID_HW_RING_ID;
1214 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1215 struct bnxt_vnic_info *vnic)
1218 struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1219 struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1221 HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);
1223 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1225 req.ring_grp_tbl_addr =
1226 rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1227 req.hash_key_tbl_addr =
1228 rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1229 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1231 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1238 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1239 struct bnxt_vnic_info *vnic)
1242 struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1243 struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1246 HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);
1248 req.flags = rte_cpu_to_le_32(
1249 HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1251 req.enables = rte_cpu_to_le_32(
1252 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1254 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1255 size -= RTE_PKTMBUF_HEADROOM;
1257 req.jumbo_thresh = rte_cpu_to_le_16(size);
1258 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1260 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1267 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1268 struct bnxt_vnic_info *vnic, bool enable)
1271 struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1272 struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1274 HWRM_PREP(req, VNIC_TPA_CFG, -1, resp);
1277 req.enables = rte_cpu_to_le_32(
1278 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1279 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1280 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1281 req.flags = rte_cpu_to_le_32(
1282 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1283 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1284 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1285 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1286 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1287 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1288 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1289 req.max_agg_segs = rte_cpu_to_le_16(5);
1291 rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1292 req.min_agg_len = rte_cpu_to_le_32(512);
1295 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1302 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1304 struct hwrm_func_cfg_input req = {0};
1305 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1308 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1309 req.enables = rte_cpu_to_le_32(
1310 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1311 memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1312 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1314 HWRM_PREP(req, FUNC_CFG, -1, resp);
1316 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1319 bp->pf.vf_info[vf].random_mac = false;
1324 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1328 struct hwrm_func_qstats_input req = {.req_type = 0};
1329 struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1331 HWRM_PREP(req, FUNC_QSTATS, -1, resp);
1333 req.fid = rte_cpu_to_le_16(fid);
1335 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1340 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1345 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1346 struct rte_eth_stats *stats)
1349 struct hwrm_func_qstats_input req = {.req_type = 0};
1350 struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1352 HWRM_PREP(req, FUNC_QSTATS, -1, resp);
1354 req.fid = rte_cpu_to_le_16(fid);
1356 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1360 stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1361 stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1362 stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1363 stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1364 stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1365 stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1367 stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1368 stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1369 stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1370 stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1371 stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1372 stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1374 stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
1375 stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);
1377 stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
1382 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1385 struct hwrm_func_clr_stats_input req = {.req_type = 0};
1386 struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1388 HWRM_PREP(req, FUNC_CLR_STATS, -1, resp);
1390 req.fid = rte_cpu_to_le_16(fid);
1392 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1400 * HWRM utility functions
1403 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1408 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1409 struct bnxt_tx_queue *txq;
1410 struct bnxt_rx_queue *rxq;
1411 struct bnxt_cp_ring_info *cpr;
1413 if (i >= bp->rx_cp_nr_rings) {
1414 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1417 rxq = bp->rx_queues[i];
1421 rc = bnxt_hwrm_stat_clear(bp, cpr);
1428 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1432 struct bnxt_cp_ring_info *cpr;
1434 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1436 if (i >= bp->rx_cp_nr_rings)
1437 cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1439 cpr = bp->rx_queues[i]->cp_ring;
1440 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1441 rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1442 cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
1444 * TODO. Need a better way to reset grp_info.stats_ctx
1445 * for Rx rings only. stats_ctx is not saved for Tx
1448 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
1456 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1461 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1462 struct bnxt_tx_queue *txq;
1463 struct bnxt_rx_queue *rxq;
1464 struct bnxt_cp_ring_info *cpr;
1466 if (i >= bp->rx_cp_nr_rings) {
1467 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1470 rxq = bp->rx_queues[i];
1474 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1482 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1487 for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1489 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
1491 "Attempt to free invalid ring group %d\n",
1496 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1504 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1505 unsigned int idx __rte_unused)
1507 struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1509 bnxt_hwrm_ring_free(bp, cp_ring,
1510 HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1511 cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1512 bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
1513 memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1514 sizeof(*cpr->cp_desc_ring));
1515 cpr->cp_raw_cons = 0;
1518 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1523 for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1524 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1525 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1526 struct bnxt_ring *ring = txr->tx_ring_struct;
1527 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1528 unsigned int idx = bp->rx_cp_nr_rings + i + 1;
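/*
 * Group 0 is used for the default completion ring; Rx queue i maps to group
 * i + 1 and the Tx queues follow after all Rx groups, hence the offset.
 */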
1530 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1531 bnxt_hwrm_ring_free(bp, ring,
1532 HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1533 ring->fw_ring_id = INVALID_HW_RING_ID;
1534 memset(txr->tx_desc_ring, 0,
1535 txr->tx_ring_struct->ring_size *
1536 sizeof(*txr->tx_desc_ring));
1537 memset(txr->tx_buf_ring, 0,
1538 txr->tx_ring_struct->ring_size *
1539 sizeof(*txr->tx_buf_ring));
1543 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1544 bnxt_free_cp_ring(bp, cpr, idx);
1545 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1549 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1550 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1551 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1552 struct bnxt_ring *ring = rxr->rx_ring_struct;
1553 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1554 unsigned int idx = i + 1;
1556 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1557 bnxt_hwrm_ring_free(bp, ring,
1558 HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1559 ring->fw_ring_id = INVALID_HW_RING_ID;
1560 bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
1561 memset(rxr->rx_desc_ring, 0,
1562 rxr->rx_ring_struct->ring_size *
1563 sizeof(*rxr->rx_desc_ring));
1564 memset(rxr->rx_buf_ring, 0,
1565 rxr->rx_ring_struct->ring_size *
1566 sizeof(*rxr->rx_buf_ring));
1568 memset(rxr->ag_buf_ring, 0,
1569 rxr->ag_ring_struct->ring_size *
1570 sizeof(*rxr->ag_buf_ring));
1573 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1574 bnxt_free_cp_ring(bp, cpr, idx);
1575 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
1576 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1580 /* Default completion ring */
1582 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1584 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1585 bnxt_free_cp_ring(bp, cpr, 0);
1586 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1593 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1598 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1599 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1606 void bnxt_free_hwrm_resources(struct bnxt *bp)
1608 /* Release the HWRM command and response buffers */
1609 rte_free(bp->hwrm_cmd_resp_addr);
1610 rte_free(bp->hwrm_short_cmd_req_addr);
1611 bp->hwrm_cmd_resp_addr = NULL;
1612 bp->hwrm_short_cmd_req_addr = NULL;
1613 bp->hwrm_cmd_resp_dma_addr = 0;
1614 bp->hwrm_short_cmd_req_dma_addr = 0;
1617 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1619 struct rte_pci_device *pdev = bp->pdev;
1620 char type[RTE_MEMZONE_NAMESIZE];
1622 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
1623 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1624 bp->max_resp_len = HWRM_MAX_RESP_LEN;
1625 bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1626 rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1627 if (bp->hwrm_cmd_resp_addr == NULL)
1629 bp->hwrm_cmd_resp_dma_addr =
1630 rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
1631 if (bp->hwrm_cmd_resp_dma_addr == 0) {
1633 "unable to map response address to physical memory\n");
1636 rte_spinlock_init(&bp->hwrm_lock);
1641 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1643 struct bnxt_filter_info *filter;
1646 STAILQ_FOREACH(filter, &vnic->filter, next) {
1647 rc = bnxt_hwrm_clear_filter(bp, filter);
1654 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1656 struct bnxt_filter_info *filter;
1659 STAILQ_FOREACH(filter, &vnic->filter, next) {
1660 rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
1667 void bnxt_free_tunnel_ports(struct bnxt *bp)
1669 if (bp->vxlan_port_cnt)
1670 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
1671 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
1673 if (bp->geneve_port_cnt)
1674 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
1675 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
1676 bp->geneve_port = 0;
1679 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1681 struct bnxt_vnic_info *vnic;
1684 if (bp->vnic_info == NULL)
1687 vnic = &bp->vnic_info[0];
1689 bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
1691 /* VNIC resources */
1692 for (i = 0; i < bp->nr_vnics; i++) {
1693 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1695 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1697 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1699 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
1701 bnxt_hwrm_vnic_free(bp, vnic);
1703 /* Ring resources */
1704 bnxt_free_all_hwrm_rings(bp);
1705 bnxt_free_all_hwrm_ring_grps(bp);
1706 bnxt_free_all_hwrm_stat_ctxs(bp);
1707 bnxt_free_tunnel_ports(bp);
1710 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1712 uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1714 if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1715 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1717 switch (conf_link_speed) {
1718 case ETH_LINK_SPEED_10M_HD:
1719 case ETH_LINK_SPEED_100M_HD:
1720 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1722 return hw_link_duplex;
1725 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
1727 uint16_t eth_link_speed = 0;
1729 if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
1730 return ETH_LINK_SPEED_AUTONEG;
1732 switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
1733 case ETH_LINK_SPEED_100M:
1734 case ETH_LINK_SPEED_100M_HD:
1736 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
1738 case ETH_LINK_SPEED_1G:
1740 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
1742 case ETH_LINK_SPEED_2_5G:
1744 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
1746 case ETH_LINK_SPEED_10G:
1748 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
1750 case ETH_LINK_SPEED_20G:
1752 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
1754 case ETH_LINK_SPEED_25G:
1756 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
1758 case ETH_LINK_SPEED_40G:
1760 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
1762 case ETH_LINK_SPEED_50G:
1764 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
1768 "Unsupported link speed %d; default to AUTO\n",
1772 return eth_link_speed;
1775 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
1776 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
1777 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
1778 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
1780 static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
1784 if (link_speed == ETH_LINK_SPEED_AUTONEG)
1787 if (link_speed & ETH_LINK_SPEED_FIXED) {
1788 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
1790 if (one_speed & (one_speed - 1)) {
1792 "Invalid advertised speeds (%u) for port %u\n",
1793 link_speed, port_id);
1796 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
1798 "Unsupported advertised speed (%u) for port %u\n",
1799 link_speed, port_id);
1803 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
1805 "Unsupported advertised speeds (%u) for port %u\n",
1806 link_speed, port_id);
1814 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
1818 if (link_speed == ETH_LINK_SPEED_AUTONEG) {
1819 if (bp->link_info.support_speeds)
1820 return bp->link_info.support_speeds;
1821 link_speed = BNXT_SUPPORTED_SPEEDS;
1824 if (link_speed & ETH_LINK_SPEED_100M)
1825 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1826 if (link_speed & ETH_LINK_SPEED_100M_HD)
1827 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1828 if (link_speed & ETH_LINK_SPEED_1G)
1829 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
1830 if (link_speed & ETH_LINK_SPEED_2_5G)
1831 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
1832 if (link_speed & ETH_LINK_SPEED_10G)
1833 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
1834 if (link_speed & ETH_LINK_SPEED_20G)
1835 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
1836 if (link_speed & ETH_LINK_SPEED_25G)
1837 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
1838 if (link_speed & ETH_LINK_SPEED_40G)
1839 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
1840 if (link_speed & ETH_LINK_SPEED_50G)
1841 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
1845 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
1847 uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
1849 switch (hw_link_speed) {
1850 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
1851 eth_link_speed = ETH_SPEED_NUM_100M;
1853 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
1854 eth_link_speed = ETH_SPEED_NUM_1G;
1856 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
1857 eth_link_speed = ETH_SPEED_NUM_2_5G;
1859 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
1860 eth_link_speed = ETH_SPEED_NUM_10G;
1862 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
1863 eth_link_speed = ETH_SPEED_NUM_20G;
1865 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
1866 eth_link_speed = ETH_SPEED_NUM_25G;
1868 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
1869 eth_link_speed = ETH_SPEED_NUM_40G;
1871 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
1872 eth_link_speed = ETH_SPEED_NUM_50G;
1874 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
1876 RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
1880 return eth_link_speed;
1883 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
1885 uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1887 switch (hw_link_duplex) {
1888 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
1889 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
1890 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1892 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
1893 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
1896 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
1900 return eth_link_duplex;
1903 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
1906 struct bnxt_link_info *link_info = &bp->link_info;
1908 rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
1911 "Get link config failed with rc %d\n", rc);
1914 if (link_info->link_speed)
1916 bnxt_parse_hw_link_speed(link_info->link_speed);
1918 link->link_speed = ETH_SPEED_NUM_NONE;
1919 link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
1920 link->link_status = link_info->link_up;
1921 link->link_autoneg = link_info->auto_mode ==
1922 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
1923 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
1928 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
1931 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1932 struct bnxt_link_info link_req;
1935 if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
1938 rc = bnxt_valid_link_speed(dev_conf->link_speeds,
1939 bp->eth_dev->data->port_id);
1943 memset(&link_req, 0, sizeof(link_req));
1944 link_req.link_up = link_up;
1948 speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
1949 link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
1951 link_req.phy_flags |=
1952 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
1953 link_req.auto_mode =
1954 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
1955 link_req.auto_link_speed_mask =
1956 bnxt_parse_eth_link_speed_mask(bp,
1957 dev_conf->link_speeds);
1959 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
1960 link_req.link_speed = speed;
1961 RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
1963 link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
1964 link_req.auto_pause = bp->link_info.auto_pause;
1965 link_req.force_pause = bp->link_info.force_pause;
1968 rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
1971 "Set link config failed with rc %d\n", rc);
1979 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
1981 struct hwrm_func_qcfg_input req = {0};
1982 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1985 HWRM_PREP(req, FUNC_QCFG, -1, resp);
1986 req.fid = rte_cpu_to_le_16(0xffff);
1988 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1992 /* Hard-coded 0xfff VLAN ID mask */
1993 bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
1995 switch (resp->port_partition_type) {
1996 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
1997 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
1998 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
1999 bp->port_partition_type = resp->port_partition_type;
2002 bp->port_partition_type = 0;
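/*
 * copy_func_cfg_to_qcaps() synthesizes a FUNC_QCAPS-style response from the
 * FUNC_CFG request that was sent, so callers can fall back to the values they
 * configured when querying a VF's capabilities fails.
 */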
2009 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2010 struct hwrm_func_qcaps_output *qcaps)
2012 qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2013 memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2014 sizeof(qcaps->mac_address));
2015 qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2016 qcaps->max_rx_rings = fcfg->num_rx_rings;
2017 qcaps->max_tx_rings = fcfg->num_tx_rings;
2018 qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2019 qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2021 qcaps->first_vf_id = 0;
2022 qcaps->max_vnics = fcfg->num_vnics;
2023 qcaps->max_decap_records = 0;
2024 qcaps->max_encap_records = 0;
2025 qcaps->max_tx_wm_flows = 0;
2026 qcaps->max_tx_em_flows = 0;
2027 qcaps->max_rx_wm_flows = 0;
2028 qcaps->max_rx_em_flows = 0;
2029 qcaps->max_flow_id = 0;
2030 qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2031 qcaps->max_sp_tx_rings = 0;
2032 qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2035 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2037 struct hwrm_func_cfg_input req = {0};
2038 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2041 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2042 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2043 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2044 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2045 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2046 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2047 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2048 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2049 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2050 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2051 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2052 req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2053 req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2054 ETHER_CRC_LEN + VLAN_TAG_SIZE);
2055 req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2056 req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2057 req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2058 req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2059 req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2060 req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2061 req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2062 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2063 req.fid = rte_cpu_to_le_16(0xffff);
2065 HWRM_PREP(req, FUNC_CFG, -1, resp);
2067 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2073 static void populate_vf_func_cfg_req(struct bnxt *bp,
2074 struct hwrm_func_cfg_input *req,
2077 req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2078 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2079 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2080 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2081 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2082 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2083 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2084 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2085 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2086 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
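/* The resources below are split evenly across the PF and all VFs, hence the
 * (num_vfs + 1) divisor.
 */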
2088 req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2089 ETHER_CRC_LEN + VLAN_TAG_SIZE);
2090 req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2091 ETHER_CRC_LEN + VLAN_TAG_SIZE);
2092 req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2094 req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2095 req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2097 req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2098 req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2099 req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2100 /* TODO: For now, do not support VMDq/RFS on VFs. */
2101 req->num_vnics = rte_cpu_to_le_16(1);
2102 req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
2106 static void add_random_mac_if_needed(struct bnxt *bp,
2107 struct hwrm_func_cfg_input *cfg_req,
2110 struct ether_addr mac;
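/*
 * If the firmware reports an all-zero default MAC for this VF, program a
 * random address and remember that it was generated; otherwise reuse the
 * firmware-provided address.
 */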
2112 if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2115 if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2117 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2118 eth_random_addr(cfg_req->dflt_mac_addr);
2119 bp->pf.vf_info[vf].random_mac = true;
2121 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2125 static void reserve_resources_from_vf(struct bnxt *bp,
2126 struct hwrm_func_cfg_input *cfg_req,
2129 struct hwrm_func_qcaps_input req = {0};
2130 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2133 /* Get the actual allocated values now */
2134 HWRM_PREP(req, FUNC_QCAPS, -1, resp);
2135 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2136 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2139 RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
2140 copy_func_cfg_to_qcaps(cfg_req, resp);
2141 } else if (resp->error_code) {
2142 rc = rte_le_to_cpu_16(resp->error_code);
2143 RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
2144 copy_func_cfg_to_qcaps(cfg_req, resp);
2147 bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2148 bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2149 bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2150 bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2151 bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2152 bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2154 * TODO: While not supporting VMDq with VFs, max_vnics is always
2155 * forced to 1 in this case
2157 //bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
2158 bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2161 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2163 struct hwrm_func_qcfg_input req = {0};
2164 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2167 /* Query the VLAN currently assigned to this VF */
2168 HWRM_PREP(req, FUNC_QCFG, -1, resp);
2169 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2170 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2172 RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
2174 } else if (resp->error_code) {
2175 rc = rte_le_to_cpu_16(resp->error_code);
2176 RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
2179 return rte_le_to_cpu_16(resp->vlan);
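/*
 * Refresh the PF's view of its own allocation (FUNC_QCFG) after the VFs
 * have been configured; currently only the TX ring count and EVB mode
 * are taken from the response.
 */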
2182 static int update_pf_resource_max(struct bnxt *bp)
2184 struct hwrm_func_qcfg_input req = {0};
2185 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2188 /* And copy the allocated numbers into the pf struct */
2189 HWRM_PREP(req, FUNC_QCFG, -1, resp);
2190 req.fid = rte_cpu_to_le_16(0xffff);
2191 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2194 /* Only TX ring value reflects actual allocation? TODO */
2195 bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2196 bp->pf.evb_mode = resp->evb_mode;
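/*
 * Configure the PF for operation without SR-IOV VFs: re-read the function
 * capabilities and give all TX rings to the PF with STD_TX_RING_MODE
 * disabled.
 */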
2201 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2206 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
2210 rc = bnxt_hwrm_func_qcaps(bp);
2214 bp->pf.func_cfg_flags &=
2215 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2216 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2217 bp->pf.func_cfg_flags |=
2218 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2219 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
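/*
 * Enable SR-IOV with num_vfs VFs: temporarily shrink the PF to one TX ring,
 * register the VF request-forwarding buffer, issue FUNC_CFG for each VF
 * (assigning a random MAC where needed), then give the remaining resources
 * back to the PF.
 */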
2223 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2225 struct hwrm_func_cfg_input req = {0};
2226 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2233 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
2237 rc = bnxt_hwrm_func_qcaps(bp);
2242 bp->pf.active_vfs = num_vfs;
2245 * First, configure the PF to only use one TX ring. This ensures that
2246 * there are enough rings for all VFs.
2248 * If we don't do this, when we call func_alloc() later, we will lock
2249 * extra rings to the PF that won't be available during func_cfg() of the VFs.
2252 * This has been fixed with firmware versions above 20.6.54
2254 bp->pf.func_cfg_flags &=
2255 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2256 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2257 bp->pf.func_cfg_flags |=
2258 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2259 rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2264 * Now, create and register a buffer to hold forwarded VF requests
2266 req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2267 bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2268 page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
2269 if (bp->pf.vf_req_buf == NULL) {
2273 for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2274 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2275 for (i = 0; i < num_vfs; i++)
2276 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2277 (i * HWRM_MAX_REQ_LEN);
2279 rc = bnxt_hwrm_func_buf_rgtr(bp);
2283 populate_vf_func_cfg_req(bp, &req, num_vfs);
2285 bp->pf.active_vfs = 0;
2286 for (i = 0; i < num_vfs; i++) {
2287 add_random_mac_if_needed(bp, &req, i);
2289 HWRM_PREP(req, FUNC_CFG, -1, resp);
2290 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2291 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2292 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2294 /* Clear enable flag for next pass */
2295 req.enables &= ~rte_cpu_to_le_32(
2296 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2298 if (rc || resp->error_code) {
2300 "Failed to initialize VF %d\n", i);
2302 "Not all VFs available. (%d, %d)\n",
2303 rc, resp->error_code);
2307 reserve_resources_from_vf(bp, &req, i);
2308 bp->pf.active_vfs++;
2309 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2313 * Now configure the PF to use "the rest" of the resources
2314 * We're still using STD_TX_RING_MODE here, which will limit the number
2315 * of TX rings but allows QoS to function properly. Not setting this
2316 * flag would cause the PF rings to break bandwidth settings.
2318 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2322 rc = update_pf_resource_max(bp);
2329 bnxt_hwrm_func_buf_unrgtr(bp);
2333 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2335 struct hwrm_func_cfg_input req = {0};
2336 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2339 HWRM_PREP(req, FUNC_CFG, -1, resp);
2341 req.fid = rte_cpu_to_le_16(0xffff);
2342 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2343 req.evb_mode = bp->pf.evb_mode;
2345 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
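/*
 * Ask the firmware to steer the given UDP destination port to VXLAN or
 * Geneve decapsulation and cache the returned firmware port id.
 */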
2351 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2352 uint8_t tunnel_type)
2354 struct hwrm_tunnel_dst_port_alloc_input req = {0};
2355 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2358 HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, -1, resp);
2359 req.tunnel_type = tunnel_type;
2360 req.tunnel_dst_port_val = port;
2361 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2364 switch (tunnel_type) {
2365 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2366 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2367 bp->vxlan_port = port;
2369 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2370 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2371 bp->geneve_port = port;
2379 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2380 uint8_t tunnel_type)
2382 struct hwrm_tunnel_dst_port_free_input req = {0};
2383 struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2386 HWRM_PREP(req, TUNNEL_DST_PORT_FREE, -1, resp);
2387 req.tunnel_type = tunnel_type;
2388 req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2389 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2395 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2398 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2399 struct hwrm_func_cfg_input req = {0};
2402 HWRM_PREP(req, FUNC_CFG, -1, resp);
2403 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2404 req.flags = rte_cpu_to_le_32(flags);
2405 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
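/* vnic_cb helper: overwrite a VNIC's RX-mask flags with the value in flagp. */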
2411 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2413 uint32_t *flag = flagp;
2415 vnic->flags = *flag;
2418 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2420 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
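/*
 * Register the host buffer that the firmware uses to forward HWRM requests
 * issued by the VFs to the PF driver (one HWRM_MAX_REQ_LEN slot per VF).
 */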
2423 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2426 struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2427 struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2429 HWRM_PREP(req, FUNC_BUF_RGTR, -1, resp);
2431 req.req_buf_num_pages = rte_cpu_to_le_16(1);
2432 req.req_buf_page_size = rte_cpu_to_le_16(
2433 page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2434 req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2435 req.req_buf_page_addr[0] =
2436 rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
2437 if (req.req_buf_page_addr[0] == 0) {
2439 "unable to map buffer address to physical memory\n");
2443 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2450 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2453 struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2454 struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2456 HWRM_PREP(req, FUNC_BUF_UNRGTR, -1, resp);
2458 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2465 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2467 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2468 struct hwrm_func_cfg_input req = {0};
2471 HWRM_PREP(req, FUNC_CFG, -1, resp);
2472 req.fid = rte_cpu_to_le_16(0xffff);
2473 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2474 req.enables = rte_cpu_to_le_32(
2475 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2476 req.async_event_cr = rte_cpu_to_le_16(
2477 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2478 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2484 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2486 struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2487 struct hwrm_func_vf_cfg_input req = {0};
2490 HWRM_PREP(req, FUNC_VF_CFG, -1, resp);
2491 req.enables = rte_cpu_to_le_32(
2492 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2493 req.async_event_cr = rte_cpu_to_le_16(
2494 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2495 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2501 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2503 struct hwrm_func_cfg_input req = {0};
2504 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2505 uint16_t dflt_vlan, fid;
2506 uint32_t func_cfg_flags;
2509 HWRM_PREP(req, FUNC_CFG, -1, resp);
2512 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2513 fid = bp->pf.vf_info[vf].fid;
2514 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2516 fid = rte_cpu_to_le_16(0xffff);
2517 func_cfg_flags = bp->pf.func_cfg_flags;
2518 dflt_vlan = bp->vlan;
2521 req.flags = rte_cpu_to_le_32(func_cfg_flags);
2522 req.fid = rte_cpu_to_le_16(fid);
2523 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2524 req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2526 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2532 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2533 uint16_t max_bw, uint16_t enables)
2535 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2536 struct hwrm_func_cfg_input req = {0};
2539 HWRM_PREP(req, FUNC_CFG, -1, resp);
2540 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2541 req.enables |= rte_cpu_to_le_32(enables);
2542 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2543 req.max_bw = rte_cpu_to_le_32(max_bw);
2544 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2550 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2552 struct hwrm_func_cfg_input req = {0};
2553 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2556 HWRM_PREP(req, FUNC_CFG, -1, resp);
2557 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2558 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2559 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2560 req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
2562 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
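/*
 * Send the encapsulated VF request back to the firmware with
 * REJECT_FWD_RESP so the forwarded request is rejected rather than
 * executed (contrast with bnxt_hwrm_exec_fwd_resp() below).
 */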
2568 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2569 void *encaped, size_t ec_size)
2572 struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2573 struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2575 if (ec_size > sizeof(req.encap_request))
2578 HWRM_PREP(req, REJECT_FWD_RESP, -1, resp);
2580 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2581 memcpy(req.encap_request, encaped, ec_size);
2583 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2590 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2591 struct ether_addr *mac)
2593 struct hwrm_func_qcfg_input req = {0};
2594 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2597 HWRM_PREP(req, FUNC_QCFG, -1, resp);
2598 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2599 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2603 memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2607 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2608 void *encaped, size_t ec_size)
2611 struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2612 struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2614 if (ec_size > sizeof(req.encap_request))
2617 HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
2619 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2620 memcpy(req.encap_request, encaped, ec_size);
2622 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
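/*
 * Query a statistics context and accumulate its counters into the
 * per-queue fields of the rte_eth_stats structure at index idx.
 */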
2629 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
2630 struct rte_eth_stats *stats)
2633 struct hwrm_stat_ctx_query_input req = {.req_type = 0};
2634 struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
2636 HWRM_PREP(req, STAT_CTX_QUERY, -1, resp);
2638 req.stat_ctx_id = rte_cpu_to_le_32(cid);
2640 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2644 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2645 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2646 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2647 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2648 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2649 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2651 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2652 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2653 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2654 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2655 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2656 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2658 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
2659 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
2660 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
2665 int bnxt_hwrm_port_qstats(struct bnxt *bp)
2667 struct hwrm_port_qstats_input req = {0};
2668 struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2669 struct bnxt_pf_info *pf = &bp->pf;
2672 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2675 HWRM_PREP(req, PORT_QSTATS, -1, resp);
2676 req.port_id = rte_cpu_to_le_16(pf->port_id);
2677 req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
2678 req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
2679 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2684 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
2686 struct hwrm_port_clr_stats_input req = {0};
2687 struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2688 struct bnxt_pf_info *pf = &bp->pf;
2691 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2694 HWRM_PREP(req, PORT_CLR_STATS, -1, resp);
2695 req.port_id = rte_cpu_to_le_16(pf->port_id);
2696 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2701 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
2703 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2704 struct hwrm_port_led_qcaps_input req = {0};
2710 HWRM_PREP(req, PORT_LED_QCAPS, -1, resp);
2711 req.port_id = bp->pf.port_id;
2712 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2715 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
2718 bp->num_leds = resp->num_leds;
2719 memcpy(bp->leds, &resp->led0_id,
2720 sizeof(bp->leds[0]) * bp->num_leds);
2721 for (i = 0; i < bp->num_leds; i++) {
2722 struct bnxt_led_info *led = &bp->leds[i];
2724 uint16_t caps = led->led_state_caps;
2726 if (!led->led_group_id ||
2727 !BNXT_LED_ALT_BLINK_CAP(caps)) {
2736 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
2738 struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2739 struct hwrm_port_led_cfg_input req = {0};
2740 struct bnxt_led_cfg *led_cfg;
2741 uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
2742 uint16_t duration = 0;
2745 if (!bp->num_leds || BNXT_VF(bp))
2748 HWRM_PREP(req, PORT_LED_CFG, -1, resp);
2750 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
2751 duration = rte_cpu_to_le_16(500);
2753 req.port_id = bp->pf.port_id;
2754 req.num_leds = bp->num_leds;
2755 led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
2756 for (i = 0; i < bp->num_leds; i++, led_cfg++) {
2757 req.enables |= BNXT_LED_DFLT_ENABLES(i);
2758 led_cfg->led_id = bp->leds[i].led_id;
2759 led_cfg->led_state = led_state;
2760 led_cfg->led_blink_on = duration;
2761 led_cfg->led_blink_off = duration;
2762 led_cfg->led_group_id = bp->leds[i].led_group_id;
2765 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
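/* vnic_cb helper used by bnxt_vf_vnic_count(): count each active VNIC. */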
2772 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
2774 uint32_t *count = cbdata;
2776 *count = *count + 1;
2779 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
2780 struct bnxt_vnic_info *vnic __rte_unused)
2785 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
2789 bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
2790 &count, bnxt_vnic_count_hwrm_stub);
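/*
 * Fill vnic_ids with the firmware VNIC ids currently owned by the given VF
 * (FUNC_VF_VNIC_IDS_QUERY) and return how many were reported.
 */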
2795 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
2798 struct hwrm_func_vf_vnic_ids_query_input req = {0};
2799 struct hwrm_func_vf_vnic_ids_query_output *resp =
2800 bp->hwrm_cmd_resp_addr;
2803 /* First query all VNIC ids */
2804 HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, -1, resp);
2806 req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
2807 req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
2808 req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));
2810 if (req.vnic_id_tbl_addr == 0) {
2812 "unable to map VNIC ID table address to physical memory\n");
2815 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2817 RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
2819 } else if (resp->error_code) {
2820 rc = rte_le_to_cpu_16(resp->error_code);
2821 RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
2825 return rte_le_to_cpu_32(resp->vnic_id_cnt);
2829 * This function queries the VNIC IDs for the specified VF. For each VNIC
2830 * it calls vnic_cb, with cbdata, to update the necessary fields in the
2831 * vnic_info, and then calls hwrm_cb to program the updated configuration.
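 * For example, bnxt_vf_vnic_count() above passes bnxt_vnic_count() as
 * vnic_cb and a no-op hwrm_cb to simply count a VF's VNICs.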
2833 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
2834 void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
2835 int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
2837 struct bnxt_vnic_info vnic;
2839 int i, num_vnic_ids;
2844 /* First query all VNIC ids */
2845 vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
2846 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
2847 RTE_CACHE_LINE_SIZE);
2848 if (vnic_ids == NULL) {
2852 for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
2853 rte_mem_lock_page(((char *)vnic_ids) + sz);
2855 num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
2857 if (num_vnic_ids < 0)
2858 return num_vnic_ids;
2860 /* Retrieve each VNIC, apply vnic_cb to it, then reprogram it via hwrm_cb */
2862 for (i = 0; i < num_vnic_ids; i++) {
2863 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
2864 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
2865 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
2868 if (vnic.mru <= 4) /* Indicates unallocated */
2871 vnic_cb(&vnic, cbdata);
2873 rc = hwrm_cb(bp, &vnic);
2883 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
2886 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2887 struct hwrm_func_cfg_input req = {0};
2890 HWRM_PREP(req, FUNC_CFG, -1, resp);
2891 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2892 req.enables |= rte_cpu_to_le_32(
2893 HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
2894 req.vlan_antispoof_mode = on ?
2895 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
2896 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
2897 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
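/*
 * Find the firmware id of the VF's default VNIC by walking the VNIC ids
 * returned for that VF; returns the id, or logs an error if none of the
 * VNICs is marked func_default.
 */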
2903 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
2905 struct bnxt_vnic_info vnic;
2908 int num_vnic_ids, i;
2912 vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
2913 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
2914 RTE_CACHE_LINE_SIZE);
2915 if (vnic_ids == NULL) {
2920 for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
2921 rte_mem_lock_page(((char *)vnic_ids) + sz);
2923 rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
2929 * Loop through to find the default VNIC ID.
2930 * TODO: The easier way would be to obtain the resp->dflt_vnic_id
2931 * by sending the hwrm_func_qcfg command to the firmware.
2933 for (i = 0; i < num_vnic_ids; i++) {
2934 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
2935 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
2936 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
2937 bp->pf.first_vf_id + vf);
2940 if (vnic.func_default) {
2942 return vnic.fw_vnic_id;
2945 /* Could not find a default VNIC. */
2946 RTE_LOG(ERR, PMD, "No default VNIC\n");