/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#define HWRM_CMD_TIMEOUT	2000
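/*
 * HWRM_CMD_TIMEOUT is a poll-iteration count, not a duration in ms:
 * bnxt_hwrm_send_message_locked() below checks for a valid response up
 * to this many times before declaring the command timed out.
 */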
/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e., a timeout), and a positive, non-zero HWRM error code if the
 * command was rejected by the ChiMP firmware processor.
 */
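/*
 * Illustrative caller-side sketch (not code from this file; the two
 * handle_*() helpers are hypothetical). Per the convention above, the
 * sign of the return value distinguishes the failure source:
 *
 *	rc = bnxt_hwrm_func_reset(bp);
 *	if (rc < 0)
 *		handle_channel_failure();	(timeout on the channel)
 *	else if (rc > 0)
 *		handle_hwrm_error(rc);		(error code from the ChiMP)
 */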
static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
					 uint32_t msg_len)
{
	unsigned int i;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;
	uint8_t *bar;
	uint8_t *valid;

	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		*(volatile uint32_t *)bar = *data;
		data++;
	}

	/* Zero the rest of the request space */
	for (; i < bp->max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		*(volatile uint32_t *)bar = 0;
	}

	/* Ring channel doorbell */
	bar = (uint8_t *)bp->bar0 + 0x100;
	*(volatile uint32_t *)bar = 1;

	/* Poll for the valid bit */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len */
		if (resp->resp_len && resp->resp_len <=
				bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)
				break;
		}
		rte_delay_us(600);
	}

	if (i >= HWRM_CMD_TIMEOUT) {
		RTE_LOG(ERR, PMD, "Error sending msg %x\n",
			req->req_type);
		return -1;
	}

	return 0;
}
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
{
	int rc;

	rte_spinlock_lock(&bp->hwrm_lock);
	rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
	rte_spinlock_unlock(&bp->hwrm_lock);

	return rc;
}
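/*
 * The _locked variant above exists for callers that already hold
 * bp->hwrm_lock, e.g. bnxt_hwrm_ver_get(), which may swap out the
 * response buffer while holding the lock.
 */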
#define HWRM_PREP(req, type, cr, resp) \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
	req.cmpl_ring = rte_cpu_to_le_16(cr); \
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
	req.target_id = rte_cpu_to_le_16(0xffff); \
	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)
#define HWRM_CHECK_RESULT \
	{ \
		if (rc) { \
			RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
				__func__, rc); \
			return rc; \
		} \
		if (resp->error_code) { \
			rc = rte_le_to_cpu_16(resp->error_code); \
			RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
			return rc; \
		} \
	}
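/*
 * Typical wrapper structure using the two macros above (a sketch of the
 * pattern used throughout this file, shown here for FUNC_RESET):
 *
 *	struct hwrm_func_reset_input req = {.req_type = 0 };
 *	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *	HWRM_PREP(req, FUNC_RESET, -1, resp);
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT;
 */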
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.mask = 0;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t mask = 0;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	/* FIXME: add multicast flag, when multicast adding options is
	 * supported by ethtool.
	 */
	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
				    mask);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
int bnxt_hwrm_clear_filter(struct bnxt *bp,
			   struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);

	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	filter->fw_l2_filter_id = -1;

	return 0;
}
int bnxt_hwrm_set_filter(struct bnxt *bp,
			 struct bnxt_vnic_info *vnic,
			 struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);

	return rc;
}
int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd)
{
	int rc;
	struct hwrm_exec_fwd_resp_input req = {.req_type = 0 };
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);

	memcpy(req.encap_request, fwd_cmd,
	       sizeof(req.encap_request));

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QCAPS, -1, resp);

	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	if (BNXT_PF(bp)) {
		struct bnxt_pf_info *pf = &bp->pf;

		pf->fw_fid = rte_le_to_cpu_32(resp->fid);
		pf->port_id = resp->port_id;
		memcpy(pf->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
		pf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
		pf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
		pf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
		pf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
		pf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
		pf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
		pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		pf->max_vfs = rte_le_to_cpu_16(resp->max_vfs);
	} else {
		struct bnxt_vf_info *vf = &bp->vf;

		vf->fw_fid = rte_le_to_cpu_32(resp->fid);
		memcpy(vf->mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
		vf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
		vf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
		vf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
		vf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
		vf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
		vf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	}

	return rc;
}
int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_RESET, -1, resp);

	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
				   uint32_t *vf_req_fwd)
{
	int rc;
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)
		return 0;

	HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
	req.flags = flags;
	req.enables = HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER;
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

	memcpy(req.vf_req_fwd, vf_req_fwd, sizeof(req.vf_req_fwd));

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->flags |= BNXT_FLAG_REGISTERED;

	return rc;
}
int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_ver_get_input req = {.req_type = 0 };
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t my_version;
	uint32_t fw_version;
	uint16_t max_resp_len;
	char type[RTE_MEMZONE_NAMESIZE];

	HWRM_PREP(req, VER_GET, -1, resp);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	/*
	 * Hold the lock since we may be adjusting the response pointers.
	 */
	rte_spinlock_lock(&bp->hwrm_lock);
	rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));
	if (rc)
		goto error;

	RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
		resp->hwrm_intf_maj, resp->hwrm_intf_min,
		resp->hwrm_intf_upd,
		resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
	RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

	my_version = HWRM_VERSION_MAJOR << 16;
	my_version |= HWRM_VERSION_MINOR << 8;
	my_version |= HWRM_VERSION_UPDATE;

	fw_version = resp->hwrm_intf_maj << 16;
	fw_version |= resp->hwrm_intf_min << 8;
	fw_version |= resp->hwrm_intf_upd;
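	/*
	 * Both values pack as (major << 16) | (minor << 8) | update, e.g.
	 * interface version 1.2.2 becomes 0x010202, so the plain integer
	 * comparisons below order versions correctly.
	 */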
	if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
		RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
		rc = -EINVAL;
		goto error;
	}

	if (my_version != fw_version) {
		RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
		if (my_version < fw_version) {
			RTE_LOG(INFO, PMD,
				"Firmware API version is newer than driver.\n");
			RTE_LOG(INFO, PMD,
				"The driver may be missing features.\n");
		} else {
			RTE_LOG(INFO, PMD,
				"Firmware API version is older than driver.\n");
			RTE_LOG(INFO, PMD,
				"Not all driver features may be functional.\n");
		}
	}

	if (bp->max_req_len > resp->max_req_win_len) {
		RTE_LOG(ERR, PMD, "Unsupported request length\n");
		rc = -EINVAL;
	}
	bp->max_req_len = resp->max_req_win_len;
	max_resp_len = resp->max_resp_len;
	if (bp->max_resp_len != max_resp_len) {
		sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
			bp->pdev->addr.domain, bp->pdev->addr.bus,
			bp->pdev->addr.devid, bp->pdev->addr.function);

		rte_free(bp->hwrm_cmd_resp_addr);

		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
		if (bp->hwrm_cmd_resp_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		bp->hwrm_cmd_resp_dma_addr =
			rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
		bp->max_resp_len = max_resp_len;
	}

error:
	rte_spinlock_unlock(&bp->hwrm_lock);
	return rc;
}
int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
	int rc;
	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))
		return 0;

	HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
	req.flags = flags;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->flags &= ~BNXT_FLAG_REGISTERED;

	return rc;
}
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
	int rc = 0;
	struct hwrm_port_phy_cfg_input req = {.req_type = 0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

	req.flags = conf->phy_flags;
	if (conf->link_up) {
		req.force_link_speed = conf->link_speed;
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (conf->auto_mode == HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE) {
			req.flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
		} else {
			req.auto_mode = conf->auto_mode;
			req.enables |=
				HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
			req.auto_link_speed_mask = conf->auto_link_speed_mask;
			req.enables |=
			    HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
			req.auto_link_speed = conf->auto_link_speed;
			req.enables |=
				HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
		}
		req.auto_duplex = conf->duplex;
		req.enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause)
			req.enables |=
				HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		else
			req.enables |=
				HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
		req.force_pause = conf->force_pause;
		if (req.force_pause)
			req.enables |=
				HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
	} else {
		req.flags &= ~HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		req.flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN;
		req.force_link_speed = 0;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
{
	int rc = 0;
	struct hwrm_port_phy_qcfg_input req = {.req_type = 0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	link_info->phy_link_status = resp->link;
	if (link_info->phy_link_status == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) {
		link_info->link_up = 1;
		link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
	} else {
		link_info->link_up = 0;
		link_info->link_speed = 0;
	}
	link_info->duplex = resp->duplex;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;

	return rc;
}
int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;
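	/*
	 * GET_QUEUE_INFO(x) below relies on token pasting; e.g.
	 * GET_QUEUE_INFO(0) expands to:
	 *
	 *	bp->cos_queue[0].id = resp->queue_id0;
	 *	bp->cos_queue[0].profile = resp->queue_id0_service_profile;
	 */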
#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

	GET_QUEUE_INFO(0);
	GET_QUEUE_INFO(1);
	GET_QUEUE_INFO(2);
	GET_QUEUE_INFO(3);
	GET_QUEUE_INFO(4);
	GET_QUEUE_INFO(5);
	GET_QUEUE_INFO(6);
	GET_QUEUE_INFO(7);

	return rc;
}
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id)
{
	int rc = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC, -1, resp);

	req.enables = rte_cpu_to_le_32(0);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.queue_id = bp->cos_queue[0].id;
		/* FALLTHROUGH */
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id =
		    rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
		req.length = rte_cpu_to_le_32(ring->ring_size);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
		break;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		req.length = rte_cpu_to_le_32(ring->ring_size);
		break;
	default:
		RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
			ring_type);
		return -1;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
			return rc;
		}
	}

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
	return rc;
}
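/*
 * Illustrative call (a sketch; the index and stats context come from the
 * caller's ring bookkeeping): allocating a TX ring whose completion ring
 * is already recorded in bp->grp_info[idx]:
 *
 *	rc = bnxt_hwrm_ring_alloc(bp, txr->tx_ring_struct,
 *				  HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
 *				  idx, stats_ctx_id);
 */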
int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
{
	int rc;
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_FREE, -1, resp);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
			RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
				rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
			return rc;
		}
	}
	return 0;
}
int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
	int rc;
	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);

	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->grp_info[idx].fw_grp_id =
	    rte_le_to_cpu_16(resp->ring_group_id);

	return rc;
}
int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
	int rc;
	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_FREE, -1, resp);

	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;

	return rc;
}
int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	int rc = 0;
	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);

	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
		return rc;

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
			     struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
	int rc;
	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);

	req.update_period_ms = rte_cpu_to_le_32(1000);

	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
	req.stats_dma_addr =
	    rte_cpu_to_le_64(cpr->hw_stats_map);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
	bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

	return rc;
}
int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
			    struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
	int rc;
	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_FREE, -1, resp);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
	bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

	return rc;
}
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0, i, j;
	struct hwrm_vnic_alloc_input req = {.req_type = 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
		if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
			RTE_LOG(ERR, PMD,
				"Not enough ring groups avail:%x req:%x\n", j,
				(vnic->end_grp_id - vnic->start_grp_id) + 1);
			break;
		}
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
	}

	vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE;

	HWRM_PREP(req, VNIC_ALLOC, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
	return rc;
}
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_CFG, -1, resp);

	/* Only RSS supported for now; TBD: COS & LB */
	req.enables =
	    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
			     HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
			     HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.dflt_ring_grp =
		rte_cpu_to_le_16(bp->grp_info[vnic->start_grp_id].fw_grp_id);
	req.rss_rule = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
	req.cos_rule = rte_cpu_to_le_16(0xffff);
	req.lb_rule = rte_cpu_to_le_16(0xffff);
	req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				   ETHER_CRC_LEN + VLAN_TAG_SIZE);
	if (vnic->func_default)
		req.flags = rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
	if (vnic->vlan_strip)
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_rss_cos_lb_ctx = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);

	return rc;
}
int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);

	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;

	return rc;
}
int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_free_input req = {.req_type = 0 };
	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
		return rc;

	HWRM_PREP(req, VNIC_FREE, -1, resp);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_vnic_id = INVALID_HW_RING_ID;

	return rc;
}
int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);

	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

	req.ring_grp_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_table_dma_addr);
	req.hash_key_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
	req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
/*
 * HWRM utility functions
 */
int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_clear(bp, cpr);
		if (rc)
			return rc;
	}
	return 0;
}
int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	int rc;
	unsigned int i;
	struct bnxt_cp_ring_info *cpr;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		unsigned int idx = i + 1;

		if (i >= bp->rx_cp_nr_rings)
			cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
		else
			cpr = bp->rx_queues[i]->cp_ring;
		if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
			rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
			if (rc)
				return rc;
		}
	}
	return 0;
}
int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;
		unsigned int idx = i + 1;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);
		if (rc)
			return rc;
	}
	return rc;
}
int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
	int rc;
	unsigned int i;

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		unsigned int idx = i + 1;

		if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
			RTE_LOG(ERR, PMD,
				"Attempt to free invalid ring group %d\n",
				idx);
			continue;
		}

		rc = bnxt_hwrm_ring_grp_free(bp, idx);
		if (rc)
			return rc;
	}
	return 0;
}
static void bnxt_free_cp_ring(struct bnxt *bp,
			      struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

	bnxt_hwrm_ring_free(bp, cp_ring,
			    HWRM_RING_FREE_INPUT_RING_TYPE_CMPL);
	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
	bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
			sizeof(*cpr->cp_desc_ring));
	cpr->cp_raw_cons = 0;
}
int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
		unsigned int idx = bp->rx_cp_nr_rings + i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_TX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			memset(txr->tx_desc_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_desc_ring));
			memset(txr->tx_buf_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_buf_ring));
			txr->tx_prod = 0;
			txr->tx_cons = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, idx);
	}

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
		struct bnxt_ring *ring = rxr->rx_ring_struct;
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
		unsigned int idx = i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_RX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
			memset(rxr->rx_desc_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_desc_ring));
			memset(rxr->rx_buf_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_buf_ring));
			rxr->rx_prod = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, idx);
	}

	/* Default completion ring */
	{
		struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, 0);
	}

	return rc;
}
int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		unsigned int idx = i + 1;

		if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
		    bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
			continue;

		rc = bnxt_hwrm_ring_grp_alloc(bp, idx);

		if (rc)
			return rc;
	}
	return rc;
}
void bnxt_free_hwrm_resources(struct bnxt *bp)
{
	/* Release the HWRM response buffer */
	rte_free(bp->hwrm_cmd_resp_addr);
	bp->hwrm_cmd_resp_addr = NULL;
	bp->hwrm_cmd_resp_dma_addr = 0;
}
int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
	struct rte_pci_device *pdev = bp->pdev;
	char type[RTE_MEMZONE_NAMESIZE];

	sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	bp->max_req_len = HWRM_MAX_REQ_LEN;
	bp->max_resp_len = HWRM_MAX_RESP_LEN;
	bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
	if (bp->hwrm_cmd_resp_addr == NULL)
		return -ENOMEM;
	bp->hwrm_cmd_resp_dma_addr =
		rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
	rte_spinlock_init(&bp->hwrm_lock);

	return 0;
}
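/*
 * Note: this must run before the first HWRM command is sent, since every
 * wrapper writes its response through bp->hwrm_cmd_resp_addr; the buffer
 * starts at HWRM_MAX_RESP_LEN and bnxt_hwrm_ver_get() may later resize it
 * to the length the firmware actually reports.
 */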
int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		rc = bnxt_hwrm_clear_filter(bp, filter);
		if (rc)
			break;
	}
	return rc;
}
int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		rc = bnxt_hwrm_set_filter(bp, vnic, filter);
		if (rc)
			break;
	}
	return rc;
}
void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
	struct bnxt_vnic_info *vnic;
	unsigned int i;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];
	bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);

	/* VNIC resources */
	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		bnxt_clear_hwrm_vnic_filters(bp, vnic);

		bnxt_hwrm_vnic_ctx_free(bp, vnic);
		bnxt_hwrm_vnic_free(bp, vnic);
	}
	/* Ring resources */
	bnxt_free_all_hwrm_rings(bp);
	bnxt_free_all_hwrm_ring_grps(bp);
	bnxt_free_all_hwrm_stat_ctxs(bp);
}
static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
{
	uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	switch (conf_link_speed) {
	case ETH_LINK_SPEED_10M_HD:
	case ETH_LINK_SPEED_100M_HD:
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
	}
	return hw_link_duplex;
}
static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
{
	uint16_t eth_link_speed = 0;

	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
		return ETH_LINK_SPEED_AUTONEG;

	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
	case ETH_LINK_SPEED_100M:
	case ETH_LINK_SPEED_100M_HD:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
		break;
	case ETH_LINK_SPEED_1G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
		break;
	case ETH_LINK_SPEED_2_5G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
		break;
	case ETH_LINK_SPEED_10G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
		break;
	case ETH_LINK_SPEED_20G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
		break;
	case ETH_LINK_SPEED_25G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
		break;
	case ETH_LINK_SPEED_40G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
		break;
	case ETH_LINK_SPEED_50G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
		break;
	default:
		RTE_LOG(ERR, PMD,
			"Unsupported link speed %d; default to AUTO\n",
			conf_link_speed);
		break;
	}
	return eth_link_speed;
}
#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
		ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
		ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
		ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
{
	uint32_t one_speed;

	if (link_speed == ETH_LINK_SPEED_AUTONEG)
		return 0;

	if (link_speed & ETH_LINK_SPEED_FIXED) {
		one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;

		/* Fail if more than one speed bit is set */
		if (one_speed & (one_speed - 1)) {
			RTE_LOG(ERR, PMD,
				"Invalid advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
		if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
			RTE_LOG(ERR, PMD,
				"Unsupported advertised speed (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	} else {
		if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
			RTE_LOG(ERR, PMD,
				"Unsupported advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	}
	return 0;
}
static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
{
	uint16_t ret = 0;

	if (link_speed == ETH_LINK_SPEED_AUTONEG)
		link_speed = BNXT_SUPPORTED_SPEEDS;

	if (link_speed & ETH_LINK_SPEED_100M)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_100M_HD)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_1G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
	if (link_speed & ETH_LINK_SPEED_2_5G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
	if (link_speed & ETH_LINK_SPEED_10G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
	if (link_speed & ETH_LINK_SPEED_20G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
	if (link_speed & ETH_LINK_SPEED_25G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
	if (link_speed & ETH_LINK_SPEED_40G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
	if (link_speed & ETH_LINK_SPEED_50G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;

	return ret;
}
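/*
 * Worked example: link_speeds = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G
 * yields (HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB |
 * HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB), limiting the
 * autoneg advertisement to exactly those speeds.
 */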
static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
{
	uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;

	switch (hw_link_speed) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
		eth_link_speed = ETH_SPEED_NUM_100M;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
		eth_link_speed = ETH_SPEED_NUM_1G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
		eth_link_speed = ETH_SPEED_NUM_2_5G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
		eth_link_speed = ETH_SPEED_NUM_10G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
		eth_link_speed = ETH_SPEED_NUM_20G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
		eth_link_speed = ETH_SPEED_NUM_25G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
		eth_link_speed = ETH_SPEED_NUM_40G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
		eth_link_speed = ETH_SPEED_NUM_50G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
	default:
		RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
			hw_link_speed);
		break;
	}
	return eth_link_speed;
}
static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
{
	uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;

	switch (hw_link_duplex) {
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	default:
		RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
			hw_link_duplex);
		break;
	}
	return eth_link_duplex;
}
int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Get link config failed with rc %d\n", rc);
		goto exit;
	}
	if (link_info->link_up)
		link->link_speed =
			bnxt_parse_hw_link_speed(link_info->link_speed);
	else
		link->link_speed = ETH_LINK_SPEED_10M;
	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
	link->link_status = link_info->link_up;
	link->link_autoneg = link_info->auto_mode ==
		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
		ETH_LINK_SPEED_FIXED : ETH_LINK_SPEED_AUTONEG;

exit:
	return rc;
}
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
	int rc = 0;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_link_info link_req;
	uint16_t speed;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
		return 0;

	rc = bnxt_valid_link_speed(dev_conf->link_speeds,
			bp->eth_dev->data->port_id);
	if (rc)
		goto error;

	memset(&link_req, 0, sizeof(link_req));
	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
	link_req.link_up = link_up;
	if (speed == 0) {
		link_req.phy_flags =
			HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		link_req.auto_mode =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_OR_BELOW;
		link_req.auto_link_speed_mask =
			bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
		link_req.auto_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_50GB;
	} else {
		link_req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
		link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE |
			HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
		link_req.link_speed = speed;
	}
	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
	link_req.auto_pause = bp->link_info.auto_pause;
	link_req.force_pause = bp->link_info.force_pause;

	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Set link config failed with rc %d\n", rc);
	}

error:
	return rc;
}
int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(req, FUNC_QCFG, -1, resp);
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	if (BNXT_VF(bp)) {
		struct bnxt_vf_info *vf = &bp->vf;

		/* Hard-coded 0xfff VLAN ID mask */
		vf->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
	}

	switch (resp->port_partition_type) {
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	default:
		bp->port_partition_type = 0;
		break;
	}

	return rc;
}
1525 bp->port_partition_type = 0;