/*-
 *   Copyright(c) Broadcom Limited.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

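/*
 * Maximum number of valid-bit poll iterations in
 * bnxt_hwrm_send_message_locked() before a request is treated as timed out.
 */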
#define HWRM_CMD_TIMEOUT		2000

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive, non-zero HWRM error code if the
 * ChiMP failed the command.
 */

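/*
 * A minimal sketch of how a caller can act on those return values
 * (hypothetical snippet, not part of this file):
 *
 *	rc = bnxt_hwrm_func_reset(bp);
 *	if (rc == -1)
 *		RTE_LOG(ERR, PMD, "HWRM channel timed out\n");
 *	else if (rc > 0)
 *		RTE_LOG(ERR, PMD, "Command rejected, HWRM error %d\n", rc);
 */
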
static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;

	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(*data, bar);

	/* Zero the rest of the request space */
	for (; i < bp->max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;

	/* Ring channel doorbell */
	bar = (uint8_t *)bp->bar0 + 0x100;

	/* Poll for the valid bit */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len */
		if (resp->resp_len && resp->resp_len <=
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)

	if (i >= HWRM_CMD_TIMEOUT) {
		RTE_LOG(ERR, PMD, "Error sending msg %x\n",

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
	rte_spinlock_lock(&bp->hwrm_lock);
	rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
	rte_spinlock_unlock(&bp->hwrm_lock);

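/*
 * HWRM_PREP() below clears the shared response buffer and fills the common
 * request header fields (request type, completion ring, sequence id, target
 * id and response DMA address) before a command is sent.
 */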
#define HWRM_PREP(req, type, cr, resp) \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
	req.cmpl_ring = rte_cpu_to_le_16(cr); \
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
	req.target_id = rte_cpu_to_le_16(0xffff); \
	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)

#define HWRM_CHECK_RESULT \
		RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \

	if (resp->error_code) { \
		rc = rte_le_to_cpu_16(resp->error_code); \
		RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \

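/*
 * Every bnxt_hwrm_*() command below follows the same shape; a minimal
 * sketch (mirroring bnxt_hwrm_func_reset() further down):
 *
 *	struct hwrm_func_reset_input req = {.req_type = 0 };
 *	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *	HWRM_PREP(req, FUNC_RESET, -1, resp);
 *	// ...fill in the request-specific fields of req...
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT;
 */
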
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	/* FIXME: add the multicast flag once adding multicast addresses is
	 * supported.
	 */
	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

int bnxt_hwrm_clear_filter(struct bnxt *bp,
			   struct bnxt_filter_info *filter)
	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);

	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	filter->fw_l2_filter_id = -1;

int bnxt_hwrm_set_filter(struct bnxt *bp,
			 struct bnxt_vnic_info *vnic,
			 struct bnxt_filter_info *filter)
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr,
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);

int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd)
	struct hwrm_exec_fwd_resp_input req = {.req_type = 0 };
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);

	memcpy(req.encap_request, fwd_cmd,
	       sizeof(req.encap_request));

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QCAPS, -1, resp);

	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
		struct bnxt_pf_info *pf = &bp->pf;

		pf->fw_fid = rte_le_to_cpu_32(resp->fid);
		pf->port_id = resp->port_id;
		memcpy(pf->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
		pf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
		pf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
		pf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
		pf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
		pf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
		pf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
		pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		pf->max_vfs = rte_le_to_cpu_16(resp->max_vfs);

		struct bnxt_vf_info *vf = &bp->vf;

		vf->fw_fid = rte_le_to_cpu_32(resp->fid);
		memcpy(vf->mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
		vf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
		vf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
		vf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
		vf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
		vf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
		vf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);

int bnxt_hwrm_func_reset(struct bnxt *bp)
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_RESET, -1, resp);

	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
				   uint32_t *vf_req_fwd)
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)

	HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);

	req.enables = HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
		      HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD;
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

	memcpy(req.vf_req_fwd, vf_req_fwd, sizeof(req.vf_req_fwd));

	req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);	/* TODO: Use MACRO */

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	bp->flags |= BNXT_FLAG_REGISTERED;

int bnxt_hwrm_ver_get(struct bnxt *bp)
	struct hwrm_ver_get_input req = {.req_type = 0 };
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t max_resp_len;
	char type[RTE_MEMZONE_NAMESIZE];

	HWRM_PREP(req, VER_GET, -1, resp);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	/*
	 * Hold the lock since we may be adjusting the response pointers.
	 */
	rte_spinlock_lock(&bp->hwrm_lock);
	rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));

	RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
		resp->hwrm_intf_maj, resp->hwrm_intf_min,
		resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
	RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

	my_version = HWRM_VERSION_MAJOR << 16;
	my_version |= HWRM_VERSION_MINOR << 8;
	my_version |= HWRM_VERSION_UPDATE;

	fw_version = resp->hwrm_intf_maj << 16;
	fw_version |= resp->hwrm_intf_min << 8;
	fw_version |= resp->hwrm_intf_upd;

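	/*
	 * Example: interface version 1.2.2 packs to
	 * (1 << 16) | (2 << 8) | 2 = 0x010202, so the comparisons below
	 * order versions numerically.
	 */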
	if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
		RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");

	if (my_version != fw_version) {
		RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
		if (my_version < fw_version) {
				"Firmware API version is newer than driver.\n");
				"The driver may be missing features.\n");
				"Firmware API version is older than driver.\n");
				"Not all driver features may be functional.\n");

	if (bp->max_req_len > resp->max_req_win_len) {
		RTE_LOG(ERR, PMD, "Unsupported request length\n");
	bp->max_req_len = resp->max_req_win_len;

	max_resp_len = resp->max_resp_len;
	if (bp->max_resp_len != max_resp_len) {
		sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
			bp->pdev->addr.domain, bp->pdev->addr.bus,
			bp->pdev->addr.devid, bp->pdev->addr.function);

		rte_free(bp->hwrm_cmd_resp_addr);

		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
		if (bp->hwrm_cmd_resp_addr == NULL) {
		bp->hwrm_cmd_resp_dma_addr =
			rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
		bp->max_resp_len = max_resp_len;

	rte_spinlock_unlock(&bp->hwrm_lock);

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))

	HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	bp->flags &= ~BNXT_FLAG_REGISTERED;

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

		req.flags = rte_cpu_to_le_32(conf->phy_flags);
		req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (!conf->link_speed) {
			req.auto_mode |= conf->auto_mode;
			enables = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
			req.auto_link_speed_mask = conf->auto_link_speed_mask;
				HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
			req.auto_link_speed = bp->link_info.auto_link_speed;
				HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
		req.auto_duplex = conf->duplex;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		req.force_pause = conf->force_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause && !req.force_pause)
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

		req.enables = rte_cpu_to_le_32(enables);
			rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN);
		RTE_LOG(INFO, PMD, "Force Link Down\n");

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	link_info->phy_link_status = resp->link;
	if (link_info->phy_link_status !=
	    HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK) {
		link_info->link_up = 1;
		link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
		link_info->link_up = 0;
		link_info->link_speed = 0;
	link_info->duplex = resp->duplex;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

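/*
 * GET_QUEUE_INFO(0), for example, expands to:
 *	bp->cos_queue[0].id = resp->queue_id0;
 *	bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 */
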
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id)
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC, -1, resp);

	req.enables = rte_cpu_to_le_32(0);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);

	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.queue_id = bp->cos_queue[0].id;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
			rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
		req.length = rte_cpu_to_le_32(ring->ring_size);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		req.length = rte_cpu_to_le_32(ring->ring_size);
		RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
			RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);

int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_FREE, -1, resp);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
			RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
			RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);

	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	bp->grp_info[idx].fw_grp_id =
		rte_le_to_cpu_16(resp->ring_group_id);

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_FREE, -1, resp);

	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);

	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
			     struct bnxt_cp_ring_info *cpr, unsigned int idx)
	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);

	req.update_period_ms = rte_cpu_to_le_32(1000);

	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
		rte_cpu_to_le_64(cpr->hw_stats_map);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
	bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
			    struct bnxt_cp_ring_info *cpr, unsigned int idx)
	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_FREE, -1, resp);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
	bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_vnic_alloc_input req = { 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
		if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
				"Not enough ring groups avail:%x req:%x\n", j,
				(vnic->end_grp_id - vnic->start_grp_id) + 1);
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;

	vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE;

	HWRM_PREP(req, VNIC_ALLOC, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_CFG, -1, resp);

	/* Only RSS support for now TBD: COS & LB */
		rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
				 HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
				 HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
		rte_cpu_to_le_16(bp->grp_info[vnic->start_grp_id].fw_grp_id);
	req.rss_rule = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
	req.cos_rule = rte_cpu_to_le_16(0xffff);
	req.lb_rule = rte_cpu_to_le_16(0xffff);

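	/* MRU covers the MTU plus Ethernet header, CRC and one VLAN tag. */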
	req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				   ETHER_CRC_LEN + VLAN_TAG_SIZE);
	if (vnic->func_default)
	if (vnic->vlan_strip)
			rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	vnic->fw_rss_cos_lb_ctx = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);

	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;

int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_vnic_free_input req = {.req_type = 0 };
	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)

	HWRM_PREP(req, VNIC_FREE, -1, resp);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	vnic->fw_vnic_id = INVALID_HW_RING_ID;

int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic)
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);

	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

	req.ring_grp_tbl_addr =
		rte_cpu_to_le_64(vnic->rss_table_dma_addr);
	req.hash_key_tbl_addr =
		rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
	req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

/*
 * HWRM utility functions
 */

int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			rxq = bp->rx_queues[i];

		rc = bnxt_hwrm_stat_clear(bp, cpr);

int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
	struct bnxt_cp_ring_info *cpr;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		unsigned int idx = i + 1;

		if (i >= bp->rx_cp_nr_rings)
			cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
			cpr = bp->rx_queues[i]->cp_ring;
		if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
			rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);

int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;
		unsigned int idx = i + 1;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			rxq = bp->rx_queues[i];

		rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);

int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		unsigned int idx = i + 1;

		if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
				"Attempt to free invalid ring group %d\n",

		rc = bnxt_hwrm_ring_grp_free(bp, idx);

static void bnxt_free_cp_ring(struct bnxt *bp,
			      struct bnxt_cp_ring_info *cpr, unsigned int idx)
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

	bnxt_hwrm_ring_free(bp, cp_ring,
			    HWRM_RING_FREE_INPUT_RING_TYPE_CMPL);
	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
	bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
			sizeof(*cpr->cp_desc_ring));
	cpr->cp_raw_cons = 0;

int bnxt_free_all_hwrm_rings(struct bnxt *bp)
	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
		unsigned int idx = bp->rx_cp_nr_rings + i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					    HWRM_RING_FREE_INPUT_RING_TYPE_TX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			memset(txr->tx_desc_ring, 0,
			       txr->tx_ring_struct->ring_size *
			       sizeof(*txr->tx_desc_ring));
			memset(txr->tx_buf_ring, 0,
			       txr->tx_ring_struct->ring_size *
			       sizeof(*txr->tx_buf_ring));

		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, idx);

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
		struct bnxt_ring *ring = rxr->rx_ring_struct;
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
		unsigned int idx = i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					    HWRM_RING_FREE_INPUT_RING_TYPE_RX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
			memset(rxr->rx_desc_ring, 0,
			       rxr->rx_ring_struct->ring_size *
			       sizeof(*rxr->rx_desc_ring));
			memset(rxr->rx_buf_ring, 0,
			       rxr->rx_ring_struct->ring_size *
			       sizeof(*rxr->rx_buf_ring));

		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, idx);

	/* Default completion ring */
		struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, 0);

int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		unsigned int idx = i + 1;

		if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
		    bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)

		rc = bnxt_hwrm_ring_grp_alloc(bp, idx);

void bnxt_free_hwrm_resources(struct bnxt *bp)
	/* Release memzone */
	rte_free(bp->hwrm_cmd_resp_addr);
	bp->hwrm_cmd_resp_addr = NULL;
	bp->hwrm_cmd_resp_dma_addr = 0;

int bnxt_alloc_hwrm_resources(struct bnxt *bp)
	struct rte_pci_device *pdev = bp->pdev;
	char type[RTE_MEMZONE_NAMESIZE];

	sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	bp->max_req_len = HWRM_MAX_REQ_LEN;
	bp->max_resp_len = HWRM_MAX_RESP_LEN;
	bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
	if (bp->hwrm_cmd_resp_addr == NULL)
	bp->hwrm_cmd_resp_dma_addr =
		rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);

	rte_spinlock_init(&bp->hwrm_lock);

int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct bnxt_filter_info *filter;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		rc = bnxt_hwrm_clear_filter(bp, filter);

int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct bnxt_filter_info *filter;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		rc = bnxt_hwrm_set_filter(bp, vnic, filter);

void bnxt_free_all_hwrm_resources(struct bnxt *bp)
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)

	vnic = &bp->vnic_info[0];
	bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);

	/* VNIC resources */
	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		bnxt_clear_hwrm_vnic_filters(bp, vnic);

		bnxt_hwrm_vnic_ctx_free(bp, vnic);
		bnxt_hwrm_vnic_free(bp, vnic);

	/* Ring resources */
	bnxt_free_all_hwrm_rings(bp);
	bnxt_free_all_hwrm_ring_grps(bp);
	bnxt_free_all_hwrm_stat_ctxs(bp);

static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
	uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	switch (conf_link_speed) {
	case ETH_LINK_SPEED_10M_HD:
	case ETH_LINK_SPEED_100M_HD:
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
	return hw_link_duplex;

static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
	uint16_t eth_link_speed = 0;

	if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
		return ETH_LINK_SPEED_AUTONEG;

	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
	case ETH_LINK_SPEED_100M:
	case ETH_LINK_SPEED_100M_HD:
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
	case ETH_LINK_SPEED_1G:
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
	case ETH_LINK_SPEED_2_5G:
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
	case ETH_LINK_SPEED_10G:
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
	case ETH_LINK_SPEED_20G:
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
	case ETH_LINK_SPEED_25G:
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
	case ETH_LINK_SPEED_40G:
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
	case ETH_LINK_SPEED_50G:
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
			"Unsupported link speed %d; default to AUTO\n",

	return eth_link_speed;

#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
	ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
	ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
	ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)

static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
	if (link_speed == ETH_LINK_SPEED_AUTONEG)

	if (link_speed & ETH_LINK_SPEED_FIXED) {
		one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;

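		/* A valid fixed speed has exactly one speed bit set. */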
		if (one_speed & (one_speed - 1)) {
				"Invalid advertised speeds (%u) for port %u\n",
				link_speed, port_id);
		if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
				"Unsupported advertised speed (%u) for port %u\n",
				link_speed, port_id);
		if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
				"Unsupported advertised speeds (%u) for port %u\n",
				link_speed, port_id);

static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
	if (link_speed == ETH_LINK_SPEED_AUTONEG)
		link_speed = BNXT_SUPPORTED_SPEEDS;

	if (link_speed & ETH_LINK_SPEED_100M)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_100M_HD)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_1G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
	if (link_speed & ETH_LINK_SPEED_2_5G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
	if (link_speed & ETH_LINK_SPEED_10G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
	if (link_speed & ETH_LINK_SPEED_20G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
	if (link_speed & ETH_LINK_SPEED_25G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
	if (link_speed & ETH_LINK_SPEED_40G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
	if (link_speed & ETH_LINK_SPEED_50G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;

static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
	uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;

	switch (hw_link_speed) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
		eth_link_speed = ETH_SPEED_NUM_100M;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
		eth_link_speed = ETH_SPEED_NUM_1G;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
		eth_link_speed = ETH_SPEED_NUM_2_5G;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
		eth_link_speed = ETH_SPEED_NUM_10G;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
		eth_link_speed = ETH_SPEED_NUM_20G;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
		eth_link_speed = ETH_SPEED_NUM_25G;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
		eth_link_speed = ETH_SPEED_NUM_40G;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
		eth_link_speed = ETH_SPEED_NUM_50G;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
		RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",

	return eth_link_speed;

static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
	uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;

	switch (hw_link_duplex) {
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
		RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",

	return eth_link_duplex;

int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
			"Get link config failed with rc %d\n", rc);

	if (link_info->link_up)
			bnxt_parse_hw_link_speed(link_info->link_speed);
		link->link_speed = ETH_LINK_SPEED_10M;
	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
	link->link_status = link_info->link_up;
	link->link_autoneg = link_info->auto_mode ==
		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
		ETH_LINK_SPEED_FIXED : ETH_LINK_SPEED_AUTONEG;

int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_link_info link_req;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))

	rc = bnxt_valid_link_speed(dev_conf->link_speeds,
				   bp->eth_dev->data->port_id);

	memset(&link_req, 0, sizeof(link_req));
	link_req.link_up = link_up;

	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
		link_req.phy_flags |=
			HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		link_req.auto_mode =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
		link_req.auto_link_speed_mask =
			bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
		link_req.link_speed = speed;
		RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
	link_req.auto_pause = bp->link_info.auto_pause;
	link_req.force_pause = bp->link_info.force_pause;

	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
			"Set link config failed with rc %d\n", rc);

	rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);

int bnxt_hwrm_func_qcfg(struct bnxt *bp)
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QCFG, -1, resp);
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

		struct bnxt_vf_info *vf = &bp->vf;

		/* Hard-coded 0xfff VLAN ID mask */
		vf->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;

	switch (resp->port_partition_type) {
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		bp->port_partition_type = 0;