4 * Copyright(c) Broadcom Limited.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Broadcom Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 #include <rte_byteorder.h>
39 #include <rte_common.h>
40 #include <rte_cycles.h>
41 #include <rte_malloc.h>
42 #include <rte_memzone.h>
43 #include <rte_version.h>
47 #include "bnxt_filter.h"
48 #include "bnxt_hwrm.h"
51 #include "bnxt_ring.h"
54 #include "bnxt_vnic.h"
55 #include "hsi_struct_def_dpdk.h"
59 #define HWRM_CMD_TIMEOUT 2000
61 struct bnxt_plcmodes_cfg {
63 uint16_t jumbo_thresh;
65 uint16_t hds_threshold;
68 static int page_getenum(size_t size)
84 RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
85 return sizeof(void *) * 8 - 1;
88 static int page_roundup(size_t size)
90 return 1 << page_getenum(size);
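/*
 * Worked example (assuming page_getenum() returns the smallest n with
 * (1 << n) >= size): page_getenum(3000) == 12, so page_roundup(3000) == 4096.
 */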
94 * HWRM Functions (sent to HWRM)
95 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
96 * fails (i.e., a timeout), and a positive non-zero HWRM error code if the
97 * HWRM command is rejected by the ChiMP.
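/*
 * Typical call pattern (illustrative sketch only; see the HWRM_PREP and
 * HWRM_CHECK_RESULT macros below):
 *
 *	struct hwrm_func_reset_input req = {.req_type = 0 };
 *	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *	HWRM_PREP(req, FUNC_RESET, -1, resp);
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT;
 */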
100 static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
104 struct input *req = msg;
105 struct output *resp = bp->hwrm_cmd_resp_addr;
106 uint32_t *data = msg;
110 /* Write request msg to hwrm channel */
111 for (i = 0; i < msg_len; i += 4) {
112 bar = (uint8_t *)bp->bar0 + i;
113 rte_write32(*data, bar);
117 /* Zero the rest of the request space */
118 for (; i < bp->max_req_len; i += 4) {
119 bar = (uint8_t *)bp->bar0 + i;
123 /* Ring channel doorbell */
124 bar = (uint8_t *)bp->bar0 + 0x100;
127 /* Poll for the valid bit */
128 for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
129 /* Sanity check on the resp->resp_len */
131 if (resp->resp_len && resp->resp_len <=
133 /* Last byte of resp contains the valid key */
134 valid = (uint8_t *)resp + resp->resp_len - 1;
135 if (*valid == HWRM_RESP_VALID_KEY)
141 if (i >= HWRM_CMD_TIMEOUT) {
142 RTE_LOG(ERR, PMD, "Error sending msg %x\n",
152 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
156 rte_spinlock_lock(&bp->hwrm_lock);
157 rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
158 rte_spinlock_unlock(&bp->hwrm_lock);
162 #define HWRM_PREP(req, type, cr, resp) \
163 memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
164 req.req_type = rte_cpu_to_le_16(HWRM_##type); \
165 req.cmpl_ring = rte_cpu_to_le_16(cr); \
166 req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
167 req.target_id = rte_cpu_to_le_16(0xffff); \
168 req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)
170 #define HWRM_CHECK_RESULT \
173 RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
177 if (resp->error_code) { \
178 rc = rte_le_to_cpu_16(resp->error_code); \
179 RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
184 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
187 struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
188 struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
190 HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
191 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
194 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
201 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
204 struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
205 struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
208 HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
209 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
211 /* FIXME: add the multicast flag once adding multicast addresses is supported
214 if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
215 mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
216 if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
217 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
218 req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
221 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
228 int bnxt_hwrm_clear_filter(struct bnxt *bp,
229 struct bnxt_filter_info *filter)
232 struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
233 struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
235 HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);
237 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
239 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
243 filter->fw_l2_filter_id = -1;
248 int bnxt_hwrm_set_filter(struct bnxt *bp,
249 struct bnxt_vnic_info *vnic,
250 struct bnxt_filter_info *filter)
253 struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
254 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
255 uint32_t enables = 0;
257 HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);
259 req.flags = rte_cpu_to_le_32(filter->flags);
261 enables = filter->enables |
262 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
263 req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
266 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
267 memcpy(req.l2_addr, filter->l2_addr,
270 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
271 memcpy(req.l2_addr_mask, filter->l2_addr_mask,
274 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
275 req.l2_ovlan = filter->l2_ovlan;
277 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
278 req.l2_ovlan_mask = filter->l2_ovlan_mask;
280 req.enables = rte_cpu_to_le_32(enables);
282 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
286 filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
291 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
294 struct hwrm_func_qcaps_input req = {.req_type = 0 };
295 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
296 uint16_t new_max_vfs;
299 HWRM_PREP(req, FUNC_QCAPS, -1, resp);
301 req.fid = rte_cpu_to_le_16(0xffff);
303 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
307 bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
309 bp->pf.port_id = resp->port_id;
310 bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
311 new_max_vfs = bp->pdev->max_vfs;
312 if (new_max_vfs != bp->pf.max_vfs) {
314 rte_free(bp->pf.vf_info);
315 bp->pf.vf_info = rte_malloc("bnxt_vf_info",
316 sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
317 bp->pf.max_vfs = new_max_vfs;
318 for (i = 0; i < new_max_vfs; i++) {
319 bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
320 bp->pf.vf_info[i].vlan_table =
321 rte_zmalloc("VF VLAN table",
324 if (bp->pf.vf_info[i].vlan_table == NULL)
326 "Fail to alloc VLAN table for VF %d\n",
330 bp->pf.vf_info[i].vlan_table);
331 STAILQ_INIT(&bp->pf.vf_info[i].filter);
336 bp->fw_fid = rte_le_to_cpu_32(resp->fid);
337 memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
338 bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
339 bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
340 bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
341 bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
342 bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
343 /* TODO: For now, do not support VMDq/RFS on VFs. */
348 bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
352 bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
354 bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
359 int bnxt_hwrm_func_reset(struct bnxt *bp)
362 struct hwrm_func_reset_input req = {.req_type = 0 };
363 struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
365 HWRM_PREP(req, FUNC_RESET, -1, resp);
367 req.enables = rte_cpu_to_le_32(0);
369 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
376 int bnxt_hwrm_func_driver_register(struct bnxt *bp)
379 struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
380 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
382 if (bp->flags & BNXT_FLAG_REGISTERED)
385 HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
386 req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
387 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
388 req.ver_maj = RTE_VER_YEAR;
389 req.ver_min = RTE_VER_MONTH;
390 req.ver_upd = RTE_VER_MINOR;
393 req.enables |= rte_cpu_to_le_32(
394 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD);
395 memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
396 RTE_MIN(sizeof(req.vf_req_fwd),
397 sizeof(bp->pf.vf_req_fwd)));
400 req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1); /* TODO: Use MACRO */
401 memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
403 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
407 bp->flags |= BNXT_FLAG_REGISTERED;
412 int bnxt_hwrm_ver_get(struct bnxt *bp)
415 struct hwrm_ver_get_input req = {.req_type = 0 };
416 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
419 uint16_t max_resp_len;
420 char type[RTE_MEMZONE_NAMESIZE];
422 HWRM_PREP(req, VER_GET, -1, resp);
424 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
425 req.hwrm_intf_min = HWRM_VERSION_MINOR;
426 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
429 * Hold the lock since we may be adjusting the response pointers.
431 rte_spinlock_lock(&bp->hwrm_lock);
432 rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));
436 RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
437 resp->hwrm_intf_maj, resp->hwrm_intf_min,
439 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
440 bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
441 (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
442 RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
443 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
445 my_version = HWRM_VERSION_MAJOR << 16;
446 my_version |= HWRM_VERSION_MINOR << 8;
447 my_version |= HWRM_VERSION_UPDATE;
449 fw_version = resp->hwrm_intf_maj << 16;
450 fw_version |= resp->hwrm_intf_min << 8;
451 fw_version |= resp->hwrm_intf_upd;
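	/*
	 * Example: interface version 1.5.1 packs to 0x010501,
	 * i.e. (1 << 16) | (5 << 8) | 1, for both my_version and fw_version.
	 */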
453 if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
454 RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
459 if (my_version != fw_version) {
460 RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
461 if (my_version < fw_version) {
463 "Firmware API version is newer than driver.\n");
465 "The driver may be missing features.\n");
468 "Firmware API version is older than driver.\n");
470 "Not all driver features may be functional.\n");
474 if (bp->max_req_len > resp->max_req_win_len) {
475 RTE_LOG(ERR, PMD, "Unsupported request length\n");
478 bp->max_req_len = resp->max_req_win_len;
479 max_resp_len = resp->max_resp_len;
480 if (bp->max_resp_len != max_resp_len) {
481 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
482 bp->pdev->addr.domain, bp->pdev->addr.bus,
483 bp->pdev->addr.devid, bp->pdev->addr.function);
485 rte_free(bp->hwrm_cmd_resp_addr);
487 bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
488 if (bp->hwrm_cmd_resp_addr == NULL) {
492 rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
493 bp->hwrm_cmd_resp_dma_addr =
494 rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
495 if (bp->hwrm_cmd_resp_dma_addr == 0) {
497 "Unable to map response buffer to physical memory.\n");
501 bp->max_resp_len = max_resp_len;
505 rte_spinlock_unlock(&bp->hwrm_lock);
509 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
512 struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
513 struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
515 if (!(bp->flags & BNXT_FLAG_REGISTERED))
518 HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
521 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
525 bp->flags &= ~BNXT_FLAG_REGISTERED;
530 static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
533 struct hwrm_port_phy_cfg_input req = {0};
534 struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
535 uint32_t enables = 0;
537 HWRM_PREP(req, PORT_PHY_CFG, -1, resp);
540 req.flags = rte_cpu_to_le_32(conf->phy_flags);
541 req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
543 * Note: ChiMP FW 20.2.1 and 20.2.2 return an error when we set
544 * any auto mode, even "none".
546 if (!conf->link_speed) {
547 req.auto_mode |= conf->auto_mode;
548 enables = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
549 req.auto_link_speed_mask = conf->auto_link_speed_mask;
551 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
552 req.auto_link_speed = bp->link_info.auto_link_speed;
554 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
556 req.auto_duplex = conf->duplex;
557 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
558 req.auto_pause = conf->auto_pause;
559 req.force_pause = conf->force_pause;
560 /* Set force_pause if there is no auto or if there is a force */
561 if (req.auto_pause && !req.force_pause)
562 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
564 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
566 req.enables = rte_cpu_to_le_32(enables);
569 rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
570 RTE_LOG(INFO, PMD, "Force Link Down\n");
573 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
580 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
581 struct bnxt_link_info *link_info)
584 struct hwrm_port_phy_qcfg_input req = {0};
585 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
587 HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);
589 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
593 link_info->phy_link_status = resp->link;
594 if (link_info->phy_link_status != HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK) {
595 link_info->link_up = 1;
596 link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
598 link_info->link_up = 0;
599 link_info->link_speed = 0;
601 link_info->duplex = resp->duplex;
602 link_info->pause = resp->pause;
603 link_info->auto_pause = resp->auto_pause;
604 link_info->force_pause = resp->force_pause;
605 link_info->auto_mode = resp->auto_mode;
607 link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
608 link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
609 link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
610 link_info->phy_ver[0] = resp->phy_maj;
611 link_info->phy_ver[1] = resp->phy_min;
612 link_info->phy_ver[2] = resp->phy_bld;
617 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
620 struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
621 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
623 HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);
625 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
629 #define GET_QUEUE_INFO(x) \
630 bp->cos_queue[x].id = resp->queue_id##x; \
631 bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
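/*
 * For example, GET_QUEUE_INFO(0) expands to:
 *	bp->cos_queue[0].id = resp->queue_id0;
 *	bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 */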
645 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
646 struct bnxt_ring *ring,
647 uint32_t ring_type, uint32_t map_index,
648 uint32_t stats_ctx_id)
651 struct hwrm_ring_alloc_input req = {.req_type = 0 };
652 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
654 HWRM_PREP(req, RING_ALLOC, -1, resp);
656 req.enables = rte_cpu_to_le_32(0);
658 req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
659 req.fbo = rte_cpu_to_le_32(0);
660 /* Association of ring index with doorbell index */
661 req.logical_id = rte_cpu_to_le_16(map_index);
664 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
665 req.queue_id = bp->cos_queue[0].id;
667 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
668 req.ring_type = ring_type;
670 rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
671 req.length = rte_cpu_to_le_32(ring->ring_size);
672 req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
673 req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
674 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
676 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
677 req.ring_type = ring_type;
679 * TODO: Some HWRM versions crash with
680 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
682 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
683 req.length = rte_cpu_to_le_32(ring->ring_size);
686 RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
691 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
693 if (rc || resp->error_code) {
694 if (rc == 0 && resp->error_code)
695 rc = rte_le_to_cpu_16(resp->error_code);
697 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
699 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
701 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
703 "hwrm_ring_alloc rx failed. rc:%d\n", rc);
705 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
707 "hwrm_ring_alloc tx failed. rc:%d\n", rc);
710 RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
715 ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
719 int bnxt_hwrm_ring_free(struct bnxt *bp,
720 struct bnxt_ring *ring, uint32_t ring_type)
723 struct hwrm_ring_free_input req = {.req_type = 0 };
724 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
726 HWRM_PREP(req, RING_FREE, -1, resp);
728 req.ring_type = ring_type;
729 req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
731 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
733 if (rc || resp->error_code) {
734 if (rc == 0 && resp->error_code)
735 rc = rte_le_to_cpu_16(resp->error_code);
738 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
739 RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
742 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
743 RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
746 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
747 RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
751 RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
758 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
761 struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
762 struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
764 HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);
766 req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
767 req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
768 req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
769 req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
771 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
775 bp->grp_info[idx].fw_grp_id =
776 rte_le_to_cpu_16(resp->ring_group_id);
781 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
784 struct hwrm_ring_grp_free_input req = {.req_type = 0 };
785 struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
787 HWRM_PREP(req, RING_GRP_FREE, -1, resp);
789 req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
791 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
795 bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
799 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
802 struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
803 struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
805 HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
807 if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
810 req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
811 req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
813 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
820 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
821 struct bnxt_cp_ring_info *cpr, unsigned int idx)
824 struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
825 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
827 HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);
829 req.update_period_ms = rte_cpu_to_le_32(1000);
831 req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
833 rte_cpu_to_le_64(cpr->hw_stats_map);
835 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
839 cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
840 bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
845 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
846 struct bnxt_cp_ring_info *cpr, unsigned int idx)
849 struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
850 struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
852 HWRM_PREP(req, STAT_CTX_FREE, -1, resp);
854 req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
855 req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
857 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
861 cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
862 bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
867 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
870 struct hwrm_vnic_alloc_input req = { 0 };
871 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
873 /* Map ring groups to this VNIC */
874 for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
875 if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
877 "Not enough ring groups avail:%x req:%x\n", j,
878 (vnic->end_grp_id - vnic->start_grp_id) + 1);
881 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
883 vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
884 vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
885 vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
886 vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
887 vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
888 ETHER_CRC_LEN + VLAN_TAG_SIZE;
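	/* E.g. with a 1500-byte MTU: mru = 1500 + 14 + 4 + 4 = 1522 bytes. */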
889 HWRM_PREP(req, VNIC_ALLOC, -1, resp);
891 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
895 vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
899 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
900 struct bnxt_vnic_info *vnic,
901 struct bnxt_plcmodes_cfg *pmode)
904 struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
905 struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
907 HWRM_PREP(req, VNIC_PLCMODES_QCFG, -1, resp);
909 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
911 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
915 pmode->flags = rte_le_to_cpu_32(resp->flags);
916 /* dflt_vnic bit doesn't exist in the _cfg command */
917 pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
918 pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
919 pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
920 pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
925 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
926 struct bnxt_vnic_info *vnic,
927 struct bnxt_plcmodes_cfg *pmode)
930 struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
931 struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
933 HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);
935 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
936 req.flags = rte_cpu_to_le_32(pmode->flags);
937 req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
938 req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
939 req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
940 req.enables = rte_cpu_to_le_32(
941 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
942 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
943 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
946 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
953 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
956 struct hwrm_vnic_cfg_input req = {.req_type = 0 };
957 struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
958 uint32_t ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
959 struct bnxt_plcmodes_cfg pmodes;
961 rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
965 HWRM_PREP(req, VNIC_CFG, -1, resp);
967 /* Only RSS supported for now. TBD: COS & LB */
969 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
970 HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
971 if (vnic->lb_rule != 0xffff)
972 ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
973 if (vnic->cos_rule != 0xffff)
974 ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
975 if (vnic->rss_rule != 0xffff)
976 ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
977 req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
978 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
979 req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
980 req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
981 req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
982 req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
983 req.mru = rte_cpu_to_le_16(vnic->mru);
984 if (vnic->func_default)
986 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
987 if (vnic->vlan_strip)
989 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
992 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
994 req.flags |= rte_cpu_to_le_32(
995 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
997 req.flags |= rte_cpu_to_le_32(
998 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
999 if (vnic->rss_dflt_cr)
1000 req.flags |= rte_cpu_to_le_32(
1001 HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
1003 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1007 rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1012 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1016 struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1017 struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1019 HWRM_PREP(req, VNIC_QCFG, -1, resp);
1022 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1023 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1024 req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1026 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1030 vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1031 vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1032 vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1033 vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1034 vnic->mru = rte_le_to_cpu_16(resp->mru);
1035 vnic->func_default = rte_le_to_cpu_32(
1036 resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1037 vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1038 HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1039 vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1040 HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1041 vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1042 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1043 vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1044 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1045 vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1046 HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1051 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1054 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1055 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1056 bp->hwrm_cmd_resp_addr;
1058 HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);
1060 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1064 vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1069 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1072 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1073 struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1074 bp->hwrm_cmd_resp_addr;
1076 HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);
1078 req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1080 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1084 vnic->rss_rule = INVALID_HW_RING_ID;
1089 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1092 struct hwrm_vnic_free_input req = {.req_type = 0 };
1093 struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1095 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
1098 HWRM_PREP(req, VNIC_FREE, -1, resp);
1100 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1102 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1106 vnic->fw_vnic_id = INVALID_HW_RING_ID;
1110 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1111 struct bnxt_vnic_info *vnic)
1114 struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1115 struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1117 HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);
1119 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1121 req.ring_grp_tbl_addr =
1122 rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1123 req.hash_key_tbl_addr =
1124 rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1125 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1127 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1134 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1136 struct hwrm_func_cfg_input req = {0};
1137 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1140 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1141 req.enables = rte_cpu_to_le_32(
1142 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1143 memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1144 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1146 HWRM_PREP(req, FUNC_CFG, -1, resp);
1148 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1151 bp->pf.vf_info[vf].random_mac = false;
1157 * HWRM utility functions
1160 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1165 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1166 struct bnxt_tx_queue *txq;
1167 struct bnxt_rx_queue *rxq;
1168 struct bnxt_cp_ring_info *cpr;
1170 if (i >= bp->rx_cp_nr_rings) {
1171 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1174 rxq = bp->rx_queues[i];
1178 rc = bnxt_hwrm_stat_clear(bp, cpr);
1185 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1189 struct bnxt_cp_ring_info *cpr;
1191 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1192 unsigned int idx = i + 1;
1194 if (i >= bp->rx_cp_nr_rings)
1195 cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1197 cpr = bp->rx_queues[i]->cp_ring;
1198 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1199 rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
1207 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1212 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1213 struct bnxt_tx_queue *txq;
1214 struct bnxt_rx_queue *rxq;
1215 struct bnxt_cp_ring_info *cpr;
1216 unsigned int idx = i + 1;
1218 if (i >= bp->rx_cp_nr_rings) {
1219 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1222 rxq = bp->rx_queues[i];
1226 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);
1234 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1239 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1240 unsigned int idx = i + 1;
1242 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
1244 "Attempt to free invalid ring group %d\n",
1249 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1257 static void bnxt_free_cp_ring(struct bnxt *bp,
1258 struct bnxt_cp_ring_info *cpr, unsigned int idx)
1260 struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1262 bnxt_hwrm_ring_free(bp, cp_ring,
1263 HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1264 cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1265 bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
1266 memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1267 sizeof(*cpr->cp_desc_ring));
1268 cpr->cp_raw_cons = 0;
1271 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1276 for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1277 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1278 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1279 struct bnxt_ring *ring = txr->tx_ring_struct;
1280 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1281 unsigned int idx = bp->rx_cp_nr_rings + i + 1;
1283 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1284 bnxt_hwrm_ring_free(bp, ring,
1285 HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1286 ring->fw_ring_id = INVALID_HW_RING_ID;
1287 memset(txr->tx_desc_ring, 0,
1288 txr->tx_ring_struct->ring_size *
1289 sizeof(*txr->tx_desc_ring));
1290 memset(txr->tx_buf_ring, 0,
1291 txr->tx_ring_struct->ring_size *
1292 sizeof(*txr->tx_buf_ring));
1296 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
1297 bnxt_free_cp_ring(bp, cpr, idx);
1300 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1301 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1302 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1303 struct bnxt_ring *ring = rxr->rx_ring_struct;
1304 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1305 unsigned int idx = i + 1;
1307 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1308 bnxt_hwrm_ring_free(bp, ring,
1309 HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1310 ring->fw_ring_id = INVALID_HW_RING_ID;
1311 bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
1312 memset(rxr->rx_desc_ring, 0,
1313 rxr->rx_ring_struct->ring_size *
1314 sizeof(*rxr->rx_desc_ring));
1315 memset(rxr->rx_buf_ring, 0,
1316 rxr->rx_ring_struct->ring_size *
1317 sizeof(*rxr->rx_buf_ring));
1320 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
1321 bnxt_free_cp_ring(bp, cpr, idx);
1324 /* Default completion ring */
1326 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1328 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
1329 bnxt_free_cp_ring(bp, cpr, 0);
1335 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1340 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1341 unsigned int idx = i + 1;
1343 if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
1344 bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
1347 rc = bnxt_hwrm_ring_grp_alloc(bp, idx);
1355 void bnxt_free_hwrm_resources(struct bnxt *bp)
1357 /* Release the HWRM response buffer */
1358 rte_free(bp->hwrm_cmd_resp_addr);
1359 bp->hwrm_cmd_resp_addr = NULL;
1360 bp->hwrm_cmd_resp_dma_addr = 0;
1363 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1365 struct rte_pci_device *pdev = bp->pdev;
1366 char type[RTE_MEMZONE_NAMESIZE];
1368 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
1369 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1370 bp->max_req_len = HWRM_MAX_REQ_LEN;
1371 bp->max_resp_len = HWRM_MAX_RESP_LEN;
1372 bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1373 rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1374 if (bp->hwrm_cmd_resp_addr == NULL)
1376 bp->hwrm_cmd_resp_dma_addr =
1377 rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
1378 if (bp->hwrm_cmd_resp_dma_addr == 0) {
1380 "unable to map response address to physical memory\n");
1383 rte_spinlock_init(&bp->hwrm_lock);
1388 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1390 struct bnxt_filter_info *filter;
1393 STAILQ_FOREACH(filter, &vnic->filter, next) {
1394 rc = bnxt_hwrm_clear_filter(bp, filter);
1401 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1403 struct bnxt_filter_info *filter;
1406 STAILQ_FOREACH(filter, &vnic->filter, next) {
1407 rc = bnxt_hwrm_set_filter(bp, vnic, filter);
1414 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1416 struct bnxt_vnic_info *vnic;
1419 if (bp->vnic_info == NULL)
1422 vnic = &bp->vnic_info[0];
1424 bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
1426 /* VNIC resources */
1427 for (i = 0; i < bp->nr_vnics; i++) {
1428 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1430 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1432 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1433 bnxt_hwrm_vnic_free(bp, vnic);
1435 /* Ring resources */
1436 bnxt_free_all_hwrm_rings(bp);
1437 bnxt_free_all_hwrm_ring_grps(bp);
1438 bnxt_free_all_hwrm_stat_ctxs(bp);
1441 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1443 uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1445 if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1446 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1448 switch (conf_link_speed) {
1449 case ETH_LINK_SPEED_10M_HD:
1450 case ETH_LINK_SPEED_100M_HD:
1451 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1453 return hw_link_duplex;
1456 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
1458 uint16_t eth_link_speed = 0;
1460 if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
1461 return ETH_LINK_SPEED_AUTONEG;
1463 switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
1464 case ETH_LINK_SPEED_100M:
1465 case ETH_LINK_SPEED_100M_HD:
1467 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
1469 case ETH_LINK_SPEED_1G:
1471 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
1473 case ETH_LINK_SPEED_2_5G:
1475 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
1477 case ETH_LINK_SPEED_10G:
1479 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
1481 case ETH_LINK_SPEED_20G:
1483 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
1485 case ETH_LINK_SPEED_25G:
1487 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
1489 case ETH_LINK_SPEED_40G:
1491 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
1493 case ETH_LINK_SPEED_50G:
1495 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
1499 "Unsupported link speed %d; default to AUTO\n",
1503 return eth_link_speed;
1506 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
1507 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
1508 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
1509 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
1511 static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
1515 if (link_speed == ETH_LINK_SPEED_AUTONEG)
1518 if (link_speed & ETH_LINK_SPEED_FIXED) {
1519 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
1521 if (one_speed & (one_speed - 1)) {
1523 "Invalid advertised speeds (%u) for port %u\n",
1524 link_speed, port_id);
1527 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
1529 "Unsupported advertised speed (%u) for port %u\n",
1530 link_speed, port_id);
1534 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
1536 "Unsupported advertised speeds (%u) for port %u\n",
1537 link_speed, port_id);
1544 static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
1548 if (link_speed == ETH_LINK_SPEED_AUTONEG)
1549 link_speed = BNXT_SUPPORTED_SPEEDS;
1551 if (link_speed & ETH_LINK_SPEED_100M)
1552 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1553 if (link_speed & ETH_LINK_SPEED_100M_HD)
1554 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1555 if (link_speed & ETH_LINK_SPEED_1G)
1556 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
1557 if (link_speed & ETH_LINK_SPEED_2_5G)
1558 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
1559 if (link_speed & ETH_LINK_SPEED_10G)
1560 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
1561 if (link_speed & ETH_LINK_SPEED_20G)
1562 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
1563 if (link_speed & ETH_LINK_SPEED_25G)
1564 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
1565 if (link_speed & ETH_LINK_SPEED_40G)
1566 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
1567 if (link_speed & ETH_LINK_SPEED_50G)
1568 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
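	/*
	 * Example: advertising ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G yields
	 * a mask of ..._MASK_10GB | ..._MASK_25GB, while ETH_LINK_SPEED_AUTONEG
	 * advertises every speed in BNXT_SUPPORTED_SPEEDS.
	 */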
1572 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
1574 uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
1576 switch (hw_link_speed) {
1577 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
1578 eth_link_speed = ETH_SPEED_NUM_100M;
1580 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
1581 eth_link_speed = ETH_SPEED_NUM_1G;
1583 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
1584 eth_link_speed = ETH_SPEED_NUM_2_5G;
1586 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
1587 eth_link_speed = ETH_SPEED_NUM_10G;
1589 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
1590 eth_link_speed = ETH_SPEED_NUM_20G;
1592 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
1593 eth_link_speed = ETH_SPEED_NUM_25G;
1595 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
1596 eth_link_speed = ETH_SPEED_NUM_40G;
1598 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
1599 eth_link_speed = ETH_SPEED_NUM_50G;
1601 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
1603 RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
1607 return eth_link_speed;
1610 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
1612 uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1614 switch (hw_link_duplex) {
1615 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
1616 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
1617 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1619 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
1620 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
1623 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
1627 return eth_link_duplex;
1630 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
1633 struct bnxt_link_info *link_info = &bp->link_info;
1635 rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
1638 "Get link config failed with rc %d\n", rc);
1641 if (link_info->link_up)
1643 bnxt_parse_hw_link_speed(link_info->link_speed);
1645 link->link_speed = ETH_LINK_SPEED_10M;
1646 link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
1647 link->link_status = link_info->link_up;
1648 link->link_autoneg = link_info->auto_mode ==
1649 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
1650 ETH_LINK_SPEED_FIXED : ETH_LINK_SPEED_AUTONEG;
1655 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
1658 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1659 struct bnxt_link_info link_req;
1662 if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
1665 rc = bnxt_valid_link_speed(dev_conf->link_speeds,
1666 bp->eth_dev->data->port_id);
1670 memset(&link_req, 0, sizeof(link_req));
1671 link_req.link_up = link_up;
1675 speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
1676 link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
1678 link_req.phy_flags |=
1679 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
1680 link_req.auto_mode =
1681 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
1682 link_req.auto_link_speed_mask =
1683 bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
1685 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
1686 link_req.link_speed = speed;
1687 RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
1689 link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
1690 link_req.auto_pause = bp->link_info.auto_pause;
1691 link_req.force_pause = bp->link_info.force_pause;
1694 rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
1697 "Set link config failed with rc %d\n", rc);
1700 rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
1706 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
1708 struct hwrm_func_qcfg_input req = {0};
1709 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1712 HWRM_PREP(req, FUNC_QCFG, -1, resp);
1713 req.fid = rte_cpu_to_le_16(0xffff);
1715 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1719 /* Hardcoded 0xfff VLAN ID mask */
1720 bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
1722 switch (resp->port_partition_type) {
1723 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
1724 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
1725 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
1726 bp->port_partition_type = resp->port_partition_type;
1729 bp->port_partition_type = 0;
1736 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
1737 struct hwrm_func_qcaps_output *qcaps)
1739 qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
1740 memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
1741 sizeof(qcaps->mac_address));
1742 qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
1743 qcaps->max_rx_rings = fcfg->num_rx_rings;
1744 qcaps->max_tx_rings = fcfg->num_tx_rings;
1745 qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
1746 qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
1748 qcaps->first_vf_id = 0;
1749 qcaps->max_vnics = fcfg->num_vnics;
1750 qcaps->max_decap_records = 0;
1751 qcaps->max_encap_records = 0;
1752 qcaps->max_tx_wm_flows = 0;
1753 qcaps->max_tx_em_flows = 0;
1754 qcaps->max_rx_wm_flows = 0;
1755 qcaps->max_rx_em_flows = 0;
1756 qcaps->max_flow_id = 0;
1757 qcaps->max_mcast_filters = fcfg->num_mcast_filters;
1758 qcaps->max_sp_tx_rings = 0;
1759 qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
1762 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
1764 struct hwrm_func_cfg_input req = {0};
1765 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1768 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
1769 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
1770 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
1771 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
1772 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
1773 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
1774 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
1775 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
1776 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
1777 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
1778 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
1779 req.mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1780 ETHER_CRC_LEN + VLAN_TAG_SIZE);
1781 req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1782 ETHER_CRC_LEN + VLAN_TAG_SIZE);
1783 req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
1784 req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
1785 req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
1786 req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
1787 req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
1788 req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
1789 req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
1790 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
1791 req.fid = rte_cpu_to_le_16(0xffff);
1793 HWRM_PREP(req, FUNC_CFG, -1, resp);
1795 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1801 static void populate_vf_func_cfg_req(struct bnxt *bp,
1802 struct hwrm_func_cfg_input *req,
1805 req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
1806 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
1807 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
1808 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
1809 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
1810 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
1811 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
1812 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
1813 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
1814 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
1816 req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1817 ETHER_CRC_LEN + VLAN_TAG_SIZE);
1818 req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1819 ETHER_CRC_LEN + VLAN_TAG_SIZE);
1820 req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
1822 req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
1823 req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
1825 req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
1826 req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
1827 req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
1828 /* TODO: For now, do not support VMDq/RFS on VFs. */
1829 req->num_vnics = rte_cpu_to_le_16(1);
1830 req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
1834 static void add_random_mac_if_needed(struct bnxt *bp,
1835 struct hwrm_func_cfg_input *cfg_req,
1838 struct ether_addr mac;
1840 if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
1843 if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00", 6) == 0) {
1845 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1846 eth_random_addr(cfg_req->dflt_mac_addr);
1847 bp->pf.vf_info[vf].random_mac = true;
1849 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
1853 static void reserve_resources_from_vf(struct bnxt *bp,
1854 struct hwrm_func_cfg_input *cfg_req,
1857 struct hwrm_func_qcaps_input req = {0};
1858 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1861 /* Get the actual allocated values now */
1862 HWRM_PREP(req, FUNC_QCAPS, -1, resp);
1863 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1864 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1867 RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
1868 copy_func_cfg_to_qcaps(cfg_req, resp);
1869 } else if (resp->error_code) {
1870 rc = rte_le_to_cpu_16(resp->error_code);
1871 RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
1872 copy_func_cfg_to_qcaps(cfg_req, resp);
1875 bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
1876 bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
1877 bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
1878 bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
1879 bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
1880 bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
1882 * TODO: While VMDq is not supported on VFs, max_vnics is always
1883 * forced to 1 in this case.
1885 //bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
1886 bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
1889 static int update_pf_resource_max(struct bnxt *bp)
1891 struct hwrm_func_qcfg_input req = {0};
1892 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1895 /* And copy the allocated numbers into the pf struct */
1896 HWRM_PREP(req, FUNC_QCFG, -1, resp);
1897 req.fid = rte_cpu_to_le_16(0xffff);
1898 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1901 /* Only TX ring value reflects actual allocation? TODO */
1902 bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
1903 bp->pf.evb_mode = resp->evb_mode;
1908 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
1913 RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
1917 rc = bnxt_hwrm_func_qcaps(bp);
1921 bp->pf.func_cfg_flags &=
1922 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
1923 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
1924 bp->pf.func_cfg_flags |=
1925 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
1926 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
1930 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
1932 struct hwrm_func_cfg_input req = {0};
1933 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1940 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
1944 rc = bnxt_hwrm_func_qcaps(bp);
1949 bp->pf.active_vfs = num_vfs;
1952 * First, configure the PF to only use one TX ring. This ensures that
1953 * there are enough rings for all VFs.
1955 * If we don't do this, when we call func_alloc() later, we will lock
1956 * extra rings to the PF that won't be available during func_cfg() of the VFs.
1959 * This has been fixed with firmware versions above 20.6.54
1961 bp->pf.func_cfg_flags &=
1962 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
1963 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
1964 bp->pf.func_cfg_flags |=
1965 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
1966 rc = bnxt_hwrm_pf_func_cfg(bp, 1);
1971 * Now, create and register a buffer to hold forwarded VF requests
1973 req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
1974 bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
1975 page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
1976 if (bp->pf.vf_req_buf == NULL) {
1980 for (sz = 0; sz < req_buf_sz; sz += getpagesize())
1981 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
1982 for (i = 0; i < num_vfs; i++)
1983 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
1984 (i * HWRM_MAX_REQ_LEN);
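	/*
	 * Layout sketch (assuming HWRM_MAX_REQ_LEN is 128 bytes): with 4 VFs,
	 * vf_req_buf is 512 bytes and VF i's forwarded-request slot starts at
	 * offset i * 128.
	 */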
1986 rc = bnxt_hwrm_func_buf_rgtr(bp);
1990 populate_vf_func_cfg_req(bp, &req, num_vfs);
1992 bp->pf.active_vfs = 0;
1993 for (i = 0; i < num_vfs; i++) {
1994 add_random_mac_if_needed(bp, &req, i);
1996 HWRM_PREP(req, FUNC_CFG, -1, resp);
1997 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
1998 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
1999 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2001 /* Clear enable flag for next pass */
2002 req.enables &= ~rte_cpu_to_le_32(
2003 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2005 if (rc || resp->error_code) {
2007 "Failed to initizlie VF %d\n", i);
2009 "Not all VFs available. (%d, %d)\n",
2010 rc, resp->error_code);
2014 reserve_resources_from_vf(bp, &req, i);
2015 bp->pf.active_vfs++;
2019 * Now configure the PF to use "the rest" of the resources.
2020 * We use STD_TX_RING_MODE here, which limits the number of TX
2021 * rings. This allows QoS to function properly. Not setting this
2022 * would cause the PF rings to break the bandwidth settings.
2024 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2028 rc = update_pf_resource_max(bp);
2035 bnxt_hwrm_func_buf_unrgtr(bp);
2040 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2043 struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2044 struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2046 HWRM_PREP(req, FUNC_BUF_RGTR, -1, resp);
2048 req.req_buf_num_pages = rte_cpu_to_le_16(1);
2049 req.req_buf_page_size = rte_cpu_to_le_16(
2050 page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2051 req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2052 req.req_buf_page_addr[0] =
2053 rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
2054 if (req.req_buf_page_addr[0] == 0) {
2056 "unable to map buffer address to physical memory\n");
2060 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2067 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2070 struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2071 struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2073 HWRM_PREP(req, FUNC_BUF_UNRGTR, -1, resp);
2075 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2082 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2084 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2085 struct hwrm_func_cfg_input req = {0};
2088 HWRM_PREP(req, FUNC_CFG, -1, resp);
2089 req.fid = rte_cpu_to_le_16(0xffff);
2090 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2091 req.enables = rte_cpu_to_le_32(
2092 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2093 req.async_event_cr = rte_cpu_to_le_16(
2094 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2095 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2101 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2103 struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2104 struct hwrm_func_vf_cfg_input req = {0};
2107 HWRM_PREP(req, FUNC_VF_CFG, -1, resp);
2108 req.enables = rte_cpu_to_le_32(
2109 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2110 req.async_event_cr = rte_cpu_to_le_16(
2111 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2112 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2118 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2119 void *encaped, size_t ec_size)
2122 struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2123 struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2125 if (ec_size > sizeof(req.encap_request))
2128 HWRM_PREP(req, REJECT_FWD_RESP, -1, resp);
2130 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2131 memcpy(req.encap_request, encaped, ec_size);
2133 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2140 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2141 struct ether_addr *mac)
2143 struct hwrm_func_qcfg_input req = {0};
2144 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2147 HWRM_PREP(req, FUNC_QCFG, -1, resp);
2148 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2149 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2153 memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2157 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2158 void *encaped, size_t ec_size)
2161 struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2162 struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2164 if (ec_size > sizeof(req.encap_request))
2167 HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
2169 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2170 memcpy(req.encap_request, encaped, ec_size);
2172 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));