4 * Copyright(c) Broadcom Limited.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Broadcom Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 #include <rte_byteorder.h>
39 #include <rte_common.h>
40 #include <rte_cycles.h>
41 #include <rte_malloc.h>
42 #include <rte_memzone.h>
43 #include <rte_version.h>
47 #include "bnxt_filter.h"
48 #include "bnxt_hwrm.h"
51 #include "bnxt_ring.h"
54 #include "bnxt_vnic.h"
55 #include "hsi_struct_def_dpdk.h"
/* Upper bound on completion-poll iterations in bnxt_hwrm_send_message_locked()
 * (iteration count, not wall-clock time; see the poll loop below).
 */
59 #define HWRM_CMD_TIMEOUT 2000
/* Buffer-placement settings mirrored from the VNIC_PLCMODES_QCFG/_CFG HWRM
 * commands (saved/restored around bnxt_hwrm_vnic_cfg()).
 */
61 struct bnxt_plcmodes_cfg {
63 uint16_t jumbo_thresh;
65 uint16_t hds_threshold;
/* Map a byte size to the HWRM page-size enum (log2 of the page size). */
68 static int page_getenum(size_t size)
84 RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
/* Out-of-range fallback: largest shift representable for this pointer width */
85 return sizeof(void *) * 8 - 1;
/* Round a size up to the next page size supported by page_getenum(). */
88 static int page_roundup(size_t size)
90 return 1 << page_getenum(size);
94 * HWRM Functions (sent to HWRM)
95 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
96 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
97 * HWRM command was rejected by the ChiMP firmware.
/*
 * Write an HWRM request into the BAR0 communication channel 32 bits at a
 * time, zero the remainder of the request window, ring the channel doorbell,
 * then poll the response buffer until its trailing valid byte is set.
 * Caller must hold bp->hwrm_lock (see bnxt_hwrm_send_message() below).
 */
100 static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
104 struct input *req = msg;
105 struct output *resp = bp->hwrm_cmd_resp_addr;
106 uint32_t *data = msg;
110 /* Write request msg to hwrm channel */
111 for (i = 0; i < msg_len; i += 4) {
112 bar = (uint8_t *)bp->bar0 + i;
113 rte_write32(*data, bar);
117 /* Zero the rest of the request space */
118 for (; i < bp->max_req_len; i += 4) {
119 bar = (uint8_t *)bp->bar0 + i;
/* NOTE(review): 0x100 is the doorbell offset within BAR0 - presumably
 * fixed by the HWRM channel layout; confirm against the HWRM spec.
 */
123 /* Ring channel doorbell */
124 bar = (uint8_t *)bp->bar0 + 0x100;
127 /* Poll for the valid bit */
128 for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
129 /* Sanity check on the resp->resp_len */
131 if (resp->resp_len && resp->resp_len <=
133 /* Last byte of resp contains the valid key */
134 valid = (uint8_t *)resp + resp->resp_len - 1;
135 if (*valid == HWRM_RESP_VALID_KEY)
/* Poll budget exhausted: report the request type that timed out */
141 if (i >= HWRM_CMD_TIMEOUT) {
142 RTE_LOG(ERR, PMD, "Error sending msg %x\n",
/*
 * Serialized wrapper around bnxt_hwrm_send_message_locked(): takes
 * bp->hwrm_lock so only one HWRM command is in flight per device.
 */
152 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
156 rte_spinlock_lock(&bp->hwrm_lock);
157 rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
158 rte_spinlock_unlock(&bp->hwrm_lock);
/*
 * Prepare an HWRM request: clear the shared response buffer and fill in the
 * common request header (type, completion ring, sequence id, target fid,
 * response DMA address).  Requires `bp` in the caller's scope.
 *
 * Wrapped in do { } while (0) so the multi-statement macro expands to a
 * single statement and is safe inside an unbraced if/else body (CERT
 * PRE10-C); call sites keep their trailing semicolon as before.
 */
#define HWRM_PREP(req, type, cr, resp) \
	do { \
		memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
		req.req_type = rte_cpu_to_le_16(HWRM_##type); \
		req.cmpl_ring = rte_cpu_to_le_16(cr); \
		req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
		req.target_id = rte_cpu_to_le_16(0xffff); \
		req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
	} while (0)
/*
 * HWRM_CHECK_RESULT: shared post-send check.  Expands to code that returns
 * out of the CALLING function when the send failed (rc != 0) or when the
 * firmware set a non-zero error_code in the response.  Relies on `rc` and
 * `resp` being in the caller's scope.
 */
170 #define HWRM_CHECK_RESULT \
173 RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
177 if (resp->error_code) { \
178 rc = rte_le_to_cpu_16(resp->error_code); \
179 RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
/*
 * bnxt_hwrm_cfa_l2_clear_rx_mask() - Issue CFA_L2_SET_RX_MASK with no accept
 * flags, clearing the RX mask previously programmed for this VNIC.
 * Returns 0 on success, -1 on send failure, or a positive HWRM error code.
 */
184 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
187 struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
188 struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
190 HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
191 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
194 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
201 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
204 struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
205 struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
208 HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
209 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
211 /* FIXME add multicast flag, when multicast adding options is supported
214 if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
215 mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
216 if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
217 mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
218 req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
221 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
/*
 * bnxt_hwrm_clear_filter() - Free an L2 filter in the firmware
 * (CFA_L2_FILTER_FREE) and invalidate the cached firmware filter id.
 */
228 int bnxt_hwrm_clear_filter(struct bnxt *bp,
229 struct bnxt_filter_info *filter)
232 struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
233 struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
235 HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);
237 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
239 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
/* Mark the filter as no longer owned by the firmware */
243 filter->fw_l2_filter_id = -1;
/*
 * bnxt_hwrm_set_filter() - Allocate an L2 filter (CFA_L2_FILTER_ALLOC)
 * steering matching traffic to the given VNIC.  Optional match fields
 * (l2_addr/mask, ovlan/mask) are copied into the request only when the
 * caller set the corresponding enables bit.  On success the firmware
 * filter id is cached in filter->fw_l2_filter_id.
 */
248 int bnxt_hwrm_set_filter(struct bnxt *bp,
249 struct bnxt_vnic_info *vnic,
250 struct bnxt_filter_info *filter)
253 struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
254 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
255 uint32_t enables = 0;
257 HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);
259 req.flags = rte_cpu_to_le_32(filter->flags);
/* DST_ID is always enabled: the filter must point at this VNIC */
261 enables = filter->enables |
262 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
263 req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
266 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
267 memcpy(req.l2_addr, filter->l2_addr,
270 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
271 memcpy(req.l2_addr_mask, filter->l2_addr_mask,
274 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
275 req.l2_ovlan = filter->l2_ovlan;
277 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
278 req.l2_ovlan_mask = filter->l2_ovlan_mask;
280 req.enables = rte_cpu_to_le_32(enables);
282 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
286 filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
/*
 * bnxt_hwrm_func_qcaps() - Query this function's capabilities and resource
 * limits (FUNC_QCAPS) and cache them in bp (ring, context and VNIC maxima,
 * default MAC).  For a PF this also (re)builds the per-VF info array when
 * the configured max_vfs count changed.
 */
291 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
294 struct hwrm_func_qcaps_input req = {.req_type = 0 };
295 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
296 uint16_t new_max_vfs;
299 HWRM_PREP(req, FUNC_QCAPS, -1, resp);
/* fid 0xffff = query the calling function itself */
301 req.fid = rte_cpu_to_le_16(0xffff);
303 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
307 bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
309 bp->pf.port_id = resp->port_id;
310 bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
311 new_max_vfs = bp->pdev->max_vfs;
312 if (new_max_vfs != bp->pf.max_vfs) {
314 rte_free(bp->pf.vf_info);
/* NOTE(review): rte_malloc() result is dereferenced in the loop below
 * without a NULL check - should bail out on allocation failure.
 */
315 bp->pf.vf_info = rte_malloc("bnxt_vf_info",
316 sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
317 bp->pf.max_vfs = new_max_vfs;
318 for (i = 0; i < new_max_vfs; i++) {
319 bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
320 bp->pf.vf_info[i].vlan_table =
321 rte_zmalloc("VF VLAN table",
324 if (bp->pf.vf_info[i].vlan_table == NULL)
326 "Fail to alloc VLAN table for VF %d\n",
330 bp->pf.vf_info[i].vlan_table);
331 STAILQ_INIT(&bp->pf.vf_info[i].filter);
336 bp->fw_fid = rte_le_to_cpu_32(resp->fid);
337 memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
338 bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
339 bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
340 bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
341 bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
342 bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
343 /* TODO: For now, do not support VMDq/RFS on VFs. */
348 bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
352 bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
354 bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
/*
 * bnxt_hwrm_func_reset() - Ask the firmware to reset this function
 * (FUNC_RESET) with no optional parameters enabled.
 */
359 int bnxt_hwrm_func_reset(struct bnxt *bp)
362 struct hwrm_func_reset_input req = {.req_type = 0 };
363 struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
365 HWRM_PREP(req, FUNC_RESET, -1, resp);
367 req.enables = rte_cpu_to_le_32(0);
369 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
376 int bnxt_hwrm_func_driver_register(struct bnxt *bp)
379 struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
380 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
382 if (bp->flags & BNXT_FLAG_REGISTERED)
385 HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
386 req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
387 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
388 req.ver_maj = RTE_VER_YEAR;
389 req.ver_min = RTE_VER_MONTH;
390 req.ver_upd = RTE_VER_MINOR;
393 req.enables |= rte_cpu_to_le_32(
394 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD);
395 memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
396 RTE_MIN(sizeof(req.vf_req_fwd),
397 sizeof(bp->pf.vf_req_fwd)));
400 req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1); /* TODO: Use MACRO */
401 memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
403 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
407 bp->flags |= BNXT_FLAG_REGISTERED;
/*
 * bnxt_hwrm_ver_get() - Query firmware/HWRM interface versions (VER_GET),
 * verify compatibility with the driver's HWRM headers, and resize the HWRM
 * request/response buffers to the firmware-advertised limits.  Holds
 * bp->hwrm_lock for the whole exchange because the response buffer itself
 * may be reallocated.
 */
412 int bnxt_hwrm_ver_get(struct bnxt *bp)
415 struct hwrm_ver_get_input req = {.req_type = 0 };
416 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
419 uint16_t max_resp_len;
420 char type[RTE_MEMZONE_NAMESIZE];
422 HWRM_PREP(req, VER_GET, -1, resp);
/* Tell the firmware which HWRM interface revision the driver speaks */
424 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
425 req.hwrm_intf_min = HWRM_VERSION_MINOR;
426 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
429 * Hold the lock since we may be adjusting the response pointers.
431 rte_spinlock_lock(&bp->hwrm_lock);
432 rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));
436 RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
437 resp->hwrm_intf_maj, resp->hwrm_intf_min,
439 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
/* Pack fw version as maj.min.bld.rsvd into one 32-bit value */
440 bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
441 (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
442 RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
443 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
445 my_version = HWRM_VERSION_MAJOR << 16;
446 my_version |= HWRM_VERSION_MINOR << 8;
447 my_version |= HWRM_VERSION_UPDATE;
449 fw_version = resp->hwrm_intf_maj << 16;
450 fw_version |= resp->hwrm_intf_min << 8;
451 fw_version |= resp->hwrm_intf_upd;
/* Major-version mismatch is fatal; minor/update mismatch only warns */
453 if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
454 RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
459 if (my_version != fw_version) {
460 RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
461 if (my_version < fw_version) {
463 "Firmware API version is newer than driver.\n");
465 "The driver may be missing features.\n");
468 "Firmware API version is older than driver.\n");
470 "Not all driver features may be functional.\n");
474 if (bp->max_req_len > resp->max_req_win_len) {
475 RTE_LOG(ERR, PMD, "Unsupported request length\n");
478 bp->max_req_len = resp->max_req_win_len;
479 max_resp_len = resp->max_resp_len;
/* Grow/shrink the response buffer to the firmware-advertised size */
480 if (bp->max_resp_len != max_resp_len) {
/* NOTE(review): prefer snprintf(type, sizeof(type), ...) - sprintf has
 * no bound; this relies on the PCI address always fitting.
 */
481 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
482 bp->pdev->addr.domain, bp->pdev->addr.bus,
483 bp->pdev->addr.devid, bp->pdev->addr.function);
485 rte_free(bp->hwrm_cmd_resp_addr);
487 bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
488 if (bp->hwrm_cmd_resp_addr == NULL) {
492 bp->hwrm_cmd_resp_dma_addr =
493 rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
494 bp->max_resp_len = max_resp_len;
498 rte_spinlock_unlock(&bp->hwrm_lock);
/*
 * bnxt_hwrm_func_driver_unregister() - Unregister the driver from the
 * firmware (FUNC_DRV_UNRGTR).  No-op if not registered; clears
 * BNXT_FLAG_REGISTERED on success.
 */
502 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
505 struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
506 struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
508 if (!(bp->flags & BNXT_FLAG_REGISTERED))
511 HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
514 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
518 bp->flags &= ~BNXT_FLAG_REGISTERED;
/*
 * bnxt_hwrm_port_phy_cfg() - Program PHY/link settings (PORT_PHY_CFG).
 * With a zero link_speed, autonegotiation parameters are configured;
 * otherwise the given speed is forced.  Pause settings are taken from
 * conf->auto_pause/force_pause.
 */
523 static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
526 struct hwrm_port_phy_cfg_input req = {0};
527 struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
528 uint32_t enables = 0;
530 HWRM_PREP(req, PORT_PHY_CFG, -1, resp);
533 req.flags = rte_cpu_to_le_32(conf->phy_flags);
534 req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
536 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
537 * any auto mode, even "none".
/* link_speed == 0 selects autonegotiation */
539 if (!conf->link_speed) {
540 req.auto_mode |= conf->auto_mode;
541 enables = HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
542 req.auto_link_speed_mask = conf->auto_link_speed_mask;
544 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
545 req.auto_link_speed = bp->link_info.auto_link_speed;
547 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
549 req.auto_duplex = conf->duplex;
550 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
551 req.auto_pause = conf->auto_pause;
552 req.force_pause = conf->force_pause;
553 /* Set force_pause if there is no auto or if there is a force */
554 if (req.auto_pause && !req.force_pause)
555 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
557 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
559 req.enables = rte_cpu_to_le_32(enables);
/* Caller asked for link down: override with the force-link-down flag */
562 rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
563 RTE_LOG(INFO, PMD, "Force Link Down\n");
566 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
/*
 * bnxt_hwrm_port_phy_qcfg() - Query current PHY/link state (PORT_PHY_QCFG)
 * and fill the caller's bnxt_link_info (link up/down, speed, duplex, pause,
 * autoneg settings, PHY firmware version).
 */
573 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
574 struct bnxt_link_info *link_info)
577 struct hwrm_port_phy_qcfg_input req = {0};
578 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
580 HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);
582 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
586 link_info->phy_link_status = resp->link;
587 if (link_info->phy_link_status != HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK) {
588 link_info->link_up = 1;
589 link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
591 link_info->link_up = 0;
592 link_info->link_speed = 0;
594 link_info->duplex = resp->duplex;
595 link_info->pause = resp->pause;
596 link_info->auto_pause = resp->auto_pause;
597 link_info->force_pause = resp->force_pause;
598 link_info->auto_mode = resp->auto_mode;
600 link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
601 link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
602 link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
603 link_info->phy_ver[0] = resp->phy_maj;
604 link_info->phy_ver[1] = resp->phy_min;
605 link_info->phy_ver[2] = resp->phy_bld;
/*
 * bnxt_hwrm_queue_qportcfg() - Query CoS queue configuration
 * (QUEUE_QPORTCFG) and cache per-queue id/service-profile in bp->cos_queue
 * via the GET_QUEUE_INFO() helper macro below.
 */
610 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
613 struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
614 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
616 HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);
618 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
/* Token-pastes the queue index into the response field names */
622 #define GET_QUEUE_INFO(x) \
623 bp->cos_queue[x].id = resp->queue_id##x; \
624 bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
/*
 * bnxt_hwrm_ring_alloc() - Allocate a TX, RX or completion ring in the
 * firmware (RING_ALLOC).  map_index associates the ring with a doorbell;
 * stats_ctx_id is attached for RX rings.  On success the firmware ring id
 * is stored in ring->fw_ring_id.
 */
638 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
639 struct bnxt_ring *ring,
640 uint32_t ring_type, uint32_t map_index,
641 uint32_t stats_ctx_id)
644 struct hwrm_ring_alloc_input req = {.req_type = 0 };
645 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
647 HWRM_PREP(req, RING_ALLOC, -1, resp);
649 req.enables = rte_cpu_to_le_32(0);
651 req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
652 req.fbo = rte_cpu_to_le_32(0);
653 /* Association of ring index with doorbell index */
654 req.logical_id = rte_cpu_to_le_16(map_index);
657 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
/* TX rings are placed on the default (lossy) CoS queue */
658 req.queue_id = bp->cos_queue[0].id;
660 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
661 req.ring_type = ring_type;
663 rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
664 req.length = rte_cpu_to_le_32(ring->ring_size);
665 req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
666 req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
667 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
669 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
670 req.ring_type = ring_type;
672 * TODO: Some HWRM versions crash with
673 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
675 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
676 req.length = rte_cpu_to_le_32(ring->ring_size);
679 RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
684 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
/* Per-ring-type error reporting instead of the generic HWRM_CHECK_RESULT */
686 if (rc || resp->error_code) {
687 if (rc == 0 && resp->error_code)
688 rc = rte_le_to_cpu_16(resp->error_code);
/* NOTE(review): the error switch below uses the RING_FREE type constants;
 * they share values with the RING_ALLOC ones, but the ALLOC names would be
 * clearer here.
 */
690 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
692 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
694 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
696 "hwrm_ring_alloc rx failed. rc:%d\n", rc);
698 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
700 "hwrm_ring_alloc tx failed. rc:%d\n", rc);
703 RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
708 ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
/*
 * bnxt_hwrm_ring_free() - Free a firmware ring (RING_FREE) of the given
 * type, logging a type-specific error on failure.
 */
712 int bnxt_hwrm_ring_free(struct bnxt *bp,
713 struct bnxt_ring *ring, uint32_t ring_type)
716 struct hwrm_ring_free_input req = {.req_type = 0 };
717 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
719 HWRM_PREP(req, RING_FREE, -1, resp);
721 req.ring_type = ring_type;
722 req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
724 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
726 if (rc || resp->error_code) {
727 if (rc == 0 && resp->error_code)
728 rc = rte_le_to_cpu_16(resp->error_code);
731 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
732 RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
735 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
736 RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
739 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
740 RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
744 RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
/*
 * bnxt_hwrm_ring_grp_alloc() - Create a firmware ring group
 * (RING_GRP_ALLOC) binding the completion, RX, aggregation rings and the
 * stats context recorded in bp->grp_info[idx]; stores the returned group id.
 */
751 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
754 struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
755 struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
757 HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);
759 req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
760 req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
761 req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
762 req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
764 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
768 bp->grp_info[idx].fw_grp_id =
769 rte_le_to_cpu_16(resp->ring_group_id);
/*
 * bnxt_hwrm_ring_grp_free() - Free the firmware ring group for slot idx
 * (RING_GRP_FREE) and invalidate the cached group id.
 */
774 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
777 struct hwrm_ring_grp_free_input req = {.req_type = 0 };
778 struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
780 HWRM_PREP(req, RING_GRP_FREE, -1, resp);
782 req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
784 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
788 bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
/*
 * bnxt_hwrm_stat_clear() - Zero the hardware counters behind a completion
 * ring's stats context (STAT_CTX_CLR_STATS).  No-op when no stats context
 * has been allocated.
 */
792 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
795 struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
796 struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
798 HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
800 if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
803 req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
/* NOTE(review): HWRM_PREP already assigned seq_id; this reassignment burns
 * a second sequence number for the same request.
 */
804 req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
806 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
/*
 * bnxt_hwrm_stat_ctx_alloc() - Allocate a firmware stats context
 * (STAT_CTX_ALLOC) DMA-backed by cpr->hw_stats_map, with a 1000 ms update
 * period; caches the id in cpr and bp->grp_info[idx].
 */
813 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
814 struct bnxt_cp_ring_info *cpr, unsigned int idx)
817 struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
818 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
820 HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);
822 req.update_period_ms = rte_cpu_to_le_32(1000);
/* NOTE(review): redundant seq_id reassignment after HWRM_PREP (see above) */
824 req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
826 rte_cpu_to_le_64(cpr->hw_stats_map);
828 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
832 cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
833 bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
/*
 * bnxt_hwrm_stat_ctx_free() - Free the stats context (STAT_CTX_FREE) and
 * invalidate the cached ids in cpr and bp->grp_info[idx].
 */
838 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
839 struct bnxt_cp_ring_info *cpr, unsigned int idx)
842 struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
843 struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
845 HWRM_PREP(req, STAT_CTX_FREE, -1, resp);
847 req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
/* NOTE(review): redundant seq_id reassignment after HWRM_PREP (see above) */
848 req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
850 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
854 cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
855 bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
/*
 * bnxt_hwrm_vnic_alloc() - Allocate a firmware VNIC (VNIC_ALLOC).  First
 * maps the VNIC's ring-group range to firmware group ids, seeds the default
 * group/rules/MRU, then stores the returned vnic id.
 */
860 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
863 struct hwrm_vnic_alloc_input req = { 0 };
864 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
866 /* map ring groups to this vnic */
867 for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
868 if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
870 "Not enough ring groups avail:%x req:%x\n", j,
871 (vnic->end_grp_id - vnic->start_grp_id) + 1);
874 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
876 vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
/* Rules start unassigned; VNIC_RSS_COS_LB_CTX_ALLOC fills them in later */
877 vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
878 vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
879 vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
/* MRU = MTU plus L2 header, CRC and one VLAN tag */
880 vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
881 ETHER_CRC_LEN + VLAN_TAG_SIZE;
882 HWRM_PREP(req, VNIC_ALLOC, -1, resp);
884 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
888 vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
/*
 * bnxt_hwrm_vnic_plcmodes_qcfg() - Read the VNIC's buffer-placement modes
 * (VNIC_PLCMODES_QCFG) into *pmode so they can be restored after VNIC_CFG.
 */
892 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
893 struct bnxt_vnic_info *vnic,
894 struct bnxt_plcmodes_cfg *pmode)
897 struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
898 struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
900 HWRM_PREP(req, VNIC_PLCMODES_QCFG, -1, resp);
902 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
904 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
908 pmode->flags = rte_le_to_cpu_32(resp->flags);
909 /* dflt_vnic bit doesn't exist in the _cfg command */
910 pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
911 pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
912 pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
913 pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
/*
 * bnxt_hwrm_vnic_plcmodes_cfg() - Write buffer-placement modes back to the
 * VNIC (VNIC_PLCMODES_CFG), enabling the jumbo/HDS threshold fields.
 */
918 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
919 struct bnxt_vnic_info *vnic,
920 struct bnxt_plcmodes_cfg *pmode)
923 struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
924 struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
926 HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);
928 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
929 req.flags = rte_cpu_to_le_32(pmode->flags);
930 req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
931 req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
932 req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
933 req.enables = rte_cpu_to_le_32(
934 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
935 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
936 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
939 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
/*
 * bnxt_hwrm_vnic_cfg() - Configure a VNIC (VNIC_CFG): default ring group,
 * rule contexts, MRU and mode flags.  Placement modes are snapshotted
 * before and restored after, since VNIC_CFG can reset them.
 */
946 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
949 struct hwrm_vnic_cfg_input req = {.req_type = 0 };
950 struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
951 uint32_t ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
952 struct bnxt_plcmodes_cfg pmodes;
954 rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
958 HWRM_PREP(req, VNIC_CFG, -1, resp);
960 /* Only RSS support for now TBD: COS & LB */
962 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
963 HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
/* NOTE(review): each assignment below overwrites the previous one, so only
 * the LAST valid rule's enable bit survives (effectively RSS when set).
 * If several rules can be valid at once this likely should accumulate
 * with |= - confirm against intended behavior.
 */
964 if (vnic->lb_rule != 0xffff)
965 ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
966 if (vnic->cos_rule != 0xffff)
967 ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
968 if (vnic->rss_rule != 0xffff)
969 ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
970 req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
971 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
972 req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
973 req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
974 req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
975 req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
976 req.mru = rte_cpu_to_le_16(vnic->mru);
977 if (vnic->func_default)
979 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
980 if (vnic->vlan_strip)
982 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
985 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
/* NOTE(review): QCFG_OUTPUT flag constants used in a CFG request - values
 * presumably match the CFG input flags, but the INPUT names would be
 * clearer; verify against hsi_struct_def_dpdk.h.
 */
987 req.flags |= rte_cpu_to_le_32(
988 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
990 req.flags |= rte_cpu_to_le_32(
991 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
992 if (vnic->rss_dflt_cr)
993 req.flags |= rte_cpu_to_le_32(
994 HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
996 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
/* Restore placement modes saved before the VNIC_CFG call */
1000 rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
/*
 * bnxt_hwrm_vnic_qcfg() - Query a VNIC's configuration (VNIC_QCFG) on
 * behalf of fw_vf_id and decode it back into *vnic (ring group, rules,
 * MRU and mode flag bits).
 */
1005 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1009 struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1010 struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1012 HWRM_PREP(req, VNIC_QCFG, -1, resp);
1015 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1016 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1017 req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1019 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1023 vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1024 vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1025 vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1026 vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1027 vnic->mru = rte_le_to_cpu_16(resp->mru);
/* The following store raw flag-bit masks, not booleans (non-zero = set) */
1028 vnic->func_default = rte_le_to_cpu_32(
1029 resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1030 vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1031 HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1032 vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1033 HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1034 vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1035 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1036 vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1037 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1038 vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1039 HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
/*
 * bnxt_hwrm_vnic_ctx_alloc() - Allocate an RSS/CoS/LB context
 * (VNIC_RSS_COS_LB_CTX_ALLOC) and store its id as the VNIC's RSS rule.
 */
1044 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1047 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1048 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1049 bp->hwrm_cmd_resp_addr;
1051 HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);
1053 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1057 vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
/*
 * bnxt_hwrm_vnic_ctx_free() - Free the VNIC's RSS/CoS/LB context
 * (VNIC_RSS_COS_LB_CTX_FREE) and invalidate the cached rule id.
 */
1062 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1065 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1066 struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1067 bp->hwrm_cmd_resp_addr;
1069 HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);
1071 req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1073 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1077 vnic->rss_rule = INVALID_HW_RING_ID;
/*
 * bnxt_hwrm_vnic_free() - Free the firmware VNIC (VNIC_FREE); no-op when
 * the cached vnic id is already invalid.
 */
1082 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1085 struct hwrm_vnic_free_input req = {.req_type = 0 };
1086 struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1088 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
1091 HWRM_PREP(req, VNIC_FREE, -1, resp);
1093 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1095 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1099 vnic->fw_vnic_id = INVALID_HW_RING_ID;
/*
 * bnxt_hwrm_vnic_rss_cfg() - Program RSS for the VNIC (VNIC_RSS_CFG):
 * hash type plus DMA addresses of the indirection table and hash key.
 */
1103 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1104 struct bnxt_vnic_info *vnic)
1107 struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1108 struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1110 HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);
1112 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1114 req.ring_grp_tbl_addr =
1115 rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1116 req.hash_key_tbl_addr =
1117 rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1118 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1120 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
/*
 * bnxt_hwrm_func_vf_mac() - Set the default MAC address of VF `vf` via
 * FUNC_CFG (PF-only operation) and clear the VF's random_mac marker.
 * Note: request fields are filled before HWRM_PREP - this is safe because
 * HWRM_PREP only clears the response buffer and writes the header fields,
 * but it differs from the ordering used elsewhere in this file.
 */
1127 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1129 struct hwrm_func_cfg_input req = {0};
1130 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1133 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1134 req.enables = rte_cpu_to_le_32(
1135 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1136 memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1137 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1139 HWRM_PREP(req, FUNC_CFG, -1, resp);
1141 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1144 bp->pf.vf_info[vf].random_mac = false;
1150 * HWRM utility functions
/*
 * bnxt_clear_all_hwrm_stat_ctxs() - Clear the hardware counters of every
 * RX and TX completion ring.  Indices < rx_cp_nr_rings address RX queues;
 * the remainder address TX queues.
 */
1153 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1158 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1159 struct bnxt_tx_queue *txq;
1160 struct bnxt_rx_queue *rxq;
1161 struct bnxt_cp_ring_info *cpr;
1163 if (i >= bp->rx_cp_nr_rings) {
1164 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1167 rxq = bp->rx_queues[i];
1171 rc = bnxt_hwrm_stat_clear(bp, cpr);
/*
 * bnxt_free_all_hwrm_stat_ctxs() - Free every allocated stats context.
 * Group-info slot 0 is the default ring, hence idx = i + 1.
 */
1178 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1182 struct bnxt_cp_ring_info *cpr;
1184 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1185 unsigned int idx = i + 1;
1187 if (i >= bp->rx_cp_nr_rings)
1188 cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1190 cpr = bp->rx_queues[i]->cp_ring;
1191 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1192 rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
/*
 * bnxt_alloc_all_hwrm_stat_ctxs() - Allocate a stats context for every RX
 * and TX completion ring (idx offset by 1 past the default ring slot).
 */
1200 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1205 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1206 struct bnxt_tx_queue *txq;
1207 struct bnxt_rx_queue *rxq;
1208 struct bnxt_cp_ring_info *cpr;
1209 unsigned int idx = i + 1;
1211 if (i >= bp->rx_cp_nr_rings) {
1212 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1215 rxq = bp->rx_queues[i];
1219 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);
/*
 * bnxt_free_all_hwrm_ring_grps() - Free the firmware ring group of every
 * RX ring, warning about slots already marked invalid.
 */
1227 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1232 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1233 unsigned int idx = i + 1;
1235 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
1237 "Attempt to free invalid ring group %d\n",
1242 rc = bnxt_hwrm_ring_grp_free(bp, idx);
/*
 * bnxt_free_cp_ring() - Free one completion ring in the firmware,
 * invalidate its cached ids and reset the descriptor ring contents and
 * consumer index for potential reuse.
 */
1250 static void bnxt_free_cp_ring(struct bnxt *bp,
1251 struct bnxt_cp_ring_info *cpr, unsigned int idx)
1253 struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1255 bnxt_hwrm_ring_free(bp, cp_ring,
1256 HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1257 cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1258 bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
1259 memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1260 sizeof(*cpr->cp_desc_ring));
1261 cpr->cp_raw_cons = 0;
/*
 * bnxt_free_all_hwrm_rings() - Free every TX, RX and completion ring in
 * the firmware.  For each freed ring, the cached firmware id is
 * invalidated and the host descriptor/buffer rings are zeroed so the
 * queues can be reused.  The default completion ring is freed last.
 */
1264 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1269 for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1270 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1271 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1272 struct bnxt_ring *ring = txr->tx_ring_struct;
1273 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
/* TX group slots follow all RX slots (plus the default slot 0) */
1274 unsigned int idx = bp->rx_cp_nr_rings + i + 1;
1276 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1277 bnxt_hwrm_ring_free(bp, ring,
1278 HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1279 ring->fw_ring_id = INVALID_HW_RING_ID;
1280 memset(txr->tx_desc_ring, 0,
1281 txr->tx_ring_struct->ring_size *
1282 sizeof(*txr->tx_desc_ring));
1283 memset(txr->tx_buf_ring, 0,
1284 txr->tx_ring_struct->ring_size *
1285 sizeof(*txr->tx_buf_ring));
1289 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
1290 bnxt_free_cp_ring(bp, cpr, idx);
1293 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1294 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1295 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1296 struct bnxt_ring *ring = rxr->rx_ring_struct;
1297 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1298 unsigned int idx = i + 1;
1300 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1301 bnxt_hwrm_ring_free(bp, ring,
1302 HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1303 ring->fw_ring_id = INVALID_HW_RING_ID;
1304 bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
1305 memset(rxr->rx_desc_ring, 0,
1306 rxr->rx_ring_struct->ring_size *
1307 sizeof(*rxr->rx_desc_ring));
1308 memset(rxr->rx_buf_ring, 0,
1309 rxr->rx_ring_struct->ring_size *
1310 sizeof(*rxr->rx_buf_ring));
1313 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
1314 bnxt_free_cp_ring(bp, cpr, idx);
1317 /* Default completion ring */
1319 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1321 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
1322 bnxt_free_cp_ring(bp, cpr, 0);
/*
 * bnxt_alloc_all_hwrm_ring_grps() - Allocate a firmware ring group for
 * every RX ring whose completion and RX rings are both valid.
 */
1328 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1333 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1334 unsigned int idx = i + 1;
1336 if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
1337 bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
1340 rc = bnxt_hwrm_ring_grp_alloc(bp, idx);
1348 void bnxt_free_hwrm_resources(struct bnxt *bp)
1350 /* Release memzone */
1351 rte_free(bp->hwrm_cmd_resp_addr);
1352 bp->hwrm_cmd_resp_addr = NULL;
1353 bp->hwrm_cmd_resp_dma_addr = 0;
1356 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1358 struct rte_pci_device *pdev = bp->pdev;
1359 char type[RTE_MEMZONE_NAMESIZE];
1361 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
1362 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1363 bp->max_req_len = HWRM_MAX_REQ_LEN;
1364 bp->max_resp_len = HWRM_MAX_RESP_LEN;
1365 bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1366 if (bp->hwrm_cmd_resp_addr == NULL)
1368 bp->hwrm_cmd_resp_dma_addr =
1369 rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
1370 rte_spinlock_init(&bp->hwrm_lock);
1375 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1377 struct bnxt_filter_info *filter;
1380 STAILQ_FOREACH(filter, &vnic->filter, next) {
1381 rc = bnxt_hwrm_clear_filter(bp, filter);
1388 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1390 struct bnxt_filter_info *filter;
1393 STAILQ_FOREACH(filter, &vnic->filter, next) {
1394 rc = bnxt_hwrm_set_filter(bp, vnic, filter);
1401 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1403 struct bnxt_vnic_info *vnic;
1406 if (bp->vnic_info == NULL)
1409 vnic = &bp->vnic_info[0];
1411 bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
1413 /* VNIC resources */
1414 for (i = 0; i < bp->nr_vnics; i++) {
1415 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1417 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1419 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1420 bnxt_hwrm_vnic_free(bp, vnic);
1422 /* Ring resources */
1423 bnxt_free_all_hwrm_rings(bp);
1424 bnxt_free_all_hwrm_ring_grps(bp);
1425 bnxt_free_all_hwrm_stat_ctxs(bp);
1428 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1430 uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1432 if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1433 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1435 switch (conf_link_speed) {
1436 case ETH_LINK_SPEED_10M_HD:
1437 case ETH_LINK_SPEED_100M_HD:
1438 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1440 return hw_link_duplex;
1443 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
1445 uint16_t eth_link_speed = 0;
1447 if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
1448 return ETH_LINK_SPEED_AUTONEG;
1450 switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
1451 case ETH_LINK_SPEED_100M:
1452 case ETH_LINK_SPEED_100M_HD:
1454 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
1456 case ETH_LINK_SPEED_1G:
1458 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
1460 case ETH_LINK_SPEED_2_5G:
1462 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
1464 case ETH_LINK_SPEED_10G:
1466 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
1468 case ETH_LINK_SPEED_20G:
1470 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
1472 case ETH_LINK_SPEED_25G:
1474 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
1476 case ETH_LINK_SPEED_40G:
1478 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
1480 case ETH_LINK_SPEED_50G:
1482 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
1486 "Unsupported link speed %d; default to AUTO\n",
1490 return eth_link_speed;
/*
 * All ethdev link speeds this PMD can advertise or force via HWRM;
 * used to validate dev_conf->link_speeds in bnxt_valid_link_speed().
 */
1493 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
1494 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
1495 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
1496 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
1498 static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
1502 if (link_speed == ETH_LINK_SPEED_AUTONEG)
1505 if (link_speed & ETH_LINK_SPEED_FIXED) {
1506 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
1508 if (one_speed & (one_speed - 1)) {
1510 "Invalid advertised speeds (%u) for port %u\n",
1511 link_speed, port_id);
1514 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
1516 "Unsupported advertised speed (%u) for port %u\n",
1517 link_speed, port_id);
1521 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
1523 "Unsupported advertised speeds (%u) for port %u\n",
1524 link_speed, port_id);
1531 static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
1535 if (link_speed == ETH_LINK_SPEED_AUTONEG)
1536 link_speed = BNXT_SUPPORTED_SPEEDS;
1538 if (link_speed & ETH_LINK_SPEED_100M)
1539 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1540 if (link_speed & ETH_LINK_SPEED_100M_HD)
1541 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1542 if (link_speed & ETH_LINK_SPEED_1G)
1543 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
1544 if (link_speed & ETH_LINK_SPEED_2_5G)
1545 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
1546 if (link_speed & ETH_LINK_SPEED_10G)
1547 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
1548 if (link_speed & ETH_LINK_SPEED_20G)
1549 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
1550 if (link_speed & ETH_LINK_SPEED_25G)
1551 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
1552 if (link_speed & ETH_LINK_SPEED_40G)
1553 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
1554 if (link_speed & ETH_LINK_SPEED_50G)
1555 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
1559 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
1561 uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
1563 switch (hw_link_speed) {
1564 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
1565 eth_link_speed = ETH_SPEED_NUM_100M;
1567 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
1568 eth_link_speed = ETH_SPEED_NUM_1G;
1570 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
1571 eth_link_speed = ETH_SPEED_NUM_2_5G;
1573 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
1574 eth_link_speed = ETH_SPEED_NUM_10G;
1576 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
1577 eth_link_speed = ETH_SPEED_NUM_20G;
1579 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
1580 eth_link_speed = ETH_SPEED_NUM_25G;
1582 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
1583 eth_link_speed = ETH_SPEED_NUM_40G;
1585 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
1586 eth_link_speed = ETH_SPEED_NUM_50G;
1588 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
1590 RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
1594 return eth_link_speed;
1597 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
1599 uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1601 switch (hw_link_duplex) {
1602 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
1603 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
1604 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1606 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
1607 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
1610 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
1614 return eth_link_duplex;
1617 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
1620 struct bnxt_link_info *link_info = &bp->link_info;
1622 rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
1625 "Get link config failed with rc %d\n", rc);
1628 if (link_info->link_up)
1630 bnxt_parse_hw_link_speed(link_info->link_speed);
1632 link->link_speed = ETH_LINK_SPEED_10M;
1633 link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
1634 link->link_status = link_info->link_up;
1635 link->link_autoneg = link_info->auto_mode ==
1636 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
1637 ETH_LINK_SPEED_FIXED : ETH_LINK_SPEED_AUTONEG;
1642 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
1645 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1646 struct bnxt_link_info link_req;
1649 if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
1652 rc = bnxt_valid_link_speed(dev_conf->link_speeds,
1653 bp->eth_dev->data->port_id);
1657 memset(&link_req, 0, sizeof(link_req));
1658 link_req.link_up = link_up;
1662 speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
1663 link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
1665 link_req.phy_flags |=
1666 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
1667 link_req.auto_mode =
1668 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
1669 link_req.auto_link_speed_mask =
1670 bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
1672 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
1673 link_req.link_speed = speed;
1674 RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
1676 link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
1677 link_req.auto_pause = bp->link_info.auto_pause;
1678 link_req.force_pause = bp->link_info.force_pause;
1681 rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
1684 "Set link config failed with rc %d\n", rc);
1687 rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
1693 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
1695 struct hwrm_func_qcfg_input req = {0};
1696 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1699 HWRM_PREP(req, FUNC_QCFG, -1, resp);
1700 req.fid = rte_cpu_to_le_16(0xffff);
1702 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1706 /* Hard Coded.. 0xfff VLAN ID mask */
1707 bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
1709 switch (resp->port_partition_type) {
1710 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
1711 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
1712 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
1713 bp->port_partition_type = resp->port_partition_type;
1716 bp->port_partition_type = 0;
1723 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
1724 struct hwrm_func_qcaps_output *qcaps)
1726 qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
1727 memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
1728 sizeof(qcaps->mac_address));
1729 qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
1730 qcaps->max_rx_rings = fcfg->num_rx_rings;
1731 qcaps->max_tx_rings = fcfg->num_tx_rings;
1732 qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
1733 qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
1735 qcaps->first_vf_id = 0;
1736 qcaps->max_vnics = fcfg->num_vnics;
1737 qcaps->max_decap_records = 0;
1738 qcaps->max_encap_records = 0;
1739 qcaps->max_tx_wm_flows = 0;
1740 qcaps->max_tx_em_flows = 0;
1741 qcaps->max_rx_wm_flows = 0;
1742 qcaps->max_rx_em_flows = 0;
1743 qcaps->max_flow_id = 0;
1744 qcaps->max_mcast_filters = fcfg->num_mcast_filters;
1745 qcaps->max_sp_tx_rings = 0;
1746 qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
1749 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
1751 struct hwrm_func_cfg_input req = {0};
1752 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1755 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
1756 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
1757 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
1758 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
1759 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
1760 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
1761 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
1762 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
1763 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
1764 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
1765 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
1766 req.mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1767 ETHER_CRC_LEN + VLAN_TAG_SIZE);
1768 req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1769 ETHER_CRC_LEN + VLAN_TAG_SIZE);
1770 req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
1771 req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
1772 req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
1773 req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
1774 req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
1775 req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
1776 req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
1777 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
1778 req.fid = rte_cpu_to_le_16(0xffff);
1780 HWRM_PREP(req, FUNC_CFG, -1, resp);
1782 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1788 static void populate_vf_func_cfg_req(struct bnxt *bp,
1789 struct hwrm_func_cfg_input *req,
1792 req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
1793 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
1794 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
1795 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
1796 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
1797 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
1798 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
1799 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
1800 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
1801 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
1803 req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1804 ETHER_CRC_LEN + VLAN_TAG_SIZE);
1805 req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1806 ETHER_CRC_LEN + VLAN_TAG_SIZE);
1807 req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
1809 req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
1810 req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
1812 req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
1813 req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
1814 req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
1815 /* TODO: For now, do not support VMDq/RFS on VFs. */
1816 req->num_vnics = rte_cpu_to_le_16(1);
1817 req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
1821 static void add_random_mac_if_needed(struct bnxt *bp,
1822 struct hwrm_func_cfg_input *cfg_req,
1825 struct ether_addr mac;
1827 if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
1830 if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00", 6) == 0) {
1832 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1833 eth_random_addr(cfg_req->dflt_mac_addr);
1834 bp->pf.vf_info[vf].random_mac = true;
1836 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
1840 static void reserve_resources_from_vf(struct bnxt *bp,
1841 struct hwrm_func_cfg_input *cfg_req,
1844 struct hwrm_func_qcaps_input req = {0};
1845 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
1848 /* Get the actual allocated values now */
1849 HWRM_PREP(req, FUNC_QCAPS, -1, resp);
1850 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1851 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1854 RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
1855 copy_func_cfg_to_qcaps(cfg_req, resp);
1856 } else if (resp->error_code) {
1857 rc = rte_le_to_cpu_16(resp->error_code);
1858 RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
1859 copy_func_cfg_to_qcaps(cfg_req, resp);
1862 bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
1863 bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
1864 bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
1865 bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
1866 bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
1867 bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
1869 * TODO: While not supporting VMDq with VFs, max_vnics is always
1870 * forced to 1 in this case
1872 //bp->max_vnics -= rte_le_to_cpu_16(esp->max_vnics);
1873 bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
1876 static int update_pf_resource_max(struct bnxt *bp)
1878 struct hwrm_func_qcfg_input req = {0};
1879 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1882 /* And copy the allocated numbers into the pf struct */
1883 HWRM_PREP(req, FUNC_QCFG, -1, resp);
1884 req.fid = rte_cpu_to_le_16(0xffff);
1885 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1888 /* Only TX ring value reflects actual allocation? TODO */
1889 bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
1890 bp->pf.evb_mode = resp->evb_mode;
1895 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
1900 RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
1904 rc = bnxt_hwrm_func_qcaps(bp);
1908 bp->pf.func_cfg_flags &=
1909 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
1910 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
1911 bp->pf.func_cfg_flags |=
1912 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
1913 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
1917 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
1919 struct hwrm_func_cfg_input req = {0};
1920 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1927 RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
1931 rc = bnxt_hwrm_func_qcaps(bp);
1936 bp->pf.active_vfs = num_vfs;
1939 * First, configure the PF to only use one TX ring. This ensures that
1940 * there are enough rings for all VFs.
1942 * If we don't do this, when we call func_alloc() later, we will lock
1943 * extra rings to the PF that won't be available during func_cfg() of
1946 * This has been fixed with firmware versions above 20.6.54
1948 bp->pf.func_cfg_flags &=
1949 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
1950 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
1951 bp->pf.func_cfg_flags |=
1952 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
1953 rc = bnxt_hwrm_pf_func_cfg(bp, 1);
1958 * Now, create and register a buffer to hold forwarded VF requests
1960 req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
1961 bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
1962 page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
1963 if (bp->pf.vf_req_buf == NULL) {
1967 for (sz = 0; sz < req_buf_sz; sz += getpagesize())
1968 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
1969 for (i = 0; i < num_vfs; i++)
1970 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
1971 (i * HWRM_MAX_REQ_LEN);
1973 rc = bnxt_hwrm_func_buf_rgtr(bp);
1977 populate_vf_func_cfg_req(bp, &req, num_vfs);
1979 bp->pf.active_vfs = 0;
1980 for (i = 0; i < num_vfs; i++) {
1981 add_random_mac_if_needed(bp, &req, i);
1983 HWRM_PREP(req, FUNC_CFG, -1, resp);
1984 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
1985 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
1986 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1988 /* Clear enable flag for next pass */
1989 req.enables &= ~rte_cpu_to_le_32(
1990 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1992 if (rc || resp->error_code) {
1994 "Failed to initizlie VF %d\n", i);
1996 "Not all VFs available. (%d, %d)\n",
1997 rc, resp->error_code);
2001 reserve_resources_from_vf(bp, &req, i);
2002 bp->pf.active_vfs++;
2006 * Now configure the PF to use "the rest" of the resources
2007 * We're using STD_TX_RING_MODE here though which will limit the TX
2008 * rings. This will allow QoS to function properly. Not setting this
2009 * will cause PF rings to break bandwidth settings.
2011 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2015 rc = update_pf_resource_max(bp);
2022 bnxt_hwrm_func_buf_unrgtr(bp);
2027 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2030 struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2031 struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2033 HWRM_PREP(req, FUNC_BUF_RGTR, -1, resp);
2035 req.req_buf_num_pages = rte_cpu_to_le_16(1);
2036 req.req_buf_page_size = rte_cpu_to_le_16(
2037 page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2038 req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2039 req.req_buf_page_addr[0] =
2040 rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
2041 if (req.req_buf_page_addr[0] == 0) {
2043 "unable to map buffer address to physical memory\n");
2047 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2054 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2057 struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2058 struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2060 HWRM_PREP(req, FUNC_BUF_UNRGTR, -1, resp);
2062 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2069 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2071 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2072 struct hwrm_func_cfg_input req = {0};
2075 HWRM_PREP(req, FUNC_CFG, -1, resp);
2076 req.fid = rte_cpu_to_le_16(0xffff);
2077 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2078 req.enables = rte_cpu_to_le_32(
2079 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2080 req.async_event_cr = rte_cpu_to_le_16(
2081 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2082 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2088 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2090 struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2091 struct hwrm_func_vf_cfg_input req = {0};
2094 HWRM_PREP(req, FUNC_VF_CFG, -1, resp);
2095 req.enables = rte_cpu_to_le_32(
2096 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2097 req.async_event_cr = rte_cpu_to_le_16(
2098 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2099 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2105 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2106 void *encaped, size_t ec_size)
2109 struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2110 struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2112 if (ec_size > sizeof(req.encap_request))
2115 HWRM_PREP(req, REJECT_FWD_RESP, -1, resp);
2117 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2118 memcpy(req.encap_request, encaped, ec_size);
2120 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2127 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2128 struct ether_addr *mac)
2130 struct hwrm_func_qcfg_input req = {0};
2131 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2134 HWRM_PREP(req, FUNC_QCFG, -1, resp);
2135 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2136 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2140 memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2144 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2145 void *encaped, size_t ec_size)
2148 struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2149 struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2151 if (ec_size > sizeof(req.encap_request))
2154 HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
2156 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2157 memcpy(req.encap_request, encaped, ec_size);
2159 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));