/*
 * Copyright(c) Broadcom Limited.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Broadcom Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#define HWRM_CMD_TIMEOUT	2000

struct bnxt_plcmodes_cfg {
	uint32_t	flags;
	uint16_t	jumbo_thresh;
	uint16_t	hds_offset;
	uint16_t	hds_threshold;
};
static int page_getenum(size_t size)
RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
return sizeof(void *) * 8 - 1;

static int page_roundup(size_t size)
{
return 1 << page_getenum(size);
}
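/*
 * Illustrative note (an assumption, not from the original source):
 * page_getenum() maps an allocation size to the log2 of the smallest page
 * size the firmware accepts for it, and page_roundup() converts that back
 * to bytes.  For example, a 3000-byte table would round up to 4096 bytes,
 * assuming a 4KB (2^12) page is the smallest supported size.
 */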
/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * HWRM command was rejected by the ChiMP firmware.
 */
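/*
 * Typical shape of one of these helpers (sketch only; "foo" is a
 * placeholder, the real request/response structures come from
 * hsi_struct_def_dpdk.h):
 *
 *    int bnxt_hwrm_foo(struct bnxt *bp)
 *    {
 *        int rc = 0;
 *        struct hwrm_foo_input req = {.req_type = 0 };
 *        struct hwrm_foo_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *        HWRM_PREP(req, FOO, -1, resp);
 *        ... fill in request fields ...
 *        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *        HWRM_CHECK_RESULT;
 *        ... copy interesting fields out of *resp ...
 *        return rc;
 *    }
 */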
98 static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
102 struct input *req = msg;
103 struct output *resp = bp->hwrm_cmd_resp_addr;
104 uint32_t *data = msg;
107 uint16_t max_req_len = bp->max_req_len;
108 struct hwrm_short_input short_input = { 0 };
110 if (bp->flags & BNXT_FLAG_SHORT_CMD) {
111 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
113 memset(short_cmd_req, 0, bp->max_req_len);
114 memcpy(short_cmd_req, req, msg_len);
116 short_input.req_type = rte_cpu_to_le_16(req->req_type);
117 short_input.signature = rte_cpu_to_le_16(
118 HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
119 short_input.size = rte_cpu_to_le_16(msg_len);
120 short_input.req_addr =
121 rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);
123 data = (uint32_t *)&short_input;
124 msg_len = sizeof(short_input);
126 /* Sync memory write before updating doorbell */
129 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
132 /* Write request msg to hwrm channel */
133 for (i = 0; i < msg_len; i += 4) {
134 bar = (uint8_t *)bp->bar0 + i;
135 rte_write32(*data, bar);
139 /* Zero the rest of the request space */
140 for (; i < max_req_len; i += 4) {
141 bar = (uint8_t *)bp->bar0 + i;
145 /* Ring channel doorbell */
146 bar = (uint8_t *)bp->bar0 + 0x100;
149 /* Poll for the valid bit */
150 for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
151 /* Sanity check on the resp->resp_len */
153 if (resp->resp_len && resp->resp_len <=
155 /* Last byte of resp contains the valid key */
156 valid = (uint8_t *)resp + resp->resp_len - 1;
157 if (*valid == HWRM_RESP_VALID_KEY)
163 if (i >= HWRM_CMD_TIMEOUT) {
164 RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
174 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
178 rte_spinlock_lock(&bp->hwrm_lock);
179 rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
180 rte_spinlock_unlock(&bp->hwrm_lock);
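/*
 * Every public bnxt_hwrm_*() helper below sends its command through this
 * wrapper, so only one HWRM command is in flight at a time and the single
 * response buffer (bp->hwrm_cmd_resp_addr) cannot be clobbered by a
 * concurrent caller.  bnxt_hwrm_ver_get() is the exception: it takes
 * hwrm_lock itself and uses the _locked variant because it may reallocate
 * the response buffer while handling the reply.
 */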
#define HWRM_PREP(req, type, cr, resp) \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
	req.cmpl_ring = rte_cpu_to_le_16(cr); \
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
	req.target_id = rte_cpu_to_le_16(0xffff); \
	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)
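/*
 * Illustrative expansion: HWRM_PREP(req, VNIC_ALLOC, -1, resp) clears the
 * shared response buffer and then fills in, in little-endian order:
 *    req.req_type  = HWRM_VNIC_ALLOC
 *    req.cmpl_ring = 0xffff  (cr == -1, i.e. no completion ring)
 *    req.seq_id    = the next value of bp->hwrm_cmd_seq
 *    req.target_id = 0xffff
 *    req.resp_addr = DMA address of bp->hwrm_cmd_resp_addr
 */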
192 #define HWRM_CHECK_RESULT \
195 RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
199 if (resp->error_code) { \
200 rc = rte_le_to_cpu_16(resp->error_code); \
201 if (resp->resp_len >= 16) { \
202 struct hwrm_err_output *tmp_hwrm_err_op = \
205 "%s error %d:%d:%08x:%04x\n", \
207 rc, tmp_hwrm_err_op->cmd_err, \
209 tmp_hwrm_err_op->opaque_0), \
211 tmp_hwrm_err_op->opaque_1)); \
215 "%s error %d\n", __func__, rc); \
221 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
224 struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
225 struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
227 HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
228 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
231 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
238 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
239 struct bnxt_vnic_info *vnic,
241 struct bnxt_vlan_table_entry *vlan_table)
244 struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
245 struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
248 HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
249 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
/* FIXME: add the multicast flag once multicast address configuration is
 * supported.
 */
254 if (vnic->flags & BNXT_VNIC_INFO_BCAST)
255 mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
256 if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
257 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
258 if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
259 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
260 if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
261 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
262 if (vnic->flags & BNXT_VNIC_INFO_MCAST)
263 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
264 if (vnic->mc_addr_cnt) {
265 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
266 req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
267 req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
269 if (vlan_count && vlan_table) {
270 mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
	rte_mem_virt2phy(vlan_table));
273 req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
275 req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
278 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
285 int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
287 struct bnxt_vlan_antispoof_table_entry *vlan_table)
290 struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
291 struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
292 bp->hwrm_cmd_resp_addr;
/*
 * Older HWRM versions did not support this command, and the set_rx_mask
 * list was used for anti-spoof. In 1.8.0, the TX path configuration was
 * removed from the set_rx_mask call, and this command was added.
 *
 * This command is also present in firmware 1.7.8.11 and higher, which the
 * version checks below account for.
 */
302 if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
303 if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
304 if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
309 HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, -1, resp);
310 req.fid = rte_cpu_to_le_16(fid);
312 req.vlan_tag_mask_tbl_addr =
313 rte_cpu_to_le_64(rte_mem_virt2phy(vlan_table));
314 req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
316 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
323 int bnxt_hwrm_clear_filter(struct bnxt *bp,
324 struct bnxt_filter_info *filter)
327 struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
328 struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
330 if (filter->fw_l2_filter_id == UINT64_MAX)
333 HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);
335 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
337 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
341 filter->fw_l2_filter_id = -1;
346 int bnxt_hwrm_set_filter(struct bnxt *bp,
348 struct bnxt_filter_info *filter)
351 struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
352 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
353 uint32_t enables = 0;
355 HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);
357 req.flags = rte_cpu_to_le_32(filter->flags);
359 enables = filter->enables |
360 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
361 req.dst_id = rte_cpu_to_le_16(dst_id);
364 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
365 memcpy(req.l2_addr, filter->l2_addr,
368 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
369 memcpy(req.l2_addr_mask, filter->l2_addr_mask,
372 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
373 req.l2_ovlan = filter->l2_ovlan;
375 HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
376 req.l2_ovlan_mask = filter->l2_ovlan_mask;
377 if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
378 req.src_id = rte_cpu_to_le_32(filter->src_id);
379 if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
380 req.src_type = filter->src_type;
382 req.enables = rte_cpu_to_le_32(enables);
384 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
388 filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
393 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
396 struct hwrm_func_qcaps_input req = {.req_type = 0 };
397 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
398 uint16_t new_max_vfs;
401 HWRM_PREP(req, FUNC_QCAPS, -1, resp);
403 req.fid = rte_cpu_to_le_16(0xffff);
405 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
409 bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
411 bp->pf.port_id = resp->port_id;
412 bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
413 new_max_vfs = bp->pdev->max_vfs;
414 if (new_max_vfs != bp->pf.max_vfs) {
416 rte_free(bp->pf.vf_info);
417 bp->pf.vf_info = rte_malloc("bnxt_vf_info",
418 sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
419 bp->pf.max_vfs = new_max_vfs;
420 for (i = 0; i < new_max_vfs; i++) {
421 bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
422 bp->pf.vf_info[i].vlan_table =
423 rte_zmalloc("VF VLAN table",
426 if (bp->pf.vf_info[i].vlan_table == NULL)
"Failed to allocate VLAN table for VF %d\n",
432 bp->pf.vf_info[i].vlan_table);
433 bp->pf.vf_info[i].vlan_as_table =
434 rte_zmalloc("VF VLAN AS table",
437 if (bp->pf.vf_info[i].vlan_as_table == NULL)
"Failed to allocate VLAN anti-spoof table for VF %d\n",
443 bp->pf.vf_info[i].vlan_as_table);
444 STAILQ_INIT(&bp->pf.vf_info[i].filter);
449 bp->fw_fid = rte_le_to_cpu_32(resp->fid);
450 memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
451 bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
452 bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
453 bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
454 bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
455 bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
456 /* TODO: For now, do not support VMDq/RFS on VFs. */
461 bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
465 bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
467 bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
472 int bnxt_hwrm_func_reset(struct bnxt *bp)
475 struct hwrm_func_reset_input req = {.req_type = 0 };
476 struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
478 HWRM_PREP(req, FUNC_RESET, -1, resp);
480 req.enables = rte_cpu_to_le_32(0);
482 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
489 int bnxt_hwrm_func_driver_register(struct bnxt *bp)
492 struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
493 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
495 if (bp->flags & BNXT_FLAG_REGISTERED)
498 HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
499 req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
500 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
501 req.ver_maj = RTE_VER_YEAR;
502 req.ver_min = RTE_VER_MONTH;
503 req.ver_upd = RTE_VER_MINOR;
506 req.enables |= rte_cpu_to_le_32(
507 HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD);
508 memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
509 RTE_MIN(sizeof(req.vf_req_fwd),
510 sizeof(bp->pf.vf_req_fwd)));
513 req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1); /* TODO: Use MACRO */
514 memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
516 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
520 bp->flags |= BNXT_FLAG_REGISTERED;
525 int bnxt_hwrm_ver_get(struct bnxt *bp)
528 struct hwrm_ver_get_input req = {.req_type = 0 };
529 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
532 uint16_t max_resp_len;
533 char type[RTE_MEMZONE_NAMESIZE];
534 uint32_t dev_caps_cfg;
536 bp->max_req_len = HWRM_MAX_REQ_LEN;
537 HWRM_PREP(req, VER_GET, -1, resp);
539 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
540 req.hwrm_intf_min = HWRM_VERSION_MINOR;
541 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
544 * Hold the lock since we may be adjusting the response pointers.
546 rte_spinlock_lock(&bp->hwrm_lock);
547 rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));
551 RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
552 resp->hwrm_intf_maj, resp->hwrm_intf_min,
554 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
555 bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
556 (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
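/*
 * Example: firmware 20.6.54.0 is stored as
 * (20 << 24) | (6 << 16) | (54 << 8) | 0 = 0x14063600; the version checks
 * in bnxt_hwrm_cfa_vlan_antispoof_cfg() compare against the same encoding.
 */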
557 RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
558 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
560 my_version = HWRM_VERSION_MAJOR << 16;
561 my_version |= HWRM_VERSION_MINOR << 8;
562 my_version |= HWRM_VERSION_UPDATE;
564 fw_version = resp->hwrm_intf_maj << 16;
565 fw_version |= resp->hwrm_intf_min << 8;
566 fw_version |= resp->hwrm_intf_upd;
568 if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
569 RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
574 if (my_version != fw_version) {
575 RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
576 if (my_version < fw_version) {
578 "Firmware API version is newer than driver.\n");
580 "The driver may be missing features.\n");
583 "Firmware API version is older than driver.\n");
585 "Not all driver features may be functional.\n");
589 if (bp->max_req_len > resp->max_req_win_len) {
590 RTE_LOG(ERR, PMD, "Unsupported request length\n");
593 bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
594 max_resp_len = resp->max_resp_len;
595 dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);
597 if (bp->max_resp_len != max_resp_len) {
598 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
599 bp->pdev->addr.domain, bp->pdev->addr.bus,
600 bp->pdev->addr.devid, bp->pdev->addr.function);
602 rte_free(bp->hwrm_cmd_resp_addr);
604 bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
605 if (bp->hwrm_cmd_resp_addr == NULL) {
609 rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
610 bp->hwrm_cmd_resp_dma_addr =
611 rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
612 if (bp->hwrm_cmd_resp_dma_addr == 0) {
614 "Unable to map response buffer to physical memory.\n");
618 bp->max_resp_len = max_resp_len;
622 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
624 HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_INPUTUIRED)) {
625 RTE_LOG(DEBUG, PMD, "Short command supported\n");
627 rte_free(bp->hwrm_short_cmd_req_addr);
629 bp->hwrm_short_cmd_req_addr = rte_malloc(type,
631 if (bp->hwrm_short_cmd_req_addr == NULL) {
635 rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
636 bp->hwrm_short_cmd_req_dma_addr =
637 rte_mem_virt2phy(bp->hwrm_short_cmd_req_addr);
638 if (bp->hwrm_short_cmd_req_dma_addr == 0) {
639 rte_free(bp->hwrm_short_cmd_req_addr);
641 "Unable to map buffer to physical memory.\n");
646 bp->flags |= BNXT_FLAG_SHORT_CMD;
650 rte_spinlock_unlock(&bp->hwrm_lock);
654 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
657 struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
658 struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
660 if (!(bp->flags & BNXT_FLAG_REGISTERED))
663 HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
666 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
670 bp->flags &= ~BNXT_FLAG_REGISTERED;
675 static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
678 struct hwrm_port_phy_cfg_input req = {0};
679 struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
680 uint32_t enables = 0;
681 uint32_t link_speed_mask =
682 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
684 HWRM_PREP(req, PORT_PHY_CFG, -1, resp);
687 req.flags = rte_cpu_to_le_32(conf->phy_flags);
688 req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
690 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
691 * any auto mode, even "none".
693 if (!conf->link_speed) {
694 req.auto_mode = conf->auto_mode;
695 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
696 if (conf->auto_mode ==
697 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK) {
698 req.auto_link_speed_mask =
699 conf->auto_link_speed_mask;
700 enables |= link_speed_mask;
702 if (bp->link_info.auto_link_speed) {
703 req.auto_link_speed =
704 bp->link_info.auto_link_speed;
706 HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
709 req.auto_duplex = conf->duplex;
710 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
711 req.auto_pause = conf->auto_pause;
712 req.force_pause = conf->force_pause;
713 /* Set force_pause if there is no auto or if there is a force */
714 if (req.auto_pause && !req.force_pause)
715 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
717 enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
719 req.enables = rte_cpu_to_le_32(enables);
722 rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
723 RTE_LOG(INFO, PMD, "Force Link Down\n");
726 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
733 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
734 struct bnxt_link_info *link_info)
737 struct hwrm_port_phy_qcfg_input req = {0};
738 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
740 HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);
742 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
746 link_info->phy_link_status = resp->link;
748 (link_info->phy_link_status ==
749 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
750 link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
751 link_info->duplex = resp->duplex;
752 link_info->pause = resp->pause;
753 link_info->auto_pause = resp->auto_pause;
754 link_info->force_pause = resp->force_pause;
755 link_info->auto_mode = resp->auto_mode;
757 link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
758 link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
759 link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
760 link_info->phy_ver[0] = resp->phy_maj;
761 link_info->phy_ver[1] = resp->phy_min;
762 link_info->phy_ver[2] = resp->phy_bld;
767 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
770 struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
771 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
773 HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);
775 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
779 #define GET_QUEUE_INFO(x) \
780 bp->cos_queue[x].id = resp->queue_id##x; \
781 bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
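/*
 * Illustrative expansion: GET_QUEUE_INFO(0) becomes
 *    bp->cos_queue[0].id = resp->queue_id0;
 *    bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 * and the macro is applied for each CoS queue index in turn.
 */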
795 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
796 struct bnxt_ring *ring,
797 uint32_t ring_type, uint32_t map_index,
798 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
801 uint32_t enables = 0;
802 struct hwrm_ring_alloc_input req = {.req_type = 0 };
803 struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
805 HWRM_PREP(req, RING_ALLOC, -1, resp);
807 req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
808 req.fbo = rte_cpu_to_le_32(0);
809 /* Association of ring index with doorbell index */
810 req.logical_id = rte_cpu_to_le_16(map_index);
811 req.length = rte_cpu_to_le_32(ring->ring_size);
814 case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
815 req.queue_id = bp->cos_queue[0].id;
817 case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
818 req.ring_type = ring_type;
819 req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
820 req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
821 if (stats_ctx_id != INVALID_STATS_CTX_ID)
823 HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
825 case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
826 req.ring_type = ring_type;
828 * TODO: Some HWRM versions crash with
829 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
831 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
834 RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
838 req.enables = rte_cpu_to_le_32(enables);
840 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
842 if (rc || resp->error_code) {
843 if (rc == 0 && resp->error_code)
844 rc = rte_le_to_cpu_16(resp->error_code);
846 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
848 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
850 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
852 "hwrm_ring_alloc rx failed. rc:%d\n", rc);
854 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
856 "hwrm_ring_alloc tx failed. rc:%d\n", rc);
859 RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
864 ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
868 int bnxt_hwrm_ring_free(struct bnxt *bp,
869 struct bnxt_ring *ring, uint32_t ring_type)
872 struct hwrm_ring_free_input req = {.req_type = 0 };
873 struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
875 HWRM_PREP(req, RING_FREE, -1, resp);
877 req.ring_type = ring_type;
878 req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
880 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
882 if (rc || resp->error_code) {
883 if (rc == 0 && resp->error_code)
884 rc = rte_le_to_cpu_16(resp->error_code);
887 case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
888 RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
891 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
892 RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
895 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
896 RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
900 RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
907 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
910 struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
911 struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
913 HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);
915 req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
916 req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
917 req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
918 req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
920 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
924 bp->grp_info[idx].fw_grp_id =
925 rte_le_to_cpu_16(resp->ring_group_id);
930 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
933 struct hwrm_ring_grp_free_input req = {.req_type = 0 };
934 struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
936 HWRM_PREP(req, RING_GRP_FREE, -1, resp);
938 req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
940 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
944 bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
948 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
951 struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
952 struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
954 if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
957 HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
959 req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
961 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
968 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
969 unsigned int idx __rte_unused)
972 struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
973 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
975 HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);
977 req.update_period_ms = rte_cpu_to_le_32(0);
980 rte_cpu_to_le_64(cpr->hw_stats_map);
982 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
986 cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
991 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
992 unsigned int idx __rte_unused)
995 struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
996 struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
998 HWRM_PREP(req, STAT_CTX_FREE, -1, resp);
1000 req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
1002 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1009 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1012 struct hwrm_vnic_alloc_input req = { 0 };
1013 struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
1015 /* map ring groups to this vnic */
1016 RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
1017 vnic->start_grp_id, vnic->end_grp_id);
1018 for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
1019 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
1020 vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
1021 vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
1022 vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
1023 vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
1024 vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
1025 ETHER_CRC_LEN + VLAN_TAG_SIZE;
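/*
 * e.g. with the default 1500-byte MTU this programs an MRU of
 * 1500 + 14 (Ethernet header) + 4 (CRC) + 4 (VLAN tag) = 1522 bytes.
 */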
1026 HWRM_PREP(req, VNIC_ALLOC, -1, resp);
1028 if (vnic->func_default)
1029 req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
1030 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1034 vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
1035 RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
1039 static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
1040 struct bnxt_vnic_info *vnic,
1041 struct bnxt_plcmodes_cfg *pmode)
1044 struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
1045 struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1047 HWRM_PREP(req, VNIC_PLCMODES_QCFG, -1, resp);
1049 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1051 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1055 pmode->flags = rte_le_to_cpu_32(resp->flags);
1056 /* dflt_vnic bit doesn't exist in the _cfg command */
1057 pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
1058 pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
1059 pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
1060 pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);
1065 static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
1066 struct bnxt_vnic_info *vnic,
1067 struct bnxt_plcmodes_cfg *pmode)
1070 struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1071 struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1073 HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);
1075 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1076 req.flags = rte_cpu_to_le_32(pmode->flags);
1077 req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
1078 req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
1079 req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
1080 req.enables = rte_cpu_to_le_32(
1081 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
1082 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
1083 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
1086 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1093 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1096 struct hwrm_vnic_cfg_input req = {.req_type = 0 };
1097 struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1098 uint32_t ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1099 struct bnxt_plcmodes_cfg pmodes;
1101 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1102 RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
1106 rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
1110 HWRM_PREP(req, VNIC_CFG, -1, resp);
/* Only RSS is supported for now; TBD: COS & LB */
1114 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
1115 HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
1116 if (vnic->lb_rule != 0xffff)
1117 ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
1118 if (vnic->cos_rule != 0xffff)
1119 ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
1120 if (vnic->rss_rule != 0xffff)
1121 ctx_enable_flag = HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
1122 req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
1123 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1124 req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
1125 req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
1126 req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
1127 req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
1128 req.mru = rte_cpu_to_le_16(vnic->mru);
1129 if (vnic->func_default)
1131 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
1132 if (vnic->vlan_strip)
1134 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
1137 rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
1138 if (vnic->roce_dual)
1139 req.flags |= rte_cpu_to_le_32(
1140 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
1141 if (vnic->roce_only)
1142 req.flags |= rte_cpu_to_le_32(
1143 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
1144 if (vnic->rss_dflt_cr)
1145 req.flags |= rte_cpu_to_le_32(
1146 HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);
1148 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1152 rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);
1157 int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
1161 struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
1162 struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1164 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1165 RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
1168 HWRM_PREP(req, VNIC_QCFG, -1, resp);
1171 rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
1172 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1173 req.vf_id = rte_cpu_to_le_16(fw_vf_id);
1175 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1179 vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
1180 vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
1181 vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
1182 vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
1183 vnic->mru = rte_le_to_cpu_16(resp->mru);
1184 vnic->func_default = rte_le_to_cpu_32(
1185 resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
1186 vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
1187 HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
1188 vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
1189 HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
1190 vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
1191 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
1192 vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
1193 HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
1194 vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
1195 HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;
1200 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1203 struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
1204 struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
1205 bp->hwrm_cmd_resp_addr;
1207 HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);
1209 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1213 vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
1214 RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
1219 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1222 struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
1223 struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
1224 bp->hwrm_cmd_resp_addr;
1226 if (vnic->rss_rule == 0xffff) {
1227 RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
1230 HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);
1232 req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);
1234 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1238 vnic->rss_rule = INVALID_HW_RING_ID;
1243 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1246 struct hwrm_vnic_free_input req = {.req_type = 0 };
1247 struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
1249 if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
1250 RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
1254 HWRM_PREP(req, VNIC_FREE, -1, resp);
1256 req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
1258 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1262 vnic->fw_vnic_id = INVALID_HW_RING_ID;
1266 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
1267 struct bnxt_vnic_info *vnic)
1270 struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
1271 struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1273 HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);
1275 req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
1277 req.ring_grp_tbl_addr =
1278 rte_cpu_to_le_64(vnic->rss_table_dma_addr);
1279 req.hash_key_tbl_addr =
1280 rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
1281 req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);
1283 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1290 int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
1291 struct bnxt_vnic_info *vnic)
1294 struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
1295 struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1298 HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);
1300 req.flags = rte_cpu_to_le_32(
1301 HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
1303 req.enables = rte_cpu_to_le_32(
1304 HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
1306 size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
1307 size -= RTE_PKTMBUF_HEADROOM;
1309 req.jumbo_thresh = rte_cpu_to_le_16(size);
1310 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1312 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1319 int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
1320 struct bnxt_vnic_info *vnic, bool enable)
1323 struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
1324 struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1326 HWRM_PREP(req, VNIC_TPA_CFG, -1, resp);
1329 req.enables = rte_cpu_to_le_32(
1330 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
1331 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
1332 HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
1333 req.flags = rte_cpu_to_le_32(
1334 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
1335 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
1336 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
1337 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
1338 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
1339 HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
1340 req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
1341 req.max_agg_segs = rte_cpu_to_le_16(5);
1343 rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
1344 req.min_agg_len = rte_cpu_to_le_32(512);
1347 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1354 int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
1356 struct hwrm_func_cfg_input req = {0};
1357 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
1360 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
1361 req.enables = rte_cpu_to_le_32(
1362 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
1363 memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
1364 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
1366 HWRM_PREP(req, FUNC_CFG, -1, resp);
1368 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1371 bp->pf.vf_info[vf].random_mac = false;
1376 int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
1380 struct hwrm_func_qstats_input req = {.req_type = 0};
1381 struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1383 HWRM_PREP(req, FUNC_QSTATS, -1, resp);
1385 req.fid = rte_cpu_to_le_16(fid);
1387 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1392 *dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);
1397 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
1398 struct rte_eth_stats *stats)
1401 struct hwrm_func_qstats_input req = {.req_type = 0};
1402 struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
1404 HWRM_PREP(req, FUNC_QSTATS, -1, resp);
1406 req.fid = rte_cpu_to_le_16(fid);
1408 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1412 stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
1413 stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
1414 stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
1415 stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
1416 stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
1417 stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);
1419 stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
1420 stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
1421 stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
1422 stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
1423 stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
1424 stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);
1426 stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
1427 stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);
1429 stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);
1434 int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
1437 struct hwrm_func_clr_stats_input req = {.req_type = 0};
1438 struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
1440 HWRM_PREP(req, FUNC_CLR_STATS, -1, resp);
1442 req.fid = rte_cpu_to_le_16(fid);
1444 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1452 * HWRM utility functions
1455 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
1460 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1461 struct bnxt_tx_queue *txq;
1462 struct bnxt_rx_queue *rxq;
1463 struct bnxt_cp_ring_info *cpr;
1465 if (i >= bp->rx_cp_nr_rings) {
1466 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1469 rxq = bp->rx_queues[i];
1473 rc = bnxt_hwrm_stat_clear(bp, cpr);
1480 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1484 struct bnxt_cp_ring_info *cpr;
1486 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1488 if (i >= bp->rx_cp_nr_rings)
1489 cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1491 cpr = bp->rx_queues[i]->cp_ring;
1492 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1493 rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
1494 cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
/*
 * TODO: Need a better way to reset grp_info.stats_ctx for Rx rings
 * only; stats_ctx is not saved for Tx rings here.
 */
bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
1508 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1513 for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1514 struct bnxt_tx_queue *txq;
1515 struct bnxt_rx_queue *rxq;
1516 struct bnxt_cp_ring_info *cpr;
1518 if (i >= bp->rx_cp_nr_rings) {
1519 txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1522 rxq = bp->rx_queues[i];
1526 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
1534 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1539 for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {
1541 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
1543 "Attempt to free invalid ring group %d\n",
1548 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1556 static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1557 unsigned int idx __rte_unused)
1559 struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1561 bnxt_hwrm_ring_free(bp, cp_ring,
1562 HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
1563 cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1564 bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
1565 memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1566 sizeof(*cpr->cp_desc_ring));
1567 cpr->cp_raw_cons = 0;
1570 int bnxt_free_all_hwrm_rings(struct bnxt *bp)
1575 for (i = 0; i < bp->tx_cp_nr_rings; i++) {
1576 struct bnxt_tx_queue *txq = bp->tx_queues[i];
1577 struct bnxt_tx_ring_info *txr = txq->tx_ring;
1578 struct bnxt_ring *ring = txr->tx_ring_struct;
1579 struct bnxt_cp_ring_info *cpr = txq->cp_ring;
1580 unsigned int idx = bp->rx_cp_nr_rings + i + 1;
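/*
 * Ring group / completion ring index layout used throughout this function:
 * index 0 is the default completion ring, RX ring i uses index i + 1, and
 * TX ring i uses rx_cp_nr_rings + i + 1.
 */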
1582 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1583 bnxt_hwrm_ring_free(bp, ring,
1584 HWRM_RING_FREE_INPUT_RING_TYPE_TX);
1585 ring->fw_ring_id = INVALID_HW_RING_ID;
1586 memset(txr->tx_desc_ring, 0,
1587 txr->tx_ring_struct->ring_size *
1588 sizeof(*txr->tx_desc_ring));
1589 memset(txr->tx_buf_ring, 0,
1590 txr->tx_ring_struct->ring_size *
1591 sizeof(*txr->tx_buf_ring));
1595 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1596 bnxt_free_cp_ring(bp, cpr, idx);
1597 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1601 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1602 struct bnxt_rx_queue *rxq = bp->rx_queues[i];
1603 struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
1604 struct bnxt_ring *ring = rxr->rx_ring_struct;
1605 struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
1606 unsigned int idx = i + 1;
1608 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
1609 bnxt_hwrm_ring_free(bp, ring,
1610 HWRM_RING_FREE_INPUT_RING_TYPE_RX);
1611 ring->fw_ring_id = INVALID_HW_RING_ID;
1612 bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
1613 memset(rxr->rx_desc_ring, 0,
1614 rxr->rx_ring_struct->ring_size *
1615 sizeof(*rxr->rx_desc_ring));
1616 memset(rxr->rx_buf_ring, 0,
1617 rxr->rx_ring_struct->ring_size *
1618 sizeof(*rxr->rx_buf_ring));
1620 memset(rxr->ag_buf_ring, 0,
1621 rxr->ag_ring_struct->ring_size *
1622 sizeof(*rxr->ag_buf_ring));
1625 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1626 bnxt_free_cp_ring(bp, cpr, idx);
1627 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
1628 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1632 /* Default completion ring */
1634 struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;
1636 if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
1637 bnxt_free_cp_ring(bp, cpr, 0);
1638 cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
1645 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1650 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1651 rc = bnxt_hwrm_ring_grp_alloc(bp, i);
1658 void bnxt_free_hwrm_resources(struct bnxt *bp)
/* Release the HWRM command and response buffers */
1661 rte_free(bp->hwrm_cmd_resp_addr);
1662 rte_free(bp->hwrm_short_cmd_req_addr);
1663 bp->hwrm_cmd_resp_addr = NULL;
1664 bp->hwrm_short_cmd_req_addr = NULL;
1665 bp->hwrm_cmd_resp_dma_addr = 0;
1666 bp->hwrm_short_cmd_req_dma_addr = 0;
1669 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1671 struct rte_pci_device *pdev = bp->pdev;
1672 char type[RTE_MEMZONE_NAMESIZE];
1674 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
1675 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1676 bp->max_resp_len = HWRM_MAX_RESP_LEN;
1677 bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1678 rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
1679 if (bp->hwrm_cmd_resp_addr == NULL)
1681 bp->hwrm_cmd_resp_dma_addr =
1682 rte_mem_virt2phy(bp->hwrm_cmd_resp_addr);
1683 if (bp->hwrm_cmd_resp_dma_addr == 0) {
1685 "unable to map response address to physical memory\n");
1688 rte_spinlock_init(&bp->hwrm_lock);
1693 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1695 struct bnxt_filter_info *filter;
1698 STAILQ_FOREACH(filter, &vnic->filter, next) {
1699 rc = bnxt_hwrm_clear_filter(bp, filter);
1706 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1708 struct bnxt_filter_info *filter;
1711 STAILQ_FOREACH(filter, &vnic->filter, next) {
1712 rc = bnxt_hwrm_set_filter(bp, vnic->fw_vnic_id, filter);
1719 void bnxt_free_tunnel_ports(struct bnxt *bp)
1721 if (bp->vxlan_port_cnt)
1722 bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
1723 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
1725 if (bp->geneve_port_cnt)
1726 bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
1727 HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
1728 bp->geneve_port = 0;
1731 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1733 struct bnxt_vnic_info *vnic;
1736 if (bp->vnic_info == NULL)
1739 vnic = &bp->vnic_info[0];
1741 bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
1743 /* VNIC resources */
1744 for (i = 0; i < bp->nr_vnics; i++) {
1745 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1747 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1749 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1751 bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
1753 bnxt_hwrm_vnic_free(bp, vnic);
1755 /* Ring resources */
1756 bnxt_free_all_hwrm_rings(bp);
1757 bnxt_free_all_hwrm_ring_grps(bp);
1758 bnxt_free_all_hwrm_stat_ctxs(bp);
1759 bnxt_free_tunnel_ports(bp);
1762 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1764 uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1766 if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1767 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1769 switch (conf_link_speed) {
1770 case ETH_LINK_SPEED_10M_HD:
1771 case ETH_LINK_SPEED_100M_HD:
1772 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1774 return hw_link_duplex;
1777 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
1779 uint16_t eth_link_speed = 0;
1781 if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
1782 return ETH_LINK_SPEED_AUTONEG;
1784 switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
1785 case ETH_LINK_SPEED_100M:
1786 case ETH_LINK_SPEED_100M_HD:
1788 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
1790 case ETH_LINK_SPEED_1G:
1792 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
1794 case ETH_LINK_SPEED_2_5G:
1796 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
1798 case ETH_LINK_SPEED_10G:
1800 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
1802 case ETH_LINK_SPEED_20G:
1804 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
1806 case ETH_LINK_SPEED_25G:
1808 HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
1810 case ETH_LINK_SPEED_40G:
1812 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
1814 case ETH_LINK_SPEED_50G:
1816 HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
1820 "Unsupported link speed %d; default to AUTO\n",
1824 return eth_link_speed;
1827 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
1828 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
1829 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
1830 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
1832 static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
1836 if (link_speed == ETH_LINK_SPEED_AUTONEG)
1839 if (link_speed & ETH_LINK_SPEED_FIXED) {
1840 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
1842 if (one_speed & (one_speed - 1)) {
1844 "Invalid advertised speeds (%u) for port %u\n",
1845 link_speed, port_id);
1848 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
1850 "Unsupported advertised speed (%u) for port %u\n",
1851 link_speed, port_id);
1855 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
1857 "Unsupported advertised speeds (%u) for port %u\n",
1858 link_speed, port_id);
1866 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
1870 if (link_speed == ETH_LINK_SPEED_AUTONEG) {
1871 if (bp->link_info.support_speeds)
1872 return bp->link_info.support_speeds;
1873 link_speed = BNXT_SUPPORTED_SPEEDS;
1876 if (link_speed & ETH_LINK_SPEED_100M)
1877 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1878 if (link_speed & ETH_LINK_SPEED_100M_HD)
1879 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1880 if (link_speed & ETH_LINK_SPEED_1G)
1881 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
1882 if (link_speed & ETH_LINK_SPEED_2_5G)
1883 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
1884 if (link_speed & ETH_LINK_SPEED_10G)
1885 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
1886 if (link_speed & ETH_LINK_SPEED_20G)
1887 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
1888 if (link_speed & ETH_LINK_SPEED_25G)
1889 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
1890 if (link_speed & ETH_LINK_SPEED_40G)
1891 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
1892 if (link_speed & ETH_LINK_SPEED_50G)
1893 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
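/*
 * Example: a link_speeds config of ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G
 * yields a mask with only the 10GB and 25GB bits set, which
 * bnxt_hwrm_port_phy_cfg() then advertises for autonegotiation.
 */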
1897 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
1899 uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
1901 switch (hw_link_speed) {
1902 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
1903 eth_link_speed = ETH_SPEED_NUM_100M;
1905 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
1906 eth_link_speed = ETH_SPEED_NUM_1G;
1908 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
1909 eth_link_speed = ETH_SPEED_NUM_2_5G;
1911 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
1912 eth_link_speed = ETH_SPEED_NUM_10G;
1914 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
1915 eth_link_speed = ETH_SPEED_NUM_20G;
1917 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
1918 eth_link_speed = ETH_SPEED_NUM_25G;
1920 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
1921 eth_link_speed = ETH_SPEED_NUM_40G;
1923 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
1924 eth_link_speed = ETH_SPEED_NUM_50G;
1926 case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
1928 RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
1932 return eth_link_speed;
1935 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
1937 uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1939 switch (hw_link_duplex) {
1940 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
1941 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
1942 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1944 case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
1945 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
1948 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
1952 return eth_link_duplex;
1955 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
1958 struct bnxt_link_info *link_info = &bp->link_info;
1960 rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
1963 "Get link config failed with rc %d\n", rc);
1966 if (link_info->link_speed)
1968 bnxt_parse_hw_link_speed(link_info->link_speed);
1970 link->link_speed = ETH_SPEED_NUM_NONE;
1971 link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
1972 link->link_status = link_info->link_up;
1973 link->link_autoneg = link_info->auto_mode ==
1974 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
1975 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
1980 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
1983 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
1984 struct bnxt_link_info link_req;
1987 if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
1990 rc = bnxt_valid_link_speed(dev_conf->link_speeds,
1991 bp->eth_dev->data->port_id);
1995 memset(&link_req, 0, sizeof(link_req));
1996 link_req.link_up = link_up;
2000 speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2001 link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2003 link_req.phy_flags |=
2004 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2005 link_req.auto_mode =
2006 HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
2007 link_req.auto_link_speed_mask =
2008 bnxt_parse_eth_link_speed_mask(bp,
2009 dev_conf->link_speeds);
2011 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2012 link_req.link_speed = speed;
2013 RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
2015 link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2016 link_req.auto_pause = bp->link_info.auto_pause;
2017 link_req.force_pause = bp->link_info.force_pause;
2020 rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2023 "Set link config failed with rc %d\n", rc);
2031 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2033 struct hwrm_func_qcfg_input req = {0};
2034 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2037 HWRM_PREP(req, FUNC_QCFG, -1, resp);
2038 req.fid = rte_cpu_to_le_16(0xffff);
2040 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2044 /* Hard Coded.. 0xfff VLAN ID mask */
2045 bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2047 switch (resp->port_partition_type) {
2048 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2049 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2050 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2051 bp->port_partition_type = resp->port_partition_type;
2054 bp->port_partition_type = 0;
2061 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2062 struct hwrm_func_qcaps_output *qcaps)
2064 qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2065 memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2066 sizeof(qcaps->mac_address));
2067 qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2068 qcaps->max_rx_rings = fcfg->num_rx_rings;
2069 qcaps->max_tx_rings = fcfg->num_tx_rings;
2070 qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2071 qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2073 qcaps->first_vf_id = 0;
2074 qcaps->max_vnics = fcfg->num_vnics;
2075 qcaps->max_decap_records = 0;
2076 qcaps->max_encap_records = 0;
2077 qcaps->max_tx_wm_flows = 0;
2078 qcaps->max_tx_em_flows = 0;
2079 qcaps->max_rx_wm_flows = 0;
2080 qcaps->max_rx_em_flows = 0;
2081 qcaps->max_flow_id = 0;
2082 qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2083 qcaps->max_sp_tx_rings = 0;
2084 qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
2087 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2089 struct hwrm_func_cfg_input req = {0};
2090 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2093 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2094 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2095 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2096 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2097 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2098 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2099 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2100 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2101 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2102 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2103 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2104 req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2105 req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2106 ETHER_CRC_LEN + VLAN_TAG_SIZE);
2107 req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2108 req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2109 req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2110 req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2111 req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2112 req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2113 req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2114 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2115 req.fid = rte_cpu_to_le_16(0xffff);
2117 HWRM_PREP(req, FUNC_CFG, -1, resp);
2119 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2125 static void populate_vf_func_cfg_req(struct bnxt *bp,
2126 struct hwrm_func_cfg_input *req,
2129 req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2130 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2131 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2132 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2133 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2134 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2135 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2136 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2137 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2138 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2140 req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2141 ETHER_CRC_LEN + VLAN_TAG_SIZE);
2142 req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2143 ETHER_CRC_LEN + VLAN_TAG_SIZE);
2144 req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2146 req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2147 req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2149 req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2150 req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2151 req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2152 /* TODO: For now, do not support VMDq/RFS on VFs. */
2153 req->num_vnics = rte_cpu_to_le_16(1);
2154 req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
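/*
 * The PF splits its absolute maxima evenly between itself and the VFs:
 * e.g. with num_vfs == 4 and 80 completion rings available, each of the
 * five functions is offered 80 / (4 + 1) = 16 rings.  VNICs stay at 1 per
 * VF while VMDq/RFS is not supported on VFs.
 */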
2158 static void add_random_mac_if_needed(struct bnxt *bp,
2159 struct hwrm_func_cfg_input *cfg_req,
2162 struct ether_addr mac;
2164 if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2167 if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00", 6) == 0) {
2169 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2170 eth_random_addr(cfg_req->dflt_mac_addr);
2171 bp->pf.vf_info[vf].random_mac = true;
2173 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2177 static void reserve_resources_from_vf(struct bnxt *bp,
2178 struct hwrm_func_cfg_input *cfg_req,
2181 struct hwrm_func_qcaps_input req = {0};
2182 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2185 /* Get the actual allocated values now */
2186 HWRM_PREP(req, FUNC_QCAPS, -1, resp);
2187 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2188 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2191 RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
2192 copy_func_cfg_to_qcaps(cfg_req, resp);
2193 } else if (resp->error_code) {
2194 rc = rte_le_to_cpu_16(resp->error_code);
2195 RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
2196 copy_func_cfg_to_qcaps(cfg_req, resp);
2199 bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2200 bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2201 bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2202 bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2203 bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2204 bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2206 * TODO: While not supporting VMDq with VFs, max_vnics is always
2207 * forced to 1 in this case
2209 /* bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics); */
2210 bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2213 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2215 struct hwrm_func_qcfg_input req = {0};
2216 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2219 /* Query the VF's currently configured default VLAN */
2220 HWRM_PREP(req, FUNC_QCFG, -1, resp);
2221 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2222 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2224 RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
2226 } else if (resp->error_code) {
2227 rc = rte_le_to_cpu_16(resp->error_code);
2228 RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
2231 return rte_le_to_cpu_16(resp->vlan);
2234 static int update_pf_resource_max(struct bnxt *bp)
2236 struct hwrm_func_qcfg_input req = {0};
2237 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2240 /* And copy the allocated numbers into the pf struct */
2241 HWRM_PREP(req, FUNC_QCFG, -1, resp);
2242 req.fid = rte_cpu_to_le_16(0xffff);
2243 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2246 /* Only TX ring value reflects actual allocation? TODO */
2247 bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2248 bp->pf.evb_mode = resp->evb_mode;
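/*
 * PF-only (no VFs) allocation path: after querying capabilities, hand all
 * resources to the PF with STD_TX_RING_MODE explicitly disabled.
 */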
2253 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2258 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
2262 rc = bnxt_hwrm_func_qcaps(bp);
2266 bp->pf.func_cfg_flags &=
2267 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2268 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2269 bp->pf.func_cfg_flags |=
2270 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2271 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2275 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2277 struct hwrm_func_cfg_input req = {0};
2278 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2285 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
2289 rc = bnxt_hwrm_func_qcaps(bp);
2294 bp->pf.active_vfs = num_vfs;
2297 * First, configure the PF to only use one TX ring. This ensures that
2298 * there are enough rings for all VFs.
2300 * If we don't do this, when we call func_alloc() later, we will lock
2301 extra rings to the PF that won't be available during func_cfg() of the VFs.
2304 * This has been fixed with firmware versions above 20.6.54
2306 bp->pf.func_cfg_flags &=
2307 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2308 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2309 bp->pf.func_cfg_flags |=
2310 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2311 rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2316 * Now, create and register a buffer to hold forwarded VF requests
2318 req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2319 bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2320 page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
2321 if (bp->pf.vf_req_buf == NULL) {
2325 for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2326 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2327 for (i = 0; i < num_vfs; i++)
2328 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2329 (i * HWRM_MAX_REQ_LEN);
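/*
 * The forwarded-request buffer is one contiguous allocation, locked page
 * by page, with a HWRM_MAX_REQ_LEN slice reserved per VF; each slice is
 * where the firmware places that VF's forwarded HWRM requests once the
 * buffer is registered by bnxt_hwrm_func_buf_rgtr() below.
 */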
2331 rc = bnxt_hwrm_func_buf_rgtr(bp);
2335 populate_vf_func_cfg_req(bp, &req, num_vfs);
2337 bp->pf.active_vfs = 0;
2338 for (i = 0; i < num_vfs; i++) {
2339 add_random_mac_if_needed(bp, &req, i);
2341 HWRM_PREP(req, FUNC_CFG, -1, resp);
2342 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2343 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2344 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2346 /* Clear enable flag for next pass */
2347 req.enables &= ~rte_cpu_to_le_32(
2348 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2350 if (rc || resp->error_code) {
2352 "Failed to initizlie VF %d\n", i);
2354 "Not all VFs available. (%d, %d)\n",
2355 rc, resp->error_code);
2359 reserve_resources_from_vf(bp, &req, i);
2360 bp->pf.active_vfs++;
2361 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2365 * Now configure the PF to use "the rest" of the resources.
2366 * We're still using STD_TX_RING_MODE here, which limits the number of
2367 * TX rings but allows QoS to function properly. Without it, bandwidth
2368 * settings on the PF rings would be broken.
2370 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2374 rc = update_pf_resource_max(bp);
2381 bnxt_hwrm_func_buf_unrgtr(bp);
2385 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2387 struct hwrm_func_cfg_input req = {0};
2388 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2391 HWRM_PREP(req, FUNC_CFG, -1, resp);
2393 req.fid = rte_cpu_to_le_16(0xffff);
2394 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2395 req.evb_mode = bp->pf.evb_mode;
2397 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2403 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2404 uint8_t tunnel_type)
2406 struct hwrm_tunnel_dst_port_alloc_input req = {0};
2407 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2410 HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, -1, resp);
2411 req.tunnel_type = tunnel_type;
2412 req.tunnel_dst_port_val = port;
2413 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2416 switch (tunnel_type) {
2417 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2418 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2419 bp->vxlan_port = port;
2421 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2422 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2423 bp->geneve_port = port;
2431 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2432 uint8_t tunnel_type)
2434 struct hwrm_tunnel_dst_port_free_input req = {0};
2435 struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2438 HWRM_PREP(req, TUNNEL_DST_PORT_FREE, -1, resp);
2439 req.tunnel_type = tunnel_type;
2440 req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2441 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2447 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2450 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2451 struct hwrm_func_cfg_input req = {0};
2454 HWRM_PREP(req, FUNC_CFG, -1, resp);
2455 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2456 req.flags = rte_cpu_to_le_32(flags);
2457 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2463 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2465 uint32_t *flag = flagp;
2467 vnic->flags = *flag;
2470 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2472 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
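/*
 * vf_vnic_set_rxmask_cb() and bnxt_set_rx_mask_no_vlan() above are shaped
 * to be plugged into bnxt_hwrm_func_vf_vnic_query_and_config() below as
 * the vnic_cb/hwrm_cb pair. A hypothetical caller applying a new set of
 * VNIC flags to every VNIC of a VF would look roughly like:
 *
 *	uint32_t flag = vnic_flags;	(caller-supplied value)
 *	bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
 *			vf_vnic_set_rxmask_cb, &flag,
 *			bnxt_set_rx_mask_no_vlan);
 */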
2475 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2478 struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2479 struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2481 HWRM_PREP(req, FUNC_BUF_RGTR, -1, resp);
2483 req.req_buf_num_pages = rte_cpu_to_le_16(1);
2484 req.req_buf_page_size = rte_cpu_to_le_16(
2485 page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2486 req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2487 req.req_buf_page_addr[0] =
2488 rte_cpu_to_le_64(rte_mem_virt2phy(bp->pf.vf_req_buf));
2489 if (req.req_buf_page_addr[0] == 0) {
2491 "unable to map buffer address to physical memory\n");
2495 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2502 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2505 struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2506 struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2508 HWRM_PREP(req, FUNC_BUF_UNRGTR, -1, resp);
2510 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
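/*
 * Point the firmware's async event notifications at the driver's default
 * completion ring. The PF variant below uses FUNC_CFG with fid 0xffff;
 * the VF variant uses FUNC_VF_CFG with the same async_event_cr field.
 */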
2517 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2519 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2520 struct hwrm_func_cfg_input req = {0};
2523 HWRM_PREP(req, FUNC_CFG, -1, resp);
2524 req.fid = rte_cpu_to_le_16(0xffff);
2525 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2526 req.enables = rte_cpu_to_le_32(
2527 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2528 req.async_event_cr = rte_cpu_to_le_16(
2529 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2530 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2536 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2538 struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2539 struct hwrm_func_vf_cfg_input req = {0};
2542 HWRM_PREP(req, FUNC_VF_CFG, -1, resp);
2543 req.enables = rte_cpu_to_le_32(
2544 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2545 req.async_event_cr = rte_cpu_to_le_16(
2546 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2547 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2553 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2555 struct hwrm_func_cfg_input req = {0};
2556 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2557 uint16_t dflt_vlan, fid;
2558 uint32_t func_cfg_flags;
2561 HWRM_PREP(req, FUNC_CFG, -1, resp);
2564 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2565 fid = bp->pf.vf_info[vf].fid;
2566 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2568 fid = rte_cpu_to_le_16(0xffff);
2569 func_cfg_flags = bp->pf.func_cfg_flags;
2570 dflt_vlan = bp->vlan;
2573 req.flags = rte_cpu_to_le_32(func_cfg_flags);
2574 req.fid = rte_cpu_to_le_16(fid);
2575 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2576 req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2578 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2584 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2585 uint16_t max_bw, uint16_t enables)
2587 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2588 struct hwrm_func_cfg_input req = {0};
2591 HWRM_PREP(req, FUNC_CFG, -1, resp);
2592 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2593 req.enables |= rte_cpu_to_le_32(enables);
2594 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2595 req.max_bw = rte_cpu_to_le_32(max_bw);
2596 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2602 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2604 struct hwrm_func_cfg_input req = {0};
2605 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2608 HWRM_PREP(req, FUNC_CFG, -1, resp);
2609 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2610 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2611 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2612 req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
2614 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2620 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2621 void *encaped, size_t ec_size)
2624 struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2625 struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2627 if (ec_size > sizeof(req.encap_request))
2630 HWRM_PREP(req, REJECT_FWD_RESP, -1, resp);
2632 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2633 memcpy(req.encap_request, encaped, ec_size);
2635 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2642 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2643 struct ether_addr *mac)
2645 struct hwrm_func_qcfg_input req = {0};
2646 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2649 HWRM_PREP(req, FUNC_QCFG, -1, resp);
2650 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2651 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2655 memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2659 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2660 void *encaped, size_t ec_size)
2663 struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2664 struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2666 if (ec_size > sizeof(req.encap_request))
2669 HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
2671 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2672 memcpy(req.encap_request, encaped, ec_size);
2674 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
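/*
 * Query one statistics context and fold its counters into the per-queue
 * fields of rte_eth_stats: ucast/mcast/bcast packets and bytes are summed
 * into q_ipackets/q_ibytes and q_opackets/q_obytes, while rx/tx errors
 * and rx drops are accumulated into q_errors.
 */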
2681 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
2682 struct rte_eth_stats *stats)
2685 struct hwrm_stat_ctx_query_input req = {.req_type = 0};
2686 struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
2688 HWRM_PREP(req, STAT_CTX_QUERY, -1, resp);
2690 req.stat_ctx_id = rte_cpu_to_le_32(cid);
2692 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2696 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2697 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2698 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2699 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2700 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2701 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2703 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2704 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2705 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2706 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2707 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2708 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2710 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
2711 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
2712 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
2717 int bnxt_hwrm_port_qstats(struct bnxt *bp)
2719 struct hwrm_port_qstats_input req = {0};
2720 struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2721 struct bnxt_pf_info *pf = &bp->pf;
2724 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2727 HWRM_PREP(req, PORT_QSTATS, -1, resp);
2728 req.port_id = rte_cpu_to_le_16(pf->port_id);
2729 req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
2730 req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
2731 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2736 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
2738 struct hwrm_port_clr_stats_input req = {0};
2739 struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2740 struct bnxt_pf_info *pf = &bp->pf;
2743 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2746 HWRM_PREP(req, PORT_CLR_STATS, -1, resp);
2747 req.port_id = rte_cpu_to_le_16(pf->port_id);
2748 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2753 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
2755 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2756 struct hwrm_port_led_qcaps_input req = {0};
2762 HWRM_PREP(req, PORT_LED_QCAPS, -1, resp);
2763 req.port_id = bp->pf.port_id;
2764 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2767 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
2770 bp->num_leds = resp->num_leds;
2771 memcpy(bp->leds, &resp->led0_id,
2772 sizeof(bp->leds[0]) * bp->num_leds);
2773 for (i = 0; i < bp->num_leds; i++) {
2774 struct bnxt_led_info *led = &bp->leds[i];
2776 uint16_t caps = led->led_state_caps;
2778 if (!led->led_group_id ||
2779 !BNXT_LED_ALT_BLINK_CAP(caps)) {
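/*
 * Drive all LEDs cached by bnxt_hwrm_port_led_qcaps(): when led_on is
 * set, each LED is put into BLINKALT state with a 500ms on/off duration,
 * otherwise it is returned to its default state.
 */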
2788 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
2790 struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2791 struct hwrm_port_led_cfg_input req = {0};
2792 struct bnxt_led_cfg *led_cfg;
2793 uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
2794 uint16_t duration = 0;
2797 if (!bp->num_leds || BNXT_VF(bp))
2800 HWRM_PREP(req, PORT_LED_CFG, -1, resp);
2802 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
2803 duration = rte_cpu_to_le_16(500);
2805 req.port_id = bp->pf.port_id;
2806 req.num_leds = bp->num_leds;
2807 led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
2808 for (i = 0; i < bp->num_leds; i++, led_cfg++) {
2809 req.enables |= BNXT_LED_DFLT_ENABLES(i);
2810 led_cfg->led_id = bp->leds[i].led_id;
2811 led_cfg->led_state = led_state;
2812 led_cfg->led_blink_on = duration;
2813 led_cfg->led_blink_off = duration;
2814 led_cfg->led_group_id = bp->leds[i].led_group_id;
2817 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2824 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
2826 uint32_t *count = cbdata;
2828 *count = *count + 1;
2831 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
2832 struct bnxt_vnic_info *vnic __rte_unused)
2837 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
2841 bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
2842 &count, bnxt_vnic_count_hwrm_stub);
2847 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
2850 struct hwrm_func_vf_vnic_ids_query_input req = {0};
2851 struct hwrm_func_vf_vnic_ids_query_output *resp =
2852 bp->hwrm_cmd_resp_addr;
2855 /* First query all VNIC ids */
2856 HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, -1, resp);
2858 req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
2859 req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
2860 req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2phy(vnic_ids));
2862 if (req.vnic_id_tbl_addr == 0) {
2864 "unable to map VNIC ID table address to physical memory\n");
2867 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2869 RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
2871 } else if (resp->error_code) {
2872 rc = rte_le_to_cpu_16(resp->error_code);
2873 RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
2877 return rte_le_to_cpu_32(resp->vnic_id_cnt);
2881 * This function queries the VNIC IDs for a specified VF. It then calls
2882 * the vnic_cb to update the necessary field in vnic_info with cbdata.
2883 * Then it calls the hwrm_cb function to program this new vnic configuration.
2885 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
2886 void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
2887 int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
2889 struct bnxt_vnic_info vnic;
2891 int i, num_vnic_ids;
2896 /* First query all VNIC ids */
2897 vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
2898 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
2899 RTE_CACHE_LINE_SIZE);
2900 if (vnic_ids == NULL) {
2904 for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
2905 rte_mem_lock_page(((char *)vnic_ids) + sz);
2907 num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
2909 if (num_vnic_ids < 0)
2910 return num_vnic_ids;
2912 /* Retrieve each VNIC, let vnic_cb update it, then reprogram it via hwrm_cb */
2914 for (i = 0; i < num_vnic_ids; i++) {
2915 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
2916 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
2917 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
2920 if (vnic.mru <= 4) /* Indicates unallocated */
2923 vnic_cb(&vnic, cbdata);
2925 rc = hwrm_cb(bp, &vnic);
2935 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
2938 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2939 struct hwrm_func_cfg_input req = {0};
2942 HWRM_PREP(req, FUNC_CFG, -1, resp);
2943 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2944 req.enables |= rte_cpu_to_le_32(
2945 HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
2946 req.vlan_antispoof_mode = on ?
2947 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
2948 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
2949 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
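/*
 * Walk the VF's VNIC IDs and return the firmware id of the VNIC marked
 * as the function default; logs an error if no default VNIC is found.
 */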
2955 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
2957 struct bnxt_vnic_info vnic;
2960 int num_vnic_ids, i;
2964 vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
2965 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
2966 RTE_CACHE_LINE_SIZE);
2967 if (vnic_ids == NULL) {
2972 for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
2973 rte_mem_lock_page(((char *)vnic_ids) + sz);
2975 rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
2981 * Loop through to find the default VNIC ID.
2982 * TODO: The easier way would be to obtain the resp->dflt_vnic_id
2983 * by sending the hwrm_func_qcfg command to the firmware.
2985 for (i = 0; i < num_vnic_ids; i++) {
2986 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
2987 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
2988 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
2989 bp->pf.first_vf_id + vf);
2992 if (vnic.func_default) {
2994 return vnic.fw_vnic_id;
2997 /* Could not find a default VNIC. */
2998 RTE_LOG(ERR, PMD, "No default VNIC\n");