X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbnxt%2Fbnxt_hwrm.c;h=09a73286b39ff2249df2965873eab36ceebe6e78;hb=02a95625fe9c23dd7fb4cd7e4175c51db695be27;hp=cda2c8d1d80d7b119e5cff98ab057474c655d9ab;hpb=fc2d87b935bc4365799371404ae2793762c9d633;p=dpdk.git

diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index cda2c8d1d8..09a73286b3 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -11,9 +11,9 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "bnxt.h"
-#include "bnxt_cpr.h"
 #include "bnxt_filter.h"
 #include "bnxt_hwrm.h"
 #include "bnxt_rxq.h"
@@ -24,9 +24,6 @@
 #include "bnxt_vnic.h"
 #include "hsi_struct_def_dpdk.h"
 
-#include 
-
-#define HWRM_CMD_TIMEOUT 6000000
 #define HWRM_SPEC_CODE_1_8_3 0x10803
 #define HWRM_VERSION_1_9_1 0x10901
 #define HWRM_VERSION_1_9_2 0x10903
@@ -77,9 +74,9 @@ static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem,
 
 /*
  * HWRM Functions (sent to HWRM)
- * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
- * fails (ie: a timeout), and a positive non-zero HWRM error code if the HWRM
- * command was failed by the ChiMP.
+ * These are named bnxt_hwrm_*() and return 0 on success, -ETIMEDOUT (-110)
+ * if the HWRM command times out, or a negative error code if the HWRM
+ * command fails in the FW.
 */
 
 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
@@ -97,6 +94,13 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
		GRCPF_REG_KONG_CHANNEL_OFFSET : GRCPF_REG_CHIMP_CHANNEL_OFFSET;
	uint16_t mb_trigger_offset = use_kong_mb ?
		GRCPF_REG_KONG_COMM_TRIGGER : GRCPF_REG_CHIMP_COMM_TRIGGER;
+	uint32_t timeout;
+
+	/* Do not send HWRM commands to firmware in error state */
+	if (bp->flags & BNXT_FLAG_FATAL_ERROR)
+		return 0;
+
+	timeout = bp->hwrm_cmd_timeout;
 
	if (bp->flags & BNXT_FLAG_SHORT_CMD ||
	    msg_len > bp->max_req_len) {
@@ -115,9 +119,6 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
		data = (uint32_t *)&short_input;
		msg_len = sizeof(short_input);
 
-		/* Sync memory write before updating doorbell */
-		rte_wmb();
-
		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}
 
@@ -137,11 +138,17 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
	/* Ring channel doorbell */
	bar = (uint8_t *)bp->bar0 + mb_trigger_offset;
	rte_write32(1, bar);
+	/*
+	 * Make sure the channel doorbell ring command completes before
+	 * reading the response to avoid getting stale or invalid
+	 * responses.
+	 */
+	rte_io_mb();
 
	/* Poll for the valid bit */
-	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
+	for (i = 0; i < timeout; i++) {
		/* Sanity check on the resp->resp_len */
-		rte_rmb();
+		rte_cio_rmb();
		if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
@@ -151,7 +158,12 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
		rte_delay_us(1);
	}
 
-	if (i >= HWRM_CMD_TIMEOUT) {
+	if (i >= timeout) {
+		/* Suppress VER_GET timeout messages during reset recovery */
+		if (bp->flags & BNXT_FLAG_FW_RESET &&
+		    rte_cpu_to_le_16(req->req_type) == HWRM_VER_GET)
+			return -ETIMEDOUT;
+
		PMD_DRV_LOG(ERR, "Error(timeout) sending msg 0x%04x\n",
			    req->req_type);
		return -ETIMEDOUT;
@@ -160,25 +172,29 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
 }
 
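[Editor's note: the barrier choreography above is the heart of this function, so here is a minimal, self-contained sketch of the doorbell-then-poll protocol it implements. The response layout, field names and helper name are illustrative stand-ins, not driver code; only the rte_write32()/rte_io_mb()/rte_delay_us() calls are the real DPDK APIs used in the patch.]

#include <errno.h>
#include <stdint.h>
#include <rte_cycles.h>
#include <rte_io.h>

/* Illustrative DMA-able response buffer: firmware writes 'valid' last. */
struct fw_resp {
	uint16_t len;
	uint8_t  payload[125];
	uint8_t  valid;		/* nonzero once the response is complete */
};

static int ring_and_poll(volatile void *doorbell,
			 volatile struct fw_resp *resp, uint32_t timeout_us)
{
	uint32_t i;

	rte_write32(1, doorbell);	/* posted MMIO write: ring doorbell */
	rte_io_mb();			/* order the doorbell write before any
					 * read of the response memory */
	for (i = 0; i < timeout_us; i++) {
		if (resp->valid)	/* firmware sets this byte last */
			return 0;
		rte_delay_us(1);
	}
	return -ETIMEDOUT;		/* same contract as the driver: -110 */
}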
 /*
  * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
  * spinlock, and does initial processing.
  *
  * HWRM_CHECK_RESULT() returns errors on failure and may not be used. It
  * releases the spinlock only if it returns. If the regular int return codes
  * are not used by the function, HWRM_CHECK_RESULT() should not be used
  * directly, rather it should be copied and modified to suit the function.
  *
  * HWRM_UNLOCK() must be called after all response processing is completed.
  */
-#define HWRM_PREP(req, type, kong) do { \
+#define HWRM_PREP(req, type, kong) do {	\
	rte_spinlock_lock(&bp->hwrm_lock); \
+	if (bp->hwrm_cmd_resp_addr == NULL) { \
+		rte_spinlock_unlock(&bp->hwrm_lock); \
+		return -EACCES; \
+	} \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
-	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
-	req.cmpl_ring = rte_cpu_to_le_16(-1); \
-	req.seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
-		rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
-	req.target_id = rte_cpu_to_le_16(0xffff); \
-	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
+	(req)->req_type = rte_cpu_to_le_16(type); \
+	(req)->cmpl_ring = rte_cpu_to_le_16(-1); \
+	(req)->seq_id = kong ? rte_cpu_to_le_16(bp->kong_cmd_seq++) :\
+		rte_cpu_to_le_16(bp->chimp_cmd_seq++); \
+	(req)->target_id = rte_cpu_to_le_16(0xffff); \
+	(req)->resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
 } while (0)
 
 #define HWRM_CHECK_RESULT_SILENT() do {\
@@ -199,8 +215,14 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
		rte_spinlock_unlock(&bp->hwrm_lock); \
		if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
			rc = -EACCES; \
-		else if (rc > 0) \
+		else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
+			rc = -ENOSPC; \
+		else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
			rc = -EINVAL; \
+		else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
+			rc = -ENOTSUP; \
+		else if (rc > 0) \
+			rc = -EIO; \
		return rc; \
	} \
	if (resp->error_code) { \
@@ -221,21 +243,110 @@ static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
		rte_spinlock_unlock(&bp->hwrm_lock); \
		if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
			rc = -EACCES; \
-		else if (rc > 0) \
+		else if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) \
+			rc = -ENOSPC; \
+		else if (rc == HWRM_ERR_CODE_INVALID_PARAMS) \
			rc = -EINVAL; \
+		else if (rc == HWRM_ERR_CODE_CMD_NOT_SUPPORTED) \
+			rc = -ENOTSUP; \
+		else if (rc > 0) \
+			rc = -EIO; \
		return rc; \
	} \
 } while (0)
 
 #define HWRM_UNLOCK() rte_spinlock_unlock(&bp->hwrm_lock)
 
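[Editor's note: the three macros above define a fixed call protocol for every command wrapper in this file. The sketch below shows the canonical shape; it mirrors bnxt_hwrm_cfa_l2_clear_rx_mask() a few hunks further down, with the request fields trimmed for brevity. 'bp' and 'resp' are the names the macros expect to find in the enclosing scope.]

/* Canonical bnxt_hwrm_*() wrapper built from the macros above. */
int bnxt_hwrm_example_cmd(struct bnxt *bp, uint16_t vnic_id)
{
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	/* Takes bp->hwrm_lock and fills the common header fields */
	HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);

	req.vnic_id = rte_cpu_to_le_16(vnic_id);	/* command payload */

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);

	HWRM_CHECK_RESULT();	/* on error: maps the HWRM code to -errno,
				 * drops the lock and returns */

	/* ... read any response fields from 'resp' while still locked ... */

	HWRM_UNLOCK();		/* every success path must drop the lock */
	return rc;
}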
+int bnxt_hwrm_tf_message_direct(struct bnxt *bp,
+				bool use_kong_mb,
+				uint16_t msg_type,
+				void *msg,
+				uint32_t msg_len,
+				void *resp_msg,
+				uint32_t resp_len)
+{
+	int rc = 0;
+	bool mailbox = BNXT_USE_CHIMP_MB;
+	struct input *req = msg;
+	struct output *resp = bp->hwrm_cmd_resp_addr;
+
+	if (use_kong_mb)
+		mailbox = BNXT_USE_KONG(bp);
+
+	HWRM_PREP(req, msg_type, mailbox);
+
+	rc = bnxt_hwrm_send_message(bp, req, msg_len, mailbox);
+
+	HWRM_CHECK_RESULT();
+
+	if (resp_msg)
+		memcpy(resp_msg, resp, resp_len);
+
+	HWRM_UNLOCK();
+
+	return rc;
+}
+
+int bnxt_hwrm_tf_message_tunneled(struct bnxt *bp,
+				  bool use_kong_mb,
+				  uint16_t tf_type,
+				  uint16_t tf_subtype,
+				  uint32_t *tf_response_code,
+				  void *msg,
+				  uint32_t msg_len,
+				  void *response,
+				  uint32_t response_len)
+{
+	int rc = 0;
+	struct hwrm_cfa_tflib_input req = { .req_type = 0 };
+	struct hwrm_cfa_tflib_output *resp = bp->hwrm_cmd_resp_addr;
+	bool mailbox = BNXT_USE_CHIMP_MB;
+
+	if (msg_len > sizeof(req.tf_req))
+		return -ENOMEM;
+
+	if (use_kong_mb)
+		mailbox = BNXT_USE_KONG(bp);
+
+	HWRM_PREP(&req, HWRM_TF, mailbox);
+	/* Build request using the user supplied request payload.
+	 * TLV request size is checked at build time against HWRM
+	 * request max size, thus no checking required.
+	 */
+	req.tf_type = tf_type;
+	req.tf_subtype = tf_subtype;
+	memcpy(req.tf_req, msg, msg_len);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), mailbox);
+	HWRM_CHECK_RESULT();
+
+	/* Copy the resp to user provided response buffer */
+	if (response != NULL)
+		/* Post process response data. We need to copy only
+		 * the 'payload' as the HWRM data structure really is
+		 * HWRM header + msg header + payload and the TFLIB
+		 * only provided a payload place holder.
+		 */
+		if (response_len != 0) {
+			memcpy(response,
+			       resp->tf_resp,
+			       response_len);
+		}
+
+	/* Extract the internal tflib response code */
+	*tf_response_code = resp->tf_resp_code;
+	HWRM_UNLOCK();
+
+	return rc;
+}
+
 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 {
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
 
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.mask = 0;
@@ -260,23 +371,20 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
		return rc;
 
-	HWRM_PREP(req, CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_CFA_L2_SET_RX_MASK, BNXT_USE_CHIMP_MB);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 
-	/* FIXME add multicast flag, when multicast adding options is supported
-	 * by ethtool.
-	 */
	if (vnic->flags & BNXT_VNIC_INFO_BCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
	if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
+
	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
-	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
+
+	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI) {
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
-	if (vnic->flags & BNXT_VNIC_INFO_MCAST)
-		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
-	if (vnic->mc_addr_cnt) {
+	} else if (vnic->flags & BNXT_VNIC_INFO_MCAST) {
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
		req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
		req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
@@ -284,8 +392,8 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
	if (vlan_table) {
		if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
			mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
-		req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
-			rte_mem_virt2iova(vlan_table));
+		req.vlan_tag_tbl_addr =
+			rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
		req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
	}
	req.mask = rte_cpu_to_le_32(mask);
@@ -322,11 +430,11 @@ int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
			return 0;
		}
	}
-	HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_CFA_VLAN_ANTISPOOF_CFG, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(fid);
	req.vlan_tag_mask_tbl_addr =
-		rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
+		rte_cpu_to_le_64(rte_malloc_virt2iova(vlan_table));
	req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
@@ -338,16 +446,33 @@ int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
-			struct bnxt_filter_info *filter)
+			   struct bnxt_filter_info *filter)
 {
	int rc = 0;
+	struct bnxt_filter_info *l2_filter = filter;
+	struct bnxt_vnic_info *vnic = NULL;
	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
 
	if (filter->fw_l2_filter_id == UINT64_MAX)
		return 0;
 
-	HWRM_PREP(req, CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);
+	if (filter->matching_l2_fltr_ptr)
+		l2_filter = filter->matching_l2_fltr_ptr;
+
+	PMD_DRV_LOG(DEBUG, "filter: %p l2_filter: %p ref_cnt: %d\n",
+		    filter, l2_filter, l2_filter->l2_ref_cnt);
+
+	if (l2_filter->l2_ref_cnt == 0)
+		return 0;
+
+	if (l2_filter->l2_ref_cnt > 0)
+		l2_filter->l2_ref_cnt--;
+
+	if (l2_filter->l2_ref_cnt > 0)
+		return 0;
+
+	HWRM_PREP(&req, HWRM_CFA_L2_FILTER_FREE, BNXT_USE_CHIMP_MB);
 
	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
 
@@ -357,6 +482,14 @@ int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
	HWRM_UNLOCK();
 
	filter->fw_l2_filter_id = UINT64_MAX;
+	if (l2_filter->l2_ref_cnt == 0) {
+		vnic = l2_filter->vnic;
+		if (vnic) {
+			STAILQ_REMOVE(&vnic->filter, l2_filter,
+				      bnxt_filter_info, next);
+			bnxt_free_filter(bp, l2_filter);
+		}
+	}
 
	return 0;
 }
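[Editor's note: the reference counting introduced here lets several flows share one underlying firmware L2 filter; only the last user's teardown frees the hardware resource and unlinks the node. A compressed sketch of that lifecycle, with hypothetical helper names that are not part of the driver:]

/* Hypothetical sketch of the shared-filter lifecycle implied above. */
struct l2_filter {
	int l2_ref_cnt;		/* how many users share this HW filter */
};

static void l2_filter_get(struct l2_filter *f)
{
	f->l2_ref_cnt++;	/* bnxt_hwrm_set_l2_filter() does this on alloc */
}

static int l2_filter_put(struct l2_filter *f)
{
	if (f->l2_ref_cnt == 0)
		return 0;	/* nothing to release */
	if (--f->l2_ref_cnt > 0)
		return 0;	/* other users remain: keep the HW filter */
	return 1;		/* last user gone: free HW filter and node */
}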
@@ -390,11 +523,9 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
	if (filter->fw_l2_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_l2_filter(bp, filter);
 
-	HWRM_PREP(req, CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_CFA_L2_FILTER_ALLOC, BNXT_USE_CHIMP_MB);
 
	req.flags = rte_cpu_to_le_32(filter->flags);
-	req.flags |=
-	rte_cpu_to_le_32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST);
 
	enables = filter->enables |
		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
@@ -424,6 +555,11 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
		req.src_id = rte_cpu_to_le_32(filter->src_id);
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
		req.src_type = filter->src_type;
+	if (filter->pri_hint) {
+		req.pri_hint = filter->pri_hint;
+		req.l2_filter_id_hint =
+			rte_cpu_to_le_64(filter->l2_filter_id_hint);
+	}
 
	req.enables = rte_cpu_to_le_32(enables);
 
@@ -432,8 +568,11 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
	HWRM_CHECK_RESULT();
 
	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
+	filter->flow_id = rte_le_to_cpu_32(resp->flow_id);
	HWRM_UNLOCK();
 
+	filter->l2_ref_cnt++;
+
	return rc;
 }
 
@@ -447,7 +586,7 @@ int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
	if (!ptp)
		return 0;
 
-	HWRM_PREP(req, PORT_MAC_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_PORT_MAC_CFG, BNXT_USE_CHIMP_MB);
 
	if (ptp->rx_filter)
		flags |= HWRM_PORT_MAC_CFG_INPUT_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
@@ -477,11 +616,10 @@ static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
	struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
 
-/*	if (bp->hwrm_spec_code < 0x10801 || ptp)  TBD  */
	if (ptp)
		return 0;
 
-	HWRM_PREP(req, PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_PORT_MAC_PTP_QCFG, BNXT_USE_CHIMP_MB);
 
	req.port_id = rte_cpu_to_le_16(bp->pf.port_id);
 
@@ -489,31 +627,37 @@ static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
 
	HWRM_CHECK_RESULT();
 
-	if (!(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
+	if (!BNXT_CHIP_THOR(bp) &&
+	    !(resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_DIRECT_ACCESS))
		return 0;
 
+	if (resp->flags & HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_ONE_STEP_TX_TS)
+		bp->flags |= BNXT_FLAG_FW_CAP_ONE_STEP_TX_TS;
+
	ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);
	if (!ptp)
		return -ENOMEM;
 
-	ptp->rx_regs[BNXT_PTP_RX_TS_L] =
-		rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
-	ptp->rx_regs[BNXT_PTP_RX_TS_H] =
-		rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
-	ptp->rx_regs[BNXT_PTP_RX_SEQ] =
-		rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
-	ptp->rx_regs[BNXT_PTP_RX_FIFO] =
-		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
-	ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
-		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
-	ptp->tx_regs[BNXT_PTP_TX_TS_L] =
-		rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
-	ptp->tx_regs[BNXT_PTP_TX_TS_H] =
-		rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
-	ptp->tx_regs[BNXT_PTP_TX_SEQ] =
-		rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
-	ptp->tx_regs[BNXT_PTP_TX_FIFO] =
-		rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
+	if (!BNXT_CHIP_THOR(bp)) {
+		ptp->rx_regs[BNXT_PTP_RX_TS_L] =
+			rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
+		ptp->rx_regs[BNXT_PTP_RX_TS_H] =
+			rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
+		ptp->rx_regs[BNXT_PTP_RX_SEQ] =
+			rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
+		ptp->rx_regs[BNXT_PTP_RX_FIFO] =
+			rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
+		ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
+			rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
+		ptp->tx_regs[BNXT_PTP_TX_TS_L] =
+			rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
+		ptp->tx_regs[BNXT_PTP_TX_TS_H] =
+			rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
+		ptp->tx_regs[BNXT_PTP_TX_SEQ] =
+			rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
+		ptp->tx_regs[BNXT_PTP_TX_FIFO] =
+			rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);
+	}
 
	ptp->bp = bp;
	bp->ptp_cfg = ptp;
@@ -530,7 +674,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
	uint32_t flags;
	int i;
 
-	HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
 
	req.fid = rte_cpu_to_le_16(0xffff);
 
@@ -588,8 +732,9 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
	bp->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
	bp->max_rx_em_flows = rte_le_to_cpu_16(resp->max_rx_em_flows);
-	bp->max_l2_ctx =
-		rte_le_to_cpu_16(resp->max_l2_ctxs) + bp->max_rx_em_flows;
+	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+	if (!BNXT_CHIP_THOR(bp))
+		bp->max_l2_ctx += bp->max_rx_em_flows;
	/* TODO: For now, do not support VMDq/RFS on VFs. */
	if (BNXT_PF(bp)) {
		if (bp->pf.max_vfs)
@@ -599,6 +744,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
	} else {
		bp->max_vnics = 1;
	}
+	PMD_DRV_LOG(DEBUG, "Max l2_cntxts is %d vnics is %d\n",
+		    bp->max_l2_ctx, bp->max_vnics);
	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
	if (BNXT_PF(bp)) {
		bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
@@ -613,6 +760,17 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
		bp->flags |= BNXT_FLAG_EXT_STATS_SUPPORTED;
 
+	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE) {
+		bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
+		PMD_DRV_LOG(DEBUG, "Adapter Error recovery SUPPORTED\n");
+	}
+
+	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
+		bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
+
+	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE)
+		bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
+
	HWRM_UNLOCK();
 
	return rc;
@@ -633,6 +791,39 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
		bp->flags |= BNXT_FLAG_NEW_RM;
	}
 
+	/* On older FW,
+	 * bnxt_hwrm_func_resc_qcaps can fail and cause init failure.
+	 * But the error can be ignored. Return success.
+	 */
+
	return 0;
 }
 
+/* VNIC cap covers capability of all VNICs. So no need to pass vnic_id */
+int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
+{
+	int rc = 0;
+	struct hwrm_vnic_qcaps_input req = {.req_type = 0 };
+	struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+
+	HWRM_PREP(&req, HWRM_VNIC_QCAPS, BNXT_USE_CHIMP_MB);
+
+	req.target_id = rte_cpu_to_le_16(0xffff);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+	HWRM_CHECK_RESULT();
+
+	if (rte_le_to_cpu_32(resp->flags) &
+	    HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP) {
+		bp->vnic_cap_flags |= BNXT_VNIC_CAP_COS_CLASSIFY;
+		PMD_DRV_LOG(INFO, "CoS assignment capability enabled\n");
+	}
+
+	bp->max_tpa_v2 = rte_le_to_cpu_16(resp->max_aggs_supported);
+
+	HWRM_UNLOCK();
+
+	return rc;
+}
+
@@ -642,7 +833,7 @@ int bnxt_hwrm_func_reset(struct bnxt *bp)
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, FUNC_RESET, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_RESET, BNXT_USE_CHIMP_MB);
 
	req.enables = rte_cpu_to_le_32(0);
 
@@ -657,13 +848,25 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
 {
	int rc;
+	uint32_t flags = 0;
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
 
	if (bp->flags & BNXT_FLAG_REGISTERED)
		return 0;
 
-	HWRM_PREP(req, FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
+	if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
+		flags = HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
+	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
+		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT;
+
+	/* PFs and trusted VFs should indicate the support of the
+	 * Master capability on non-Stingray platforms
+	 */
+	if ((BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)) && !BNXT_STINGRAY(bp))
+		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;
+
+	HWRM_PREP(&req, HWRM_FUNC_DRV_RGTR, BNXT_USE_CHIMP_MB);
	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
	req.ver_maj = RTE_VER_YEAR;
@@ -683,21 +886,35 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
		 * this HWRM sniffer list in FW because DPDK PF driver does
		 * not support this.
		 */
-		req.flags =
-		rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE);
+		flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE;
	}
 
+	req.flags = rte_cpu_to_le_32(flags);
+
	req.async_event_fwd[0] |=
		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
				 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
-				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
+				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE |
+				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CHANGE |
+				 ASYNC_CMPL_EVENT_ID_RESET_NOTIFY);
+	if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
+		req.async_event_fwd[0] |=
+			rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_ERROR_RECOVERY);
	req.async_event_fwd[1] |=
		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
				 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);
+	if (BNXT_PF(bp))
+		req.async_event_fwd[1] |=
+			rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_DBG_NOTIFICATION);
 
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
	HWRM_CHECK_RESULT();
+
+	flags = rte_le_to_cpu_32(resp->flags);
+	if (flags & HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED)
+		bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
+
	HWRM_UNLOCK();
 
	bp->flags |= BNXT_FLAG_REGISTERED;
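[Editor's note: async_event_fwd[] is a per-32-bit-word event bitmap, which is why LINK_STATUS_CHANGE lands in word 0 while PF_DRVR_UNLOAD (a higher event id) lands in word 1. Assuming the usual id/32 word and id%32 bit convention behind the ASYNC_CMPL_EVENT_ID_* masks, a hypothetical helper makes the split explicit; the name is illustrative, not part of the driver:]

/* Hypothetical helper: mark async event 'event_id' for forwarding in a
 * bitmap laid out as little-endian 32-bit words (async_event_fwd[]).
 */
static void fwd_async_event(uint32_t *async_event_fwd, uint16_t event_id)
{
	async_event_fwd[event_id / 32] |=
		rte_cpu_to_le_32(1U << (event_id % 32));
}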
@@ -721,7 +938,7 @@ int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
	struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_vf_cfg_input req = {0};
 
-	HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB);
 
	enables = HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS  |
		  HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS   |
@@ -739,7 +956,8 @@ int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
					    AGG_RING_MULTIPLIER);
	req.num_stat_ctxs = rte_cpu_to_le_16(bp->rx_nr_rings + bp->tx_nr_rings);
	req.num_cmpl_rings = rte_cpu_to_le_16(bp->rx_nr_rings +
-					      bp->tx_nr_rings);
+					      bp->tx_nr_rings +
+					      BNXT_NUM_ASYNC_CPR(bp));
	req.num_vnics = rte_cpu_to_le_16(bp->rx_nr_rings);
	if (bp->vf_resv_strategy ==
	    HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MINIMAL_STATIC) {
@@ -749,6 +967,10 @@ int bnxt_hwrm_func_reserve_vf_resc(struct bnxt *bp, bool test)
		req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
		req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
		req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
+	} else if (bp->vf_resv_strategy ==
+		   HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MAXIMAL) {
+		enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
+		req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
	}
 
	if (test)
@@ -782,12 +1004,12 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
	struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_resource_qcaps_input req = {0};
 
-	HWRM_PREP(req, FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_RESOURCE_QCAPS, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(0xffff);
 
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
-	HWRM_CHECK_RESULT();
+	HWRM_CHECK_RESULT_SILENT();
 
	if (BNXT_VF(bp)) {
		bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
@@ -798,9 +1020,9 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
		/* func_resource_qcaps does not return max_rx_em_flows.
		 * So use the value provided by func_qcaps.
		 */
-		bp->max_l2_ctx =
-			rte_le_to_cpu_16(resp->max_l2_ctxs) +
-			bp->max_rx_em_flows;
+		bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
+		if (!BNXT_CHIP_THOR(bp))
+			bp->max_l2_ctx += bp->max_rx_em_flows;
		bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
		bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
	}
@@ -815,7 +1037,7 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
	return rc;
 }
 
-int bnxt_hwrm_ver_get(struct bnxt *bp)
+int bnxt_hwrm_ver_get(struct bnxt *bp, uint32_t timeout)
 {
	int rc = 0;
	struct hwrm_ver_get_input req = {.req_type = 0 };
@@ -826,7 +1048,8 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
	uint32_t dev_caps_cfg;
 
	bp->max_req_len = HWRM_MAX_REQ_LEN;
-	HWRM_PREP(req, VER_GET, BNXT_USE_CHIMP_MB);
+	bp->hwrm_cmd_timeout = timeout;
+	HWRM_PREP(&req, HWRM_VER_GET, BNXT_USE_CHIMP_MB);
 
	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
@@ -834,7 +1057,10 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
 
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
-	HWRM_CHECK_RESULT();
+	if (bp->flags & BNXT_FLAG_FW_RESET)
+		HWRM_CHECK_RESULT_SILENT();
+	else
+		HWRM_CHECK_RESULT();
 
	PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
		resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
@@ -852,6 +1078,13 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
	fw_version |= resp->hwrm_intf_upd_8b;
	bp->hwrm_spec_code = fw_version;
 
+	/* def_req_timeout value is in milliseconds */
+	bp->hwrm_cmd_timeout = rte_le_to_cpu_16(resp->def_req_timeout);
+	/* convert timeout to usec */
+	bp->hwrm_cmd_timeout *= 1000;
+	if (!bp->hwrm_cmd_timeout)
+		bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
+
	if (resp->hwrm_intf_maj_8b != HWRM_VERSION_MAJOR) {
		PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");
		rc = -EINVAL;
@@ -882,9 +1115,8 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
			rc = -ENOMEM;
			goto error;
		}
-		rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
		bp->hwrm_cmd_resp_dma_addr =
-			rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
+			rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
		if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
			PMD_DRV_LOG(ERR,
			"Unable to map response buffer to physical memory.\n");
@@ -919,9 +1151,8 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
			rc = -ENOMEM;
			goto error;
		}
-		rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
		bp->hwrm_short_cmd_req_dma_addr =
-			rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
+			rte_malloc_virt2iova(bp->hwrm_short_cmd_req_addr);
		if (bp->hwrm_short_cmd_req_dma_addr == RTE_BAD_IOVA) {
			rte_free(bp->hwrm_short_cmd_req_addr);
			PMD_DRV_LOG(ERR,
@@ -938,6 +1169,18 @@ int bnxt_hwrm_ver_get(struct bnxt *bp)
	if (dev_caps_cfg &
	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
		PMD_DRV_LOG(DEBUG, "FW supports Trusted VFs\n");
+	if (dev_caps_cfg &
+	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED) {
+		bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_MGMT;
+		PMD_DRV_LOG(DEBUG, "FW supports advanced flow management\n");
+	}
+
+	if (dev_caps_cfg &
+	    HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED) {
+		PMD_DRV_LOG(DEBUG, "FW supports advanced flow counters\n");
+		bp->fw_cap |= BNXT_FW_CAP_ADV_FLOW_COUNTERS;
+	}
+
 error:
	HWRM_UNLOCK();
 
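[Editor's note: the repeated rte_mem_virt2iova() to rte_malloc_virt2iova() conversion in this patch matters because these buffers come from rte_malloc(); rte_malloc_virt2iova() resolves IOVAs for malloc-heap memory directly, so the rte_mem_lock_page() calls become unnecessary and are dropped. A minimal sketch of the allocation pattern, under the assumption that error handling mirrors the driver's:]

#include <stddef.h>
#include <rte_malloc.h>
#include <rte_memory.h>

/* Sketch: allocate a DMA-able command buffer and learn its IOVA. */
static void *alloc_dma_buf(size_t len, rte_iova_t *iova)
{
	void *va = rte_malloc("cmd_buf", len, 0);

	if (va == NULL)
		return NULL;

	*iova = rte_malloc_virt2iova(va);	/* IOVA of rte_malloc memory */
	if (*iova == RTE_BAD_IOVA) {
		rte_free(va);
		return NULL;
	}
	return va;
}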
@@ -953,7 +1196,7 @@ int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
	if (!(bp->flags & BNXT_FLAG_REGISTERED))
		return 0;
 
-	HWRM_PREP(req, FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_DRV_UNRGTR, BNXT_USE_CHIMP_MB);
	req.flags = flags;
 
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
@@ -961,8 +1204,6 @@ int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();
 
-	bp->flags &= ~BNXT_FLAG_REGISTERED;
-
	return rc;
 }
 
@@ -973,7 +1214,7 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;
 
-	HWRM_PREP(req, PORT_PHY_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_PORT_PHY_CFG, BNXT_USE_CHIMP_MB);
 
	if (conf->link_up) {
		/* Setting Fixed Speed. But AutoNeg is ON, So disable it */
@@ -1037,7 +1278,7 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_PORT_PHY_QCFG, BNXT_USE_CHIMP_MB);
 
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
@@ -1078,49 +1319,101 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
	return rc;
 }
 
+static bool bnxt_find_lossy_profile(struct bnxt *bp)
+{
+	int i = 0;
+
+	for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
+		if (bp->tx_cos_queue[i].profile ==
+		    HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
+			bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
+			return true;
+		}
+	}
+	return false;
+}
+
+static void bnxt_find_first_valid_profile(struct bnxt *bp)
+{
+	int i = 0;
+
+	for (i = BNXT_COS_QUEUE_COUNT - 1; i >= 0; i--) {
+		if (bp->tx_cos_queue[i].profile !=
+		    HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN &&
+		    bp->tx_cos_queue[i].id !=
+		    HWRM_QUEUE_SERVICE_PROFILE_UNKNOWN) {
+			bp->tx_cosq_id[0] = bp->tx_cos_queue[i].id;
+			break;
+		}
+	}
+}
+
 int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
 {
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
+	uint32_t dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
	int i;
 
-	HWRM_PREP(req, QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);
+get_rx_info:
+	HWRM_PREP(&req, HWRM_QUEUE_QPORTCFG, BNXT_USE_CHIMP_MB);
 
-	req.flags = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX;
-	/* HWRM Version >= 1.9.1 */
-	if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1)
+	req.flags = rte_cpu_to_le_32(dir);
+	/* HWRM Version >= 1.9.1 only if COS Classification is not required. */
+	if (bp->hwrm_spec_code >= HWRM_VERSION_1_9_1 &&
+	    !(bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY))
		req.drv_qmap_cap =
			HWRM_QUEUE_QPORTCFG_INPUT_DRV_QMAP_CAP_ENABLED;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
	HWRM_CHECK_RESULT();
 
-#define GET_QUEUE_INFO(x) \
-	bp->cos_queue[x].id = resp->queue_id##x; \
-	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
-
-	GET_QUEUE_INFO(0);
-	GET_QUEUE_INFO(1);
-	GET_QUEUE_INFO(2);
-	GET_QUEUE_INFO(3);
-	GET_QUEUE_INFO(4);
-	GET_QUEUE_INFO(5);
-	GET_QUEUE_INFO(6);
-	GET_QUEUE_INFO(7);
+	if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
+		GET_TX_QUEUE_INFO(0);
+		GET_TX_QUEUE_INFO(1);
+		GET_TX_QUEUE_INFO(2);
+		GET_TX_QUEUE_INFO(3);
+		GET_TX_QUEUE_INFO(4);
+		GET_TX_QUEUE_INFO(5);
+		GET_TX_QUEUE_INFO(6);
+		GET_TX_QUEUE_INFO(7);
+	} else {
+		GET_RX_QUEUE_INFO(0);
+		GET_RX_QUEUE_INFO(1);
+		GET_RX_QUEUE_INFO(2);
+		GET_RX_QUEUE_INFO(3);
+		GET_RX_QUEUE_INFO(4);
+		GET_RX_QUEUE_INFO(5);
+		GET_RX_QUEUE_INFO(6);
+		GET_RX_QUEUE_INFO(7);
+	}
 
	HWRM_UNLOCK();
 
+	if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX)
+		goto done;
+
	if (bp->hwrm_spec_code < HWRM_VERSION_1_9_1) {
-		bp->tx_cosq_id = bp->cos_queue[0].id;
+		bp->tx_cosq_id[0] = bp->tx_cos_queue[0].id;
	} else {
+		int j;
+
		/* iterate and find the COSq profile to use for Tx */
-		for (i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
-			if (bp->cos_queue[i].profile ==
-				HWRM_QUEUE_SERVICE_PROFILE_LOSSY) {
-				bp->tx_cosq_id = bp->cos_queue[i].id;
-				break;
+		if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
+			for (j = 0, i = 0; i < BNXT_COS_QUEUE_COUNT; i++) {
+				if (bp->tx_cos_queue[i].id != 0xff)
+					bp->tx_cosq_id[j++] =
						bp->tx_cos_queue[i].id;
			}
+		} else {
+			/* When CoS classification is disabled, for normal NIC
+			 * operations, ideally we should look to use LOSSY.
+			 * If not found, fall back to the first valid profile.
+			 */
+			if (!bnxt_find_lossy_profile(bp))
+				bnxt_find_first_valid_profile(bp);
+		}
	}
 
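[Editor's note: GET_TX_QUEUE_INFO()/GET_RX_QUEUE_INFO() replace the removed GET_QUEUE_INFO() macro, but their definitions sit outside this hunk. By analogy with the removed macro they presumably expand along the following lines; this is a hedged reconstruction for readability, not text from the patch:]

/* Presumed shape of the per-direction accessor, mirroring the removed
 * GET_QUEUE_INFO(x); the real definitions live elsewhere in the patch.
 */
#define GET_TX_QUEUE_INFO(x) \
	do { \
		bp->tx_cos_queue[x].id = resp->queue_id##x; \
		bp->tx_cos_queue[x].profile = \
			resp->queue_id##x##_service_profile; \
	} while (0)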
@@ -1130,15 +1423,20 @@ int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
		bp->max_tc = BNXT_MAX_QUEUE;
	bp->max_q = bp->max_tc;
 
-	PMD_DRV_LOG(DEBUG, "Tx Cos Queue to use: %d\n", bp->tx_cosq_id);
+	if (dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
+		dir = HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX;
+		goto get_rx_info;
+	}
 
+done:
	return rc;
 }
 
 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
-			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
+			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id,
+			 uint16_t tx_cosq_id)
 {
	int rc = 0;
	uint32_t enables = 0;
@@ -1147,7 +1445,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
	struct rte_mempool *mb_pool;
	uint16_t rx_buf_size;
 
-	HWRM_PREP(req, RING_ALLOC, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_RING_ALLOC, BNXT_USE_CHIMP_MB);
 
	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
@@ -1160,7 +1458,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
		req.ring_type = ring_type;
		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
		req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
-		req.queue_id = rte_cpu_to_le_16(bp->tx_cosq_id);
+		req.queue_id = rte_cpu_to_le_16(tx_cosq_id);
		if (stats_ctx_id != INVALID_STATS_CTX_ID)
			enables |=
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
@@ -1173,6 +1471,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
		mb_pool = bp->rx_queues[0]->mb_pool;
		rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
			      RTE_PKTMBUF_HEADROOM;
+		rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
		req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
		enables |= HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID;
@@ -1203,6 +1502,7 @@ int bnxt_hwrm_ring_alloc(struct bnxt *bp,
		mb_pool = bp->rx_queues[0]->mb_pool;
		rx_buf_size = rte_pktmbuf_data_room_size(mb_pool) -
			      RTE_PKTMBUF_HEADROOM;
+		rx_buf_size = RTE_MIN(BNXT_MAX_PKT_LEN, rx_buf_size);
		req.rx_buf_size = rte_cpu_to_le_16(rx_buf_size);
		req.stat_ctx_id = rte_cpu_to_le_32(stats_ctx_id);
@@ -1269,7 +1569,7 @@ int bnxt_hwrm_ring_free(struct bnxt *bp,
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, RING_FREE, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_RING_FREE, BNXT_USE_CHIMP_MB);
 
	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
@@ -1317,7 +1617,7 @@ int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_RING_GRP_ALLOC, BNXT_USE_CHIMP_MB);
 
	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
@@ -1328,8 +1628,7 @@ int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
 
	HWRM_CHECK_RESULT();
 
-	bp->grp_info[idx].fw_grp_id =
-	    rte_le_to_cpu_16(resp->ring_group_id);
+	bp->grp_info[idx].fw_grp_id = rte_le_to_cpu_16(resp->ring_group_id);
 
	HWRM_UNLOCK();
 
@@ -1342,7 +1641,7 @@ int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, RING_GRP_FREE, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_RING_GRP_FREE, BNXT_USE_CHIMP_MB);
 
	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
 
@@ -1364,7 +1663,7 @@ int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
		return rc;
 
-	HWRM_PREP(req, STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_STAT_CTX_CLR_STATS, BNXT_USE_CHIMP_MB);
 
	req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
 
@@ -1383,12 +1682,11 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_STAT_CTX_ALLOC, BNXT_USE_CHIMP_MB);
 
	req.update_period_ms = rte_cpu_to_le_32(0);
 
-	req.stats_dma_addr =
-	    rte_cpu_to_le_64(cpr->hw_stats_map);
+	req.stats_dma_addr = rte_cpu_to_le_64(cpr->hw_stats_map);
 
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
@@ -1408,7 +1706,7 @@ int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_STAT_CTX_FREE, BNXT_USE_CHIMP_MB);
 
	req.stat_ctx_id = rte_cpu_to_le_32(cpr->hw_stats_ctx_id);
 
@@ -1441,9 +1739,8 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
 
 skip_ring_grps:
-	vnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
-				RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
-	HWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB);
+	vnic->mru = BNXT_VNIC_MRU(bp->eth_dev->data->mtu);
+	HWRM_PREP(&req, HWRM_VNIC_ALLOC, BNXT_USE_CHIMP_MB);
 
	if (vnic->func_default)
		req.flags =
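[Editor's note: BNXT_VNIC_MRU() centralizes the MRU arithmetic that was previously open-coded here and in the function-config paths below. Its definition is outside this diff; based on the expressions it replaces, it presumably looks roughly like the following hedged reconstruction:]

/* Presumed shape of the helper: MTU plus L2 header, CRC and VLAN tag
 * overhead, matching the open-coded math it replaces.
 */
#define BNXT_VNIC_MRU(mtu) \
	((mtu) + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + \
	 VLAN_TAG_SIZE * BNXT_NUM_VLANS)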
@@ -1466,7 +1763,7 @@ static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
	struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_VNIC_PLCMODES_QCFG, BNXT_USE_CHIMP_MB);
 
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 
@@ -1499,7 +1796,7 @@ static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
		return rc;
	}
 
-	HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
 
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.flags = rte_cpu_to_le_32(pmode->flags);
@@ -1538,12 +1835,32 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	if (rc)
		return rc;
 
-	HWRM_PREP(req, VNIC_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_VNIC_CFG, BNXT_USE_CHIMP_MB);
 
	if (BNXT_CHIP_THOR(bp)) {
-		struct bnxt_rx_queue *rxq = bp->eth_dev->data->rx_queues[0];
-		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
-		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+		int dflt_rxq = vnic->start_grp_id;
+		struct bnxt_rx_ring_info *rxr;
+		struct bnxt_cp_ring_info *cpr;
+		struct bnxt_rx_queue *rxq;
+		int i;
+
+		/*
+		 * The first active receive ring is used as the VNIC
+		 * default receive ring. If there are no active receive
+		 * rings (all corresponding receive queues are stopped),
+		 * the first receive ring is used.
+		 */
+		for (i = vnic->start_grp_id; i < vnic->end_grp_id; i++) {
+			rxq = bp->eth_dev->data->rx_queues[i];
+			if (rxq->rx_started) {
+				dflt_rxq = i;
+				break;
+			}
+		}
+
+		rxq = bp->eth_dev->data->rx_queues[dflt_rxq];
+		rxr = rxq->rx_ring;
+		cpr = rxq->cp_ring;
 
		req.default_rx_ring_id =
			rte_cpu_to_le_16(rxr->rx_ring_struct->fw_ring_id);
@@ -1564,6 +1881,11 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
	}
+	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_COS_CLASSIFY) {
+		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID;
+		req.queue_id = rte_cpu_to_le_16(vnic->cos_queue_id);
+	}
+
	enables |= ctx_enable_flag;
	req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
	req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
@@ -1617,7 +1939,7 @@ int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
		PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
		return rc;
	}
-	HWRM_PREP(req, VNIC_QCFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_VNIC_QCFG, BNXT_USE_CHIMP_MB);
 
	req.enables =
		rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
@@ -1660,7 +1982,7 @@ int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, BNXT_USE_CHIMP_MB);
 
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
	HWRM_CHECK_RESULT();
@@ -1676,8 +1998,9 @@ int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
	return rc;
 }
 
-int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
-			    struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
+static
+int _bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
+			     struct bnxt_vnic_info *vnic, uint16_t ctx_idx)
 {
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
@@ -1688,7 +2011,7 @@ int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
		PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
		return rc;
	}
-	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, BNXT_USE_CHIMP_MB);
 
	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(ctx_idx);
 
@@ -1700,6 +2023,28 @@ int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp,
	return rc;
 }
 
+int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+	int rc = 0;
+
+	if (BNXT_CHIP_THOR(bp)) {
+		int j;
+
+		for (j = 0; j < vnic->num_lb_ctxts; j++) {
+			rc = _bnxt_hwrm_vnic_ctx_free(bp,
+						      vnic,
+						      vnic->fw_grp_ids[j]);
+			vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
+		}
+		vnic->num_lb_ctxts = 0;
+	} else {
+		rc = _bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
+		vnic->rss_rule = INVALID_HW_RING_ID;
+	}
+
+	return rc;
+}
+
 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 {
	int rc = 0;
@@ -1711,7 +2056,7 @@ int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
		return rc;
	}
 
-	HWRM_PREP(req, VNIC_FREE, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_VNIC_FREE, BNXT_USE_CHIMP_MB);
 
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 
@@ -1738,7 +2083,7 @@ bnxt_hwrm_vnic_rss_cfg_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 
	for (i = 0; i < nr_ctxs; i++) {
-		HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
+		HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
 
		req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
		req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
@@ -1776,7 +2121,7 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
	if (BNXT_CHIP_THOR(bp))
		return bnxt_hwrm_vnic_rss_cfg_thor(bp, vnic);
 
-	HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, BNXT_USE_CHIMP_MB);
 
	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
	req.hash_mode_flags = vnic->hash_mode;
@@ -1809,7 +2154,7 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
		return rc;
	}
 
-	HWRM_PREP(req, VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_VNIC_PLCMODES_CFG, BNXT_USE_CHIMP_MB);
 
	req.flags = rte_cpu_to_le_32(
			HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
@@ -1819,6 +2164,7 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
 
	size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
	size -= RTE_PKTMBUF_HEADROOM;
+	size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
 
	req.jumbo_thresh = rte_cpu_to_le_16(size);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
@@ -1838,10 +2184,18 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
	struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
 
-	if (BNXT_CHIP_THOR(bp))
+	if (BNXT_CHIP_THOR(bp) && !bp->max_tpa_v2) {
+		if (enable)
+			PMD_DRV_LOG(ERR, "No HW support for LRO\n");
+		return -ENOTSUP;
+	}
+
+	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
+		PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
		return 0;
+	}
 
-	HWRM_PREP(req, VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_VNIC_TPA_CFG, BNXT_USE_CHIMP_MB);
 
	if (enable) {
		req.enables = rte_cpu_to_le_32(
@@ -1855,9 +2209,8 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
-		req.max_agg_segs = rte_cpu_to_le_16(5);
-		req.max_aggs =
-			rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
+		req.max_agg_segs = rte_cpu_to_le_16(BNXT_TPA_MAX_AGGS(bp));
+		req.max_aggs = rte_cpu_to_le_16(BNXT_TPA_MAX_SEGS(bp));
		req.min_agg_len = rte_cpu_to_le_32(512);
	}
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
@@ -1882,7 +2235,7 @@ int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
	memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
 
-	HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
@@ -1900,7 +2253,7 @@ int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
	struct hwrm_func_qstats_input req = {.req_type = 0};
	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
 
	req.fid = rte_cpu_to_le_16(fid);
 
@@ -1917,19 +2270,26 @@ int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
 }
 
 int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
-			  struct rte_eth_stats *stats)
+			  struct rte_eth_stats *stats,
+			  struct hwrm_func_qstats_output *func_qstats)
 {
	int rc = 0;
	struct hwrm_func_qstats_input req = {.req_type = 0};
	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, FUNC_QSTATS, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_QSTATS, BNXT_USE_CHIMP_MB);
 
	req.fid = rte_cpu_to_le_16(fid);
 
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
	HWRM_CHECK_RESULT();
+	if (func_qstats)
+		memcpy(func_qstats, resp,
+		       sizeof(struct hwrm_func_qstats_output));
+
+	if (!stats)
+		goto exit;
 
	stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
	stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
@@ -1949,6 +2309,7 @@ int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
	stats->ierrors = rte_le_to_cpu_64(resp->rx_drop_pkts);
	stats->oerrors = rte_le_to_cpu_64(resp->tx_discard_pkts);
 
+exit:
	HWRM_UNLOCK();
 
	return rc;
@@ -1960,7 +2321,7 @@ int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
	struct hwrm_func_clr_stats_input req = {.req_type = 0};
	struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
 
-	HWRM_PREP(req, FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_CLR_STATS, BNXT_USE_CHIMP_MB);
 
	req.fid = rte_cpu_to_le_16(fid);
 
@@ -1972,10 +2333,6 @@ int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
	return rc;
 }
 
-/*
- * HWRM utility functions
- */
-
 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
 {
	unsigned int i;
@@ -2001,7 +2358,8 @@ int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
	return 0;
 }
 
-int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
+static int
+bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
 {
	int rc;
	unsigned int i;
@@ -2052,7 +2410,8 @@ int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
	return rc;
 }
 
-int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
+static int
+bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
 {
	uint16_t idx;
	uint32_t rc = 0;
@@ -2073,7 +2432,7 @@ int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
	return rc;
 }
 
-static void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
 {
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
 
@@ -2083,9 +2442,10 @@ static void bnxt_free_nq_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
				     sizeof(*cpr->cp_desc_ring));
	cpr->cp_raw_cons = 0;
+	cpr->valid = 0;
 }
 
-static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
+void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
 {
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
 
@@ -2135,17 +2495,15 @@ void bnxt_free_hwrm_rx_ring(struct bnxt *bp, int queue_index)
		bp->grp_info[queue_index].ag_fw_ring_id = INVALID_HW_RING_ID;
	}
 
-	if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
+	if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
		bnxt_free_cp_ring(bp, cpr);
-		if (rxq->nq_ring)
-			bnxt_free_nq_ring(bp, rxq->nq_ring);
-	}
 
	if (BNXT_HAS_RING_GRPS(bp))
		bp->grp_info[queue_index].cp_fw_ring_id = INVALID_HW_RING_ID;
 }
 
-int bnxt_free_all_hwrm_rings(struct bnxt *bp)
+static int
+bnxt_free_all_hwrm_rings(struct bnxt *bp)
 {
	unsigned int i;
 
@@ -2171,8 +2529,6 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_free_cp_ring(bp, cpr);
			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
-			if (txq->nq_ring)
-				bnxt_free_nq_ring(bp, txq->nq_ring);
		}
	}
 
@@ -2198,6 +2554,10 @@ int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
	return rc;
 }
 
+/*
+ * HWRM utility functions
+ */
+
 void bnxt_free_hwrm_resources(struct bnxt *bp)
 {
	/* Release memzone */
@@ -2218,11 +2578,10 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	bp->max_resp_len = HWRM_MAX_RESP_LEN;
	bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
-	rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
	if (bp->hwrm_cmd_resp_addr == NULL)
		return -ENOMEM;
	bp->hwrm_cmd_resp_dma_addr =
-		rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
+		rte_malloc_virt2iova(bp->hwrm_cmd_resp_addr);
	if (bp->hwrm_cmd_resp_dma_addr == RTE_BAD_IOVA) {
		PMD_DRV_LOG(ERR,
			"unable to map response address to physical memory\n");
@@ -2233,7 +2592,8 @@ int bnxt_alloc_hwrm_resources(struct bnxt *bp)
	return 0;
 }
 
-int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+static int
+bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
 {
	struct bnxt_filter_info *filter;
	int rc = 0;
@@ -2243,11 +2603,9 @@ int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
			rc = bnxt_hwrm_clear_em_filter(bp, filter);
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
-		else
-			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+		rc = bnxt_hwrm_clear_l2_filter(bp, filter);
		STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
-		//if (rc)
-			//break;
+		bnxt_free_filter(bp, filter);
	}
	return rc;
 }
@@ -2259,20 +2617,18 @@ bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct rte_flow *flow;
	int rc = 0;
 
-	STAILQ_FOREACH(flow, &vnic->flow_list, next) {
+	while (!STAILQ_EMPTY(&vnic->flow_list)) {
+		flow = STAILQ_FIRST(&vnic->flow_list);
		filter = flow->filter;
-		PMD_DRV_LOG(ERR, "filter type %d\n", filter->filter_type);
+		PMD_DRV_LOG(DEBUG, "filter type %d\n", filter->filter_type);
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			rc = bnxt_hwrm_clear_em_filter(bp, filter);
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
-		else
-			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+		rc = bnxt_hwrm_clear_l2_filter(bp, filter);
 
		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);
-		//if (rc)
-			//break;
	}
	return rc;
 }
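[Editor's note: the switch from STAILQ_FOREACH() to the STAILQ_FIRST()/STAILQ_REMOVE() loop above is a use-after-free fix, since FOREACH would read flow->next from memory already released by rte_free(). The general pattern, reduced to a self-contained sketch:]

#include <stdlib.h>
#include <sys/queue.h>

struct node {
	STAILQ_ENTRY(node) next;
};
STAILQ_HEAD(node_list, node);

/* Sketch: drain 'head' without ever dereferencing a freed node. */
static void drain(struct node_list *head)
{
	while (!STAILQ_EMPTY(head)) {
		struct node *n = STAILQ_FIRST(head);

		STAILQ_REMOVE(head, n, node, next);	/* unlink first ... */
		free(n);				/* ... then release;
							 * n->next is never
							 * read again */
	}
}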
@@ -2298,7 +2654,8 @@ int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	return rc;
 }
 
-void bnxt_free_tunnel_ports(struct bnxt *bp)
+static void
+bnxt_free_tunnel_ports(struct bnxt *bp)
 {
	if (bp->vxlan_port_cnt)
		bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
@@ -2312,7 +2669,7 @@ void bnxt_free_tunnel_ports(struct bnxt *bp)
 
 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
 {
-	int i, j;
+	int i;
 
	if (bp->vnic_info == NULL)
		return;
@@ -2321,29 +2678,17 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp)
	 * Cleanup VNICs in reverse order, to make sure the L2 filter
	 * from vnic0 is last to be cleaned up.
	 */
-	for (i = bp->nr_vnics - 1; i >= 0; i--) {
+	for (i = bp->max_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
 
-		if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
-			PMD_DRV_LOG(DEBUG, "Invalid vNIC ID\n");
-			return;
-		}
+		if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+			continue;
 
		bnxt_clear_hwrm_vnic_flows(bp, vnic);
 
		bnxt_clear_hwrm_vnic_filters(bp, vnic);
 
-		if (BNXT_CHIP_THOR(bp)) {
-			for (j = 0; j < vnic->num_lb_ctxts; j++) {
-				bnxt_hwrm_vnic_ctx_free(bp, vnic,
-							vnic->fw_grp_ids[j]);
-				vnic->fw_grp_ids[j] = INVALID_HW_RING_ID;
-			}
-
-			vnic->num_lb_ctxts = 0;
-		} else {
-			bnxt_hwrm_vnic_ctx_free(bp, vnic, vnic->rss_rule);
-			vnic->rss_rule = INVALID_HW_RING_ID;
-		}
+		bnxt_hwrm_vnic_ctx_free(bp, vnic);
 
		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
 
@@ -2614,11 +2959,27 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
		goto port_phy_cfg;
 
	autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
+	if (BNXT_CHIP_THOR(bp) &&
+	    dev_conf->link_speeds == ETH_LINK_SPEED_40G) {
+		/* 40G is not supported as part of media auto detect.
+		 * The speed should be forced and autoneg disabled
+		 * to configure 40G speed.
+		 */
+		PMD_DRV_LOG(INFO, "Disabling autoneg for 40G\n");
+		autoneg = 0;
+	}
+
	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
-	/* Autoneg can be done only when the FW allows */
-	if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
-			      bp->link_info.force_link_speed)) {
+	/* Autoneg can be done only when the FW allows.
+	 * When the user configures a fixed speed of 40G and later changes to
+	 * any other speed, auto_link_speed/force_link_speed is still set
+	 * to 40G until the link comes up at the new speed.
+	 */
+	if (autoneg == 1 &&
+	    !(!BNXT_CHIP_THOR(bp) &&
+	      (bp->link_info.auto_link_speed ||
+	       bp->link_info.force_link_speed))) {
		link_req.phy_flags |=
				HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		link_req.auto_link_speed_mask =
@@ -2666,8 +3027,10 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t flags;
+	uint16_t svif_info;
	int rc = 0;
 
-	HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
+	bp->func_svif = BNXT_SVIF_INVALID;
+
+	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(0xffff);
 
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
@@ -2676,27 +3039,40 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
 
	/* Hard Coded.. 0xfff VLAN ID mask */
	bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
+
+	svif_info = rte_le_to_cpu_16(resp->svif_info);
+	if (svif_info & HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_VALID)
+		bp->func_svif = svif_info &
+			HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK;
+
	flags = rte_le_to_cpu_16(resp->flags);
	if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
		bp->flags |= BNXT_FLAG_MULTI_HOST;
 
-	if (BNXT_VF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
+	if (BNXT_VF(bp) &&
+	    !BNXT_VF_IS_TRUSTED(bp) &&
+	    (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
		bp->flags |= BNXT_FLAG_TRUSTED_VF_EN;
		PMD_DRV_LOG(INFO, "Trusted VF cap enabled\n");
+	} else if (BNXT_VF(bp) &&
+		   BNXT_VF_IS_TRUSTED(bp) &&
+		   !(flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF)) {
+		bp->flags &= ~BNXT_FLAG_TRUSTED_VF_EN;
+		PMD_DRV_LOG(INFO, "Trusted VF cap disabled\n");
	}
 
	if (mtu)
-		*mtu = resp->mtu;
+		*mtu = rte_le_to_cpu_16(resp->mtu);
 
	switch (resp->port_partition_type) {
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
		/* FALLTHROUGH */
-		bp->port_partition_type = resp->port_partition_type;
+		bp->flags |= BNXT_FLAG_NPAR_PF;
		break;
	default:
-		bp->port_partition_type = 0;
+		bp->flags &= ~BNXT_FLAG_NPAR_PF;
		break;
	}
 
@@ -2705,6 +3081,32 @@ int bnxt_hwrm_func_qcfg(struct bnxt *bp, uint16_t *mtu)
	return rc;
 }
 
+int bnxt_hwrm_port_mac_qcfg(struct bnxt *bp)
+{
+	struct hwrm_port_mac_qcfg_input req = {0};
+	struct hwrm_port_mac_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+	uint16_t port_svif_info;
+	int rc;
+
+	bp->port_svif = BNXT_SVIF_INVALID;
+
+	HWRM_PREP(&req, HWRM_PORT_MAC_QCFG, BNXT_USE_CHIMP_MB);
+
+	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
+
+	HWRM_CHECK_RESULT();
+
+	port_svif_info = rte_le_to_cpu_16(resp->port_svif_info);
+	if (port_svif_info &
+	    HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_VALID)
+		bp->port_svif = port_svif_info &
+			HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_MASK;
+
+	HWRM_UNLOCK();
+
+	return 0;
+}
+
 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
				   struct hwrm_func_qcaps_output *qcaps)
 {
@@ -2758,9 +3160,7 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
 
	req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
	req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
-	req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
-				   RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
-				   BNXT_NUM_VLANS);
+	req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
	req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
	req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
	req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
@@ -2771,7 +3171,7 @@ static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
	req.fid = rte_cpu_to_le_16(0xffff);
	req.enables = rte_cpu_to_le_32(enables);
 
-	HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB);
 
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
@@ -2799,9 +3199,7 @@ static void populate_vf_func_cfg_req(struct bnxt *bp,
	req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
				    RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
				    BNXT_NUM_VLANS);
-	req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
-				    RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
-				    BNXT_NUM_VLANS);
+	req->mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
	req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
						(num_vfs + 1));
	req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
@@ -2836,16 +3234,16 @@ static void add_random_mac_if_needed(struct bnxt *bp,
	}
 }
 
-static void reserve_resources_from_vf(struct bnxt *bp,
-				      struct hwrm_func_cfg_input *cfg_req,
-				      int vf)
+static int reserve_resources_from_vf(struct bnxt *bp,
+				     struct hwrm_func_cfg_input *cfg_req,
+				     int vf)
 {
	struct hwrm_func_qcaps_input req = {0};
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;
 
	/* Get the actual allocated values now */
-	HWRM_PREP(req, FUNC_QCAPS, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_QCAPS, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
 
@@ -2872,6 +3270,8 @@ static void reserve_resources_from_vf(struct bnxt *bp,
	bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
 
	HWRM_UNLOCK();
+
+	return 0;
 }
 
 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
@@ -2881,17 +3281,10 @@ int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
	int rc;
 
	/* Check for zero MAC address */
-	HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
-	if (rc) {
-		PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
-		return -1;
-	} else if (resp->error_code) {
-		rc = rte_le_to_cpu_16(resp->error_code);
-		PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
-		return -1;
-	}
+	HWRM_CHECK_RESULT();
	rc = rte_le_to_cpu_16(resp->vlan);
 
	HWRM_UNLOCK();
@@ -2906,7 +3299,7 @@ static int update_pf_resource_max(struct bnxt *bp)
	int rc;
 
	/* And copy the allocated numbers into the pf struct */
-	HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB);
+	HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB);
	req.fid = rte_cpu_to_le_16(0xffff);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
	HWRM_CHECK_RESULT();
@@ -3009,7 +3402,7 @@ int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
 
	for (i = 0; i < num_vfs; i++) {
		add_random_mac_if_needed(bp, &req, i);
 
-		HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB);
BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid); rc = bnxt_hwrm_send_message(bp, @@ -3065,7 +3458,7 @@ int bnxt_hwrm_pf_evb_mode(struct bnxt *bp) struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; int rc; - HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(0xffff); req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE); @@ -3085,7 +3478,7 @@ int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port, struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr; int rc = 0; - HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_ALLOC, BNXT_USE_CHIMP_MB); req.tunnel_type = tunnel_type; req.tunnel_dst_port_val = port; rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -3116,7 +3509,7 @@ int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port, struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr; int rc = 0; - HWRM_PREP(req, TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_TUNNEL_DST_PORT_FREE, BNXT_USE_CHIMP_MB); req.tunnel_type = tunnel_type; req.tunnel_dst_port_id = rte_cpu_to_be_16(port); @@ -3135,7 +3528,7 @@ int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf, struct hwrm_func_cfg_input req = {0}; int rc; - HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); req.flags = rte_cpu_to_le_32(flags); @@ -3165,14 +3558,14 @@ int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) struct hwrm_func_buf_rgtr_input req = {.req_type = 0 }; struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_BUF_RGTR, BNXT_USE_CHIMP_MB); req.req_buf_num_pages = rte_cpu_to_le_16(1); req.req_buf_page_size = rte_cpu_to_le_16( page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN)); req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN); req.req_buf_page_addr0 = - rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf)); + rte_cpu_to_le_64(rte_malloc_virt2iova(bp->pf.vf_req_buf)); if (req.req_buf_page_addr0 == RTE_BAD_IOVA) { PMD_DRV_LOG(ERR, "unable to map buffer address to physical memory\n"); @@ -3196,7 +3589,7 @@ int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp) if (!(BNXT_PF(bp) && bp->pdev->max_vfs)) return 0; - HWRM_PREP(req, FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_BUF_UNRGTR, BNXT_USE_CHIMP_MB); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -3212,14 +3605,14 @@ int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp) struct hwrm_func_cfg_input req = {0}; int rc; - HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(0xffff); req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags); req.enables = rte_cpu_to_le_32( HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR); req.async_event_cr = rte_cpu_to_le_16( - bp->def_cp_ring->cp_ring_struct->fw_ring_id); + bp->async_cp_ring->cp_ring_struct->fw_ring_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); @@ -3234,12 +3627,12 @@ int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp) struct hwrm_func_vf_cfg_input req = {0}; int rc; - HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB); + 
HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB); req.enables = rte_cpu_to_le_32( HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR); req.async_event_cr = rte_cpu_to_le_16( - bp->def_cp_ring->cp_ring_struct->fw_ring_id); + bp->async_cp_ring->cp_ring_struct->fw_ring_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); @@ -3256,7 +3649,7 @@ int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf) uint32_t func_cfg_flags; int rc = 0; - HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); if (is_vf) { dflt_vlan = bp->pf.vf_info[vf].dflt_vlan; @@ -3288,7 +3681,7 @@ int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf, struct hwrm_func_cfg_input req = {0}; int rc; - HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); req.enables |= rte_cpu_to_le_32(enables); @@ -3308,7 +3701,7 @@ int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf) struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr; int rc = 0; - HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); @@ -3345,7 +3738,7 @@ int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id, if (ec_size > sizeof(req.encap_request)) return -1; - HWRM_PREP(req, REJECT_FWD_RESP, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_REJECT_FWD_RESP, BNXT_USE_CHIMP_MB); req.encap_resp_target_id = rte_cpu_to_le_16(target_id); memcpy(req.encap_request, encaped, ec_size); @@ -3365,7 +3758,7 @@ int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf, struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; int rc; - HWRM_PREP(req, FUNC_QCFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_QCFG, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -3389,7 +3782,7 @@ int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id, if (ec_size > sizeof(req.encap_request)) return -1; - HWRM_PREP(req, EXEC_FWD_RESP, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_EXEC_FWD_RESP, BNXT_USE_CHIMP_MB); req.encap_resp_target_id = rte_cpu_to_le_16(target_id); memcpy(req.encap_request, encaped, ec_size); @@ -3409,7 +3802,7 @@ int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx, struct hwrm_stat_ctx_query_input req = {.req_type = 0}; struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, STAT_CTX_QUERY, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_STAT_CTX_QUERY, BNXT_USE_CHIMP_MB); req.stat_ctx_id = rte_cpu_to_le_32(cid); @@ -3435,7 +3828,6 @@ int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx, stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes); } - HWRM_UNLOCK(); return rc; @@ -3448,7 +3840,7 @@ int bnxt_hwrm_port_qstats(struct bnxt *bp) struct bnxt_pf_info *pf = &bp->pf; int rc; - HWRM_PREP(req, PORT_QSTATS, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_PORT_QSTATS, BNXT_USE_CHIMP_MB); req.port_id = rte_cpu_to_le_16(pf->port_id); req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map); @@ -3473,7 +3865,7 @@ int bnxt_hwrm_port_clr_stats(struct bnxt *bp) BNXT_NPAR(bp) || BNXT_MH(bp) || BNXT_TOTAL_VFS(bp)) return 0; - HWRM_PREP(req, PORT_CLR_STATS, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_PORT_CLR_STATS, BNXT_USE_CHIMP_MB); req.port_id = 
rte_cpu_to_le_16(pf->port_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -3493,7 +3885,7 @@ int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) if (BNXT_VF(bp)) return 0; - HWRM_PREP(req, PORT_LED_QCAPS, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_PORT_LED_QCAPS, BNXT_USE_CHIMP_MB); req.port_id = bp->pf.port_id; rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -3535,7 +3927,7 @@ int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on) if (!bp->num_leds || BNXT_VF(bp)) return -EOPNOTSUPP; - HWRM_PREP(req, PORT_LED_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_PORT_LED_CFG, BNXT_USE_CHIMP_MB); if (led_on) { led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT; @@ -3568,7 +3960,7 @@ int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries, struct hwrm_nvm_get_dir_info_input req = {0}; struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_NVM_GET_DIR_INFO, BNXT_USE_CHIMP_MB); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -3603,16 +3995,15 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data) buflen = dir_entries * entry_length; buf = rte_malloc("nvm_dir", buflen, 0); - rte_mem_lock_page(buf); if (buf == NULL) return -ENOMEM; - dma_handle = rte_mem_virt2iova(buf); + dma_handle = rte_malloc_virt2iova(buf); if (dma_handle == RTE_BAD_IOVA) { PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n"); return -ENOMEM; } - HWRM_PREP(req, NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_NVM_GET_DIR_ENTRIES, BNXT_USE_CHIMP_MB); req.host_dest_addr = rte_cpu_to_le_64(dma_handle); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); @@ -3637,17 +4028,16 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index, struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr; buf = rte_malloc("nvm_item", length, 0); - rte_mem_lock_page(buf); if (!buf) return -ENOMEM; - dma_handle = rte_mem_virt2iova(buf); + dma_handle = rte_malloc_virt2iova(buf); if (dma_handle == RTE_BAD_IOVA) { PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n"); return -ENOMEM; } - HWRM_PREP(req, NVM_READ, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_NVM_READ, BNXT_USE_CHIMP_MB); req.host_dest_addr = rte_cpu_to_le_64(dma_handle); req.dir_idx = rte_cpu_to_le_16(index); req.offset = rte_cpu_to_le_32(offset); @@ -3669,7 +4059,7 @@ int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index) struct hwrm_nvm_erase_dir_entry_input req = {0}; struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr; - HWRM_PREP(req, NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_NVM_ERASE_DIR_ENTRY, BNXT_USE_CHIMP_MB); req.dir_idx = rte_cpu_to_le_16(index); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); @@ -3691,11 +4081,10 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type, uint8_t *buf; buf = rte_malloc("nvm_write", data_len, 0); - rte_mem_lock_page(buf); if (!buf) return -ENOMEM; - dma_handle = rte_mem_virt2iova(buf); + dma_handle = rte_malloc_virt2iova(buf); if (dma_handle == RTE_BAD_IOVA) { PMD_DRV_LOG(ERR, "unable to map response address to physical memory\n"); @@ -3703,7 +4092,7 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type, } memcpy(buf, data, data_len); - HWRM_PREP(req, NVM_WRITE, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_NVM_WRITE, BNXT_USE_CHIMP_MB); req.dir_type = 
rte_cpu_to_le_16(dir_type); req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal); @@ -3754,11 +4143,11 @@ static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf, int rc; /* First query all VNIC ids */ - HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_VF_VNIC_IDS_QUERY, BNXT_USE_CHIMP_MB); req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf); req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics); - req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids)); + req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_malloc_virt2iova(vnic_ids)); if (req.vnic_id_tbl_addr == RTE_BAD_IOVA) { HWRM_UNLOCK(); @@ -3836,7 +4225,7 @@ int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf, struct hwrm_func_cfg_input req = {0}; int rc; - HWRM_PREP(req, FUNC_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_CFG, BNXT_USE_CHIMP_MB); req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid); req.enables |= rte_cpu_to_le_32( @@ -3911,7 +4300,7 @@ int bnxt_hwrm_set_em_filter(struct bnxt *bp, if (filter->fw_em_filter_id != UINT64_MAX) bnxt_hwrm_clear_em_filter(bp, filter); - HWRM_PREP(req, CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp)); + HWRM_PREP(&req, HWRM_CFA_EM_FLOW_ALLOC, BNXT_USE_KONG(bp)); req.flags = rte_cpu_to_le_32(filter->flags); @@ -3983,8 +4372,7 @@ int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter) if (filter->fw_em_filter_id == UINT64_MAX) return 0; - PMD_DRV_LOG(ERR, "Clear EM filter\n"); - HWRM_PREP(req, CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp)); + HWRM_PREP(&req, HWRM_CFA_EM_FLOW_FREE, BNXT_USE_KONG(bp)); req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id); @@ -4012,7 +4400,7 @@ int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp, if (filter->fw_ntuple_filter_id != UINT64_MAX) bnxt_hwrm_clear_ntuple_filter(bp, filter); - HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_ALLOC, BNXT_USE_CHIMP_MB); req.flags = rte_cpu_to_le_32(filter->flags); @@ -4020,7 +4408,6 @@ int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp, HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID; req.dst_id = rte_cpu_to_le_16(dst_id); - if (filter->ip_addr_type) { req.ip_addr_type = filter->ip_addr_type; enables |= @@ -4033,10 +4420,6 @@ int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp, HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR) memcpy(req.src_macaddr, filter->src_macaddr, RTE_ETHER_ADDR_LEN); - //if (enables & - //HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR) - //memcpy(req.dst_macaddr, filter->dst_macaddr, - //RTE_ETHER_ADDR_LEN); if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE) req.ethertype = rte_cpu_to_be_16(filter->ethertype); @@ -4080,6 +4463,7 @@ int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp, HWRM_CHECK_RESULT(); filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id); + filter->flow_id = rte_le_to_cpu_32(resp->flow_id); HWRM_UNLOCK(); return rc; @@ -4096,7 +4480,7 @@ int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp, if (filter->fw_ntuple_filter_id == UINT64_MAX) return 0; - HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_CFA_NTUPLE_FILTER_FREE, BNXT_USE_CHIMP_MB); req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id); @@ -4127,14 +4511,16 @@ bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic) struct bnxt_rx_ring_info *rxr; struct bnxt_cp_ring_info *cpr; - HWRM_PREP(req, VNIC_RSS_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_VNIC_RSS_CFG, 
BNXT_USE_CHIMP_MB); req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); req.hash_type = rte_cpu_to_le_32(vnic->hash_type); req.hash_mode_flags = vnic->hash_mode; req.ring_grp_tbl_addr = - rte_cpu_to_le_64(vnic->rss_table_dma_addr); + rte_cpu_to_le_64(vnic->rss_table_dma_addr + + i * BNXT_RSS_ENTRIES_PER_CTX_THOR * + 2 * sizeof(*ring_tbl)); req.hash_key_tbl_addr = rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr); @@ -4154,8 +4540,10 @@ bnxt_vnic_rss_configure_thor(struct bnxt *bp, struct bnxt_vnic_info *vnic) } /* Return if no rings are active. */ - if (cnt == max_rings) + if (cnt == max_rings) { + HWRM_UNLOCK(); return 0; + } /* Add rx/cp ring pair to RSS table. */ rxr = rxqs[k]->rx_ring; @@ -4189,23 +4577,31 @@ int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic) if (BNXT_CHIP_THOR(bp)) return bnxt_vnic_rss_configure_thor(bp, vnic); - /* - * Fill the RSS hash & redirection table with - * ring group ids for all VNICs - */ - for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; - rss_idx++, fw_idx++) { - for (i = 0; i < bp->rx_cp_nr_rings; i++) { - fw_idx %= bp->rx_cp_nr_rings; - if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID) - break; - fw_idx++; + if (vnic->fw_vnic_id == INVALID_HW_RING_ID) + return 0; + + if (vnic->rss_table && vnic->hash_type) { + /* + * Fill the RSS hash & redirection table with + * ring group ids for all VNICs + */ + for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE; + rss_idx++, fw_idx++) { + for (i = 0; i < bp->rx_cp_nr_rings; i++) { + fw_idx %= bp->rx_cp_nr_rings; + if (vnic->fw_grp_ids[fw_idx] != + INVALID_HW_RING_ID) + break; + fw_idx++; + } + if (i == bp->rx_cp_nr_rings) + return 0; + vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx]; } - if (i == bp->rx_cp_nr_rings) - return 0; - vnic->rss_table[rss_idx] = vnic->fw_grp_ids[fw_idx]; + return bnxt_hwrm_vnic_rss_cfg(bp, vnic); } - return bnxt_hwrm_vnic_rss_cfg(bp, vnic); + + return 0; } static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal, @@ -4247,7 +4643,7 @@ static int bnxt_hwrm_set_coal_params_thor(struct bnxt *bp, uint16_t flags; int rc; - HWRM_PREP(req, RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_RING_AGGINT_QCAPS, BNXT_USE_CHIMP_MB); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); @@ -4284,7 +4680,9 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp, return 0; } - HWRM_PREP(req, RING_CMPL_RING_CFG_AGGINT_PARAMS, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, + HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, + BNXT_USE_CHIMP_MB); req.ring_id = rte_cpu_to_le_16(ring_id); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); @@ -4298,7 +4696,10 @@ int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) struct hwrm_func_backing_store_qcaps_input req = {0}; struct hwrm_func_backing_store_qcaps_output *resp = bp->hwrm_cmd_resp_addr; - int rc; + struct bnxt_ctx_pg_info *ctx_pg; + struct bnxt_ctx_mem_info *ctx; + int total_alloc_len; + int rc, i; if (!BNXT_CHIP_THOR(bp) || bp->hwrm_spec_code < HWRM_VERSION_1_9_2 || @@ -4306,74 +4707,64 @@ int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) bp->ctx) return 0; - HWRM_PREP(req, FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_QCAPS, BNXT_USE_CHIMP_MB); rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT_SILENT(); - if (!rc) { - struct bnxt_ctx_pg_info *ctx_pg; - struct bnxt_ctx_mem_info *ctx; - int total_alloc_len; - int i; - - total_alloc_len = sizeof(*ctx); - ctx 
= rte_malloc("bnxt_ctx_mem", total_alloc_len, - RTE_CACHE_LINE_SIZE); - if (!ctx) { - rc = -ENOMEM; - goto ctx_err; - } - memset(ctx, 0, total_alloc_len); + total_alloc_len = sizeof(*ctx); + ctx = rte_zmalloc("bnxt_ctx_mem", total_alloc_len, + RTE_CACHE_LINE_SIZE); + if (!ctx) { + rc = -ENOMEM; + goto ctx_err; + } - ctx_pg = rte_malloc("bnxt_ctx_pg_mem", - sizeof(*ctx_pg) * BNXT_MAX_Q, - RTE_CACHE_LINE_SIZE); - if (!ctx_pg) { - rc = -ENOMEM; - goto ctx_err; - } - for (i = 0; i < BNXT_MAX_Q; i++, ctx_pg++) - ctx->tqm_mem[i] = ctx_pg; - - bp->ctx = ctx; - ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries); - ctx->qp_min_qp1_entries = - rte_le_to_cpu_16(resp->qp_min_qp1_entries); - ctx->qp_max_l2_entries = - rte_le_to_cpu_16(resp->qp_max_l2_entries); - ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size); - ctx->srq_max_l2_entries = - rte_le_to_cpu_16(resp->srq_max_l2_entries); - ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries); - ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size); - ctx->cq_max_l2_entries = - rte_le_to_cpu_16(resp->cq_max_l2_entries); - ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries); - ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size); - ctx->vnic_max_vnic_entries = - rte_le_to_cpu_16(resp->vnic_max_vnic_entries); - ctx->vnic_max_ring_table_entries = - rte_le_to_cpu_16(resp->vnic_max_ring_table_entries); - ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size); - ctx->stat_max_entries = - rte_le_to_cpu_32(resp->stat_max_entries); - ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size); - ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size); - ctx->tqm_min_entries_per_ring = - rte_le_to_cpu_32(resp->tqm_min_entries_per_ring); - ctx->tqm_max_entries_per_ring = - rte_le_to_cpu_32(resp->tqm_max_entries_per_ring); - ctx->tqm_entries_multiple = resp->tqm_entries_multiple; - if (!ctx->tqm_entries_multiple) - ctx->tqm_entries_multiple = 1; - ctx->mrav_max_entries = - rte_le_to_cpu_32(resp->mrav_max_entries); - ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size); - ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size); - ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries); - } else { - rc = 0; + ctx_pg = rte_malloc("bnxt_ctx_pg_mem", + sizeof(*ctx_pg) * BNXT_MAX_Q, + RTE_CACHE_LINE_SIZE); + if (!ctx_pg) { + rc = -ENOMEM; + goto ctx_err; } + for (i = 0; i < BNXT_MAX_Q; i++, ctx_pg++) + ctx->tqm_mem[i] = ctx_pg; + + bp->ctx = ctx; + ctx->qp_max_entries = rte_le_to_cpu_32(resp->qp_max_entries); + ctx->qp_min_qp1_entries = + rte_le_to_cpu_16(resp->qp_min_qp1_entries); + ctx->qp_max_l2_entries = + rte_le_to_cpu_16(resp->qp_max_l2_entries); + ctx->qp_entry_size = rte_le_to_cpu_16(resp->qp_entry_size); + ctx->srq_max_l2_entries = + rte_le_to_cpu_16(resp->srq_max_l2_entries); + ctx->srq_max_entries = rte_le_to_cpu_32(resp->srq_max_entries); + ctx->srq_entry_size = rte_le_to_cpu_16(resp->srq_entry_size); + ctx->cq_max_l2_entries = + rte_le_to_cpu_16(resp->cq_max_l2_entries); + ctx->cq_max_entries = rte_le_to_cpu_32(resp->cq_max_entries); + ctx->cq_entry_size = rte_le_to_cpu_16(resp->cq_entry_size); + ctx->vnic_max_vnic_entries = + rte_le_to_cpu_16(resp->vnic_max_vnic_entries); + ctx->vnic_max_ring_table_entries = + rte_le_to_cpu_16(resp->vnic_max_ring_table_entries); + ctx->vnic_entry_size = rte_le_to_cpu_16(resp->vnic_entry_size); + ctx->stat_max_entries = + rte_le_to_cpu_32(resp->stat_max_entries); + ctx->stat_entry_size = rte_le_to_cpu_16(resp->stat_entry_size); + 
ctx->tqm_entry_size = rte_le_to_cpu_16(resp->tqm_entry_size); + ctx->tqm_min_entries_per_ring = + rte_le_to_cpu_32(resp->tqm_min_entries_per_ring); + ctx->tqm_max_entries_per_ring = + rte_le_to_cpu_32(resp->tqm_max_entries_per_ring); + ctx->tqm_entries_multiple = resp->tqm_entries_multiple; + if (!ctx->tqm_entries_multiple) + ctx->tqm_entries_multiple = 1; + ctx->mrav_max_entries = + rte_le_to_cpu_32(resp->mrav_max_entries); + ctx->mrav_entry_size = rte_le_to_cpu_16(resp->mrav_entry_size); + ctx->tim_entry_size = rte_le_to_cpu_16(resp->tim_entry_size); + ctx->tim_max_entries = rte_le_to_cpu_32(resp->tim_max_entries); ctx_err: HWRM_UNLOCK(); return rc; @@ -4395,7 +4786,7 @@ int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, uint32_t enables) if (!ctx) return 0; - HWRM_PREP(req, FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_BACKING_STORE_CFG, BNXT_USE_CHIMP_MB); req.enables = rte_cpu_to_le_32(enables); if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) { @@ -4488,7 +4879,7 @@ int bnxt_hwrm_ext_port_qstats(struct bnxt *bp) bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS)) return 0; - HWRM_PREP(req, PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_PORT_QSTATS_EXT, BNXT_USE_CHIMP_MB); req.port_id = rte_cpu_to_le_16(pf->port_id); if (bp->flags & BNXT_FLAG_EXT_TX_PORT_STATS) { @@ -4529,10 +4920,10 @@ bnxt_hwrm_tunnel_redirect(struct bnxt *bp, uint8_t type) bp->hwrm_cmd_resp_addr; int rc = 0; - HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_KONG(bp)); + HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_ALLOC, BNXT_USE_CHIMP_MB); req.tunnel_type = type; req.dest_fid = bp->fw_fid; - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); HWRM_UNLOCK(); @@ -4548,10 +4939,10 @@ bnxt_hwrm_tunnel_redirect_free(struct bnxt *bp, uint8_t type) bp->hwrm_cmd_resp_addr; int rc = 0; - HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_KONG(bp)); + HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_FREE, BNXT_USE_CHIMP_MB); req.tunnel_type = type; req.dest_fid = bp->fw_fid; - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); HWRM_UNLOCK(); @@ -4566,13 +4957,13 @@ int bnxt_hwrm_tunnel_redirect_query(struct bnxt *bp, uint32_t *type) bp->hwrm_cmd_resp_addr; int rc = 0; - HWRM_PREP(req, CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_KONG(bp)); + HWRM_PREP(&req, HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE, BNXT_USE_CHIMP_MB); req.src_fid = bp->fw_fid; - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); if (type) - *type = resp->tunnel_mask; + *type = rte_le_to_cpu_32(resp->tunnel_mask); HWRM_UNLOCK(); @@ -4587,14 +4978,14 @@ int bnxt_hwrm_tunnel_redirect_info(struct bnxt *bp, uint8_t tun_type, bp->hwrm_cmd_resp_addr; int rc = 0; - HWRM_PREP(req, CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_KONG(bp)); + HWRM_PREP(&req, HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO, BNXT_USE_CHIMP_MB); req.src_fid = bp->fw_fid; req.tunnel_type = tun_type; - rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); HWRM_CHECK_RESULT(); if (dst_fid) - *dst_fid = resp->dest_fid; + *dst_fid = rte_le_to_cpu_16(resp->dest_fid); PMD_DRV_LOG(DEBUG, "dst_fid: %x\n", resp->dest_fid); @@ -4612,7 +5003,7 @@ int 
bnxt_hwrm_set_mac(struct bnxt *bp) if (!BNXT_VF(bp)) return 0; - HWRM_PREP(req, FUNC_VF_CFG, BNXT_USE_CHIMP_MB); + HWRM_PREP(&req, HWRM_FUNC_VF_CFG, BNXT_USE_CHIMP_MB); req.enables = rte_cpu_to_le_32(HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR); @@ -4627,3 +5018,389 @@ int bnxt_hwrm_set_mac(struct bnxt *bp) return rc; } + +int bnxt_hwrm_if_change(struct bnxt *bp, bool up) +{ + struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_drv_if_change_input req = {0}; + uint32_t flags; + int rc; + + if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) + return 0; + + /* Do not issue FUNC_DRV_IF_CHANGE during reset recovery. + * If we issue FUNC_DRV_IF_CHANGE with flags down before + * FUNC_DRV_UNRGTR, FW resets before FUNC_DRV_UNRGTR + */ + if (!up && (bp->flags & BNXT_FLAG_FW_RESET)) + return 0; + + HWRM_PREP(&req, HWRM_FUNC_DRV_IF_CHANGE, BNXT_USE_CHIMP_MB); + + if (up) + req.flags = + rte_cpu_to_le_32(HWRM_FUNC_DRV_IF_CHANGE_INPUT_FLAGS_UP); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + flags = rte_le_to_cpu_32(resp->flags); + HWRM_UNLOCK(); + + if (!up) + return 0; + + if (flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE) { + PMD_DRV_LOG(INFO, "FW reset happened while port was down\n"); + bp->flags |= BNXT_FLAG_IF_CHANGE_HOT_FW_RESET_DONE; + } + + return 0; +} + +int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) +{ + struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + struct bnxt_error_recovery_info *info = bp->recovery_info; + struct hwrm_error_recovery_qcfg_input req = {0}; + uint32_t flags = 0; + unsigned int i; + int rc; + + /* Older FW does not have error recovery support */ + if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) + return 0; + + if (!info) { + info = rte_zmalloc("bnxt_hwrm_error_recovery_qcfg", + sizeof(*info), 0); + bp->recovery_info = info; + if (info == NULL) + return -ENOMEM; + } else { + memset(info, 0, sizeof(*info)); + } + + HWRM_PREP(&req, HWRM_ERROR_RECOVERY_QCFG, BNXT_USE_CHIMP_MB); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + flags = rte_le_to_cpu_32(resp->flags); + if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST) + info->flags |= BNXT_FLAG_ERROR_RECOVERY_HOST; + else if (flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU) + info->flags |= BNXT_FLAG_ERROR_RECOVERY_CO_CPU; + + if ((info->flags & BNXT_FLAG_ERROR_RECOVERY_CO_CPU) && + !(bp->flags & BNXT_FLAG_KONG_MB_EN)) { + rc = -EINVAL; + goto err; + } + + /* FW returned values are in units of 100msec */ + info->driver_polling_freq = + rte_le_to_cpu_32(resp->driver_polling_freq) * 100; + info->master_func_wait_period = + rte_le_to_cpu_32(resp->master_func_wait_period) * 100; + info->normal_func_wait_period = + rte_le_to_cpu_32(resp->normal_func_wait_period) * 100; + info->master_func_wait_period_after_reset = + rte_le_to_cpu_32(resp->master_func_wait_period_after_reset) * 100; + info->max_bailout_time_after_reset = + rte_le_to_cpu_32(resp->max_bailout_time_after_reset) * 100; + info->status_regs[BNXT_FW_STATUS_REG] = + rte_le_to_cpu_32(resp->fw_health_status_reg); + info->status_regs[BNXT_FW_HEARTBEAT_CNT_REG] = + rte_le_to_cpu_32(resp->fw_heartbeat_reg); + info->status_regs[BNXT_FW_RECOVERY_CNT_REG] = + rte_le_to_cpu_32(resp->fw_reset_cnt_reg); + info->status_regs[BNXT_FW_RESET_INPROG_REG] = + rte_le_to_cpu_32(resp->reset_inprogress_reg); + info->reg_array_cnt = + rte_le_to_cpu_32(resp->reg_array_cnt); + + if (info->reg_array_cnt >= 
BNXT_NUM_RESET_REG) { + rc = -EINVAL; + goto err; + } + + for (i = 0; i < info->reg_array_cnt; i++) { + info->reset_reg[i] = + rte_le_to_cpu_32(resp->reset_reg[i]); + info->reset_reg_val[i] = + rte_le_to_cpu_32(resp->reset_reg_val[i]); + info->delay_after_reset[i] = + resp->delay_after_reset[i]; + } +err: + HWRM_UNLOCK(); + + /* Map the FW status registers */ + if (!rc) + rc = bnxt_map_fw_health_status_regs(bp); + + if (rc) { + rte_free(bp->recovery_info); + bp->recovery_info = NULL; + } + return rc; +} + +int bnxt_hwrm_fw_reset(struct bnxt *bp) +{ + struct hwrm_fw_reset_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_fw_reset_input req = {0}; + int rc; + + if (!BNXT_PF(bp)) + return -EOPNOTSUPP; + + HWRM_PREP(&req, HWRM_FW_RESET, BNXT_USE_KONG(bp)); + + req.embedded_proc_type = + HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP; + req.selfrst_status = + HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP; + req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL; + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), + BNXT_USE_KONG(bp)); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_port_ts_query(struct bnxt *bp, uint8_t path, uint64_t *timestamp) +{ + struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_port_ts_query_input req = {0}; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + uint32_t flags = 0; + int rc; + + if (!ptp) + return 0; + + HWRM_PREP(&req, HWRM_PORT_TS_QUERY, BNXT_USE_CHIMP_MB); + + switch (path) { + case BNXT_PTP_FLAGS_PATH_TX: + flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_TX; + break; + case BNXT_PTP_FLAGS_PATH_RX: + flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_PATH_RX; + break; + case BNXT_PTP_FLAGS_CURRENT_TIME: + flags |= HWRM_PORT_TS_QUERY_INPUT_FLAGS_CURRENT_TIME; + break; + } + + req.flags = rte_cpu_to_le_32(flags); + req.port_id = rte_cpu_to_le_16(bp->pf.port_id); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB); + + HWRM_CHECK_RESULT(); + + if (timestamp) { + *timestamp = rte_le_to_cpu_32(resp->ptp_msg_ts[0]); + *timestamp |= + (uint64_t)(rte_le_to_cpu_32(resp->ptp_msg_ts[1])) << 32; + } + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(struct bnxt *bp) +{ + struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp = + bp->hwrm_cmd_resp_addr; + struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0}; + uint32_t flags = 0; + int rc = 0; + + if (!(bp->fw_cap & BNXT_FW_CAP_ADV_FLOW_MGMT)) + return rc; + + if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) { + PMD_DRV_LOG(DEBUG, + "Not a PF or trusted VF. Command not supported\n"); + return 0; + } + + HWRM_PREP(&req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, BNXT_USE_KONG(bp)); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp)); + + HWRM_CHECK_RESULT(); + flags = rte_le_to_cpu_32(resp->flags); + HWRM_UNLOCK(); + + if (flags & HWRM_CFA_ADV_FLOW_MGNT_QCAPS_L2_HDR_SRC_FILTER_EN) { + bp->flow_flags |= BNXT_FLOW_FLAG_L2_HDR_SRC_FILTER_EN; + PMD_DRV_LOG(INFO, "Source L2 header filtering enabled\n"); + } + + return rc; +} + +int bnxt_hwrm_cfa_counter_qcaps(struct bnxt *bp, uint16_t *max_fc) +{ + int rc = 0; + + struct hwrm_cfa_counter_qcaps_input req = {0}; + struct hwrm_cfa_counter_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + + if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) { + PMD_DRV_LOG(DEBUG, + "Not a PF or trusted VF. 
Command not supported\n"); + return 0; + } + + HWRM_PREP(&req, HWRM_CFA_COUNTER_QCAPS, BNXT_USE_KONG(bp)); + req.target_id = rte_cpu_to_le_16(bp->fw_fid); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp)); + + HWRM_CHECK_RESULT(); + if (max_fc) + *max_fc = rte_le_to_cpu_16(resp->max_rx_fc); + HWRM_UNLOCK(); + + PMD_DRV_LOG(DEBUG, "max_fc = %d\n", *max_fc); + return 0; +} + +int bnxt_hwrm_ctx_rgtr(struct bnxt *bp, rte_iova_t dma_addr, uint16_t *ctx_id) +{ + int rc = 0; + struct hwrm_cfa_ctx_mem_rgtr_input req = {.req_type = 0 }; + struct hwrm_cfa_ctx_mem_rgtr_output *resp = bp->hwrm_cmd_resp_addr; + + if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) { + PMD_DRV_LOG(DEBUG, + "Not a PF or trusted VF. Command not supported\n"); + return 0; + } + + HWRM_PREP(&req, HWRM_CFA_CTX_MEM_RGTR, BNXT_USE_KONG(bp)); + + req.page_level = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_0; + req.page_size = HWRM_CFA_CTX_MEM_RGTR_INPUT_PAGE_SIZE_2M; + req.page_dir = rte_cpu_to_le_64(dma_addr); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp)); + + HWRM_CHECK_RESULT(); + if (ctx_id) { + *ctx_id = rte_le_to_cpu_16(resp->ctx_id); + PMD_DRV_LOG(DEBUG, "ctx_id = %d\n", *ctx_id); + } + HWRM_UNLOCK(); + + return 0; +} + +int bnxt_hwrm_ctx_unrgtr(struct bnxt *bp, uint16_t ctx_id) +{ + int rc = 0; + struct hwrm_cfa_ctx_mem_unrgtr_input req = {.req_type = 0 }; + struct hwrm_cfa_ctx_mem_unrgtr_output *resp = bp->hwrm_cmd_resp_addr; + + if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) { + PMD_DRV_LOG(DEBUG, + "Not a PF or trusted VF. Command not supported\n"); + return 0; + } + + HWRM_PREP(&req, HWRM_CFA_CTX_MEM_UNRGTR, BNXT_USE_KONG(bp)); + + req.ctx_id = rte_cpu_to_le_16(ctx_id); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp)); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return rc; +} + +int bnxt_hwrm_cfa_counter_cfg(struct bnxt *bp, enum bnxt_flow_dir dir, + uint16_t cntr, uint16_t ctx_id, + uint32_t num_entries, bool enable) +{ + struct hwrm_cfa_counter_cfg_input req = {0}; + struct hwrm_cfa_counter_cfg_output *resp = bp->hwrm_cmd_resp_addr; + uint16_t flags = 0; + int rc; + + if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) { + PMD_DRV_LOG(DEBUG, + "Not a PF or trusted VF. Command not supported\n"); + return 0; + } + + HWRM_PREP(&req, HWRM_CFA_COUNTER_CFG, BNXT_USE_KONG(bp)); + + req.target_id = rte_cpu_to_le_16(bp->fw_fid); + req.counter_type = rte_cpu_to_le_16(cntr); + flags = enable ? HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_ENABLE : + HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_CFG_MODE_DISABLE; + flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_DATA_TRANSFER_MODE_PULL; + if (dir == BNXT_DIR_RX) + flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_RX; + else if (dir == BNXT_DIR_TX) + flags |= HWRM_CFA_COUNTER_CFG_INPUT_FLAGS_PATH_TX; + req.flags = rte_cpu_to_le_16(flags); + req.ctx_id = rte_cpu_to_le_16(ctx_id); + req.num_entries = rte_cpu_to_le_32(num_entries); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp)); + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return 0; +} + +int bnxt_hwrm_cfa_counter_qstats(struct bnxt *bp, + enum bnxt_flow_dir dir, + uint16_t cntr, + uint16_t num_entries) +{ + struct hwrm_cfa_counter_qstats_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_cfa_counter_qstats_input req = {0}; + uint16_t flow_ctx_id = 0; + uint16_t flags = 0; + int rc = 0; + + if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp))) { + PMD_DRV_LOG(DEBUG, + "Not a PF or trusted VF. 
Command not supported\n"); + return 0; + } + + if (dir == BNXT_DIR_RX) { + flow_ctx_id = bp->rx_fc_in_tbl.ctx_id; + flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_RX; + } else if (dir == BNXT_DIR_TX) { + flow_ctx_id = bp->tx_fc_in_tbl.ctx_id; + flags = HWRM_CFA_COUNTER_QSTATS_INPUT_FLAGS_PATH_TX; + } + + HWRM_PREP(&req, HWRM_CFA_COUNTER_QSTATS, BNXT_USE_KONG(bp)); + req.target_id = rte_cpu_to_le_16(bp->fw_fid); + req.counter_type = rte_cpu_to_le_16(cntr); + req.input_flow_ctx_id = rte_cpu_to_le_16(flow_ctx_id); + req.num_entries = rte_cpu_to_le_16(num_entries); + req.flags = rte_cpu_to_le_16(flags); + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_KONG(bp)); + + HWRM_CHECK_RESULT(); + HWRM_UNLOCK(); + + return 0; +}
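
A few of the mechanisms this patch relies on are easier to see in isolation. The MRU hunks in bnxt_hwrm_pf_func_cfg() and populate_vf_func_cfg_req() replace open-coded arithmetic with BNXT_VNIC_MRU(). The macro's real definition lives in bnxt.h and is not shown in this diff; the sketch below is an assumption that it simply mirrors the arithmetic of the removed lines (MTU plus L2 header, CRC, and two VLAN tags):

#include <rte_ether.h>

/* Hypothetical stand-in for BNXT_VNIC_MRU(); the real macro is defined
 * in bnxt.h. It reproduces the removed open-coded computation:
 * MTU + Ethernet header + CRC + room for two VLAN tags. */
#define VLAN_TAG_SIZE		4	/* assumed, as in the removed lines */
#define BNXT_NUM_VLANS		2	/* assumed */
#define BNXT_VNIC_MRU(mtu)	((mtu) + RTE_ETHER_HDR_LEN + \
				 RTE_ETHER_CRC_LEN + \
				 VLAN_TAG_SIZE * BNXT_NUM_VLANS)

Centralizing the expression keeps the PF and VF paths from drifting apart, which is exactly the bug class the duplicated arithmetic invited.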
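
The new SVIF parsing in bnxt_hwrm_func_qcfg() and bnxt_hwrm_port_mac_qcfg() follows the same valid-bit-plus-mask idiom: the id field is only meaningful when firmware sets the VALID bit, and the id itself sits under the MASK bits. A minimal sketch of the idiom, with illustrative parameter names rather than the driver's constants:

#include <stdint.h>

/* Return the id under 'mask' when 'valid_bit' is set, else the caller's
 * sentinel (the driver uses BNXT_SVIF_INVALID). */
static uint16_t parse_svif(uint16_t svif_info, uint16_t valid_bit,
			   uint16_t mask, uint16_t invalid)
{
	return (svif_info & valid_bit) ? (svif_info & mask) : invalid;
}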
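
The NVM and VF-VNIC hunks switch rte_mem_virt2iova() to rte_malloc_virt2iova() and drop the rte_mem_lock_page() calls: for memory obtained from rte_malloc(), rte_malloc_virt2iova() is the appropriate IOVA lookup, while rte_mem_virt2iova() targets memseg/hugepage addresses. A minimal sketch of the allocation pattern under those APIs; note the sketch frees the buffer when the IOVA lookup fails, a cleanup the driver's error paths leave out:

#include <errno.h>
#include <stddef.h>
#include <rte_malloc.h>
#include <rte_memory.h>

/* Allocate a DMA-able buffer from the rte heap and resolve its IOVA. */
static int example_dma_buf_alloc(size_t len, void **vaddr, rte_iova_t *iova)
{
	void *buf = rte_malloc("example_dma", len, 0);

	if (buf == NULL)
		return -ENOMEM;

	*iova = rte_malloc_virt2iova(buf);
	if (*iova == RTE_BAD_IOVA) {
		rte_free(buf);	/* release the buffer on lookup failure */
		return -ENOMEM;
	}

	*vaddr = buf;
	return 0;
}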
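
In bnxt_vnic_rss_configure_thor(), each VNIC_RSS_CFG command now points at its own slice of the shared redirection table: the DMA address advances by i * BNXT_RSS_ENTRIES_PER_CTX_THOR * 2 * sizeof(*ring_tbl) per context, the factor of 2 reflecting that every entry is an rx/completion ring-id pair. A sketch of that offset computation; the constant's value here is an assumption for illustration only:

#include <stddef.h>
#include <stdint.h>

#define BNXT_RSS_ENTRIES_PER_CTX_THOR	64	/* assumed for the example */

/* Byte offset of RSS context 'ctx_idx' within the shared table, where
 * each entry is a pair of ring ids of 'member_sz' bytes each. */
static uint64_t rss_ctx_table_offset(unsigned int ctx_idx, size_t member_sz)
{
	return (uint64_t)ctx_idx * BNXT_RSS_ENTRIES_PER_CTX_THOR *
	       2 * member_sz;
}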
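
The reworked bnxt_vnic_rss_configure() keeps the pre-Thor redirection-table fill: for each hash slot it advances fw_idx modulo the ring count until a ring group with a valid hardware id is found, so stopped queues are skipped while traffic still spreads round-robin over the active ones. A self-contained sketch of that loop, with the table size and the invalid-id sentinel assumed for the example:

#include <stdint.h>

#define HW_HASH_INDEX_SIZE	128		/* assumed table size */
#define INVALID_HW_RING_ID	((uint16_t)-1)	/* assumed sentinel */

/* Fill 'rss_table' with the next valid entry of 'fw_grp_ids' per slot;
 * returns -1 when no ring group is active at all. */
static int fill_rss_table(uint16_t *rss_table, const uint16_t *fw_grp_ids,
			  unsigned int nr_rings)
{
	unsigned int rss_idx, fw_idx, i;

	for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
	     rss_idx++, fw_idx++) {
		for (i = 0; i < nr_rings; i++) {
			fw_idx %= nr_rings;
			if (fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
				break;
			fw_idx++;
		}
		if (i == nr_rings)	/* scanned every ring, none active */
			return -1;
		rss_table[rss_idx] = fw_grp_ids[fw_idx];
	}
	return 0;
}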
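
Finally, bnxt_hwrm_port_ts_query() rebuilds a 64-bit PTP timestamp from the two little-endian 32-bit words firmware returns in ptp_msg_ts[]. The key detail is widening the high word to 64 bits before shifting, so the shift does not truncate; a minimal sketch of the same assembly:

#include <stdint.h>
#include <rte_byteorder.h>

/* Combine a little-endian {low, high} 32-bit pair into a host-order
 * 64-bit value, as done for resp->ptp_msg_ts above. */
static uint64_t ts_from_le32_pair(const uint32_t ts[2])
{
	uint64_t v = rte_le_to_cpu_32(ts[0]);

	v |= (uint64_t)rte_le_to_cpu_32(ts[1]) << 32;
	return v;
}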