#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>
+#include <rte_io.h>
#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
-#include <rte_io.h>
-
#define HWRM_SPEC_CODE_1_8_3 0x10803
#define HWRM_VERSION_1_9_1 0x10901
#define HWRM_VERSION_1_9_2 0x10903
/*
* HWRM Functions (sent to HWRM)
- * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
- * fails (ie: a timeout), and a positive non-zero HWRM error code if the HWRM
- * command was failed by the ChiMP.
+ * These are named bnxt_hwrm_*() and return 0 on success, -ETIMEDOUT (-110)
+ * if the HWRM command times out, or a negative error code if the HWRM
+ * command was failed by the FW.
*/
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
}
/*
- * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
+ * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
* spinlock, and does initial processing.
*
* HWRM_CHECK_RESULT() returns errors on failure and may not be used. It
- * releases the spinlock only if it returns. If the regular int return codes
+ * releases the spinlock only if it returns. If the regular int return codes
* are not used by the function, HWRM_CHECK_RESULT() should not be used
* directly, rather it should be copied and modified to suit the function.
*
struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
-/* if (bp->hwrm_spec_code < 0x10801 || ptp) TBD */
if (ptp)
return 0;
req.num_rsscos_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_RSS_CTX);
req.num_l2_ctxs = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_L2_CTX);
req.num_vnics = rte_cpu_to_le_16(BNXT_VF_RSV_NUM_VNIC);
+ } else if (bp->vf_resv_strategy ==
+ HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESV_STRATEGY_MAXIMAL) {
+ enables |= HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS;
+ req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
}
if (test)
HWRM_CHECK_RESULT();
- bp->grp_info[idx].fw_grp_id =
- rte_le_to_cpu_16(resp->ring_group_id);
+ bp->grp_info[idx].fw_grp_id = rte_le_to_cpu_16(resp->ring_group_id);
HWRM_UNLOCK();
req.update_period_ms = rte_cpu_to_le_32(0);
- req.stats_dma_addr =
- rte_cpu_to_le_64(cpr->hw_stats_map);
+ req.stats_dma_addr = rte_cpu_to_le_64(cpr->hw_stats_map);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req), BNXT_USE_CHIMP_MB);
vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
skip_ring_grps:
- vnic->mru = bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
- RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
+ vnic->mru = BNXT_VNIC_MRU(bp->eth_dev->data->mtu);
HWRM_PREP(req, VNIC_ALLOC, BNXT_USE_CHIMP_MB);
if (vnic->func_default)
return rc;
}
-/*
- * HWRM utility functions
- */
-
int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
unsigned int i;
return rc;
}
+/*
+ * HWRM utility functions
+ */
+
void bnxt_free_hwrm_resources(struct bnxt *bp)
{
/* Release memzone */
rc = bnxt_hwrm_clear_l2_filter(bp, filter);
STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
bnxt_free_filter(bp, filter);
- //if (rc)
- //break;
}
return rc;
}
STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
rte_free(flow);
- //if (rc)
- //break;
}
return rc;
}
for (i = bp->max_vnics - 1; i >= 0; i--) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
- // If the VNIC ID is invalid we are not currently using the VNIC
if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
continue;
}
if (mtu)
- *mtu = resp->mtu;
+ *mtu = rte_le_to_cpu_16(resp->mtu);
switch (resp->port_partition_type) {
case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
}
req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
- req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
- req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
- RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
- BNXT_NUM_VLANS);
+ req.mtu = rte_cpu_to_le_16(RTE_MIN(bp->eth_dev->data->mtu,
+ BNXT_MAX_MTU)); /* FW adds header sizes */
+ req.mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
BNXT_NUM_VLANS);
- req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
- RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE *
- BNXT_NUM_VLANS);
+ req->mru = rte_cpu_to_le_16(BNXT_VNIC_MRU(bp->eth_dev->data->mtu));
req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
(num_vfs + 1));
req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
}
-
HWRM_UNLOCK();
return rc;
HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
req.dst_id = rte_cpu_to_le_16(dst_id);
-
if (filter->ip_addr_type) {
req.ip_addr_type = filter->ip_addr_type;
enables |=
HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
memcpy(req.src_macaddr, filter->src_macaddr,
RTE_ETHER_ADDR_LEN);
- //if (enables &
- //HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
- //memcpy(req.dst_macaddr, filter->dst_macaddr,
- //RTE_ETHER_ADDR_LEN);
if (enables &
HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
req.ethertype = rte_cpu_to_be_16(filter->ethertype);