{ .vendor_id = 0, /* sentinel */ },
};
-#define BNXT_DEVARG_TRUFLOW "host-based-truflow"
#define BNXT_DEVARG_FLOW_XSTAT "flow-xstat"
#define BNXT_DEVARG_MAX_NUM_KFLOWS "max-num-kflows"
#define BNXT_DEVARG_REPRESENTOR "representor"
static const char *const bnxt_dev_args[] = {
BNXT_DEVARG_REPRESENTOR,
- BNXT_DEVARG_TRUFLOW,
BNXT_DEVARG_FLOW_XSTAT,
BNXT_DEVARG_MAX_NUM_KFLOWS,
BNXT_DEVARG_REP_BASED_PF,
NULL
};
-/*
- * truflow == false to disable the feature
- * truflow == true to enable the feature
- */
-#define BNXT_DEVARG_TRUFLOW_INVALID(truflow) ((truflow) > 1)
-
/*
* flow_xstat == false to disable the feature
* flow_xstat == true to enable the feature
static void bnxt_free_parent_info(struct bnxt *bp)
{
rte_free(bp->parent);
+ bp->parent = NULL;
}
static void bnxt_free_pf_info(struct bnxt *bp)
{
rte_free(bp->pf);
+ bp->pf = NULL;
}
static void bnxt_free_link_info(struct bnxt *bp)
{
rte_free(bp->link_info);
+ bp->link_info = NULL;
}
static void bnxt_free_leds_info(struct bnxt *bp)
static void bnxt_free_cos_queues(struct bnxt *bp)
{
rte_free(bp->rx_cos_queue);
+ bp->rx_cos_queue = NULL;
rte_free(bp->tx_cos_queue);
+ bp->tx_cos_queue = NULL;
}
static void bnxt_free_mem(struct bnxt *bp, bool reconfig)
if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS) {
int j, nr_ctxs = bnxt_rss_ctxts(bp);
+		/* The RSS table size on Thor (P5) devices is 512 entries,
+		 * so the number of Rx rings cannot exceed that value.
+		 */
if (bp->rx_nr_rings > BNXT_RSS_TBL_SIZE_P5) {
PMD_DRV_LOG(ERR, "RxQ cnt %d > reta_size %d\n",
bp->rx_nr_rings, BNXT_RSS_TBL_SIZE_P5);
- PMD_DRV_LOG(ERR,
- "Only queues 0-%d will be in RSS table\n",
- BNXT_RSS_TBL_SIZE_P5 - 1);
+ goto err_out;
}
rc = 0;
return rc;
}
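+/* Free the saved per-ring statistics arrays and clear the stale pointers. */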
+static void bnxt_free_prev_ring_stats(struct bnxt *bp)
+{
+ rte_free(bp->prev_rx_ring_stats);
+ rte_free(bp->prev_tx_ring_stats);
+
+ bp->prev_rx_ring_stats = NULL;
+ bp->prev_tx_ring_stats = NULL;
+}
+
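+/*
+ * Allocate zeroed arrays holding one bnxt_ring_stats entry per Rx and Tx
+ * completion ring. On failure both arrays are released so a partial
+ * allocation is never left behind.
+ */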
+static int bnxt_alloc_prev_ring_stats(struct bnxt *bp)
+{
+ bp->prev_rx_ring_stats = rte_zmalloc("bnxt_prev_rx_ring_stats",
+ sizeof(struct bnxt_ring_stats) *
+ bp->rx_cp_nr_rings,
+ 0);
+ if (bp->prev_rx_ring_stats == NULL)
+ return -ENOMEM;
+
+ bp->prev_tx_ring_stats = rte_zmalloc("bnxt_prev_tx_ring_stats",
+ sizeof(struct bnxt_ring_stats) *
+ bp->tx_cp_nr_rings,
+ 0);
+ if (bp->prev_tx_ring_stats == NULL)
+ goto error;
+
+ return 0;
+
+error:
+ bnxt_free_prev_ring_stats(bp);
+ return -ENOMEM;
+}
+
static int bnxt_start_nic(struct bnxt *bp)
{
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
uint32_t bnxt_get_speed_capabilities(struct bnxt *bp)
{
- uint32_t link_speed = bp->link_info->support_speeds;
+ uint32_t link_speed = 0;
uint32_t speed_capa = 0;
+ if (bp->link_info == NULL)
+ return 0;
+
+ link_speed = bp->link_info->support_speeds;
+
/* If PAM4 is configured, use PAM4 supported speed */
if (link_speed == 0 && bp->link_info->support_pam4_speeds > 0)
link_speed = bp->link_info->support_pam4_speeds;
dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
- /* *INDENT-OFF* */
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
.pthresh = 8,
BNXT_SWITCH_PORT_ID_TRUSTED_VF;
}
- /* *INDENT-ON* */
-
/*
* TODO: default_rxconf, default_txconf, rx_desc_lim, and tx_desc_lim
* need further investigation.
return bnxt_recv_pkts;
}
-#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
-#ifndef RTE_LIBRTE_IEEE1588
+#if (defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)) && \
+ !defined(RTE_LIBRTE_IEEE1588)
+
+ /* Vector mode receive cannot be enabled if scattered rx is in use. */
+ if (eth_dev->data->scattered_rx)
+ goto use_scalar_rx;
+
+ /*
+ * Vector mode receive cannot be enabled if Truflow is enabled or if
+ * asynchronous completions and receive completions can be placed in
+ * the same completion ring.
+ */
+ if (BNXT_TRUFLOW_EN(bp) || !BNXT_NUM_ASYNC_CPR(bp))
+ goto use_scalar_rx;
+
/*
- * Vector mode receive can be enabled only if scatter rx is not
- * in use and rx offloads are limited to VLAN stripping and
- * CRC stripping.
+ * Vector mode receive cannot be enabled if any receive offloads outside
+ * a limited subset have been enabled.
*/
- if (!eth_dev->data->scattered_rx &&
- !(eth_dev->data->dev_conf.rxmode.offloads &
- ~(DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_IPV4_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
- DEV_RX_OFFLOAD_RSS_HASH |
- DEV_RX_OFFLOAD_VLAN_FILTER)) &&
- !BNXT_TRUFLOW_EN(bp) && BNXT_NUM_ASYNC_CPR(bp) &&
- rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
- PMD_DRV_LOG(INFO, "Using vector mode receive for port %d\n",
+ if (eth_dev->data->dev_conf.rxmode.offloads &
+ ~(DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC |
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
+ DEV_RX_OFFLOAD_RSS_HASH |
+ DEV_RX_OFFLOAD_VLAN_FILTER))
+ goto use_scalar_rx;
+
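+	/* Use the AVX2 receive path when the build, the CPU and the configured
+	 * SIMD bitwidth all allow 256-bit vectors.
+	 */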
+#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
+ if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 &&
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1) {
+ PMD_DRV_LOG(INFO,
+ "Using AVX2 vector mode receive for port %d\n",
+ eth_dev->data->port_id);
+ bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE;
+ return bnxt_recv_pkts_vec_avx2;
+ }
+#endif
+ if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
+ PMD_DRV_LOG(INFO,
+ "Using SSE vector mode receive for port %d\n",
eth_dev->data->port_id);
bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE;
return bnxt_recv_pkts_vec;
}
+
+use_scalar_rx:
PMD_DRV_LOG(INFO, "Vector mode receive disabled for port %d\n",
eth_dev->data->port_id);
PMD_DRV_LOG(INFO,
eth_dev->data->port_id,
eth_dev->data->scattered_rx,
eth_dev->data->dev_conf.rxmode.offloads);
-#endif
#endif
bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
return bnxt_recv_pkts;
if (BNXT_CHIP_SR2(bp))
return bnxt_xmit_pkts;
-#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
-#ifndef RTE_LIBRTE_IEEE1588
+#if (defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)) && \
+ !defined(RTE_LIBRTE_IEEE1588)
uint64_t offloads = eth_dev->data->dev_conf.txmode.offloads;
/*
* Vector mode transmit can be enabled only if not using scatter rx
* or tx offloads.
*/
- if (!eth_dev->data->scattered_rx &&
- !(offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
- !BNXT_TRUFLOW_EN(bp) &&
- rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
- PMD_DRV_LOG(INFO, "Using vector mode transmit for port %d\n",
+ if (eth_dev->data->scattered_rx ||
+ (offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) ||
+ BNXT_TRUFLOW_EN(bp))
+ goto use_scalar_tx;
+
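+	/* Use the AVX2 transmit path when the build, the CPU and the configured
+	 * SIMD bitwidth all allow 256-bit vectors.
+	 */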
+#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
+ if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256 &&
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1) {
+ PMD_DRV_LOG(INFO,
+ "Using AVX2 vector mode transmit for port %d\n",
+ eth_dev->data->port_id);
+ return bnxt_xmit_pkts_vec_avx2;
+ }
+#endif
+ if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
+ PMD_DRV_LOG(INFO,
+ "Using SSE vector mode transmit for port %d\n",
eth_dev->data->port_id);
return bnxt_xmit_pkts_vec;
}
+
+use_scalar_tx:
PMD_DRV_LOG(INFO, "Vector mode transmit disabled for port %d\n",
eth_dev->data->port_id);
PMD_DRV_LOG(INFO,
eth_dev->data->port_id,
eth_dev->data->scattered_rx,
offloads);
-#endif
#endif
return bnxt_xmit_pkts;
}
{
int rc = 0;
- if (bp->switch_domain_id) {
- rc = rte_eth_switch_domain_free(bp->switch_domain_id);
- if (rc)
- PMD_DRV_LOG(ERR, "free switch domain:%d fail: %d\n",
- bp->switch_domain_id, rc);
- }
+ if (!(BNXT_PF(bp) || BNXT_VF_IS_TRUSTED(bp)))
+ return;
+
+ rc = rte_eth_switch_domain_free(bp->switch_domain_id);
+ if (rc)
+ PMD_DRV_LOG(ERR, "free switch domain:%d fail: %d\n",
+ bp->switch_domain_id, rc);
}
static void bnxt_ptp_get_current_time(void *arg)
bnxt_shutdown_nic(bp);
bnxt_hwrm_if_change(bp, false);
+ bnxt_free_prev_ring_stats(bp);
rte_free(bp->mark_table);
bp->mark_table = NULL;
if (rc)
goto error;
+ rc = bnxt_alloc_prev_ring_stats(bp);
+ if (rc)
+ goto error;
+
eth_dev->data->dev_started = 1;
bnxt_link_update_op(eth_dev, 1);
static void bnxt_drv_uninit(struct bnxt *bp)
{
- bnxt_free_switch_domain(bp);
bnxt_free_leds_info(bp);
bnxt_free_cos_queues(bp);
bnxt_free_link_info(bp);
- bnxt_free_pf_info(bp);
bnxt_free_parent_info(bp);
bnxt_uninit_locks(bp);
bp->rx_mem_zone = NULL;
bnxt_free_vf_info(bp);
+ bnxt_free_pf_info(bp);
rte_free(bp->grp_info);
bp->grp_info = NULL;
return rc;
memset(&new, 0, sizeof(new));
+
+ if (bp->link_info == NULL)
+ goto out;
+
do {
/* Retrieve link info from hardware */
rc = bnxt_get_hwrm_link_config(bp, &new);
if (rc)
return rc;
- if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
- PMD_DRV_LOG(ERR, "Flow Control Settings cannot be modified\n");
+ if (!BNXT_SINGLE_PF(bp)) {
+ PMD_DRV_LOG(ERR,
+ "Flow Control Settings cannot be modified on VF or on shared PF\n");
return -ENOTSUP;
}
ret = snprintf(fw_version, fw_size, "%d.%d.%d.%d",
fw_major, fw_minor, fw_updt, fw_rsvd);
+ if (ret < 0)
+ return -EINVAL;
ret += 1; /* add the size of '\0' */
- if (fw_size < (uint32_t)ret)
+ if (fw_size < (size_t)ret)
return ret;
else
return 0;
eth_rx_burst_t pkt_burst;
const char *info;
} bnxt_rx_burst_info[] = {
- {bnxt_recv_pkts, "Scalar"},
+ {bnxt_recv_pkts, "Scalar"},
#if defined(RTE_ARCH_X86)
- {bnxt_recv_pkts_vec, "Vector SSE"},
-#elif defined(RTE_ARCH_ARM64)
- {bnxt_recv_pkts_vec, "Vector Neon"},
+ {bnxt_recv_pkts_vec, "Vector SSE"},
+#endif
+#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
+ {bnxt_recv_pkts_vec_avx2, "Vector AVX2"},
+#endif
+#if defined(RTE_ARCH_ARM64)
+ {bnxt_recv_pkts_vec, "Vector Neon"},
#endif
};
eth_tx_burst_t pkt_burst;
const char *info;
} bnxt_tx_burst_info[] = {
- {bnxt_xmit_pkts, "Scalar"},
+ {bnxt_xmit_pkts, "Scalar"},
#if defined(RTE_ARCH_X86)
- {bnxt_xmit_pkts_vec, "Vector SSE"},
-#elif defined(RTE_ARCH_ARM64)
- {bnxt_xmit_pkts_vec, "Vector Neon"},
+ {bnxt_xmit_pkts_vec, "Vector SSE"},
+#endif
+#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
+ {bnxt_xmit_pkts_vec_avx2, "Vector AVX2"},
+#endif
+#if defined(RTE_ARCH_ARM64)
+ {bnxt_xmit_pkts_vec, "Vector Neon"},
#endif
};
if (rc)
return rc;
- if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp)) {
- PMD_DRV_LOG(ERR,
- "PVID cannot be modified for this function\n");
+ if (!BNXT_SINGLE_PF(bp)) {
+ PMD_DRV_LOG(ERR, "PVID cannot be modified on VF or on shared PF\n");
return -ENOTSUP;
}
bp->vlan = on ? pvid : 0;
return 0;
}
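+/*
+ * Drain pending Rx timestamps from the PTP FIFO, reporting the most recently
+ * captured value through 'last_ts'. The drain loop is bounded by
+ * BNXT_PTP_RX_PND_CNT iterations; -EBUSY is returned if entries are still
+ * pending after that.
+ */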
+static int bnxt_clr_rx_ts(struct bnxt *bp, uint64_t *last_ts)
+{
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ struct bnxt_pf_info *pf = bp->pf;
+ uint16_t port_id;
+ int i = 0;
+ uint32_t fifo;
+
+ if (!ptp || (bp->flags & BNXT_FLAG_CHIP_P5))
+ return -EINVAL;
+
+ port_id = pf->port_id;
+ fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
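+	/* Advance the FIFO until it drains or the retry limit is reached,
+	 * keeping the timestamp from the most recent entry.
+	 */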
+ while ((fifo & BNXT_PTP_RX_FIFO_PENDING) && (i < BNXT_PTP_RX_PND_CNT)) {
+ rte_write32(1 << port_id, (uint8_t *)bp->bar0 +
+ ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO_ADV]);
+ fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
+ *last_ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L]));
+ *last_ts |= (uint64_t)rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
+ ptp->rx_mapped_regs[BNXT_PTP_RX_TS_H])) << 32;
+ i++;
+ }
+
+ if (i >= BNXT_PTP_RX_PND_CNT)
+ return -EBUSY;
+
+ return 0;
+}
+
static int bnxt_get_rx_ts(struct bnxt *bp, uint64_t *ts)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
fifo = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
ptp->rx_mapped_regs[BNXT_PTP_RX_FIFO]));
- if (fifo & BNXT_PTP_RX_FIFO_PENDING) {
-/* bnxt_clr_rx_ts(bp); TBD */
- return -EBUSY;
- }
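+	/* Entries are pending in the FIFO: drain it and return the newest timestamp. */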
+ if (fifo & BNXT_PTP_RX_FIFO_PENDING)
+ return bnxt_clr_rx_ts(bp, ts);
*ts = rte_le_to_cpu_32(rte_read32((uint8_t *)bp->bar0 +
ptp->rx_mapped_regs[BNXT_PTP_RX_TS_L]));
struct rte_ether_addr *addr;
uint64_t pool_mask;
uint32_t pool = 0;
- uint16_t i;
+ uint32_t i;
int rc;
if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp))
struct bnxt_error_recovery_info *info = bp->recovery_info;
uint32_t reg = info->status_regs[index];
uint32_t type, offset, val = 0;
+ int ret = 0;
type = BNXT_FW_STATUS_REG_TYPE(reg);
offset = BNXT_FW_STATUS_REG_OFF(reg);
switch (type) {
case BNXT_FW_STATUS_REG_TYPE_CFG:
- rte_pci_read_config(bp->pdev, &val, sizeof(val), offset);
+ ret = rte_pci_read_config(bp->pdev, &val, sizeof(val), offset);
+ if (ret < 0)
+			PMD_DRV_LOG(ERR, "Failed to read PCI offset %#x\n",
+				    offset);
break;
case BNXT_FW_STATUS_REG_TYPE_GRC:
offset = info->mapped_status_regs[index];
bp->flags |= BNXT_FLAG_FATAL_ERROR;
bp->flags |= BNXT_FLAG_FW_RESET;
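+	/* Stop Rx/Tx processing while the firmware is in the dead state. */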
+ bnxt_stop_rxtx(bp);
+
PMD_DRV_LOG(ERR, "Detected FW dead condition\n");
if (bnxt_is_master_func(bp))
static void bnxt_cancel_fw_health_check(struct bnxt *bp)
{
- if (!bnxt_is_recovery_enabled(bp))
- return;
-
rte_eal_alarm_cancel(bnxt_check_fw_health, (void *)bp);
bp->flags &= ~BNXT_FLAG_FW_HEALTH_CHECK_SCHEDULED;
}
return 0;
}
-static int
-bnxt_parse_devarg_truflow(__rte_unused const char *key,
- const char *value, void *opaque_arg)
-{
- struct bnxt *bp = opaque_arg;
- unsigned long truflow;
- char *end = NULL;
-
- if (!value || !opaque_arg) {
- PMD_DRV_LOG(ERR,
- "Invalid parameter passed to truflow devargs.\n");
- return -EINVAL;
- }
-
- truflow = strtoul(value, &end, 10);
- if (end == NULL || *end != '\0' ||
- (truflow == ULONG_MAX && errno == ERANGE)) {
- PMD_DRV_LOG(ERR,
- "Invalid parameter passed to truflow devargs.\n");
- return -EINVAL;
- }
-
- if (BNXT_DEVARG_TRUFLOW_INVALID(truflow)) {
- PMD_DRV_LOG(ERR,
- "Invalid value passed to truflow devargs.\n");
- return -EINVAL;
- }
-
- if (truflow) {
- bp->flags |= BNXT_FLAG_TRUFLOW_EN;
- PMD_DRV_LOG(INFO, "Host-based truflow feature enabled.\n");
- } else {
- bp->flags &= ~BNXT_FLAG_TRUFLOW_EN;
- PMD_DRV_LOG(INFO, "Host-based truflow feature disabled.\n");
- }
-
- return 0;
-}
-
static int
bnxt_parse_devarg_flow_xstat(__rte_unused const char *key,
const char *value, void *opaque_arg)
if (kvlist == NULL)
return -EINVAL;
- /*
- * Handler for "truflow" devarg.
- * Invoked as for ex: "-a 0000:00:0d.0,host-based-truflow=1"
- */
- ret = rte_kvargs_process(kvlist, BNXT_DEVARG_TRUFLOW,
- bnxt_parse_devarg_truflow, bp);
- if (ret)
- goto err;
-
/*
* Handler for "flow_xstat" devarg.
* Invoked as for ex: "-a 0000:00:0d.0,flow_xstat=1"
goto error_free;
PMD_DRV_LOG(INFO,
- DRV_MODULE_NAME "found at mem %" PRIX64 ", node addr %pM\n",
+ "Found %s device at mem %" PRIX64 ", node addr %pM\n",
+ DRV_MODULE_NAME,
pci_dev->mem_resource[0].phys_addr,
pci_dev->mem_resource[0].addr);
bnxt_free_mem(bp, reconfig_dev);
bnxt_hwrm_func_buf_unrgtr(bp);
- rte_free(bp->pf->vf_req_buf);
+ if (bp->pf != NULL) {
+ rte_free(bp->pf->vf_req_buf);
+ bp->pf->vf_req_buf = NULL;
+ }
rc = bnxt_hwrm_func_driver_unregister(bp, 0);
bp->flags &= ~BNXT_FLAG_REGISTERED;
bnxt_uninit_ctx_mem(bp);
bnxt_free_flow_stats_info(bp);
+ if (bp->rep_info != NULL)
+ bnxt_free_switch_domain(bp);
bnxt_free_rep_info(bp);
rte_free(bp->ptp_cfg);
bp->ptp_cfg = NULL;
return is_device_supported(dev, &bnxt_rte_pmd);
}
-RTE_LOG_REGISTER(bnxt_logtype_driver, pmd.net.bnxt.driver, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(bnxt_logtype_driver, driver, NOTICE);
RTE_PMD_REGISTER_PCI(net_bnxt, bnxt_rte_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_bnxt, bnxt_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(net_bnxt, "* igb_uio | uio_pci_generic | vfio-pci");