#include <rte_cycles.h>
#include <rte_alarm.h>
#include <rte_kvargs.h>
+#include <rte_vect.h>
#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_nvm_defs.h"
#include "bnxt_tf_common.h"
#include "ulp_flow_db.h"
+#include "rte_pmd_bnxt.h"
#define DRV_MODULE_NAME "bnxt"
static const char bnxt_version[] =
*/
#define BNXT_DEVARG_REP_FC_F2R_INVALID(rep_fc_f2r) ((rep_fc_f2r) > 1)
+int bnxt_cfa_code_dynfield_offset = -1;
+
/*
* max_num_kflows must be >= 32
* and must be a power-of-2 supported value
speed_capa |= ETH_LINK_SPEED_50G;
if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB)
speed_capa |= ETH_LINK_SPEED_100G;
- if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_200GB)
+ if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G)
+ speed_capa |= ETH_LINK_SPEED_50G;
+ if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G)
+ speed_capa |= ETH_LINK_SPEED_100G;
+ if (link_speed & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G)
speed_capa |= ETH_LINK_SPEED_200G;
if (bp->link_info->auto_mode ==
dev_info->rx_offload_capa = BNXT_DEV_RX_OFFLOAD_SUPPORT;
if (bp->flags & BNXT_FLAG_PTP_SUPPORTED)
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TIMESTAMP;
- dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT;
+ dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ dev_info->tx_offload_capa = BNXT_DEV_TX_OFFLOAD_SUPPORT |
+ dev_info->tx_queue_offload_capa;
dev_info->flow_type_rss_offloads = BNXT_ETH_RSS_SUPPORT;
dev_info->speed_capa = bnxt_get_speed_capabilities(bp);
.wthresh = 0,
},
.rx_free_thresh = 32,
- /* If no descriptors available, pkts are dropped by default */
- .rx_drop_en = 1,
+ .rx_drop_en = BNXT_DEFAULT_RX_DROP_EN,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_RX_OFFLOAD_RSS_HASH |
DEV_RX_OFFLOAD_VLAN_FILTER)) &&
- !BNXT_TRUFLOW_EN(bp) && BNXT_NUM_ASYNC_CPR(bp)) {
+ !BNXT_TRUFLOW_EN(bp) && BNXT_NUM_ASYNC_CPR(bp) &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
PMD_DRV_LOG(INFO, "Using vector mode receive for port %d\n",
eth_dev->data->port_id);
bp->flags |= BNXT_FLAG_RX_VECTOR_PKT_MODE;
{
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
#ifndef RTE_LIBRTE_IEEE1588
+ uint64_t offloads = eth_dev->data->dev_conf.txmode.offloads;
struct bnxt *bp = eth_dev->data->dev_private;
/*
* or tx offloads.
*/
if (!eth_dev->data->scattered_rx &&
- !eth_dev->data->dev_conf.txmode.offloads &&
- !BNXT_TRUFLOW_EN(bp)) {
+ !(offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+ !BNXT_TRUFLOW_EN(bp) &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
PMD_DRV_LOG(INFO, "Using vector mode transmit for port %d\n",
eth_dev->data->port_id);
return bnxt_xmit_pkts_vec;
"Port %d scatter: %d tx offload: %" PRIX64 "\n",
eth_dev->data->port_id,
eth_dev->data->scattered_rx,
- eth_dev->data->dev_conf.txmode.offloads);
+ offloads);
#endif
#endif
return bnxt_xmit_pkts;
if (bp->rx_cp_nr_rings > RTE_ETHDEV_QUEUE_STAT_CNTRS) {
PMD_DRV_LOG(ERR,
- "RxQ cnt %d > CONFIG_RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
+ "RxQ cnt %d > RTE_ETHDEV_QUEUE_STAT_CNTRS %d\n",
bp->rx_cp_nr_rings, RTE_ETHDEV_QUEUE_STAT_CNTRS);
}
eth_dev->data->scattered_rx = bnxt_scattered_rx(eth_dev);
eth_dev->data->dev_started = 1;
- bnxt_link_update(eth_dev, 1, ETH_LINK_UP);
+ bnxt_link_update_op(eth_dev, 1);
if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
vlan_mask |= ETH_VLAN_FILTER_MASK;
}
/* Unload the driver, release resources */
-static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
+static int bnxt_dev_stop_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = eth_dev->data->dev_private;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct rte_eth_link link;
+ int ret;
eth_dev->data->dev_started = 0;
eth_dev->data->scattered_rx = 0;
rte_intr_disable(intr_handle);
/* Stop the child representors for this device */
- bnxt_rep_stop_all(bp);
+ ret = bnxt_rep_stop_all(bp);
+ if (ret != 0)
+ return ret;
/* delete the bnxt ULP port details */
bnxt_ulp_port_deinit(bp);
bnxt_cancel_fw_health_check(bp);
/* Do not bring link down during reset recovery */
- if (!is_bnxt_in_error(bp))
+ if (!is_bnxt_in_error(bp)) {
bnxt_dev_set_link_down_op(eth_dev);
-
- /* Wait for link to be reset and the async notification to process.
- * During reset recovery, there is no need to wait and
- * VF/NPAR functions do not have privilege to change PHY config.
- */
- if (!is_bnxt_in_error(bp) && BNXT_SINGLE_PF(bp))
- bnxt_link_update(eth_dev, 1, ETH_LINK_DOWN);
+ /* Wait for link to be reset */
+ if (BNXT_SINGLE_PF(bp))
+ rte_delay_ms(500);
+ /* clear the recorded link status */
+ memset(&link, 0, sizeof(link));
+ rte_eth_linkstatus_set(eth_dev, &link);
+ }
/* Clean queue intr-vector mapping */
rte_intr_efd_disable(intr_handle);
/* All filters are deleted on a port stop. */
if (BNXT_FLOW_XSTATS_EN(bp))
bp->flow_stat->flow_count = 0;
+
+ return 0;
}
-static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
+static int bnxt_dev_close_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = eth_dev->data->dev_private;
+ int ret = 0;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
/* cancel the recovery handler before remove dev */
rte_eal_alarm_cancel(bnxt_dev_reset_and_resume, (void *)bp);
bnxt_cancel_fc_thread(bp);
if (eth_dev->data->dev_started)
- bnxt_dev_stop_op(eth_dev);
+ ret = bnxt_dev_stop_op(eth_dev);
bnxt_free_switch_domain(bp);
bnxt_free_pf_info(bp);
bnxt_free_parent_info(bp);
- eth_dev->dev_ops = NULL;
- eth_dev->rx_pkt_burst = NULL;
- eth_dev->tx_pkt_burst = NULL;
-
rte_memzone_free((const struct rte_memzone *)bp->tx_mem_zone);
bp->tx_mem_zone = NULL;
rte_memzone_free((const struct rte_memzone *)bp->rx_mem_zone);
rte_free(bp->grp_info);
bp->grp_info = NULL;
+
+ return ret;
}
static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev,
if (rc)
return rc;
- if (BNXT_VF(bp) & !BNXT_VF_IS_TRUSTED(bp)) {
+ if (BNXT_VF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
PMD_DRV_LOG(ERR, "Cannot add MAC address to a VF interface\n");
return -ENOTSUP;
}
return rc;
}
-int bnxt_link_update(struct rte_eth_dev *eth_dev, int wait_to_complete,
- bool exp_link_status)
+int bnxt_link_update_op(struct rte_eth_dev *eth_dev, int wait_to_complete)
{
int rc = 0;
struct bnxt *bp = eth_dev->data->dev_private;
struct rte_eth_link new;
- int cnt = exp_link_status ? BNXT_LINK_UP_WAIT_CNT :
- BNXT_LINK_DOWN_WAIT_CNT;
+ int cnt = wait_to_complete ? BNXT_MAX_LINK_WAIT_CNT :
+ BNXT_MIN_LINK_WAIT_CNT;
rc = is_bnxt_in_error(bp);
if (rc)
goto out;
}
- if (!wait_to_complete || new.link_status == exp_link_status)
+ if (!wait_to_complete || new.link_status)
break;
rte_delay_ms(BNXT_LINK_WAIT_INTERVAL);
} while (cnt--);
+ /* Only single function PF can bring phy down.
+ * When port is stopped, report link down for VF/MH/NPAR functions.
+ */
+ if (!BNXT_SINGLE_PF(bp) && !eth_dev->data->dev_started)
+ memset(&new, 0, sizeof(new));
+
out:
/* Timed out or success */
if (new.link_status != eth_dev->data->dev_link.link_status ||
return rc;
}
-int bnxt_link_update_op(struct rte_eth_dev *eth_dev,
- int wait_to_complete)
-{
- return bnxt_link_update(eth_dev, wait_to_complete, ETH_LINK_UP);
-}
-
static int bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = eth_dev->data->dev_private;
/* Update the default RSS VNIC(s) */
vnic = BNXT_GET_DEFAULT_VNIC(bp);
vnic->hash_type = bnxt_rte_to_hwrm_hash_types(rss_conf->rss_hf);
+ vnic->hash_mode =
+ bnxt_rte_to_hwrm_hash_level(bp, rss_conf->rss_hf,
+ ETH_RSS_LEVEL(rss_conf->rss_hf));
/*
* If hashkey is not specified, use the previously configured
hash_types &=
~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6;
}
+
+ rss_conf->rss_hf |=
+ bnxt_hwrm_to_rte_rss_level(bp, vnic->hash_mode);
+
if (hash_types) {
PMD_DRV_LOG(ERR,
"Unknown RSS config from firmware (%08x), RSS disabled",
}
rc = bnxt_hwrm_tunnel_dst_port_free(bp, port, tunnel_type);
- if (!rc) {
- if (tunnel_type ==
- HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN)
- bp->vxlan_port = 0;
- if (tunnel_type ==
- HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE)
- bp->geneve_port = 0;
- }
return rc;
}
qinfo->nb_desc = rxq->nb_rx_desc;
qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
- qinfo->conf.rx_drop_en = 0;
+ qinfo->conf.rx_drop_en = rxq->drop_en;
qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+ qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
}
static void
qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
qinfo->conf.tx_rs_thresh = 0;
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+ qinfo->conf.offloads = txq->offloads;
}
static const struct {
.tx_burst_mode_get = bnxt_tx_burst_mode_get,
.dev_led_on = bnxt_dev_led_on_op,
.dev_led_off = bnxt_dev_led_off_op,
- .xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
- .xstats_get_names_by_id = bnxt_dev_xstats_get_names_by_id_op,
.rx_queue_start = bnxt_rx_queue_start,
.rx_queue_stop = bnxt_rx_queue_stop,
.tx_queue_start = bnxt_tx_queue_start,
if (!BNXT_PF(bp))
return;
-#define ALLOW_FUNC(x) \
- { \
- uint32_t arg = (x); \
- bp->pf->vf_req_fwd[((arg) >> 5)] &= \
- ~rte_cpu_to_le_32(1 << ((arg) & 0x1f)); \
- }
+ memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd));
- /* Forward all requests if firmware is new enough */
- if (((bp->fw_ver >= ((20 << 24) | (6 << 16) | (100 << 8))) &&
- (bp->fw_ver < ((20 << 24) | (7 << 16)))) ||
- ((bp->fw_ver >= ((20 << 24) | (8 << 16))))) {
- memset(bp->pf->vf_req_fwd, 0xff, sizeof(bp->pf->vf_req_fwd));
- } else {
- PMD_DRV_LOG(WARNING,
- "Firmware too old for VF mailbox functionality\n");
- memset(bp->pf->vf_req_fwd, 0, sizeof(bp->pf->vf_req_fwd));
- }
-
- /*
- * The following are used for driver cleanup. If we disallow these,
- * VF drivers can't clean up cleanly.
- */
- ALLOW_FUNC(HWRM_FUNC_DRV_UNRGTR);
- ALLOW_FUNC(HWRM_VNIC_FREE);
- ALLOW_FUNC(HWRM_RING_FREE);
- ALLOW_FUNC(HWRM_RING_GRP_FREE);
- ALLOW_FUNC(HWRM_VNIC_RSS_COS_LB_CTX_FREE);
- ALLOW_FUNC(HWRM_CFA_L2_FILTER_FREE);
- ALLOW_FUNC(HWRM_STAT_CTX_FREE);
- ALLOW_FUNC(HWRM_PORT_PHY_QCFG);
- ALLOW_FUNC(HWRM_VNIC_TPA_CFG);
+ if (!(bp->fw_cap & BNXT_FW_CAP_LINK_ADMIN))
+ BNXT_HWRM_CMD_TO_FORWARD(HWRM_PORT_PHY_QCFG);
+ BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_CFG);
+ BNXT_HWRM_CMD_TO_FORWARD(HWRM_FUNC_VF_CFG);
+ BNXT_HWRM_CMD_TO_FORWARD(HWRM_CFA_L2_FILTER_ALLOC);
+ BNXT_HWRM_CMD_TO_FORWARD(HWRM_OEM_CMD);
}
uint16_t
return 0;
rte_eth_copy_pci_info(eth_dev, pci_dev);
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
bp = eth_dev->data->dev_private;
pci_dev->id.device_id == BROADCOM_DEV_ID_58802_VF)
bp->flags |= BNXT_FLAG_STINGRAY;
+ if (BNXT_TRUFLOW_EN(bp)) {
+ /* extra mbuf field is required to store CFA code from mark */
+ static const struct rte_mbuf_dynfield bnxt_cfa_code_dynfield_desc = {
+ .name = RTE_PMD_BNXT_CFA_CODE_DYNFIELD_NAME,
+ .size = sizeof(bnxt_cfa_code_dynfield_t),
+ .align = __alignof__(bnxt_cfa_code_dynfield_t),
+ };
+ bnxt_cfa_code_dynfield_offset =
+ rte_mbuf_dynfield_register(&bnxt_cfa_code_dynfield_desc);
+ if (bnxt_cfa_code_dynfield_offset < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to register mbuf field for TruFlow mark\n");
+ return -rte_errno;
+ }
+ }
+
rc = bnxt_init_board(eth_dev);
if (rc) {
PMD_DRV_LOG(ERR,
bnxt_alloc_switch_domain(bp);
- /* Pass the information to the rte_eth_dev_close() that it should also
- * release the private port resources.
- */
- eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
-
PMD_DRV_LOG(INFO,
DRV_MODULE_NAME "found at mem %" PRIX64 ", node addr %pM\n",
pci_dev->mem_resource[0].phys_addr,
bnxt_free_int(bp);
bnxt_free_mem(bp, reconfig_dev);
+
bnxt_hwrm_func_buf_unrgtr(bp);
+ rte_free(bp->pf->vf_req_buf);
+
rc = bnxt_hwrm_func_driver_unregister(bp, 0);
bp->flags &= ~BNXT_FLAG_REGISTERED;
bnxt_free_ctx_mem(bp);
struct bnxt *backing_bp;
uint16_t num_rep;
int i, ret = 0;
- struct rte_kvargs *kvlist;
+ struct rte_kvargs *kvlist = NULL;
num_rep = eth_da.nb_representor_ports;
if (num_rep > BNXT_MAX_VF_REPS) {
* Invoked as for ex: "-w 000:00:0d.0,
* rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>"
*/
- rte_kvargs_process(kvlist, BNXT_DEVARG_REP_IS_PF,
- bnxt_parse_devarg_rep_is_pf,
- (void *)&representor);
+ ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_IS_PF,
+ bnxt_parse_devarg_rep_is_pf,
+ (void *)&representor);
+ if (ret) {
+ ret = -EINVAL;
+ goto err;
+ }
/*
* Handler for "rep_based_pf" devarg.
* Invoked as for ex: "-w 000:00:0d.0,
* rep-based-pf=<pf index> rep-is-pf=<VF=0 or PF=1>"
*/
- rte_kvargs_process(kvlist, BNXT_DEVARG_REP_BASED_PF,
- bnxt_parse_devarg_rep_based_pf,
- (void *)&representor);
+ ret = rte_kvargs_process(kvlist,
+ BNXT_DEVARG_REP_BASED_PF,
+ bnxt_parse_devarg_rep_based_pf,
+ (void *)&representor);
+ if (ret) {
+ ret = -EINVAL;
+ goto err;
+ }
/*
 * Handler for "rep_q_r2f" devarg.
 * Invoked as for ex: "-w 000:00:0d.0,
 * rep-based-pf=<pf index> rep-q-r2f=<queue index>"
 */
- rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_R2F,
- bnxt_parse_devarg_rep_q_r2f,
- (void *)&representor);
+ ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_R2F,
+ bnxt_parse_devarg_rep_q_r2f,
+ (void *)&representor);
+ if (ret) {
+ ret = -EINVAL;
+ goto err;
+ }
/*
 * Handler for "rep_q_f2r" devarg.
 * Invoked as for ex: "-w 000:00:0d.0,
 * rep-based-pf=<pf index> rep-q-f2r=<queue index>"
 */
- rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_F2R,
- bnxt_parse_devarg_rep_q_f2r,
- (void *)&representor);
+ ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_Q_F2R,
+ bnxt_parse_devarg_rep_q_f2r,
+ (void *)&representor);
+ if (ret) {
+ ret = -EINVAL;
+ goto err;
+ }
/*
 * Handler for "rep_fc_r2f" devarg.
 * Invoked as for ex: "-w 000:00:0d.0,
 * rep-based-pf=<pf index> rep-fc-r2f=<0 or 1>"
 */
- rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_R2F,
- bnxt_parse_devarg_rep_fc_r2f,
- (void *)&representor);
+ ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_R2F,
+ bnxt_parse_devarg_rep_fc_r2f,
+ (void *)&representor);
+ if (ret) {
+ ret = -EINVAL;
+ goto err;
+ }
/*
 * Handler for "rep_fc_f2r" devarg.
 * Invoked as for ex: "-w 000:00:0d.0,
 * rep-based-pf=<pf index> rep-fc-f2r=<0 or 1>"
 */
- rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_F2R,
- bnxt_parse_devarg_rep_fc_f2r,
- (void *)&representor);
+ ret = rte_kvargs_process(kvlist, BNXT_DEVARG_REP_FC_F2R,
+ bnxt_parse_devarg_rep_fc_f2r,
+ (void *)&representor);
+ if (ret) {
+ ret = -EINVAL;
+ goto err;
+ }
}
ret = rte_eth_dev_create(&pci_dev->device, name,
}
+ rte_kvargs_free(kvlist);
return 0;
err:
*/
if (num_rep > 1)
bnxt_pci_remove_dev_with_reps(backing_eth_dev);
+ rte_errno = -ret;
+ rte_kvargs_free(kvlist);
return ret;
}
}
PMD_DRV_LOG(DEBUG, "BNXT Port:%d pci probe\n",
backing_eth_dev->data->port_id);
+
+ if (!num_rep)
+ return ret;
+
/* probe representor ports now */
ret = bnxt_rep_port_probe(pci_dev, eth_da, backing_eth_dev,
pci_dev->device.devargs->args);