#include "hsi_struct_def_dpdk.h"
#include "bnxt_nvm_defs.h"
#include "bnxt_tf_common.h"
+#include "ulp_flow_db.h"
#define DRV_MODULE_NAME "bnxt"
static const char bnxt_version[] =
#define BNXT_DEVARG_TRUFLOW "host-based-truflow"
#define BNXT_DEVARG_FLOW_XSTAT "flow-xstat"
#define BNXT_DEVARG_MAX_NUM_KFLOWS "max-num-kflows"
+#define BNXT_DEVARG_REPRESENTOR "representor"
static const char *const bnxt_dev_args[] = {
+ BNXT_DEVARG_REPRESENTOR,
BNXT_DEVARG_TRUFLOW,
BNXT_DEVARG_FLOW_XSTAT,
BNXT_DEVARG_MAX_NUM_KFLOWS,
static void bnxt_free_leds_info(struct bnxt *bp)
{
+	/*
+	 * LED info is never allocated for VFs (see bnxt_alloc_leds_info),
+	 * so there is nothing to free; bail out early to mirror that guard.
+	 */
+	if (BNXT_VF(bp))
+		return;
+
	rte_free(bp->leds);
	bp->leds = NULL;
}
static int bnxt_alloc_leds_info(struct bnxt *bp)
{
+ if (BNXT_VF(bp))
+ return 0;
+
bp->leds = rte_zmalloc("bnxt_leds",
BNXT_MAX_LED * sizeof(struct bnxt_led_info),
0);
{
struct bnxt *bp = eth_dev->data->dev_private;
-#ifdef RTE_ARCH_X86
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
#ifndef RTE_LIBRTE_IEEE1588
/*
* Vector mode receive can be enabled only if scatter rx is not
static eth_tx_burst_t
bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev)
{
-#ifdef RTE_ARCH_X86
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
#ifndef RTE_LIBRTE_IEEE1588
+ struct bnxt *bp = eth_dev->data->dev_private;
+
/*
* Vector mode transmit can be enabled only if not using scatter rx
* or tx offloads.
*/
if (!eth_dev->data->scattered_rx &&
- !eth_dev->data->dev_conf.txmode.offloads) {
+ !eth_dev->data->dev_conf.txmode.offloads &&
+ !BNXT_TRUFLOW_EN(bp)) {
PMD_DRV_LOG(INFO, "Using vector mode transmit for port %d\n",
eth_dev->data->port_id);
return bnxt_xmit_pkts_vec;
bnxt_schedule_fw_health_check(bp);
pthread_mutex_unlock(&bp->def_cp_lock);
- if (BNXT_TRUFLOW_EN(bp))
- bnxt_ulp_init(bp);
+ bnxt_ulp_init(bp);
return 0;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
- if (BNXT_TRUFLOW_EN(bp))
- bnxt_ulp_deinit(bp);
-
eth_dev->data->dev_started = 0;
/* Prevent crashes when queues are still in use */
eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
/* disable uio/vfio intr/eventfd mapping */
rte_intr_disable(intr_handle);
+ bnxt_ulp_destroy_df_rules(bp, false);
+ bnxt_ulp_deinit(bp);
+
bnxt_cancel_fw_health_check(bp);
bnxt_dev_set_link_down_op(eth_dev);
if (rc != 0)
vnic->flags = old_flags;
+ bnxt_ulp_create_df_rules(bp);
+
return rc;
}
bp->outer_tpid_bd =
TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
break;
- case 0x9100:
+ case RTE_ETHER_TYPE_QINQ1:
bp->outer_tpid_bd =
TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100;
break;
- case 0x9200:
+ case RTE_ETHER_TYPE_QINQ2:
bp->outer_tpid_bd =
TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200;
break;
- case 0x9300:
+ case RTE_ETHER_TYPE_QINQ3:
bp->outer_tpid_bd =
TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300;
break;
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
+/*
+ * Report the Rx burst mode in use on this port ("Scalar" or the
+ * architecture-specific vector mode). Returns 0 on success, or
+ * -EINVAL if the active rx_pkt_burst handler is not recognized.
+ */
+static int
+bnxt_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
+		       struct rte_eth_burst_mode *mode)
+{
+	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
+
+	if (pkt_burst == bnxt_recv_pkts) {
+		snprintf(mode->info, sizeof(mode->info), "%s",
+			 "Scalar");
+		return 0;
+	}
+#if defined(RTE_ARCH_X86)
+	if (pkt_burst == bnxt_recv_pkts_vec) {
+		snprintf(mode->info, sizeof(mode->info), "%s",
+			 "Vector SSE");
+		return 0;
+	}
+#elif defined(RTE_ARCH_ARM64)
+	/* Arm builds vectorize with NEON, not SSE. */
+	if (pkt_burst == bnxt_recv_pkts_vec) {
+		snprintf(mode->info, sizeof(mode->info), "%s",
+			 "Vector Neon");
+		return 0;
+	}
+#endif
+
+	return -EINVAL;
+}
+
+/*
+ * Report the Tx burst mode in use on this port ("Scalar" or the
+ * architecture-specific vector mode). Returns 0 on success, or
+ * -EINVAL if the active tx_pkt_burst handler is not recognized.
+ */
+static int
+bnxt_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
+		       struct rte_eth_burst_mode *mode)
+{
+	eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
+
+	if (pkt_burst == bnxt_xmit_pkts) {
+		snprintf(mode->info, sizeof(mode->info), "%s",
+			 "Scalar");
+		return 0;
+	}
+#if defined(RTE_ARCH_X86)
+	if (pkt_burst == bnxt_xmit_pkts_vec) {
+		snprintf(mode->info, sizeof(mode->info), "%s",
+			 "Vector SSE");
+		return 0;
+	}
+#elif defined(RTE_ARCH_ARM64)
+	/* Arm builds vectorize with NEON, not SSE. */
+	if (pkt_burst == bnxt_xmit_pkts_vec) {
+		snprintf(mode->info, sizeof(mode->info), "%s",
+			 "Vector Neon");
+		return 0;
+	}
+#endif
+
+	return -EINVAL;
+}
+
int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
{
struct bnxt *bp = eth_dev->data->dev_private;
new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
VLAN_TAG_SIZE * BNXT_NUM_VLANS;
-#ifdef RTE_ARCH_X86
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
/*
* If vector-mode tx/rx is active, disallow any MTU change that would
* require scattered receive support.
return ret;
}
-static int
+int
bnxt_filter_ctrl_op(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op, void *arg)
struct bnxt *bp = dev->data->dev_private;
int ret = 0;
- ret = is_bnxt_in_error(dev->data->dev_private);
+ if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) {
+ struct bnxt_vf_representor *vfr = dev->data->dev_private;
+ bp = vfr->parent_dev->data->dev_private;
+ }
+
+ ret = is_bnxt_in_error(bp);
if (ret)
return ret;
.set_mc_addr_list = bnxt_dev_set_mc_addr_list_op,
.rxq_info_get = bnxt_rxq_info_get_op,
.txq_info_get = bnxt_txq_info_get_op,
+ .rx_burst_mode_get = bnxt_rx_burst_mode_get,
+ .tx_burst_mode_get = bnxt_tx_burst_mode_get,
.dev_led_on = bnxt_dev_led_on_op,
.dev_led_off = bnxt_dev_led_off_op,
.xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
}
uint16_t
-bnxt_get_svif(uint16_t port_id, bool func_svif)
+bnxt_get_svif(uint16_t port_id, bool func_svif,
+	      enum bnxt_ulp_intf_type type)
{
	struct rte_eth_dev *eth_dev;
	struct bnxt *bp;
	eth_dev = &rte_eth_devices[port_id];
+	/*
+	 * For a representor port: return the representor's own SVIF when the
+	 * caller asks for the VF-rep view; otherwise resolve the SVIF from
+	 * the representor's parent device below.
+	 */
+	if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
+		struct bnxt_vf_representor *vfr = eth_dev->data->dev_private;
+		if (!vfr)
+			return 0;
+
+		if (type == BNXT_ULP_INTF_TYPE_VF_REP)
+			return vfr->svif;
+
+		eth_dev = vfr->parent_dev;
+	}
+
	bp = eth_dev->data->dev_private;
	return func_svif ? bp->func_svif : bp->port_svif;
}
uint16_t
-bnxt_get_vnic_id(uint16_t port)
+bnxt_get_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type)
{
struct rte_eth_dev *eth_dev;
struct bnxt_vnic_info *vnic;
struct bnxt *bp;
eth_dev = &rte_eth_devices[port];
+ if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
+ struct bnxt_vf_representor *vfr = eth_dev->data->dev_private;
+ if (!vfr)
+ return 0;
+
+ if (type == BNXT_ULP_INTF_TYPE_VF_REP)
+ return vfr->dflt_vnic_id;
+
+ eth_dev = vfr->parent_dev;
+ }
+
bp = eth_dev->data->dev_private;
vnic = BNXT_GET_DEFAULT_VNIC(bp);
}
uint16_t
-bnxt_get_fw_func_id(uint16_t port)
+bnxt_get_fw_func_id(uint16_t port, enum bnxt_ulp_intf_type type)
{
struct rte_eth_dev *eth_dev;
struct bnxt *bp;
eth_dev = &rte_eth_devices[port];
+ if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
+ struct bnxt_vf_representor *vfr = eth_dev->data->dev_private;
+ if (!vfr)
+ return 0;
+
+ if (type == BNXT_ULP_INTF_TYPE_VF_REP)
+ return vfr->fw_fid;
+
+ eth_dev = vfr->parent_dev;
+ }
+
bp = eth_dev->data->dev_private;
return bp->fw_fid;
return BNXT_ULP_INTF_TYPE_VF_REP;
bp = eth_dev->data->dev_private;
- return BNXT_PF(bp) ? BNXT_ULP_INTF_TYPE_PF
- : BNXT_ULP_INTF_TYPE_VF;
+ if (BNXT_PF(bp))
+ return BNXT_ULP_INTF_TYPE_PF;
+ else if (BNXT_VF_IS_TRUSTED(bp))
+ return BNXT_ULP_INTF_TYPE_TRUSTED_VF;
+ else if (BNXT_VF(bp))
+ return BNXT_ULP_INTF_TYPE_VF;
+
+ return BNXT_ULP_INTF_TYPE_INVALID;
}
uint16_t
eth_dev = &rte_eth_devices[port_id];
if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
vfr = eth_dev->data->dev_private;
+ if (!vfr)
+ return 0;
+
eth_dev = vfr->parent_dev;
}
}
uint16_t
-bnxt_get_parif(uint16_t port_id)
+bnxt_get_parif(uint16_t port_id, enum bnxt_ulp_intf_type type)
{
- struct bnxt_vf_representor *vfr;
struct rte_eth_dev *eth_dev;
struct bnxt *bp;
eth_dev = &rte_eth_devices[port_id];
if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
- vfr = eth_dev->data->dev_private;
+ struct bnxt_vf_representor *vfr = eth_dev->data->dev_private;
+ if (!vfr)
+ return 0;
+
+ if (type == BNXT_ULP_INTF_TYPE_VF_REP)
+ return vfr->fw_fid - 1;
+
eth_dev = vfr->parent_dev;
}
bnxt_hwrm_port_phy_qcaps(bp);
- rc = bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(bp);
- if (rc)
- return rc;
-
bnxt_alloc_error_recovery_info(bp);
/* Get the adapter error recovery support info */
rc = bnxt_hwrm_error_recovery_qcfg(bp);