#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#include "bnxt_nvm_defs.h"
+#include "bnxt_tf_common.h"
+#include "ulp_flow_db.h"
#define DRV_MODULE_NAME "bnxt"
static const char bnxt_version[] =
#define BNXT_DEVARG_TRUFLOW "host-based-truflow"
#define BNXT_DEVARG_FLOW_XSTAT "flow-xstat"
#define BNXT_DEVARG_MAX_NUM_KFLOWS "max-num-kflows"
+#define BNXT_DEVARG_REPRESENTOR "representor"
static const char *const bnxt_dev_args[] = {
+ BNXT_DEVARG_REPRESENTOR,
BNXT_DEVARG_TRUFLOW,
BNXT_DEVARG_FLOW_XSTAT,
BNXT_DEVARG_MAX_NUM_KFLOWS,
{
struct bnxt *bp = eth_dev->data->dev_private;
-#ifdef RTE_ARCH_X86
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
#ifndef RTE_LIBRTE_IEEE1588
/*
* Vector mode receive can be enabled only if scatter rx is not
static eth_tx_burst_t
bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev)
{
-#ifdef RTE_ARCH_X86
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
#ifndef RTE_LIBRTE_IEEE1588
+ struct bnxt *bp = eth_dev->data->dev_private;
+
/*
* Vector mode transmit can be enabled only if not using scatter rx
* or tx offloads.
*/
if (!eth_dev->data->scattered_rx &&
- !eth_dev->data->dev_conf.txmode.offloads) {
+ !eth_dev->data->dev_conf.txmode.offloads &&
+ !BNXT_TRUFLOW_EN(bp)) {
PMD_DRV_LOG(INFO, "Using vector mode transmit for port %d\n",
eth_dev->data->port_id);
return bnxt_xmit_pkts_vec;
return rc;
}
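+/*
+ * Create a TruFlow default flow rule of the given template type for this
+ * port. The device port ID is passed to the ULP layer as a TLV parameter.
+ */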
+static int32_t
+bnxt_create_port_app_df_rule(struct bnxt *bp, uint8_t flow_type,
+ uint32_t *flow_id)
+{
+ uint16_t port_id = bp->eth_dev->data->port_id;
+ struct ulp_tlv_param param_list[] = {
+ {
+ .type = BNXT_ULP_DF_PARAM_TYPE_DEV_PORT_ID,
+ .length = 2,
+ .value = {(port_id >> 8) & 0xff, port_id & 0xff}
+ },
+ {
+ .type = BNXT_ULP_DF_PARAM_TYPE_LAST,
+ .length = 0,
+ .value = {0}
+ }
+ };
+
+ return ulp_default_flow_create(bp->eth_dev, param_list, flow_type,
+ flow_id);
+}
+
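+/*
+ * Install the port-to-app and app-to-port default flow rules for the port
+ * and cache the CFA action handle of the app-to-port rule for Tx.
+ */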
+static int32_t
+bnxt_create_df_rules(struct bnxt *bp)
+{
+ struct bnxt_ulp_data *cfg_data;
+ int rc;
+
+ cfg_data = bp->ulp_ctx->cfg_data;
+ rc = bnxt_create_port_app_df_rule(bp, BNXT_ULP_DF_TPL_PORT_TO_VS,
+ &cfg_data->port_to_app_flow_id);
+ if (rc) {
+ PMD_DRV_LOG(ERR,
+ "Failed to create port to app default rule\n");
+ return rc;
+ }
+
+ BNXT_TF_DBG(DEBUG, "***** created port to app default rule ******\n");
+ rc = bnxt_create_port_app_df_rule(bp, BNXT_ULP_DF_TPL_VS_TO_PORT,
+ &cfg_data->app_to_port_flow_id);
+ if (rc)
+ goto err;
+
+ rc = ulp_default_flow_db_cfa_action_get(bp->ulp_ctx,
+ cfg_data->app_to_port_flow_id,
+ &cfg_data->tx_cfa_action);
+ if (rc)
+ goto err;
+
+ BNXT_TF_DBG(DEBUG,
+ "***** created app to port default rule *****\n");
+ return 0;
+
+err:
+ BNXT_TF_DBG(DEBUG, "Failed to create app to port default rule\n");
+ return rc;
+}
+
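+/* Tear down the default flow rules created by bnxt_create_df_rules(). */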
+static void
+bnxt_destroy_df_rules(struct bnxt *bp)
+{
+ struct bnxt_ulp_data *cfg_data;
+
+ cfg_data = bp->ulp_ctx->cfg_data;
+ ulp_default_flow_destroy(bp->eth_dev, cfg_data->port_to_app_flow_id);
+ ulp_default_flow_destroy(bp->eth_dev, cfg_data->app_to_port_flow_id);
+}
+
static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = eth_dev->data->dev_private;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
- if (BNXT_TRUFLOW_EN(bp))
- bnxt_ulp_deinit(bp);
-
eth_dev->data->dev_started = 0;
/* Prevent crashes when queues are still in use */
eth_dev->rx_pkt_burst = &bnxt_dummy_recv_pkts;
rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp);
bnxt_cancel_fc_thread(bp);
+ if (BNXT_TRUFLOW_EN(bp)) {
+ if (bp->rep_info != NULL)
+ bnxt_destroy_df_rules(bp);
+ bnxt_ulp_deinit(bp);
+ }
+
if (eth_dev->data->dev_started)
bnxt_dev_stop_op(eth_dev);
if (rc != 0)
vnic->flags = old_flags;
+ if (BNXT_TRUFLOW_EN(bp) && bp->rep_info != NULL)
+ bnxt_create_df_rules(bp);
+
return rc;
}
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
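+/* Report whether the scalar or vector Rx burst function is in use. */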
+static int
+bnxt_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
+ struct rte_eth_burst_mode *mode)
+{
+ eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
+
+ if (pkt_burst == bnxt_recv_pkts) {
+ snprintf(mode->info, sizeof(mode->info), "%s",
+ "Scalar");
+ return 0;
+ }
+#if defined(RTE_ARCH_X86)
+ if (pkt_burst == bnxt_recv_pkts_vec) {
+ snprintf(mode->info, sizeof(mode->info), "%s",
+ "Vector SSE");
+ return 0;
+ }
+#elif defined(RTE_ARCH_ARM64)
+ if (pkt_burst == bnxt_recv_pkts_vec) {
+ snprintf(mode->info, sizeof(mode->info), "%s",
+ "Vector Neon");
+ return 0;
+ }
+#endif
+
+ return -EINVAL;
+}
+
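+/* Report whether the scalar or vector Tx burst function is in use. */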
+static int
+bnxt_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
+ struct rte_eth_burst_mode *mode)
+{
+ eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
+
+ if (pkt_burst == bnxt_xmit_pkts) {
+ snprintf(mode->info, sizeof(mode->info), "%s",
+ "Scalar");
+ return 0;
+ }
+#if defined(RTE_ARCH_X86)
+ if (pkt_burst == bnxt_xmit_pkts_vec) {
+ snprintf(mode->info, sizeof(mode->info), "%s",
+ "Vector SSE");
+ return 0;
+ }
+#elif defined(RTE_ARCH_ARM64)
+ if (pkt_burst == bnxt_xmit_pkts_vec) {
+ snprintf(mode->info, sizeof(mode->info), "%s",
+ "Vector Neon");
+ return 0;
+ }
+#endif
+
+ return -EINVAL;
+}
+
int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
{
struct bnxt *bp = eth_dev->data->dev_private;
new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
VLAN_TAG_SIZE * BNXT_NUM_VLANS;
-#ifdef RTE_ARCH_X86
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
/*
* If vector-mode tx/rx is active, disallow any MTU change that would
* require scattered receive support.
return ret;
}
-static int
+int
bnxt_filter_ctrl_op(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op, void *arg)
struct bnxt *bp = dev->data->dev_private;
int ret = 0;
- ret = is_bnxt_in_error(dev->data->dev_private);
+ if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) {
+ struct bnxt_vf_representor *vfr = dev->data->dev_private;
+ bp = vfr->parent_dev->data->dev_private;
+ }
+
+ ret = is_bnxt_in_error(bp);
if (ret)
return ret;
.set_mc_addr_list = bnxt_dev_set_mc_addr_list_op,
.rxq_info_get = bnxt_rxq_info_get_op,
.txq_info_get = bnxt_txq_info_get_op,
+ .rx_burst_mode_get = bnxt_rx_burst_mode_get,
+ .tx_burst_mode_get = bnxt_tx_burst_mode_get,
.dev_led_on = bnxt_dev_led_on_op,
.dev_led_off = bnxt_dev_led_off_op,
.xstats_get_by_id = bnxt_dev_xstats_get_by_id_op,
}
uint16_t
-bnxt_get_svif(uint16_t port_id, bool func_svif)
+bnxt_get_svif(uint16_t port_id, bool func_svif,
+ enum bnxt_ulp_intf_type type)
{
struct rte_eth_dev *eth_dev;
struct bnxt *bp;
eth_dev = &rte_eth_devices[port_id];
+ if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
+ struct bnxt_vf_representor *vfr = eth_dev->data->dev_private;
+ if (!vfr)
+ return 0;
+
+ if (type == BNXT_ULP_INTF_TYPE_VF_REP)
+ return vfr->svif;
+
+ eth_dev = vfr->parent_dev;
+ }
+
bp = eth_dev->data->dev_private;
return func_svif ? bp->func_svif : bp->port_svif;
}
uint16_t
-bnxt_get_vnic_id(uint16_t port)
+bnxt_get_vnic_id(uint16_t port, enum bnxt_ulp_intf_type type)
{
struct rte_eth_dev *eth_dev;
struct bnxt_vnic_info *vnic;
struct bnxt *bp;
eth_dev = &rte_eth_devices[port];
+ if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
+ struct bnxt_vf_representor *vfr = eth_dev->data->dev_private;
+ if (!vfr)
+ return 0;
+
+ if (type == BNXT_ULP_INTF_TYPE_VF_REP)
+ return vfr->dflt_vnic_id;
+
+ eth_dev = vfr->parent_dev;
+ }
+
bp = eth_dev->data->dev_private;
vnic = BNXT_GET_DEFAULT_VNIC(bp);
}
uint16_t
-bnxt_get_fw_func_id(uint16_t port)
+bnxt_get_fw_func_id(uint16_t port, enum bnxt_ulp_intf_type type)
{
struct rte_eth_dev *eth_dev;
struct bnxt *bp;
eth_dev = &rte_eth_devices[port];
+ if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
+ struct bnxt_vf_representor *vfr = eth_dev->data->dev_private;
+ if (!vfr)
+ return 0;
+
+ if (type == BNXT_ULP_INTF_TYPE_VF_REP)
+ return vfr->fw_fid;
+
+ eth_dev = vfr->parent_dev;
+ }
+
bp = eth_dev->data->dev_private;
return bp->fw_fid;
}
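+/* Classify a port as PF, trusted VF, VF or VF representor for the ULP. */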
+enum bnxt_ulp_intf_type
+bnxt_get_interface_type(uint16_t port)
+{
+ struct rte_eth_dev *eth_dev;
+ struct bnxt *bp;
+
+ eth_dev = &rte_eth_devices[port];
+ if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev))
+ return BNXT_ULP_INTF_TYPE_VF_REP;
+
+ bp = eth_dev->data->dev_private;
+ if (BNXT_PF(bp))
+ return BNXT_ULP_INTF_TYPE_PF;
+ else if (BNXT_VF_IS_TRUSTED(bp))
+ return BNXT_ULP_INTF_TYPE_TRUSTED_VF;
+ else if (BNXT_VF(bp))
+ return BNXT_ULP_INTF_TYPE_VF;
+
+ return BNXT_ULP_INTF_TYPE_INVALID;
+}
+
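+/* Return the physical port ID behind a port or its VF representor. */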
+uint16_t
+bnxt_get_phy_port_id(uint16_t port_id)
+{
+ struct bnxt_vf_representor *vfr;
+ struct rte_eth_dev *eth_dev;
+ struct bnxt *bp;
+
+ eth_dev = &rte_eth_devices[port_id];
+ if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
+ vfr = eth_dev->data->dev_private;
+ if (!vfr)
+ return 0;
+
+ eth_dev = vfr->parent_dev;
+ }
+
+ bp = eth_dev->data->dev_private;
+
+ return BNXT_PF(bp) ? bp->pf->port_id : bp->parent->port_id;
+}
+
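+/* Return the parif for a port, derived from its firmware function ID. */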
+uint16_t
+bnxt_get_parif(uint16_t port_id, enum bnxt_ulp_intf_type type)
+{
+ struct rte_eth_dev *eth_dev;
+ struct bnxt *bp;
+
+ eth_dev = &rte_eth_devices[port_id];
+ if (BNXT_ETH_DEV_IS_REPRESENTOR(eth_dev)) {
+ struct bnxt_vf_representor *vfr = eth_dev->data->dev_private;
+ if (!vfr)
+ return 0;
+
+ if (type == BNXT_ULP_INTF_TYPE_VF_REP)
+ return vfr->fw_fid - 1;
+
+ eth_dev = vfr->parent_dev;
+ }
+
+ bp = eth_dev->data->dev_private;
+
+ return BNXT_PF(bp) ? bp->fw_fid - 1 : bp->parent->fid - 1;
+}
+
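+/* The vport is a one-hot encoding of the physical port ID. */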
+uint16_t
+bnxt_get_vport(uint16_t port_id)
+{
+ return (1 << bnxt_get_phy_port_id(port_id));
+}
+
static void bnxt_alloc_error_recovery_info(struct bnxt *bp)
{
struct bnxt_error_recovery_info *info = bp->recovery_info;
bnxt_hwrm_parent_pf_qcfg(bp);
+ bnxt_hwrm_port_phy_qcaps(bp);
+
rc = bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(bp);
if (rc)
return rc;