static void bnxt_free_leds_info(struct bnxt *bp)
{
+ /* LED state is a PF-only resource; bp->leds is never allocated on a VF,
+  * so bail out early rather than freeing a pointer the VF never owned.
+  */
+ if (BNXT_VF(bp))
+ return;
+
rte_free(bp->leds);
bp->leds = NULL;
}
static int bnxt_alloc_leds_info(struct bnxt *bp)
{
+ /* Skip the allocation on VFs: LED control is exposed to the PF only,
+  * and the matching free path also early-returns for VFs.
+  */
+ if (BNXT_VF(bp))
+ return 0;
+
bp->leds = rte_zmalloc("bnxt_leds",
BNXT_MAX_LED * sizeof(struct bnxt_led_info),
0);
{
struct bnxt *bp = eth_dev->data->dev_private;
-#ifdef RTE_ARCH_X86
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
#ifndef RTE_LIBRTE_IEEE1588
/*
* Vector mode receive can be enabled only if scatter rx is not
static eth_tx_burst_t
bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev)
{
-#ifdef RTE_ARCH_X86
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
#ifndef RTE_LIBRTE_IEEE1588
struct bnxt *bp = eth_dev->data->dev_private;
return rc;
}
-static int32_t
-bnxt_create_port_app_df_rule(struct bnxt *bp, uint8_t flow_type,
- uint32_t *flow_id)
-{
- uint16_t port_id = bp->eth_dev->data->port_id;
- struct ulp_tlv_param param_list[] = {
- {
- .type = BNXT_ULP_DF_PARAM_TYPE_DEV_PORT_ID,
- .length = 2,
- .value = {(port_id >> 8) & 0xff, port_id & 0xff}
- },
- {
- .type = BNXT_ULP_DF_PARAM_TYPE_LAST,
- .length = 0,
- .value = {0}
- }
- };
-
- return ulp_default_flow_create(bp->eth_dev, param_list, flow_type,
- flow_id);
-}
-
-static int32_t
-bnxt_create_df_rules(struct bnxt *bp)
-{
- struct bnxt_ulp_data *cfg_data;
- int rc;
-
- cfg_data = bp->ulp_ctx->cfg_data;
- rc = bnxt_create_port_app_df_rule(bp, BNXT_ULP_DF_TPL_PORT_TO_VS,
- &cfg_data->port_to_app_flow_id);
- if (rc) {
- PMD_DRV_LOG(ERR,
- "Failed to create port to app default rule\n");
- return rc;
- }
-
- BNXT_TF_DBG(DEBUG, "***** created port to app default rule ******\n");
- rc = bnxt_create_port_app_df_rule(bp, BNXT_ULP_DF_TPL_VS_TO_PORT,
- &cfg_data->app_to_port_flow_id);
- if (!rc) {
- rc = ulp_default_flow_db_cfa_action_get(bp->ulp_ctx,
- cfg_data->app_to_port_flow_id,
- &cfg_data->tx_cfa_action);
- if (rc)
- goto err;
-
- BNXT_TF_DBG(DEBUG,
- "***** created app to port default rule *****\n");
- return 0;
- }
-
-err:
- BNXT_TF_DBG(DEBUG, "Failed to create app to port default rule\n");
- return rc;
-}
-
-static void
-bnxt_destroy_df_rules(struct bnxt *bp)
-{
- struct bnxt_ulp_data *cfg_data;
-
- cfg_data = bp->ulp_ctx->cfg_data;
- ulp_default_flow_destroy(bp->eth_dev, cfg_data->port_to_app_flow_id);
- ulp_default_flow_destroy(bp->eth_dev, cfg_data->app_to_port_flow_id);
-}
-
static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = eth_dev->data->dev_private;
bnxt_schedule_fw_health_check(bp);
pthread_mutex_unlock(&bp->def_cp_lock);
- if (BNXT_TRUFLOW_EN(bp))
- bnxt_ulp_init(bp);
+ bnxt_ulp_init(bp);
return 0;
/* disable uio/vfio intr/eventfd mapping */
rte_intr_disable(intr_handle);
+ bnxt_ulp_destroy_df_rules(bp, false);
+ bnxt_ulp_deinit(bp);
+
bnxt_cancel_fw_health_check(bp);
bnxt_dev_set_link_down_op(eth_dev);
rte_eal_alarm_cancel(bnxt_dev_recover, (void *)bp);
bnxt_cancel_fc_thread(bp);
- if (BNXT_TRUFLOW_EN(bp)) {
- if (bp->rep_info != NULL)
- bnxt_destroy_df_rules(bp);
- bnxt_ulp_deinit(bp);
- }
-
if (eth_dev->data->dev_started)
bnxt_dev_stop_op(eth_dev);
if (rc != 0)
vnic->flags = old_flags;
- if (BNXT_TRUFLOW_EN(bp) && bp->rep_info != NULL)
- bnxt_create_df_rules(bp);
+ bnxt_ulp_create_df_rules(bp);
return rc;
}
bp->outer_tpid_bd =
TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
break;
- case 0x9100:
+ case RTE_ETHER_TYPE_QINQ1:
bp->outer_tpid_bd =
TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100;
break;
- case 0x9200:
+ case RTE_ETHER_TYPE_QINQ2:
bp->outer_tpid_bd =
TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200;
break;
- case 0x9300:
+ case RTE_ETHER_TYPE_QINQ3:
bp->outer_tpid_bd =
TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300;
break;
"Scalar");
return 0;
}
-#ifdef RTE_ARCH_X86
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
if (pkt_burst == bnxt_recv_pkts_vec) {
snprintf(mode->info, sizeof(mode->info), "%s",
"Vector SSE");
"Scalar");
return 0;
}
-#ifdef RTE_ARCH_X86
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
if (pkt_burst == bnxt_xmit_pkts_vec) {
snprintf(mode->info, sizeof(mode->info), "%s",
"Vector SSE");
new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
VLAN_TAG_SIZE * BNXT_NUM_VLANS;
-#ifdef RTE_ARCH_X86
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
/*
* If vector-mode tx/rx is active, disallow any MTU change that would
* require scattered receive support.
return ret;
}
-static int
+int
bnxt_filter_ctrl_op(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op, void *arg)
struct bnxt *bp = dev->data->dev_private;
int ret = 0;
- ret = is_bnxt_in_error(dev->data->dev_private);
+ if (BNXT_ETH_DEV_IS_REPRESENTOR(dev)) {
+ struct bnxt_vf_representor *vfr = dev->data->dev_private;
+ bp = vfr->parent_dev->data->dev_private;
+ }
+
+ ret = is_bnxt_in_error(bp);
if (ret)
return ret;
bnxt_hwrm_port_phy_qcaps(bp);
- rc = bnxt_hwrm_cfa_adv_flow_mgmt_qcaps(bp);
- if (rc)
- return rc;
-
bnxt_alloc_error_recovery_info(bp);
/* Get the adapter error recovery support info */
rc = bnxt_hwrm_error_recovery_qcfg(bp);