boolean_t tso;
boolean_t tso_encap;
+ uint64_t negotiated_rx_metadata;
+
uint32_t rxd_wait_timeout_ns;
bool switchdev;
return nb_repr;
}
+/*
+ * Ethdev rx_metadata_negotiate callback.
+ *
+ * On entry, *features holds the Rx metadata delivery features requested
+ * by the application (RTE_ETH_RX_METADATA_* flags). The callback
+ * intersects that request with what the currently selected Rx datapath
+ * can deliver, records the result in the adapter for later use (flow
+ * parsing rejects FLAG/MARK actions that were not negotiated), and
+ * writes the negotiated subset back through *features.
+ *
+ * Always returns 0; an unsupported request simply negotiates to a
+ * smaller (possibly empty) feature set rather than failing.
+ */
+static int
+sfc_rx_metadata_negotiate(struct rte_eth_dev *dev, uint64_t *features)
+{
+	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+	uint64_t supported = 0;
+
+	/* Serialize against other control-path users of the adapter state. */
+	sfc_adapter_lock(sa);
+
+	/* Offer USER_FLAG/USER_MARK only if the Rx datapath can deliver them. */
+	if ((sa->priv.dp_rx->features & SFC_DP_RX_FEAT_FLOW_FLAG) != 0)
+		supported |= RTE_ETH_RX_METADATA_USER_FLAG;
+
+	if ((sa->priv.dp_rx->features & SFC_DP_RX_FEAT_FLOW_MARK) != 0)
+		supported |= RTE_ETH_RX_METADATA_USER_MARK;
+
+	/* Negotiated set = requested ∩ supported; report it back to the caller. */
+	sa->negotiated_rx_metadata = supported & *features;
+	*features = sa->negotiated_rx_metadata;
+
+	sfc_adapter_unlock(sa);
+
+	return 0;
+}
+
static const struct eth_dev_ops sfc_eth_dev_ops = {
.dev_configure = sfc_dev_configure,
.dev_start = sfc_dev_start,
.xstats_get_names_by_id = sfc_xstats_get_names_by_id,
.pool_ops_supported = sfc_pool_ops_supported,
.representor_info_get = sfc_representor_info_get,
+ .rx_metadata_negotiate = sfc_rx_metadata_negotiate,
};
struct sfc_ethdev_init_data {
goto fail_dp_rx_name;
}
+ if (strcmp(dp_rx->dp.name, SFC_KVARG_DATAPATH_EF10_ESSB) == 0) {
+ /* FLAG and MARK are always available from Rx prefix. */
+ sa->negotiated_rx_metadata |= RTE_ETH_RX_METADATA_USER_FLAG;
+ sa->negotiated_rx_metadata |= RTE_ETH_RX_METADATA_USER_MARK;
+ }
+
sfc_notice(sa, "use %s Rx datapath", sas->dp_rx_name);
rc = sfc_kvargs_process(sa, SFC_KVARG_TX_DATAPATH,
struct sfc_flow_spec *spec = &flow->spec;
struct sfc_flow_spec_filter *spec_filter = &spec->filter;
const unsigned int dp_rx_features = sa->priv.dp_rx->features;
+ const uint64_t rx_metadata = sa->negotiated_rx_metadata;
uint32_t actions_set = 0;
const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
(1UL << RTE_FLOW_ACTION_TYPE_RSS) |
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"FLAG action is not supported on the current Rx datapath");
return -rte_errno;
+ } else if ((rx_metadata &
+ RTE_ETH_RX_METADATA_USER_FLAG) == 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "flag delivery has not been negotiated");
+ return -rte_errno;
}
spec_filter->template.efs_flags |=
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"MARK action is not supported on the current Rx datapath");
return -rte_errno;
+ } else if ((rx_metadata &
+ RTE_ETH_RX_METADATA_USER_MARK) == 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "mark delivery has not been negotiated");
+ return -rte_errno;
}
rc = sfc_flow_parse_mark(sa, actions->conf, flow);
efx_mae_actions_t *spec,
struct rte_flow_error *error)
{
+ const uint64_t rx_metadata = sa->negotiated_rx_metadata;
bool custom_error = B_FALSE;
int rc = 0;
case RTE_FLOW_ACTION_TYPE_FLAG:
SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_FLAG,
bundle->actions_mask);
- rc = efx_mae_action_set_populate_flag(spec);
+ if ((rx_metadata & RTE_ETH_RX_METADATA_USER_FLAG) != 0) {
+ rc = efx_mae_action_set_populate_flag(spec);
+ } else {
+ rc = rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "flag delivery has not been negotiated");
+ custom_error = B_TRUE;
+ }
break;
case RTE_FLOW_ACTION_TYPE_MARK:
SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
bundle->actions_mask);
- rc = sfc_mae_rule_parse_action_mark(sa, action->conf, spec);
+ if ((rx_metadata & RTE_ETH_RX_METADATA_USER_MARK) != 0) {
+ rc = sfc_mae_rule_parse_action_mark(sa, action->conf,
+ spec);
+ } else {
+ rc = rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "mark delivery has not been negotiated");
+ custom_error = B_TRUE;
+ }
break;
case RTE_FLOW_ACTION_TYPE_PHY_PORT:
SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_PHY_PORT,