return 0;
}
- sfc_log_init(sa, "init MAE");
- rc = efx_mae_init(sa->nic);
- if (rc != 0)
- goto fail_mae_init;
+ if (encp->enc_mae_admin) {
+ sfc_log_init(sa, "init MAE");
+ rc = efx_mae_init(sa->nic);
+ if (rc != 0)
+ goto fail_mae_init;
- sfc_log_init(sa, "get MAE limits");
- rc = efx_mae_get_limits(sa->nic, &limits);
- if (rc != 0)
- goto fail_mae_get_limits;
+ sfc_log_init(sa, "get MAE limits");
+ rc = efx_mae_get_limits(sa->nic, &limits);
+ if (rc != 0)
+ goto fail_mae_get_limits;
- sfc_log_init(sa, "init MAE counter registry");
- rc = sfc_mae_counter_registry_init(&mae->counter_registry,
- limits.eml_max_n_counters);
- if (rc != 0) {
- sfc_err(sa, "failed to init MAE counters registry for %u entries: %s",
- limits.eml_max_n_counters, rte_strerror(rc));
- goto fail_counter_registry_init;
+ sfc_log_init(sa, "init MAE counter registry");
+ rc = sfc_mae_counter_registry_init(&mae->counter_registry,
+ limits.eml_max_n_counters);
+ if (rc != 0) {
+ sfc_err(sa, "failed to init MAE counters registry for %u entries: %s",
+ limits.eml_max_n_counters, rte_strerror(rc));
+ goto fail_counter_registry_init;
+ }
}
sfc_log_init(sa, "assign entity MPORT");
if (rc != 0)
goto fail_mae_assign_switch_port;
- sfc_log_init(sa, "allocate encap. header bounce buffer");
- bounce_eh->buf_size = limits.eml_encap_header_size_limit;
- bounce_eh->buf = rte_malloc("sfc_mae_bounce_eh",
- bounce_eh->buf_size, 0);
- if (bounce_eh->buf == NULL)
- goto fail_mae_alloc_bounce_eh;
-
- mae->status = SFC_MAE_STATUS_SUPPORTED;
- mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios;
- mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
- mae->encap_types_supported = limits.eml_encap_types_supported;
+ if (encp->enc_mae_admin) {
+ sfc_log_init(sa, "allocate encap. header bounce buffer");
+ bounce_eh->buf_size = limits.eml_encap_header_size_limit;
+ bounce_eh->buf = rte_malloc("sfc_mae_bounce_eh",
+ bounce_eh->buf_size, 0);
+ if (bounce_eh->buf == NULL)
+ goto fail_mae_alloc_bounce_eh;
+
+ mae->nb_outer_rule_prios_max = limits.eml_max_n_outer_prios;
+ mae->nb_action_rule_prios_max = limits.eml_max_n_action_prios;
+ mae->encap_types_supported = limits.eml_encap_types_supported;
+ }
+
TAILQ_INIT(&mae->outer_rules);
TAILQ_INIT(&mae->encap_headers);
TAILQ_INIT(&mae->action_sets);
+ if (encp->enc_mae_admin)
+ mae->status = SFC_MAE_STATUS_ADMIN;
+ else
+ mae->status = SFC_MAE_STATUS_SUPPORTED;
+
sfc_log_init(sa, "done");
return 0;
fail_mae_assign_switch_port:
fail_mae_assign_switch_domain:
fail_mae_assign_entity_mport:
- sfc_mae_counter_registry_fini(&mae->counter_registry);
+ if (encp->enc_mae_admin)
+ sfc_mae_counter_registry_fini(&mae->counter_registry);
fail_counter_registry_init:
fail_mae_get_limits:
- efx_mae_fini(sa->nic);
+ if (encp->enc_mae_admin)
+ efx_mae_fini(sa->nic);
fail_mae_init:
sfc_log_init(sa, "failed %d", rc);
mae->nb_action_rule_prios_max = 0;
mae->status = SFC_MAE_STATUS_UNKNOWN;
- if (status_prev != SFC_MAE_STATUS_SUPPORTED)
+ if (status_prev != SFC_MAE_STATUS_ADMIN)
return;
rte_free(mae->bounce_eh.buf);
return 0;
}
+/*
+ * Parse an RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR pattern item and program
+ * the corresponding m-port into the MAE match specification.
+ *
+ * Only an exact (all-ones) match on the item's port_id is supported.
+ * On success, the context is marked as having its traffic-source
+ * (m-port) criterion set, so that a second traffic-source item in the
+ * same pattern is rejected.
+ */
+static int
+sfc_mae_rule_parse_item_port_representor(const struct rte_flow_item *item,
+ struct sfc_flow_parse_ctx *ctx,
+ struct rte_flow_error *error)
+{
+ struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
+ /* Only an exact match on the 16-bit port ID is supported. */
+ const struct rte_flow_item_ethdev supp_mask = {
+ .port_id = 0xffff,
+ };
+ const void *def_mask = &rte_flow_item_ethdev_mask;
+ const struct rte_flow_item_ethdev *spec = NULL;
+ const struct rte_flow_item_ethdev *mask = NULL;
+ efx_mport_sel_t mport_sel;
+ int rc;
+
+ /* At most one traffic-source item may appear in a pattern. */
+ if (ctx_mae->match_mport_set) {
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Can't handle multiple traffic source items");
+ }
+
+ /* Validate the item against the supported mask; get spec/mask. */
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec, (const void **)&mask,
+ (const void *)&supp_mask, def_mask,
+ sizeof(struct rte_flow_item_ethdev), error);
+ if (rc != 0)
+ return rc;
+
+ if (mask->port_id != supp_mask.port_id) {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Bad mask in the PORT_REPRESENTOR pattern item");
+ }
+
+ /* If "spec" is not set, could be any port ID */
+ if (spec == NULL)
+ return 0;
+
+ /* Resolve the ethdev port ID to an m-port selector in this domain. */
+ rc = sfc_mae_switch_port_by_ethdev(
+ ctx_mae->sa->mae.switch_domain_id,
+ spec->port_id, &mport_sel);
+ if (rc != 0) {
+ return rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Can't find RTE ethdev by the port ID");
+ }
+
+ /* Exact-match the resolved m-port (NULL mask = full match). */
+ rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
+ &mport_sel, NULL);
+ if (rc != 0) {
+ return rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Failed to set MPORT for the port ID");
+ }
+
+ ctx_mae->match_mport_set = B_TRUE;
+
+ return 0;
+}
+
static int
sfc_mae_rule_parse_item_phy_port(const struct rte_flow_item *item,
struct sfc_flow_parse_ctx *ctx,
const uint8_t *mask = NULL;
int rc;
- /*
- * We're about to start processing inner frame items.
- * Process pattern data that has been deferred so far
- * and reset pattern data storage.
- */
- rc = sfc_mae_rule_process_pattern_data(ctx_mae, error);
- if (rc != 0)
- return rc;
+ if (ctx_mae->ft_rule_type == SFC_FT_RULE_GROUP) {
+ /*
+ * As a workaround, pattern processing has started from
+ * this (tunnel) item. No pattern data to process yet.
+ */
+ } else {
+ /*
+ * We're about to start processing inner frame items.
+ * Process pattern data that has been deferred so far
+ * and reset pattern data storage.
+ */
+ rc = sfc_mae_rule_process_pattern_data(ctx_mae, error);
+ if (rc != 0)
+ return rc;
+ }
memset(&ctx_mae->pattern_data, 0, sizeof(ctx_mae->pattern_data));
.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
.parse = sfc_mae_rule_parse_item_port_id,
},
+ {
+ .type = RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR,
+ .name = "PORT_REPRESENTOR",
+ /*
+ * In terms of RTE flow, this item is a META one,
+ * and its position in the pattern is don't care.
+ */
+ .prev_layer = SFC_FLOW_ITEM_ANY_LAYER,
+ .layer = SFC_FLOW_ITEM_ANY_LAYER,
+ .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
+ .parse = sfc_mae_rule_parse_item_port_representor,
+ },
{
.type = RTE_FLOW_ITEM_TYPE_PHY_PORT,
.name = "PHY_PORT",
static int
sfc_mae_rule_encap_parse_init(struct sfc_adapter *sa,
- const struct rte_flow_item pattern[],
struct sfc_mae_parse_ctx *ctx,
struct rte_flow_error *error)
{
+ const struct rte_flow_item *pattern = ctx->pattern;
struct sfc_mae *mae = &sa->mae;
uint8_t recirc_id = 0;
int rc;
RTE_FLOW_ERROR_TYPE_ITEM,
pattern, "tunnel offload: GROUP: tunnel type mismatch");
}
+
+ /*
+ * The HW/FW hasn't got support for the use of "ENC" fields in
+ * action rules (except the VNET_ID one) yet. As a workaround,
+ * start parsing the pattern from the tunnel item.
+ */
+ ctx->pattern = pattern;
break;
default:
SFC_ASSERT(B_FALSE);
ctx_mae.encap_type = EFX_TUNNEL_PROTOCOL_NONE;
ctx_mae.match_spec = ctx_mae.match_spec_action;
ctx_mae.field_ids_remap = field_ids_no_remap;
+ ctx_mae.pattern = pattern;
ctx.type = SFC_FLOW_PARSE_CTX_MAE;
ctx.mae = &ctx_mae;
- rc = sfc_mae_rule_encap_parse_init(sa, pattern, &ctx_mae, error);
+ rc = sfc_mae_rule_encap_parse_init(sa, &ctx_mae, error);
if (rc != 0)
goto fail_encap_parse_init;
spec->ft = ctx_mae.ft;
rc = sfc_flow_parse_pattern(sa, sfc_flow_items, RTE_DIM(sfc_flow_items),
- pattern, &ctx, error);
+ ctx_mae.pattern, &ctx, error);
if (rc != 0)
goto fail_parse_pattern;
return 0;
}
- if (mae->status != SFC_MAE_STATUS_SUPPORTED) {
+ if (mae->status != SFC_MAE_STATUS_ADMIN) {
rc = ENOTSUP;
- sfc_err(sa, "failed to init switchdev - no MAE support");
+ sfc_err(sa, "failed to init switchdev - no admin MAE privilege");
goto fail_no_mae;
}