+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ if (fw_rsrc->refcnt == 0) {
+ SFC_ASSERT(fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID);
+ SFC_ASSERT(action_set->spec != NULL);
+
+ rc = sfc_mae_encap_header_enable(sa, encap_header,
+ action_set->spec);
+ if (rc != 0)
+ return rc;
+
+ rc = sfc_mae_counters_enable(sa, counters,
+ action_set->n_counters,
+ action_set->spec);
+ if (rc != 0) {
+ sfc_err(sa, "failed to enable %u MAE counters: %s",
+ action_set->n_counters, rte_strerror(rc));
+
+ sfc_mae_encap_header_disable(sa, encap_header);
+ return rc;
+ }
+
+ rc = efx_mae_action_set_alloc(sa->nic, action_set->spec,
+ &fw_rsrc->aset_id);
+ if (rc != 0) {
+ sfc_err(sa, "failed to enable action_set=%p: %s",
+ action_set, strerror(rc));
+
+ (void)sfc_mae_counters_disable(sa, counters,
+ action_set->n_counters);
+ sfc_mae_encap_header_disable(sa, encap_header);
+ return rc;
+ }
+
+ sfc_dbg(sa, "enabled action_set=%p: AS_ID=0x%08x",
+ action_set, fw_rsrc->aset_id.id);
+ }
+
+ ++(fw_rsrc->refcnt);
+
+ return 0;
+}
+
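+/*
+ * The counterpart of the enable path above: the action set FW
+ * resource is reference-counted, allocated on the 0 -> 1 refcount
+ * transition and released on the 1 -> 0 one, so every successful
+ * enable must be paired with exactly one disable.
+ */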
+static void
+sfc_mae_action_set_disable(struct sfc_adapter *sa,
+ struct sfc_mae_action_set *action_set)
+{
+ struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
+ int rc;
+
+ SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+ if (fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID ||
+ fw_rsrc->refcnt == 0) {
+ sfc_err(sa, "failed to disable action_set=%p: already disabled; AS_ID=0x%08x, refcnt=%u",
+ action_set, fw_rsrc->aset_id.id, fw_rsrc->refcnt);
+ return;
+ }
+
+ if (fw_rsrc->refcnt == 1) {
+ rc = efx_mae_action_set_free(sa->nic, &fw_rsrc->aset_id);
+ if (rc == 0) {
+ sfc_dbg(sa, "disabled action_set=%p with AS_ID=0x%08x",
+ action_set, fw_rsrc->aset_id.id);
+ } else {
+ sfc_err(sa, "failed to disable action_set=%p with AS_ID=0x%08x: %s",
+ action_set, fw_rsrc->aset_id.id, strerror(rc));
+ }
+ fw_rsrc->aset_id.id = EFX_MAE_RSRC_ID_INVALID;
+
+ rc = sfc_mae_counters_disable(sa, action_set->counters,
+ action_set->n_counters);
+ if (rc != 0) {
+ sfc_err(sa, "failed to disable %u MAE counters: %s",
+ action_set->n_counters, rte_strerror(rc));
+ }
+
+ sfc_mae_encap_header_disable(sa, action_set->encap_header);
+ }
+
+ --(fw_rsrc->refcnt);
+}
+
+void
+sfc_mae_flow_cleanup(struct sfc_adapter *sa,
+ struct rte_flow *flow)
+{
+ struct sfc_flow_spec *spec;
+ struct sfc_flow_spec_mae *spec_mae;
+
+ if (flow == NULL)
+ return;
+
+ spec = &flow->spec;
+
+ if (spec == NULL)
+ return;
+
+ spec_mae = &spec->mae;
+
+ if (spec_mae->ft != NULL) {
+ if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP)
+ spec_mae->ft->jump_rule_is_set = B_FALSE;
+
+ SFC_ASSERT(spec_mae->ft->refcnt != 0);
+ --(spec_mae->ft->refcnt);
+ }
+
+ SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
+
+ if (spec_mae->outer_rule != NULL)
+ sfc_mae_outer_rule_del(sa, spec_mae->outer_rule);
+
+ if (spec_mae->action_set != NULL)
+ sfc_mae_action_set_del(sa, spec_mae->action_set);
+
+ if (spec_mae->match_spec != NULL)
+ efx_mae_match_spec_fini(sa->nic, spec_mae->match_spec);
+}
+
+static int
+sfc_mae_set_ethertypes(struct sfc_mae_parse_ctx *ctx)
+{
+ struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
+ const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
+ const efx_mae_field_id_t field_ids[] = {
+ EFX_MAE_FIELD_VLAN0_PROTO_BE,
+ EFX_MAE_FIELD_VLAN1_PROTO_BE,
+ };
+ const struct sfc_mae_ethertype *et;
+ unsigned int i;
+ int rc;
+
+ /*
+ * In accordance with RTE flow API convention, the innermost L2
+ * item's "type" ("inner_type") is an L3 EtherType. If there is
+ * no L3 item, it's 0x0000/0x0000.
+ */
+ et = &pdata->ethertypes[pdata->nb_vlan_tags];
+ rc = efx_mae_match_spec_field_set(ctx->match_spec,
+ fremap[EFX_MAE_FIELD_ETHER_TYPE_BE],
+ sizeof(et->value),
+ (const uint8_t *)&et->value,
+ sizeof(et->mask),
+ (const uint8_t *)&et->mask);
+ if (rc != 0)
+ return rc;
+
+ /*
+ * sfc_mae_rule_parse_item_vlan() has already made sure
+ * that pdata->nb_vlan_tags does not exceed this figure.
+ */
+ RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
+
+ for (i = 0; i < pdata->nb_vlan_tags; ++i) {
+ et = &pdata->ethertypes[i];
+
+ rc = efx_mae_match_spec_field_set(ctx->match_spec,
+ fremap[field_ids[i]],
+ sizeof(et->value),
+ (const uint8_t *)&et->value,
+ sizeof(et->mask),
+ (const uint8_t *)&et->mask);
+ if (rc != 0)
+ return rc;
+ }
+
+ return 0;
+}
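+
+/*
+ * Illustration of the ethertypes[] layout consumed above, using an
+ * assumed testpmd-style pattern purely as an example: for
+ * "eth type is 0x88a8 / vlan inner_type is 0x8100 / vlan / ipv4",
+ * nb_vlan_tags is 2; ethertypes[0] (item ETH "type") and ethertypes[1]
+ * (the first VLAN "inner_type") are the TPIDs written to
+ * VLAN0_PROTO_BE and VLAN1_PROTO_BE, while ethertypes[2] holds the L3
+ * EtherType (0x0800, enforced by item IPV4) written to ETHER_TYPE_BE.
+ */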
+
+static int
+sfc_mae_rule_process_pattern_data(struct sfc_mae_parse_ctx *ctx,
+ struct rte_flow_error *error)
+{
+ const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
+ struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
+ struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
+ const rte_be16_t supported_tpids[] = {
+ /* VLAN standard TPID (always the first element) */
+ RTE_BE16(RTE_ETHER_TYPE_VLAN),
+
+ /* Double-tagging TPIDs */
+ RTE_BE16(RTE_ETHER_TYPE_QINQ),
+ RTE_BE16(RTE_ETHER_TYPE_QINQ1),
+ RTE_BE16(RTE_ETHER_TYPE_QINQ2),
+ RTE_BE16(RTE_ETHER_TYPE_QINQ3),
+ };
+ bool enforce_tag_presence[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {0};
+ unsigned int nb_supported_tpids = RTE_DIM(supported_tpids);
+ unsigned int ethertype_idx;
+ const uint8_t *valuep;
+ const uint8_t *maskp;
+ int rc;
+
+ if (pdata->innermost_ethertype_restriction.mask != 0 &&
+ pdata->nb_vlan_tags < SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
+ /*
+ * If a single item VLAN is followed by an L3 item, the value
+ * of "type" in item ETH can't be a double-tagging TPID.
+ */
+ nb_supported_tpids = 1;
+ }
+
+ /*
+ * sfc_mae_rule_parse_item_vlan() has already made sure
+ * that pdata->nb_vlan_tags does not exceed this figure.
+ */
+ RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
+
+ for (ethertype_idx = 0;
+ ethertype_idx < pdata->nb_vlan_tags; ++ethertype_idx) {
+ rte_be16_t tpid_v = ethertypes[ethertype_idx].value;
+ rte_be16_t tpid_m = ethertypes[ethertype_idx].mask;
+ unsigned int tpid_idx;
+
+ /*
+ * This loop can have only two iterations. On the second one,
+ * drop the outer tag presence enforcement bit because inner
+ * tag presence implies the presence of the outer tag.
+ */
+ enforce_tag_presence[0] = B_FALSE;
+
+ if (tpid_m == RTE_BE16(0)) {
+ if (pdata->tci_masks[ethertype_idx] == RTE_BE16(0))
+ enforce_tag_presence[ethertype_idx] = B_TRUE;
+
+ /* No match on this field, and no value check. */
+ nb_supported_tpids = 1;
+ continue;
+ }
+
+ /* Only exact match is supported. */
+ if (tpid_m != RTE_BE16(0xffff)) {
+ sfc_err(ctx->sa, "TPID mask must be 0x0 or 0xffff; got 0x%04x",
+ rte_be_to_cpu_16(tpid_m));
+ rc = EINVAL;
+ goto fail;
+ }
+
+ for (tpid_idx = pdata->nb_vlan_tags - ethertype_idx - 1;
+ tpid_idx < nb_supported_tpids; ++tpid_idx) {
+ if (tpid_v == supported_tpids[tpid_idx])
+ break;
+ }
+
+ if (tpid_idx == nb_supported_tpids) {
+ sfc_err(ctx->sa, "TPID 0x%04x is unsupported",
+ rte_be_to_cpu_16(tpid_v));
+ rc = EINVAL;
+ goto fail;
+ }
+
+ nb_supported_tpids = 1;
+ }
+
+ if (pdata->innermost_ethertype_restriction.mask == RTE_BE16(0xffff)) {
+ struct sfc_mae_ethertype *et = &ethertypes[ethertype_idx];
+ rte_be16_t enforced_et;
+
+ enforced_et = pdata->innermost_ethertype_restriction.value;
+
+ if (et->mask == 0) {
+ et->mask = RTE_BE16(0xffff);
+ et->value = enforced_et;
+ } else if (et->mask != RTE_BE16(0xffff) ||
+ et->value != enforced_et) {
+ sfc_err(ctx->sa, "L3 EtherType must be 0x0/0x0 or 0x%04x/0xffff; got 0x%04x/0x%04x",
+ rte_be_to_cpu_16(enforced_et),
+ rte_be_to_cpu_16(et->value),
+ rte_be_to_cpu_16(et->mask));
+ rc = EINVAL;
+ goto fail;
+ }
+ }
+
+ /*
+ * Now that the number of VLAN tags is known, set the fields
+ * ETHER_TYPE, VLAN0_PROTO and VLAN1_PROTO so that the first
+ * one is a valid L3 EtherType (or 0x0000/0x0000) and the
+ * last two are valid TPIDs (or 0x0000/0x0000).
+ */
+ rc = sfc_mae_set_ethertypes(ctx);
+ if (rc != 0)
+ goto fail;
+
+ if (pdata->l3_next_proto_restriction_mask == 0xff) {
+ if (pdata->l3_next_proto_mask == 0) {
+ pdata->l3_next_proto_mask = 0xff;
+ pdata->l3_next_proto_value =
+ pdata->l3_next_proto_restriction_value;
+ } else if (pdata->l3_next_proto_mask != 0xff ||
+ pdata->l3_next_proto_value !=
+ pdata->l3_next_proto_restriction_value) {
+ sfc_err(ctx->sa, "L3 next protocol must be 0x0/0x0 or 0x%02x/0xff; got 0x%02x/0x%02x",
+ pdata->l3_next_proto_restriction_value,
+ pdata->l3_next_proto_value,
+ pdata->l3_next_proto_mask);
+ rc = EINVAL;
+ goto fail;
+ }
+ }
+
+ if (enforce_tag_presence[0] || pdata->has_ovlan_mask) {
+ rc = efx_mae_match_spec_bit_set(ctx->match_spec,
+ fremap[EFX_MAE_FIELD_HAS_OVLAN],
+ enforce_tag_presence[0] ||
+ pdata->has_ovlan_value);
+ if (rc != 0)
+ goto fail;
+ }
+
+ if (enforce_tag_presence[1] || pdata->has_ivlan_mask) {
+ rc = efx_mae_match_spec_bit_set(ctx->match_spec,
+ fremap[EFX_MAE_FIELD_HAS_IVLAN],
+ enforce_tag_presence[1] ||
+ pdata->has_ivlan_value);
+ if (rc != 0)
+ goto fail;
+ }
+
+ valuep = (const uint8_t *)&pdata->l3_next_proto_value;
+ maskp = (const uint8_t *)&pdata->l3_next_proto_mask;
+ rc = efx_mae_match_spec_field_set(ctx->match_spec,
+ fremap[EFX_MAE_FIELD_IP_PROTO],
+ sizeof(pdata->l3_next_proto_value),
+ valuep,
+ sizeof(pdata->l3_next_proto_mask),
+ maskp);
+ if (rc != 0)
+ goto fail;
+
+ return 0;
+
+fail:
+ return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+ "Failed to process pattern data");
+}
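+
+/*
+ * A worked example of the TPID validation above (illustration only):
+ * with two VLAN tags, the outer TPID (item ETH "type") must be one of
+ * the double-tagging TPIDs (0x88a8, 0x9100, 0x9200 or 0x9300) and the
+ * inner TPID (the first VLAN "inner_type") must be 0x8100, whereas a
+ * single tag followed by an L3 item may only use TPID 0x8100.
+ */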
+
+static int
+sfc_mae_rule_parse_item_mark(const struct rte_flow_item *item,
+ struct sfc_flow_parse_ctx *ctx,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_mark *spec = item->spec;
+ struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
+
+ if (spec == NULL) {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "NULL spec in item MARK");
+ }
+
+ /*
+ * This item is used in tunnel offload support only.
+ * It must precede any network header items, which
+ * means that sfc_mae_rule_preparse_item_mark() has
+ * already parsed it. Only one item MARK is allowed.
+ */
+ if (ctx_mae->ft_rule_type != SFC_FT_RULE_GROUP ||
+ spec->id != (uint32_t)SFC_FT_ID_TO_MARK(ctx_mae->ft->id)) {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "invalid item MARK");
+ }
+
+ return 0;
+}
+
+static int
+sfc_mae_rule_parse_item_port_id(const struct rte_flow_item *item,
+ struct sfc_flow_parse_ctx *ctx,
+ struct rte_flow_error *error)
+{
+ struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
+ const struct rte_flow_item_port_id supp_mask = {
+ .id = 0xffffffff,
+ };
+ const void *def_mask = &rte_flow_item_port_id_mask;
+ const struct rte_flow_item_port_id *spec = NULL;
+ const struct rte_flow_item_port_id *mask = NULL;
+ efx_mport_sel_t mport_sel;
+ int rc;
+
+ if (ctx_mae->match_mport_set) {
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Can't handle multiple traffic source items");
+ }
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec, (const void **)&mask,
+ (const void *)&supp_mask, def_mask,
+ sizeof(struct rte_flow_item_port_id), error);
+ if (rc != 0)
+ return rc;
+
+ if (mask->id != supp_mask.id) {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Bad mask in the PORT_ID pattern item");
+ }
+
+ /* If "spec" is not set, could be any port ID */
+ if (spec == NULL)
+ return 0;
+
+ if (spec->id > UINT16_MAX) {
+ return rte_flow_error_set(error, EOVERFLOW,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "The port ID is too large");
+ }
+
+ rc = sfc_mae_switch_port_by_ethdev(ctx_mae->sa->mae.switch_domain_id,
+ spec->id, &mport_sel);
+ if (rc != 0) {
+ return rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Can't find RTE ethdev by the port ID");
+ }
+
+ rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
+ &mport_sel, NULL);
+ if (rc != 0) {
+ return rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Failed to set MPORT for the port ID");
+ }
+
+ ctx_mae->match_mport_set = B_TRUE;
+
+ return 0;
+}
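+
+/*
+ * Example usage (assumed testpmd syntax, for illustration only):
+ * "flow create 0 transfer pattern port_id id is 1 / end actions ..."
+ * makes the rule match traffic entering the MAE from the m-port
+ * which corresponds to ethdev port 1 in the same switch domain.
+ */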
+
+static int
+sfc_mae_rule_parse_item_port_representor(const struct rte_flow_item *item,
+ struct sfc_flow_parse_ctx *ctx,
+ struct rte_flow_error *error)
+{
+ struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
+ const struct rte_flow_item_ethdev supp_mask = {
+ .port_id = 0xffff,
+ };
+ const void *def_mask = &rte_flow_item_ethdev_mask;
+ const struct rte_flow_item_ethdev *spec = NULL;
+ const struct rte_flow_item_ethdev *mask = NULL;
+ efx_mport_sel_t mport_sel;
+ int rc;
+
+ if (ctx_mae->match_mport_set) {
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Can't handle multiple traffic source items");
+ }
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec, (const void **)&mask,
+ (const void *)&supp_mask, def_mask,
+ sizeof(struct rte_flow_item_ethdev), error);
+ if (rc != 0)
+ return rc;
+
+ if (mask->port_id != supp_mask.port_id) {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Bad mask in the PORT_REPRESENTOR pattern item");
+ }
+
+ /* If "spec" is not set, could be any port ID */
+ if (spec == NULL)
+ return 0;
+
+ rc = sfc_mae_switch_port_by_ethdev(
+ ctx_mae->sa->mae.switch_domain_id,
+ spec->port_id, &mport_sel);
+ if (rc != 0) {
+ return rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Can't find RTE ethdev by the port ID");
+ }
+
+ rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec,
+ &mport_sel, NULL);
+ if (rc != 0) {
+ return rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Failed to set MPORT for the port ID");
+ }
+
+ ctx_mae->match_mport_set = B_TRUE;
+
+ return 0;
+}
+
+static int
+sfc_mae_rule_parse_item_phy_port(const struct rte_flow_item *item,
+ struct sfc_flow_parse_ctx *ctx,
+ struct rte_flow_error *error)
+{
+ struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
+ const struct rte_flow_item_phy_port supp_mask = {
+ .index = 0xffffffff,
+ };
+ const void *def_mask = &rte_flow_item_phy_port_mask;
+ const struct rte_flow_item_phy_port *spec = NULL;
+ const struct rte_flow_item_phy_port *mask = NULL;
+ efx_mport_sel_t mport_v;
+ int rc;
+
+ if (ctx_mae->match_mport_set) {
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Can't handle multiple traffic source items");
+ }
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec, (const void **)&mask,
+ (const void *)&supp_mask, def_mask,
+ sizeof(struct rte_flow_item_phy_port), error);
+ if (rc != 0)
+ return rc;
+
+ if (mask->index != supp_mask.index) {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Bad mask in the PHY_PORT pattern item");
+ }
+
+ /* If "spec" is not set, could be any physical port */
+ if (spec == NULL)
+ return 0;
+
+ rc = efx_mae_mport_by_phy_port(spec->index, &mport_v);
+ if (rc != 0) {
+ return rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Failed to convert the PHY_PORT index");
+ }
+
+ rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
+ if (rc != 0) {
+ return rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Failed to set MPORT for the PHY_PORT");
+ }
+
+ ctx_mae->match_mport_set = B_TRUE;
+
+ return 0;
+}
+
+static int
+sfc_mae_rule_parse_item_pf(const struct rte_flow_item *item,
+ struct sfc_flow_parse_ctx *ctx,
+ struct rte_flow_error *error)
+{
+ struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
+ efx_mport_sel_t mport_v;
+ int rc;
+
+ if (ctx_mae->match_mport_set) {
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Can't handle multiple traffic source items");
+ }
+
+ rc = efx_mae_mport_by_pcie_function(encp->enc_pf, EFX_PCI_VF_INVALID,
+ &mport_v);
+ if (rc != 0) {
+ return rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Failed to convert the PF ID");
+ }
+
+ rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
+ if (rc != 0) {
+ return rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Failed to set MPORT for the PF");
+ }
+
+ ctx_mae->match_mport_set = B_TRUE;
+
+ return 0;
+}
+
+static int
+sfc_mae_rule_parse_item_vf(const struct rte_flow_item *item,
+ struct sfc_flow_parse_ctx *ctx,
+ struct rte_flow_error *error)
+{
+ struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(ctx_mae->sa->nic);
+ const struct rte_flow_item_vf supp_mask = {
+ .id = 0xffffffff,
+ };
+ const void *def_mask = &rte_flow_item_vf_mask;
+ const struct rte_flow_item_vf *spec = NULL;
+ const struct rte_flow_item_vf *mask = NULL;
+ efx_mport_sel_t mport_v;
+ int rc;
+
+ if (ctx_mae->match_mport_set) {
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Can't handle multiple traffic source items");
+ }
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec, (const void **)&mask,
+ (const void *)&supp_mask, def_mask,
+ sizeof(struct rte_flow_item_vf), error);
+ if (rc != 0)
+ return rc;
+
+ if (mask->id != supp_mask.id) {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Bad mask in the VF pattern item");
+ }
+
+ /*
+ * If "spec" is not set, the item requests any VF related to the
+ * PF of the current DPDK port (but not the PF itself).
+ * Reject this match criterion as unsupported.
+ */
+ if (spec == NULL) {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Bad spec in the VF pattern item");
+ }
+
+ rc = efx_mae_mport_by_pcie_function(encp->enc_pf, spec->id, &mport_v);
+ if (rc != 0) {
+ return rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Failed to convert the PF + VF IDs");
+ }
+
+ rc = efx_mae_match_spec_mport_set(ctx_mae->match_spec, &mport_v, NULL);
+ if (rc != 0) {
+ return rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Failed to set MPORT for the PF + VF");
+ }
+
+ ctx_mae->match_mport_set = B_TRUE;
+
+ return 0;
+}
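+
+/*
+ * Example (assumed testpmd syntax, for illustration only): with
+ * pattern item "vf id is 3", the code above resolves the m-port of
+ * VF 3 on the PF backing the current ethdev via
+ * efx_mae_mport_by_pcie_function() and matches on that m-port.
+ */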
+
+/*
+ * Having this field ID in a field locator means that this
+ * locator cannot be used to actually set the field at the
+ * time when the corresponding item gets encountered. Such
+ * fields get stashed in the parsing context instead. This
+ * is required to resolve dependencies between the stashed
+ * fields. See sfc_mae_rule_process_pattern_data().
+ */
+#define SFC_MAE_FIELD_HANDLING_DEFERRED EFX_MAE_FIELD_NIDS
+
+struct sfc_mae_field_locator {
+ efx_mae_field_id_t field_id;
+ size_t size;
+ /* Field offset in the corresponding rte_flow_item_ struct */
+ size_t ofst;
+};
+
+static void
+sfc_mae_item_build_supp_mask(const struct sfc_mae_field_locator *field_locators,
+ unsigned int nb_field_locators, void *mask_ptr,
+ size_t mask_size)
+{
+ unsigned int i;
+
+ memset(mask_ptr, 0, mask_size);
+
+ for (i = 0; i < nb_field_locators; ++i) {
+ const struct sfc_mae_field_locator *fl = &field_locators[i];
+
+ SFC_ASSERT(fl->ofst + fl->size <= mask_size);
+ memset(RTE_PTR_ADD(mask_ptr, fl->ofst), 0xff, fl->size);
+ }
+}
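+
+/*
+ * For instance, passing flocs_eth (defined below) to this helper
+ * yields a struct rte_flow_item_eth mask with all-ones "dst", "src"
+ * and "type" fields and zeros elsewhere; sfc_flow_parse_init() can
+ * then reject user masks which go beyond the supported fields.
+ */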
+
+static int
+sfc_mae_parse_item(const struct sfc_mae_field_locator *field_locators,
+ unsigned int nb_field_locators, const uint8_t *spec,
+ const uint8_t *mask, struct sfc_mae_parse_ctx *ctx,
+ struct rte_flow_error *error)
+{
+ const efx_mae_field_id_t *fremap = ctx->field_ids_remap;
+ unsigned int i;
+ int rc = 0;
+
+ for (i = 0; i < nb_field_locators; ++i) {
+ const struct sfc_mae_field_locator *fl = &field_locators[i];
+
+ if (fl->field_id == SFC_MAE_FIELD_HANDLING_DEFERRED)
+ continue;
+
+ rc = efx_mae_match_spec_field_set(ctx->match_spec,
+ fremap[fl->field_id],
+ fl->size, spec + fl->ofst,
+ fl->size, mask + fl->ofst);
+ if (rc != 0)
+ break;
+ }
+
+ if (rc != 0) {
+ rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Failed to process item fields");
+ }
+
+ return rc;
+}
+
+static const struct sfc_mae_field_locator flocs_eth[] = {
+ {
+ /*
+ * This locator is used only for building the supported fields mask.
+ * The field is handled by sfc_mae_rule_process_pattern_data().
+ */
+ SFC_MAE_FIELD_HANDLING_DEFERRED,
+ RTE_SIZEOF_FIELD(struct rte_flow_item_eth, type),
+ offsetof(struct rte_flow_item_eth, type),
+ },
+ {
+ EFX_MAE_FIELD_ETH_DADDR_BE,
+ RTE_SIZEOF_FIELD(struct rte_flow_item_eth, dst),
+ offsetof(struct rte_flow_item_eth, dst),
+ },
+ {
+ EFX_MAE_FIELD_ETH_SADDR_BE,
+ RTE_SIZEOF_FIELD(struct rte_flow_item_eth, src),
+ offsetof(struct rte_flow_item_eth, src),
+ },
+};
+
+static int
+sfc_mae_rule_parse_item_eth(const struct rte_flow_item *item,
+ struct sfc_flow_parse_ctx *ctx,
+ struct rte_flow_error *error)
+{
+ struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
+ struct rte_flow_item_eth override_mask;
+ struct rte_flow_item_eth supp_mask;
+ const uint8_t *spec = NULL;
+ const uint8_t *mask = NULL;
+ int rc;
+
+ sfc_mae_item_build_supp_mask(flocs_eth, RTE_DIM(flocs_eth),
+ &supp_mask, sizeof(supp_mask));
+ supp_mask.has_vlan = 1;
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec, (const void **)&mask,
+ (const void *)&supp_mask,
+ &rte_flow_item_eth_mask,
+ sizeof(struct rte_flow_item_eth), error);
+ if (rc != 0)
+ return rc;
+
+ if (ctx_mae->ft_rule_type == SFC_FT_RULE_JUMP && mask != NULL) {
+ /*
+ * The HW/FW hasn't got support for match on MAC addresses in
+ * outer rules yet (this will change). Match on VLAN presence
+ * isn't supported either. Ignore these match criteria.
+ */
+ memcpy(&override_mask, mask, sizeof(override_mask));
+ memset(&override_mask.hdr.dst_addr, 0,
+ sizeof(override_mask.hdr.dst_addr));
+ memset(&override_mask.hdr.src_addr, 0,
+ sizeof(override_mask.hdr.src_addr));
+ override_mask.has_vlan = 0;
+
+ mask = (const uint8_t *)&override_mask;
+ }
+
+ if (spec != NULL) {
+ struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
+ struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
+ const struct rte_flow_item_eth *item_spec;
+ const struct rte_flow_item_eth *item_mask;
+
+ item_spec = (const struct rte_flow_item_eth *)spec;
+ item_mask = (const struct rte_flow_item_eth *)mask;
+
+ /*
+ * Remember various match criteria in the parsing context.
+ * sfc_mae_rule_process_pattern_data() will consider them
+ * altogether when the rest of the items have been parsed.
+ */
+ ethertypes[0].value = item_spec->type;
+ ethertypes[0].mask = item_mask->type;
+ if (item_mask->has_vlan) {
+ pdata->has_ovlan_mask = B_TRUE;
+ if (item_spec->has_vlan)
+ pdata->has_ovlan_value = B_TRUE;
+ }
+ } else {
+ /*
+ * The specification is empty. The overall pattern
+ * validity will be enforced at the end of parsing.
+ * See sfc_mae_rule_process_pattern_data().
+ */
+ return 0;
+ }
+
+ return sfc_mae_parse_item(flocs_eth, RTE_DIM(flocs_eth), spec, mask,
+ ctx_mae, error);
+}
+
+static const struct sfc_mae_field_locator flocs_vlan[] = {
+ /* Outermost tag */
+ {
+ EFX_MAE_FIELD_VLAN0_TCI_BE,
+ RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
+ offsetof(struct rte_flow_item_vlan, tci),
+ },
+ {
+ /*
+ * This locator is used only for building the supported fields mask.
+ * The field is handled by sfc_mae_rule_process_pattern_data().
+ */
+ SFC_MAE_FIELD_HANDLING_DEFERRED,
+ RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
+ offsetof(struct rte_flow_item_vlan, inner_type),
+ },
+
+ /* Innermost tag */
+ {
+ EFX_MAE_FIELD_VLAN1_TCI_BE,
+ RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, tci),
+ offsetof(struct rte_flow_item_vlan, tci),
+ },
+ {
+ /*
+ * This locator is used only for building the supported fields mask.
+ * The field is handled by sfc_mae_rule_process_pattern_data().
+ */
+ SFC_MAE_FIELD_HANDLING_DEFERRED,
+ RTE_SIZEOF_FIELD(struct rte_flow_item_vlan, inner_type),
+ offsetof(struct rte_flow_item_vlan, inner_type),
+ },
+};
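+
+/*
+ * The array above consists of two per-tag halves.
+ * sfc_mae_rule_parse_item_vlan() below picks the half to use as
+ * flocs_vlan + pdata->nb_vlan_tags * (RTE_DIM(flocs_vlan) / 2),
+ * i.e. the outermost tag entries for the first item VLAN and the
+ * innermost tag entries for the second one.
+ */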
+
+static int
+sfc_mae_rule_parse_item_vlan(const struct rte_flow_item *item,
+ struct sfc_flow_parse_ctx *ctx,
+ struct rte_flow_error *error)
+{
+ struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
+ struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
+ boolean_t *has_vlan_mp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
+ &pdata->has_ovlan_mask,
+ &pdata->has_ivlan_mask,
+ };
+ boolean_t *has_vlan_vp_by_nb_tags[SFC_MAE_MATCH_VLAN_MAX_NTAGS] = {
+ &pdata->has_ovlan_value,
+ &pdata->has_ivlan_value,
+ };
+ boolean_t *cur_tag_presence_bit_mp;
+ boolean_t *cur_tag_presence_bit_vp;
+ const struct sfc_mae_field_locator *flocs;
+ struct rte_flow_item_vlan supp_mask;
+ const uint8_t *spec = NULL;
+ const uint8_t *mask = NULL;
+ unsigned int nb_flocs;
+ int rc;
+
+ RTE_BUILD_BUG_ON(SFC_MAE_MATCH_VLAN_MAX_NTAGS != 2);
+
+ if (pdata->nb_vlan_tags == SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Can't match that many VLAN tags");
+ }
+
+ cur_tag_presence_bit_mp = has_vlan_mp_by_nb_tags[pdata->nb_vlan_tags];
+ cur_tag_presence_bit_vp = has_vlan_vp_by_nb_tags[pdata->nb_vlan_tags];
+
+ if (*cur_tag_presence_bit_mp == B_TRUE &&
+ *cur_tag_presence_bit_vp == B_FALSE) {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "The previous item enforces no (more) VLAN, "
+ "so the current item (VLAN) must not exist");
+ }
+
+ nb_flocs = RTE_DIM(flocs_vlan) / SFC_MAE_MATCH_VLAN_MAX_NTAGS;
+ flocs = flocs_vlan + pdata->nb_vlan_tags * nb_flocs;
+
+ sfc_mae_item_build_supp_mask(flocs, nb_flocs,
+ &supp_mask, sizeof(supp_mask));
+ /*
+ * This only means that the field is supported by the driver and libefx.
+ * Support at the NIC level will be checked when all items have been parsed.
+ */
+ supp_mask.has_more_vlan = 1;
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec, (const void **)&mask,
+ (const void *)&supp_mask,
+ &rte_flow_item_vlan_mask,
+ sizeof(struct rte_flow_item_vlan), error);
+ if (rc != 0)
+ return rc;
+
+ if (spec != NULL) {
+ struct sfc_mae_ethertype *et = pdata->ethertypes;
+ const struct rte_flow_item_vlan *item_spec;
+ const struct rte_flow_item_vlan *item_mask;
+
+ item_spec = (const struct rte_flow_item_vlan *)spec;
+ item_mask = (const struct rte_flow_item_vlan *)mask;
+
+ /*
+ * Remember various match criteria in the parsing context.
+ * sfc_mae_rule_process_pattern_data() will consider them
+ * altogether when the rest of the items have been parsed.
+ */
+ et[pdata->nb_vlan_tags + 1].value = item_spec->inner_type;
+ et[pdata->nb_vlan_tags + 1].mask = item_mask->inner_type;
+ pdata->tci_masks[pdata->nb_vlan_tags] = item_mask->tci;
+ if (item_mask->has_more_vlan) {
+ if (pdata->nb_vlan_tags ==
+ SFC_MAE_MATCH_VLAN_MAX_NTAGS - 1) {
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Can't use 'has_more_vlan' in "
+ "the second item VLAN");
+ }
+ pdata->has_ivlan_mask = B_TRUE;
+ if (item_spec->has_more_vlan)
+ pdata->has_ivlan_value = B_TRUE;
+ }
+
+ /* Convert TCI to MAE representation right now. */
+ rc = sfc_mae_parse_item(flocs, nb_flocs, spec, mask,
+ ctx_mae, error);
+ if (rc != 0)
+ return rc;
+ }
+
+ ++(pdata->nb_vlan_tags);
+
+ return 0;
+}
+
+static const struct sfc_mae_field_locator flocs_ipv4[] = {
+ {
+ EFX_MAE_FIELD_SRC_IP4_BE,
+ RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.src_addr),
+ offsetof(struct rte_flow_item_ipv4, hdr.src_addr),
+ },
+ {
+ EFX_MAE_FIELD_DST_IP4_BE,
+ RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.dst_addr),
+ offsetof(struct rte_flow_item_ipv4, hdr.dst_addr),
+ },
+ {
+ /*
+ * This locator is used only for building the supported fields mask.
+ * The field is handled by sfc_mae_rule_process_pattern_data().
+ */
+ SFC_MAE_FIELD_HANDLING_DEFERRED,
+ RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.next_proto_id),
+ offsetof(struct rte_flow_item_ipv4, hdr.next_proto_id),
+ },
+ {
+ EFX_MAE_FIELD_IP_TOS,
+ RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4,
+ hdr.type_of_service),
+ offsetof(struct rte_flow_item_ipv4, hdr.type_of_service),
+ },
+ {
+ EFX_MAE_FIELD_IP_TTL,
+ RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.time_to_live),
+ offsetof(struct rte_flow_item_ipv4, hdr.time_to_live),
+ },
+};
+
+static int
+sfc_mae_rule_parse_item_ipv4(const struct rte_flow_item *item,
+ struct sfc_flow_parse_ctx *ctx,
+ struct rte_flow_error *error)
+{
+ rte_be16_t ethertype_ipv4_be = RTE_BE16(RTE_ETHER_TYPE_IPV4);
+ struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
+ struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
+ struct rte_flow_item_ipv4 supp_mask;
+ const uint8_t *spec = NULL;
+ const uint8_t *mask = NULL;
+ int rc;
+
+ sfc_mae_item_build_supp_mask(flocs_ipv4, RTE_DIM(flocs_ipv4),
+ &supp_mask, sizeof(supp_mask));
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec, (const void **)&mask,
+ (const void *)&supp_mask,
+ &rte_flow_item_ipv4_mask,
+ sizeof(struct rte_flow_item_ipv4), error);
+ if (rc != 0)
+ return rc;
+
+ pdata->innermost_ethertype_restriction.value = ethertype_ipv4_be;
+ pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);
+
+ if (spec != NULL) {
+ const struct rte_flow_item_ipv4 *item_spec;
+ const struct rte_flow_item_ipv4 *item_mask;
+
+ item_spec = (const struct rte_flow_item_ipv4 *)spec;
+ item_mask = (const struct rte_flow_item_ipv4 *)mask;
+
+ pdata->l3_next_proto_value = item_spec->hdr.next_proto_id;
+ pdata->l3_next_proto_mask = item_mask->hdr.next_proto_id;
+ } else {
+ return 0;
+ }
+
+ return sfc_mae_parse_item(flocs_ipv4, RTE_DIM(flocs_ipv4), spec, mask,
+ ctx_mae, error);
+}
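+
+/*
+ * Note that hdr.next_proto_id is deliberately not written to the
+ * match specification right away: an L4 item may constrain IP_PROTO
+ * as well, so both constraints are stashed in the parsing context
+ * and reconciled later by sfc_mae_rule_process_pattern_data().
+ */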
+
+static const struct sfc_mae_field_locator flocs_ipv6[] = {
+ {
+ EFX_MAE_FIELD_SRC_IP6_BE,
+ RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.src_addr),
+ offsetof(struct rte_flow_item_ipv6, hdr.src_addr),
+ },
+ {
+ EFX_MAE_FIELD_DST_IP6_BE,
+ RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.dst_addr),
+ offsetof(struct rte_flow_item_ipv6, hdr.dst_addr),
+ },
+ {
+ /*
+ * This locator is used only for building the supported fields mask.
+ * The field is handled by sfc_mae_rule_process_pattern_data().
+ */
+ SFC_MAE_FIELD_HANDLING_DEFERRED,
+ RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.proto),
+ offsetof(struct rte_flow_item_ipv6, hdr.proto),
+ },
+ {
+ EFX_MAE_FIELD_IP_TTL,
+ RTE_SIZEOF_FIELD(struct rte_flow_item_ipv6, hdr.hop_limits),
+ offsetof(struct rte_flow_item_ipv6, hdr.hop_limits),
+ },
+};
+
+static int
+sfc_mae_rule_parse_item_ipv6(const struct rte_flow_item *item,
+ struct sfc_flow_parse_ctx *ctx,
+ struct rte_flow_error *error)
+{
+ rte_be16_t ethertype_ipv6_be = RTE_BE16(RTE_ETHER_TYPE_IPV6);
+ struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
+ const efx_mae_field_id_t *fremap = ctx_mae->field_ids_remap;
+ struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
+ struct rte_flow_item_ipv6 supp_mask;
+ const uint8_t *spec = NULL;
+ const uint8_t *mask = NULL;
+ rte_be32_t vtc_flow_be;
+ uint32_t vtc_flow;
+ uint8_t tc_value;
+ uint8_t tc_mask;
+ int rc;
+
+ sfc_mae_item_build_supp_mask(flocs_ipv6, RTE_DIM(flocs_ipv6),
+ &supp_mask, sizeof(supp_mask));
+
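+ /*
+ * Matching on the Traffic Class is supported as well: hdr.vtc_flow
+ * is the first field of the IPv6 header, so writing the TC mask at
+ * the very start of supp_mask sets the supported bits of vtc_flow.
+ */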
+ vtc_flow_be = RTE_BE32(RTE_IPV6_HDR_TC_MASK);
+ memcpy(&supp_mask, &vtc_flow_be, sizeof(vtc_flow_be));
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec, (const void **)&mask,
+ (const void *)&supp_mask,
+ &rte_flow_item_ipv6_mask,
+ sizeof(struct rte_flow_item_ipv6), error);
+ if (rc != 0)
+ return rc;
+
+ pdata->innermost_ethertype_restriction.value = ethertype_ipv6_be;
+ pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);
+
+ if (spec != NULL) {
+ const struct rte_flow_item_ipv6 *item_spec;
+ const struct rte_flow_item_ipv6 *item_mask;
+
+ item_spec = (const struct rte_flow_item_ipv6 *)spec;
+ item_mask = (const struct rte_flow_item_ipv6 *)mask;
+
+ pdata->l3_next_proto_value = item_spec->hdr.proto;
+ pdata->l3_next_proto_mask = item_mask->hdr.proto;
+ } else {
+ return 0;
+ }
+
+ rc = sfc_mae_parse_item(flocs_ipv6, RTE_DIM(flocs_ipv6), spec, mask,
+ ctx_mae, error);
+ if (rc != 0)
+ return rc;
+
+ memcpy(&vtc_flow_be, spec, sizeof(vtc_flow_be));
+ vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
+ tc_value = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
+
+ memcpy(&vtc_flow_be, mask, sizeof(vtc_flow_be));
+ vtc_flow = rte_be_to_cpu_32(vtc_flow_be);
+ tc_mask = (vtc_flow & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
+
+ rc = efx_mae_match_spec_field_set(ctx_mae->match_spec,
+ fremap[EFX_MAE_FIELD_IP_TOS],
+ sizeof(tc_value), &tc_value,
+ sizeof(tc_mask), &tc_mask);
+ if (rc != 0) {
+ return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "Failed to process item fields");
+ }
+
+ return 0;
+}
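+
+/*
+ * Worked example for the Traffic Class extraction above: vtc_flow is
+ * the first 32-bit word of the IPv6 header (version:4 | TC:8 | flow
+ * label:20), so for vtc_flow 0x6e000000 the computation
+ * (0x6e000000 & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT
+ * yields the Traffic Class value 0xe0.
+ */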
+
+static const struct sfc_mae_field_locator flocs_tcp[] = {
+ {
+ EFX_MAE_FIELD_L4_SPORT_BE,
+ RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.src_port),
+ offsetof(struct rte_flow_item_tcp, hdr.src_port),
+ },
+ {
+ EFX_MAE_FIELD_L4_DPORT_BE,
+ RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.dst_port),
+ offsetof(struct rte_flow_item_tcp, hdr.dst_port),
+ },
+ {
+ EFX_MAE_FIELD_TCP_FLAGS_BE,
+ /*
+ * The size and offset have been picked deliberately: the
+ * target MAE field is oversize (16 bit), and this mapping
+ * relies on the fact that the MAE field is big-endian.
+ */
+ RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.data_off) +
+ RTE_SIZEOF_FIELD(struct rte_flow_item_tcp, hdr.tcp_flags),
+ offsetof(struct rte_flow_item_tcp, hdr.data_off),
+ },
+};
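+
+/*
+ * Illustration of the TCP flags mapping above: the locator spans the
+ * two adjacent bytes hdr.data_off and hdr.tcp_flags, so a match on
+ * tcp_flags 0x02 (SYN) with data_off masked out is programmed as
+ * value 0x0002 with mask 0x00ff into the 16-bit big-endian
+ * TCP_FLAGS_BE field.
+ */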
+
+static int
+sfc_mae_rule_parse_item_tcp(const struct rte_flow_item *item,
+ struct sfc_flow_parse_ctx *ctx,
+ struct rte_flow_error *error)
+{
+ struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
+ struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
+ struct rte_flow_item_tcp supp_mask;
+ const uint8_t *spec = NULL;
+ const uint8_t *mask = NULL;
+ int rc;
+
+ /*
+ * Item TCP is invalid when encountered among the outermost items.
+ * Detect that case by checking which match specification is
+ * currently being constructed.
+ */
+ if (ctx_mae->match_spec != ctx_mae->match_spec_action) {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "TCP in outer frame is invalid");
+ }
+
+ sfc_mae_item_build_supp_mask(flocs_tcp, RTE_DIM(flocs_tcp),
+ &supp_mask, sizeof(supp_mask));