sfc_mae_rule_process_pattern_data(struct sfc_mae_parse_ctx *ctx,
struct rte_flow_error *error)
{
+ efx_mae_match_spec_t *efx_spec = ctx->match_spec_action;
struct sfc_mae_pattern_data *pdata = &ctx->pattern_data;
struct sfc_mae_ethertype *ethertypes = pdata->ethertypes;
const rte_be16_t supported_tpids[] = {
RTE_BE16(RTE_ETHER_TYPE_VLAN), /* standard TPID first */
RTE_BE16(RTE_ETHER_TYPE_QINQ), RTE_BE16(RTE_ETHER_TYPE_QINQ1),
RTE_BE16(RTE_ETHER_TYPE_QINQ2), RTE_BE16(RTE_ETHER_TYPE_QINQ3),
};
unsigned int nb_supported_tpids = RTE_DIM(supported_tpids);
unsigned int ethertype_idx;
+ const uint8_t *valuep;
+ const uint8_t *maskp;
int rc;
+ if (pdata->innermost_ethertype_restriction.mask != 0 &&
+ pdata->nb_vlan_tags < SFC_MAE_MATCH_VLAN_MAX_NTAGS) {
+ /*
+ * If a single item VLAN is followed by a L3 item, value
+ * of "type" in item ETH can't be a double-tagging TPID.
+ */
+ nb_supported_tpids = 1;
+ }
+
/*
* sfc_mae_rule_parse_item_vlan() has already made sure
* that pdata->nb_vlan_tags does not exceed this figure.
*/
nb_supported_tpids = 1;
}
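+
+ /*
+ * If an L3 item was encountered, its EtherType restricts the
+ * innermost L2 item's "type": adopt the restriction when "type"
+ * was left unspecified, or fail on a conflicting explicit value.
+ */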
+ if (pdata->innermost_ethertype_restriction.mask == RTE_BE16(0xffff)) {
+ struct sfc_mae_ethertype *et = &ethertypes[ethertype_idx];
+
+ if (et->mask == 0) {
+ et->mask = RTE_BE16(0xffff);
+ et->value =
+ pdata->innermost_ethertype_restriction.value;
+ } else if (et->mask != RTE_BE16(0xffff) ||
+ et->value !=
+ pdata->innermost_ethertype_restriction.value) {
+ rc = EINVAL;
+ goto fail;
+ }
+ }
+
/*
* Now, when the number of VLAN tags is known, set fields
* ETHER_TYPE, VLAN0_PROTO and VLAN1_PROTO so that the first
* one is either a valid L2 EtherType (or 0x0000/0x0000)
* and the last two are valid TPIDs (or 0x0000/0x0000).
*/
rc = sfc_mae_set_ethertypes(ctx);
if (rc != 0)
goto fail;
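+
+ /*
+ * Match on the L3 "proto" recorded by the L3 item parser; the
+ * IP_PROTO field is deferred to this point. With no L3 item,
+ * the value and mask are all-zeros and the match is a no-op.
+ */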
+ valuep = (const uint8_t *)&pdata->l3_next_proto_value;
+ maskp = (const uint8_t *)&pdata->l3_next_proto_mask;
+ rc = efx_mae_match_spec_field_set(efx_spec, EFX_MAE_FIELD_IP_PROTO,
+ sizeof(pdata->l3_next_proto_value),
+ valuep,
+ sizeof(pdata->l3_next_proto_mask),
+ maskp);
+ if (rc != 0)
+ goto fail;
+
return 0;
fail:
return rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
"Failed to process pattern data");
}
+static const struct sfc_mae_field_locator flocs_ipv4[] = {
+ {
+ EFX_MAE_FIELD_SRC_IP4_BE,
+ RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.src_addr),
+ offsetof(struct rte_flow_item_ipv4, hdr.src_addr),
+ },
+ {
+ EFX_MAE_FIELD_DST_IP4_BE,
+ RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.dst_addr),
+ offsetof(struct rte_flow_item_ipv4, hdr.dst_addr),
+ },
+ {
+ /*
+ * This locator is used only for building supported fields mask.
+ * The field is handled by sfc_mae_rule_process_pattern_data().
+ */
+ SFC_MAE_FIELD_HANDLING_DEFERRED,
+ RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.next_proto_id),
+ offsetof(struct rte_flow_item_ipv4, hdr.next_proto_id),
+ },
+ {
+ EFX_MAE_FIELD_IP_TOS,
+ RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4,
+ hdr.type_of_service),
+ offsetof(struct rte_flow_item_ipv4, hdr.type_of_service),
+ },
+ {
+ EFX_MAE_FIELD_IP_TTL,
+ RTE_SIZEOF_FIELD(struct rte_flow_item_ipv4, hdr.time_to_live),
+ offsetof(struct rte_flow_item_ipv4, hdr.time_to_live),
+ },
+};
+
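+/*
+ * Parse an IPV4 item: record the EtherType restriction for the
+ * innermost L2 item and the L3 "proto" for deferred handling in
+ * sfc_mae_rule_process_pattern_data(); the remaining header fields
+ * are mapped to the MAE match specification via flocs_ipv4.
+ */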
+static int
+sfc_mae_rule_parse_item_ipv4(const struct rte_flow_item *item,
+ struct sfc_flow_parse_ctx *ctx,
+ struct rte_flow_error *error)
+{
+ rte_be16_t ethertype_ipv4_be = RTE_BE16(RTE_ETHER_TYPE_IPV4);
+ struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
+ struct sfc_mae_pattern_data *pdata = &ctx_mae->pattern_data;
+ struct rte_flow_item_ipv4 supp_mask;
+ const uint8_t *spec = NULL;
+ const uint8_t *mask = NULL;
+ int rc;
+
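+ /*
+ * Only the header fields listed in flocs_ipv4 are supported;
+ * sfc_flow_parse_init() rejects masks outside of supp_mask.
+ */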
+ sfc_mae_item_build_supp_mask(flocs_ipv4, RTE_DIM(flocs_ipv4),
+ &supp_mask, sizeof(supp_mask));
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec, (const void **)&mask,
+ (const void *)&supp_mask,
+ &rte_flow_item_ipv4_mask,
+ sizeof(struct rte_flow_item_ipv4), error);
+ if (rc != 0)
+ return rc;
+
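+ /*
+ * The mere presence of the IPV4 item restricts the innermost
+ * L2 item's "type" to 0x0800, even if no spec is provided.
+ */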
+ pdata->innermost_ethertype_restriction.value = ethertype_ipv4_be;
+ pdata->innermost_ethertype_restriction.mask = RTE_BE16(0xffff);
+
+ if (spec != NULL) {
+ const struct rte_flow_item_ipv4 *item_spec;
+ const struct rte_flow_item_ipv4 *item_mask;
+
+ item_spec = (const struct rte_flow_item_ipv4 *)spec;
+ item_mask = (const struct rte_flow_item_ipv4 *)mask;
+
+ pdata->l3_next_proto_value = item_spec->hdr.next_proto_id;
+ pdata->l3_next_proto_mask = item_mask->hdr.next_proto_id;
+ } else {
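+ /* Nothing to match on; only the restriction above takes effect. */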
+ return 0;
+ }
+
+ return sfc_mae_parse_item(flocs_ipv4, RTE_DIM(flocs_ipv4), spec, mask,
+ ctx_mae->match_spec_action, error);
+}
+
static const struct sfc_flow_item sfc_flow_items[] = {
{
.type = RTE_FLOW_ITEM_TYPE_PORT_ID,
/* ... */
},
/* ... */
{
.type = RTE_FLOW_ITEM_TYPE_VLAN,
.ctx_type = SFC_FLOW_PARSE_CTX_MAE,
.parse = sfc_mae_rule_parse_item_vlan,
},
+ {
+ .type = RTE_FLOW_ITEM_TYPE_IPV4,
+ .prev_layer = SFC_FLOW_ITEM_L2,
+ .layer = SFC_FLOW_ITEM_L3,
+ .ctx_type = SFC_FLOW_PARSE_CTX_MAE,
+ .parse = sfc_mae_rule_parse_item_ipv4,
+ },
};
* values (0x88a8, 0x9100, 0x9200, 0x9300), and the outermost
* VLAN item must have "inner_type" set to TPID value 0x8100.
*
+ * - If an L2 item is followed by an L3 one, the former must
+ * have "type" ("inner_type") set either to the EtherType of
+ * the protocol used in the L3 item or to 0x0000/0x0000.
+ *
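+ * For example, a pattern ETH (type 0x0800) / IPV4 satisfies this
+ * requirement, whereas ETH (type 0x86dd) / IPV4 is rejected.
+ *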
* In turn, mapping between RTE convention (above requirements) and
* MAE fields is non-trivial. The following scheme indicates
* which item EtherTypes go to which MAE fields in the case
*/
struct sfc_mae_ethertype ethertypes[SFC_MAE_L2_MAX_NITEMS];
unsigned int nb_vlan_tags;
+
+ /**
+ * L3 requirement for the innermost L2 item's "type" ("inner_type").
+ * This contains one of:
+ * - 0x0800/0xffff: IPV4
+ * - 0x0000/0x0000: no L3 item
+ */
+ struct sfc_mae_ethertype innermost_ethertype_restriction;
+
+ /**
+ * The following two fields keep track of the L3 "proto" mask and
+ * value. The corresponding field in the MAE match specification
+ * is filled in at the end of parsing.
+ */
+ uint8_t l3_next_proto_value;
+ uint8_t l3_next_proto_mask;
};
struct sfc_mae_parse_ctx {