[rte_flow items]
conntrack = Y
ecpri = Y
+esp = Y
eth = Y
flex = Y
geneve = Y
- Matching on IPv4 Internet Header Length (IHL).
- Matching on GTP extension header with raw encap/decap action.
- Matching on Geneve TLV option header with raw encap/decap action.
+- Matching on ESP header SPI field.
- RSS support in sample action.
- E-Switch mirroring and jump.
- E-Switch mirroring and modify.
* Added support for promiscuous mode on Windows.
* Added support for MTU on Windows.
+ * Added matching and RSS on IPsec ESP (see the usage sketch after this list).
* **Updated Marvell cnxk crypto driver.**
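For orientation, a minimal sketch of driving the new capability through the generic rte_flow API; the port number, queue list, and function name are illustrative, and error handling is elided:

#include <rte_common.h>
#include <rte_flow.h>

/* Match IPv4/ESP and spread matching packets across four queues,
 * hashing on the ESP SPI (RTE_ETH_RSS_ESP). A specific SPI could be
 * pinned by filling .spec/.mask on the ESP item instead. */
static struct rte_flow *
create_esp_rss_flow(uint16_t port_id, struct rte_flow_error *error)
{
	static const uint16_t queues[] = { 0, 1, 2, 3 };
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_ESP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_rss rss = {
		.types = RTE_ETH_RSS_ESP,
		.queue_num = RTE_DIM(queues),
		.queue = queues,
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}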
# [ "MACRO to define if found", "header for the search",
# "symbol to search" ]
has_sym_args = [
+ [ 'HAVE_IBV_RX_HASH_IPSEC_SPI', 'infiniband/verbs.h',
+ 'IBV_RX_HASH_IPSEC_SPI' ],
[ 'HAVE_IBV_RELAXED_ORDERING', 'infiniband/verbs.h',
'IBV_ACCESS_RELAXED_ORDERING ' ],
[ 'HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT', 'infiniband/mlx5dv.h',
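Each triple above is a compile-time probe: if the symbol is found in the given header (the build file checks these entries with meson's cc.has_header_symbol()), the HAVE_* macro in the first column is defined. HAVE_IBV_RX_HASH_IPSEC_SPI is what gates the IBV_RX_HASH_IPSEC_SPI fallback definition later in this patch.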
u8 reserved_at_120[0xa];
u8 geneve_opt_len[0x6];
u8 geneve_protocol_type[0x10];
- u8 reserved_at_140[0xc0];
+ u8 reserved_at_140[0x20];
+ u8 inner_esp_spi[0x20];
+ u8 outer_esp_spi[0x20];
+ u8 reserved_at_1a0[0x60];
};
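A quick layout check, as a standalone sketch (not part of the patch): the widths of the fields replacing reserved_at_140[0xc0] must still sum to 0xc0 bits so the PRM offsets after them are unchanged.

#include <assert.h>

/* 0x20 (reserved) + 0x20 (inner_esp_spi) + 0x20 (outer_esp_spi)
 * + 0x60 (reserved_at_1a0) == 0xc0 bits, the size of the old gap. */
static_assert(0x20 + 0x20 + 0x20 + 0x60 == 0xc0,
	      "fte_match_set_misc layout preserved");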
struct mlx5_ifc_ipv4_layout_bits {
/* Supported RSS */
#define MLX5_RSS_HF_MASK (~(RTE_ETH_RSS_IP | RTE_ETH_RSS_UDP | RTE_ETH_RSS_TCP | \
- MLX5_RSS_SRC_DST_ONLY))
+ MLX5_RSS_SRC_DST_ONLY | RTE_ETH_RSS_ESP))
/* Timeout in seconds to get a valid link status. */
#define MLX5_LINK_STATUS_TIMEOUT 10
(!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
(!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
- MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT |
+ (!!(hash_fields & IBV_RX_HASH_IPSEC_SPI)) <<
+ MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI;
}
if (is_hairpin)
tir_attr->transport_domain = priv->sh->td->id;
case RTE_FLOW_ITEM_TYPE_IPV6:
case RTE_FLOW_ITEM_TYPE_UDP:
case RTE_FLOW_ITEM_TYPE_TCP:
+ case RTE_FLOW_ITEM_TYPE_ESP:
case RTE_FLOW_ITEM_TYPE_VXLAN:
case RTE_FLOW_ITEM_TYPE_NVGRE:
case RTE_FLOW_ITEM_TYPE_GRE:
case IPPROTO_IPV6:
type = RTE_FLOW_ITEM_TYPE_IPV6;
break;
+ case IPPROTO_ESP:
+ type = RTE_FLOW_ITEM_TYPE_ESP;
+ break;
default:
type = RTE_FLOW_ITEM_TYPE_END;
}
MLX5_EXPANSION_OUTER_IPV4,
MLX5_EXPANSION_OUTER_IPV4_UDP,
MLX5_EXPANSION_OUTER_IPV4_TCP,
+ MLX5_EXPANSION_OUTER_IPV4_ESP,
MLX5_EXPANSION_OUTER_IPV6,
MLX5_EXPANSION_OUTER_IPV6_UDP,
MLX5_EXPANSION_OUTER_IPV6_TCP,
+ MLX5_EXPANSION_OUTER_IPV6_ESP,
MLX5_EXPANSION_VXLAN,
MLX5_EXPANSION_STD_VXLAN,
MLX5_EXPANSION_L3_VXLAN,
MLX5_EXPANSION_IPV4,
MLX5_EXPANSION_IPV4_UDP,
MLX5_EXPANSION_IPV4_TCP,
+ MLX5_EXPANSION_IPV4_ESP,
MLX5_EXPANSION_IPV6,
MLX5_EXPANSION_IPV6_UDP,
MLX5_EXPANSION_IPV6_TCP,
+ MLX5_EXPANSION_IPV6_ESP,
MLX5_EXPANSION_IPV6_FRAG_EXT,
MLX5_EXPANSION_GTP,
MLX5_EXPANSION_GENEVE,
.next = MLX5_FLOW_EXPAND_RSS_NEXT
(MLX5_EXPANSION_OUTER_IPV4_UDP,
MLX5_EXPANSION_OUTER_IPV4_TCP,
+ MLX5_EXPANSION_OUTER_IPV4_ESP,
MLX5_EXPANSION_GRE,
MLX5_EXPANSION_NVGRE,
MLX5_EXPANSION_IPV4,
.type = RTE_FLOW_ITEM_TYPE_TCP,
.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
},
+ [MLX5_EXPANSION_OUTER_IPV4_ESP] = {
+ .type = RTE_FLOW_ITEM_TYPE_ESP,
+ .rss_types = RTE_ETH_RSS_ESP,
+ },
[MLX5_EXPANSION_OUTER_IPV6] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT
(MLX5_EXPANSION_OUTER_IPV6_UDP,
MLX5_EXPANSION_OUTER_IPV6_TCP,
+ MLX5_EXPANSION_OUTER_IPV6_ESP,
MLX5_EXPANSION_IPV4,
MLX5_EXPANSION_IPV6,
MLX5_EXPANSION_GRE,
.type = RTE_FLOW_ITEM_TYPE_TCP,
.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
},
+ [MLX5_EXPANSION_OUTER_IPV6_ESP] = {
+ .type = RTE_FLOW_ITEM_TYPE_ESP,
+ .rss_types = RTE_ETH_RSS_ESP,
+ },
[MLX5_EXPANSION_VXLAN] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
MLX5_EXPANSION_IPV4,
},
[MLX5_EXPANSION_IPV4] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
- MLX5_EXPANSION_IPV4_TCP),
+ MLX5_EXPANSION_IPV4_TCP,
+ MLX5_EXPANSION_IPV4_ESP),
.type = RTE_FLOW_ITEM_TYPE_IPV4,
.rss_types = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
.type = RTE_FLOW_ITEM_TYPE_TCP,
.rss_types = RTE_ETH_RSS_NONFRAG_IPV4_TCP,
},
+ [MLX5_EXPANSION_IPV4_ESP] = {
+ .type = RTE_FLOW_ITEM_TYPE_ESP,
+ .rss_types = RTE_ETH_RSS_ESP,
+ },
[MLX5_EXPANSION_IPV6] = {
.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
MLX5_EXPANSION_IPV6_TCP,
+ MLX5_EXPANSION_IPV6_ESP,
MLX5_EXPANSION_IPV6_FRAG_EXT),
.type = RTE_FLOW_ITEM_TYPE_IPV6,
.rss_types = RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
.type = RTE_FLOW_ITEM_TYPE_TCP,
.rss_types = RTE_ETH_RSS_NONFRAG_IPV6_TCP,
},
+ [MLX5_EXPANSION_IPV6_ESP] = {
+ .type = RTE_FLOW_ITEM_TYPE_ESP,
+ .rss_types = RTE_ETH_RSS_ESP,
+ },
[MLX5_EXPANSION_IPV6_FRAG_EXT] = {
.type = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT,
},
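To make the new expansion nodes concrete, an illustrative before/after (not from the patch): a pattern that stops at L3 is internally extended when the RSS action asks for ESP hashing.

#include <rte_flow.h>

/* Application-supplied pattern: eth / ipv4 / end. */
static const struct rte_flow_item given[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
/* With RTE_ETH_RSS_ESP requested, MLX5_EXPANSION_IPV4 now lists
 * MLX5_EXPANSION_IPV4_ESP as a successor, so the PMD also installs
 * the equivalent of: eth / ipv4 / esp / end, hashed on the SPI. */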
return 0;
}
+/**
+ * Validate ESP item.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
+ * @param[in] target_protocol
+ * The next protocol in the previous item.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_esp(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint8_t target_protocol,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_esp *mask = item->mask;
+ const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3;
+ const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4;
+ int ret;
+
+ if (!(item_flags & l3m))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L3 is mandatory to filter on L4");
+ if (item_flags & l4m)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple L4 layers not supported");
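+	/* 0xff means the preceding item left the next protocol unconstrained. */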
+ if (target_protocol != 0xff && target_protocol != IPPROTO_ESP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "protocol filtering not compatible"
+ " with ESP layer");
+ if (!mask)
+ mask = &rte_flow_item_esp_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_esp_mask,
+ sizeof(struct rte_flow_item_esp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
+ error);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
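An illustrative counter-example (not part of the patch) for the first check above: ESP sits at L4, so a pattern that goes straight from ETH to ESP is refused.

#include <rte_flow.h>

/* Rejected by mlx5_flow_validate_item_esp(): no L3 item precedes ESP,
 * so validation fails with "L3 is mandatory to filter on L4". */
static const struct rte_flow_item no_l3[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_ESP },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};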
/**
* Validate UDP item.
*
#define MLX5_FLOW_ITEM_INNER_FLEX (UINT64_C(1) << 38)
#define MLX5_FLOW_ITEM_FLEX_TUNNEL (UINT64_C(1) << 39)
+/* ESP item */
+#define MLX5_FLOW_ITEM_ESP (UINT64_C(1) << 40)
+
/* Outer Masks. */
#define MLX5_FLOW_LAYER_OUTER_L3 \
(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_SRC_PORT_TCP)
#define MLX5_RSS_HASH_IPV6_TCP_DST_ONLY \
(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_DST_PORT_TCP)
+
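+/*
+ * Fallback for rdma-core versions that predate this flag;
+ * (1 << 8) is the IBV_RX_HASH_IPSEC_SPI value in infiniband/verbs.h.
+ */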
+#ifndef HAVE_IBV_RX_HASH_IPSEC_SPI
+#define IBV_RX_HASH_IPSEC_SPI (1U << 8)
+#endif
+
+#define MLX5_RSS_HASH_ESP_SPI IBV_RX_HASH_IPSEC_SPI
+#define MLX5_RSS_HASH_IPV4_ESP (MLX5_RSS_HASH_IPV4 | \
+ MLX5_RSS_HASH_ESP_SPI)
+#define MLX5_RSS_HASH_IPV6_ESP (MLX5_RSS_HASH_IPV6 | \
+ MLX5_RSS_HASH_ESP_SPI)
#define MLX5_RSS_HASH_NONE 0ULL
MLX5_RSS_HASH_IPV4,
MLX5_RSS_HASH_IPV4_TCP,
MLX5_RSS_HASH_IPV4_UDP,
+ MLX5_RSS_HASH_IPV4_ESP,
MLX5_RSS_HASH_IPV6,
MLX5_RSS_HASH_IPV6_TCP,
MLX5_RSS_HASH_IPV6_UDP,
+ MLX5_RSS_HASH_IPV6_ESP,
+ MLX5_RSS_HASH_ESP_SPI,
MLX5_RSS_HASH_NONE,
};
uint8_t target_protocol,
const struct rte_flow_item_tcp *flow_mask,
struct rte_flow_error *error);
+int mlx5_flow_validate_item_esp(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint8_t target_protocol,
+ struct rte_flow_error *error);
int mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
uint64_t item_flags,
uint8_t target_protocol,
switch (type) {
case RTE_FLOW_ITEM_TYPE_VOID:
break;
+ case RTE_FLOW_ITEM_TYPE_ESP:
+ ret = mlx5_flow_validate_item_esp(items, item_flags,
+ next_protocol,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_ITEM_ESP;
+ break;
case RTE_FLOW_ITEM_TYPE_PORT_ID:
ret = flow_dv_validate_item_port_id
(dev, items, attr, item_flags, error);
(tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
}
+/**
+ * Add ESP item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ *   Nonzero when the item belongs to an inner (tunnel) header.
+ */
+static void
+flow_dv_translate_item_esp(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ int inner)
+{
+ const struct rte_flow_item_esp *esp_m = item->mask;
+ const struct rte_flow_item_esp *esp_v = item->spec;
+ void *headers_m;
+ void *headers_v;
+ char *spi_m;
+ char *spi_v;
+
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
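+	/* Always restrict matching to IP protocol == ESP (full-byte mask). */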
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ESP);
+ if (!esp_v)
+ return;
+ if (!esp_m)
+ esp_m = &rte_flow_item_esp_mask;
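+	/* The SPI fields live in misc parameters, not in the L3/L4 header sets. */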
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+ if (inner) {
+ spi_m = MLX5_ADDR_OF(fte_match_set_misc, headers_m, inner_esp_spi);
+ spi_v = MLX5_ADDR_OF(fte_match_set_misc, headers_v, inner_esp_spi);
+ } else {
+ spi_m = MLX5_ADDR_OF(fte_match_set_misc, headers_m, outer_esp_spi);
+ spi_v = MLX5_ADDR_OF(fte_match_set_misc, headers_v, outer_esp_spi);
+ }
+ *(uint32_t *)spi_m = esp_m->hdr.spi;
+ *(uint32_t *)spi_v = esp_m->hdr.spi & esp_v->hdr.spi;
+}
+
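The two closing assignments follow the usual matcher convention: the mask is programmed verbatim and the value is pre-masked, because the device compares the masked packet field against the key. With illustrative numbers:

#include <stdint.h>

uint32_t spi_mask  = 0xffff0000;          /* match upper 16 SPI bits only */
uint32_t spi_spec  = 0x12345678;          /* application-requested SPI */
uint32_t spi_value = spi_spec & spi_mask; /* 0x12340000 is written to the key */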
/**
* Add UDP item to matcher and to the value.
*
fields |= MLX5_IPV6_IBV_RX_HASH;
}
}
- if (fields == 0)
+ if (items & MLX5_FLOW_ITEM_ESP) {
+ if (rss_types & RTE_ETH_RSS_ESP)
+ fields |= IBV_RX_HASH_IPSEC_SPI;
+ }
+ if ((fields & ~IBV_RX_HASH_IPSEC_SPI) == 0) {
+ *hash_fields = fields;
	/*
	 * Either the RSS types do not match the L3 protocol
	 * (IPv4/IPv6) defined in the flow rule, or only the
	 * ESP SPI is to be hashed.
	 */
return;
+ }
if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
(!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP)) ||
!items) {
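For orientation, the reworked early exit has two outcomes: if the RSS types name ESP alone, fields holds only IBV_RX_HASH_IPSEC_SPI, it is stored as-is, and the flow is served by the MLX5_RSS_HASH_ESP_SPI queue set; if the types also include IP, the L3 bits survive the mask test and the SPI bit is carried forward into MLX5_RSS_HASH_IPV4_ESP or MLX5_RSS_HASH_IPV6_ESP.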
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "item not supported");
switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ESP:
+ flow_dv_translate_item_esp(match_mask, match_value,
+ items, tunnel);
+ last_item = MLX5_FLOW_ITEM_ESP;
+ break;
case RTE_FLOW_ITEM_TYPE_PORT_ID:
flow_dv_translate_item_port_id
(dev, match_mask, match_value, items, attr);
case MLX5_RSS_HASH_NONE:
hrxqs[6] = hrxq_idx;
return 0;
+ case MLX5_RSS_HASH_IPV4_ESP:
+ hrxqs[7] = hrxq_idx;
+ return 0;
+ case MLX5_RSS_HASH_IPV6_ESP:
+ hrxqs[8] = hrxq_idx;
+ return 0;
+ case MLX5_RSS_HASH_ESP_SPI:
+ hrxqs[9] = hrxq_idx;
+ return 0;
default:
return -1;
}
return hrxqs[5];
case MLX5_RSS_HASH_NONE:
return hrxqs[6];
+ case MLX5_RSS_HASH_IPV4_ESP:
+ return hrxqs[7];
+ case MLX5_RSS_HASH_IPV6_ESP:
+ return hrxqs[8];
+ case MLX5_RSS_HASH_ESP_SPI:
+ return hrxqs[9];
default:
return 0;
}