case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
case RTE_FLOW_ITEM_TYPE_GENEVE:
case RTE_FLOW_ITEM_TYPE_MPLS:
+ case RTE_FLOW_ITEM_TYPE_GTP:
if (tunnel_decap)
attr->attr = 0;
break;
bool external, int hairpin, struct rte_flow_error *error)
{
int ret;
- uint64_t action_flags = 0;
+ uint64_t aso_mask, action_flags = 0;
uint64_t item_flags = 0;
uint64_t last_item = 0;
uint8_t next_protocol = 0xff;
bool shared_count = false;
uint16_t udp_dport = 0;
uint32_t tag_id = 0;
+ const struct rte_flow_action_age *non_shared_age = NULL;
+ const struct rte_flow_action_count *count = NULL;
if (items == NULL)
return -1;
switch (type) {
case RTE_FLOW_ITEM_TYPE_VOID:
break;
+ case RTE_FLOW_ITEM_TYPE_ESP:
+ ret = mlx5_flow_validate_item_esp(items, item_flags,
+ next_protocol,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_ITEM_ESP;
+ break;
case RTE_FLOW_ITEM_TYPE_PORT_ID:
ret = flow_dv_validate_item_port_id
(dev, items, attr, item_flags, error);
attr, error);
if (ret < 0)
return ret;
+ count = actions->conf;
action_flags |= MLX5_FLOW_ACTION_COUNT;
++actions_n;
break;
++actions_n;
break;
case RTE_FLOW_ACTION_TYPE_AGE:
+ non_shared_age = actions->conf;
ret = flow_dv_validate_action_age(action_flags,
actions, dev,
error);
"cannot be done before meter action");
}
}
+ /*
+ * Only one ASO action is supported in a single flow rule.
+ * A non-shared AGE combined with COUNT falls back to the HW counter; no ASO hit object is used.
+ * Group 0 uses the HW counter for AGE as well, even when there is no COUNT action.
+ */
+ aso_mask = (action_flags & MLX5_FLOW_ACTION_METER && priv->sh->meter_aso_en) << 2 |
+ (action_flags & MLX5_FLOW_ACTION_CT && priv->sh->ct_aso_en) << 1 |
+ (action_flags & MLX5_FLOW_ACTION_AGE &&
+ !(non_shared_age && count) &&
+ (attr->group || (attr->transfer && priv->fdb_def_rule)) &&
+ priv->sh->flow_hit_aso_en);
+ if (__builtin_popcountl(aso_mask) > 1)
+ return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "unsupported combining AGE, METER, CT ASO actions in a single rule");
/*
* Hairpin flow will add one more TAG action in TX implicit mode.
* In TX explicit mode, there will be no hairpin flow ID.
(tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
}
+/**
+ * Add ESP item to matcher and to the value.
+ *
+ * Forces the IP protocol field to ESP in the (inner or outer) L2-L4
+ * headers, then, if an SPI spec is supplied, programs the matching
+ * ESP SPI field in the misc parameters section of the matcher/key.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_esp(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ int inner)
+{
+ const struct rte_flow_item_esp *esp_m = item->mask;
+ const struct rte_flow_item_esp *esp_v = item->spec;
+ void *headers_m;
+ void *headers_v;
+ char *spi_m;
+ char *spi_v;
+
+ /* Select inner vs. outer L2-L4 header section of the match param. */
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ /* Always match on ip_protocol == ESP with a full mask. */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ESP);
+ /* No spec: protocol-only match, SPI left as wildcard. */
+ if (!esp_v)
+ return;
+ if (!esp_m)
+ esp_m = &rte_flow_item_esp_mask;
+ /*
+ * The SPI field lives in the misc parameters section; the
+ * headers_m/headers_v pointers are reused to address it even
+ * though the names date from the L2-L4 section above.
+ */
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+ if (inner) {
+ spi_m = MLX5_ADDR_OF(fte_match_set_misc, headers_m, inner_esp_spi);
+ spi_v = MLX5_ADDR_OF(fte_match_set_misc, headers_v, inner_esp_spi);
+ } else {
+ spi_m = MLX5_ADDR_OF(fte_match_set_misc, headers_m, outer_esp_spi);
+ spi_v = MLX5_ADDR_OF(fte_match_set_misc, headers_v, outer_esp_spi);
+ }
+ /*
+ * Matcher convention: value bits must be a subset of the mask,
+ * hence the value is written as spec & mask.
+ * NOTE(review): 32-bit store through a char * address — assumes
+ * the SPI field in fte_match_set_misc is 4-byte aligned; confirm
+ * against the PRM structure layout.
+ */
+ *(uint32_t *)spi_m = esp_m->hdr.spi;
+ *(uint32_t *)spi_v = esp_m->hdr.spi & esp_v->hdr.spi;
+}
+
/**
* Add UDP item to matcher and to the value.
*
fields |= MLX5_IPV6_IBV_RX_HASH;
}
}
- if (fields == 0)
+ if (items & MLX5_FLOW_ITEM_ESP) {
+ if (rss_types & RTE_ETH_RSS_ESP)
+ fields |= IBV_RX_HASH_IPSEC_SPI;
+ }
+ if ((fields & ~IBV_RX_HASH_IPSEC_SPI) == 0) {
+ *hash_fields = fields;
/*
* There is no match between the RSS types and the
* L3 protocol (IPv4/IPv6) defined in the flow rule.
*/
return;
+ }
if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
(!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP)) ||
!items) {
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "item not supported");
switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ESP:
+ flow_dv_translate_item_esp(match_mask, match_value,
+ items, tunnel);
+ last_item = MLX5_FLOW_ITEM_ESP;
+ break;
case RTE_FLOW_ITEM_TYPE_PORT_ID:
flow_dv_translate_item_port_id
(dev, match_mask, match_value, items, attr);
case MLX5_RSS_HASH_NONE:
hrxqs[6] = hrxq_idx;
return 0;
+ case MLX5_RSS_HASH_IPV4_ESP:
+ hrxqs[7] = hrxq_idx;
+ return 0;
+ case MLX5_RSS_HASH_IPV6_ESP:
+ hrxqs[8] = hrxq_idx;
+ return 0;
+ case MLX5_RSS_HASH_ESP_SPI:
+ hrxqs[9] = hrxq_idx;
+ return 0;
default:
return -1;
}
return hrxqs[5];
case MLX5_RSS_HASH_NONE:
return hrxqs[6];
+ case MLX5_RSS_HASH_IPV4_ESP:
+ return hrxqs[7];
+ case MLX5_RSS_HASH_IPV6_ESP:
+ return hrxqs[8];
+ case MLX5_RSS_HASH_ESP_SPI:
+ return hrxqs[9];
default:
return 0;
}
* MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive so they can share
* same slot in mlx5_rss_hash_fields.
*
- * @param[in] rss_types
- * RSS type.
+ * @param[in] orig_rss_types
+ * RSS type as provided in shared RSS action.
* @param[in, out] hash_field
* hash_field variable needed to be adjusted.
*
* void
*/
void
-flow_dv_action_rss_l34_hash_adjust(uint64_t rss_types,
+flow_dv_action_rss_l34_hash_adjust(uint64_t orig_rss_types,
uint64_t *hash_field)
{
+ uint64_t rss_types = rte_eth_rss_hf_refine(orig_rss_types);
+
switch (*hash_field & ~IBV_RX_HASH_INNER) {
case MLX5_RSS_HASH_IPV4:
if (rss_types & MLX5_IPV4_LAYER_TYPES) {
}
}
/* Create sub policy. */
- if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
+ if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[RTE_COLOR_GREEN] &&
+ !mtr_policy->sub_policys[domain][0]->rix_hrxq[RTE_COLOR_YELLOW]) {
/* Reuse the first pre-allocated sub_policy. */
sub_policy = mtr_policy->sub_policys[domain][0];
sub_policy_idx = sub_policy->idx;