case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
case RTE_FLOW_ITEM_TYPE_GENEVE:
case RTE_FLOW_ITEM_TYPE_MPLS:
+ case RTE_FLOW_ITEM_TYPE_GTP:
if (tunnel_decap)
attr->attr = 0;
break;
{
const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
const struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_ctx_shared *sh = priv->sh;
- bool direction_error = false;
if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
RTE_FLOW_ERROR_TYPE_ACTION, action,
"wrong action order, port_id should "
"be after push VLAN");
- /* Push VLAN is not supported in ingress except for CX6 FDB mode. */
- if (attr->transfer) {
- bool fdb_tx = priv->representor_id != UINT16_MAX;
- bool is_cx5 = sh->steering_format_version ==
- MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
-
- if (!fdb_tx && is_cx5)
- direction_error = true;
- } else if (attr->ingress) {
- direction_error = true;
- }
- if (direction_error)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
- NULL,
- "push vlan action not supported for ingress");
if (!attr->transfer && priv->representor)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
return 0;
}
+/**
+ * Indicates whether ASO aging is supported.
+ *
+ * @param[in] sh
+ * Pointer to shared device context structure.
+ * @param[in] attr
+ * Attributes of flow that includes AGE action.
+ *
+ * @return
+ * True when ASO aging is supported, false otherwise.
+ */
+static inline bool
+flow_hit_aso_supported(const struct mlx5_dev_ctx_shared *sh,
+ const struct rte_flow_attr *attr)
+{
+ MLX5_ASSERT(sh && attr);
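+ /* ASO flow hit is usable only on the FDB (transfer) or on non-root tables (group > 0). */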
+ return (sh->flow_hit_aso_en && (attr->transfer || attr->group));
+}
+
/**
* Validate count action.
*
* Indicator if action is shared.
* @param[in] action_flags
* Holds the actions detected until now.
+ * @param[in] attr
+ * Attributes of flow that includes this action.
* @param[out] error
* Pointer to error structure.
*
static int
flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
uint64_t action_flags,
+ const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"duplicate count actions set");
if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
- !priv->sh->flow_hit_aso_en)
+ !flow_hit_aso_supported(priv->sh, attr))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "old age and shared count combination is not supported");
+ "old age and indirect count combination is not supported");
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
return 0;
#endif
* @return
* sizeof struct item_type, 0 if void or irrelevant.
*/
-static size_t
+size_t
flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type)
{
size_t retval;
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static int
+int
flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
size_t *size, struct rte_flow_error *error)
{
case RTE_FLOW_ACTION_TYPE_COUNT:
ret = flow_dv_validate_action_count
(dev, false, *action_flags | sub_action_flags,
- error);
+ attr, error);
if (ret < 0)
return ret;
*count = act->conf;
}
/* Continue validation for Xcap actions. */
if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) &&
- (queue_index == 0xFFFF ||
- mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
+ (queue_index == 0xFFFF || !mlx5_rxq_is_hairpin(dev, queue_index))) {
if ((sub_action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
MLX5_FLOW_XCAP_ACTIONS)
return rte_flow_error_set(error, ENOTSUP,
bool external, int hairpin, struct rte_flow_error *error)
{
int ret;
- uint64_t action_flags = 0;
+ uint64_t aso_mask, action_flags = 0;
uint64_t item_flags = 0;
uint64_t last_item = 0;
uint8_t next_protocol = 0xff;
const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
const struct rte_flow_item *port_id_item = NULL;
bool def_policy = false;
+ bool shared_count = false;
uint16_t udp_dport = 0;
+ uint32_t tag_id = 0;
+ const struct rte_flow_action_age *non_shared_age = NULL;
+ const struct rte_flow_action_count *count = NULL;
if (items == NULL)
return -1;
switch (type) {
case RTE_FLOW_ITEM_TYPE_VOID:
break;
+ case RTE_FLOW_ITEM_TYPE_ESP:
+ ret = mlx5_flow_validate_item_esp(items, item_flags,
+ next_protocol,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_ITEM_ESP;
+ break;
case RTE_FLOW_ITEM_TYPE_PORT_ID:
ret = flow_dv_validate_item_port_id
(dev, items, attr, item_flags, error);
gre_item = items;
last_item = MLX5_FLOW_LAYER_GRE;
break;
+ case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
+ ret = mlx5_flow_validate_item_gre_option(dev, items, item_flags,
+ attr, gre_item, error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_GRE;
+ break;
case RTE_FLOW_ITEM_TYPE_NVGRE:
ret = mlx5_flow_validate_item_nvgre(items, item_flags,
next_protocol,
return ret;
last_item = MLX5_FLOW_ITEM_TAG;
break;
- case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
+ last_item = MLX5_FLOW_ITEM_TX_QUEUE;
+ break;
+ case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
break;
case RTE_FLOW_ITEM_TYPE_GTP:
ret = flow_dv_validate_item_gtp(dev, items, item_flags,
}
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
int type = actions->type;
- bool shared_count = false;
if (!mlx5_flow_os_action_supported(type))
return rte_flow_error_set(error, ENOTSUP,
++actions_n;
if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
modify_after_mirror = 1;
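+ /* Record the tag index; it is checked against a later sample action. */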
+ tag_id = ((const struct rte_flow_action_set_tag *)
+ actions->conf)->index;
action_flags |= MLX5_FLOW_ACTION_SET_TAG;
rw_act_num += MLX5_ACT_NUM_SET_TAG;
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
ret = flow_dv_validate_action_count(dev, shared_count,
action_flags,
- error);
+ attr, error);
if (ret < 0)
return ret;
+ count = actions->conf;
action_flags |= MLX5_FLOW_ACTION_COUNT;
++actions_n;
break;
++actions_n;
break;
case RTE_FLOW_ACTION_TYPE_AGE:
+ non_shared_age = actions->conf;
ret = flow_dv_validate_action_age(action_flags,
actions, dev,
error);
return ret;
/*
* Validate the regular AGE action (using counter)
- * mutual exclusion with share counter actions.
+ * is mutually exclusive with indirect counter actions.
*/
- if (!priv->sh->flow_hit_aso_en) {
+ if (!flow_hit_aso_supported(priv->sh, attr)) {
if (shared_count)
return rte_flow_error_set
(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
- "old age and shared count combination is not supported");
+ "old age and indirect count combination is not supported");
if (sample_count)
return rte_flow_error_set
(error, EINVAL,
error);
if (ret < 0)
return ret;
+ if ((action_flags & MLX5_FLOW_ACTION_SET_TAG) &&
+ tag_id == 0 && priv->mtr_color_reg == REG_NON)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "sample after tag action causes metadata tag index 0 corruption");
action_flags |= MLX5_FLOW_ACTION_SAMPLE;
++actions_n;
break;
*/
if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
MLX5_FLOW_VLAN_ACTIONS)) &&
- (queue_index == 0xFFFF ||
- mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
+ (queue_index == 0xFFFF || !mlx5_rxq_is_hairpin(dev, queue_index) ||
((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
conf->tx_explicit != 0))) {
if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "encap and decap "
"combination aren't supported");
+ /* Push VLAN is not supported in ingress except for NICs newer than CX5. */
+ if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) {
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ bool direction_error = false;
+
+ if (attr->transfer) {
+ bool fdb_tx = priv->representor_id != UINT16_MAX;
+ bool is_cx5 = sh->steering_format_version ==
+ MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5;
+
+ if (!fdb_tx && is_cx5)
+ direction_error = true;
+ } else if (attr->ingress) {
+ direction_error = true;
+ }
+ if (direction_error)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ NULL,
+ "push VLAN action not supported "
+ "for ingress");
+ }
if (!attr->transfer && attr->ingress) {
if (action_flags & MLX5_FLOW_ACTION_ENCAP)
return rte_flow_error_set
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "encap is not supported"
" for ingress traffic");
- else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
- return rte_flow_error_set
- (error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL, "push VLAN action not "
- "supported for ingress");
else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
MLX5_FLOW_VLAN_ACTIONS)
return rte_flow_error_set
"cannot be done before meter action");
}
}
+ /*
+ * Only one ASO action is supported in a single flow rule.
+ * A non-shared AGE combined with a counter falls back to the HW
+ * counter, so no ASO flow hit object is needed.
+ * Group 0 uses the HW counter for AGE too, even without a counter action.
+ */
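+ /* aso_mask bits: 2 - meter ASO, 1 - CT ASO, 0 - flow hit (AGE) ASO. */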
+ aso_mask = (action_flags & MLX5_FLOW_ACTION_METER && priv->sh->meter_aso_en) << 2 |
+ (action_flags & MLX5_FLOW_ACTION_CT && priv->sh->ct_aso_en) << 1 |
+ (action_flags & MLX5_FLOW_ACTION_AGE &&
+ !(non_shared_age && count) &&
+ (attr->group || (attr->transfer && priv->fdb_def_rule)) &&
+ priv->sh->flow_hit_aso_en);
+ if (__builtin_popcountl(aso_mask) > 1)
+ return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "unsupported combining AGE, METER, CT ASO actions in a single rule");
/*
* Hairpin flow will add one more TAG action in TX implicit mode.
* In TX explicit mode, there will be no hairpin flow ID.
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"sample before modify action is not supported");
+ /*
+ * Validate the NIC egress flow on a representor: except for the
+ * implicit hairpin default egress flow with a TX_QUEUE item, such
+ * flows do not work due to a metadata regC0 mismatch.
+ */
+ if ((!attr->transfer && attr->egress) && priv->representor &&
+ !(item_flags & MLX5_FLOW_ITEM_TX_QUEUE))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "NIC egress rules on representors"
+ " is not supported");
return 0;
}
(tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
}
+/**
+ * Add ESP item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_esp(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ int inner)
+{
+ const struct rte_flow_item_esp *esp_m = item->mask;
+ const struct rte_flow_item_esp *esp_v = item->spec;
+ void *headers_m;
+ void *headers_v;
+ char *spi_m;
+ char *spi_v;
+
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
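+ /* Match on the ESP IP protocol even when no SPI spec is provided. */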
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ESP);
+ if (!esp_v)
+ return;
+ if (!esp_m)
+ esp_m = &rte_flow_item_esp_mask;
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+ if (inner) {
+ spi_m = MLX5_ADDR_OF(fte_match_set_misc, headers_m, inner_esp_spi);
+ spi_v = MLX5_ADDR_OF(fte_match_set_misc, headers_v, inner_esp_spi);
+ } else {
+ spi_m = MLX5_ADDR_OF(fte_match_set_misc, headers_m, outer_esp_spi);
+ spi_v = MLX5_ADDR_OF(fte_match_set_misc, headers_v, outer_esp_spi);
+ }
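+ /* Matcher takes the mask as-is; the value is pre-masked with the mask. */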
+ *(uint32_t *)spi_m = esp_m->hdr.spi;
+ *(uint32_t *)spi_v = esp_m->hdr.spi & esp_v->hdr.spi;
+}
+
/**
* Add UDP item to matcher and to the value.
*
protocol_m & protocol_v);
}
+/**
+ * Add GRE optional items to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] gre_item
+ * Pointer to gre_item.
+ * @param[in] pattern_flags
+ * Accumulated pattern flags.
+ */
+static void
+flow_dv_translate_item_gre_option(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ const struct rte_flow_item *gre_item,
+ uint64_t pattern_flags)
+{
+ const struct rte_flow_item_gre_opt *option_m = item->mask;
+ const struct rte_flow_item_gre_opt *option_v = item->spec;
+ const struct rte_flow_item_gre *gre_m = gre_item->mask;
+ const struct rte_flow_item_gre *gre_v = gre_item->spec;
+ static const struct rte_flow_item_gre empty_gre = {0};
+ struct rte_flow_item gre_key_item;
+ uint16_t c_rsvd0_ver_m, c_rsvd0_ver_v;
+ uint16_t protocol_m, protocol_v;
+ void *misc5_m;
+ void *misc5_v;
+
+ /*
+ * If only the key field is matched, keep using misc for matching.
+ * If checksum or sequence matching is needed, use misc5 instead;
+ * misc is then not needed.
+ */
+ if (!(option_m->sequence.sequence ||
+ option_m->checksum_rsvd.checksum)) {
+ flow_dv_translate_item_gre(matcher, key, gre_item,
+ pattern_flags);
+ gre_key_item.spec = &option_v->key.key;
+ gre_key_item.mask = &option_m->key.key;
+ flow_dv_translate_item_gre_key(matcher, key, &gre_key_item);
+ return;
+ }
+ if (!gre_v) {
+ gre_v = &empty_gre;
+ gre_m = &empty_gre;
+ } else {
+ if (!gre_m)
+ gre_m = &rte_flow_item_gre_mask;
+ }
+ protocol_v = gre_v->protocol;
+ protocol_m = gre_m->protocol;
+ if (!protocol_m) {
+ /* Force the next protocol to prevent matcher duplication. */
+ uint16_t ether_type =
+ mlx5_translate_tunnel_etypes(pattern_flags);
+ if (ether_type) {
+ protocol_v = rte_be_to_cpu_16(ether_type);
+ protocol_m = UINT16_MAX;
+ }
+ }
+ c_rsvd0_ver_v = gre_v->c_rsvd0_ver;
+ c_rsvd0_ver_m = gre_m->c_rsvd0_ver;
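+ /* GRE flag bits in c_rsvd0_ver: C - 0x8000, K - 0x2000, S - 0x1000. */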
+ if (option_m->sequence.sequence) {
+ c_rsvd0_ver_v |= RTE_BE16(0x1000);
+ c_rsvd0_ver_m |= RTE_BE16(0x1000);
+ }
+ if (option_m->key.key) {
+ c_rsvd0_ver_v |= RTE_BE16(0x2000);
+ c_rsvd0_ver_m |= RTE_BE16(0x2000);
+ }
+ if (option_m->checksum_rsvd.checksum) {
+ c_rsvd0_ver_v |= RTE_BE16(0x8000);
+ c_rsvd0_ver_m |= RTE_BE16(0x8000);
+ }
+ /*
+ * Hardware parses the GRE optional fields into fixed locations,
+ * so there is no need to adjust the tunnel dword indices.
+ */
+ misc5_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_5);
+ misc5_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_5);
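+ /* tunnel_header_0..3 hold GRE flags/protocol, checksum, key and sequence. */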
+ MLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_0,
+ rte_be_to_cpu_32((c_rsvd0_ver_v | protocol_v << 16) &
+ (c_rsvd0_ver_m | protocol_m << 16)));
+ MLX5_SET(fte_match_set_misc5, misc5_m, tunnel_header_0,
+ rte_be_to_cpu_32(c_rsvd0_ver_m | protocol_m << 16));
+ MLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_1,
+ rte_be_to_cpu_32(option_v->checksum_rsvd.checksum &
+ option_m->checksum_rsvd.checksum));
+ MLX5_SET(fte_match_set_misc5, misc5_m, tunnel_header_1,
+ rte_be_to_cpu_32(option_m->checksum_rsvd.checksum));
+ MLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_2,
+ rte_be_to_cpu_32(option_v->key.key & option_m->key.key));
+ MLX5_SET(fte_match_set_misc5, misc5_m, tunnel_header_2,
+ rte_be_to_cpu_32(option_m->key.key));
+ MLX5_SET(fte_match_set_misc5, misc5_v, tunnel_header_3,
+ rte_be_to_cpu_32(option_v->sequence.sequence &
+ option_m->sequence.sequence));
+ MLX5_SET(fte_match_set_misc5, misc5_m, tunnel_header_3,
+ rte_be_to_cpu_32(option_m->sequence.sequence));
+}
+
/**
* Add NVGRE item to matcher and to the value.
*
/* Don't count both inner and outer flex items in one rule. */
if (mlx5_flex_acquire_index(dev, spec->handle, true) != index)
MLX5_ASSERT(false);
- dev_flow->handle->flex_item |= RTE_BIT32(index);
+ dev_flow->handle->flex_item |= (uint8_t)RTE_BIT32(index);
}
mlx5_flex_flow_translate_item(dev, matcher, key, item, is_inner);
}
tbl_data->tunnel->tunnel_id : 0,
tbl_data->group_id);
}
- mlx5_list_destroy(tbl_data->matchers);
+ if (tbl_data->matchers)
+ mlx5_list_destroy(tbl_data->matchers);
mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], tbl_data->idx);
}
{
const struct mlx5_rte_flow_item_tx_queue *queue_m;
const struct mlx5_rte_flow_item_tx_queue *queue_v;
- void *misc_m =
- MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
- void *misc_v =
- MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+ void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
struct mlx5_txq_ctrl *txq;
uint32_t queue, mask;
txq = mlx5_txq_get(dev, queue_v->queue);
if (!txq)
return;
- if (txq->type == MLX5_TXQ_TYPE_HAIRPIN)
+ if (txq->is_hairpin)
queue = txq->obj->sq->id;
else
queue = txq->obj->sq_obj.sq->id;
fields |= MLX5_IPV6_IBV_RX_HASH;
}
}
- if (fields == 0)
+ if (items & MLX5_FLOW_ITEM_ESP) {
+ if (rss_types & RTE_ETH_RSS_ESP)
+ fields |= IBV_RX_HASH_IPSEC_SPI;
+ }
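+ /* Without an L3 hash selection, keep only the ESP SPI hash (if any). */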
+ if ((fields & ~IBV_RX_HASH_IPSEC_SPI) == 0) {
+ *hash_fields = fields;
/*
* There is no match between the RSS types and the
* L3 protocol (IPv4/IPv6) defined in the flow rule.
*/
return;
+ }
if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
(!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP)) ||
!items) {
uint32_t *hrxq_idx)
{
struct mlx5_flow_handle *dh = dev_flow->handle;
+ uint32_t shared_rss = rss_desc->shared_rss;
struct mlx5_hrxq *hrxq;
MLX5_ASSERT(rss_desc->queue_num);
rss_desc->queue_num = 1;
hrxq = mlx5_hrxq_get(dev, rss_desc);
*hrxq_idx = hrxq ? hrxq->idx : 0;
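+ /* Restore the shared RSS index, which hrxq creation may have modified. */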
+ rss_desc->shared_rss = shared_rss;
return hrxq;
}
};
const struct rte_flow_item *integrity_items[2] = {NULL, NULL};
const struct rte_flow_item *tunnel_item = NULL;
+ const struct rte_flow_item *gre_item = NULL;
if (!wks)
return rte_flow_error_set(error, ENOMEM,
*/
if (action_flags & MLX5_FLOW_ACTION_AGE) {
if ((non_shared_age && count) ||
- !(priv->sh->flow_hit_aso_en &&
- (attr->group || attr->transfer))) {
+ !flow_hit_aso_supported(priv->sh, attr)) {
/* Creates age by counters. */
cnt_act = flow_dv_prepare_counter
(dev, dev_flow,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "item not supported");
switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ESP:
+ flow_dv_translate_item_esp(match_mask, match_value,
+ items, tunnel);
+ last_item = MLX5_FLOW_ITEM_ESP;
+ break;
case RTE_FLOW_ITEM_TYPE_PORT_ID:
flow_dv_translate_item_port_id
(dev, match_mask, match_value, items, attr);
matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GRE;
tunnel_item = items;
+ gre_item = items;
break;
case RTE_FLOW_ITEM_TYPE_GRE_KEY:
flow_dv_translate_item_gre_key(match_mask,
match_value, items);
last_item = MLX5_FLOW_LAYER_GRE_KEY;
break;
+ case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
+ matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
+ last_item = MLX5_FLOW_LAYER_GRE;
+ tunnel_item = items;
+ break;
case RTE_FLOW_ITEM_TYPE_NVGRE:
matcher.priority = MLX5_TUNNEL_PRIO_GET(rss_desc);
last_item = MLX5_FLOW_LAYER_GRE;
case RTE_FLOW_ITEM_TYPE_ICMP:
flow_dv_translate_item_icmp(match_mask, match_value,
items, tunnel);
+ matcher.priority = MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_ICMP;
break;
case RTE_FLOW_ITEM_TYPE_ICMP6:
flow_dv_translate_item_icmp6(match_mask, match_value,
items, tunnel);
+ matcher.priority = MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_ICMP6;
break;
case RTE_FLOW_ITEM_TYPE_TAG:
/*
* When E-Switch mode is enabled, we have two cases where we need to
* set the source port manually.
- * The first one, is in case of Nic steering rule, and the second is
- * E-Switch rule where no port_id item was found. In both cases
- * the source port is set according the current port in use.
+ * The first one is the NIC ingress steering rule, and the second
+ * is the E-Switch rule where no port_id item was found.
+ * In both cases the source port is set according to the current
+ * port in use.
*/
- if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) && priv->sh->esw_mode) {
+ if (!(item_flags & MLX5_FLOW_ITEM_PORT_ID) && priv->sh->esw_mode &&
+ !(attr->egress && !attr->transfer)) {
if (flow_dv_translate_item_port_id(dev, match_mask,
match_value, NULL, attr))
return -rte_errno;
else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
flow_dv_translate_item_nvgre(match_mask, match_value,
tunnel_item, item_flags);
+ else if (tunnel_item->type == RTE_FLOW_ITEM_TYPE_GRE_OPTION)
+ flow_dv_translate_item_gre_option(match_mask, match_value,
+ tunnel_item, gre_item, item_flags);
else
MLX5_ASSERT(false);
}
case MLX5_RSS_HASH_NONE:
hrxqs[6] = hrxq_idx;
return 0;
+ case MLX5_RSS_HASH_IPV4_ESP:
+ hrxqs[7] = hrxq_idx;
+ return 0;
+ case MLX5_RSS_HASH_IPV6_ESP:
+ hrxqs[8] = hrxq_idx;
+ return 0;
+ case MLX5_RSS_HASH_ESP_SPI:
+ hrxqs[9] = hrxq_idx;
+ return 0;
default:
return -1;
}
* @return
* Valid hash RX queue index, otherwise 0.
*/
-static uint32_t
-__flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
- const uint64_t hash_fields)
+uint32_t
+flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
+ const uint64_t hash_fields)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_shared_action_rss *shared_rss =
return hrxqs[5];
case MLX5_RSS_HASH_NONE:
return hrxqs[6];
+ case MLX5_RSS_HASH_IPV4_ESP:
+ return hrxqs[7];
+ case MLX5_RSS_HASH_IPV6_ESP:
+ return hrxqs[8];
+ case MLX5_RSS_HASH_ESP_SPI:
+ return hrxqs[9];
default:
return 0;
}
struct mlx5_hrxq *hrxq = NULL;
uint32_t hrxq_idx;
- hrxq_idx = __flow_dv_action_rss_hrxq_lookup(dev,
+ hrxq_idx = flow_dv_action_rss_hrxq_lookup(dev,
rss_desc->shared_rss,
dev_flow->hash_fields);
if (hrxq_idx)
int index = rte_bsf32(dev_handle->flex_item);
mlx5_flex_release_index(dev, index);
- dev_handle->flex_item &= ~RTE_BIT32(index);
+ dev_handle->flex_item &= ~(uint8_t)RTE_BIT32(index);
}
if (dev_handle->dvh.matcher)
flow_dv_matcher_release(dev, dev_handle);
* MLX5_RSS_HASH_IPV4_DST_ONLY are mutually exclusive so they can share
* the same slot in mlx5_rss_hash_fields.
*
- * @param[in] rss_types
- * RSS type.
+ * @param[in] orig_rss_types
+ * RSS type as provided in shared RSS action.
* @param[in, out] hash_field
* hash_field variable needed to be adjusted.
*
* void
*/
void
-flow_dv_action_rss_l34_hash_adjust(uint64_t rss_types,
+flow_dv_action_rss_l34_hash_adjust(uint64_t orig_rss_types,
uint64_t *hash_field)
{
+ uint64_t rss_types = rte_eth_rss_hf_refine(orig_rss_types);
+
switch (*hash_field & ~IBV_RX_HASH_INNER) {
case MLX5_RSS_HASH_IPV4:
if (rss_types & MLX5_IPV4_LAYER_TYPES) {
struct mlx5_shared_action_rss *shared_rss,
struct rte_flow_error *error)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_rss_desc rss_desc = { 0 };
size_t i;
int err;
/* Set non-zero value to indicate a shared RSS. */
rss_desc.shared_rss = action_idx;
rss_desc.ind_tbl = shared_rss->ind_tbl;
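+ /* With HW steering enabled (dv_flow_en == 2), request an HWS RX action. */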
+ if (priv->sh->config.dv_flow_en == 2)
+ rss_desc.hws_flags = MLX5DR_ACTION_FLAG_HWS_RX;
for (i = 0; i < MLX5_RSS_HASH_FIELDS_LEN; i++) {
struct mlx5_hrxq *hrxq;
uint64_t hash_fields = mlx5_rss_hash_fields[i];
* A valid shared action handle in case of success, NULL otherwise and
* rte_errno is set.
*/
-static struct rte_flow_action_handle *
+struct rte_flow_action_handle *
flow_dv_action_create(struct rte_eth_dev *dev,
const struct rte_flow_indir_action_conf *conf,
const struct rte_flow_action *action,
* @return
* 0 on success, otherwise negative errno value.
*/
-static int
+int
flow_dv_action_destroy(struct rte_eth_dev *dev,
struct rte_flow_action_handle *handle,
struct rte_flow_error *error)
* @return
* 0 on success, otherwise negative errno value.
*/
-static int
+int
flow_dv_action_update(struct rte_eth_dev *dev,
struct rte_flow_action_handle *handle,
const void *update,
(MLX5_MAX_MODIFY_NUM + 1)];
} mhdr_dummy;
struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
- struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
- MLX5_ASSERT(wks);
egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
memset(&dh, 0, sizeof(struct mlx5_flow_handle));
NULL,
"cannot create policy "
"mark action for this color");
- wks->mark = 1;
if (flow_dv_tag_resource_register(dev, tag_be,
&dev_flow, &flow_err))
return -rte_mtr_error_set(error,
act_cnt->rix_mark =
dev_flow.handle->dvh.rix_tag;
action_flags |= MLX5_FLOW_ACTION_MARK;
+ mtr_policy->mark = 1;
break;
}
case RTE_FLOW_ACTION_TYPE_SET_TAG:
act_cnt->next_sub_policy = NULL;
mtr_policy->is_hierarchy = 1;
mtr_policy->dev = next_policy->dev;
+ if (next_policy->mark)
+ mtr_policy->mark = 1;
action_flags |=
MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY;
break;
"counters are not available");
}
-static int
+int
flow_dv_action_query(struct rte_eth_dev *dev,
const struct rte_flow_action_handle *handle, void *data,
struct rte_flow_error *error)
}
}
/* Create sub policy. */
- if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[0]) {
+ if (!mtr_policy->sub_policys[domain][0]->rix_hrxq[RTE_COLOR_GREEN] &&
+ !mtr_policy->sub_policys[domain][0]->rix_hrxq[RTE_COLOR_YELLOW]) {
/* Reuse the first pre-allocated sub_policy. */
sub_policy = mtr_policy->sub_policys[domain][0];
sub_policy_idx = sub_policy->idx;
* @return
* 0 on success, otherwise negative errno value.
*/
-static int
+int
flow_dv_action_validate(struct rte_eth_dev *dev,
const struct rte_flow_indir_action_conf *conf,
const struct rte_flow_action *action,
"Indirect age action not supported");
return flow_dv_validate_action_age(0, action, dev, err);
case RTE_FLOW_ACTION_TYPE_COUNT:
- return flow_dv_validate_action_count(dev, true, 0, err);
+ return flow_dv_validate_action_count(dev, true, 0, NULL, err);
case RTE_FLOW_ACTION_TYPE_CONNTRACK:
if (!priv->sh->ct_aso_en)
return rte_flow_error_set(err, ENOTSUP,
};
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
-