const struct rte_flow_item *rule_items = items;
const struct rte_flow_item *port_id_item = NULL;
bool def_policy = false;
+ uint16_t udp_dport = 0;
if (items == NULL)
return -1;
ret = mlx5_flow_validate_item_udp(items, item_flags,
next_protocol,
error);
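+ /*
+ * Save the masked UDP destination port now; the VXLAN item
+ * validation below uses it to recognize the tunnel when it
+ * runs on a non-default port.
+ */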
+ const struct rte_flow_item_udp *spec = items->spec;
+ const struct rte_flow_item_udp *mask = items->mask;
+ if (!mask)
+ mask = &rte_flow_item_udp_mask;
+ if (spec != NULL)
+ udp_dport = rte_be_to_cpu_16
+ (spec->hdr.dst_port &
+ mask->hdr.dst_port);
if (ret < 0)
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
MLX5_FLOW_LAYER_OUTER_L4_UDP;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
- ret = mlx5_flow_validate_item_vxlan(dev, items,
- item_flags, attr,
- error);
+ ret = mlx5_flow_validate_item_vxlan(dev, udp_dport,
+ items, item_flags,
+ attr, error);
if (ret < 0)
return ret;
last_item = MLX5_FLOW_LAYER_VXLAN;
MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
}
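+ /*
+ * Re-read the dport actually set in the match value; a non-default
+ * port changes how the VXLAN header is matched below.
+ */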
+ dport = MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport);
if (!vxlan_v)
return;
if (!vxlan_m) {
if ((!attr->group && !priv->sh->tunnel_header_0_1) ||
(attr->group && !priv->sh->misc5_cap))
vxlan_m = &rte_flow_item_vxlan_mask;
else
vxlan_m = &nic_mask;
}
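+ /*
+ * ConnectX-5 steering cannot match the VXLAN header via misc5 on a
+ * non-default UDP port; fall back to the legacy VNI matching in misc.
+ */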
- if ((!attr->group && !attr->transfer && !priv->sh->tunnel_header_0_1) ||
+ if ((priv->sh->steering_format_version ==
+ MLX5_STEERING_LOGIC_FORMAT_CONNECTX_5 &&
+ dport != MLX5_UDP_PORT_VXLAN) ||
+ (!attr->group && !attr->transfer && !priv->sh->tunnel_header_0_1) ||
((attr->group || attr->transfer) && !priv->sh->misc5_cap)) {
void *misc_m;
void *misc_v;
MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
geneve_opt_v->option_len + 1);
}
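+ /* Match on the presence of GENEVE TLV option 0 in both mask and value. */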
+ MLX5_SET(fte_match_set_misc, misc_m, geneve_tlv_option_0_exist, 1);
+ MLX5_SET(fte_match_set_misc, misc_v, geneve_tlv_option_0_exist, 1);
/* Set the data. */
if (geneve_opt_v->data) {
memcpy(&opt_data_key, geneve_opt_v->data,
* Flow matcher value.
* @param[in] item
* Flow pattern to translate.
- * @param[in] samples
- * Sample IDs to be used in the matching.
+ * @param[in] last_item
+ * Last item flags.
*/
static void
flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
- void *key, const struct rte_flow_item *item)
+ void *key, const struct rte_flow_item *item,
+ uint64_t last_item)
{
struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_item_ecpri *ecpri_m = item->mask;
const struct rte_flow_item_ecpri *ecpri_v = item->spec;
void *dw_m;
void *dw_v;
+ /*
+ * In case of eCPRI over Ethernet, if EtherType is not specified,
+ * match on eCPRI EtherType implicitly.
+ */
+ if (last_item & MLX5_FLOW_LAYER_OUTER_L2) {
+ void *hdrs_m, *hdrs_v, *l2m, *l2v;
+
+ hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
+ hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ l2m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, ethertype);
+ l2v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
+ if (*(uint16_t *)l2m == 0 && *(uint16_t *)l2v == 0) {
+ *(uint16_t *)l2m = UINT16_MAX;
+ *(uint16_t *)l2v = RTE_BE16(RTE_ETHER_TYPE_ECPRI);
+ }
+ }
if (!ecpri_v)
return;
if (!ecpri_m)
rss_desc->hash_fields = dev_flow->hash_fields;
rss_desc->tunnel = !!(dh->layers & MLX5_FLOW_LAYER_TUNNEL);
rss_desc->shared_rss = 0;
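+ /* With no hash fields there is nothing to spread; one queue is enough. */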
+ if (rss_desc->hash_fields == 0)
+ rss_desc->queue_num = 1;
*hrxq_idx = mlx5_hrxq_get(dev, rss_desc);
if (!*hrxq_idx)
return NULL;
}
static inline int
-flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx)
+flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx,
+ struct rte_flow_error *error)
{
uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
struct rte_eth_dev *owndev = &rte_eth_devices[owner];
- RTE_SET_USED(dev);
+ int ret;
MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
if (dev->data->dev_started != 1)
- return -1;
- return flow_dv_aso_ct_dev_release(owndev, idx);
+ return rte_flow_error_set(error, EAGAIN,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "Indirect CT action cannot be destroyed when the port is stopped");
+ ret = flow_dv_aso_ct_dev_release(owndev, idx);
+ if (ret < 0)
+ return rte_flow_error_set(error, EAGAIN,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "Current state prevents indirect CT action from being destroyed");
+ return ret;
}
/*
action_flags |= MLX5_FLOW_ACTION_AGE;
break;
case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
- flow->counter = (uint32_t)(uintptr_t)(action->conf);
- cnt_act = flow_dv_counter_get_by_idx(dev, flow->counter,
- NULL);
- __atomic_fetch_add(&cnt_act->shared_info.refcnt, 1,
- __ATOMIC_RELAXED);
- /* Save information first, will apply later. */
- action_flags |= MLX5_FLOW_ACTION_COUNT;
+ cnt_act = flow_dv_counter_get_by_idx(dev,
+ (uint32_t)(uintptr_t)action->conf,
+ NULL);
+ MLX5_ASSERT(cnt_act != NULL);
+ /**
+ * When creating meter drop flow in drop table, the
+ * counter should not overwrite the rte flow counter.
+ */
+ if (attr->group == MLX5_FLOW_TABLE_LEVEL_METER &&
+ dev_flow->dv.table_id == MLX5_MTR_TABLE_ID_DROP) {
+ dev_flow->dv.actions[actions_n++] =
+ cnt_act->action;
+ } else {
+ flow->counter =
+ (uint32_t)(uintptr_t)(action->conf);
+ __atomic_fetch_add(&cnt_act->shared_info.refcnt,
+ 1, __ATOMIC_RELAXED);
+ /* Save information first, will apply later. */
+ action_flags |= MLX5_FLOW_ACTION_COUNT;
+ }
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
if (!dev_conf->devx) {
"cannot create eCPRI parser");
}
flow_dv_translate_item_ecpri(dev, match_mask,
- match_value, items);
+ match_value, items,
+ last_item);
/* No other protocol should follow eCPRI layer. */
last_item = MLX5_FLOW_LAYER_ECPRI;
break;
#ifdef HAVE_MLX5DV_DR
/* DR supports drop action placeholder. */
MLX5_ASSERT(priv->sh->dr_drop_action);
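+ /*
+ * Non-root tables keep the DR drop action; the root table takes
+ * the dedicated root drop action instead.
+ */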
- dv->actions[n++] = priv->sh->dr_drop_action;
+ dv->actions[n++] = dv->group ?
+ priv->sh->dr_drop_action :
+ priv->root_drop_action;
#else
/* For DV we use the explicit drop queue. */
MLX5_ASSERT(priv->drop_queue.hrxq);
}
/* Keep the current age handling by default. */
if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
- flow_dv_aso_ct_release(dev, flow->ct);
+ flow_dv_aso_ct_release(dev, flow->ct, NULL);
else if (flow->age)
flow_dv_aso_age_release(dev, flow->age);
if (flow->geneve_tlv_option) {
" released with references %d.", idx, ret);
return 0;
case MLX5_INDIRECT_ACTION_TYPE_CT:
- ret = flow_dv_aso_ct_release(dev, idx);
+ ret = flow_dv_aso_ct_release(dev, idx, error);
if (ret < 0)
return ret;
if (ret > 0)
}
rte_spinlock_unlock(&mtr_policy->sl);
}
+/**
+ * Check whether the DR drop action is supported on the root table or not.
+ *
+ * Create a simple flow with DR drop action on root table to validate
+ * if DR drop action on root table is supported or not.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ *
+ * @return
+ * 0 on success, a negative value otherwise.
+ */
+int
+mlx5_flow_discover_dr_action_support(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_flow_dv_match_params mask = {
+ .size = sizeof(mask.buf),
+ };
+ struct mlx5_flow_dv_match_params value = {
+ .size = sizeof(value.buf),
+ };
+ struct mlx5dv_flow_matcher_attr dv_attr = {
+ .type = IBV_FLOW_ATTR_NORMAL,
+ .priority = 0,
+ .match_criteria_enable = 0,
+ .match_mask = (void *)&mask,
+ };
+ struct mlx5_flow_tbl_resource *tbl = NULL;
+ void *matcher = NULL;
+ void *flow = NULL;
+ int ret = -1;
+
+ tbl = flow_dv_tbl_resource_get(dev, 0, 0, 0, false, NULL,
+ 0, 0, 0, NULL);
+ if (!tbl)
+ goto err;
+ dv_attr.match_criteria_enable = flow_dv_matcher_enable(mask.buf);
+ __flow_dv_adjust_buf_size(&mask.size, dv_attr.match_criteria_enable);
+ ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
+ &matcher);
+ if (ret)
+ goto err;
+ __flow_dv_adjust_buf_size(&value.size, dv_attr.match_criteria_enable);
+ ret = mlx5_flow_os_create_flow(matcher, (void *)&value, 1,
+ &sh->dr_drop_action, &flow);
+err:
+ /*
+ * If DR drop action is not supported on root table, flow create will
+ * be failed with EOPNOTSUPP or EPROTONOSUPPORT.
+ */
+ if (!flow) {
+ if (matcher &&
+ (errno == EPROTONOSUPPORT || errno == EOPNOTSUPP))
+ DRV_LOG(INFO, "DR drop action is not supported in root table.");
+ else
+ DRV_LOG(ERR, "Unexpected error in DR drop action support detection");
+ ret = -1;
+ } else {
+ claim_zero(mlx5_flow_os_destroy_flow(flow));
+ }
+ if (matcher)
+ claim_zero(mlx5_flow_os_destroy_flow_matcher(matcher));
+ if (tbl)
+ flow_dv_tbl_resource_release(MLX5_SH(dev), tbl);
+ return ret;
+}
/**
* Validate the batch counter support in root table.
*
* Create a simple flow with invalid counter and drop action on root table to
* validate if batch counter with offset on root table is supported or not.
*/
static int
-flow_get_aged_flows(struct rte_eth_dev *dev,
+flow_dv_get_aged_flows(struct rte_eth_dev *dev,
void **context,
uint32_t nb_contexts,
struct rte_flow_error *error)
RTE_MTR_ERROR_TYPE_POLICER_ACTION_GREEN,
NULL,
"Multiple fate actions not supported.");
+ *hierarchy_domain = 0;
while (true) {
fm = mlx5_flow_meter_find(priv, meter_id, NULL);
if (!fm)
"Non termination meter not supported in hierarchy.");
policy = mlx5_flow_meter_policy_find(dev, fm->policy_id, NULL);
MLX5_ASSERT(policy);
- if (!policy->is_hierarchy) {
+ /**
+ * Only inherit the supported domains of the first meter in
+ * hierarchy.
+ * One meter supports at least one domain.
+ */
+ if (!*hierarchy_domain) {
if (policy->transfer)
*hierarchy_domain |=
MLX5_MTR_DOMAIN_TRANSFER_BIT;
if (policy->ingress)
*hierarchy_domain |=
MLX5_MTR_DOMAIN_INGRESS_BIT;
if (policy->egress)
*hierarchy_domain |= MLX5_MTR_DOMAIN_EGRESS_BIT;
+ }
+ if (!policy->is_hierarchy) {
*is_rss = policy->is_rss;
break;
}
return -rte_mtr_error_set(error,
ENOTSUP,
RTE_MTR_ERROR_TYPE_METER_POLICY,
- NULL, flow_err.message ?
- flow_err.message :
- "Meter hierarchy only supports GREEN color.");
+ NULL,
+ "Meter hierarchy only supports GREEN color.");
+ if (*policy_mode != MLX5_MTR_POLICY_MODE_OG)
+ return -rte_mtr_error_set(error,
+ ENOTSUP,
+ RTE_MTR_ERROR_TYPE_METER_POLICY,
+ NULL,
+ "No yellow policy should be provided in meter hierarchy.");
mtr = act->conf;
ret = flow_dv_validate_policy_mtr_hierarchy(dev,
mtr->mtr_id,
* so MARK action is only in ingress domain.
*/
domain_color[i] = MLX5_MTR_DOMAIN_INGRESS_BIT;
- else if (action_flags[i] &
- MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
- domain_color[i] = hierarchy_domain;
else
domain_color[i] = def_domain;
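+ /*
+ * Intersect with the hierarchy domains instead of overriding them,
+ * so the hierarchy can only narrow where the flow is installed.
+ */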
+ if (action_flags[i] &
+ MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
+ domain_color[i] &= hierarchy_domain;
/*
* Non-termination actions only support NIC Tx domain.
* The adjustment should be skipped when there is no
.counter_alloc = flow_dv_counter_allocate,
.counter_free = flow_dv_counter_free,
.counter_query = flow_dv_counter_query,
- .get_aged_flows = flow_get_aged_flows,
+ .get_aged_flows = flow_dv_get_aged_flows,
.action_validate = flow_dv_action_validate,
.action_create = flow_dv_action_create,
.action_destroy = flow_dv_action_destroy,