flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
uint32_t rix_jump);
-static inline uint16_t
-mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
-{
- if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
- return RTE_ETHER_TYPE_TEB;
- else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
- return RTE_ETHER_TYPE_IPV4;
- else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
- return RTE_ETHER_TYPE_IPV6;
- else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
- return RTE_ETHER_TYPE_MPLS;
- return 0;
-}
-
static int16_t
flow_dv_get_esw_manager_vport_id(struct rte_eth_dev *dev)
{
(32 - width)) << off);
} else {
if (data->offset < 16)
- info[idx++] = (struct field_modify_info){2, 4,
+ info[idx++] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_DMAC_15_0};
- info[idx] = (struct field_modify_info){4, 0,
+ info[idx] = (struct field_modify_info){4, off,
MLX5_MODI_OUT_DMAC_47_16};
}
break;
(32 - width)) << off);
} else {
if (data->offset < 16)
- info[idx++] = (struct field_modify_info){2, 4,
+ info[idx++] = (struct field_modify_info){2, 0,
MLX5_MODI_OUT_SMAC_15_0};
- info[idx] = (struct field_modify_info){4, 0,
+ info[idx] = (struct field_modify_info){4, off,
MLX5_MODI_OUT_SMAC_47_16};
}
break;
mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
} else {
if (data->offset < 32)
- info[idx++] = (struct field_modify_info){4, 12,
+ info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SIPV6_31_0};
if (data->offset < 64)
- info[idx++] = (struct field_modify_info){4, 8,
+ info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SIPV6_63_32};
if (data->offset < 96)
- info[idx++] = (struct field_modify_info){4, 4,
+ info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_SIPV6_95_64};
if (data->offset < 128)
info[idx++] = (struct field_modify_info){4, 0,
mask[0] = rte_cpu_to_be_32(0xffffffff >> (32 - width));
} else {
if (data->offset < 32)
- info[idx++] = (struct field_modify_info){4, 12,
+ info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DIPV6_31_0};
if (data->offset < 64)
- info[idx++] = (struct field_modify_info){4, 8,
+ info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DIPV6_63_32};
if (data->offset < 96)
- info[idx++] = (struct field_modify_info){4, 4,
+ info[idx++] = (struct field_modify_info){4, 0,
MLX5_MODI_OUT_DIPV6_95_64};
if (data->offset < 128)
info[idx++] = (struct field_modify_info){4, 0,
if (reg == REG_NON)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
- "unavalable extended metadata register");
+ "unavailable extended metadata register");
if (reg == REG_B)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
if (reg == REG_NON)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, action,
- "unavalable extended metadata register");
+ "unavailable extended metadata register");
if (reg != REG_A && reg != REG_B) {
struct mlx5_priv *priv = dev->data->dev_private;
const struct rte_flow_attr *attributes,
bool external, struct rte_flow_error *error)
{
- uint32_t target_group, table;
+ uint32_t target_group, table = 0;
int ret = 0;
struct flow_grp_info grp_info = {
.external = !!external,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"target group must be other than"
" the current flow group");
+ if (table == 0)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "root table shouldn't be destination");
return 0;
}
* Pointer to rte_eth_dev structure.
* @param[in] action_flags
* Bit-fields that holds the actions detected until now.
+ * @param[in] item_flags
+ * Holds the items detected.
* @param[in] action
* Pointer to the meter action.
* @param[in] attr
* Pointer to error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_ernno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
- uint64_t action_flags,
+ uint64_t action_flags, uint64_t item_flags,
const struct rte_flow_action *action,
const struct rte_flow_attr *attr,
const struct rte_flow_item *port_id_item,
NULL,
"Flow and meter policy "
"have different src port.");
+ } else if (mtr_policy->is_rss) {
+ struct mlx5_flow_meter_policy *fp;
+ struct mlx5_meter_policy_action_container *acg;
+ struct mlx5_meter_policy_action_container *acy;
+ const struct rte_flow_action *rss_act;
+ int ret;
+
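+ /* Validate the RSS action taken from the hierarchy's final policy. */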
+ fp = mlx5_flow_meter_hierarchy_get_final_policy(dev,
+ mtr_policy);
+ if (fp == NULL)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Unable to get the final "
+ "policy in the hierarchy");
+ acg = &fp->act_cnt[RTE_COLOR_GREEN];
+ acy = &fp->act_cnt[RTE_COLOR_YELLOW];
+ MLX5_ASSERT(acg->fate_action ==
+ MLX5_FLOW_FATE_SHARED_RSS ||
+ acy->fate_action ==
+ MLX5_FLOW_FATE_SHARED_RSS);
+ if (acg->fate_action == MLX5_FLOW_FATE_SHARED_RSS)
+ rss_act = acg->rss;
+ else
+ rss_act = acy->rss;
+ ret = mlx5_flow_validate_action_rss(rss_act,
+ action_flags, dev, attr,
+ item_flags, error);
+ if (ret)
+ return ret;
}
*def_policy = false;
}
case RTE_FLOW_ACTION_TYPE_METER:
ret = mlx5_flow_validate_action_meter(dev,
action_flags,
+ item_flags,
actions, attr,
port_id_item,
&def_policy,
* - Explicit decap action is prohibited by the tunnel offload API.
* - Drop action in tunnel steer rule is prohibited by the API.
- * - Application cannot use MARK action because it's value can mask
- * tunnel default miss nitification.
+ * - Application cannot use MARK action because its value can mask
+ * tunnel default miss notification.
* - JUMP in tunnel match rule has no support in current PMD
* implementation.
* - TAG & META are reserved for future uses.
protocol_v = rte_be_to_cpu_16(gre_v->protocol);
if (!protocol_m) {
/* Force next protocol to prevent matchers duplication */
- protocol_m = 0xFFFF;
protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
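+ /* Match on the protocol only when the translation yields an inner type. */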
+ if (protocol_v)
+ protocol_m = 0xFFFF;
}
MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, protocol_m);
MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
m_protocol = vxlan_m->protocol;
v_protocol = vxlan_v->protocol;
if (!m_protocol) {
- m_protocol = 0xff;
/* Force next protocol to ensure next headers parsing. */
if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
v_protocol = RTE_VXLAN_GPE_TYPE_ETH;
v_protocol = RTE_VXLAN_GPE_TYPE_IPV4;
else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
v_protocol = RTE_VXLAN_GPE_TYPE_IPV6;
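+ /* Match on the next protocol only when one is set. */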
+ if (v_protocol)
+ m_protocol = 0xFF;
}
MLX5_SET(fte_match_set_misc3, misc_m,
outer_vxlan_gpe_next_protocol, m_protocol);
protocol_v = rte_be_to_cpu_16(geneve_v->protocol);
if (!protocol_m) {
/* Force next protocol to prevent matchers duplication */
- protocol_m = 0xFFFF;
protocol_v = mlx5_translate_tunnel_etypes(pattern_flags);
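+ /* Match on the protocol only when the translation yields an inner type. */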
+ if (protocol_v)
+ protocol_m = 0xFFFF;
}
MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type, protocol_m);
MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
geneve_opt_v->option_type &&
geneve_opt_resource->length ==
geneve_opt_v->option_len) {
- /* We already have GENVE TLV option obj allocated. */
+ /* We already have GENEVE TLV option obj allocated. */
__atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
__ATOMIC_RELAXED);
} else {
switch (prev_layer) {
case MLX5_FLOW_LAYER_OUTER_L4_UDP:
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
- MLX5_UDP_PORT_MPLS);
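+ /* Keep an explicit UDP destination port match if one is present. */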
+ if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
+ 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+ MLX5_UDP_PORT_MPLS);
+ }
break;
case MLX5_FLOW_LAYER_GRE:
/* Fall-through. */
case MLX5_FLOW_LAYER_GRE_KEY:
- MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
- MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
- RTE_ETHER_TYPE_MPLS);
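+ /* Keep an explicit GRE protocol match if one is present. */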
+ if (!MLX5_GET16(fte_match_set_misc, misc_v, gre_protocol)) {
+ MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
+ 0xffff);
+ MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
+ RTE_ETHER_TYPE_MPLS);
+ }
break;
default:
break;
* Check flow matching criteria first, subtract misc5/4 length if flow
* doesn't own misc5/4 parameters. In some old rdma-core releases,
* misc5/4 are not supported, and matcher creation failure is expected
- * w/o subtration. If misc5 is provided, misc4 must be counted in since
+ * w/o subtraction. If misc5 is provided, misc4 must be counted in since
* misc5 is right after misc4.
*/
if (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) {
goto error;
}
}
- /* create a dest array actioin */
+ /* create a dest array action */
ret = mlx5_os_flow_dr_create_flow_action_dest_array
(domain,
resource->num_of_dest,
(((const struct rte_flow_action_mark *)
(sub_actions->conf))->id);
- dev_flow->handle->mark = 1;
+ wks->mark = 1;
pre_rix = dev_flow->handle->dvh.rix_tag;
/* Save the mark resource before sample */
pre_r = dev_flow->dv.tag_resource;
break;
case RTE_FLOW_ACTION_TYPE_FLAG:
action_flags |= MLX5_FLOW_ACTION_FLAG;
- dev_flow->handle->mark = 1;
+ wks->mark = 1;
if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
struct rte_flow_action_mark mark = {
.id = MLX5_FLOW_MARK_DEFAULT,
break;
case RTE_FLOW_ACTION_TYPE_MARK:
action_flags |= MLX5_FLOW_ACTION_MARK;
- dev_flow->handle->mark = 1;
+ wks->mark = 1;
if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
const struct rte_flow_action_mark *mark =
(const struct rte_flow_action_mark *)
size_t i;
int err;
- if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl)) {
+ if (mlx5_ind_table_obj_setup(dev, shared_rss->ind_tbl,
+ !!dev->data->dev_started)) {
return rte_flow_error_set(error, rte_errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"cannot setup indirection table");
error_hrxq_new:
err = rte_errno;
__flow_dv_action_rss_hrxqs_release(dev, shared_rss);
- if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true))
+ if (!mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true, true))
shared_rss->ind_tbl = NULL;
rte_errno = err;
return -rte_errno;
NULL,
"shared rss hrxq has references");
queue = shared_rss->ind_tbl->queues;
- remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true);
+ remaining = mlx5_ind_table_obj_release(dev, shared_rss->ind_tbl, true,
+ !!dev->data->dev_started);
if (remaining)
return rte_flow_error_set(error, EBUSY,
RTE_FLOW_ERROR_TYPE_ACTION,
void *queue = NULL;
uint16_t *queue_old = NULL;
uint32_t queue_size = action_conf->queue_num * sizeof(uint16_t);
+ bool dev_started = !!dev->data->dev_started;
if (!shared_rss)
return rte_flow_error_set(error, EINVAL,
rte_spinlock_lock(&shared_rss->action_rss_sl);
queue_old = shared_rss->ind_tbl->queues;
ret = mlx5_ind_table_obj_modify(dev, shared_rss->ind_tbl,
- queue, action_conf->queue_num, true);
+ queue, action_conf->queue_num,
+ true /* standalone */,
+ dev_started /* ref_new_qs */,
+ dev_started /* deref_old_qs */);
if (ret) {
mlx5_free(queue);
ret = rte_flow_error_set(error, rte_errno,
(MLX5_MAX_MODIFY_NUM + 1)];
} mhdr_dummy;
struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+ MLX5_ASSERT(wks);
egress = (domain == MLX5_MTR_DOMAIN_EGRESS) ? 1 : 0;
transfer = (domain == MLX5_MTR_DOMAIN_TRANSFER) ? 1 : 0;
memset(&dh, 0, sizeof(struct mlx5_flow_handle));
NULL,
"cannot create policy "
"mark action for this color");
- dev_flow.handle->mark = 1;
+ wks->mark = 1;
if (flow_dv_tag_resource_register(dev, tag_be,
&dev_flow, &flow_err))
return -rte_mtr_error_set(error,
struct mlx5_meter_policy_action_container *act_cnt;
uint32_t domain = MLX5_MTR_DOMAIN_INGRESS;
uint16_t sub_policy_num;
+ struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
+ MLX5_ASSERT(wks);
rte_spinlock_lock(&mtr_policy->sl);
for (i = 0; i < MLX5_MTR_RTE_COLORS; i++) {
if (!rss_desc[i])
if (act_cnt->rix_mark || act_cnt->modify_hdr) {
memset(&dh, 0, sizeof(struct mlx5_flow_handle));
if (act_cnt->rix_mark)
- dh.mark = 1;
+ wks->mark = 1;
dh.fate_action = MLX5_FLOW_FATE_QUEUE;
dh.rix_hrxq = hrxq_idx[i];
flow_drv_rxq_flags_set(dev, &dh);