{0, 0, 0},
};
+struct field_modify_info modify_vlan_out_first_vid[] = {
+	/*
+	 * Size of this entry is in BITS (the VLAN VID field is 12 bits wide),
+	 * unlike the byte sizes used by the other modify tables here.
+	 */
+	{12, 0, MLX5_MODI_OUT_FIRST_VID},
+	{0, 0, 0},
+};
+
struct field_modify_info modify_ipv4[] = {
{1, 8, MLX5_MODI_OUT_IPV4_TTL},
{4, 12, MLX5_MODI_OUT_SIPV4},
MLX5_MODIFICATION_TYPE_SET, error);
}
+/**
+ * Convert modify-header set VLAN VID action to DV specification.
+ *
+ * @param[in,out] resource
+ *   Pointer to the modify-header resource.
+ * @param[in] action
+ *   Pointer to action specification.
+ * @param[out] error
+ *   Pointer to the error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_convert_action_modify_vlan_vid
+			(struct mlx5_flow_dv_modify_hdr_resource *resource,
+			 const struct rte_flow_action *action,
+			 struct rte_flow_error *error)
+{
+	const struct rte_flow_action_of_set_vlan_vid *conf =
+		(const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
+	int i = resource->actions_num;
+	/*
+	 * Point at the base of the command array: the new command is written
+	 * at index 'i'.  Taking &resource->actions[i] here and then indexing
+	 * with [i] again would write at offset 2*i, corrupting/overrunning
+	 * the array whenever commands are already queued.
+	 */
+	struct mlx5_modification_cmd *actions = resource->actions;
+	struct field_modify_info *field = modify_vlan_out_first_vid;
+
+	if (i >= MLX5_MODIFY_NUM)
+		return rte_flow_error_set(error, EINVAL,
+			 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+			 "too many items to modify");
+	actions[i].action_type = MLX5_MODIFICATION_TYPE_SET;
+	actions[i].field = field->id;
+	actions[i].length = field->size;
+	actions[i].offset = field->offset;
+	/* Descriptor word must be presented to the device in big-endian. */
+	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
+	/* Place the (big-endian) VID value in the upper 16 bits of data1. */
+	actions[i].data1 = conf->vlan_vid;
+	actions[i].data1 = actions[i].data1 << 16;
+	resource->actions_num = ++i;
+	return 0;
+}
+
/**
* Convert modify-header set TP action to DV specification.
*
const struct rte_flow_item_port_id switch_mask = {
.id = 0xffffffff,
};
- uint16_t esw_domain_id;
- uint16_t item_port_esw_domain_id;
+ struct mlx5_priv *esw_priv;
+ struct mlx5_priv *dev_priv;
int ret;
if (!attr->transfer)
return ret;
if (!spec)
return 0;
- ret = mlx5_port_to_eswitch_info(spec->id, &item_port_esw_domain_id,
- NULL);
- if (ret)
- return rte_flow_error_set(error, -ret,
+ esw_priv = mlx5_port_to_eswitch_info(spec->id);
+ if (!esw_priv)
+ return rte_flow_error_set(error, rte_errno,
RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
"failed to obtain E-Switch info for"
" port");
- ret = mlx5_port_to_eswitch_info(dev->data->port_id,
- &esw_domain_id, NULL);
- if (ret < 0)
- return rte_flow_error_set(error, -ret,
+ dev_priv = mlx5_dev_to_eswitch_info(dev);
+ if (!dev_priv)
+ return rte_flow_error_set(error, rte_errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
"failed to obtain E-Switch info");
- if (item_port_esw_domain_id != esw_domain_id)
- return rte_flow_error_set(error, -ret,
+ if (esw_priv->domain_id != dev_priv->domain_id)
+ return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
"cannot match on a port from a"
" different E-Switch");
return 0;
}
+/**
+ * Validate the set VLAN VID action.
+ *
+ * @param[in] item_flags
+ *   Holds the items detected in this rule.
+ * @param[in] actions
+ *   Pointer to the list of actions remaining in the flow rule.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
+				     const struct rte_flow_action actions[],
+				     struct rte_flow_error *error)
+{
+	const struct rte_flow_action *action = actions;
+	const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;
+
+	/*
+	 * The VID field is 12 bits; 0xFFF is reserved, so 0xFFE is the
+	 * largest legal value.
+	 * NOTE(review): '>' compares big-endian encoded values, which is not
+	 * a numeric ordering for every 16-bit input -- confirm that values
+	 * with garbage upper bits (e.g. 0x1000) are rejected as intended.
+	 */
+	if (conf->vlan_vid > RTE_BE16(0xFFE))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, action,
+					  "VLAN VID value is too big");
+	/* If a push VLAN action follows then it will handle this action */
+	if (mlx5_flow_find_action(actions,
+				  RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN))
+		return 0;
+
+	/*
+	 * Action is on an existing VLAN header:
+	 * Need to verify this is a single modify VID action.
+	 * The rule must include a match on the outer VLAN.
+	 */
+	if (mlx5_flow_find_action(++action,
+				  RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID))
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION, action,
+					  "Multiple VLAN VID modifications are "
+					  "not supported");
+	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, action,
+					  "match on VLAN is required in order "
+					  "to set VLAN VID");
+	return 0;
+}
+
/**
* Validate count action.
*
* Pointer to the jump action.
* @param[in] action_flags
* Holds the actions detected until now.
- * @param[in] group
- * The group of the current flow.
+ * @param[in] attributes
+ * Pointer to flow attributes
+ * @param[in] external
+ * Action belongs to flow rule created by request external to PMD.
* @param[out] error
* Pointer to error structure.
*
static int
flow_dv_validate_action_jump(const struct rte_flow_action *action,
uint64_t action_flags,
- uint32_t group,
- struct rte_flow_error *error)
+ const struct rte_flow_attr *attributes,
+ bool external, struct rte_flow_error *error)
{
+ uint32_t max_group = attributes->transfer ? MLX5_MAX_TABLES_FDB :
+ MLX5_MAX_TABLES;
+ uint32_t target_group, table;
+ int ret = 0;
+
if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
MLX5_FLOW_FATE_ESWITCH_ACTIONS))
return rte_flow_error_set(error, EINVAL,
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
NULL, "action configuration not set");
- if (group >= ((const struct rte_flow_action_jump *)action->conf)->group)
+ target_group =
+ ((const struct rte_flow_action_jump *)action->conf)->group;
+ ret = mlx5_flow_group_to_table(attributes, external, target_group,
+ &table, error);
+ if (ret)
+ return ret;
+ if (table >= max_group)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
+ "target group index out of range");
+ if (attributes->group >= target_group)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "target group must be higher then"
+ "target group must be higher than"
" the current flow group");
return 0;
}
struct rte_flow_error *error)
{
const struct rte_flow_action_port_id *port_id;
+ struct mlx5_priv *act_priv;
+ struct mlx5_priv *dev_priv;
uint16_t port;
- uint16_t esw_domain_id;
- uint16_t act_port_domain_id;
- int ret;
if (!attr->transfer)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"can have only one fate actions in"
" a flow");
- ret = mlx5_port_to_eswitch_info(dev->data->port_id,
- &esw_domain_id, NULL);
- if (ret < 0)
- return rte_flow_error_set(error, -ret,
+ dev_priv = mlx5_dev_to_eswitch_info(dev);
+ if (!dev_priv)
+ return rte_flow_error_set(error, rte_errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
"failed to obtain E-Switch info");
port_id = action->conf;
port = port_id->original ? dev->data->port_id : port_id->id;
- ret = mlx5_port_to_eswitch_info(port, &act_port_domain_id, NULL);
- if (ret)
+ act_priv = mlx5_port_to_eswitch_info(port);
+ if (!act_priv)
return rte_flow_error_set
- (error, -ret,
+ (error, rte_errno,
RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
"failed to obtain E-Switch port id for port");
- if (act_port_domain_id != esw_domain_id)
+ if (act_priv->domain_id != dev_priv->domain_id)
return rte_flow_error_set
- (error, -ret,
+ (error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"port does not belong to"
" E-Switch being configured");
* Pointer to dev struct.
* @param[in] attributes
* Pointer to flow attributes
+ * @param[in] external
+ * This flow rule is created by request external to PMD.
* @param[out] error
* Pointer to error structure.
*
static int
flow_dv_validate_attributes(struct rte_eth_dev *dev,
const struct rte_flow_attr *attributes,
+ bool external __rte_unused,
struct rte_flow_error *error)
{
struct mlx5_priv *priv = dev->data->dev_private;
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
NULL,
- "groups is not supported");
+ "groups are not supported");
+#else
+ uint32_t max_group = attributes->transfer ? MLX5_MAX_TABLES_FDB :
+ MLX5_MAX_TABLES;
+ uint32_t table;
+ int ret;
+
+ ret = mlx5_flow_group_to_table(attributes, external,
+ attributes->group,
+ &table, error);
+ if (ret)
+ return ret;
+ if (table >= max_group)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
+ "group index out of range");
#endif
if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
attributes->priority >= priority_max)
(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
"egress is not supported");
- if (attributes->group >= MLX5_MAX_TABLES_FDB)
- return rte_flow_error_set
- (error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
- NULL, "group must be smaller than "
- RTE_STR(MLX5_MAX_TABLES_FDB));
}
if (!(attributes->egress ^ attributes->ingress))
return rte_flow_error_set(error, ENOTSUP,
* Pointer to the list of items.
* @param[in] actions
* Pointer to the list of actions.
+ * @param[in] external
+ * This flow rule is created by request external to PMD.
* @param[out] error
* Pointer to the error structure.
*
flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
- struct rte_flow_error *error)
+ bool external, struct rte_flow_error *error)
{
int ret;
uint64_t action_flags = 0;
if (items == NULL)
return -1;
- ret = flow_dv_validate_attributes(dev, attr, error);
+ ret = flow_dv_validate_attributes(dev, attr, external, error);
if (ret < 0)
return ret;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
return ret;
last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
break;
+		case RTE_FLOW_ITEM_TYPE_GENEVE:
+			ret = mlx5_flow_validate_item_geneve(items,
+							     item_flags, dev,
+							     error);
+			if (ret < 0)
+				return ret;
+			/* Record the GENEVE tunnel layer, not VXLAN-GPE. */
+			last_item = MLX5_FLOW_LAYER_GENEVE;
+			break;
case RTE_FLOW_ITEM_TYPE_MPLS:
ret = mlx5_flow_validate_item_mpls(dev, items,
item_flags,
return ret;
/* Count PCP with push_vlan command. */
break;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ ret = flow_dv_validate_action_set_vlan_vid
+ (item_flags, actions, error);
+ if (ret < 0)
+ return ret;
+ /* Count VID with push_vlan command. */
+ break;
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
ret = flow_dv_validate_action_l2_encap(action_flags,
case RTE_FLOW_ACTION_TYPE_JUMP:
ret = flow_dv_validate_action_jump(actions,
action_flags,
- attr->group, error);
+ attr, external,
+ error);
if (ret)
return ret;
++actions_n;
vni_v[i] = vni_m[i] & vxlan_v->vni[i];
}
+/**
+ * Add Geneve item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ * @param[in] inner
+ *   Item is inner pattern.
+ */
+
+static void
+flow_dv_translate_item_geneve(void *matcher, void *key,
+			      const struct rte_flow_item *item, int inner)
+{
+	const struct rte_flow_item_geneve *geneve_m = item->mask;
+	const struct rte_flow_item_geneve *geneve_v = item->spec;
+	void *headers_m;
+	void *headers_v;
+	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+	uint16_t dport;
+	uint16_t gbhdr_m;
+	uint16_t gbhdr_v;
+	char *vni_m;
+	char *vni_v;
+	size_t size, i;
+
+	/* Select the inner or outer L2-L4 header block of the matcher. */
+	if (inner) {
+		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+					 inner_headers);
+		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+	} else {
+		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+					 outer_headers);
+		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+	}
+	dport = MLX5_UDP_PORT_GENEVE;
+	/* Force the Geneve UDP port unless a dport match was already set. */
+	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
+		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
+	}
+	/* No spec: nothing to match on beyond the UDP destination port. */
+	if (!geneve_v)
+		return;
+	if (!geneve_m)
+		geneve_m = &rte_flow_item_geneve_mask;
+	/* Copy the mask and the masked VNI value into the matcher. */
+	size = sizeof(geneve_m->vni);
+	vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
+	vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
+	memcpy(vni_m, geneve_m->vni, size);
+	for (i = 0; i < size; ++i)
+		vni_v[i] = vni_m[i] & geneve_v->vni[i];
+	/* Protocol type carried by the Geneve header (host order in PRM). */
+	MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
+		 rte_be_to_cpu_16(geneve_m->protocol));
+	MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
+		 rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
+	/* OAM flag and option length are packed in ver_opt_len_o_c_rsvd0. */
+	gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
+	gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
+	MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
+		 MLX5_GENEVE_OAMF_VAL(gbhdr_m));
+	MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
+		 MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
+	MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
+		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
+	MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
+		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
+		 MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
+}
+
/**
* Add MPLS item to matcher and to the value.
*
}
}
+/**
+ * Add vport metadata Reg C0 item to matcher.
+ *
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] value
+ *   Register value to match on.
+ * @param[in] mask
+ *   Mask of the register bits participating in the match.
+ */
+static void
+flow_dv_translate_item_meta_vport(void *matcher, void *key,
+				  uint32_t value, uint32_t mask)
+{
+	void *misc2_m =
+		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
+	void *misc2_v =
+		MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
+
+	MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, mask);
+	MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, value);
+}
+
/**
* Add source vport match to the specified matcher.
*
{
const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
- uint16_t mask, val, id;
- int ret;
+ struct mlx5_priv *priv;
+ uint16_t mask, id;
mask = pid_m ? pid_m->id : 0xffff;
id = pid_v ? pid_v->id : dev->data->port_id;
- ret = mlx5_port_to_eswitch_info(id, NULL, &val);
- if (ret)
- return ret;
- flow_dv_translate_item_source_vport(matcher, key, val, mask);
+ priv = mlx5_port_to_eswitch_info(id);
+ if (!priv)
+ return -rte_errno;
+ /* Translate to vport field or to metadata, depending on mode. */
+ if (priv->vport_meta_mask)
+ flow_dv_translate_item_meta_vport(matcher, key,
+ priv->vport_meta_tag,
+ priv->vport_meta_mask);
+ else
+ flow_dv_translate_item_source_vport(matcher, key,
+ priv->vport_id, mask);
return 0;
}
struct rte_flow_error *error)
{
uint32_t port;
- uint16_t port_id;
- int ret;
+ struct mlx5_priv *priv;
const struct rte_flow_action_port_id *conf =
(const struct rte_flow_action_port_id *)action->conf;
port = conf->original ? dev->data->port_id : conf->id;
- ret = mlx5_port_to_eswitch_info(port, NULL, &port_id);
- if (ret)
- return rte_flow_error_set(error, -ret,
+ priv = mlx5_port_to_eswitch_info(port);
+ if (!priv)
+ return rte_flow_error_set(error, -rte_errno,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
"No eswitch info was found for port");
- *dst_port_id = port_id;
+ if (priv->vport_meta_mask)
+ *dst_port_id = priv->vport_meta_tag;
+ else
+ *dst_port_id = priv->vport_id;
return 0;
}
struct rte_vlan_hdr vlan = { 0 };
bool vlan_inherited = false;
uint16_t vlan_tci;
+ uint32_t table;
+ int ret = 0;
- flow->group = attr->group;
+ ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group,
+ &table, error);
+ if (ret)
+ return ret;
+ flow->group = table;
if (attr->transfer)
res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
if (priority == MLX5_FLOW_PRIO_RSVD)
flow->counter = flow_dv_counter_alloc(dev,
count->shared,
count->id,
- attr->group);
+ flow->group);
if (flow->counter == NULL)
goto cnt_err;
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.actions[actions_n++] =
dev_flow->dv.push_vlan_res->action;
action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
+ /* Push VLAN command is also handling this VLAN_VID */
+ action_flags &= ~MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
break;
case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
if (!vlan_inherited) {
vlan.vlan_tci |= vlan_tci;
/* Push VLAN command will use this value */
break;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ if (!vlan_inherited) {
+ flow_dev_get_vlan_info_from_items(items, &vlan);
+ vlan_inherited = true;
+ }
+ vlan.vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
+ vlan.vlan_tci |= rte_be_to_cpu_16
+ (((const struct rte_flow_action_of_set_vlan_vid *)
+ actions->conf)->vlan_vid);
+ /* Push VLAN command will use this value */
+ if (mlx5_flow_find_action
+ (actions,
+ RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN))
+ break;
+ /* If no VLAN push - this is a modify header action */
+ if (flow_dv_convert_action_modify_vlan_vid
+ (&res, actions, error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
+ break;
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
if (flow_dv_create_action_l2_encap(dev, actions,
break;
case RTE_FLOW_ACTION_TYPE_JUMP:
jump_data = action->conf;
- tbl = flow_dv_tbl_resource_get(dev, jump_data->group,
+ ret = mlx5_flow_group_to_table(attr, dev_flow->external,
+ jump_data->group, &table,
+ error);
+ if (ret)
+ return ret;
+ tbl = flow_dv_tbl_resource_get(dev, table,
attr->egress,
attr->transfer, error);
if (!tbl)
mlx5_flow_tunnel_ip_check(items, next_protocol,
&item_flags, &tunnel);
flow_dv_translate_item_ipv4(match_mask, match_value,
- items, tunnel, attr->group);
+ items, tunnel, flow->group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
dev_flow->dv.hash_fields |=
mlx5_flow_hashfields_adjust
mlx5_flow_tunnel_ip_check(items, next_protocol,
&item_flags, &tunnel);
flow_dv_translate_item_ipv6(match_mask, match_value,
- items, tunnel, attr->group);
+ items, tunnel, flow->group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
dev_flow->dv.hash_fields |=
mlx5_flow_hashfields_adjust
items, tunnel);
last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
break;
+ case RTE_FLOW_ITEM_TYPE_GENEVE:
+ flow_dv_translate_item_geneve(match_mask, match_value,
+ items, tunnel);
+ last_item = MLX5_FLOW_LAYER_GENEVE;
+ break;
case RTE_FLOW_ITEM_TYPE_MPLS:
flow_dv_translate_item_mpls(match_mask, match_value,
items, last_item, tunnel);
matcher.priority = mlx5_flow_adjust_priority(dev, priority,
matcher.priority);
matcher.egress = attr->egress;
- matcher.group = attr->group;
+ matcher.group = flow->group;
matcher.transfer = attr->transfer;
if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
return -rte_errno;