#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif
-#define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_flow_item_eth) + \
- sizeof(struct rte_flow_item_ipv4))
/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
* Pointer to item specification.
* @param[out] attr
* Pointer to flow attributes structure.
+ * @param[in] dev_flow
+ * Pointer to the sub flow.
+ * @param[in] tunnel_decap
+ * Whether action is after tunnel decapsulation.
*/
static void
-flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
+flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr,
+ struct mlx5_flow *dev_flow, bool tunnel_decap)
{
+ uint64_t layers = dev_flow->handle->layers;
+
+ /*
+ * If layers is already initialized, this dev_flow is the suffix
+ * flow and the layer flags were set by the prefix flow. Use the
+ * prefix flow's layer flags, since the suffix flow may not carry
+ * the user-defined items after the flow is split.
+ */
+ if (layers) {
+ if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
+ attr->ipv4 = 1;
+ else if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV6)
+ attr->ipv6 = 1;
+ if (layers & MLX5_FLOW_LAYER_OUTER_L4_TCP)
+ attr->tcp = 1;
+ else if (layers & MLX5_FLOW_LAYER_OUTER_L4_UDP)
+ attr->udp = 1;
+ attr->valid = 1;
+ return;
+ }
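+ /*
+ * Illustrative example (editor's note): after a meter or Q/RSS
+ * split, a prefix flow matching eth/ipv4/udp leaves
+ * MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L4_UDP in
+ * handle->layers, so the suffix flow entering here gets
+ * attr->ipv4 = attr->udp = 1 without re-parsing the items.
+ */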
for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ uint8_t next_protocol = 0xff;
switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ case RTE_FLOW_ITEM_TYPE_GENEVE:
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ if (tunnel_decap)
+ attr->attr = 0;
+ break;
case RTE_FLOW_ITEM_TYPE_IPV4:
if (!attr->ipv6)
attr->ipv4 = 1;
+ if (item->spec != NULL && item->mask != NULL &&
+ ((const struct rte_flow_item_ipv4 *)
+ item->mask)->hdr.next_proto_id)
+ next_protocol =
+ ((const struct rte_flow_item_ipv4 *)
+ (item->spec))->hdr.next_proto_id &
+ ((const struct rte_flow_item_ipv4 *)
+ (item->mask))->hdr.next_proto_id;
+ if ((next_protocol == IPPROTO_IPIP ||
+ next_protocol == IPPROTO_IPV6) && tunnel_decap)
+ attr->attr = 0;
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
if (!attr->ipv4)
attr->ipv6 = 1;
+ if (item->spec != NULL && item->mask != NULL &&
+ ((const struct rte_flow_item_ipv6 *)
+ item->mask)->hdr.proto)
+ next_protocol =
+ ((const struct rte_flow_item_ipv6 *)
+ (item->spec))->hdr.proto &
+ ((const struct rte_flow_item_ipv6 *)
+ (item->mask))->hdr.proto;
+ if ((next_protocol == IPPROTO_IPIP ||
+ next_protocol == IPPROTO_IPV6) && tunnel_decap)
+ attr->attr = 0;
break;
case RTE_FLOW_ITEM_TYPE_UDP:
if (!attr->tcp)
off_b - __builtin_clz(mask);
MLX5_ASSERT(size_b);
size_b = size_b == sizeof(uint32_t) * CHAR_BIT ? 0 : size_b;
- actions[i].action_type = type;
- actions[i].field = field->id;
- actions[i].offset = off_b;
- actions[i].length = size_b;
+ actions[i] = (struct mlx5_modification_cmd) {
+ .action_type = type,
+ .field = field->id,
+ .offset = off_b,
+ .length = size_b,
+ };
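+ /*
+ * Editor's note: the compound literal zero-initializes every
+ * member not listed explicitly (a C guarantee), so no stale bits
+ * can leak into data0 before the big-endian conversion below.
+ */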
/* Convert entire record to expected big-endian format. */
actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
if (type == MLX5_MODIFICATION_TYPE_COPY) {
const struct rte_flow_action_of_set_vlan_vid *conf =
(const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
int i = resource->actions_num;
- struct mlx5_modification_cmd *actions = &resource->actions[i];
+ struct mlx5_modification_cmd *actions = resource->actions;
struct field_modify_info *field = modify_vlan_out_first_vid;
if (i >= MLX5_MAX_MODIFY_NUM)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"too many items to modify");
- actions[i].action_type = MLX5_MODIFICATION_TYPE_SET;
- actions[i].field = field->id;
- actions[i].length = field->size;
- actions[i].offset = field->offset;
+ actions[i] = (struct mlx5_modification_cmd) {
+ .action_type = MLX5_MODIFICATION_TYPE_SET,
+ .field = field->id,
+ .length = field->size,
+ .offset = field->offset,
+ };
actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
actions[i].data1 = conf->vlan_vid;
actions[i].data1 = actions[i].data1 << 16;
* Pointer to rte_flow_item objects list.
* @param[in] attr
* Pointer to flow attributes structure.
+ * @param[in] dev_flow
+ * Pointer to the sub flow.
+ * @param[in] tunnel_decap
+ * Whether action is after tunnel decapsulation.
* @param[out] error
* Pointer to the error structure.
*
(struct mlx5_flow_dv_modify_hdr_resource *resource,
const struct rte_flow_action *action,
const struct rte_flow_item *items,
- union flow_dv_attr *attr,
- struct rte_flow_error *error)
+ union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
+ bool tunnel_decap, struct rte_flow_error *error)
{
const struct rte_flow_action_set_tp *conf =
(const struct rte_flow_action_set_tp *)(action->conf);
struct field_modify_info *field;
if (!attr->valid)
- flow_dv_attr_init(items, attr);
+ flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
if (attr->udp) {
memset(&udp, 0, sizeof(udp));
memset(&udp_mask, 0, sizeof(udp_mask));
item.spec = &udp;
item.mask = &udp_mask;
field = modify_udp;
- }
- if (attr->tcp) {
+ } else {
+ MLX5_ASSERT(attr->tcp);
memset(&tcp, 0, sizeof(tcp));
memset(&tcp_mask, 0, sizeof(tcp_mask));
if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
* Pointer to rte_flow_item objects list.
* @param[in] attr
* Pointer to flow attributes structure.
+ * @param[in] dev_flow
+ * Pointer to the sub flow.
+ * @param[in] tunnel_decap
+ * Whether action is after tunnel decapsulation.
* @param[out] error
* Pointer to the error structure.
*
(struct mlx5_flow_dv_modify_hdr_resource *resource,
const struct rte_flow_action *action,
const struct rte_flow_item *items,
- union flow_dv_attr *attr,
- struct rte_flow_error *error)
+ union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
+ bool tunnel_decap, struct rte_flow_error *error)
{
const struct rte_flow_action_set_ttl *conf =
(const struct rte_flow_action_set_ttl *)(action->conf);
struct field_modify_info *field;
if (!attr->valid)
- flow_dv_attr_init(items, attr);
+ flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
if (attr->ipv4) {
memset(&ipv4, 0, sizeof(ipv4));
memset(&ipv4_mask, 0, sizeof(ipv4_mask));
item.spec = &ipv4;
item.mask = &ipv4_mask;
field = modify_ipv4;
- }
- if (attr->ipv6) {
+ } else {
+ MLX5_ASSERT(attr->ipv6);
memset(&ipv6, 0, sizeof(ipv6));
memset(&ipv6_mask, 0, sizeof(ipv6_mask));
ipv6.hdr.hop_limits = conf->ttl_value;
* Pointer to rte_flow_item objects list.
* @param[in] attr
* Pointer to flow attributes structure.
+ * @param[in] dev_flow
+ * Pointer to the sub flow.
+ * @param[in] tunnel_decap
+ * Whether action is after tunnel decapsulation.
* @param[out] error
* Pointer to the error structure.
*
flow_dv_convert_action_modify_dec_ttl
(struct mlx5_flow_dv_modify_hdr_resource *resource,
const struct rte_flow_item *items,
- union flow_dv_attr *attr,
- struct rte_flow_error *error)
+ union flow_dv_attr *attr, struct mlx5_flow *dev_flow,
+ bool tunnel_decap, struct rte_flow_error *error)
{
struct rte_flow_item item;
struct rte_flow_item_ipv4 ipv4;
struct field_modify_info *field;
if (!attr->valid)
- flow_dv_attr_init(items, attr);
+ flow_dv_attr_init(items, attr, dev_flow, tunnel_decap);
if (attr->ipv4) {
memset(&ipv4, 0, sizeof(ipv4));
memset(&ipv4_mask, 0, sizeof(ipv4_mask));
item.spec = &ipv4;
item.mask = &ipv4_mask;
field = modify_ipv4;
- }
- if (attr->ipv6) {
+ } else {
+ MLX5_ASSERT(attr->ipv6);
memset(&ipv6, 0, sizeof(ipv6));
memset(&ipv6_mask, 0, sizeof(ipv6_mask));
ipv6.hdr.hop_limits = 0xFF;
"too many items to modify");
MLX5_ASSERT(conf->id != REG_NONE);
MLX5_ASSERT(conf->id < RTE_DIM(reg_to_field));
- actions[i].action_type = MLX5_MODIFICATION_TYPE_SET;
- actions[i].field = reg_to_field[conf->id];
+ actions[i] = (struct mlx5_modification_cmd) {
+ .action_type = MLX5_MODIFICATION_TYPE_SET,
+ .field = reg_to_field[conf->id],
+ };
actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
actions[i].data1 = rte_cpu_to_be_32(conf->data);
++i;
"mark id exceeds the limit");
if (!mask)
mask = &nic_mask;
+ if (!mask->id)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
+ "mask cannot be zero");
+
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_mark),
RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
item->spec,
"data cannot be empty");
- if (!spec->data)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
- "data cannot be zero");
if (config->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
if (!mlx5_flow_ext_mreg_supported(dev))
return rte_flow_error_set(error, ENOTSUP,
}
if (!mask)
mask = &rte_flow_item_meta_mask;
+ if (!mask->data)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
+ "mask cannot be zero");
+
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_meta),
"data cannot be empty");
if (!mask)
mask = &rte_flow_item_tag_mask;
+ if (!mask->data)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
+ "mask cannot be zero");
+
ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
(const uint8_t *)&nic_mask,
sizeof(struct rte_flow_item_tag),
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
- struct mlx5_priv *priv = dev->data->dev_private;
+ const struct mlx5_priv *priv = dev->data->dev_private;
(void)action;
(void)attr;
RTE_FLOW_ERROR_TYPE_ACTION, action,
"wrong action order, port_id should "
"be after pop VLAN action");
+ if (!attr->transfer && priv->representor)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "pop vlan action for VF representor "
+ "not supported on NIC table");
return 0;
}
if (items == NULL)
return;
- for (; items->type != RTE_FLOW_ITEM_TYPE_END &&
- items->type != RTE_FLOW_ITEM_TYPE_VLAN; items++)
- ;
- if (items->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ int type = items->type;
+
+ if (type == RTE_FLOW_ITEM_TYPE_VLAN ||
+ type == MLX5_RTE_FLOW_ITEM_TYPE_VLAN)
+ break;
+ }
+ if (items->type != RTE_FLOW_ITEM_TYPE_END) {
const struct rte_flow_item_vlan *vlan_m = items->mask;
const struct rte_flow_item_vlan *vlan_v = items->spec;
/* Only full match values are accepted */
if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
- vlan->vlan_tci &= MLX5DV_FLOW_VLAN_PCP_MASK;
+ vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
vlan->vlan_tci |=
rte_be_to_cpu_16(vlan_v->tci &
MLX5DV_FLOW_VLAN_PCP_MASK_BE);
/**
* Validate the push VLAN action.
*
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
* @param[in] action_flags
* Holds the actions detected until now.
* @param[in] item_flags
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-flow_dv_validate_action_push_vlan(uint64_t action_flags,
+flow_dv_validate_action_push_vlan(struct rte_eth_dev *dev,
+ uint64_t action_flags,
uint64_t item_flags __rte_unused,
const struct rte_flow_action *action,
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;
+ const struct mlx5_priv *priv = dev->data->dev_private;
- if (attr->ingress)
+ if (!attr->transfer && attr->ingress)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
NULL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"wrong action order, port_id should "
"be after push VLAN");
+ if (!attr->transfer && priv->representor)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "push vlan action for VF representor "
+ "not supported on NIC table");
(void)attr;
return 0;
}
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"meta data must be within reg C0");
- if (!(conf->data & conf->mask))
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, action,
- "zero value has no effect");
return 0;
}
/**
* Validate the L2 encap action.
*
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
* @param[in] action_flags
* Holds the actions detected until now.
* @param[in] action
* Pointer to the action structure.
* @param[in] attr
- * Pointer to flow attributes
+ * Pointer to flow attributes.
* @param[out] error
* Pointer to error structure.
*
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-flow_dv_validate_action_l2_encap(uint64_t action_flags,
+flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
+ uint64_t action_flags,
const struct rte_flow_action *action,
const struct rte_flow_attr *attr,
struct rte_flow_error *error)
{
+ const struct mlx5_priv *priv = dev->data->dev_private;
+
if (!(action->conf))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, action,
"configuration cannot be null");
- if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
+ if (action_flags & MLX5_FLOW_ACTION_ENCAP)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "can only have a single encap or"
- " decap action in a flow");
- if (!attr->transfer && attr->ingress)
+ "can only have a single encap action "
+ "in a flow");
+ if (!attr->transfer && priv->representor)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
- NULL,
- "encap action not supported for "
- "ingress");
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "encap action for VF representor "
+ "not supported on NIC table");
return 0;
}
/**
- * Validate the L2 decap action.
+ * Validate a decap action.
*
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
* @param[in] action_flags
* Holds the actions detected until now.
* @param[in] attr
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-flow_dv_validate_action_l2_decap(uint64_t action_flags,
- const struct rte_flow_attr *attr,
- struct rte_flow_error *error)
+flow_dv_validate_action_decap(struct rte_eth_dev *dev,
+ uint64_t action_flags,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
{
- if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
- return rte_flow_error_set(error, EINVAL,
+ const struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (action_flags & MLX5_FLOW_XCAP_ACTIONS)
+ return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "can only have a single encap or"
- " decap action in a flow");
+ action_flags &
+ MLX5_FLOW_ACTION_DECAP ? "can only "
+ "have a single decap action" : "decap "
+ "after encap is not supported");
if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
NULL,
"decap action not supported for "
"egress");
- return 0;
-}
-
-/**
- * Validate the raw encap action.
- *
- * @param[in] action_flags
- * Holds the actions detected until now.
- * @param[in] action
- * Pointer to the encap action.
- * @param[in] attr
- * Pointer to flow attributes
- * @param[out] error
- * Pointer to error structure.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-flow_dv_validate_action_raw_encap(uint64_t action_flags,
- const struct rte_flow_action *action,
- const struct rte_flow_attr *attr,
- struct rte_flow_error *error)
-{
- const struct rte_flow_action_raw_encap *raw_encap =
- (const struct rte_flow_action_raw_encap *)action->conf;
- if (!(action->conf))
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, action,
- "configuration cannot be null");
- if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "can only have a single encap"
- " action in a flow");
- /* encap without preceding decap is not supported for ingress */
- if (!attr->transfer && attr->ingress &&
- !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
+ if (!attr->transfer && priv->representor)
return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
- NULL,
- "encap action not supported for "
- "ingress");
- if (!raw_encap->size || !raw_encap->data)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, action,
- "raw encap data cannot be empty");
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "decap action for VF representor "
+ "not supported on NIC table");
return 0;
}
+static const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
+
/**
- * Validate the raw decap action.
+ * Validate the raw encap and decap actions.
*
- * @param[in] action_flags
- * Holds the actions detected until now.
- * @param[in] action
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] decap
+ * Pointer to the decap action.
+ * @param[in] encap
* Pointer to the encap action.
* @param[in] attr
* Pointer to flow attributes
+ * @param[in, out] action_flags
+ * Holds the actions detected until now.
+ * @param[out] actions_n
+ * Pointer to the actions counter.
* @param[out] error
* Pointer to error structure.
*
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-flow_dv_validate_action_raw_decap(uint64_t action_flags,
- const struct rte_flow_action *action,
- const struct rte_flow_attr *attr,
- struct rte_flow_error *error)
-{
- const struct rte_flow_action_raw_decap *decap = action->conf;
+flow_dv_validate_action_raw_encap_decap
+ (struct rte_eth_dev *dev,
+ const struct rte_flow_action_raw_decap *decap,
+ const struct rte_flow_action_raw_encap *encap,
+ const struct rte_flow_attr *attr, uint64_t *action_flags,
+ int *actions_n, struct rte_flow_error *error)
+{
+ const struct mlx5_priv *priv = dev->data->dev_private;
+ int ret;
- if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
+ if (encap && (!encap->size || !encap->data))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "can't have encap action before"
- " decap action");
- if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, NULL,
- "can only have a single decap"
- " action in a flow");
- /* decap action is valid on egress only if it is followed by encap */
- if (attr->egress && decap &&
- decap->size > MLX5_ENCAPSULATION_DECISION_SIZE) {
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
- NULL, "decap action not supported"
- " for egress");
- } else if (decap && decap->size > MLX5_ENCAPSULATION_DECISION_SIZE &&
- (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)) {
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- NULL,
- "can't have decap action "
- "after modify action");
+ "raw encap data cannot be empty");
+ if (decap && encap) {
+ if (decap->size <= MLX5_ENCAPSULATION_DECISION_SIZE &&
+ encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
+ /* L3 encap. */
+ decap = NULL;
+ else if (encap->size <=
+ MLX5_ENCAPSULATION_DECISION_SIZE &&
+ decap->size >
+ MLX5_ENCAPSULATION_DECISION_SIZE)
+ /* L3 decap. */
+ encap = NULL;
+ else if (encap->size >
+ MLX5_ENCAPSULATION_DECISION_SIZE &&
+ decap->size >
+ MLX5_ENCAPSULATION_DECISION_SIZE)
+ /* 2 L2 actions: encap and decap. */
+ ;
+ else
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "unsupported too small "
+ "raw decap and too small raw "
+ "encap combination");
+ }
+ if (decap) {
+ ret = flow_dv_validate_action_decap(dev, *action_flags, attr,
+ error);
+ if (ret < 0)
+ return ret;
+ *action_flags |= MLX5_FLOW_ACTION_DECAP;
+ ++(*actions_n);
+ }
+ if (encap) {
+ if (encap->size <= MLX5_ENCAPSULATION_DECISION_SIZE)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "small raw encap size");
+ if (*action_flags & MLX5_FLOW_ACTION_ENCAP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "more than one encap action");
+ if (!attr->transfer && priv->representor)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "encap action for VF representor "
+ "not supported on NIC table");
+ *action_flags |= MLX5_FLOW_ACTION_ENCAP;
+ ++(*actions_n);
}
return 0;
}
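+
+/*
+ * Illustrative sketch (editor's addition, hypothetical values): under the
+ * decision-size rule above, a raw decap not larger than
+ * MLX5_ENCAPSULATION_DECISION_SIZE followed by a larger raw encap is folded
+ * into a single L3 encap, e.g.:
+ *
+ *	struct rte_flow_action_raw_decap decap = { .size = 14 };
+ *	struct rte_flow_action_raw_encap encap = {
+ *		.data = l3_tunnel_hdr, .size = sizeof(l3_tunnel_hdr),
+ *	};
+ *	struct rte_flow_action actions[] = {
+ *		{ .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP, .conf = &decap },
+ *		{ .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &encap },
+ *		{ .type = RTE_FLOW_ACTION_TYPE_END },
+ *	};
+ *
+ * l3_tunnel_hdr is a hypothetical buffer with the replacement headers; only
+ * MLX5_FLOW_ACTION_ENCAP is set and a single action slot is counted.
+ */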
struct mlx5_flow_dv_encap_decap_resource *cache_resource;
struct mlx5dv_dr_domain *domain;
- resource->flags = dev_flow->group ? 0 : 1;
+ resource->flags = dev_flow->dv.group ? 0 : 1;
if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
domain = sh->fdb_domain;
else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.encap_decap = cache_resource;
+ dev_flow->handle->dvh.encap_decap = cache_resource;
return 0;
}
}
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
- dev_flow->dv.encap_decap = cache_resource;
+ dev_flow->handle->dvh.encap_decap = cache_resource;
DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
(void *)&tbl_data->jump, cnt);
}
rte_atomic32_inc(&tbl_data->jump.refcnt);
- dev_flow->dv.jump = &tbl_data->jump;
+ dev_flow->handle->dvh.jump = &tbl_data->jump;
return 0;
}
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.port_id_action = cache_resource;
+ dev_flow->handle->dvh.port_id_action = cache_resource;
return 0;
}
}
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
- dev_flow->dv.port_id_action = cache_resource;
+ dev_flow->handle->dvh.port_id_action = cache_resource;
DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.push_vlan_res = cache_resource;
+ dev_flow->handle->dvh.push_vlan_res = cache_resource;
return 0;
}
}
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next);
- dev_flow->dv.push_vlan_res = cache_resource;
+ dev_flow->handle->dvh.push_vlan_res = cache_resource;
DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
NULL, "action configuration not set");
- if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
+ if (action_flags & MLX5_FLOW_ACTION_ENCAP)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"can't have encap action before"
struct rte_flow_error *error)
{
int ret = 0;
+ uint64_t layer;
ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
if (!ret) {
- if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
+ layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
+ MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ if (!(item_flags & layer))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
struct rte_flow_error *error)
{
int ret = 0;
+ uint64_t layer;
ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
if (!ret) {
- if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
+ layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
+ MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ if (!(item_flags & layer))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
struct rte_flow_error *error)
{
int ret = 0;
+ uint64_t layer;
ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
if (!ret) {
- if (!(item_flags & MLX5_FLOW_LAYER_L4))
+ layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
+ MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4;
+ if (!(item_flags & layer))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "no transport layer "
struct rte_flow_error *error)
{
int ret = 0;
+ uint64_t layer;
ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
if (!ret) {
- if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
+ layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
+ MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ if (!(item_flags & layer))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "no TCP item in"
struct rte_flow_error *error)
{
int ret = 0;
+ uint64_t layer;
ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
if (!ret) {
- if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
+ layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
+ MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ if (!(item_flags & layer))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL, "no TCP item in"
struct rte_flow_error *error)
{
int ret = 0;
+ uint64_t layer;
ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
if (!ret) {
- if (!(item_flags & MLX5_FLOW_LAYER_L3))
+ layer = (action_flags & MLX5_FLOW_ACTION_DECAP) ?
+ MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3;
+ if (!(item_flags & layer))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
NULL,
struct mlx5dv_dr_domain *ns;
uint32_t actions_len;
- resource->flags =
- dev_flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
+ resource->flags = dev_flow->dv.group ? 0 :
+ MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
if (resource->actions_num > flow_dv_modify_hdr_action_max(dev,
resource->flags))
return rte_flow_error_set(error, EOVERFLOW,
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.modify_hdr = cache_resource;
+ dev_flow->handle->dvh.modify_hdr = cache_resource;
return 0;
}
}
rte_atomic32_init(&cache_resource->refcnt);
rte_atomic32_inc(&cache_resource->refcnt);
LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
- dev_flow->dv.modify_hdr = cache_resource;
+ dev_flow->handle->dvh.modify_hdr = cache_resource;
DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
mkey_attr.pg_access = 0;
mkey_attr.klm_array = NULL;
mkey_attr.klm_num = 0;
+ mkey_attr.relaxed_ordering = 1;
mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
if (!mem_mng->dm) {
mlx5_glue->devx_umem_dereg(mem_mng->umem);
int actions_n = 0;
uint8_t item_ipv6_proto = 0;
const struct rte_flow_item *gre_item = NULL;
- struct rte_flow_item_tcp nic_tcp_mask = {
+ const struct rte_flow_action_raw_decap *decap;
+ const struct rte_flow_action_raw_encap *encap;
+ const struct rte_flow_action_rss *rss;
+ const struct rte_flow_item_tcp nic_tcp_mask = {
.hdr = {
.tcp_flags = 0xFF,
.src_port = RTE_BE16(UINT16_MAX),
.dst_port = RTE_BE16(UINT16_MAX),
}
};
+ const struct rte_flow_item_ipv4 nic_ipv4_mask = {
+ .hdr = {
+ .src_addr = RTE_BE32(0xffffffff),
+ .dst_addr = RTE_BE32(0xffffffff),
+ .type_of_service = 0xff,
+ .next_proto_id = 0xff,
+ .time_to_live = 0xff,
+ },
+ };
+ const struct rte_flow_item_ipv6 nic_ipv6_mask = {
+ .hdr = {
+ .src_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .dst_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .vtc_flow = RTE_BE32(0xffffffff),
+ .proto = 0xff,
+ .hop_limits = 0xff,
+ },
+ };
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_config *dev_conf = &priv->config;
+ uint16_t queue_index = 0xFFFF;
if (items == NULL)
return -1;
&item_flags, &tunnel);
ret = mlx5_flow_validate_item_ipv4(items, item_flags,
last_item,
- ether_type, NULL,
+ ether_type,
+ &nic_ipv4_mask,
error);
if (ret < 0)
return ret;
&item_flags, &tunnel);
ret = mlx5_flow_validate_item_ipv6(items, item_flags,
last_item,
- ether_type, NULL,
+ ether_type,
+ &nic_ipv6_mask,
error);
if (ret < 0)
return ret;
error);
if (ret < 0)
return ret;
+ item_ipv6_proto = IPPROTO_ICMPV6;
last_item = MLX5_FLOW_LAYER_ICMP6;
break;
case RTE_FLOW_ITEM_TYPE_TAG:
attr, error);
if (ret < 0)
return ret;
+ queue_index = ((const struct rte_flow_action_queue *)
+ (actions->conf))->index;
action_flags |= MLX5_FLOW_ACTION_QUEUE;
++actions_n;
break;
case RTE_FLOW_ACTION_TYPE_RSS:
+ rss = actions->conf;
ret = mlx5_flow_validate_action_rss(actions,
action_flags, dev,
attr, item_flags,
error);
if (ret < 0)
return ret;
+ if (rss != NULL && rss->queue_num)
+ queue_index = rss->queue[0];
action_flags |= MLX5_FLOW_ACTION_RSS;
++actions_n;
break;
++actions_n;
break;
case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
- ret = flow_dv_validate_action_push_vlan(action_flags,
+ ret = flow_dv_validate_action_push_vlan(dev,
+ action_flags,
item_flags,
actions, attr,
error);
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
- ret = flow_dv_validate_action_l2_encap(action_flags,
+ ret = flow_dv_validate_action_l2_encap(dev,
+ action_flags,
actions, attr,
error);
if (ret < 0)
return ret;
- action_flags |= actions->type ==
- RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
- MLX5_FLOW_ACTION_VXLAN_ENCAP :
- MLX5_FLOW_ACTION_NVGRE_ENCAP;
+ action_flags |= MLX5_FLOW_ACTION_ENCAP;
++actions_n;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
- ret = flow_dv_validate_action_l2_decap(action_flags,
- attr, error);
+ ret = flow_dv_validate_action_decap(dev, action_flags,
+ attr, error);
if (ret < 0)
return ret;
- action_flags |= actions->type ==
- RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
- MLX5_FLOW_ACTION_VXLAN_DECAP :
- MLX5_FLOW_ACTION_NVGRE_DECAP;
+ action_flags |= MLX5_FLOW_ACTION_DECAP;
++actions_n;
break;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
- ret = flow_dv_validate_action_raw_encap(action_flags,
- actions, attr,
- error);
+ ret = flow_dv_validate_action_raw_encap_decap
+ (dev, NULL, actions->conf, attr, &action_flags,
+ &actions_n, error);
if (ret < 0)
return ret;
- action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
- ++actions_n;
break;
case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
- ret = flow_dv_validate_action_raw_decap(action_flags,
- actions, attr,
- error);
+ decap = actions->conf;
+ while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
+ ;
+ if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
+ encap = NULL;
+ actions--;
+ } else {
+ encap = actions->conf;
+ }
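+ /*
+ * Editor's note: the VOID-skipping lookahead above makes
+ * e.g. RAW_DECAP -> VOID -> RAW_ENCAP validate as one
+ * decap/encap pair, while a lone RAW_DECAP with a NULL
+ * conf is validated against &empty_decap.
+ */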
+ ret = flow_dv_validate_action_raw_encap_decap
+ (dev,
+ decap ? decap : &empty_decap, encap,
+ attr, &action_flags, &actions_n,
+ error);
if (ret < 0)
return ret;
- action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
- ++actions_n;
break;
case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
actions,
"no fate action is found");
}
+ /* Continue validation for Xcap actions. */
+ if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) && (queue_index == 0xFFFF ||
+ mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
+ if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
+ MLX5_FLOW_XCAP_ACTIONS)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "encap and decap "
+ "combination aren't supported");
+ if (!attr->transfer && attr->ingress && (action_flags &
+ MLX5_FLOW_ACTION_ENCAP))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "encap is not supported"
+ " for ingress traffic");
+ }
return 0;
}
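+
+/*
+ * Illustrative sketch (editor's addition, hypothetical values): the Xcap
+ * check above lets an encap+decap combination pass validation only when the
+ * fate queue is a hairpin queue, e.g.:
+ *
+ *	struct rte_flow_action_queue queue = { .index = hairpin_q };
+ *	struct rte_flow_action actions[] = {
+ *		{ .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP, .conf = &decap },
+ *		{ .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &encap },
+ *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
+ *		{ .type = RTE_FLOW_ACTION_TYPE_END },
+ *	};
+ *
+ * hairpin_q is a hypothetical index for which mlx5_rxq_get_type() returns
+ * MLX5_RXQ_TYPE_HAIRPIN; on a regular queue the same actions fail with
+ * "encap and decap combination is not supported".
+ */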
* Internal preparation function. Allocates the DV flow size,
* this size is constant.
*
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
* @param[in] attr
* Pointer to the flow attributes.
* @param[in] items
* otherwise NULL and rte_errno is set.
*/
static struct mlx5_flow *
-flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
+flow_dv_prepare(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr __rte_unused,
const struct rte_flow_item items[] __rte_unused,
const struct rte_flow_action actions[] __rte_unused,
struct rte_flow_error *error)
{
- size_t size = sizeof(struct mlx5_flow);
+ size_t size = sizeof(struct mlx5_flow_handle);
struct mlx5_flow *dev_flow;
+ struct mlx5_flow_handle *dev_handle;
+ struct mlx5_priv *priv = dev->data->dev_private;
- dev_flow = rte_calloc(__func__, 1, size, 0);
- if (!dev_flow) {
+ /* Guard against overflowing the pre-allocated device flow array. */
+ if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
+ rte_flow_error_set(error, ENOSPC,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "not free temporary device flow");
+ return NULL;
+ }
+ dev_handle = rte_calloc(__func__, 1, size, 0);
+ if (!dev_handle) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "not enough memory to create flow");
+ "not enough memory to create flow handle");
return NULL;
}
+ /* Multi-threaded flow creation is not supported. */
+ dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
+ dev_flow->handle = dev_handle;
dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
+ /*
+ * The matching value needs to be cleared to 0 before use. In the
+ * past it was cleared implicitly by the rte_*alloc API; the explicit
+ * memset here takes almost the same time.
+ */
+ memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));
dev_flow->ingress = attr->ingress;
- dev_flow->transfer = attr->transfer;
+ dev_flow->dv.transfer = attr->transfer;
return dev_flow;
}
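+
+/*
+ * Editor's note (assumption about the surrounding framework): dev_flow
+ * workspaces now come from the per-port priv->inter_flows array indexed by
+ * priv->flow_idx, which the flow creation entry point is expected to reset
+ * to 0 before each rte_flow is built:
+ *
+ *	priv->flow_idx = 0;
+ *	dev_flow = flow_dv_prepare(dev, attr, items, actions, error);
+ *
+ * Only the small mlx5_flow_handle is still allocated dynamically and stays
+ * attached to the rte_flow for its whole lifetime.
+ */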
/* The value must be in the range of the mask. */
for (i = 0; i < sizeof(eth_m->dst); ++i)
l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
+ if (eth_v->type) {
+ /* When ethertype is present set mask for tagged VLAN. */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
+ /* Set value for tagged VLAN if ethertype is 802.1Q. */
+ if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
+ eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag,
+ 1);
+ /* Return here to avoid setting match on ethertype. */
+ return;
+ }
+ }
+ /*
+ * HW supports match on one Ethertype, the Ethertype following the last
+ * VLAN tag of the packet (see PRM).
+ * Set match on ethertype only if ETH header is not followed by VLAN.
+ */
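+ /*
+ * E.g. an eth item with type = RTE_BE16(RTE_ETHER_TYPE_IPV4)
+ * reaches the MLX5_SET calls below and matches the real
+ * ethertype, while RTE_ETHER_TYPE_VLAN returned early above and
+ * only set the cvlan_tag match.
+ */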
MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
rte_be_to_cpu_16(eth_m->type));
l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
* This is workaround, masks are not supported,
* and pre-validated.
*/
- dev_flow->dv.vf_vlan.tag =
+ dev_flow->handle->vf_vlan.tag =
rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
}
tci_m = rte_be_to_cpu_16(vlan_m->tci);
* Flow matcher value.
* @param[in] item
* Flow pattern to translate.
+ * @param[in] item_flags
+ * Bit-fields that holds the items detected until now.
* @param[in] inner
* Item is inner pattern.
* @param[in] group
static void
flow_dv_translate_item_ipv4(void *matcher, void *key,
const struct rte_flow_item *item,
+ const uint64_t item_flags,
int inner, uint32_t group)
{
const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
.dst_addr = RTE_BE32(0xffffffff),
.type_of_service = 0xff,
.next_proto_id = 0xff,
+ .time_to_live = 0xff,
},
};
void *headers_m;
else
MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
+ /*
+ * On outer header (which must contain L2), or inner header with L2,
+ * set cvlan_tag mask bit to mark this packet as untagged.
+ * This should be done even if item->spec is empty.
+ */
+ if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
if (!ipv4_v)
return;
if (!ipv4_m)
ipv4_m->hdr.next_proto_id);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
+ ipv4_m->hdr.time_to_live);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
+ ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
}
/**
* Flow matcher value.
* @param[in] item
* Flow pattern to translate.
+ * @param[in] item_flags
+ * Bit-fields that holds the items detected until now.
* @param[in] inner
* Item is inner pattern.
* @param[in] group
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
const struct rte_flow_item *item,
+ const uint64_t item_flags,
int inner, uint32_t group)
{
const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
else
MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
+ /*
+ * On outer header (which must contain L2), or inner header with L2,
+ * set cvlan_tag mask bit to mark this packet as untagged.
+ * This should be done even if item->spec is empty.
+ */
+ if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
if (!ipv6_v)
return;
if (!ipv6_m)
ipv6_m->hdr.proto);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
ipv6_v->hdr.proto & ipv6_m->hdr.proto);
+ /* Hop limit. */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
+ ipv6_m->hdr.hop_limits);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
+ ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
}
/**
void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
+ /* GRE K bit must be on and should already be validated */
+ MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
+ MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
if (!key_v)
return;
if (!key_m)
key_m = &gre_key_default_mask;
- /* GRE K bit must be on and should already be validated */
- MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
- MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
rte_be_to_cpu_32(*key_m) >> 8);
MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
vni_v[i] = vni_m[i] & vxlan_v->vni[i];
}
+/**
+ * Add VXLAN-GPE item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
+ const struct rte_flow_item *item, int inner)
+{
+ const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
+ const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
+ void *headers_m;
+ void *headers_v;
+ void *misc_m =
+ MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
+ void *misc_v =
+ MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
+ char *vni_m;
+ char *vni_v;
+ uint16_t dport;
+ int size;
+ int i;
+ uint8_t flags_m = 0xff;
+ uint8_t flags_v = 0xc;
+
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ /* This translator only serves VXLAN-GPE items. */
+ dport = MLX5_UDP_PORT_VXLAN_GPE;
+ if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
+ }
+ if (!vxlan_v)
+ return;
+ if (!vxlan_m)
+ vxlan_m = &rte_flow_item_vxlan_gpe_mask;
+ size = sizeof(vxlan_m->vni);
+ vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
+ vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
+ memcpy(vni_m, vxlan_m->vni, size);
+ for (i = 0; i < size; ++i)
+ vni_v[i] = vni_m[i] & vxlan_v->vni[i];
+ if (vxlan_m->flags) {
+ flags_m = vxlan_m->flags;
+ flags_v = vxlan_v->flags;
+ }
+ MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
+ MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
+ MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
+ vxlan_m->protocol);
+ MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
+ vxlan_v->protocol);
+}
+
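+/*
+ * Illustrative sketch (editor's addition): a minimal pattern handled by the
+ * translator above, matching on the VNI only:
+ *
+ *	struct rte_flow_item_vxlan_gpe gpe_spec = {
+ *		.vni = { 0x00, 0x12, 0x34 },
+ *	};
+ *	struct rte_flow_item item = {
+ *		.type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
+ *		.spec = &gpe_spec,
+ *		.mask = &rte_flow_item_vxlan_gpe_mask,
+ *	};
+ *
+ * The default mask covers only the VNI, so the flags match falls back to
+ * 0xff/0x0c (P bit set) and the UDP destination port defaults to
+ * MLX5_UDP_PORT_VXLAN_GPE when no UDP item set it earlier.
+ */
+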
/**
* Add Geneve item to matcher and to the value.
*
return;
if (!icmp6_m)
icmp6_m = &rte_flow_item_icmp6_mask;
+ /*
+ * Force the flow to match only non-fragmented IPv6 ICMPv6 packets.
+ * If only the protocol is specified, no need to match the frag.
+ */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
icmp6_v->type & icmp6_m->type);
return;
if (!icmp_m)
icmp_m = &rte_flow_item_icmp_mask;
+ /*
+ * Force the flow to match only non-fragmented IPv4 ICMP packets.
+ * If only the protocol is specified, no need to match the frag.
+ */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
icmp_m->hdr.icmp_type);
MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
(void *)cache_matcher,
rte_atomic32_read(&cache_matcher->refcnt));
rte_atomic32_inc(&cache_matcher->refcnt);
- dev_flow->dv.matcher = cache_matcher;
+ dev_flow->handle->dvh.matcher = cache_matcher;
/* old matcher should not make the table ref++. */
flow_dv_tbl_resource_release(dev, tbl);
return 0;
/* only matcher ref++, table ref++ already done above in get API. */
rte_atomic32_inc(&cache_matcher->refcnt);
LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
- dev_flow->dv.matcher = cache_matcher;
+ dev_flow->handle->dvh.matcher = cache_matcher;
DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
key->domain ? "FDB" : "NIC", key->table_id,
cache_matcher->priority,
cache_resource = container_of
(entry, struct mlx5_flow_dv_tag_resource, entry);
rte_atomic32_inc(&cache_resource->refcnt);
- dev_flow->dv.tag_resource = cache_resource;
+ dev_flow->handle->dvh.tag_resource = cache_resource;
DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot insert tag");
}
- dev_flow->dv.tag_resource = cache_resource;
+ dev_flow->handle->dvh.tag_resource = cache_resource;
DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
(void *)cache_resource,
rte_atomic32_read(&cache_resource->refcnt));
flow_dv_hashfields_set(struct mlx5_flow *dev_flow)
{
struct rte_flow *flow = dev_flow->flow;
- uint64_t items = dev_flow->layers;
+ uint64_t items = dev_flow->handle->layers;
int rss_inner = 0;
uint64_t rss_types = rte_eth_rss_hf_refine(flow->rss.types);
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_config *dev_conf = &priv->config;
struct rte_flow *flow = dev_flow->flow;
+ struct mlx5_flow_handle *handle = dev_flow->handle;
uint64_t item_flags = 0;
uint64_t last_item = 0;
uint64_t action_flags = 0;
!!priv->fdb_def_rule, &table, error);
if (ret)
return ret;
- dev_flow->group = table;
+ dev_flow->dv.group = table;
if (attr->transfer)
mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
if (priority == MLX5_FLOW_PRIO_RSVD)
(dev, &port_id_resource, dev_flow, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.port_id_action->action;
+ handle->dvh.port_id_action->action;
action_flags |= MLX5_FLOW_ACTION_PORT_ID;
break;
case RTE_FLOW_ACTION_TYPE_FLAG:
break;
}
tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
- if (!dev_flow->dv.tag_resource)
- if (flow_dv_tag_resource_register
- (dev, tag_be, dev_flow, error))
- return -rte_errno;
+ /*
+ * Only one FLAG or MARK is supported per device flow
+ * right now, so the tag resource pointer must still be
+ * NULL before the registration.
+ */
+ MLX5_ASSERT(!handle->dvh.tag_resource);
+ if (flow_dv_tag_resource_register(dev, tag_be,
+ dev_flow, error))
+ return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.tag_resource->action;
+ handle->dvh.tag_resource->action;
break;
case RTE_FLOW_ACTION_TYPE_MARK:
action_flags |= MLX5_FLOW_ACTION_MARK;
tag_be = mlx5_flow_mark_set
(((const struct rte_flow_action_mark *)
(actions->conf))->id);
- if (!dev_flow->dv.tag_resource)
- if (flow_dv_tag_resource_register
- (dev, tag_be, dev_flow, error))
- return -rte_errno;
+ MLX5_ASSERT(!handle->dvh.tag_resource);
+ if (flow_dv_tag_resource_register(dev, tag_be,
+ dev_flow, error))
+ return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.tag_resource->action;
+ handle->dvh.tag_resource->action;
break;
case RTE_FLOW_ACTION_TYPE_SET_META:
if (flow_dv_convert_action_set_meta
goto cnt_err;
}
flow->counter = flow_dv_counter_alloc(dev,
- count->shared,
- count->id,
- dev_flow->group);
+ count->shared,
+ count->id,
+ dev_flow->dv.group);
if (flow->counter == NULL)
goto cnt_err;
dev_flow->dv.actions[actions_n++] =
(dev, attr, &vlan, dev_flow, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.push_vlan_res->action;
+ handle->dvh.push_vlan_res->action;
action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
break;
case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
- action_flags |= actions->type ==
- RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
- MLX5_FLOW_ACTION_VXLAN_ENCAP :
- MLX5_FLOW_ACTION_NVGRE_ENCAP;
+ handle->dvh.encap_decap->verbs_action;
+ action_flags |= MLX5_FLOW_ACTION_ENCAP;
break;
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
- action_flags |= actions->type ==
- RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
- MLX5_FLOW_ACTION_VXLAN_DECAP :
- MLX5_FLOW_ACTION_NVGRE_DECAP;
+ handle->dvh.encap_decap->verbs_action;
+ action_flags |= MLX5_FLOW_ACTION_DECAP;
break;
case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
/* Handle encap with preceding decap. */
- if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
+ if (action_flags & MLX5_FLOW_ACTION_DECAP) {
if (flow_dv_create_action_raw_encap
(dev, actions, dev_flow, attr, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ handle->dvh.encap_decap->verbs_action;
} else {
/* Handle encap without preceding decap. */
if (flow_dv_create_action_l2_encap
error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ handle->dvh.encap_decap->verbs_action;
}
- action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
+ action_flags |= MLX5_FLOW_ACTION_ENCAP;
break;
case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
- /* Check if this decap is followed by encap. */
- for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
- action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
- action++) {
- }
- /* Handle decap only if it isn't followed by encap. */
+ while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
+ ;
if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
if (flow_dv_create_action_l2_decap
(dev, dev_flow, attr->transfer, error))
return -rte_errno;
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.encap_decap->verbs_action;
+ handle->dvh.encap_decap->verbs_action;
}
/* If decap is followed by encap, handle it at encap. */
- action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
+ action_flags |= MLX5_FLOW_ACTION_DECAP;
break;
case RTE_FLOW_ACTION_TYPE_JUMP:
jump_data = action->conf;
"cannot create jump action.");
}
dev_flow->dv.actions[actions_n++] =
- dev_flow->dv.jump->action;
+ handle->dvh.jump->action;
action_flags |= MLX5_FLOW_ACTION_JUMP;
break;
case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
if (flow_dv_convert_action_modify_tp
(mhdr_res, actions, items,
- &flow_attr, error))
+ &flow_attr, dev_flow, !!(action_flags &
+ MLX5_FLOW_ACTION_DECAP), error))
return -rte_errno;
action_flags |= actions->type ==
RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
break;
case RTE_FLOW_ACTION_TYPE_DEC_TTL:
if (flow_dv_convert_action_modify_dec_ttl
- (mhdr_res, items, &flow_attr, error))
+ (mhdr_res, items, &flow_attr, dev_flow,
+ !!(action_flags &
+ MLX5_FLOW_ACTION_DECAP), error))
return -rte_errno;
action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
break;
case RTE_FLOW_ACTION_TYPE_SET_TTL:
if (flow_dv_convert_action_modify_ttl
- (mhdr_res, actions, items,
- &flow_attr, error))
+ (mhdr_res, actions, items, &flow_attr,
+ dev_flow, !!(action_flags &
+ MLX5_FLOW_ACTION_DECAP), error))
return -rte_errno;
action_flags |= MLX5_FLOW_ACTION_SET_TTL;
break;
(dev, mhdr_res, dev_flow, error))
return -rte_errno;
dev_flow->dv.actions[modify_action_position] =
- dev_flow->dv.modify_hdr->verbs_action;
+ handle->dvh.modify_hdr->verbs_action;
}
break;
default:
modify_action_position = actions_n++;
}
dev_flow->dv.actions_n = actions_n;
- dev_flow->actions = action_flags;
+ handle->act_flags = action_flags;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
int item_type = items->type;
mlx5_flow_tunnel_ip_check(items, next_protocol,
&item_flags, &tunnel);
flow_dv_translate_item_ipv4(match_mask, match_value,
- items, tunnel,
- dev_flow->group);
+ items, item_flags, tunnel,
+ dev_flow->dv.group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
MLX5_FLOW_LAYER_OUTER_L3_IPV4;
mlx5_flow_tunnel_ip_check(items, next_protocol,
&item_flags, &tunnel);
flow_dv_translate_item_ipv6(match_mask, match_value,
- items, tunnel,
- dev_flow->group);
+ items, item_flags, tunnel,
+ dev_flow->dv.group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
MLX5_FLOW_LAYER_OUTER_L3_IPV6;
case RTE_FLOW_ITEM_TYPE_GRE:
flow_dv_translate_item_gre(match_mask, match_value,
items, tunnel);
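+ /*
+ * Editor's reading: with inner RSS requested
+ * (rss.level >= 2) the tunnel item takes the L2 matcher
+ * priority instead of L4; the same rule is applied to
+ * every tunnel item below.
+ */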
+ matcher.priority = flow->rss.level >= 2 ?
+ MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_GRE;
break;
case RTE_FLOW_ITEM_TYPE_GRE_KEY:
case RTE_FLOW_ITEM_TYPE_NVGRE:
flow_dv_translate_item_nvgre(match_mask, match_value,
items, tunnel);
+ matcher.priority = flow->rss.level >= 2 ?
+ MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_GRE;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
flow_dv_translate_item_vxlan(match_mask, match_value,
items, tunnel);
+ matcher.priority = flow->rss.level >= 2 ?
+ MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_VXLAN;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
- flow_dv_translate_item_vxlan(match_mask, match_value,
- items, tunnel);
+ flow_dv_translate_item_vxlan_gpe(match_mask,
+ match_value, items,
+ tunnel);
+ matcher.priority = flow->rss.level >= 2 ?
+ MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
break;
case RTE_FLOW_ITEM_TYPE_GENEVE:
flow_dv_translate_item_geneve(match_mask, match_value,
items, tunnel);
+ matcher.priority = flow->rss.level >= 2 ?
+ MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_GENEVE;
break;
case RTE_FLOW_ITEM_TYPE_MPLS:
flow_dv_translate_item_mpls(match_mask, match_value,
items, last_item, tunnel);
+ matcher.priority = flow->rss.level >= 2 ?
+ MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_MPLS;
break;
case RTE_FLOW_ITEM_TYPE_MARK:
case RTE_FLOW_ITEM_TYPE_GTP:
flow_dv_translate_item_gtp(match_mask, match_value,
items, tunnel);
+ matcher.priority = flow->rss.level >= 2 ?
+ MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4;
last_item = MLX5_FLOW_LAYER_GTP;
break;
default:
MLX5_ASSERT(!flow_dv_check_valid_spec(matcher.mask.buf,
dev_flow->dv.value.buf));
#endif
- dev_flow->layers = item_flags;
+ /*
+ * Layers may already be initialized from the prefix flow if this
+ * dev_flow is the suffix flow.
+ */
+ handle->layers |= item_flags;
if (action_flags & MLX5_FLOW_ACTION_RSS)
flow_dv_hashfields_set(dev_flow);
/* Register matcher. */
/* reserved field no needs to be set to 0 here. */
tbl_key.domain = attr->transfer;
tbl_key.direction = attr->egress;
- tbl_key.table_id = dev_flow->group;
+ tbl_key.table_id = dev_flow->dv.group;
if (flow_dv_matcher_register(dev, &matcher, &tbl_key, dev_flow, error))
return -rte_errno;
return 0;
__flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
struct rte_flow_error *error)
{
- struct mlx5_flow_dv *dv;
+ struct mlx5_flow_dv_workspace *dv;
+ struct mlx5_flow_handle *dh;
+ struct mlx5_flow_handle_dv *dv_h;
struct mlx5_flow *dev_flow;
struct mlx5_priv *priv = dev->data->dev_private;
int n;
int err;
+ int idx;
- LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
+ for (idx = priv->flow_idx - 1; idx >= 0; idx--) {
+ dev_flow = &((struct mlx5_flow *)priv->inter_flows)[idx];
dv = &dev_flow->dv;
+ dh = dev_flow->handle;
+ dv_h = &dh->dvh;
n = dv->actions_n;
- if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
- if (dev_flow->transfer) {
+ if (dh->act_flags & MLX5_FLOW_ACTION_DROP) {
+ if (dv->transfer) {
dv->actions[n++] = priv->sh->esw_drop_action;
} else {
- dv->hrxq = mlx5_hrxq_drop_new(dev);
- if (!dv->hrxq) {
+ dh->hrxq = mlx5_hrxq_drop_new(dev);
+ if (!dh->hrxq) {
rte_flow_error_set
(error, errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
"cannot get drop hash queue");
goto error;
}
- dv->actions[n++] = dv->hrxq->action;
+ dv->actions[n++] = dh->hrxq->action;
}
- } else if (dev_flow->actions &
+ } else if (dh->act_flags &
(MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
struct mlx5_hrxq *hrxq;
dev_flow->hash_fields,
(*flow->rss.queue),
flow->rss.queue_num,
- !!(dev_flow->layers &
+ !!(dh->layers &
MLX5_FLOW_LAYER_TUNNEL));
}
if (!hrxq) {
"cannot get hash queue");
goto error;
}
- dv->hrxq = hrxq;
- dv->actions[n++] = dv->hrxq->action;
+ dh->hrxq = hrxq;
+ dv->actions[n++] = dh->hrxq->action;
}
- dv->flow =
- mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
+ dh->ib_flow =
+ mlx5_glue->dv_create_flow(dv_h->matcher->matcher_object,
(void *)&dv->value, n,
dv->actions);
- if (!dv->flow) {
+ if (!dh->ib_flow) {
rte_flow_error_set(error, errno,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL,
goto error;
}
if (priv->vmwa_context &&
- dev_flow->dv.vf_vlan.tag &&
- !dev_flow->dv.vf_vlan.created) {
+ dh->vf_vlan.tag && !dh->vf_vlan.created) {
/*
* The rule contains the VLAN pattern.
* For VF we are going to create VLAN
* interface to make hypervisor set correct
* e-Switch vport context.
*/
- mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan);
+ mlx5_vlan_vmwa_acquire(dev, &dh->vf_vlan);
}
}
return 0;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
- LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
- struct mlx5_flow_dv *dv = &dev_flow->dv;
- if (dv->hrxq) {
- if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
+ LIST_FOREACH(dh, &flow->dev_handles, next) {
+ if (dh->hrxq) {
+ if (dh->act_flags & MLX5_FLOW_ACTION_DROP)
mlx5_hrxq_drop_release(dev);
else
- mlx5_hrxq_release(dev, dv->hrxq);
- dv->hrxq = NULL;
+ mlx5_hrxq_release(dev, dh->hrxq);
+ dh->hrxq = NULL;
}
- if (dev_flow->dv.vf_vlan.tag &&
- dev_flow->dv.vf_vlan.created)
- mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
+ if (dh->vf_vlan.tag && dh->vf_vlan.created)
+ mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
}
rte_errno = err; /* Restore rte_errno. */
return -rte_errno;
*
* @param dev
* Pointer to Ethernet device.
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
flow_dv_matcher_release(struct rte_eth_dev *dev,
- struct mlx5_flow *flow)
+ struct mlx5_flow_handle *handle)
{
- struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
+ struct mlx5_flow_dv_matcher *matcher = handle->dvh.matcher;
MLX5_ASSERT(matcher->matcher_object);
DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
/**
* Release an encap/decap resource.
*
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
-flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
+flow_dv_encap_decap_resource_release(struct mlx5_flow_handle *handle)
{
struct mlx5_flow_dv_encap_decap_resource *cache_resource =
- flow->dv.encap_decap;
+ handle->dvh.encap_decap;
MLX5_ASSERT(cache_resource->verbs_action);
DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
*
* @param dev
* Pointer to Ethernet device.
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
- struct mlx5_flow *flow)
+ struct mlx5_flow_handle *handle)
{
- struct mlx5_flow_dv_jump_tbl_resource *cache_resource = flow->dv.jump;
+ struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
+ handle->dvh.jump;
struct mlx5_flow_tbl_data_entry *tbl_data =
container_of(cache_resource,
struct mlx5_flow_tbl_data_entry, jump);
/**
* Release a modify-header resource.
*
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
-flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
+flow_dv_modify_hdr_resource_release(struct mlx5_flow_handle *handle)
{
struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
- flow->dv.modify_hdr;
+ handle->dvh.modify_hdr;
MLX5_ASSERT(cache_resource->verbs_action);
DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
/**
* Release port ID action resource.
*
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
-flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
+flow_dv_port_id_action_resource_release(struct mlx5_flow_handle *handle)
{
struct mlx5_flow_dv_port_id_action_resource *cache_resource =
- flow->dv.port_id_action;
+ handle->dvh.port_id_action;
MLX5_ASSERT(cache_resource->action);
DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
/**
* Release push vlan action resource.
*
- * @param flow
- * Pointer to mlx5_flow.
+ * @param handle
+ * Pointer to mlx5_flow_handle.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
static int
-flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow)
+flow_dv_push_vlan_action_resource_release(struct mlx5_flow_handle *handle)
{
struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
- flow->dv.push_vlan_res;
+ handle->dvh.push_vlan_res;
MLX5_ASSERT(cache_resource->action);
DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
static void
__flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- struct mlx5_flow_dv *dv;
- struct mlx5_flow *dev_flow;
+ struct mlx5_flow_handle *dh;
if (!flow)
return;
- LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
- dv = &dev_flow->dv;
- if (dv->flow) {
- claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
- dv->flow = NULL;
+ LIST_FOREACH(dh, &flow->dev_handles, next) {
+ if (dh->ib_flow) {
+ claim_zero(mlx5_glue->dv_destroy_flow(dh->ib_flow));
+ dh->ib_flow = NULL;
}
- if (dv->hrxq) {
- if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
+ if (dh->hrxq) {
+ if (dh->act_flags & MLX5_FLOW_ACTION_DROP)
mlx5_hrxq_drop_release(dev);
else
- mlx5_hrxq_release(dev, dv->hrxq);
- dv->hrxq = NULL;
+ mlx5_hrxq_release(dev, dh->hrxq);
+ dh->hrxq = NULL;
}
- if (dev_flow->dv.vf_vlan.tag &&
- dev_flow->dv.vf_vlan.created)
- mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
+ if (dh->vf_vlan.tag && dh->vf_vlan.created)
+ mlx5_vlan_vmwa_release(dev, &dh->vf_vlan);
}
}
static void
__flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
- struct mlx5_flow *dev_flow;
+ struct mlx5_flow_handle *dev_handle;
if (!flow)
return;
mlx5_flow_meter_detach(flow->meter);
flow->meter = NULL;
}
- while (!LIST_EMPTY(&flow->dev_flows)) {
- dev_flow = LIST_FIRST(&flow->dev_flows);
- LIST_REMOVE(dev_flow, next);
- if (dev_flow->dv.matcher)
- flow_dv_matcher_release(dev, dev_flow);
- if (dev_flow->dv.encap_decap)
- flow_dv_encap_decap_resource_release(dev_flow);
- if (dev_flow->dv.modify_hdr)
- flow_dv_modify_hdr_resource_release(dev_flow);
- if (dev_flow->dv.jump)
- flow_dv_jump_tbl_resource_release(dev, dev_flow);
- if (dev_flow->dv.port_id_action)
- flow_dv_port_id_action_resource_release(dev_flow);
- if (dev_flow->dv.push_vlan_res)
- flow_dv_push_vlan_action_resource_release(dev_flow);
- if (dev_flow->dv.tag_resource)
- flow_dv_tag_release(dev, dev_flow->dv.tag_resource);
- rte_free(dev_flow);
+ while (!LIST_EMPTY(&flow->dev_handles)) {
+ dev_handle = LIST_FIRST(&flow->dev_handles);
+ LIST_REMOVE(dev_handle, next);
+ if (dev_handle->dvh.matcher)
+ flow_dv_matcher_release(dev, dev_handle);
+ if (dev_handle->dvh.encap_decap)
+ flow_dv_encap_decap_resource_release(dev_handle);
+ if (dev_handle->dvh.modify_hdr)
+ flow_dv_modify_hdr_resource_release(dev_handle);
+ if (dev_handle->dvh.jump)
+ flow_dv_jump_tbl_resource_release(dev, dev_handle);
+ if (dev_handle->dvh.port_id_action)
+ flow_dv_port_id_action_resource_release(dev_handle);
+ if (dev_handle->dvh.push_vlan_res)
+ flow_dv_push_vlan_action_resource_release(dev_handle);
+ if (dev_handle->dvh.tag_resource)
+ flow_dv_tag_release(dev,
+ dev_handle->dvh.tag_resource);
+ rte_free(dev_handle);
}
}