#ifdef HAVE_IBV_FLOW_DV_SUPPORT
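+/*
+ * When rdma-core lacks DevX counter support the action type below is not
+ * defined; provide a zero stub so the translation code still compiles.
+ * flow_dv_validate_action_count() rejects the count action at run time in
+ * that configuration.
+ */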
+#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
+#define MLX5DV_FLOW_ACTION_COUNTER_DEVX 0
+#endif
+
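+/*
+ * Pattern attributes deduced from the flow items. The anonymous bit-field
+ * and the 'attr' word overlay each other so the whole set can be cleared or
+ * tested at once; flow_dv_attr_init() fills it on first use by the
+ * modify-header conversions that need to know the L3/L4 protocols.
+ */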
+union flow_dv_attr {
+ struct {
+ uint32_t valid:1;
+ uint32_t ipv4:1;
+ uint32_t ipv6:1;
+ uint32_t tcp:1;
+ uint32_t udp:1;
+ uint32_t reserved:27;
+ };
+ uint32_t attr;
+};
+
+/**
+ * Initialize flow attributes structure according to flow items' types.
+ *
+ * @param[in] item
+ * Pointer to item specification.
+ * @param[out] attr
+ * Pointer to flow attributes structure.
+ */
+static void
+flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
+{
+ for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ attr->ipv4 = 1;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ attr->ipv6 = 1;
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ attr->udp = 1;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ attr->tcp = 1;
+ break;
+ default:
+ break;
+ }
+ }
+ attr->valid = 1;
+}
+
+struct field_modify_info {
+ uint32_t size; /* Size of field in protocol header, in bytes. */
+ uint32_t offset; /* Offset of field in protocol header, in bytes. */
+ enum mlx5_modification_field id;
+};
+
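+/*
+ * Per-protocol field maps consumed by flow_dv_convert_modify_action().
+ * Each entry gives the size and offset of one modifiable field within the
+ * protocol header and the matching device modification field id; a
+ * zero-size entry terminates the array.
+ */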
+struct field_modify_info modify_eth[] = {
+ {4, 0, MLX5_MODI_OUT_DMAC_47_16},
+ {2, 4, MLX5_MODI_OUT_DMAC_15_0},
+ {4, 6, MLX5_MODI_OUT_SMAC_47_16},
+ {2, 10, MLX5_MODI_OUT_SMAC_15_0},
+ {0, 0, 0},
+};
+
+struct field_modify_info modify_ipv4[] = {
+ {1, 8, MLX5_MODI_OUT_IPV4_TTL},
+ {4, 12, MLX5_MODI_OUT_SIPV4},
+ {4, 16, MLX5_MODI_OUT_DIPV4},
+ {0, 0, 0},
+};
+
+struct field_modify_info modify_ipv6[] = {
+ {1, 7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
+ {4, 8, MLX5_MODI_OUT_SIPV6_127_96},
+ {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
+ {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
+ {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
+ {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
+ {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
+ {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
+ {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
+ {0, 0, 0},
+};
+
+struct field_modify_info modify_udp[] = {
+ {2, 0, MLX5_MODI_OUT_UDP_SPORT},
+ {2, 2, MLX5_MODI_OUT_UDP_DPORT},
+ {0, 0, 0},
+};
+
+struct field_modify_info modify_tcp[] = {
+ {2, 0, MLX5_MODI_OUT_TCP_SPORT},
+ {2, 2, MLX5_MODI_OUT_TCP_DPORT},
+ {0, 0, 0},
+};
+
+/**
+ * Convert modify-header action to DV specification.
+ *
+ * @param[in] item
+ * Pointer to item specification.
+ * @param[in] field
+ * Pointer to field modification information.
+ * @param[in,out] resource
+ * Pointer to the modify-header resource.
+ * @param[in] type
+ * Type of modification.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_convert_modify_action(struct rte_flow_item *item,
+ struct field_modify_info *field,
+ struct mlx5_flow_dv_modify_hdr_resource *resource,
+ uint32_t type,
+ struct rte_flow_error *error)
+{
+ uint32_t i = resource->actions_num;
+ struct mlx5_modification_cmd *actions = resource->actions;
+ const uint8_t *spec = item->spec;
+ const uint8_t *mask = item->mask;
+ uint32_t set;
+
+ while (field->size) {
+ set = 0;
+ /* Generate modify command for each mask segment. */
+ memcpy(&set, &mask[field->offset], field->size);
+ if (set) {
+ if (i >= MLX5_MODIFY_NUM)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "too many items to modify");
+ actions[i].action_type = type;
+ actions[i].field = field->id;
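+ /* A 4-byte field is encoded as length 0 (full dword),
+ * smaller fields carry their width in bits.
+ */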
+ actions[i].length = field->size ==
+ 4 ? 0 : field->size * 8;
+ rte_memcpy(&actions[i].data[4 - field->size],
+ &spec[field->offset], field->size);
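+ /* Convert the command header to big-endian, as consumed by the device. */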
+ actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
+ ++i;
+ }
+ if (resource->actions_num != i)
+ resource->actions_num = i;
+ field++;
+ }
+ if (!resource->actions_num)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "invalid modification flow item");
+ return 0;
+}
+
+/**
+ * Convert modify-header set IPv4 address action to DV specification.
+ *
+ * @param[in,out] resource
+ * Pointer to the modify-header resource.
+ * @param[in] action
+ * Pointer to action specification.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_convert_action_modify_ipv4
+ (struct mlx5_flow_dv_modify_hdr_resource *resource,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_set_ipv4 *conf =
+ (const struct rte_flow_action_set_ipv4 *)(action->conf);
+ struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
+ struct rte_flow_item_ipv4 ipv4;
+ struct rte_flow_item_ipv4 ipv4_mask;
+
+ memset(&ipv4, 0, sizeof(ipv4));
+ memset(&ipv4_mask, 0, sizeof(ipv4_mask));
+ if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
+ ipv4.hdr.src_addr = conf->ipv4_addr;
+ ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
+ } else {
+ ipv4.hdr.dst_addr = conf->ipv4_addr;
+ ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
+ }
+ item.spec = &ipv4;
+ item.mask = &ipv4_mask;
+ return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
+ MLX5_MODIFICATION_TYPE_SET, error);
+}
+
+/**
+ * Convert modify-header set IPv6 address action to DV specification.
+ *
+ * @param[in,out] resource
+ * Pointer to the modify-header resource.
+ * @param[in] action
+ * Pointer to action specification.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_convert_action_modify_ipv6
+ (struct mlx5_flow_dv_modify_hdr_resource *resource,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_set_ipv6 *conf =
+ (const struct rte_flow_action_set_ipv6 *)(action->conf);
+ struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
+ struct rte_flow_item_ipv6 ipv6;
+ struct rte_flow_item_ipv6 ipv6_mask;
+
+ memset(&ipv6, 0, sizeof(ipv6));
+ memset(&ipv6_mask, 0, sizeof(ipv6_mask));
+ if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
+ memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
+ sizeof(ipv6.hdr.src_addr));
+ memcpy(&ipv6_mask.hdr.src_addr,
+ &rte_flow_item_ipv6_mask.hdr.src_addr,
+ sizeof(ipv6.hdr.src_addr));
+ } else {
+ memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
+ sizeof(ipv6.hdr.dst_addr));
+ memcpy(&ipv6_mask.hdr.dst_addr,
+ &rte_flow_item_ipv6_mask.hdr.dst_addr,
+ sizeof(ipv6.hdr.dst_addr));
+ }
+ item.spec = &ipv6;
+ item.mask = &ipv6_mask;
+ return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
+ MLX5_MODIFICATION_TYPE_SET, error);
+}
+
+/**
+ * Convert modify-header set MAC address action to DV specification.
+ *
+ * @param[in,out] resource
+ * Pointer to the modify-header resource.
+ * @param[in] action
+ * Pointer to action specification.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_convert_action_modify_mac
+ (struct mlx5_flow_dv_modify_hdr_resource *resource,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_set_mac *conf =
+ (const struct rte_flow_action_set_mac *)(action->conf);
+ struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
+ struct rte_flow_item_eth eth;
+ struct rte_flow_item_eth eth_mask;
+
+ memset(&eth, 0, sizeof(eth));
+ memset(&eth_mask, 0, sizeof(eth_mask));
+ if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
+ memcpy(&eth.src.addr_bytes, &conf->mac_addr,
+ sizeof(eth.src.addr_bytes));
+ memcpy(&eth_mask.src.addr_bytes,
+ &rte_flow_item_eth_mask.src.addr_bytes,
+ sizeof(eth_mask.src.addr_bytes));
+ } else {
+ memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
+ sizeof(eth.dst.addr_bytes));
+ memcpy(&eth_mask.dst.addr_bytes,
+ &rte_flow_item_eth_mask.dst.addr_bytes,
+ sizeof(eth_mask.dst.addr_bytes));
+ }
+ item.spec = &eth;
+ item.mask = &eth_mask;
+ return flow_dv_convert_modify_action(&item, modify_eth, resource,
+ MLX5_MODIFICATION_TYPE_SET, error);
+}
+
+/**
+ * Convert modify-header set TP action to DV specification.
+ *
+ * @param[in,out] resource
+ * Pointer to the modify-header resource.
+ * @param[in] action
+ * Pointer to action specification.
+ * @param[in] items
+ * Pointer to rte_flow_item objects list.
+ * @param[in] attr
+ * Pointer to flow attributes structure.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_convert_action_modify_tp
+ (struct mlx5_flow_dv_modify_hdr_resource *resource,
+ const struct rte_flow_action *action,
+ const struct rte_flow_item *items,
+ union flow_dv_attr *attr,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_set_tp *conf =
+ (const struct rte_flow_action_set_tp *)(action->conf);
+ struct rte_flow_item item;
+ struct rte_flow_item_udp udp;
+ struct rte_flow_item_udp udp_mask;
+ struct rte_flow_item_tcp tcp;
+ struct rte_flow_item_tcp tcp_mask;
+ struct field_modify_info *field;
+
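+ /* Validation guarantees an L4 item in the pattern, so one of the
+ * branches below initializes 'item' and 'field'.
+ */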
+ if (!attr->valid)
+ flow_dv_attr_init(items, attr);
+ if (attr->udp) {
+ memset(&udp, 0, sizeof(udp));
+ memset(&udp_mask, 0, sizeof(udp_mask));
+ if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
+ udp.hdr.src_port = conf->port;
+ udp_mask.hdr.src_port =
+ rte_flow_item_udp_mask.hdr.src_port;
+ } else {
+ udp.hdr.dst_port = conf->port;
+ udp_mask.hdr.dst_port =
+ rte_flow_item_udp_mask.hdr.dst_port;
+ }
+ item.type = RTE_FLOW_ITEM_TYPE_UDP;
+ item.spec = &udp;
+ item.mask = &udp_mask;
+ field = modify_udp;
+ }
+ if (attr->tcp) {
+ memset(&tcp, 0, sizeof(tcp));
+ memset(&tcp_mask, 0, sizeof(tcp_mask));
+ if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
+ tcp.hdr.src_port = conf->port;
+ tcp_mask.hdr.src_port =
+ rte_flow_item_tcp_mask.hdr.src_port;
+ } else {
+ tcp.hdr.dst_port = conf->port;
+ tcp_mask.hdr.dst_port =
+ rte_flow_item_tcp_mask.hdr.dst_port;
+ }
+ item.type = RTE_FLOW_ITEM_TYPE_TCP;
+ item.spec = &tcp;
+ item.mask = &tcp_mask;
+ field = modify_tcp;
+ }
+ return flow_dv_convert_modify_action(&item, field, resource,
+ MLX5_MODIFICATION_TYPE_SET, error);
+}
+
+/**
+ * Convert modify-header set TTL action to DV specification.
+ *
+ * @param[in,out] resource
+ * Pointer to the modify-header resource.
+ * @param[in] action
+ * Pointer to action specification.
+ * @param[in] items
+ * Pointer to rte_flow_item objects list.
+ * @param[in] attr
+ * Pointer to flow attributes structure.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_convert_action_modify_ttl
+ (struct mlx5_flow_dv_modify_hdr_resource *resource,
+ const struct rte_flow_action *action,
+ const struct rte_flow_item *items,
+ union flow_dv_attr *attr,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_set_ttl *conf =
+ (const struct rte_flow_action_set_ttl *)(action->conf);
+ struct rte_flow_item item;
+ struct rte_flow_item_ipv4 ipv4;
+ struct rte_flow_item_ipv4 ipv4_mask;
+ struct rte_flow_item_ipv6 ipv6;
+ struct rte_flow_item_ipv6 ipv6_mask;
+ struct field_modify_info *field;
+
+ if (!attr->valid)
+ flow_dv_attr_init(items, attr);
+ if (attr->ipv4) {
+ memset(&ipv4, 0, sizeof(ipv4));
+ memset(&ipv4_mask, 0, sizeof(ipv4_mask));
+ ipv4.hdr.time_to_live = conf->ttl_value;
+ ipv4_mask.hdr.time_to_live = 0xFF;
+ item.type = RTE_FLOW_ITEM_TYPE_IPV4;
+ item.spec = &ipv4;
+ item.mask = &ipv4_mask;
+ field = modify_ipv4;
+ }
+ if (attr->ipv6) {
+ memset(&ipv6, 0, sizeof(ipv6));
+ memset(&ipv6_mask, 0, sizeof(ipv6_mask));
+ ipv6.hdr.hop_limits = conf->ttl_value;
+ ipv6_mask.hdr.hop_limits = 0xFF;
+ item.type = RTE_FLOW_ITEM_TYPE_IPV6;
+ item.spec = &ipv6;
+ item.mask = &ipv6_mask;
+ field = modify_ipv6;
+ }
+ return flow_dv_convert_modify_action(&item, field, resource,
+ MLX5_MODIFICATION_TYPE_SET, error);
+}
+
+/**
+ * Convert modify-header decrement TTL action to DV specification.
+ *
+ * @param[in,out] resource
+ * Pointer to the modify-header resource.
+ * @param[in] action
+ * Pointer to action specification.
+ * @param[in] items
+ * Pointer to rte_flow_item objects list.
+ * @param[in] attr
+ * Pointer to flow attributes structure.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_convert_action_modify_dec_ttl
+ (struct mlx5_flow_dv_modify_hdr_resource *resource,
+ const struct rte_flow_item *items,
+ union flow_dv_attr *attr,
+ struct rte_flow_error *error)
+{
+ struct rte_flow_item item;
+ struct rte_flow_item_ipv4 ipv4;
+ struct rte_flow_item_ipv4 ipv4_mask;
+ struct rte_flow_item_ipv6 ipv6;
+ struct rte_flow_item_ipv6 ipv6_mask;
+ struct field_modify_info *field;
+
+ if (!attr->valid)
+ flow_dv_attr_init(items, attr);
+ if (attr->ipv4) {
+ memset(&ipv4, 0, sizeof(ipv4));
+ memset(&ipv4_mask, 0, sizeof(ipv4_mask));
+ ipv4.hdr.time_to_live = 0xFF;
+ ipv4_mask.hdr.time_to_live = 0xFF;
+ item.type = RTE_FLOW_ITEM_TYPE_IPV4;
+ item.spec = &ipv4;
+ item.mask = &ipv4_mask;
+ field = modify_ipv4;
+ }
+ if (attr->ipv6) {
+ memset(&ipv6, 0, sizeof(ipv6));
+ memset(&ipv6_mask, 0, sizeof(ipv6_mask));
+ ipv6.hdr.hop_limits = 0xFF;
+ ipv6_mask.hdr.hop_limits = 0xFF;
+ item.type = RTE_FLOW_ITEM_TYPE_IPV6;
+ item.spec = &ipv6;
+ item.mask = &ipv6_mask;
+ field = modify_ipv6;
+ }
+ return flow_dv_convert_modify_action(&item, field, resource,
+ MLX5_MODIFICATION_TYPE_ADD, error);
+}
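+/*
+ * Note that all conversions above append their commands to the same
+ * mlx5_flow_dv_modify_hdr_resource, so several set/dec actions in one flow
+ * collapse into a single modify-header device action, registered when
+ * RTE_FLOW_ACTION_TYPE_END is reached in flow_dv_translate().
+ */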
+
/**
* Validate META item.
*
return 0;
}
+/**
+ * Validate count action.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_count(struct rte_eth_dev *dev,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+ if (!priv->config.devx)
+ goto notsup_err;
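+ /* DevX counters must also be supported by the rdma-core build. */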
+#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
+ return 0;
+#endif
+notsup_err:
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "count action not supported");
+}
+
/**
* Validate the L2 encap action.
*
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"can only have a single encap or"
" decap action in a flow");
+ if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't have decap action after"
+ " modify action");
if (attr->egress)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
RTE_FLOW_ERROR_TYPE_ACTION, NULL,
"can only have a single decap"
" action in a flow");
+ if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't have decap action after"
+ " modify action");
/* decap action is valid on egress only if it is followed by encap */
if (attr->egress) {
for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
return 0;
}
-
/**
* Find existing encap/decap resource or create and register a new one.
*
struct mlx5_flow *dev_flow,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_dv_encap_decap_resource *cache_resource;
/* Lookup a matching resource from cache. */
*cache_resource = *resource;
cache_resource->verbs_action =
mlx5_glue->dv_create_flow_action_packet_reformat
- (priv->ctx, cache_resource->size,
+ (priv->sh->ctx, cache_resource->size,
(cache_resource->size ? cache_resource->buf : NULL),
cache_resource->reformat_type,
cache_resource->ft_type);
}
/**
- * Verify the @p attributes will be correctly understood by the NIC and store
- * them in the @p flow if everything is correct.
+ * Validate the modify-header actions.
*
- * @param[in] dev
- * Pointer to dev struct.
- * @param[in] attributes
- * Pointer to flow attributes
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[in] action
+ * Pointer to the modify action.
* @param[out] error
* Pointer to error structure.
*
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-flow_dv_validate_attributes(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attributes,
- struct rte_flow_error *error)
+flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
- uint32_t priority_max = priv->config.flow_prio - 1;
-
- if (attributes->group)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
- NULL,
- "groups is not supported");
- if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
- attributes->priority >= priority_max)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
- NULL,
- "priority out of range");
- if (attributes->transfer)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
- NULL,
- "transfer is not supported");
- if (!(attributes->egress ^ attributes->ingress))
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR, NULL,
- "must specify exactly one of "
- "ingress or egress");
+ if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL, "action configuration not set");
+ if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't have encap action before"
+ " modify action");
return 0;
}
/**
- * Internal validation function. For validating both actions and items.
+ * Validate the modify-header MAC address actions.
*
- * @param[in] dev
- * Pointer to the rte_eth_dev structure.
- * @param[in] attr
- * Pointer to the flow attributes.
- * @param[in] items
- * Pointer to the list of items.
- * @param[in] actions
- * Pointer to the list of actions.
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[in] action
+ * Pointer to the modify action.
+ * @param[in] item_flags
+ * Holds the items detected.
* @param[out] error
- * Pointer to the error structure.
+ * Pointer to error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_ernno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
+flow_dv_validate_action_modify_mac(const uint64_t action_flags,
+ const struct rte_flow_action *action,
+ const uint64_t item_flags,
+ struct rte_flow_error *error)
{
- int ret;
- uint64_t action_flags = 0;
- uint64_t item_flags = 0;
- int tunnel = 0;
- uint8_t next_protocol = 0xff;
- int actions_n = 0;
+ int ret = 0;
- if (items == NULL)
- return -1;
- ret = flow_dv_validate_attributes(dev, attr, error);
- if (ret < 0)
- return ret;
- for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
+ if (!ret) {
+ if (!(item_flags & MLX5_FLOW_LAYER_L2))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "no L2 item in pattern");
+ }
+ return ret;
+}
+
+/**
+ * Validate the modify-header IPv4 address actions.
+ *
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[in] action
+ * Pointer to the modify action.
+ * @param[in] item_flags
+ * Holds the items detected.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
+ const struct rte_flow_action *action,
+ const uint64_t item_flags,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+
+ ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
+ if (!ret) {
+ if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "no ipv4 item in pattern");
+ }
+ return ret;
+}
+
+/**
+ * Validate the modify-header IPv6 address actions.
+ *
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[in] action
+ * Pointer to the modify action.
+ * @param[in] item_flags
+ * Holds the items detected.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
+ const struct rte_flow_action *action,
+ const uint64_t item_flags,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+
+ ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
+ if (!ret) {
+ if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "no ipv6 item in pattern");
+ }
+ return ret;
+}
+
+/**
+ * Validate the modify-header TP actions.
+ *
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[in] action
+ * Pointer to the modify action.
+ * @param[in] item_flags
+ * Holds the items detected.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_modify_tp(const uint64_t action_flags,
+ const struct rte_flow_action *action,
+ const uint64_t item_flags,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+
+ ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
+ if (!ret) {
+ if (!(item_flags & MLX5_FLOW_LAYER_L4))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "no transport layer "
+ "in pattern");
+ }
+ return ret;
+}
+
+/**
+ * Validate the modify-header TTL actions.
+ *
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[in] action
+ * Pointer to the modify action.
+ * @param[in] item_flags
+ * Holds the items detected.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
+ const struct rte_flow_action *action,
+ const uint64_t item_flags,
+ struct rte_flow_error *error)
+{
+ int ret = 0;
+
+ ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
+ if (!ret) {
+ if (!(item_flags & MLX5_FLOW_LAYER_L3))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "no IP protocol in pattern");
+ }
+ return ret;
+}
+
+/**
+ * Find existing modify-header resource or create and register a new one.
+ *
+ * @param[in, out] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] resource
+ * Pointer to modify-header resource.
+ * @param[in, out] dev_flow
+ * Pointer to the dev_flow.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_modify_hdr_resource_register
+ (struct rte_eth_dev *dev,
+ struct mlx5_flow_dv_modify_hdr_resource *resource,
+ struct mlx5_flow *dev_flow,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
+
+ /* Lookup a matching resource from cache. */
+ LIST_FOREACH(cache_resource, &priv->modify_cmds, next) {
+ if (resource->ft_type == cache_resource->ft_type &&
+ resource->actions_num == cache_resource->actions_num &&
+ !memcmp((const void *)resource->actions,
+ (const void *)cache_resource->actions,
+ (resource->actions_num *
+ sizeof(resource->actions[0])))) {
+ DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
+ (void *)cache_resource,
+ rte_atomic32_read(&cache_resource->refcnt));
+ rte_atomic32_inc(&cache_resource->refcnt);
+ dev_flow->dv.modify_hdr = cache_resource;
+ return 0;
+ }
+ }
+ /* Register new modify-header resource. */
+ cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
+ if (!cache_resource)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate resource memory");
+ *cache_resource = *resource;
+ cache_resource->verbs_action =
+ mlx5_glue->dv_create_flow_action_modify_header
+ (priv->sh->ctx,
+ cache_resource->actions_num *
+ sizeof(cache_resource->actions[0]),
+ (uint64_t *)cache_resource->actions,
+ cache_resource->ft_type);
+ if (!cache_resource->verbs_action) {
+ rte_free(cache_resource);
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot create action");
+ }
+ rte_atomic32_init(&cache_resource->refcnt);
+ rte_atomic32_inc(&cache_resource->refcnt);
+ LIST_INSERT_HEAD(&priv->modify_cmds, cache_resource, next);
+ dev_flow->dv.modify_hdr = cache_resource;
+ DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
+ (void *)cache_resource,
+ rte_atomic32_read(&cache_resource->refcnt));
+ return 0;
+}
+
+/**
+ * Get or create a flow counter.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] shared
+ * Indicate if this counter is shared with other flows.
+ * @param[in] id
+ * Counter identifier.
+ *
+ * @return
+ * Pointer to flow counter on success, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_flow_counter *
+flow_dv_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_counter *cnt = NULL;
+ struct mlx5_devx_counter_set *dcs = NULL;
+ int ret;
+
+ if (!priv->config.devx) {
+ ret = -ENOTSUP;
+ goto error_exit;
+ }
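+ /* A shared counter with a matching id is reused; only its reference
+ * count is bumped.
+ */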
+ if (shared) {
+ LIST_FOREACH(cnt, &priv->flow_counters, next) {
+ if (cnt->shared && cnt->id == id) {
+ cnt->ref_cnt++;
+ return cnt;
+ }
+ }
+ }
+ cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
+ dcs = rte_calloc(__func__, 1, sizeof(*dcs), 0);
+ if (!dcs || !cnt) {
+ ret = -ENOMEM;
+ goto error_exit;
+ }
+ ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs);
+ if (ret)
+ goto error_exit;
+ struct mlx5_flow_counter tmpl = {
+ .shared = shared,
+ .ref_cnt = 1,
+ .id = id,
+ .dcs = dcs,
+ };
+ *cnt = tmpl;
+ LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
+ return cnt;
+error_exit:
+ rte_free(cnt);
+ rte_free(dcs);
+ rte_errno = -ret;
+ return NULL;
+}
+
+/**
+ * Release a flow counter.
+ *
+ * @param[in] counter
+ * Pointer to the counter handler.
+ */
+static void
+flow_dv_counter_release(struct mlx5_flow_counter *counter)
+{
+ int ret;
+
+ if (!counter)
+ return;
+ if (--counter->ref_cnt == 0) {
+ ret = mlx5_devx_cmd_flow_counter_free(counter->dcs->obj);
+ if (ret)
+ DRV_LOG(ERR, "Failed to free devx counters, %d", ret);
+ LIST_REMOVE(counter, next);
+ rte_free(counter->dcs);
+ rte_free(counter);
+ }
+}
+
+/**
+ * Verify the @p attributes will be correctly understood by the NIC and store
+ * them in the @p flow if everything is correct.
+ *
+ * @param[in] dev
+ * Pointer to dev struct.
+ * @param[in] attributes
+ * Pointer to flow attributes
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_attributes(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attributes,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t priority_max = priv->config.flow_prio - 1;
+
+ if (attributes->group)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL,
+ "groups is not supported");
+ if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
+ attributes->priority >= priority_max)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ NULL,
+ "priority out of range");
+ if (attributes->transfer)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ NULL,
+ "transfer is not supported");
+ if (!(attributes->egress ^ attributes->ingress))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR, NULL,
+ "must specify exactly one of "
+ "ingress or egress");
+ return 0;
+}
+
+/**
+ * Internal validation function. For validating both actions and items.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ int ret;
+ uint64_t action_flags = 0;
+ uint64_t item_flags = 0;
+ uint64_t last_item = 0;
+ int tunnel = 0;
+ uint8_t next_protocol = 0xff;
+ int actions_n = 0;
+
+ if (items == NULL)
+ return -1;
+ ret = flow_dv_validate_attributes(dev, attr, error);
+ if (ret < 0)
+ return ret;
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
switch (items->type) {
case RTE_FLOW_ITEM_TYPE_VOID:
error);
if (ret < 0)
return ret;
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
- MLX5_FLOW_LAYER_OUTER_L2;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
ret = mlx5_flow_validate_item_vlan(items, item_flags,
error);
if (ret < 0)
return ret;
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
- MLX5_FLOW_LAYER_OUTER_VLAN;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+ MLX5_FLOW_LAYER_OUTER_VLAN;
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
ret = mlx5_flow_validate_item_ipv4(items, item_flags,
- error);
+ NULL, error);
if (ret < 0)
return ret;
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
- MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4;
if (items->mask != NULL &&
((const struct rte_flow_item_ipv4 *)
- items->mask)->hdr.next_proto_id)
+ items->mask)->hdr.next_proto_id) {
next_protocol =
((const struct rte_flow_item_ipv4 *)
(items->spec))->hdr.next_proto_id;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv4 *)
+ (items->mask))->hdr.next_proto_id;
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
+ }
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
ret = mlx5_flow_validate_item_ipv6(items, item_flags,
- error);
+ NULL, error);
if (ret < 0)
return ret;
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
- MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6;
if (items->mask != NULL &&
((const struct rte_flow_item_ipv6 *)
- items->mask)->hdr.proto)
+ items->mask)->hdr.proto) {
next_protocol =
((const struct rte_flow_item_ipv6 *)
items->spec)->hdr.proto;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv6 *)
+ items->mask)->hdr.proto;
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
+ }
break;
case RTE_FLOW_ITEM_TYPE_TCP:
ret = mlx5_flow_validate_item_tcp
error);
if (ret < 0)
return ret;
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
- MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
break;
case RTE_FLOW_ITEM_TYPE_UDP:
ret = mlx5_flow_validate_item_udp(items, item_flags,
error);
if (ret < 0)
return ret;
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
- MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+ MLX5_FLOW_LAYER_OUTER_L4_UDP;
break;
case RTE_FLOW_ITEM_TYPE_GRE:
case RTE_FLOW_ITEM_TYPE_NVGRE:
next_protocol, error);
if (ret < 0)
return ret;
- item_flags |= MLX5_FLOW_LAYER_GRE;
+ last_item = MLX5_FLOW_LAYER_GRE;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
ret = mlx5_flow_validate_item_vxlan(items, item_flags,
error);
if (ret < 0)
return ret;
- item_flags |= MLX5_FLOW_LAYER_VXLAN;
+ last_item = MLX5_FLOW_LAYER_VXLAN;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
ret = mlx5_flow_validate_item_vxlan_gpe(items,
error);
if (ret < 0)
return ret;
- item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
+ last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ ret = mlx5_flow_validate_item_mpls(dev, items,
+ item_flags,
+ last_item, error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_MPLS;
break;
case RTE_FLOW_ITEM_TYPE_META:
ret = flow_dv_validate_item_meta(dev, items, attr,
error);
if (ret < 0)
return ret;
- item_flags |= MLX5_FLOW_ITEM_METADATA;
+ last_item = MLX5_FLOW_ITEM_METADATA;
break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "item not supported");
}
+ item_flags |= last_item;
}
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
++actions_n;
break;
case RTE_FLOW_ACTION_TYPE_COUNT:
- ret = mlx5_flow_validate_action_count(dev, attr, error);
+ ret = flow_dv_validate_action_count(dev, error);
if (ret < 0)
return ret;
action_flags |= MLX5_FLOW_ACTION_COUNT;
action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
++actions_n;
break;
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
+ ret = flow_dv_validate_action_modify_mac(action_flags,
+ actions,
+ item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
+ MLX5_FLOW_ACTION_SET_MAC_SRC :
+ MLX5_FLOW_ACTION_SET_MAC_DST;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
+ ret = flow_dv_validate_action_modify_ipv4(action_flags,
+ actions,
+ item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
+ MLX5_FLOW_ACTION_SET_IPV4_SRC :
+ MLX5_FLOW_ACTION_SET_IPV4_DST;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
+ ret = flow_dv_validate_action_modify_ipv6(action_flags,
+ actions,
+ item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
+ MLX5_FLOW_ACTION_SET_IPV6_SRC :
+ MLX5_FLOW_ACTION_SET_IPV6_DST;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+ ret = flow_dv_validate_action_modify_tp(action_flags,
+ actions,
+ item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
+ MLX5_FLOW_ACTION_SET_TP_SRC :
+ MLX5_FLOW_ACTION_SET_TP_DST;
+ break;
+ case RTE_FLOW_ACTION_TYPE_DEC_TTL:
+ case RTE_FLOW_ACTION_TYPE_SET_TTL:
+ ret = flow_dv_validate_action_modify_ttl(action_flags,
+ actions,
+ item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_SET_TTL ?
+ MLX5_FLOW_ACTION_SET_TTL :
+ MLX5_FLOW_ACTION_DEC_TTL;
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
vni_v[i] = vni_m[i] & vxlan_v->vni[i];
}
+/**
+ * Add MPLS item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] prev_layer
+ * The protocol layer indicated in previous item.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_mpls(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ uint64_t prev_layer,
+ int inner)
+{
+ const uint32_t *in_mpls_m = item->mask;
+ const uint32_t *in_mpls_v = item->spec;
+ uint32_t *out_mpls_m = 0;
+ uint32_t *out_mpls_v = 0;
+ void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+ void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ misc_parameters_2);
+ void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
+ void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
+ void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+
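+ /*
+ * Force a match on the encapsulating protocol (UDP destination port,
+ * GRE protocol or IP protocol) according to the previous layer, then
+ * match the first MPLS label stack entry itself when a spec is given.
+ */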
+ switch (prev_layer) {
+ case MLX5_FLOW_LAYER_OUTER_L4_UDP:
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+ MLX5_UDP_PORT_MPLS);
+ break;
+ case MLX5_FLOW_LAYER_GRE:
+ MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
+ MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
+ ETHER_TYPE_MPLS);
+ break;
+ default:
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+ IPPROTO_MPLS);
+ break;
+ }
+ if (!in_mpls_v)
+ return;
+ if (!in_mpls_m)
+ in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
+ switch (prev_layer) {
+ case MLX5_FLOW_LAYER_OUTER_L4_UDP:
+ out_mpls_m =
+ (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
+ outer_first_mpls_over_udp);
+ out_mpls_v =
+ (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
+ outer_first_mpls_over_udp);
+ break;
+ case MLX5_FLOW_LAYER_GRE:
+ out_mpls_m =
+ (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
+ outer_first_mpls_over_gre);
+ out_mpls_v =
+ (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
+ outer_first_mpls_over_gre);
+ break;
+ default:
+ /* Inner MPLS not over GRE is not supported. */
+ if (!inner) {
+ out_mpls_m =
+ (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
+ misc2_m,
+ outer_first_mpls);
+ out_mpls_v =
+ (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
+ misc2_v,
+ outer_first_mpls);
+ }
+ break;
+ }
+ if (out_mpls_m && out_mpls_v) {
+ *out_mpls_m = *in_mpls_m;
+ *out_mpls_v = *in_mpls_v & *in_mpls_m;
+ }
+}
+
/**
* Add META item to matcher
*
struct mlx5_flow *dev_flow,
struct rte_flow_error *error)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_dv_matcher *cache_matcher;
struct mlx5dv_flow_matcher_attr dv_attr = {
.type = IBV_FLOW_ATTR_NORMAL,
if (matcher->egress)
dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
cache_matcher->matcher_object =
- mlx5_glue->dv_create_flow_matcher(priv->ctx, &dv_attr);
+ mlx5_glue->dv_create_flow_matcher(priv->sh->ctx, &dv_attr);
if (!cache_matcher->matcher_object) {
rte_free(cache_matcher);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create matcher");
- }
- rte_atomic32_inc(&cache_matcher->refcnt);
- LIST_INSERT_HEAD(&priv->matchers, cache_matcher, next);
- dev_flow->dv.matcher = cache_matcher;
- DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
- cache_matcher->priority,
- cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
- rte_atomic32_read(&cache_matcher->refcnt));
- return 0;
-}
-
-/**
- * Fill the flow with DV spec.
- *
- * @param[in] dev
- * Pointer to rte_eth_dev structure.
- * @param[in, out] dev_flow
- * Pointer to the sub flow.
- * @param[in] attr
- * Pointer to the flow attributes.
- * @param[in] items
- * Pointer to the list of items.
- * @param[in] actions
- * Pointer to the list of actions.
- * @param[out] error
- * Pointer to the error structure.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_ernno is set.
- */
-static int
-flow_dv_translate(struct rte_eth_dev *dev,
- struct mlx5_flow *dev_flow,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item items[],
- const struct rte_flow_action actions[],
- struct rte_flow_error *error)
-{
- struct priv *priv = dev->data->dev_private;
- struct rte_flow *flow = dev_flow->flow;
- uint64_t item_flags = 0;
- uint64_t action_flags = 0;
- uint64_t priority = attr->priority;
- struct mlx5_flow_dv_matcher matcher = {
- .mask = {
- .size = sizeof(matcher.mask.buf),
- },
- };
- int actions_n = 0;
-
- if (priority == MLX5_FLOW_PRIO_RSVD)
- priority = priv->config.flow_prio - 1;
- for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
- int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
- void *match_mask = matcher.mask.buf;
- void *match_value = dev_flow->dv.value.buf;
-
- switch (items->type) {
- case RTE_FLOW_ITEM_TYPE_ETH:
- flow_dv_translate_item_eth(match_mask, match_value,
- items, tunnel);
- matcher.priority = MLX5_PRIORITY_MAP_L2;
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
- MLX5_FLOW_LAYER_OUTER_L2;
- break;
- case RTE_FLOW_ITEM_TYPE_VLAN:
- flow_dv_translate_item_vlan(match_mask, match_value,
- items, tunnel);
- matcher.priority = MLX5_PRIORITY_MAP_L2;
- item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
- MLX5_FLOW_LAYER_INNER_VLAN) :
- (MLX5_FLOW_LAYER_OUTER_L2 |
- MLX5_FLOW_LAYER_OUTER_VLAN);
- break;
- case RTE_FLOW_ITEM_TYPE_IPV4:
- flow_dv_translate_item_ipv4(match_mask, match_value,
- items, tunnel);
- matcher.priority = MLX5_PRIORITY_MAP_L3;
- dev_flow->dv.hash_fields |=
- mlx5_flow_hashfields_adjust
- (dev_flow, tunnel,
- MLX5_IPV4_LAYER_TYPES,
- MLX5_IPV4_IBV_RX_HASH);
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
- MLX5_FLOW_LAYER_OUTER_L3_IPV4;
- break;
- case RTE_FLOW_ITEM_TYPE_IPV6:
- flow_dv_translate_item_ipv6(match_mask, match_value,
- items, tunnel);
- matcher.priority = MLX5_PRIORITY_MAP_L3;
- dev_flow->dv.hash_fields |=
- mlx5_flow_hashfields_adjust
- (dev_flow, tunnel,
- MLX5_IPV6_LAYER_TYPES,
- MLX5_IPV6_IBV_RX_HASH);
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
- MLX5_FLOW_LAYER_OUTER_L3_IPV6;
- break;
- case RTE_FLOW_ITEM_TYPE_TCP:
- flow_dv_translate_item_tcp(match_mask, match_value,
- items, tunnel);
- matcher.priority = MLX5_PRIORITY_MAP_L4;
- dev_flow->dv.hash_fields |=
- mlx5_flow_hashfields_adjust
- (dev_flow, tunnel, ETH_RSS_TCP,
- IBV_RX_HASH_SRC_PORT_TCP |
- IBV_RX_HASH_DST_PORT_TCP);
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
- MLX5_FLOW_LAYER_OUTER_L4_TCP;
- break;
- case RTE_FLOW_ITEM_TYPE_UDP:
- flow_dv_translate_item_udp(match_mask, match_value,
- items, tunnel);
- matcher.priority = MLX5_PRIORITY_MAP_L4;
- dev_flow->dv.hash_fields |=
- mlx5_flow_hashfields_adjust
- (dev_flow, tunnel, ETH_RSS_UDP,
- IBV_RX_HASH_SRC_PORT_UDP |
- IBV_RX_HASH_DST_PORT_UDP);
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
- MLX5_FLOW_LAYER_OUTER_L4_UDP;
- break;
- case RTE_FLOW_ITEM_TYPE_GRE:
- flow_dv_translate_item_gre(match_mask, match_value,
- items, tunnel);
- item_flags |= MLX5_FLOW_LAYER_GRE;
- break;
- case RTE_FLOW_ITEM_TYPE_NVGRE:
- flow_dv_translate_item_nvgre(match_mask, match_value,
- items, tunnel);
- item_flags |= MLX5_FLOW_LAYER_GRE;
- break;
- case RTE_FLOW_ITEM_TYPE_VXLAN:
- flow_dv_translate_item_vxlan(match_mask, match_value,
- items, tunnel);
- item_flags |= MLX5_FLOW_LAYER_VXLAN;
- break;
- case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
- flow_dv_translate_item_vxlan(match_mask, match_value,
- items, tunnel);
- item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
- break;
- case RTE_FLOW_ITEM_TYPE_META:
- flow_dv_translate_item_meta(match_mask, match_value,
- items);
- item_flags |= MLX5_FLOW_ITEM_METADATA;
- break;
- default:
- break;
- }
- }
- assert(!flow_dv_check_valid_spec(matcher.mask.buf,
- dev_flow->dv.value.buf));
- dev_flow->layers = item_flags;
- /* Register matcher. */
- matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
- matcher.mask.size);
- matcher.priority = mlx5_flow_adjust_priority(dev, priority,
- matcher.priority);
- matcher.egress = attr->egress;
- if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
- return -rte_errno;
- for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ }
+ rte_atomic32_inc(&cache_matcher->refcnt);
+ LIST_INSERT_HEAD(&priv->matchers, cache_matcher, next);
+ dev_flow->dv.matcher = cache_matcher;
+ DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
+ cache_matcher->priority,
+ cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
+ rte_atomic32_read(&cache_matcher->refcnt));
+ return 0;
+}
+
+/**
+ * Add source vport match to the specified matcher.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] port
+ * Source vport value to match.
+ * @param[in] mask
+ * Mask to apply to the source vport value.
+ */
+static void
+flow_dv_translate_source_vport(void *matcher, void *key,
+ int16_t port, uint16_t mask)
+{
+ void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+
+ MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
+ MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
+}
+
+/**
+ * Fill the flow with DV spec.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] dev_flow
+ * Pointer to the sub flow.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_translate(struct rte_eth_dev *dev,
+ struct mlx5_flow *dev_flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow *flow = dev_flow->flow;
+ uint64_t item_flags = 0;
+ uint64_t last_item = 0;
+ uint64_t action_flags = 0;
+ uint64_t priority = attr->priority;
+ struct mlx5_flow_dv_matcher matcher = {
+ .mask = {
+ .size = sizeof(matcher.mask.buf),
+ },
+ };
+ int actions_n = 0;
+ bool actions_end = false;
+ struct mlx5_flow_dv_modify_hdr_resource res = {
+ .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
+ MLX5DV_FLOW_TABLE_TYPE_NIC_RX
+ };
+ union flow_dv_attr flow_attr = { .attr = 0 };
+
+ if (priority == MLX5_FLOW_PRIO_RSVD)
+ priority = priv->config.flow_prio - 1;
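+ /*
+ * Translate the actions first; the modify-header conversions walk the
+ * raw item list through flow_dv_attr_init() when needed, and the
+ * accumulated commands are registered as one device action once
+ * RTE_FLOW_ACTION_TYPE_END is reached.
+ */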
+ for (; !actions_end ; actions++) {
const struct rte_flow_action_queue *queue;
const struct rte_flow_action_rss *rss;
const struct rte_flow_action *action = actions;
+ const struct rte_flow_action_count *count = action->conf;
const uint8_t *rss_key;
switch (actions->type) {
flow->rss.level = rss->level;
action_flags |= MLX5_FLOW_ACTION_RSS;
break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ if (!priv->config.devx) {
+ rte_errno = ENOTSUP;
+ goto cnt_err;
+ }
+ flow->counter =
+ flow_dv_counter_new(dev,
+ count->shared, count->id);
+ if (flow->counter == NULL)
+ goto cnt_err;
+ dev_flow->dv.actions[actions_n].type =
+ MLX5DV_FLOW_ACTION_COUNTER_DEVX;
+ dev_flow->dv.actions[actions_n].obj =
+ flow->counter->dcs->obj;
+ action_flags |= MLX5_FLOW_ACTION_COUNT;
+ ++actions_n;
+ break;
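+ /* The label below is reached only through the goto statements above. */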
+cnt_err:
+ if (rte_errno == ENOTSUP)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "count action not supported");
+ else
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "cannot create counter"
+ " object.");
case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
if (flow_dv_create_action_l2_encap(dev, actions,
/* If decap is followed by encap, handle it at encap. */
action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
break;
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
+ if (flow_dv_convert_action_modify_mac(&res, actions,
+ error))
+ return -rte_errno;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
+ MLX5_FLOW_ACTION_SET_MAC_SRC :
+ MLX5_FLOW_ACTION_SET_MAC_DST;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
+ if (flow_dv_convert_action_modify_ipv4(&res, actions,
+ error))
+ return -rte_errno;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
+ MLX5_FLOW_ACTION_SET_IPV4_SRC :
+ MLX5_FLOW_ACTION_SET_IPV4_DST;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
+ if (flow_dv_convert_action_modify_ipv6(&res, actions,
+ error))
+ return -rte_errno;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
+ MLX5_FLOW_ACTION_SET_IPV6_SRC :
+ MLX5_FLOW_ACTION_SET_IPV6_DST;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+ if (flow_dv_convert_action_modify_tp(&res, actions,
+ items, &flow_attr,
+ error))
+ return -rte_errno;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
+ MLX5_FLOW_ACTION_SET_TP_SRC :
+ MLX5_FLOW_ACTION_SET_TP_DST;
+ break;
+ case RTE_FLOW_ACTION_TYPE_DEC_TTL:
+ if (flow_dv_convert_action_modify_dec_ttl(&res, items,
+ &flow_attr,
+ error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_TTL:
+ if (flow_dv_convert_action_modify_ttl(&res, actions,
+ items, &flow_attr,
+ error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_SET_TTL;
+ break;
+ case RTE_FLOW_ACTION_TYPE_END:
+ actions_end = true;
+ if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
+ /* create modify action if needed. */
+ if (flow_dv_modify_hdr_resource_register
+ (dev, &res,
+ dev_flow,
+ error))
+ return -rte_errno;
+ dev_flow->dv.actions[actions_n].type =
+ MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
+ dev_flow->dv.actions[actions_n].action =
+ dev_flow->dv.modify_hdr->verbs_action;
+ actions_n++;
+ }
+ break;
default:
break;
}
}
dev_flow->dv.actions_n = actions_n;
flow->actions = action_flags;
+ if (attr->ingress && !attr->transfer &&
+ (priv->representor || priv->master)) {
+ /* It was validated - we support unidirection flows only. */
+ assert(!attr->egress);
+ /*
+ * Add matching on source vport index only
+ * for ingress rules in E-Switch configurations.
+ */
+ flow_dv_translate_source_vport(matcher.mask.buf,
+ dev_flow->dv.value.buf,
+ priv->vport_id,
+ 0xffff);
+ }
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ void *match_mask = matcher.mask.buf;
+ void *match_value = dev_flow->dv.value.buf;
+
+ switch (items->type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ flow_dv_translate_item_eth(match_mask, match_value,
+ items, tunnel);
+ matcher.priority = MLX5_PRIORITY_MAP_L2;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ flow_dv_translate_item_vlan(match_mask, match_value,
+ items, tunnel);
+ matcher.priority = MLX5_PRIORITY_MAP_L2;
+ last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
+ MLX5_FLOW_LAYER_INNER_VLAN) :
+ (MLX5_FLOW_LAYER_OUTER_L2 |
+ MLX5_FLOW_LAYER_OUTER_VLAN);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ flow_dv_translate_item_ipv4(match_mask, match_value,
+ items, tunnel);
+ matcher.priority = MLX5_PRIORITY_MAP_L3;
+ dev_flow->dv.hash_fields |=
+ mlx5_flow_hashfields_adjust
+ (dev_flow, tunnel,
+ MLX5_IPV4_LAYER_TYPES,
+ MLX5_IPV4_IBV_RX_HASH);
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ flow_dv_translate_item_ipv6(match_mask, match_value,
+ items, tunnel);
+ matcher.priority = MLX5_PRIORITY_MAP_L3;
+ dev_flow->dv.hash_fields |=
+ mlx5_flow_hashfields_adjust
+ (dev_flow, tunnel,
+ MLX5_IPV6_LAYER_TYPES,
+ MLX5_IPV6_IBV_RX_HASH);
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ flow_dv_translate_item_tcp(match_mask, match_value,
+ items, tunnel);
+ matcher.priority = MLX5_PRIORITY_MAP_L4;
+ dev_flow->dv.hash_fields |=
+ mlx5_flow_hashfields_adjust
+ (dev_flow, tunnel, ETH_RSS_TCP,
+ IBV_RX_HASH_SRC_PORT_TCP |
+ IBV_RX_HASH_DST_PORT_TCP);
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ flow_dv_translate_item_udp(match_mask, match_value,
+ items, tunnel);
+ matcher.priority = MLX5_PRIORITY_MAP_L4;
+ dev_flow->dv.hash_fields |=
+ mlx5_flow_hashfields_adjust
+ (dev_flow, tunnel, ETH_RSS_UDP,
+ IBV_RX_HASH_SRC_PORT_UDP |
+ IBV_RX_HASH_DST_PORT_UDP);
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+ MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ flow_dv_translate_item_gre(match_mask, match_value,
+ items, tunnel);
+ last_item = MLX5_FLOW_LAYER_GRE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ flow_dv_translate_item_nvgre(match_mask, match_value,
+ items, tunnel);
+ last_item = MLX5_FLOW_LAYER_GRE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ flow_dv_translate_item_vxlan(match_mask, match_value,
+ items, tunnel);
+ last_item = MLX5_FLOW_LAYER_VXLAN;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ flow_dv_translate_item_vxlan(match_mask, match_value,
+ items, tunnel);
+ last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ flow_dv_translate_item_mpls(match_mask, match_value,
+ items, last_item, tunnel);
+ last_item = MLX5_FLOW_LAYER_MPLS;
+ break;
+ case RTE_FLOW_ITEM_TYPE_META:
+ flow_dv_translate_item_meta(match_mask, match_value,
+ items);
+ last_item = MLX5_FLOW_ITEM_METADATA;
+ break;
+ default:
+ break;
+ }
+ item_flags |= last_item;
+ }
+ assert(!flow_dv_check_valid_spec(matcher.mask.buf,
+ dev_flow->dv.value.buf));
+ dev_flow->layers = item_flags;
+ /* Register matcher. */
+ matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
+ matcher.mask.size);
+ matcher.priority = mlx5_flow_adjust_priority(dev, priority,
+ matcher.priority);
+ matcher.egress = attr->egress;
+ if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
+ return -rte_errno;
return 0;
}
} else if (flow->actions &
(MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
struct mlx5_hrxq *hrxq;
+
hrxq = mlx5_hrxq_get(dev, flow->key,
MLX5_RSS_HASH_KEY_LEN,
dv->hash_fields,
return 1;
}
+/**
+ * Release a modify-header resource.
+ *
+ * @param flow
+ * Pointer to mlx5_flow.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
+{
+ struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
+ flow->dv.modify_hdr;
+
+ assert(cache_resource->verbs_action);
+ DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
+ (void *)cache_resource,
+ rte_atomic32_read(&cache_resource->refcnt));
+ if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
+ claim_zero(mlx5_glue->destroy_flow_action
+ (cache_resource->verbs_action));
+ LIST_REMOVE(cache_resource, next);
+ rte_free(cache_resource);
+ DRV_LOG(DEBUG, "modify-header resource %p: removed",
+ (void *)cache_resource);
+ return 0;
+ }
+ return 1;
+}
+
/**
* Remove the flow from the NIC but keeps it in memory.
*
dv->hrxq = NULL;
}
}
- if (flow->counter)
- flow->counter = NULL;
}
/**
if (!flow)
return;
flow_dv_remove(dev, flow);
+ if (flow->counter) {
+ flow_dv_counter_release(flow->counter);
+ flow->counter = NULL;
+ }
while (!LIST_EMPTY(&flow->dev_flows)) {
dev_flow = LIST_FIRST(&flow->dev_flows);
LIST_REMOVE(dev_flow, next);
flow_dv_matcher_release(dev, dev_flow);
if (dev_flow->dv.encap_decap)
flow_dv_encap_decap_resource_release(dev_flow);
+ if (dev_flow->dv.modify_hdr)
+ flow_dv_modify_hdr_resource_release(dev_flow);
rte_free(dev_flow);
}
}
+/**
+ * Query a dv flow rule for its statistics via devx.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] flow
+ * Pointer to the sub flow.
+ * @param[out] data
+ * Data retrieved by the query.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
+ void *data, struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct rte_flow_query_count *qc = data;
+ uint64_t pkts = 0;
+ uint64_t bytes = 0;
+ int err;
+
+ if (!priv->config.devx)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "counters are not supported");
+ if (flow->counter) {
+ err = mlx5_devx_cmd_flow_counter_query
+ (flow->counter->dcs,
+ qc->reset, &pkts, &bytes);
+ if (err)
+ return rte_flow_error_set
+ (error, err,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot read counters");
+ qc->hits_set = 1;
+ qc->bytes_set = 1;
+ qc->hits = pkts - flow->counter->hits;
+ qc->bytes = bytes - flow->counter->bytes;
+ if (qc->reset) {
+ flow->counter->hits = pkts;
+ flow->counter->bytes = bytes;
+ }
+ return 0;
+ }
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "counters are not available");
+}
+
/**
* Query a flow.
*
* @see rte_flow_ops
*/
static int
-flow_dv_query(struct rte_eth_dev *dev __rte_unused,
+flow_dv_query(struct rte_eth_dev *dev,
struct rte_flow *flow __rte_unused,
const struct rte_flow_action *actions __rte_unused,
void *data __rte_unused,
struct rte_flow_error *error __rte_unused)
{
- rte_errno = ENOTSUP;
- return -rte_errno;
+ int ret = -EINVAL;
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = flow_dv_query_count(dev, flow, data, error);
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ }
+ }
+ return ret;
}