* Copyright 2018 Mellanox Technologies, Ltd
*/
-
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>
+#include <rte_gre.h>
#include "mlx5.h"
#include "mlx5_defs.h"
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+/**
+ * Validate META item.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] item
+ * Item specification.
+ * @param[in] attr
+ * Attributes of flow that includes this item.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_item_meta(struct rte_eth_dev *dev,
+ const struct rte_flow_item *item,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_meta *spec = item->spec;
+ const struct rte_flow_item_meta *mask = item->mask;
+ const struct rte_flow_item_meta nic_mask = {
+ .data = RTE_BE32(UINT32_MAX)
+ };
+ int ret;
+ uint64_t offloads = dev->data->dev_conf.txmode.offloads;
+
+ if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
+ return rte_flow_error_set(error, EPERM,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL,
+ "match on metadata offload "
+ "configuration is off for this port");
+ if (!spec)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ item->spec,
+ "data cannot be empty");
+ if (!spec->data)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ NULL,
+ "data cannot be zero");
+ if (!mask)
+ mask = &rte_flow_item_meta_mask;
+ ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_meta),
+ error);
+ if (ret < 0)
+ return ret;
+ if (attr->ingress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ NULL,
+ "pattern not supported for ingress");
+ return 0;
+}
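+
+/*
+ * Illustrative usage sketch (editorial addition, not driver code): to
+ * pass the checks above, an application must enable
+ * DEV_TX_OFFLOAD_MATCH_METADATA in txmode.offloads and supply a
+ * non-zero spec on an egress flow, e.g.:
+ *
+ *   struct rte_flow_item_meta meta_spec = {
+ *           .data = RTE_BE32(0xcafe),
+ *   };
+ *   struct rte_flow_item pattern[] = {
+ *           { .type = RTE_FLOW_ITEM_TYPE_META, .spec = &meta_spec },
+ *           { .type = RTE_FLOW_ITEM_TYPE_END },
+ *   };
+ *
+ * The 0xcafe value is arbitrary; any non-zero metadata is accepted.
+ */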
+
+/**
+ * Validate the L2 encap action.
+ *
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[in] action
+ * Pointer to the encap action.
+ * @param[in] attr
+ * Pointer to flow attributes.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_l2_encap(uint64_t action_flags,
+ const struct rte_flow_action *action,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ if (!action->conf)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "configuration cannot be null");
+ if (action_flags & MLX5_FLOW_ACTION_DROP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't drop and encap in same flow");
+ if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can only have a single encap or"
+ " decap action in a flow");
+ if (attr->ingress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ NULL,
+ "encap action not supported for "
+ "ingress");
+ return 0;
+}
+
+/**
+ * Validate the L2 decap action.
+ *
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[in] attr
+ * Pointer to flow attributes.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_l2_decap(uint64_t action_flags,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ if (action_flags & MLX5_FLOW_ACTION_DROP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't drop and decap in same flow");
+ if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can only have a single encap or"
+ " decap action in a flow");
+ if (attr->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ NULL,
+ "decap action not supported for "
+ "egress");
+ return 0;
+}
+
+/**
+ * Validate the raw encap action.
+ *
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[in] action
+ * Pointer to the encap action.
+ * @param[in] attr
+ * Pointer to flow attributes.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_raw_encap(uint64_t action_flags,
+ const struct rte_flow_action *action,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ if (!action->conf)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "configuration cannot be null");
+ if (action_flags & MLX5_FLOW_ACTION_DROP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't drop and encap in same flow");
+ if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can only have a single encap"
+ " action in a flow");
+ /* Encap without preceding decap is not supported for ingress. */
+ if (attr->ingress && !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ NULL,
+ "encap action not supported for "
+ "ingress");
+ return 0;
+}
+
+/**
+ * Validate the raw decap action.
+ *
+ * @param[in] action_flags
+ * Holds the actions detected until now.
+ * @param[in] action
+ * Pointer to the decap action.
+ * @param[in] attr
+ * Pointer to flow attributes.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_raw_decap(uint64_t action_flags,
+ const struct rte_flow_action *action,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ if (action_flags & MLX5_FLOW_ACTION_DROP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't drop and decap in same flow");
+ if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can't have encap action before"
+ " decap action");
+ if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "can only have a single decap"
+ " action in a flow");
+ /* Decap action is valid on egress only if it is followed by encap. */
+ if (attr->egress) {
+ for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
+ action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
+ action++) {
+ }
+ if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ NULL, "decap action not supported"
+ " for egress");
+ }
+ return 0;
+}
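+
+/*
+ * Illustrative sketch (editorial addition, assumed application usage):
+ * per the loop above, an egress raw decap is only accepted when a raw
+ * encap follows it in the same action list, e.g.:
+ *
+ *   struct rte_flow_action_raw_decap decap = { .size = l2_hdr_len };
+ *   struct rte_flow_action_raw_encap encap = {
+ *           .data = tun_hdr_buf,
+ *           .size = tun_hdr_len,
+ *   };
+ *   struct rte_flow_action actions[] = {
+ *           { .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP, .conf = &decap },
+ *           { .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &encap },
+ *           { .type = RTE_FLOW_ACTION_TYPE_END },
+ *   };
+ *
+ * l2_hdr_len, tun_hdr_buf and tun_hdr_len are hypothetical values
+ * provided by the application.
+ */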
+
+/**
+ * Find existing encap/decap resource or create and register a new one.
+ *
+ * @param[in, out] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] resource
+ * Pointer to encap/decap resource.
+ * @param[in, out] dev_flow
+ * Pointer to the dev_flow.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_encap_decap_resource_register
+ (struct rte_eth_dev *dev,
+ struct mlx5_flow_dv_encap_decap_resource *resource,
+ struct mlx5_flow *dev_flow,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct mlx5_flow_dv_encap_decap_resource *cache_resource;
+
+ /* Lookup a matching resource from cache. */
+ LIST_FOREACH(cache_resource, &priv->encaps_decaps, next) {
+ if (resource->reformat_type == cache_resource->reformat_type &&
+ resource->ft_type == cache_resource->ft_type &&
+ resource->size == cache_resource->size &&
+ !memcmp((const void *)resource->buf,
+ (const void *)cache_resource->buf,
+ resource->size)) {
+ DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
+ (void *)cache_resource,
+ rte_atomic32_read(&cache_resource->refcnt));
+ rte_atomic32_inc(&cache_resource->refcnt);
+ dev_flow->dv.encap_decap = cache_resource;
+ return 0;
+ }
+ }
+ /* Register new encap/decap resource. */
+ cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
+ if (!cache_resource)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate resource memory");
+ *cache_resource = *resource;
+ cache_resource->verbs_action =
+ mlx5_glue->dv_create_flow_action_packet_reformat
+ (priv->ctx, cache_resource->size,
+ (cache_resource->size ? cache_resource->buf : NULL),
+ cache_resource->reformat_type,
+ cache_resource->ft_type);
+ if (!cache_resource->verbs_action) {
+ rte_free(cache_resource);
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot create action");
+ }
+ rte_atomic32_init(&cache_resource->refcnt);
+ rte_atomic32_inc(&cache_resource->refcnt);
+ LIST_INSERT_HEAD(&priv->encaps_decaps, cache_resource, next);
+ dev_flow->dv.encap_decap = cache_resource;
+ DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
+ (void *)cache_resource,
+ rte_atomic32_read(&cache_resource->refcnt));
+ return 0;
+}
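+
+/*
+ * Design note (editorial): resources are cached per port and matched by
+ * the (reformat_type, ft_type, size, buf) tuple, so registering two
+ * flows with identical encap data shares a single verbs action, e.g.:
+ *
+ *   flow_dv_encap_decap_resource_register(dev, &res, dev_flow1, error);
+ *   flow_dv_encap_decap_resource_register(dev, &res, dev_flow2, error);
+ *
+ * Both dev_flows then point at the same cache_resource with refcnt == 2
+ * and the verbs action is destroyed only when the last reference goes.
+ */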
+
+/**
+ * Get the size of a specific rte_flow_item_type.
+ *
+ * @param[in] item_type
+ * Tested rte_flow_item_type.
+ *
+ * @return
+ * Size of the item type's structure, 0 if void or irrelevant.
+ */
+static size_t
+flow_dv_get_item_len(const enum rte_flow_item_type item_type)
+{
+ size_t retval;
+
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ retval = sizeof(struct rte_flow_item_eth);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ retval = sizeof(struct rte_flow_item_vlan);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ retval = sizeof(struct rte_flow_item_ipv4);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ retval = sizeof(struct rte_flow_item_ipv6);
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ retval = sizeof(struct rte_flow_item_udp);
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ retval = sizeof(struct rte_flow_item_tcp);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ retval = sizeof(struct rte_flow_item_vxlan);
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ retval = sizeof(struct rte_flow_item_gre);
+ break;
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ retval = sizeof(struct rte_flow_item_nvgre);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ retval = sizeof(struct rte_flow_item_vxlan_gpe);
+ break;
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ retval = sizeof(struct rte_flow_item_mpls);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
+ default:
+ retval = 0;
+ break;
+ }
+ return retval;
+}
+
+#define MLX5_ENCAP_IPV4_VERSION 0x40
+#define MLX5_ENCAP_IPV4_IHL_MIN 0x05
+#define MLX5_ENCAP_IPV4_TTL_DEF 0x40
+#define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000
+#define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff
+#define MLX5_ENCAP_VXLAN_FLAGS 0x08000000
+#define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04
+
+/**
+ * Convert the encap action data from a list of rte_flow_item objects to
+ * a raw buffer.
+ *
+ * @param[in] items
+ * Pointer to rte_flow_item objects list.
+ * @param[out] buf
+ * Pointer to the output buffer.
+ * @param[out] size
+ * Pointer to the output buffer size.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
+ size_t *size, struct rte_flow_error *error)
+{
+ struct ether_hdr *eth = NULL;
+ struct vlan_hdr *vlan = NULL;
+ struct ipv4_hdr *ipv4 = NULL;
+ struct ipv6_hdr *ipv6 = NULL;
+ struct udp_hdr *udp = NULL;
+ struct vxlan_hdr *vxlan = NULL;
+ struct vxlan_gpe_hdr *vxlan_gpe = NULL;
+ struct gre_hdr *gre = NULL;
+ size_t len;
+ size_t temp_size = 0;
+
+ if (!items)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "invalid empty data");
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ len = flow_dv_get_item_len(items->type);
+ if (len + temp_size > MLX5_ENCAP_MAX_LEN)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ (void *)items->type,
+ "items total size is too big"
+ " for encap action");
+ rte_memcpy((void *)&buf[temp_size], items->spec, len);
+ switch (items->type) {
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ eth = (struct ether_hdr *)&buf[temp_size];
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ vlan = (struct vlan_hdr *)&buf[temp_size];
+ if (!eth)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ (void *)items->type,
+ "eth header not found");
+ if (!eth->ether_type)
+ eth->ether_type = RTE_BE16(ETHER_TYPE_VLAN);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ ipv4 = (struct ipv4_hdr *)&buf[temp_size];
+ if (!vlan && !eth)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ (void *)items->type,
+ "neither eth nor vlan"
+ " header found");
+ if (vlan && !vlan->eth_proto)
+ vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv4);
+ else if (eth && !eth->ether_type)
+ eth->ether_type = RTE_BE16(ETHER_TYPE_IPv4);
+ if (!ipv4->version_ihl)
+ ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
+ MLX5_ENCAP_IPV4_IHL_MIN;
+ if (!ipv4->time_to_live)
+ ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ ipv6 = (struct ipv6_hdr *)&buf[temp_size];
+ if (!vlan && !eth)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ (void *)items->type,
+ "neither eth nor vlan"
+ " header found");
+ if (vlan && !vlan->eth_proto)
+ vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv6);
+ else if (eth && !eth->ether_type)
+ eth->ether_type = RTE_BE16(ETHER_TYPE_IPv6);
+ if (!ipv6->vtc_flow)
+ ipv6->vtc_flow =
+ RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
+ if (!ipv6->hop_limits)
+ ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ udp = (struct udp_hdr *)&buf[temp_size];
+ if (!ipv4 && !ipv6)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ (void *)items->type,
+ "ip header not found");
+ if (ipv4 && !ipv4->next_proto_id)
+ ipv4->next_proto_id = IPPROTO_UDP;
+ else if (ipv6 && !ipv6->proto)
+ ipv6->proto = IPPROTO_UDP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ vxlan = (struct vxlan_hdr *)&buf[temp_size];
+ if (!udp)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ (void *)items->type,
+ "udp header not found");
+ if (!udp->dst_port)
+ udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
+ if (!vxlan->vx_flags)
+ vxlan->vx_flags =
+ RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ vxlan_gpe = (struct vxlan_gpe_hdr *)&buf[temp_size];
+ if (!udp)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ (void *)items->type,
+ "udp header not found");
+ if (!vxlan_gpe->proto)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ (void *)items->type,
+ "next protocol not found");
+ if (!udp->dst_port)
+ udp->dst_port =
+ RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
+ if (!vxlan_gpe->vx_flags)
+ vxlan_gpe->vx_flags =
+ MLX5_ENCAP_VXLAN_GPE_FLAGS;
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ gre = (struct gre_hdr *)&buf[temp_size];
+ if (!gre->proto)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ (void *)items->type,
+ "next protocol not found");
+ if (!ipv4 && !ipv6)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ (void *)items->type,
+ "ip header not found");
+ if (ipv4 && !ipv4->next_proto_id)
+ ipv4->next_proto_id = IPPROTO_GRE;
+ else if (ipv6 && !ipv6->proto)
+ ipv6->proto = IPPROTO_GRE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ break;
+ default:
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ (void *)items->type,
+ "unsupported item type");
+ }
+ temp_size += len;
+ }
+ *size = temp_size;
+ return 0;
+}
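+
+/*
+ * Illustrative sketch (editorial addition, assumed application usage):
+ * the item list flattened above typically comes from a VXLAN encap
+ * action definition such as:
+ *
+ *   struct rte_flow_item_eth eth = { .type = RTE_BE16(ETHER_TYPE_IPv4) };
+ *   struct rte_flow_item_ipv4 ipv4 = { .hdr.dst_addr = RTE_BE32(0x0a000001) };
+ *   struct rte_flow_item_udp udp = {
+ *           .hdr.dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN),
+ *   };
+ *   struct rte_flow_item_vxlan vxlan = { .vni = { 0, 0, 42 } };
+ *   struct rte_flow_item defn[] = {
+ *           { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
+ *           { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4 },
+ *           { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
+ *           { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
+ *           { .type = RTE_FLOW_ITEM_TYPE_END },
+ *   };
+ *   struct rte_flow_action_vxlan_encap conf = { .definition = defn };
+ *
+ * Header fields left unset (TTL, VXLAN flags, UDP destination port...)
+ * are filled in with the MLX5_ENCAP_* defaults defined above.
+ */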
+
+/**
+ * Convert L2 encap action to DV specification.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] action
+ * Pointer to action structure.
+ * @param[in, out] dev_flow
+ * Pointer to the mlx5_flow.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct mlx5_flow *dev_flow,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item *encap_data;
+ const struct rte_flow_action_raw_encap *raw_encap_data;
+ struct mlx5_flow_dv_encap_decap_resource res = {
+ .reformat_type =
+ MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
+ .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
+ };
+
+ if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
+ raw_encap_data =
+ (const struct rte_flow_action_raw_encap *)action->conf;
+ res.size = raw_encap_data->size;
+ memcpy(res.buf, raw_encap_data->data, res.size);
+ } else {
+ if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
+ encap_data =
+ ((const struct rte_flow_action_vxlan_encap *)
+ action->conf)->definition;
+ else
+ encap_data =
+ ((const struct rte_flow_action_nvgre_encap *)
+ action->conf)->definition;
+ if (flow_dv_convert_encap_data(encap_data, res.buf,
+ &res.size, error))
+ return -rte_errno;
+ }
+ if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "can't create L2 encap action");
+ return 0;
+}
+
+/**
+ * Convert L2 decap action to DV specification.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] dev_flow
+ * Pointer to the mlx5_flow.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
+ struct mlx5_flow *dev_flow,
+ struct rte_flow_error *error)
+{
+ struct mlx5_flow_dv_encap_decap_resource res = {
+ .size = 0,
+ .reformat_type =
+ MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
+ .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
+ };
+
+ if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "can't create L2 decap action");
+ return 0;
+}
+
+/**
+ * Convert raw decap/encap (L3 tunnel) action to DV specification.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] action
+ * Pointer to action structure.
+ * @param[in, out] dev_flow
+ * Pointer to the mlx5_flow.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct mlx5_flow *dev_flow,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action_raw_encap *encap_data;
+ struct mlx5_flow_dv_encap_decap_resource res;
+
+ encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
+ res.size = encap_data->size;
+ memcpy(res.buf, encap_data->data, res.size);
+ res.reformat_type = attr->egress ?
+ MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
+ MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
+ res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
+ MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
+ if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "can't create encap action");
+ return 0;
+}
+
/**
* Verify the @p attributes will be correctly understood by the NIC and store
* them in the @p flow if everything is correct.
struct rte_flow_error *error)
{
int ret;
- uint32_t action_flags = 0;
- uint32_t item_flags = 0;
+ uint64_t action_flags = 0;
+ uint64_t item_flags = 0;
int tunnel = 0;
uint8_t next_protocol = 0xff;
int actions_n = 0;
if (ret < 0)
return ret;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
switch (items->type) {
case RTE_FLOW_ITEM_TYPE_VOID:
break;
((const struct rte_flow_item_ipv6 *)
items->spec)->hdr.proto;
break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ ret = mlx5_flow_validate_item_tcp
+ (items, item_flags,
+ next_protocol,
+ &rte_flow_item_tcp_mask,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ break;
case RTE_FLOW_ITEM_TYPE_UDP:
ret = mlx5_flow_validate_item_udp(items, item_flags,
next_protocol,
item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
MLX5_FLOW_LAYER_OUTER_L4_UDP;
break;
- case RTE_FLOW_ITEM_TYPE_TCP:
- ret = mlx5_flow_validate_item_tcp
- (items, item_flags,
- next_protocol,
- &rte_flow_item_tcp_mask,
- error);
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ ret = mlx5_flow_validate_item_gre(items, item_flags,
+ next_protocol, error);
if (ret < 0)
return ret;
- item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
- MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ item_flags |= MLX5_FLOW_LAYER_GRE;
break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
ret = mlx5_flow_validate_item_vxlan(items, item_flags,
return ret;
item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
break;
- case RTE_FLOW_ITEM_TYPE_GRE:
- ret = mlx5_flow_validate_item_gre(items, item_flags,
- next_protocol, error);
- if (ret < 0)
- return ret;
- item_flags |= MLX5_FLOW_LAYER_GRE;
- break;
- case RTE_FLOW_ITEM_TYPE_MPLS:
- ret = mlx5_flow_validate_item_mpls(items, item_flags,
- next_protocol,
- error);
+ case RTE_FLOW_ITEM_TYPE_META:
+ ret = flow_dv_validate_item_meta(dev, items, attr,
+ error);
if (ret < 0)
return ret;
- item_flags |= MLX5_FLOW_LAYER_MPLS;
+ item_flags |= MLX5_FLOW_ITEM_METADATA;
break;
default:
return rte_flow_error_set(error, ENOTSUP,
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
actions, "too many actions");
- tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
switch (actions->type) {
case RTE_FLOW_ACTION_TYPE_VOID:
break;
action_flags |= MLX5_FLOW_ACTION_COUNT;
++actions_n;
break;
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ ret = flow_dv_validate_action_l2_encap(action_flags,
+ actions, attr,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
+ MLX5_FLOW_ACTION_VXLAN_ENCAP :
+ MLX5_FLOW_ACTION_NVGRE_ENCAP;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
+ ret = flow_dv_validate_action_l2_decap(action_flags,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
+ MLX5_FLOW_ACTION_VXLAN_DECAP :
+ MLX5_FLOW_ACTION_NVGRE_DECAP;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ ret = flow_dv_validate_action_raw_encap(action_flags,
+ actions, attr,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ ret = flow_dv_validate_action_raw_decap(action_flags,
+ actions, attr,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
+ ++actions_n;
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION,
char *l24_v;
uint8_t tos;
- if (!ipv4_v)
- return;
- if (!ipv4_m)
- ipv4_m = &nic_mask;
if (inner) {
headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
inner_headers);
}
MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
+ if (!ipv4_v)
+ return;
+ if (!ipv4_m)
+ ipv4_m = &nic_mask;
l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
int i;
int size;
- if (!ipv6_v)
- return;
- if (!ipv6_m)
- ipv6_m = &nic_mask;
if (inner) {
headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
inner_headers);
outer_headers);
headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
}
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
+ if (!ipv6_v)
+ return;
+ if (!ipv6_m)
+ ipv6_m = &nic_mask;
size = sizeof(ipv6_m->hdr.dst_addr);
l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
memcpy(l24_m, ipv6_m->hdr.src_addr, size);
for (i = 0; i < size; ++i)
l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
/* TOS. */
vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
void *headers_m;
void *headers_v;
- if (!tcp_v)
- return;
- if (!tcp_m)
- tcp_m = &rte_flow_item_tcp_mask;
if (inner) {
headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
inner_headers);
}
MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
+ if (!tcp_v)
+ return;
+ if (!tcp_m)
+ tcp_m = &rte_flow_item_tcp_mask;
MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
rte_be_to_cpu_16(tcp_m->hdr.src_port));
MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
void *headers_m;
void *headers_v;
- if (!udp_v)
- return;
- if (!udp_m)
- udp_m = &rte_flow_item_udp_mask;
if (inner) {
headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
inner_headers);
}
MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
+ if (!udp_v)
+ return;
+ if (!udp_m)
+ udp_m = &rte_flow_item_udp_mask;
MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
rte_be_to_cpu_16(udp_m->hdr.src_port));
MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
- if (!gre_v)
- return;
- if (!gre_m)
- gre_m = &rte_flow_item_gre_mask;
if (inner) {
headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
inner_headers);
}
MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
+ if (!gre_v)
+ return;
+ if (!gre_m)
+ gre_m = &rte_flow_item_gre_mask;
MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
rte_be_to_cpu_16(gre_m->protocol));
MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
int size;
int i;
+ flow_dv_translate_item_gre(matcher, key, item, inner);
if (!nvgre_v)
return;
if (!nvgre_m)
memcpy(gre_key_m, tni_flow_id_m, size);
for (i = 0; i < size; ++i)
gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
- flow_dv_translate_item_gre(matcher, key, item, inner);
}
/**
int size;
int i;
- if (!vxlan_v)
- return;
- if (!vxlan_m)
- vxlan_m = &rte_flow_item_vxlan_mask;
if (inner) {
headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
inner_headers);
MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
}
+ if (!vxlan_v)
+ return;
+ if (!vxlan_m)
+ vxlan_m = &rte_flow_item_vxlan_mask;
size = sizeof(vxlan_m->vni);
vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
vni_v[i] = vni_m[i] & vxlan_v->vni[i];
}
+/**
+ * Add META item to the matcher.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ */
+static void
+flow_dv_translate_item_meta(void *matcher, void *key,
+ const struct rte_flow_item *item)
+{
+ const struct rte_flow_item_meta *meta_m;
+ const struct rte_flow_item_meta *meta_v;
+ void *misc2_m =
+ MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
+ void *misc2_v =
+ MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
+
+ meta_m = (const void *)item->mask;
+ if (!meta_m)
+ meta_m = &rte_flow_item_meta_mask;
+ meta_v = (const void *)item->spec;
+ if (meta_v) {
+ MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
+ rte_be_to_cpu_32(meta_m->data));
+ MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
+ rte_be_to_cpu_32(meta_v->data & meta_m->data));
+ }
+}
+
/**
* Update the matcher and the value based the selected item.
*
struct mlx5_flow_dv_matcher *tmatcher = matcher;
switch (item->type) {
- case RTE_FLOW_ITEM_TYPE_VOID:
- case RTE_FLOW_ITEM_TYPE_END:
- break;
case RTE_FLOW_ITEM_TYPE_ETH:
flow_dv_translate_item_eth(tmatcher->mask.buf, key, item,
inner);
tmatcher->priority = MLX5_PRIORITY_MAP_L4;
dev_flow->verbs.hash_fields |=
mlx5_flow_hashfields_adjust(dev_flow, inner,
- ETH_RSS_TCP,
- (IBV_RX_HASH_SRC_PORT_TCP |
- IBV_RX_HASH_DST_PORT_TCP));
- break;
- case RTE_FLOW_ITEM_TYPE_NVGRE:
- flow_dv_translate_item_nvgre(tmatcher->mask.buf, key, item,
- inner);
+ ETH_RSS_UDP,
+ (IBV_RX_HASH_SRC_PORT_UDP |
+ IBV_RX_HASH_DST_PORT_UDP));
break;
case RTE_FLOW_ITEM_TYPE_GRE:
flow_dv_translate_item_gre(tmatcher->mask.buf, key, item,
inner);
break;
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ flow_dv_translate_item_nvgre(tmatcher->mask.buf, key, item,
+ inner);
+ break;
case RTE_FLOW_ITEM_TYPE_VXLAN:
case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
flow_dv_translate_item_vxlan(tmatcher->mask.buf, key, item,
inner);
break;
+ case RTE_FLOW_ITEM_TYPE_META:
+ flow_dv_translate_item_meta(tmatcher->mask.buf, key, item);
+ break;
default:
break;
}
/**
* Store the requested actions in an array.
*
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
* @param[in] action
* Flow action to translate.
* @param[in, out] dev_flow
* Pointer to the mlx5_flow.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-static void
-flow_dv_create_action(const struct rte_flow_action *action,
- struct mlx5_flow *dev_flow)
+static int
+flow_dv_create_action(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct mlx5_flow *dev_flow,
+ const struct rte_flow_attr *attr,
+ struct rte_flow_error *error)
{
const struct rte_flow_action_queue *queue;
const struct rte_flow_action_rss *rss;
int actions_n = dev_flow->dv.actions_n;
struct rte_flow *flow = dev_flow->flow;
+ const struct rte_flow_action *action_ptr = action;
+ const uint8_t *rss_key;
switch (action->type) {
case RTE_FLOW_ACTION_TYPE_VOID:
case RTE_FLOW_ACTION_TYPE_FLAG:
dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_TAG;
dev_flow->dv.actions[actions_n].tag_value =
- MLX5_FLOW_MARK_DEFAULT;
+ mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
actions_n++;
flow->actions |= MLX5_FLOW_ACTION_FLAG;
break;
case RTE_FLOW_ACTION_TYPE_MARK:
dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_TAG;
dev_flow->dv.actions[actions_n].tag_value =
- ((const struct rte_flow_action_mark *)
- (action->conf))->id;
+ mlx5_flow_mark_set
+ (((const struct rte_flow_action_mark *)
+ (action->conf))->id);
flow->actions |= MLX5_FLOW_ACTION_MARK;
actions_n++;
break;
memcpy((*flow->queue), rss->queue,
rss->queue_num * sizeof(uint16_t));
flow->rss.queue_num = rss->queue_num;
- memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
- flow->rss.types = rss->types;
+ /* NULL RSS key indicates default RSS key. */
+ rss_key = !rss->key ? rss_hash_default_key : rss->key;
+ memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
+ /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
+ flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
flow->rss.level = rss->level;
/* Added to array only in apply since we need the QP */
flow->actions |= MLX5_FLOW_ACTION_RSS;
break;
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ if (flow_dv_create_action_l2_encap(dev, action,
+ dev_flow, error))
+ return -rte_errno;
+ dev_flow->dv.actions[actions_n].type =
+ MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
+ dev_flow->dv.actions[actions_n].action =
+ dev_flow->dv.encap_decap->verbs_action;
+ flow->actions |= action->type ==
+ RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
+ MLX5_FLOW_ACTION_VXLAN_ENCAP :
+ MLX5_FLOW_ACTION_NVGRE_ENCAP;
+ actions_n++;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
+ if (flow_dv_create_action_l2_decap(dev, dev_flow, error))
+ return -rte_errno;
+ dev_flow->dv.actions[actions_n].type =
+ MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
+ dev_flow->dv.actions[actions_n].action =
+ dev_flow->dv.encap_decap->verbs_action;
+ flow->actions |= action->type ==
+ RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
+ MLX5_FLOW_ACTION_VXLAN_DECAP :
+ MLX5_FLOW_ACTION_NVGRE_DECAP;
+ actions_n++;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ /* Handle encap action with preceding decap. */
+ if (flow->actions & MLX5_FLOW_ACTION_RAW_DECAP) {
+ if (flow_dv_create_action_raw_encap(dev, action,
+ dev_flow,
+ attr, error))
+ return -rte_errno;
+ dev_flow->dv.actions[actions_n].type =
+ MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
+ dev_flow->dv.actions[actions_n].action =
+ dev_flow->dv.encap_decap->verbs_action;
+ } else {
+ /* Handle encap action without preceding decap. */
+ if (flow_dv_create_action_l2_encap(dev, action,
+ dev_flow, error))
+ return -rte_errno;
+ dev_flow->dv.actions[actions_n].type =
+ MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
+ dev_flow->dv.actions[actions_n].action =
+ dev_flow->dv.encap_decap->verbs_action;
+ }
+ flow->actions |= MLX5_FLOW_ACTION_RAW_ENCAP;
+ actions_n++;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ /* Check if this decap action is followed by encap. */
+ for (; action_ptr->type != RTE_FLOW_ACTION_TYPE_END &&
+ action_ptr->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
+ action_ptr++) {
+ }
+ /* Handle decap action only if it isn't followed by encap. */
+ if (action_ptr->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
+ if (flow_dv_create_action_l2_decap(dev, dev_flow,
+ error))
+ return -rte_errno;
+ dev_flow->dv.actions[actions_n].type =
+ MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
+ dev_flow->dv.actions[actions_n].action =
+ dev_flow->dv.encap_decap->verbs_action;
+ actions_n++;
+ }
+ /* If decap is followed by encap, handle it in the encap case. */
+ flow->actions |= MLX5_FLOW_ACTION_RAW_DECAP;
+ break;
default:
break;
}
dev_flow->dv.actions_n = actions_n;
+ return 0;
}
static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
cache_matcher->matcher_object =
mlx5_glue->dv_create_flow_matcher(priv->ctx, &dv_attr);
- if (!cache_matcher->matcher_object)
+ if (!cache_matcher->matcher_object) {
+ rte_free(cache_matcher);
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create matcher");
+ }
rte_atomic32_inc(&cache_matcher->refcnt);
LIST_INSERT_HEAD(&priv->matchers, cache_matcher, next);
dev_flow->dv.matcher = cache_matcher;
return 0;
}
-
/**
* Fill the flow with DV spec.
*
struct mlx5_flow *dev_flow,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
- const struct rte_flow_action actions[] __rte_unused,
+ const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
struct priv *priv = dev->data->dev_private;
},
};
void *match_value = dev_flow->dv.value.buf;
- uint8_t inner = 0;
+ int tunnel = 0;
if (priority == MLX5_FLOW_PRIO_RSVD)
priority = priv->config.flow_prio - 1;
- for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++)
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
flow_dv_create_item(&matcher, match_value, items, dev_flow,
- inner);
+ tunnel);
+ }
matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
matcher.mask.size);
if (priority == MLX5_FLOW_PRIO_RSVD)
if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
return -rte_errno;
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
- flow_dv_create_action(actions, dev_flow);
+ if (flow_dv_create_action(dev, actions, dev_flow, attr, error))
+ return -rte_errno;
return 0;
}
(dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
dv->hash_fields, (*flow->queue),
flow->rss.queue_num,
- !!(flow->layers &
+ !!(dev_flow->layers &
MLX5_FLOW_LAYER_TUNNEL));
if (!hrxq) {
rte_flow_error_set
return 1;
}
+/**
+ * Release an encap/decap resource.
+ *
+ * @param flow
+ * Pointer to mlx5_flow.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
+{
+ struct mlx5_flow_dv_encap_decap_resource *cache_resource =
+ flow->dv.encap_decap;
+
+ assert(cache_resource->verbs_action);
+ DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
+ (void *)cache_resource,
+ rte_atomic32_read(&cache_resource->refcnt));
+ if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
+ claim_zero(mlx5_glue->destroy_flow_action
+ (cache_resource->verbs_action));
+ LIST_REMOVE(cache_resource, next);
+ rte_free(cache_resource);
+ DRV_LOG(DEBUG, "encap/decap resource %p: removed",
+ (void *)cache_resource);
+ return 0;
+ }
+ return 1;
+}
+
/**
* Remove the flow from the NIC but keeps it in memory.
*
LIST_REMOVE(dev_flow, next);
if (dev_flow->dv.matcher)
flow_dv_matcher_release(dev, dev_flow);
+ if (dev_flow->dv.encap_decap)
+ flow_dv_encap_decap_resource_release(dev_flow);
rte_free(dev_flow);
}
}