+ LIST_FOREACH(cnt, &priv->flow_counters, next) {
+ if (!cnt->shared || cnt->shared != shared)
+ continue;
+ if (cnt->id != id)
+ continue;
+ cnt->ref_cnt++;
+ return cnt;
+ }
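+ /* No existing counter matched, create a new one when supported. */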
+#ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
+
+ struct mlx5_flow_counter tmpl = {
+ .shared = shared,
+ .id = id,
+ .ref_cnt = 1, /* Starts at one, balanced by the release decrement. */
+ .cs = mlx5_glue->create_counter_set
+ (priv->ctx,
+ &(struct ibv_counter_set_init_attr){
+ .counter_set_id = id,
+ }),
+ .hits = 0,
+ .bytes = 0,
+ };
+
+ if (!tmpl.cs) {
+ rte_errno = errno;
+ return NULL;
+ }
+ cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
+ if (!cnt) {
+ claim_zero(mlx5_glue->destroy_counter_set(tmpl.cs));
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ *cnt = tmpl;
+ LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
+ return cnt;
+#endif
+ rte_errno = ENOTSUP;
+ return NULL;
+}
+
+/**
+ * Release a flow counter.
+ *
+ * @param[in] counter
+ * Pointer to the counter handler.
+ */
+static void
+mlx5_flow_counter_release(struct mlx5_flow_counter *counter)
+{
+ if (--counter->ref_cnt == 0) {
+ claim_zero(mlx5_glue->destroy_counter_set(counter->cs));
+ LIST_REMOVE(counter, next);
+ rte_free(counter);
+ }
+}
+
+/**
+ * Verify the @p attributes will be correctly understood by the NIC and store
+ * them in the @p flow if everything is correct.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] attributes
+ * Pointer to flow attributes.
+ * @param[in, out] flow
+ * Pointer to the rte_flow structure.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_attributes(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attributes,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ uint32_t priority_max =
+ ((struct priv *)dev->data->dev_private)->config.flow_prio - 1;
+
+ if (attributes->group)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+ NULL,
+ "groups is not supported");
+ if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
+ attributes->priority >= priority_max)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ NULL,
+ "priority out of range");
+ if (attributes->egress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ NULL,
+ "egress is not supported");
+ if (attributes->transfer)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ NULL,
+ "transfer is not supported");
+ if (!attributes->ingress)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ NULL,
+ "ingress attribute is mandatory");
+ flow->attributes = *attributes;
+ if (attributes->priority == MLX5_FLOW_PRIO_RSVD)
+ flow->attributes.priority = priority_max;
+ return 0;
+}
+
+/**
+ * Verify the @p item specifications (spec, last, mask) are compatible with the
+ * NIC capabilities.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] mask
+ * @p item->mask or flow default bit-masks.
+ * @param[in] nic_mask
+ * Bit-masks covering supported fields by the NIC to compare with user mask.
+ * @param[in] size
+ * Bit-mask size in bytes.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_item_acceptable(const struct rte_flow_item *item,
+ const uint8_t *mask,
+ const uint8_t *nic_mask,
+ unsigned int size,
+ struct rte_flow_error *error)
+{
+ unsigned int i;
+
+ assert(nic_mask);
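+ /*
+ * A mask is acceptable only when it does not enable any bit
+ * outside the NIC capabilities, i.e. for each byte
+ * (nic_mask[i] | mask[i]) == nic_mask[i].
+ */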
+ for (i = 0; i < size; ++i)
+ if ((nic_mask[i] | mask[i]) != nic_mask[i])
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "mask enables non supported"
+ " bits");
+ if (!item->spec && (item->mask || item->last))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "mask/last without a spec is not"
+ " supported");
+ if (item->spec && item->last) {
+ uint8_t spec[size];
+ uint8_t last[size];
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < size; ++i) {
+ spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
+ last[i] = ((const uint8_t *)item->last)[i] & mask[i];
+ }
+ ret = memcmp(spec, last, size);
+ if (ret != 0)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "range is not supported");
+ }
+ return 0;
+}
+
+/**
+ * Add a verbs item specification into @p flow.
+ *
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] src
+ * Pointer to the specification to copy.
+ * @param[in] size
+ * Size in bytes of the specification to copy.
+ */
+static void
+mlx5_flow_spec_verbs_add(struct rte_flow *flow, void *src, unsigned int size)
+{
+ struct mlx5_flow_verbs *verbs = flow->cur_verbs;
+
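+ /*
+ * During the first pass no room is provided (specs == NULL); only
+ * the required size is accumulated for the caller.
+ */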
+ if (verbs->specs) {
+ void *dst;
+
+ dst = (void *)(verbs->specs + verbs->size);
+ memcpy(dst, src, size);
+ ++verbs->attr->num_of_specs;
+ }
+ verbs->size += size;
+}
+
+/**
+ * Adjust verbs hash fields according to the @p flow information.
+ *
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] tunnel
+ * 1 when the hash field is for a tunnel item.
+ * @param[in] layer_types
+ * ETH_RSS_* types.
+ * @param[in] hash_fields
+ * Item hash fields.
+ */
+static void
+mlx5_flow_verbs_hashfields_adjust(struct rte_flow *flow,
+ int tunnel __rte_unused,
+ uint32_t layer_types, uint64_t hash_fields)
+{
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
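+ /* RSS level 2 requests hashing on inner headers, levels 0/1 on outer. */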
+ hash_fields |= (tunnel ? IBV_RX_HASH_INNER : 0);
+ if (flow->rss.level == 2 && !tunnel)
+ hash_fields = 0;
+ else if (flow->rss.level < 2 && tunnel)
+ hash_fields = 0;
+#endif
+ if (!(flow->rss.types & layer_types))
+ hash_fields = 0;
+ flow->cur_verbs->hash_fields |= hash_fields;
+}
+
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow; the validation is still performed.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary. If the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted; otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_eth(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
+{
+ const struct rte_flow_item_eth *spec = item->spec;
+ const struct rte_flow_item_eth *mask = item->mask;
+ const struct rte_flow_item_eth nic_mask = {
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .type = RTE_BE16(0xffff),
+ };
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ const unsigned int size = sizeof(struct ibv_flow_spec_eth);
+ struct ibv_flow_spec_eth eth = {
+ .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
+ };
+ int ret;
+
+ if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L2 layers already configured");
+ if (!mask)
+ mask = &rte_flow_item_eth_mask;
+ ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_eth),
+ error);
+ if (ret)
+ return ret;
+ flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
+ if (size > flow_size)
+ return size;
+ if (spec) {
+ unsigned int i;
+
+ memcpy(ð.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(ð.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
+ eth.val.ether_type = spec->type;
+ memcpy(ð.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(ð.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
+ eth.mask.ether_type = mask->type;
+ /* Remove unwanted bits from values. */
+ for (i = 0; i < ETHER_ADDR_LEN; ++i) {
+ eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
+ eth.val.src_mac[i] &= eth.mask.src_mac[i];
+ }
+ eth.val.ether_type &= eth.mask.ether_type;
+ }
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ mlx5_flow_spec_verbs_add(flow, ð, size);
+ return size;
+}
+
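+/*
+ * All the mlx5_flow_item_*() converters above and below share the same
+ * two-pass contract. A hypothetical caller sketch (illustration only,
+ * "room" being an assumed local holding the remaining buffer size):
+ *
+ * int ret = mlx5_flow_item_eth(item, flow, room, error);
+ *
+ * if (ret < 0)
+ * return ret; // invalid item, rte_errno is set
+ * if (ret > room)
+ * return ret; // retry with at least ret bytes of room
+ * // ret <= room: the specification was written into the flow
+ */
+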
+/**
+ * Update the VLAN tag in the Verbs Ethernet specification.
+ *
+ * @param[in, out] attr
+ * Pointer to Verbs attributes structure.
+ * @param[in] eth
+ * Verbs structure containing the VLAN information to copy.
+ */
+static void
+mlx5_flow_item_vlan_update(struct ibv_flow_attr *attr,
+ struct ibv_flow_spec_eth *eth)
+{
+ unsigned int i;
+ const enum ibv_flow_spec_type search = eth->type;
+ struct ibv_spec_header *hdr = (struct ibv_spec_header *)
+ ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
+
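+ /* Specifications are laid out back to back right after the attributes. */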
+ for (i = 0; i != attr->num_of_specs; ++i) {
+ if (hdr->type == search) {
+ struct ibv_flow_spec_eth *e =
+ (struct ibv_flow_spec_eth *)hdr;
+
+ e->val.vlan_tag = eth->val.vlan_tag;
+ e->mask.vlan_tag = eth->mask.vlan_tag;
+ e->val.ether_type = eth->val.ether_type;
+ e->mask.ether_type = eth->mask.ether_type;
+ break;
+ }
+ hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
+ }
+}
+
+/**
+ * Convert the @p item into a Verbs specification (or update the Ethernet
+ * Verbs specification already present) after ensuring the NIC will
+ * understand and process it correctly.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow; the validation is still performed.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary. If the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted; otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_vlan(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
+{
+ const struct rte_flow_item_vlan *spec = item->spec;
+ const struct rte_flow_item_vlan *mask = item->mask;
+ const struct rte_flow_item_vlan nic_mask = {
+ .tci = RTE_BE16(0x0fff),
+ .inner_type = RTE_BE16(0xffff),
+ };
+ unsigned int size = sizeof(struct ibv_flow_spec_eth);
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ struct ibv_flow_spec_eth eth = {
+ .type = IBV_FLOW_SPEC_ETH | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
+ };
+ int ret;
+ const uint32_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
+ MLX5_FLOW_LAYER_INNER_L4) :
+ (MLX5_FLOW_LAYER_OUTER_L3 | MLX5_FLOW_LAYER_OUTER_L4);
+ const uint32_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+ MLX5_FLOW_LAYER_OUTER_VLAN;
+ const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
+
+ if (flow->layers & vlanm)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VLAN layer already configured");
+ else if ((flow->layers & l34m) != 0)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L2 layer cannot follow L3/L4 layer");
+ if (!mask)
+ mask = &rte_flow_item_vlan_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_vlan), error);
+ if (ret)
+ return ret;
+ if (spec) {
+ eth.val.vlan_tag = spec->tci;
+ eth.mask.vlan_tag = mask->tci;
+ eth.val.vlan_tag &= eth.mask.vlan_tag;
+ eth.val.ether_type = spec->inner_type;
+ eth.mask.ether_type = mask->inner_type;
+ eth.val.ether_type &= eth.mask.ether_type;
+ }
+ /*
+ * From verbs perspective an empty VLAN is equivalent
+ * to a packet without VLAN layer.
+ */
+ if (!eth.mask.vlan_tag)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ item->spec,
+ "VLAN cannot be empty");
+ if (!(flow->layers & l2m)) {
+ if (size <= flow_size) {
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ mlx5_flow_spec_verbs_add(flow, ð, size);
+ }
+ } else {
+ if (flow->cur_verbs)
+ mlx5_flow_item_vlan_update(flow->cur_verbs->attr,
+ ð);
+ size = 0; /* Only an update is done in eth specification. */
+ }
+ flow->layers |= tunnel ?
+ (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_VLAN) :
+ (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN);
+ return size;
+}
+
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow; the validation is still performed.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary. If the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted; otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_ipv4(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
+{
+ const struct rte_flow_item_ipv4 *spec = item->spec;
+ const struct rte_flow_item_ipv4 *mask = item->mask;
+ const struct rte_flow_item_ipv4 nic_mask = {
+ .hdr = {
+ .src_addr = RTE_BE32(0xffffffff),
+ .dst_addr = RTE_BE32(0xffffffff),
+ .type_of_service = 0xff,
+ .next_proto_id = 0xff,
+ },
+ };
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
+ struct ibv_flow_spec_ipv4_ext ipv4 = {
+ .type = IBV_FLOW_SPEC_IPV4_EXT |
+ (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
+ };
+ int ret;
+
+ if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "multiple L3 layers not supported");
+ else if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L3 cannot follow an L4 layer.");
+ if (!mask)
+ mask = &rte_flow_item_ipv4_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_ipv4), error);
+ if (ret < 0)
+ return ret;
+ flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ if (spec) {
+ ipv4.val = (struct ibv_flow_ipv4_ext_filter){
+ .src_ip = spec->hdr.src_addr,
+ .dst_ip = spec->hdr.dst_addr,
+ .proto = spec->hdr.next_proto_id,
+ .tos = spec->hdr.type_of_service,
+ };
+ ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
+ .src_ip = mask->hdr.src_addr,
+ .dst_ip = mask->hdr.dst_addr,
+ .proto = mask->hdr.next_proto_id,
+ .tos = mask->hdr.type_of_service,
+ };
+ /* Remove unwanted bits from values. */
+ ipv4.val.src_ip &= ipv4.mask.src_ip;
+ ipv4.val.dst_ip &= ipv4.mask.dst_ip;
+ ipv4.val.proto &= ipv4.mask.proto;
+ ipv4.val.tos &= ipv4.mask.tos;
+ }
+ flow->l3_protocol_en = !!ipv4.mask.proto;
+ flow->l3_protocol = ipv4.val.proto;
+ if (size <= flow_size) {
+ mlx5_flow_verbs_hashfields_adjust
+ (flow, tunnel,
+ (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
+ ETH_RSS_NONFRAG_IPV4_OTHER),
+ (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4));
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;
+ mlx5_flow_spec_verbs_add(flow, &ipv4, size);
+ }
+ return size;
+}
+
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow; the validation is still performed.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary. If the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted; otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_ipv6(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
+{
+ const struct rte_flow_item_ipv6 *spec = item->spec;
+ const struct rte_flow_item_ipv6 *mask = item->mask;
+ const struct rte_flow_item_ipv6 nic_mask = {
+ .hdr = {
+ .src_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .dst_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .vtc_flow = RTE_BE32(0xffffffff),
+ .proto = 0xff,
+ .hop_limits = 0xff,
+ },
+ };
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
+ struct ibv_flow_spec_ipv6 ipv6 = {
+ .type = IBV_FLOW_SPEC_IPV6 | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
+ };
+ int ret;
+
+ if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "multiple L3 layers not supported");
+ else if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L3 cannot follow an L4 layer.");
+ /*
+ * IPv6 is not recognised by the NIC inside a GRE tunnel.
+ * Such rules have to be rejected here as they would otherwise
+ * be accepted without ever matching. Issue reproduced with
+ * Mellanox OFED 4.3-3.0.2.1 and Mellanox OFED 4.4-1.0.0.0.
+ */
+ if (tunnel && flow->layers & MLX5_FLOW_LAYER_GRE)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "IPv6 inside a GRE tunnel is"
+ " not recognised.");
+ if (!mask)
+ mask = &rte_flow_item_ipv6_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&nic_mask,
+ sizeof(struct rte_flow_item_ipv6), error);
+ if (ret < 0)
+ return ret;
+ flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ if (spec) {
+ unsigned int i;
+ uint32_t vtc_flow_val;
+ uint32_t vtc_flow_mask;
+
+ memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
+ RTE_DIM(ipv6.val.src_ip));
+ memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
+ RTE_DIM(ipv6.val.dst_ip));
+ memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
+ RTE_DIM(ipv6.mask.src_ip));
+ memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
+ RTE_DIM(ipv6.mask.dst_ip));
+ vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
+ vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
+ ipv6.val.flow_label =
+ rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >>
+ IPV6_HDR_FL_SHIFT);
+ ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >>
+ IPV6_HDR_TC_SHIFT;
+ ipv6.val.next_hdr = spec->hdr.proto;
+ ipv6.val.hop_limit = spec->hdr.hop_limits;
+ ipv6.mask.flow_label =
+ rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >>
+ IPV6_HDR_FL_SHIFT);
+ ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >>
+ IPV6_HDR_TC_SHIFT;
+ ipv6.mask.next_hdr = mask->hdr.proto;
+ ipv6.mask.hop_limit = mask->hdr.hop_limits;
+ /* Remove unwanted bits from values. */
+ for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
+ ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
+ ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
+ }
+ ipv6.val.flow_label &= ipv6.mask.flow_label;
+ ipv6.val.traffic_class &= ipv6.mask.traffic_class;
+ ipv6.val.next_hdr &= ipv6.mask.next_hdr;
+ ipv6.val.hop_limit &= ipv6.mask.hop_limit;
+ }
+ flow->l3_protocol_en = !!ipv6.mask.next_hdr;
+ flow->l3_protocol = ipv6.val.next_hdr;
+ if (size <= flow_size) {
+ mlx5_flow_verbs_hashfields_adjust
+ (flow, tunnel,
+ (ETH_RSS_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER),
+ (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6));
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L3;
+ mlx5_flow_spec_verbs_add(flow, &ipv6, size);
+ }
+ return size;
+}
+
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow; the validation is still performed.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary. If the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted; otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
+{
+ const struct rte_flow_item_udp *spec = item->spec;
+ const struct rte_flow_item_udp *mask = item->mask;
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
+ struct ibv_flow_spec_tcp_udp udp = {
+ .type = IBV_FLOW_SPEC_UDP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
+ };
+ int ret;
+
+ if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_UDP)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "protocol filtering not compatible"
+ " with UDP layer");
+ if (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3)))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L3 is mandatory to filter"
+ " on L4");
+ if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L4 layer is already"
+ " present");
+ if (!mask)
+ mask = &rte_flow_item_udp_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_udp_mask,
+ sizeof(struct rte_flow_item_udp), error);
+ if (ret < 0)
+ return ret;
+ flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+ MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ if (spec) {
+ udp.val.dst_port = spec->hdr.dst_port;
+ udp.val.src_port = spec->hdr.src_port;
+ udp.mask.dst_port = mask->hdr.dst_port;
+ udp.mask.src_port = mask->hdr.src_port;
+ /* Remove unwanted bits from values. */
+ udp.val.src_port &= udp.mask.src_port;
+ udp.val.dst_port &= udp.mask.dst_port;
+ }
+ if (size <= flow_size) {
+ mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_UDP,
+ (IBV_RX_HASH_SRC_PORT_UDP |
+ IBV_RX_HASH_DST_PORT_UDP));
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;
+ mlx5_flow_spec_verbs_add(flow, &udp, size);
+ }
+ return size;
+}
+
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow; the validation is still performed.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary. If the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted; otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_tcp(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
+{
+ const struct rte_flow_item_tcp *spec = item->spec;
+ const struct rte_flow_item_tcp *mask = item->mask;
+ const int tunnel = !!(flow->layers & MLX5_FLOW_LAYER_TUNNEL);
+ unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
+ struct ibv_flow_spec_tcp_udp tcp = {
+ .type = IBV_FLOW_SPEC_TCP | (tunnel ? IBV_FLOW_SPEC_INNER : 0),
+ .size = size,
+ };
+ int ret;
+
+ if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_TCP)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "protocol filtering not compatible"
+ " with TCP layer");
+ if (!(flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3)))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L3 is mandatory to filter on L4");
+ if (flow->layers & (tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L4 layer is already present");
+ if (!mask)
+ mask = &rte_flow_item_tcp_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_tcp_mask,
+ sizeof(struct rte_flow_item_tcp), error);
+ if (ret < 0)
+ return ret;
+ flow->layers |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ if (spec) {
+ tcp.val.dst_port = spec->hdr.dst_port;
+ tcp.val.src_port = spec->hdr.src_port;
+ tcp.mask.dst_port = mask->hdr.dst_port;
+ tcp.mask.src_port = mask->hdr.src_port;
+ /* Remove unwanted bits from values. */
+ tcp.val.src_port &= tcp.mask.src_port;
+ tcp.val.dst_port &= tcp.mask.dst_port;
+ }
+ if (size <= flow_size) {
+ mlx5_flow_verbs_hashfields_adjust(flow, tunnel, ETH_RSS_TCP,
+ (IBV_RX_HASH_SRC_PORT_TCP |
+ IBV_RX_HASH_DST_PORT_TCP));
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L4;
+ mlx5_flow_spec_verbs_add(flow, &tcp, size);
+ }
+ return size;
+}
+
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow; the validation is still performed.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary. If the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted; otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_vxlan(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
+{
+ const struct rte_flow_item_vxlan *spec = item->spec;
+ const struct rte_flow_item_vxlan *mask = item->mask;
+ unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+ struct ibv_flow_spec_tunnel vxlan = {
+ .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
+ .size = size,
+ };
+ int ret;
+ union vni {
+ uint32_t vlan_id;
+ uint8_t vni[4];
+ } id = { .vlan_id = 0, };
+
+ if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "a tunnel is already present");
+ /*
+ * Verify only UDPv4 is present as defined in
+ * https://tools.ietf.org/html/rfc7348
+ */
+ if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "no outer UDP layer found");
+ if (!mask)
+ mask = &rte_flow_item_vxlan_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_vxlan_mask,
+ sizeof(struct rte_flow_item_vxlan), error);
+ if (ret < 0)
+ return ret;
+ if (spec) {
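+ /* The 24-bit VNI is copied into bytes 1-3 of the 32-bit tunnel id. */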
+ memcpy(&id.vni[1], spec->vni, 3);
+ vxlan.val.tunnel_id = id.vlan_id;
+ memcpy(&id.vni[1], mask->vni, 3);
+ vxlan.mask.tunnel_id = id.vlan_id;
+ /* Remove unwanted bits from values. */
+ vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
+ }
+ /*
+ * Tunnel id 0 is equivalent to not adding a VXLAN layer. If
+ * only this layer is defined in the Verbs specification, it is
+ * interpreted as a wildcard and all packets will match this
+ * rule. If it follows a full stack layer (ex: eth / ipv4 /
+ * udp), all packets matching the layers before will also
+ * match this rule. To avoid such a situation, VNI 0 is
+ * currently refused.
+ * currently refused.
+ */
+ if (!vxlan.val.tunnel_id)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN vni cannot be 0");
+ if (!(flow->layers & MLX5_FLOW_LAYER_OUTER))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN tunnel must be fully defined");
+ if (size <= flow_size) {
+ mlx5_flow_spec_verbs_add(flow, &vxlan, size);
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ }
+ flow->layers |= MLX5_FLOW_LAYER_VXLAN;
+ return size;
+}
+
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow; the validation is still performed.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary. If the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted; otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_vxlan_gpe(struct rte_eth_dev *dev,
+ const struct rte_flow_item *item,
+ struct rte_flow *flow, const size_t flow_size,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_vxlan_gpe *spec = item->spec;
+ const struct rte_flow_item_vxlan_gpe *mask = item->mask;
+ unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+ struct ibv_flow_spec_tunnel vxlan_gpe = {
+ .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
+ .size = size,
+ };
+ int ret;
+ union vni {
+ uint32_t vlan_id;
+ uint8_t vni[4];
+ } id = { .vlan_id = 0, };
+
+ if (!((struct priv *)dev->data->dev_private)->config.l3_vxlan_en)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L3 VXLAN is not enabled by device"
+ " parameter and/or not configured in"
+ " firmware");
+ if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "a tunnel is already present");
+ /*
+ * Verify only UDPv4 is present as defined in
+ * https://tools.ietf.org/html/rfc7348
+ */
+ if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "no outer UDP layer found");
+ if (!mask)
+ mask = &rte_flow_item_vxlan_gpe_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
+ sizeof(struct rte_flow_item_vxlan_gpe), error);
+ if (ret < 0)
+ return ret;
+ if (spec) {
+ memcpy(&id.vni[1], spec->vni, 3);
+ vxlan_gpe.val.tunnel_id = id.vlan_id;
+ memcpy(&id.vni[1], mask->vni, 3);
+ vxlan_gpe.mask.tunnel_id = id.vlan_id;
+ if (spec->protocol)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VxLAN-GPE protocol not supported");
+ /* Remove unwanted bits from values. */
+ vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
+ }
+ /*
+ * Tunnel id 0 is equivalent to not adding a VXLAN layer. If only this
+ * layer is defined in the Verbs specification, it is interpreted as a
+ * wildcard and all packets will match this rule. If it follows a full
+ * stack layer (ex: eth / ipv4 / udp), all packets matching the layers
+ * before will also match this rule. To avoid such a situation, VNI 0
+ * is currently refused.
+ */
+ if (!vxlan_gpe.val.tunnel_id)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN-GPE vni cannot be 0");
+ if (!(flow->layers & MLX5_FLOW_LAYER_OUTER))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN-GPE tunnel must be fully"
+ " defined");
+ if (size <= flow_size) {
+ mlx5_flow_spec_verbs_add(flow, &vxlan_gpe, size);
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ }
+ flow->layers |= MLX5_FLOW_LAYER_VXLAN_GPE;
+ return size;
+}
+
+/**
+ * Update the protocol in Verbs IPv4/IPv6 spec.
+ *
+ * @param[in, out] attr
+ * Pointer to Verbs attributes structure.
+ * @param[in] search
+ * Specification type to search in order to update the IP protocol.
+ * @param[in] protocol
+ * Protocol value to set if none is present in the specification.
+ */
+static void
+mlx5_flow_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
+ enum ibv_flow_spec_type search,
+ uint8_t protocol)
+{
+ unsigned int i;
+ struct ibv_spec_header *hdr = (struct ibv_spec_header *)
+ ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
+
+ if (!attr)
+ return;
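+ /* Set the IP protocol only when the pattern left it unspecified. */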
+ for (i = 0; i != attr->num_of_specs; ++i) {
+ if (hdr->type == search) {
+ union {
+ struct ibv_flow_spec_ipv4_ext *ipv4;
+ struct ibv_flow_spec_ipv6 *ipv6;
+ } ip;
+
+ switch (search) {
+ case IBV_FLOW_SPEC_IPV4_EXT:
+ ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
+ if (!ip.ipv4->val.proto) {
+ ip.ipv4->val.proto = protocol;
+ ip.ipv4->mask.proto = 0xff;
+ }
+ break;
+ case IBV_FLOW_SPEC_IPV6:
+ ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
+ if (!ip.ipv6->val.next_hdr) {
+ ip.ipv6->val.next_hdr = protocol;
+ ip.ipv6->mask.next_hdr = 0xff;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
+ }
+}
+
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * It will also update the previous L3 layer with the protocol value matching
+ * the GRE.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow; the validation is still performed.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary. If the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted; otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_gre(const struct rte_flow_item *item,
+ struct rte_flow *flow, const size_t flow_size,
+ struct rte_flow_error *error)
+{
+ struct mlx5_flow_verbs *verbs = flow->cur_verbs;
+ const struct rte_flow_item_gre *spec = item->spec;
+ const struct rte_flow_item_gre *mask = item->mask;
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ unsigned int size = sizeof(struct ibv_flow_spec_gre);
+ struct ibv_flow_spec_gre tunnel = {
+ .type = IBV_FLOW_SPEC_GRE,
+ .size = size,
+ };
+#else
+ unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+ struct ibv_flow_spec_tunnel tunnel = {
+ .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
+ .size = size,
+ };
+#endif
+ int ret;
+
+ if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_GRE)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "protocol filtering not compatible"
+ " with this GRE layer");
+ if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "a tunnel is already present");
+ if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L3))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L3 Layer is missing");
+ if (!mask)
+ mask = &rte_flow_item_gre_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_gre_mask,
+ sizeof(struct rte_flow_item_gre), error);
+ if (ret < 0)
+ return ret;
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ if (spec) {
+ tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
+ tunnel.val.protocol = spec->protocol;
+ tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
+ tunnel.mask.protocol = mask->protocol;
+ /* Remove unwanted bits from values. */
+ tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
+ tunnel.val.protocol &= tunnel.mask.protocol;
+ tunnel.val.key &= tunnel.mask.key;
+ }
+#else
+ if (spec && (spec->protocol & mask->protocol))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "without MPLS support the"
+ " specification cannot be used for"
+ " filtering");
+#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
+ if (size <= flow_size) {
+ if (flow->layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
+ mlx5_flow_item_gre_ip_protocol_update
+ (verbs->attr, IBV_FLOW_SPEC_IPV4_EXT,
+ MLX5_IP_PROTOCOL_GRE);
+ else
+ mlx5_flow_item_gre_ip_protocol_update
+ (verbs->attr, IBV_FLOW_SPEC_IPV6,
+ MLX5_IP_PROTOCOL_GRE);
+ mlx5_flow_spec_verbs_add(flow, &tunnel, size);
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ }
+ flow->layers |= MLX5_FLOW_LAYER_GRE;
+ return size;
+}
+
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow; the validation is still performed.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary. If the returned value
+ * is less than or equal to @p flow_size, the @p item has been fully
+ * converted; otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_mpls(const struct rte_flow_item *item __rte_unused,
+ struct rte_flow *flow __rte_unused,
+ const size_t flow_size __rte_unused,
+ struct rte_flow_error *error)
+{
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ const struct rte_flow_item_mpls *spec = item->spec;
+ const struct rte_flow_item_mpls *mask = item->mask;
+ unsigned int size = sizeof(struct ibv_flow_spec_mpls);
+ struct ibv_flow_spec_mpls mpls = {
+ .type = IBV_FLOW_SPEC_MPLS,
+ .size = size,
+ };
+ int ret;
+
+ if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_MPLS)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "protocol filtering not compatible"
+ " with MPLS layer");
+ if (flow->layers & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "a tunnel is already"
+ " present");
+ if (!mask)
+ mask = &rte_flow_item_mpls_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_mpls_mask,
+ sizeof(struct rte_flow_item_mpls), error);
+ if (ret < 0)
+ return ret;
+ if (spec) {
+ memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
+ memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
+ /* Remove unwanted bits from values. */
+ mpls.val.label &= mpls.mask.label;
+ }
+ if (size <= flow_size) {
+ mlx5_flow_spec_verbs_add(flow, &mpls, size);
+ flow->cur_verbs->attr->priority = MLX5_PRIORITY_MAP_L2;
+ }
+ flow->layers |= MLX5_FLOW_LAYER_MPLS;
+ return size;
+#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "MPLS is not supported by Verbs, please"
+ " update.");
+}
+
+/**
+ * Convert the @p pattern into a Verbs specifications after ensuring the NIC
+ * will understand and process it correctly.
+ * The conversion is performed item per item; each of them is written into
+ * @p flow if its size is less than or equal to @p flow_size.
+ * Validation and memory consumption computation are still performed until the
+ * end of @p pattern, unless an error is encountered.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device.
+ * @param[in] pattern
+ * Flow pattern.
+ * @param[in, out] flow
+ * Pointer to the rte_flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, some
+ * garbage may be present.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary. If the returned value
+ * is less than or equal to @p flow_size, the @p pattern has been fully
+ * converted; otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_items(struct rte_eth_dev *dev,
+ const struct rte_flow_item pattern[],
+ struct rte_flow *flow, const size_t flow_size,
+ struct rte_flow_error *error)
+{
+ int remain = flow_size;
+ size_t size = 0;
+
+ for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
+ int ret = 0;
+
+ switch (pattern->type) {
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ break;
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ ret = mlx5_flow_item_eth(pattern, flow, remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ ret = mlx5_flow_item_vlan(pattern, flow, remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ ret = mlx5_flow_item_ipv4(pattern, flow, remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ ret = mlx5_flow_item_ipv6(pattern, flow, remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ ret = mlx5_flow_item_udp(pattern, flow, remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ ret = mlx5_flow_item_tcp(pattern, flow, remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ ret = mlx5_flow_item_vxlan(pattern, flow, remain,
+ error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ ret = mlx5_flow_item_vxlan_gpe(dev, pattern, flow,
+ remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ ret = mlx5_flow_item_gre(pattern, flow, remain, error);
+ break;
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ ret = mlx5_flow_item_mpls(pattern, flow, remain, error);
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "item not supported");
+ }
+ if (ret < 0)
+ return ret;
+ if (remain > ret)
+ remain -= ret;
+ else
+ remain = 0;
+ size += ret;
+ }
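+ /*
+ * An empty pattern would match every packet; fall back on a default
+ * Ethernet item so that at least L2 is always specified.
+ */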
+ if (!flow->layers) {
+ const struct rte_flow_item item = {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ };
+
+ return mlx5_flow_item_eth(&item, flow, flow_size, error);
+ }
+ return size;
+}
+
+/**
+ * Convert the @p action into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow; the validation is still performed.
+ *
+ * @param[in] action
+ * Action configuration.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary. If the returned value
+ * is less than or equal to @p flow_size, the @p action has been fully
+ * converted; otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_action_drop(const struct rte_flow_action *action,
+ struct rte_flow *flow, const size_t flow_size,
+ struct rte_flow_error *error)
+{
+ unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
+ struct ibv_flow_spec_action_drop drop = {
+ .type = IBV_FLOW_SPEC_ACTION_DROP,
+ .size = size,
+ };
+
+ if (flow->fate)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "multiple fate actions are not"
+ " supported");
+ if (flow->modifier & (MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "drop is not compatible with"
+ " flag/mark action");
+ if (size <= flow_size)
+ mlx5_flow_spec_verbs_add(flow, &drop, size);
+ flow->fate |= MLX5_FLOW_FATE_DROP;
+ return size;
+}
+
+/**
+ * Convert the @p action into @p flow after ensuring the NIC will understand
+ * and process it correctly.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device structure.
+ * @param[in] action
+ * Action configuration.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_action_queue(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_queue *queue = action->conf;
+
+ if (flow->fate)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "multiple fate actions are not"
+ " supported");
+ if (queue->index >= priv->rxqs_n)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &queue->index,
+ "queue index out of range");
+ if (!(*priv->rxqs)[queue->index])
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &queue->index,
+ "queue is not configured");
+ if (flow->queue)
+ (*flow->queue)[0] = queue->index;
+ flow->rss.queue_num = 1;
+ flow->fate |= MLX5_FLOW_FATE_QUEUE;
+ return 0;
+}
+
+/**
+ * Ensure the @p action will be understood and used correctly by the NIC.
+ *
+ * @param[in] dev
+ * Pointer to Ethernet device structure.
+ * @param[in] action
+ * Pointer to flow actions array.
+ * @param[in, out] flow
+ * Pointer to the rte_flow structure.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success @p flow->queue array and @p flow->rss are filled and valid.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_action_rss(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct priv *priv = dev->data->dev_private;
+ const struct rte_flow_action_rss *rss = action->conf;
+ unsigned int i;
+
+ if (flow->fate)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "multiple fate actions are not"
+ " supported");
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
+ rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->func,
+ "RSS hash function not supported");
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ if (rss->level > 2)
+#else
+ if (rss->level > 1)
+#endif
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->level,
+ "tunnel RSS is not supported");
+ if (rss->key_len < MLX5_RSS_HASH_KEY_LEN)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->key_len,
+ "RSS hash key too small");
+ if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->key_len,
+ "RSS hash key too large");
+ if (rss->queue_num > priv->config.ind_table_max_size)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->queue_num,
+ "number of queues too large");
+ if (rss->types & MLX5_RSS_HF_MASK)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->types,
+ "some RSS protocols are not"
+ " supported");
+ for (i = 0; i != rss->queue_num; ++i) {
+ if (!(*priv->rxqs)[rss->queue[i]])
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->queue[i],
+ "queue is not configured");
+ }
+ if (flow->queue)
+ memcpy((*flow->queue), rss->queue,
+ rss->queue_num * sizeof(uint16_t));
+ flow->rss.queue_num = rss->queue_num;
+ memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
+ flow->rss.types = rss->types;
+ flow->rss.level = rss->level;
+ flow->fate |= MLX5_FLOW_FATE_RSS;
+ return 0;
+}
+
+/**
+ * Convert the @p action into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow; the validation is still performed.
+ *
+ * @param[in] action
+ * Action configuration.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing is
+ * written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary. If the returned value
+ * is less than or equal to @p flow_size, the @p action has been fully
+ * converted; otherwise another call with this returned memory size should
+ * be done.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_action_flag(const struct rte_flow_action *action,
+ struct rte_flow *flow, const size_t flow_size,
+ struct rte_flow_error *error)
+{
+ unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
+ struct ibv_flow_spec_action_tag tag = {
+ .type = IBV_FLOW_SPEC_ACTION_TAG,
+ .size = size,
+ .tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
+ };
+ struct mlx5_flow_verbs *verbs = flow->cur_verbs;
+
+ if (flow->modifier & MLX5_FLOW_MOD_FLAG)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "flag action already present");
+ if (flow->fate & MLX5_FLOW_FATE_DROP)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "flag is not compatible with drop"
+ " action");
+ if (flow->modifier & MLX5_FLOW_MOD_MARK)
+ size = 0;
+ else if (size <= flow_size && verbs)
+ mlx5_flow_spec_verbs_add(flow, &tag, size);
+ flow->modifier |= MLX5_FLOW_MOD_FLAG;
+ return size;
+}
+
+/**
+ * Update verbs specification to modify the flag to mark.
+ *
+ * @param[in, out] verbs
+ * Pointer to the mlx5_flow_verbs structure.
+ * @param[in] mark_id
+ * Mark identifier to replace the flag.
+ */
+static void
+mlx5_flow_verbs_mark_update(struct mlx5_flow_verbs *verbs, uint32_t mark_id)
+{
+ struct ibv_spec_header *hdr;
+ unsigned int i;
+
+ if (!verbs)
+ return;
+ /* Update Verbs specification. */
+ hdr = (struct ibv_spec_header *)verbs->specs;
+ if (!hdr)
+ return;
+ for (i = 0; i != verbs->attr->num_of_specs; ++i) {
+ if (hdr->type == IBV_FLOW_SPEC_ACTION_TAG) {
+ struct ibv_flow_spec_action_tag *t =
+ (struct ibv_flow_spec_action_tag *)hdr;
+
+ t->tag_id = mlx5_flow_mark_set(mark_id);
+ }
+ hdr = (struct ibv_spec_header *)((uintptr_t)hdr + hdr->size);
+ }
+}