#define MLX5_FLOW_FATE_DROP (1u << 0)
#define MLX5_FLOW_FATE_QUEUE (1u << 1)
+/* IP protocol numbers (IANA) used for L3 protocol filtering. */
+#define MLX5_IP_PROTOCOL_UDP 17
+
/** Verbs flow structure. */
struct mlx5_flow_verbs {
unsigned int size; /**< Size of the attribute. */
struct rte_flow {
TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
struct rte_flow_attr attributes; /**< User flow attribute. */
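+ /*
+ * An explicit enable bit is needed: 0 is itself a valid IANA protocol
+ * number (HOPOPT), so l3_protocol alone cannot encode "no protocol
+ * filtering requested"; the L3 converters derive this bit from the
+ * item mask.
+ */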
+ uint32_t l3_protocol_en:1; /**< Protocol filtering requested. */
uint32_t layers;
/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
uint32_t fate;
/**< Bit-fields of present fates, see MLX5_FLOW_FATE_*. */
+ uint8_t l3_protocol; /**< Valid when l3_protocol_en is set. */
struct mlx5_flow_verbs verbs; /**< Verbs flow. */
uint16_t queue; /**< Destination queue to redirect traffic to. */
};
if (ret < 0)
return ret;
flow->layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
- if (size > flow_size)
- return size;
if (spec) {
ipv4.val = (struct ibv_flow_ipv4_ext_filter){
.src_ip = spec->hdr.src_addr,
ipv4.val.proto &= ipv4.mask.proto;
ipv4.val.tos &= ipv4.mask.tos;
}
- mlx5_flow_spec_verbs_add(flow, &ipv4, size);
+ flow->l3_protocol_en = !!ipv4.mask.proto;
+ flow->l3_protocol = ipv4.val.proto;
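+ /*
+ * The protocol is recorded before the size check below so that a
+ * validation-only pass (not enough room in @p flow) still leaves it
+ * available to the L4 item converters.
+ */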
+ if (size <= flow_size)
+ mlx5_flow_spec_verbs_add(flow, &ipv4, size);
return size;
}
if (ret < 0)
return ret;
flow->layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
- if (size > flow_size)
- return size;
if (spec) {
unsigned int i;
uint32_t vtc_flow_val;
ipv6.val.next_hdr &= ipv6.mask.next_hdr;
ipv6.val.hop_limit &= ipv6.mask.hop_limit;
}
- mlx5_flow_spec_verbs_add(flow, &ipv6, size);
+ flow->l3_protocol_en = !!ipv6.mask.next_hdr;
+ flow->l3_protocol = ipv6.val.next_hdr;
+ if (size <= flow_size)
+ mlx5_flow_spec_verbs_add(flow, &ipv6, size);
+ return size;
+}
+
+/**
+ * Convert the @p item into a Verbs specification after ensuring the NIC
+ * will understand and process it correctly.
+ * If the necessary size for the conversion is greater than @p flow_size,
+ * nothing is written in @p flow, but the validation is still performed.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in, out] flow
+ * Pointer to flow structure.
+ * @param[in] flow_size
+ * Size in bytes of the available space in @p flow; if too small, nothing
+ * is written.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * On success, the number of bytes consumed/necessary. If the returned
+ * value is less than or equal to @p flow_size, @p item has been fully
+ * converted; otherwise another call with this returned size is needed.
+ * On error, a negative errno value is returned and rte_errno is set.
+ */
+static int
+mlx5_flow_item_udp(const struct rte_flow_item *item, struct rte_flow *flow,
+ const size_t flow_size, struct rte_flow_error *error)
+{
+ const struct rte_flow_item_udp *spec = item->spec;
+ const struct rte_flow_item_udp *mask = item->mask;
+ unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
+ struct ibv_flow_spec_tcp_udp udp = {
+ .type = IBV_FLOW_SPEC_UDP,
+ .size = size,
+ };
+ int ret;
+
+ if (!(flow->layers & MLX5_FLOW_LAYER_OUTER_L3))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L3 is mandatory to filter on L4");
+ if (flow->layers & MLX5_FLOW_LAYER_OUTER_L4)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "L4 layer is already present");
+ if (flow->l3_protocol_en && flow->l3_protocol != MLX5_IP_PROTOCOL_UDP)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "protocol filtering not compatible"
+ " with UDP layer");
+ if (!mask)
+ mask = &rte_flow_item_udp_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_udp_mask,
+ sizeof(struct rte_flow_item_udp), error);
+ if (ret < 0)
+ return ret;
+ flow->layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ if (size > flow_size)
+ return size;
+ if (spec) {
+ udp.val.dst_port = spec->hdr.dst_port;
+ udp.val.src_port = spec->hdr.src_port;
+ udp.mask.dst_port = mask->hdr.dst_port;
+ udp.mask.src_port = mask->hdr.src_port;
+ /* Remove unwanted bits from values. */
+ udp.val.src_port &= udp.mask.src_port;
+ udp.val.dst_port &= udp.mask.dst_port;
+ }
+ mlx5_flow_spec_verbs_add(flow, &udp, size);
return size;
}
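
/*
 * Hypothetical caller (illustration only, flow_item_udp_example is not a
 * driver function): how the query-size return convention of the item
 * converters above is meant to be consumed.
 */
static int
flow_item_udp_example(const struct rte_flow_item *item, struct rte_flow *flow,
		      size_t remain, struct rte_flow_error *error)
{
	int ret = mlx5_flow_item_udp(item, flow, remain, error);

	if (ret < 0)
		return ret; /* Item rejected, rte_errno is set. */
	if ((size_t)ret > remain)
		return ret; /* Validated only, retry with >= ret bytes free. */
	return ret; /* Fully converted into flow->verbs. */
}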
case RTE_FLOW_ITEM_TYPE_IPV6:
ret = mlx5_flow_item_ipv6(pattern, flow, remain, error);
break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ ret = mlx5_flow_item_udp(pattern, flow, remain, error);
+ break;
default:
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,