#define MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL 0x3
#endif
+/* Flow attribute flags (mirrors rdma-core's ibv_flow_flags values). */
+enum ibv_flow_flags {
+ IBV_FLOW_ATTR_FLAGS_ALLOW_LOOP_BACK = 1 << 0,
+ IBV_FLOW_ATTR_FLAGS_DONT_TRAP = 1 << 1,
+ IBV_FLOW_ATTR_FLAGS_EGRESS = 1 << 2,
+};
+
+/*
+ * Scope of a flow rule (mirrors rdma-core's ibv_flow_attr_type);
+ * only IBV_FLOW_ATTR_NORMAL is accepted by the matcher creation below.
+ */
+enum ibv_flow_attr_type {
+ /* Steering according to rule specifications. */
+ IBV_FLOW_ATTR_NORMAL = 0x0,
+ /*
+ * Default unicast and multicast rule -
+ * receive all Eth traffic which isn't steered to any QP.
+ */
+ IBV_FLOW_ATTR_ALL_DEFAULT = 0x1,
+ /*
+ * Default multicast rule -
+ * receive all Eth multicast traffic which isn't steered to any QP.
+ */
+ IBV_FLOW_ATTR_MC_DEFAULT = 0x2,
+ /* Sniffer rule - receive all port traffic. */
+ IBV_FLOW_ATTR_SNIFFER = 0x3,
+};
+
+/* Steering domain (flow table type) a matcher is attached to. */
+enum mlx5dv_flow_table_type {
+ MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX = 0x0,
+ MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX = 0x1,
+ MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB = 0x2,
+ MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX = 0x3,
+};
+
+/* Short aliases following the rdma-core mlx5dv naming convention. */
+#define MLX5DV_FLOW_TABLE_TYPE_NIC_RX MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX
+#define MLX5DV_FLOW_TABLE_TYPE_NIC_TX MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX
+#define MLX5DV_FLOW_TABLE_TYPE_FDB MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB
+#define MLX5DV_FLOW_TABLE_TYPE_RDMA_RX MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX
+
+/*
+ * Match mask/value buffer; match_sz is the byte size of the trailing
+ * flexible array member match_buf.
+ */
+struct mlx5dv_flow_match_parameters {
+ size_t match_sz;
+ uint64_t match_buf[]; /* Device spec format */
+};
+
+/* Creation attributes for a flow matcher (rdma-core compatible layout). */
+struct mlx5dv_flow_matcher_attr {
+ enum ibv_flow_attr_type type;
+ uint32_t flags; /* From enum ibv_flow_flags. */
+ uint16_t priority;
+ uint8_t match_criteria_enable; /* Device spec format. */
+ struct mlx5dv_flow_match_parameters *match_mask;
+ uint64_t comp_mask; /* Use mlx5dv_flow_matcher_attr_mask. */
+ enum mlx5dv_flow_table_type ft_type;
+};
+
+/* Windows specific mlx5_matcher. */
+struct mlx5_matcher {
+ void *ctx; /* Device context the matcher was created on. */
+ struct mlx5dv_flow_matcher_attr attr; /* Copy of creation attributes. */
+ uint64_t match_buf[]; /* Match mask copy, fte_match_param sized. */
+};
+
struct mlx5_err_cqe {
uint8_t rsvd0[32];
uint32_t srqn;
void *table,
void **matcher)
{
- RTE_SET_USED(ctx);
- RTE_SET_USED(attr);
+ struct mlx5dv_flow_matcher_attr *mattr;
+
 RTE_SET_USED(table);
 *matcher = NULL;
+ /* Only NORMAL (rule-specification) steering is supported here. */
+ mattr = attr;
+ if (mattr->type != IBV_FLOW_ATTR_NORMAL) {
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+ }
+ /* Single allocation: matcher header plus full PRM match-mask buffer. */
+ struct mlx5_matcher *mlx5_matcher =
+ mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(struct mlx5_matcher) +
+ MLX5_ST_SZ_BYTES(fte_match_param),
+ 0, SOCKET_ID_ANY);
+ if (!mlx5_matcher) {
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ mlx5_matcher->ctx = ctx;
+ memcpy(&mlx5_matcher->attr, attr, sizeof(mlx5_matcher->attr));
+ /*
+ * NOTE(review): copies a full fte_match_param regardless of
+ * mattr->match_mask->match_sz; assumes callers always supply a
+ * buffer at least MLX5_ST_SZ_BYTES(fte_match_param) long — confirm,
+ * otherwise this over-reads the caller's mask buffer.
+ */
+ memcpy(&mlx5_matcher->match_buf,
+ mattr->match_mask->match_buf,
+ MLX5_ST_SZ_BYTES(fte_match_param));
+ *matcher = mlx5_matcher;
+ return 0;
}
/**
int
mlx5_flow_os_destroy_flow_matcher(void *matcher)
{
- RTE_SET_USED(matcher);
- rte_errno = ENOTSUP;
- return -rte_errno;
+ /* The matcher is one mlx5_malloc'ed chunk; freeing it releases all. */
+ mlx5_free(matcher);
+ return 0;
}
/**