enum mlx5dv_dr_domain_type { unused, };
struct mlx5dv_dr_domain;
struct mlx5dv_dr_action;
+#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif
#ifndef HAVE_MLX5DV_DR_DEVX_PORT
struct mlx5dv_devx_port;
#endif
+#ifndef HAVE_MLX5DV_DR_DEVX_PORT_V35
+struct mlx5dv_port;
+#endif
+
+#define MLX5_PORT_QUERY_VPORT (1u << 0)
+#define MLX5_PORT_QUERY_REG_C0 (1u << 1)
+
+/*
+ * Port attributes filled in by the devx_port_query glue callback.
+ * query_flags presumably carries the MLX5_PORT_QUERY_* bits defined
+ * above, marking which of the fields below hold valid data — TODO
+ * confirm against the glue implementation.
+ */
+struct mlx5_port_info {
+ uint16_t query_flags; /* MLX5_PORT_QUERY_* validity bits (see above). */
+ uint16_t vport_id; /* Associated VF vport index (if any). */
+ uint32_t vport_meta_tag; /* Used for vport index match over VF LAG. */
+ uint32_t vport_meta_mask; /* Used for vport index field match mask. */
+};
+
#ifndef HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER
struct mlx5dv_dr_flow_meter_attr;
#endif
int (*devx_qp_query)(struct ibv_qp *qp,
const void *in, size_t inlen,
void *out, size_t outlen);
+ int (*devx_wq_query)(struct ibv_wq *wq, const void *in, size_t inlen,
+ void *out, size_t outlen);
int (*devx_port_query)(struct ibv_context *ctx,
uint32_t port_num,
- struct mlx5dv_devx_port *mlx5_devx_port);
+ struct mlx5_port_info *info);
int (*dr_dump_domain)(FILE *file, void *domain);
+ int (*dr_dump_rule)(FILE *file, void *rule);
int (*devx_query_eqn)(struct ibv_context *context, uint32_t cpus,
uint32_t *eqn);
struct mlx5dv_devx_event_channel *(*devx_create_event_channel)
struct mlx5dv_devx_async_event_hdr *event_data,
size_t event_resp_len);
void (*dr_reclaim_domain_memory)(void *domain, uint32_t enable);
+ void (*dr_allow_duplicate_rules)(void *domain, uint32_t allow);
struct mlx5dv_pp *(*dv_alloc_pp)(struct ibv_context *context,
size_t pp_context_sz,
const void *pp_context,
(void *domain,
size_t num_dest,
struct mlx5dv_dr_action_dest_attr *dests[]);
- void *(*dr_action_create_flow_hit)(struct mlx5dv_devx_obj *devx_obj,
- uint32_t offset,
- uint8_t reg_c_index);
+ void *(*dv_create_flow_action_aso)
+ (struct mlx5dv_dr_domain *domain, void *aso_obj,
+ uint32_t offset, uint32_t flags, uint8_t return_reg_c);
};
extern const struct mlx5_glue *mlx5_glue;