common/mlx5: share device context object
[dpdk.git] / drivers / common / mlx5 / linux / mlx5_glue.c
index 395519d..037ca96 100644 (file)
@@ -391,7 +391,7 @@ mlx5_glue_dr_create_flow_action_dest_flow_tbl(void *tbl)
 static void *
 mlx5_glue_dr_create_flow_action_dest_port(void *domain, uint32_t port)
 {
-#ifdef HAVE_MLX5DV_DR_DEVX_PORT
+#ifdef HAVE_MLX5DV_DR_CREATE_DEST_IB_PORT
        return mlx5dv_dr_action_create_dest_ib_port(domain, port);
 #else
 #ifdef HAVE_MLX5DV_DR_ESWITCH
@@ -492,6 +492,19 @@ mlx5_glue_dr_destroy_domain(void *domain)
 #endif
 }
 
/*
 * Glue wrapper for mlx5dv_dr_domain_sync().
 *
 * @param domain  DR domain handle (opaque to the caller).
 * @param flags   Sync flags forwarded verbatim to rdma-core.
 * @return 0 on success; when direct rules are unavailable at build time,
 *         sets errno to ENOTSUP and returns it.
 */
static int
mlx5_glue_dr_sync_domain(void *domain, uint32_t flags)
{
#ifdef HAVE_MLX5DV_DR
	return mlx5dv_dr_domain_sync(domain, flags);
#else
	/* Stub path: rdma-core lacks the DR API entirely. */
	(void)flags;
	(void)domain;
	errno = ENOTSUP;
	return errno;
#endif
}
+
 static struct ibv_cq_ex *
 mlx5_glue_dv_create_cq(struct ibv_context *context,
                       struct ibv_cq_init_attr_ex *cq_attr,
@@ -755,6 +768,7 @@ mlx5_glue_dv_create_flow_action_tag(uint32_t tag)
        return mlx5dv_dr_action_create_tag(tag);
 #else /* HAVE_MLX5DV_DR */
        struct mlx5dv_flow_action_attr *action;
+
        action = malloc(sizeof(*action));
        if (!action)
                return NULL;
@@ -797,6 +811,27 @@ mlx5_glue_dv_modify_flow_action_meter(void *action,
 #endif
 }
 
+static void *
+mlx5_glue_dv_create_flow_action_aso(struct mlx5dv_dr_domain *domain,
+                                   void *aso_obj,
+                                   uint32_t offset,
+                                   uint32_t flags,
+                                   uint8_t return_reg_c)
+{
+#if defined(HAVE_MLX5DV_DR) && defined(HAVE_MLX5_DR_CREATE_ACTION_ASO)
+       return mlx5dv_dr_action_create_aso(domain, aso_obj, offset,
+                                          flags, return_reg_c);
+#else
+       (void)domain;
+       (void)aso_obj;
+       (void)offset;
+       (void)flags;
+       (void)return_reg_c;
+       errno = ENOTSUP;
+       return NULL;
+#endif
+}
+
 static void *
 mlx5_glue_dr_create_flow_action_default_miss(void)
 {
@@ -1033,21 +1068,87 @@ mlx5_glue_devx_qp_query(struct ibv_qp *qp,
 }
 
/*
 * Glue wrapper for mlx5dv_devx_wq_query(): issue a DevX query command
 * against a work queue.
 *
 * @param wq      Target ibv work queue.
 * @param in      Command input buffer of @inlen bytes.
 * @param out     Command output buffer of @outlen bytes.
 * @return 0 on success; ENOTSUP (also stored in errno) when the DevX QP
 *         API is not available in rdma-core.
 */
static int
mlx5_glue_devx_wq_query(struct ibv_wq *wq, const void *in, size_t inlen,
			void *out, size_t outlen)
{
#ifdef HAVE_IBV_DEVX_QP
	return mlx5dv_devx_wq_query(wq, in, inlen, out, outlen);
#else
	/* Stub path: DevX WQ queries are not compiled in. */
	(void)outlen;
	(void)out;
	(void)inlen;
	(void)in;
	(void)wq;
	errno = ENOTSUP;
	return errno;
#endif
}
 
+static int
+mlx5_glue_devx_port_query(struct ibv_context *ctx,
+                         uint32_t port_num,
+                         struct mlx5_port_info *info)
+{
+       int err = 0;
+
+       info->query_flags = 0;
+#ifdef HAVE_MLX5DV_DR_DEVX_PORT_V35
+       /* The DevX port query API is implemented (rdma-core v35 and above). */
+       struct mlx5_ib_uapi_query_port devx_port;
+
+       memset(&devx_port, 0, sizeof(devx_port));
+       err = mlx5dv_query_port(ctx, port_num, &devx_port);
+       if (err)
+               return err;
+       if (devx_port.flags & MLX5DV_QUERY_PORT_VPORT_REG_C0) {
+               info->vport_meta_tag = devx_port.reg_c0.value;
+               info->vport_meta_mask = devx_port.reg_c0.mask;
+               info->query_flags |= MLX5_PORT_QUERY_REG_C0;
+       }
+       if (devx_port.flags & MLX5DV_QUERY_PORT_VPORT) {
+               info->vport_id = devx_port.vport;
+               info->query_flags |= MLX5_PORT_QUERY_VPORT;
+       }
+#else
+#ifdef HAVE_MLX5DV_DR_DEVX_PORT
+       /* The legacy DevX port query API is implemented (prior v35). */
+       struct mlx5dv_devx_port devx_port = {
+               .comp_mask = MLX5DV_DEVX_PORT_VPORT |
+                            MLX5DV_DEVX_PORT_MATCH_REG_C_0
+       };
+
+       err = mlx5dv_query_devx_port(ctx, port_num, &devx_port);
+       if (err)
+               return err;
+       if (devx_port.comp_mask & MLX5DV_DEVX_PORT_MATCH_REG_C_0) {
+               info->vport_meta_tag = devx_port.reg_c_0.value;
+               info->vport_meta_mask = devx_port.reg_c_0.mask;
+               info->query_flags |= MLX5_PORT_QUERY_REG_C0;
+       }
+       if (devx_port.comp_mask & MLX5DV_DEVX_PORT_VPORT) {
+               info->vport_id = devx_port.vport_num;
+               info->query_flags |= MLX5_PORT_QUERY_VPORT;
+       }
+#else
+       RTE_SET_USED(ctx);
+       RTE_SET_USED(port_num);
+#endif /* HAVE_MLX5DV_DR_DEVX_PORT */
+#endif /* HAVE_MLX5DV_DR_DEVX_PORT_V35 */
+       return err;
+}
+
/*
 * Dump a single direct-rule (DR) steering rule to @file.
 *
 * @param file  Open stream the rule text is written to.
 * @param rule  Opaque DR rule handle.
 * @return the mlx5dv_dump_dr_rule() result, or -ENOTSUP when rdma-core
 *         lacks the rule-dump API (note: negative, unlike the errno-style
 *         positive returns of most stubs in this file — matches the dump
 *         function convention used by mlx5_glue_dr_dump_domain).
 */
static int
mlx5_glue_dr_dump_single_rule(FILE *file, void *rule)
{
#ifdef HAVE_MLX5_DR_FLOW_DUMP_RULE
	return mlx5dv_dump_dr_rule(file, rule);
#else
	RTE_SET_USED(file);
	RTE_SET_USED(rule);
	return -ENOTSUP;
#endif
}
+
 static int
 mlx5_glue_dr_dump_domain(FILE *file, void *domain)
 {
@@ -1060,6 +1161,39 @@ mlx5_glue_dr_dump_domain(FILE *file, void *domain)
 #endif
 }
 
+static void *
+mlx5_glue_dr_create_flow_action_sampler
+                       (struct mlx5dv_dr_flow_sampler_attr *attr)
+{
+#ifdef HAVE_MLX5_DR_CREATE_ACTION_FLOW_SAMPLE
+       return mlx5dv_dr_action_create_flow_sampler(attr);
+#else
+       (void)attr;
+       errno = ENOTSUP;
+       return NULL;
+#endif
+}
+
/*
 * Glue wrapper for mlx5dv_dr_action_create_dest_array().
 *
 * @param domain    DR domain the action belongs to.
 * @param num_dest  Number of entries in @dests.
 * @param dests     Array of destination attribute pointers.
 * @return the action handle, or NULL with errno = ENOTSUP when the
 *         dest-array action is not provided by the installed rdma-core.
 */
static void *
mlx5_glue_dr_action_create_dest_array
			(void *domain,
			 size_t num_dest,
			 struct mlx5dv_dr_action_dest_attr *dests[])
{
#ifdef HAVE_MLX5_DR_CREATE_ACTION_DEST_ARRAY
	return mlx5dv_dr_action_create_dest_array(domain, num_dest, dests);
#else
	/* Stub path: consume parameters and report "not supported". */
	(void)dests;
	(void)num_dest;
	(void)domain;
	errno = ENOTSUP;
	return NULL;
#endif
}
+
 static int
 mlx5_glue_devx_query_eqn(struct ibv_context *ctx, uint32_t cpus,
                         uint32_t *eqn)
@@ -1195,7 +1329,6 @@ mlx5_glue_dv_free_var(struct mlx5dv_var *var)
 #endif
 }
 
-
 static void
 mlx5_glue_dr_reclaim_domain_memory(void *domain, uint32_t enable)
 {
@@ -1207,6 +1340,45 @@ mlx5_glue_dr_reclaim_domain_memory(void *domain, uint32_t enable)
 #endif
 }
 
/*
 * Glue wrapper for mlx5dv_pp_alloc(): allocate a packet pacing context.
 *
 * @param context        Device context to allocate on.
 * @param pp_context_sz  Size in bytes of @pp_context.
 * @param pp_context     Opaque packet pacing parameters blob.
 * @param flags          Allocation flags forwarded to rdma-core.
 * @return the allocated mlx5dv_pp object (freed via mlx5_glue_dv_free_pp),
 *         or NULL with errno = ENOTSUP when rdma-core lacks the PP API.
 */
static struct mlx5dv_pp *
mlx5_glue_dv_alloc_pp(struct ibv_context *context,
		      size_t pp_context_sz,
		      const void *pp_context,
		      uint32_t flags)
{
#ifdef HAVE_MLX5DV_PP_ALLOC
	return mlx5dv_pp_alloc(context, pp_context_sz, pp_context, flags);
#else
	/* Stub path: packet pacing allocation not compiled in. */
	RTE_SET_USED(context);
	RTE_SET_USED(pp_context_sz);
	RTE_SET_USED(pp_context);
	RTE_SET_USED(flags);
	errno = ENOTSUP;
	return NULL;
#endif
}
+
/*
 * Glue wrapper for mlx5dv_dr_domain_allow_duplicate_rules().
 *
 * @param domain  DR domain to configure.
 * @param allow   Non-zero to allow duplicate rules in the domain.
 * Silently does nothing when rdma-core lacks the duplicate-rule knob.
 */
static void
mlx5_glue_dr_allow_duplicate_rules(void *domain, uint32_t allow)
{
#ifdef HAVE_MLX5_DR_ALLOW_DUPLICATE
	mlx5dv_dr_domain_allow_duplicate_rules(domain, allow);
#else
	(void)domain;
	(void)allow;
#endif
}
+
/*
 * Glue wrapper for mlx5dv_pp_free(): release a packet pacing context
 * obtained from mlx5_glue_dv_alloc_pp(). No-op stub when the PP API is
 * absent (in which case alloc can never have succeeded anyway).
 */
static void
mlx5_glue_dv_free_pp(struct mlx5dv_pp *pp)
{
#ifdef HAVE_MLX5DV_PP_ALLOC
	mlx5dv_pp_free(pp);
#else
	RTE_SET_USED(pp);
#endif
}
+
 __rte_cache_aligned
 const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue) {
        .version = MLX5_GLUE_VERSION,
@@ -1268,6 +1440,7 @@ const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue) {
        .dr_destroy_flow_tbl = mlx5_glue_dr_destroy_flow_tbl,
        .dr_create_domain = mlx5_glue_dr_create_domain,
        .dr_destroy_domain = mlx5_glue_dr_destroy_domain,
+       .dr_sync_domain = mlx5_glue_dr_sync_domain,
        .dv_create_cq = mlx5_glue_dv_create_cq,
        .dv_create_wq = mlx5_glue_dv_create_wq,
        .dv_query_device = mlx5_glue_dv_query_device,
@@ -1289,6 +1462,7 @@ const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue) {
        .dv_create_flow_action_tag =  mlx5_glue_dv_create_flow_action_tag,
        .dv_create_flow_action_meter = mlx5_glue_dv_create_flow_action_meter,
        .dv_modify_flow_action_meter = mlx5_glue_dv_modify_flow_action_meter,
+       .dv_create_flow_action_aso = mlx5_glue_dv_create_flow_action_aso,
        .dr_create_flow_action_default_miss =
                mlx5_glue_dr_create_flow_action_default_miss,
        .dv_destroy_flow = mlx5_glue_dv_destroy_flow,
@@ -1306,9 +1480,16 @@ const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue) {
        .devx_umem_reg = mlx5_glue_devx_umem_reg,
        .devx_umem_dereg = mlx5_glue_devx_umem_dereg,
        .devx_qp_query = mlx5_glue_devx_qp_query,
+       .devx_wq_query = mlx5_glue_devx_wq_query,
        .devx_port_query = mlx5_glue_devx_port_query,
        .dr_dump_domain = mlx5_glue_dr_dump_domain,
+       .dr_dump_rule = mlx5_glue_dr_dump_single_rule,
        .dr_reclaim_domain_memory = mlx5_glue_dr_reclaim_domain_memory,
+       .dr_create_flow_action_sampler =
+               mlx5_glue_dr_create_flow_action_sampler,
+       .dr_create_flow_action_dest_array =
+               mlx5_glue_dr_action_create_dest_array,
+       .dr_allow_duplicate_rules = mlx5_glue_dr_allow_duplicate_rules,
        .devx_query_eqn = mlx5_glue_devx_query_eqn,
        .devx_create_event_channel = mlx5_glue_devx_create_event_channel,
        .devx_destroy_event_channel = mlx5_glue_devx_destroy_event_channel,
@@ -1319,4 +1500,6 @@ const struct mlx5_glue *mlx5_glue = &(const struct mlx5_glue) {
        .devx_free_uar = mlx5_glue_devx_free_uar,
        .dv_alloc_var = mlx5_glue_dv_alloc_var,
        .dv_free_var = mlx5_glue_dv_free_var,
+       .dv_alloc_pp = mlx5_glue_dv_alloc_pp,
+       .dv_free_pp = mlx5_glue_dv_free_pp,
 };