net/mlx5: remove attributes dependency on Verbs
author Ophir Munk <ophirmu@mellanox.com>
Wed, 3 Jun 2020 15:05:58 +0000 (15:05 +0000)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Tue, 16 Jun 2020 17:21:07 +0000 (19:21 +0200)
Define 'struct mlx5_dev_attr', which is ibv and dv independent. It
holds the attributes that were originally taken from 'struct
ibv_device_attr_ex' and 'struct mlx5dv_context dv_attr'. Add a new API,
mlx5_os_get_dev_attr(), which fills in the newly defined struct.

Signed-off-by: Ophir Munk <ophirmu@mellanox.com>
Acked-by: Matan Azrad <matan@mellanox.com>
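
A minimal usage sketch of the new API from OS-independent PMD code
(illustrative only, not part of this patch; 'ctx' stands for an already
opened device context and the helper name is hypothetical):

	#include "mlx5.h"

	static int
	example_get_max_qp_wr(void *ctx, int *max_qp_wr)
	{
		struct mlx5_dev_attr attr;
		int err;

		/* Fill the ibv/dv independent attribute struct. */
		err = mlx5_os_get_dev_attr(ctx, &attr);
		if (err)
			return err;
		/* Flattened fields replace attr_ex.orig_attr.* accesses. */
		*max_qp_wr = attr.max_qp_wr;
		return 0;
	}
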
drivers/net/mlx5/linux/mlx5_os.c
drivers/net/mlx5/mlx5.c
drivers/net/mlx5/mlx5.h
drivers/net/mlx5/mlx5_ethdev.c
drivers/net/mlx5/mlx5_rxq.c
drivers/net/mlx5/mlx5_txq.c

index 9443239..85dcf49 100644 (drivers/net/mlx5/linux/mlx5_os.c)
@@ -85,3 +85,66 @@ mlx5_os_get_ctx_device_path(void *ctx)
 
        return ((struct ibv_context *)ctx)->device->ibdev_path;
 }
+
+/**
+ * Get mlx5 device attributes. The glue functions query_device_ex() and
+ * dv_query_device() are called, and the mlx5 device attributes are
+ * filled in from their out parameters.
+ *
+ * @param ctx
+ *   Pointer to ibv context.
+ *
+ * @param device_attr
+ *   Pointer to mlx5 device attributes.
+ *
+ * @return
+ *   0 on success, a non-zero error number otherwise.
+ */
+int
+mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr)
+{
+       int err;
+       struct ibv_device_attr_ex attr_ex;
+       memset(device_attr, 0, sizeof(*device_attr));
+       err = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
+       if (err)
+               return err;
+
+       device_attr->device_cap_flags_ex = attr_ex.device_cap_flags_ex;
+       device_attr->max_qp_wr = attr_ex.orig_attr.max_qp_wr;
+       device_attr->max_sge = attr_ex.orig_attr.max_sge;
+       device_attr->max_cq = attr_ex.orig_attr.max_cq;
+       device_attr->max_qp = attr_ex.orig_attr.max_qp;
+       device_attr->raw_packet_caps = attr_ex.raw_packet_caps;
+       device_attr->max_rwq_indirection_table_size =
+               attr_ex.rss_caps.max_rwq_indirection_table_size;
+       device_attr->max_tso = attr_ex.tso_caps.max_tso;
+       device_attr->tso_supported_qpts = attr_ex.tso_caps.supported_qpts;
+
+       struct mlx5dv_context dv_attr = { .comp_mask = 0 };
+       err = mlx5_glue->dv_query_device(ctx, &dv_attr);
+       if (err)
+               return err;
+
+       device_attr->flags = dv_attr.flags;
+       device_attr->comp_mask = dv_attr.comp_mask;
+#ifdef HAVE_IBV_MLX5_MOD_SWP
+       device_attr->sw_parsing_offloads =
+               dv_attr.sw_parsing_caps.sw_parsing_offloads;
+#endif
+       device_attr->min_single_stride_log_num_of_bytes =
+               dv_attr.striding_rq_caps.min_single_stride_log_num_of_bytes;
+       device_attr->max_single_stride_log_num_of_bytes =
+               dv_attr.striding_rq_caps.max_single_stride_log_num_of_bytes;
+       device_attr->min_single_wqe_log_num_of_strides =
+               dv_attr.striding_rq_caps.min_single_wqe_log_num_of_strides;
+       device_attr->max_single_wqe_log_num_of_strides =
+               dv_attr.striding_rq_caps.max_single_wqe_log_num_of_strides;
+       device_attr->stride_supported_qpts =
+               dv_attr.striding_rq_caps.supported_qpts;
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+       device_attr->tunnel_offloads_caps = dv_attr.tunnel_offloads_caps;
+#endif
+
+       return err;
+}
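
The DV-derived capabilities (SW parsing, striding RQ, tunnel offloads)
are likewise exposed as plain fields. A hedged sketch checking the
striding RQ limits (the helper below is illustrative, not part of this
patch):

	#include <errno.h>
	#include <stdint.h>

	#include "mlx5.h"

	/* Reject a log2 stride size (in bytes) outside the limits reported
	 * by mlx5_os_get_dev_attr(). */
	static int
	example_check_stride(const struct mlx5_dev_attr *attr,
			     uint32_t log_stride_bytes)
	{
		if (log_stride_bytes < attr->min_single_stride_log_num_of_bytes ||
		    log_stride_bytes > attr->max_single_stride_log_num_of_bytes)
			return -EINVAL;
		return 0;
	}
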
index 95a34d1..0fa8742 100644 (drivers/net/mlx5/mlx5.c)
@@ -825,9 +825,9 @@ mlx5_alloc_shared_ibctx(const struct mlx5_dev_spawn_data *spawn,
                        goto error;
                DRV_LOG(DEBUG, "DevX is NOT supported");
        }
-       err = mlx5_glue->query_device_ex(sh->ctx, NULL, &sh->device_attr);
+       err = mlx5_os_get_dev_attr(sh->ctx, &sh->device_attr);
        if (err) {
-               DRV_LOG(DEBUG, "ibv_query_device_ex() failed");
+               DRV_LOG(DEBUG, "mlx5_os_get_dev_attr() failed");
                goto error;
        }
        sh->refcnt = 1;
@@ -2799,7 +2799,7 @@ err_secondary:
        }
 #endif
        config.ind_table_max_size =
-               sh->device_attr.rss_caps.max_rwq_indirection_table_size;
+               sh->device_attr.max_rwq_indirection_table_size;
        /*
         * Remove this check once DPDK supports larger/variable
         * indirection tables.
@@ -2828,11 +2828,11 @@ err_secondary:
        } else if (config.hw_padding) {
                DRV_LOG(DEBUG, "Rx end alignment padding is enabled");
        }
-       config.tso = (sh->device_attr.tso_caps.max_tso > 0 &&
-                     (sh->device_attr.tso_caps.supported_qpts &
+       config.tso = (sh->device_attr.max_tso > 0 &&
+                     (sh->device_attr.tso_supported_qpts &
                       (1 << IBV_QPT_RAW_PACKET)));
        if (config.tso)
-               config.tso_max_payload_sz = sh->device_attr.tso_caps.max_tso;
+               config.tso_max_payload_sz = sh->device_attr.max_tso;
        /*
         * MPW is disabled by default, while the Enhanced MPW is enabled
         * by default.
index 30678aa..478ebef 100644 (drivers/net/mlx5/mlx5.h)
@@ -43,7 +43,6 @@
 #include "mlx5_utils.h"
 #include "mlx5_autoconf.h"
 
-
 enum mlx5_ipool_index {
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
        MLX5_IPOOL_DECAP_ENCAP = 0, /* Pool for encap/decap resource. */
@@ -72,6 +71,29 @@ enum mlx5_reclaim_mem_mode {
        MLX5_RCM_AGGR, /* Reclaim PMD and rdma-core level. */
 };
 
+/* Device attributes used in mlx5 PMD */
+struct mlx5_dev_attr {
+       uint64_t        device_cap_flags_ex;
+       int             max_qp_wr;
+       int             max_sge;
+       int             max_cq;
+       int             max_qp;
+       uint32_t        raw_packet_caps;
+       uint32_t        max_rwq_indirection_table_size;
+       uint32_t        max_tso;
+       uint32_t        tso_supported_qpts;
+       uint64_t        flags;
+       uint64_t        comp_mask;
+       uint32_t        sw_parsing_offloads;
+       uint32_t        min_single_stride_log_num_of_bytes;
+       uint32_t        max_single_stride_log_num_of_bytes;
+       uint32_t        min_single_wqe_log_num_of_strides;
+       uint32_t        max_single_wqe_log_num_of_strides;
+       uint32_t        stride_supported_qpts;
+       uint32_t        tunnel_offloads_caps;
+       char            fw_ver[64];
+};
+
 /** Key string for IPC. */
 #define MLX5_MP_NAME "net_mlx5_mp"
 
@@ -499,7 +521,7 @@ struct mlx5_dev_ctx_shared {
        uint32_t tdn; /* Transport Domain number. */
        char ibdev_name[IBV_SYSFS_NAME_MAX]; /* IB device name. */
        char ibdev_path[IBV_SYSFS_PATH_MAX]; /* IB device path for secondary */
-       struct ibv_device_attr_ex device_attr; /* Device properties. */
+       struct mlx5_dev_attr device_attr; /* Device properties. */
        LIST_ENTRY(mlx5_dev_ctx_shared) mem_event_cb;
        /**< Called by memory event callback. */
        struct mlx5_mr_share_cache share_cache;
@@ -856,5 +878,6 @@ void mlx5_flow_meter_detach(struct mlx5_flow_meter *fm);
 /* mlx5_os.c */
 const char *mlx5_os_get_ctx_device_name(void *ctx);
 const char *mlx5_os_get_ctx_device_path(void *ctx);
+int mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *dev_attr);
 
 #endif /* RTE_PMD_MLX5_H_ */
index 6919911..6b8b303 100644 (drivers/net/mlx5/mlx5_ethdev.c)
@@ -626,8 +626,8 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
         * Since we need one CQ per QP, the limit is the minimum number
         * between the two values.
         */
-       max = RTE_MIN(priv->sh->device_attr.orig_attr.max_cq,
-                     priv->sh->device_attr.orig_attr.max_qp);
+       max = RTE_MIN(priv->sh->device_attr.max_cq,
+                     priv->sh->device_attr.max_qp);
        /* max_rx_queues is uint16_t. */
        max = RTE_MIN(max, (unsigned int)UINT16_MAX);
        info->max_rx_queues = max;
@@ -736,7 +736,7 @@ mlx5_read_clock(struct rte_eth_dev *dev, uint64_t *clock)
 int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       struct ibv_device_attr *attr = &priv->sh->device_attr.orig_attr;
+       struct mlx5_dev_attr *attr = &priv->sh->device_attr;
        size_t size = strnlen(attr->fw_ver, sizeof(attr->fw_ver)) + 1;
 
        if (fw_size < size)
index 0b0abe1..f018553 100644 (drivers/net/mlx5/mlx5_rxq.c)
@@ -1405,9 +1405,9 @@ mlx5_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
                goto error;
        }
        DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
-               dev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr);
+               dev->data->port_id, priv->sh->device_attr.max_qp_wr);
        DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
-               dev->data->port_id, priv->sh->device_attr.orig_attr.max_sge);
+               dev->data->port_id, priv->sh->device_attr.max_sge);
        /* Allocate door-bell for types created with DevX. */
        if (tmpl->type != MLX5_RXQ_OBJ_TYPE_IBV) {
                struct mlx5_devx_dbr_page *dbr_page;
index 2047a9a..f7b548f 100644 (drivers/net/mlx5/mlx5_txq.c)
@@ -645,9 +645,9 @@ mlx5_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx,
                .cap = {
                        /* Max number of outstanding WRs. */
                        .max_send_wr =
-                               ((priv->sh->device_attr.orig_attr.max_qp_wr <
+                               ((priv->sh->device_attr.max_qp_wr <
                                  desc) ?
-                                priv->sh->device_attr.orig_attr.max_qp_wr :
+                                priv->sh->device_attr.max_qp_wr :
                                 desc),
                        /*
                         * Max number of scatter/gather elements in a WR,
@@ -948,7 +948,7 @@ txq_calc_inline_max(struct mlx5_txq_ctrl *txq_ctrl)
        struct mlx5_priv *priv = txq_ctrl->priv;
        unsigned int wqe_size;
 
-       wqe_size = priv->sh->device_attr.orig_attr.max_qp_wr / desc;
+       wqe_size = priv->sh->device_attr.max_qp_wr / desc;
        if (!wqe_size)
                return 0;
        /*
@@ -1203,7 +1203,7 @@ txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
                        " Tx queue size (%d)",
                        txq_ctrl->txq.inlen_mode, max_inline,
                        priv->dev_data->port_id,
-                       priv->sh->device_attr.orig_attr.max_qp_wr);
+                       priv->sh->device_attr.max_qp_wr);
                goto error;
        }
        if (txq_ctrl->txq.inlen_send > max_inline &&
@@ -1215,7 +1215,7 @@ txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
                        " Tx queue size (%d)",
                        txq_ctrl->txq.inlen_send, max_inline,
                        priv->dev_data->port_id,
-                       priv->sh->device_attr.orig_attr.max_qp_wr);
+                       priv->sh->device_attr.max_qp_wr);
                goto error;
        }
        if (txq_ctrl->txq.inlen_empw > max_inline &&
@@ -1227,7 +1227,7 @@ txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
                        " Tx queue size (%d)",
                        txq_ctrl->txq.inlen_empw, max_inline,
                        priv->dev_data->port_id,
-                       priv->sh->device_attr.orig_attr.max_qp_wr);
+                       priv->sh->device_attr.max_qp_wr);
                goto error;
        }
        if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) {
@@ -1237,7 +1237,7 @@ txq_adjust_params(struct mlx5_txq_ctrl *txq_ctrl)
                        " Tx queue size (%d)",
                        MLX5_MAX_TSO_HEADER, max_inline,
                        priv->dev_data->port_id,
-                       priv->sh->device_attr.orig_attr.max_qp_wr);
+                       priv->sh->device_attr.max_qp_wr);
                goto error;
        }
        if (txq_ctrl->txq.inlen_send > max_inline) {
@@ -1322,12 +1322,12 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        if (txq_adjust_params(tmpl))
                goto error;
        if (txq_calc_wqebb_cnt(tmpl) >
-           priv->sh->device_attr.orig_attr.max_qp_wr) {
+           priv->sh->device_attr.max_qp_wr) {
                DRV_LOG(ERR,
                        "port %u Tx WQEBB count (%d) exceeds the limit (%d),"
                        " try smaller queue size",
                        dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
-                       priv->sh->device_attr.orig_attr.max_qp_wr);
+                       priv->sh->device_attr.max_qp_wr);
                rte_errno = ENOMEM;
                goto error;
        }