return ((struct ibv_context *)ctx)->device->ibdev_path;
}
+
+/**
+ * Get mlx5 device attributes. The glue function query_device_ex() is called
+ * with an out parameter of type 'struct ibv_device_attr_ex *'. The mlx5
+ * device attributes are then filled in from that glue out parameter.
+ *
+ * @param ctx
+ * Pointer to ibv context.
+ *
+ * @param device_attr
+ * Pointer to mlx5 device attributes.
+ *
+ * @return
+ * 0 on success, a non-zero error number otherwise.
+ */
+int
+mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *device_attr)
+{
+ int err;
+ struct ibv_device_attr_ex attr_ex;
+ memset(device_attr, 0, sizeof(*device_attr));
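+ /* Query the generic verbs device attributes. */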
+ err = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
+ if (err)
+ return err;
+
+ device_attr->device_cap_flags_ex = attr_ex.device_cap_flags_ex;
+ device_attr->max_qp_wr = attr_ex.orig_attr.max_qp_wr;
+ device_attr->max_sge = attr_ex.orig_attr.max_sge;
+ device_attr->max_cq = attr_ex.orig_attr.max_cq;
+ device_attr->max_qp = attr_ex.orig_attr.max_qp;
+ device_attr->raw_packet_caps = attr_ex.raw_packet_caps;
+ device_attr->max_rwq_indirection_table_size =
+ attr_ex.rss_caps.max_rwq_indirection_table_size;
+ device_attr->max_tso = attr_ex.tso_caps.max_tso;
+ device_attr->tso_supported_qpts = attr_ex.tso_caps.supported_qpts;
+
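+ /* Query the mlx5-specific attributes through Direct Verbs. */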
+ struct mlx5dv_context dv_attr = { .comp_mask = 0 };
+ err = mlx5_glue->dv_query_device(ctx, &dv_attr);
+ if (err)
+ return err;
+
+ device_attr->flags = dv_attr.flags;
+ device_attr->comp_mask = dv_attr.comp_mask;
+#ifdef HAVE_IBV_MLX5_MOD_SWP
+ device_attr->sw_parsing_offloads =
+ dv_attr.sw_parsing_caps.sw_parsing_offloads;
+#endif
+ device_attr->min_single_stride_log_num_of_bytes =
+ dv_attr.striding_rq_caps.min_single_stride_log_num_of_bytes;
+ device_attr->max_single_stride_log_num_of_bytes =
+ dv_attr.striding_rq_caps.max_single_stride_log_num_of_bytes;
+ device_attr->min_single_wqe_log_num_of_strides =
+ dv_attr.striding_rq_caps.min_single_wqe_log_num_of_strides;
+ device_attr->max_single_wqe_log_num_of_strides =
+ dv_attr.striding_rq_caps.max_single_wqe_log_num_of_strides;
+ device_attr->stride_supported_qpts =
+ dv_attr.striding_rq_caps.supported_qpts;
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ device_attr->tunnel_offloads_caps = dv_attr.tunnel_offloads_caps;
+#endif
+
+ /* Keep the FW version string used by mlx5_fw_version_get(). */
+ strlcpy(device_attr->fw_ver, attr_ex.orig_attr.fw_ver,
+ sizeof(device_attr->fw_ver));
+
+ return err;
+}
goto error;
DRV_LOG(DEBUG, "DevX is NOT supported");
}
- err = mlx5_glue->query_device_ex(sh->ctx, NULL, &sh->device_attr);
+ err = mlx5_os_get_dev_attr(sh->ctx, &sh->device_attr);
if (err) {
- DRV_LOG(DEBUG, "ibv_query_device_ex() failed");
+ DRV_LOG(DEBUG, "mlx5_os_get_dev_attr() failed");
goto error;
}
sh->refcnt = 1;
}
#endif
config.ind_table_max_size =
- sh->device_attr.rss_caps.max_rwq_indirection_table_size;
+ sh->device_attr.max_rwq_indirection_table_size;
/*
* Remove this check once DPDK supports larger/variable
* indirection tables.
} else if (config.hw_padding) {
DRV_LOG(DEBUG, "Rx end alignment padding is enabled");
}
- config.tso = (sh->device_attr.tso_caps.max_tso > 0 &&
- (sh->device_attr.tso_caps.supported_qpts &
+ config.tso = (sh->device_attr.max_tso > 0 &&
+ (sh->device_attr.tso_supported_qpts &
(1 << IBV_QPT_RAW_PACKET)));
if (config.tso)
- config.tso_max_payload_sz = sh->device_attr.tso_caps.max_tso;
+ config.tso_max_payload_sz = sh->device_attr.max_tso;
/*
* MPW is disabled by default, while the Enhanced MPW is enabled
* by default.
#include "mlx5_utils.h"
#include "mlx5_autoconf.h"
-
enum mlx5_ipool_index {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
MLX5_IPOOL_DECAP_ENCAP = 0, /* Pool for encap/decap resource. */
MLX5_RCM_AGGR, /* Reclaim PMD and rdma-core level. */
};
+
+/* Device attributes used in mlx5 PMD. */
+struct mlx5_dev_attr {
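+ /* Attributes filled from the query_device_ex() glue output. */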
+ uint64_t device_cap_flags_ex;
+ int max_qp_wr;
+ int max_sge;
+ int max_cq;
+ int max_qp;
+ uint32_t raw_packet_caps;
+ uint32_t max_rwq_indirection_table_size;
+ uint32_t max_tso;
+ uint32_t tso_supported_qpts;
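+ /* Attributes filled from the dv_query_device() glue output. */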
+ uint64_t flags;
+ uint64_t comp_mask;
+ uint32_t sw_parsing_offloads;
+ uint32_t min_single_stride_log_num_of_bytes;
+ uint32_t max_single_stride_log_num_of_bytes;
+ uint32_t min_single_wqe_log_num_of_strides;
+ uint32_t max_single_wqe_log_num_of_strides;
+ uint32_t stride_supported_qpts;
+ uint32_t tunnel_offloads_caps;
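+ /* Firmware version string, reported through mlx5_fw_version_get(). */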
+ char fw_ver[64];
+};
+
/** Key string for IPC. */
#define MLX5_MP_NAME "net_mlx5_mp"
uint32_t tdn; /* Transport Domain number. */
char ibdev_name[IBV_SYSFS_NAME_MAX]; /* IB device name. */
char ibdev_path[IBV_SYSFS_PATH_MAX]; /* IB device path for secondary */
- struct ibv_device_attr_ex device_attr; /* Device properties. */
+ struct mlx5_dev_attr device_attr; /* Device properties. */
LIST_ENTRY(mlx5_dev_ctx_shared) mem_event_cb;
/**< Called by memory event callback. */
struct mlx5_mr_share_cache share_cache;
/* mlx5_os.c */
const char *mlx5_os_get_ctx_device_name(void *ctx);
const char *mlx5_os_get_ctx_device_path(void *ctx);
+int mlx5_os_get_dev_attr(void *ctx, struct mlx5_dev_attr *dev_attr);
#endif /* RTE_PMD_MLX5_H_ */
* Since we need one CQ per QP, the limit is the minimum number
* between the two values.
*/
- max = RTE_MIN(priv->sh->device_attr.orig_attr.max_cq,
- priv->sh->device_attr.orig_attr.max_qp);
+ max = RTE_MIN(priv->sh->device_attr.max_cq,
+ priv->sh->device_attr.max_qp);
/* max_rx_queues is uint16_t. */
max = RTE_MIN(max, (unsigned int)UINT16_MAX);
info->max_rx_queues = max;
int mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct ibv_device_attr *attr = &priv->sh->device_attr.orig_attr;
+ struct mlx5_dev_attr *attr = &priv->sh->device_attr;
size_t size = strnlen(attr->fw_ver, sizeof(attr->fw_ver)) + 1;
if (fw_size < size)
goto error;
}
DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
- dev->data->port_id, priv->sh->device_attr.orig_attr.max_qp_wr);
+ dev->data->port_id, priv->sh->device_attr.max_qp_wr);
DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
- dev->data->port_id, priv->sh->device_attr.orig_attr.max_sge);
+ dev->data->port_id, priv->sh->device_attr.max_sge);
/* Allocate door-bell for types created with DevX. */
if (tmpl->type != MLX5_RXQ_OBJ_TYPE_IBV) {
struct mlx5_devx_dbr_page *dbr_page;
.cap = {
/* Max number of outstanding WRs. */
.max_send_wr =
- ((priv->sh->device_attr.orig_attr.max_qp_wr <
+ ((priv->sh->device_attr.max_qp_wr <
desc) ?
- priv->sh->device_attr.orig_attr.max_qp_wr :
+ priv->sh->device_attr.max_qp_wr :
desc),
/*
* Max number of scatter/gather elements in a WR,
struct mlx5_priv *priv = txq_ctrl->priv;
unsigned int wqe_size;
- wqe_size = priv->sh->device_attr.orig_attr.max_qp_wr / desc;
+ wqe_size = priv->sh->device_attr.max_qp_wr / desc;
if (!wqe_size)
return 0;
/*
" Tx queue size (%d)",
txq_ctrl->txq.inlen_mode, max_inline,
priv->dev_data->port_id,
- priv->sh->device_attr.orig_attr.max_qp_wr);
+ priv->sh->device_attr.max_qp_wr);
goto error;
}
if (txq_ctrl->txq.inlen_send > max_inline &&
" Tx queue size (%d)",
txq_ctrl->txq.inlen_send, max_inline,
priv->dev_data->port_id,
- priv->sh->device_attr.orig_attr.max_qp_wr);
+ priv->sh->device_attr.max_qp_wr);
goto error;
}
if (txq_ctrl->txq.inlen_empw > max_inline &&
" Tx queue size (%d)",
txq_ctrl->txq.inlen_empw, max_inline,
priv->dev_data->port_id,
- priv->sh->device_attr.orig_attr.max_qp_wr);
+ priv->sh->device_attr.max_qp_wr);
goto error;
}
if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) {
" Tx queue size (%d)",
MLX5_MAX_TSO_HEADER, max_inline,
priv->dev_data->port_id,
- priv->sh->device_attr.orig_attr.max_qp_wr);
+ priv->sh->device_attr.max_qp_wr);
goto error;
}
if (txq_ctrl->txq.inlen_send > max_inline) {
if (txq_adjust_params(tmpl))
goto error;
if (txq_calc_wqebb_cnt(tmpl) >
- priv->sh->device_attr.orig_attr.max_qp_wr) {
+ priv->sh->device_attr.max_qp_wr) {
DRV_LOG(ERR,
"port %u Tx WQEBB count (%d) exceeds the limit (%d),"
" try smaller queue size",
dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
- priv->sh->device_attr.orig_attr.max_qp_wr);
+ priv->sh->device_attr.max_qp_wr);
rte_errno = ENOMEM;
goto error;
}