* with an out parameter of type 'struct ibv_device_attr_ex *'. Then fill in
* the mlx5 device attributes from the glue out parameter.
*
- * @param cdev
- * Pointer to mlx5 device.
- *
- * @param device_attr
- * Pointer to mlx5 device attributes.
+ * @param sh
+ * Pointer to shared device context.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx5_os_get_dev_attr(struct mlx5_common_device *cdev,
- struct mlx5_dev_attr *device_attr)
+mlx5_os_capabilities_prepare(struct mlx5_dev_ctx_shared *sh)
{
int err;
- struct ibv_context *ctx = cdev->ctx;
- struct ibv_device_attr_ex attr_ex;
+ struct ibv_context *ctx = sh->cdev->ctx;
+ struct ibv_device_attr_ex attr_ex = { .comp_mask = 0 };
+ struct mlx5dv_context dv_attr = { .comp_mask = 0 };
- memset(device_attr, 0, sizeof(*device_attr));
err = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
if (err) {
rte_errno = errno;
return -rte_errno;
}
- device_attr->device_cap_flags_ex = attr_ex.device_cap_flags_ex;
- device_attr->max_qp_wr = attr_ex.orig_attr.max_qp_wr;
- device_attr->max_sge = attr_ex.orig_attr.max_sge;
- device_attr->max_cq = attr_ex.orig_attr.max_cq;
- device_attr->max_cqe = attr_ex.orig_attr.max_cqe;
- device_attr->max_mr = attr_ex.orig_attr.max_mr;
- device_attr->max_pd = attr_ex.orig_attr.max_pd;
- device_attr->max_qp = attr_ex.orig_attr.max_qp;
- device_attr->max_srq = attr_ex.orig_attr.max_srq;
- device_attr->max_srq_wr = attr_ex.orig_attr.max_srq_wr;
- device_attr->raw_packet_caps = attr_ex.raw_packet_caps;
- device_attr->max_rwq_indirection_table_size =
- attr_ex.rss_caps.max_rwq_indirection_table_size;
- device_attr->max_tso = attr_ex.tso_caps.max_tso;
- device_attr->tso_supported_qpts = attr_ex.tso_caps.supported_qpts;
-
- struct mlx5dv_context dv_attr = { .comp_mask = 0 };
#ifdef HAVE_IBV_MLX5_MOD_SWP
dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
#endif
err = mlx5_glue->dv_query_device(ctx, &dv_attr);
if (err) {
rte_errno = errno;
return -rte_errno;
}
-
- device_attr->flags = dv_attr.flags;
- device_attr->comp_mask = dv_attr.comp_mask;
+ memset(&sh->dev_cap, 0, sizeof(struct mlx5_dev_cap));
+ sh->dev_cap.device_cap_flags_ex = attr_ex.device_cap_flags_ex;
+ sh->dev_cap.max_qp_wr = attr_ex.orig_attr.max_qp_wr;
+ sh->dev_cap.max_sge = attr_ex.orig_attr.max_sge;
+ sh->dev_cap.max_cq = attr_ex.orig_attr.max_cq;
+ sh->dev_cap.max_qp = attr_ex.orig_attr.max_qp;
+ sh->dev_cap.raw_packet_caps = attr_ex.raw_packet_caps;
+ sh->dev_cap.max_rwq_indirection_table_size =
+ attr_ex.rss_caps.max_rwq_indirection_table_size;
+ sh->dev_cap.max_tso = attr_ex.tso_caps.max_tso;
+ sh->dev_cap.tso_supported_qpts = attr_ex.tso_caps.supported_qpts;
+ strlcpy(sh->dev_cap.fw_ver, attr_ex.orig_attr.fw_ver,
+ sizeof(sh->dev_cap.fw_ver));
+ sh->dev_cap.flags = dv_attr.flags;
+ sh->dev_cap.comp_mask = dv_attr.comp_mask;
#ifdef HAVE_IBV_MLX5_MOD_SWP
- device_attr->sw_parsing_offloads =
+ sh->dev_cap.sw_parsing_offloads =
dv_attr.sw_parsing_caps.sw_parsing_offloads;
#endif
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
- device_attr->min_single_stride_log_num_of_bytes =
+ sh->dev_cap.min_single_stride_log_num_of_bytes =
dv_attr.striding_rq_caps.min_single_stride_log_num_of_bytes;
- device_attr->max_single_stride_log_num_of_bytes =
+ sh->dev_cap.max_single_stride_log_num_of_bytes =
dv_attr.striding_rq_caps.max_single_stride_log_num_of_bytes;
- device_attr->min_single_wqe_log_num_of_strides =
+ sh->dev_cap.min_single_wqe_log_num_of_strides =
dv_attr.striding_rq_caps.min_single_wqe_log_num_of_strides;
- device_attr->max_single_wqe_log_num_of_strides =
+ sh->dev_cap.max_single_wqe_log_num_of_strides =
dv_attr.striding_rq_caps.max_single_wqe_log_num_of_strides;
- device_attr->stride_supported_qpts =
+ sh->dev_cap.stride_supported_qpts =
dv_attr.striding_rq_caps.supported_qpts;
#endif
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
- device_attr->tunnel_offloads_caps = dv_attr.tunnel_offloads_caps;
+ sh->dev_cap.tunnel_offloads_caps = dv_attr.tunnel_offloads_caps;
#endif
- strlcpy(device_attr->fw_ver, attr_ex.orig_attr.fw_ver,
- sizeof(device_attr->fw_ver));
-
return 0;
}
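For readers unfamiliar with the rdma-core pattern this function wraps through the glue layer, here is a minimal standalone sketch, assuming direct use of rdma-core's ibv_query_device_ex() and mlx5dv_query_device() (helper name query_caps_sketch is hypothetical; error handling kept minimal):

    #include <infiniband/verbs.h>
    #include <infiniband/mlx5dv.h>

    /* Sketch: query extended and mlx5-specific attributes for an
     * already-opened device context.
     */
    static int
    query_caps_sketch(struct ibv_context *ctx)
    {
        struct ibv_device_attr_ex attr_ex = { .comp_mask = 0 };
        struct mlx5dv_context dv_attr = { .comp_mask = 0 };

        if (ibv_query_device_ex(ctx, NULL, &attr_ex))
            return -1;
        /* Request the optional DV capability sections before querying. */
        dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_SWP;
        if (mlx5dv_query_device(ctx, &dv_attr))
            return -1;
        return 0;
    }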
* Multi-packet send is supported by ConnectX-4 Lx PF as well
* as all ConnectX-5 devices.
*/
- if (sh->device_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
- if (sh->device_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
+ if (sh->dev_cap.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
+ if (sh->dev_cap.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
DRV_LOG(DEBUG, "enhanced MPW is supported");
mps = MLX5_MPW_ENHANCED;
} else {
mps = MLX5_MPW_DISABLED;
}
#ifdef HAVE_IBV_MLX5_MOD_SWP
- if (sh->device_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
- swp = sh->device_attr.sw_parsing_offloads;
+ if (sh->dev_cap.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
+ swp = sh->dev_cap.sw_parsing_offloads;
DRV_LOG(DEBUG, "SWP support: %u", swp);
#endif
config->swp = swp & (MLX5_SW_PARSING_CAP | MLX5_SW_PARSING_CSUM_CAP |
MLX5_SW_PARSING_TSO_CAP);
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
- if (sh->device_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
+ if (sh->dev_cap.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d",
- sh->device_attr.min_single_stride_log_num_of_bytes);
+ sh->dev_cap.min_single_stride_log_num_of_bytes);
DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d",
- sh->device_attr.max_single_stride_log_num_of_bytes);
+ sh->dev_cap.max_single_stride_log_num_of_bytes);
DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d",
- sh->device_attr.min_single_wqe_log_num_of_strides);
+ sh->dev_cap.min_single_wqe_log_num_of_strides);
DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d",
- sh->device_attr.max_single_wqe_log_num_of_strides);
+ sh->dev_cap.max_single_wqe_log_num_of_strides);
DRV_LOG(DEBUG, "\tsupported_qpts: %d",
- sh->device_attr.stride_supported_qpts);
+ sh->dev_cap.stride_supported_qpts);
DRV_LOG(DEBUG, "\tmin_stride_wqe_log_size: %d",
config->mprq.log_min_stride_wqe_size);
DRV_LOG(DEBUG, "device supports Multi-Packet RQ");
mprq = 1;
config->mprq.log_min_stride_size =
- sh->device_attr.min_single_stride_log_num_of_bytes;
+ sh->dev_cap.min_single_stride_log_num_of_bytes;
config->mprq.log_max_stride_size =
- sh->device_attr.max_single_stride_log_num_of_bytes;
+ sh->dev_cap.max_single_stride_log_num_of_bytes;
config->mprq.log_min_stride_num =
- sh->device_attr.min_single_wqe_log_num_of_strides;
+ sh->dev_cap.min_single_wqe_log_num_of_strides;
config->mprq.log_max_stride_num =
- sh->device_attr.max_single_wqe_log_num_of_strides;
+ sh->dev_cap.max_single_wqe_log_num_of_strides;
}
#endif
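Note that the striding RQ capabilities are all reported as log2 values. A short worked illustration with hypothetical capability values:

    /* Hypothetical values for illustration only. */
    uint32_t log_min_stride_size = 6;   /* as read from dev_cap */
    uint32_t log_max_stride_num = 16;
    uint32_t min_stride_bytes = 1u << log_min_stride_size;   /* 64 bytes */
    uint32_t max_strides_per_wqe = 1u << log_max_stride_num; /* 65536 */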
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
- if (sh->device_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
- config->tunnel_en = sh->device_attr.tunnel_offloads_caps &
+ if (sh->dev_cap.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
+ config->tunnel_en = sh->dev_cap.tunnel_offloads_caps &
(MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN |
MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE |
MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE);
"tunnel offloading disabled due to old OFED/rdma-core version");
#endif
#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
- mpls_en = ((sh->device_attr.tunnel_offloads_caps &
+ mpls_en = ((sh->dev_cap.tunnel_offloads_caps &
MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
- (sh->device_attr.tunnel_offloads_caps &
+ (sh->dev_cap.tunnel_offloads_caps &
MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
mpls_en ? "" : "not ");
DRV_LOG(DEBUG, "dev_port-%u new domain_id=%u\n",
priv->dev_port, priv->domain_id);
}
- config->hw_csum = !!(sh->device_attr.device_cap_flags_ex &
+ config->hw_csum = !!(sh->dev_cap.device_cap_flags_ex &
IBV_DEVICE_RAW_IP_CSUM);
DRV_LOG(DEBUG, "checksum offloading is %ssupported",
(config->hw_csum ? "" : "not "));
DRV_LOG(DEBUG, "counters are not supported");
#endif
config->ind_table_max_size =
- sh->device_attr.max_rwq_indirection_table_size;
+ sh->dev_cap.max_rwq_indirection_table_size;
/*
* Remove this check once DPDK supports larger/variable
* indirection tables.
config->ind_table_max_size = RTE_ETH_RSS_RETA_SIZE_512;
DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
config->ind_table_max_size);
- config->hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
+ config->hw_vlan_strip = !!(sh->dev_cap.raw_packet_caps &
IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
(config->hw_vlan_strip ? "" : "not "));
- config->hw_fcs_strip = !!(sh->device_attr.raw_packet_caps &
+ config->hw_fcs_strip = !!(sh->dev_cap.raw_packet_caps &
IBV_RAW_PACKET_CAP_SCATTER_FCS);
#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
- hw_padding = !!sh->device_attr.rx_pad_end_addr_align;
+ hw_padding = !!sh->dev_cap.rx_pad_end_addr_align;
#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
- hw_padding = !!(sh->device_attr.device_cap_flags_ex &
+ hw_padding = !!(sh->dev_cap.device_cap_flags_ex &
IBV_DEVICE_PCI_WRITE_END_PADDING);
#endif
if (config->hw_padding && !hw_padding) {
} else if (config->hw_padding) {
DRV_LOG(DEBUG, "Rx end alignment padding is enabled");
}
- config->tso = (sh->device_attr.max_tso > 0 &&
- (sh->device_attr.tso_supported_qpts &
+ config->tso = (sh->dev_cap.max_tso > 0 &&
+ (sh->dev_cap.tso_supported_qpts &
(1 << IBV_QPT_RAW_PACKET)));
if (config->tso)
- config->tso_max_payload_sz = sh->device_attr.max_tso;
+ config->tso_max_payload_sz = sh->dev_cap.max_tso;
/*
* MPW is disabled by default, while the Enhanced MPW is enabled
* by default.
#endif
}
if (config->cqe_comp && RTE_CACHE_LINE_SIZE == 128 &&
- !(sh->device_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) {
+ !(sh->dev_cap.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) {
DRV_LOG(WARNING, "Rx CQE 128B compression is not supported");
config->cqe_comp = 0;
}
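In other words, on 128-byte cache line targets (presumably because the PMD uses 128-byte CQEs there), Rx CQE compression additionally requires the MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP capability; on 64-byte cache line builds the flag is not consulted.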
/* CQ to be associated with the receive queue. */
qp_attr.recv_cq = txq_ctrl->obj->cq;
/* Max number of outstanding WRs. */
- qp_attr.cap.max_send_wr = ((priv->sh->device_attr.max_qp_wr < desc) ?
- priv->sh->device_attr.max_qp_wr : desc);
+ qp_attr.cap.max_send_wr = RTE_MIN(priv->sh->dev_cap.max_qp_wr, desc);
/*
* Max number of scatter/gather elements in a WR, must be 1 to prevent
* libmlx5 from trying to affect too much memory. TX gather is not
* impacted by the
- * device_attr.max_sge limit and will still work properly.
+ * dev_cap.max_sge limit and will still work properly.
*/
qp_attr.cap.max_send_sge = 1;
qp_attr.qp_type = IBV_QPT_RAW_PACKET,
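The RTE_MIN() rewrite above is behavior-preserving; it only replaces the open-coded ternary. For example, with desc = 1024 and a hypothetical dev_cap.max_qp_wr = 32768, the queue keeps its requested 1024 send WRs, while a request above the device limit is silently capped to max_qp_wr.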
sh->esw_mode = !!(spawn->info.master || spawn->info.representor);
if (spawn->bond_info)
sh->bond = *spawn->bond_info;
- err = mlx5_os_get_dev_attr(sh->cdev, &sh->device_attr);
+ err = mlx5_os_capabilities_prepare(sh);
if (err) {
- DRV_LOG(DEBUG, "mlx5_os_get_dev_attr() failed");
+ DRV_LOG(ERR, "Fail to configure device capabilities.");
goto error;
}
sh->refcnt = 1;
void *data2;
};
-/* Device attributes used in mlx5 PMD */
-struct mlx5_dev_attr {
- uint64_t device_cap_flags_ex;
- int max_qp_wr;
- int max_sge;
- int max_cq;
- int max_qp;
- int max_cqe;
- uint32_t max_pd;
- uint32_t max_mr;
- uint32_t max_srq;
- uint32_t max_srq_wr;
- uint32_t raw_packet_caps;
- uint32_t max_rwq_indirection_table_size;
- uint32_t max_tso;
- uint32_t tso_supported_qpts;
- uint64_t flags;
- uint64_t comp_mask;
- uint32_t sw_parsing_offloads;
- uint32_t min_single_stride_log_num_of_bytes;
- uint32_t max_single_stride_log_num_of_bytes;
- uint32_t min_single_wqe_log_num_of_strides;
- uint32_t max_single_wqe_log_num_of_strides;
- uint32_t stride_supported_qpts;
- uint32_t tunnel_offloads_caps;
- char fw_ver[64];
+/* Device capabilities structure, which does not change at any stage. */
+struct mlx5_dev_cap {
+ uint64_t device_cap_flags_ex;
+ int max_cq; /* Maximum number of supported CQs. */
+ int max_qp; /* Maximum number of supported QPs. */
+ int max_qp_wr; /* Maximum number of outstanding WR on any WQ. */
+ int max_sge;
+ /* Maximum number of scatter/gather entries per WR for the SQ and RQ of
+ * a QP, for non-RDMA-Read operations.
+ */
+ uint32_t raw_packet_caps;
+ uint32_t max_rwq_indirection_table_size;
+ /* Maximum receive WQ indirection table size. */
+ uint32_t max_tso; /* Maximum TCP payload for TSO. */
+ uint32_t tso_supported_qpts;
+ uint64_t flags;
+ uint64_t comp_mask;
+ uint32_t sw_parsing_offloads;
+ uint32_t min_single_stride_log_num_of_bytes;
+ uint32_t max_single_stride_log_num_of_bytes;
+ uint32_t min_single_wqe_log_num_of_strides;
+ uint32_t max_single_wqe_log_num_of_strides;
+ uint32_t stride_supported_qpts;
+ uint32_t tunnel_offloads_caps;
+ char fw_ver[64]; /* Firmware version of this device. */
};
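A hedged usage sketch reading a few of the prepared capabilities; it assumes mlx5_os_capabilities_prepare() has already filled sh->dev_cap (the helper name dump_dev_cap_sketch is hypothetical):

    /* Sketch: log selected capabilities once they are prepared. */
    static void
    dump_dev_cap_sketch(struct mlx5_dev_ctx_shared *sh)
    {
        DRV_LOG(DEBUG, "FW version: %s", sh->dev_cap.fw_ver);
        DRV_LOG(DEBUG, "max QP: %d, max CQ: %d",
                sh->dev_cap.max_qp, sh->dev_cap.max_cq);
        DRV_LOG(DEBUG, "max TSO payload: %u", sh->dev_cap.max_tso);
    }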
/** Data associated with devices to spawn. */
uint32_t tdn; /* Transport Domain number. */
char ibdev_name[MLX5_FS_NAME_MAX]; /* SYSFS dev name. */
char ibdev_path[MLX5_FS_PATH_MAX]; /* SYSFS dev path for secondary */
- struct mlx5_dev_attr device_attr; /* Device properties. */
+ struct mlx5_dev_cap dev_cap; /* Device capabilities. */
int numa_node; /* Numa node of backing physical device. */
/* Packet pacing related structure. */
struct mlx5_dev_txpp txpp;
/* mlx5_os.c */
struct rte_pci_driver;
-int mlx5_os_get_dev_attr(struct mlx5_common_device *dev,
- struct mlx5_dev_attr *dev_attr);
+int mlx5_os_capabilities_prepare(struct mlx5_dev_ctx_shared *sh);
void mlx5_os_free_shared_dr(struct mlx5_priv *priv);
int mlx5_os_net_probe(struct mlx5_common_device *cdev);
void mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh);
wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE) / MLX5_WQE_SIZE;
/* Create Send Queue object with DevX. */
wqe_n = RTE_MIN((1UL << txq_data->elts_n) * wqe_size,
- (uint32_t)priv->sh->device_attr.max_qp_wr);
+ (uint32_t)priv->sh->dev_cap.max_qp_wr);
log_desc_n = log2above(wqe_n);
ret = mlx5_txq_create_devx_sq_resources(dev, idx, log_desc_n);
if (ret) {
* Since we need one CQ per QP, the limit is the minimum of the
* two values.
*/
- max = RTE_MIN(priv->sh->device_attr.max_cq,
- priv->sh->device_attr.max_qp);
+ max = RTE_MIN(priv->sh->dev_cap.max_cq, priv->sh->dev_cap.max_qp);
/* max_rx_queues is uint16_t. */
max = RTE_MIN(max, (unsigned int)UINT16_MAX);
info->max_rx_queues = max;
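Worked example with hypothetical HCA limits: max_cq = 1 << 24 and max_qp = 1 << 17 give RTE_MIN() = 131072, and the subsequent uint16_t clamp advertises 65535 Rx queues.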
mlx5_fw_version_get(struct rte_eth_dev *dev, char *fw_ver, size_t fw_size)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_attr *attr = &priv->sh->device_attr;
+ struct mlx5_dev_cap *attr = &priv->sh->dev_cap;
size_t size = strnlen(attr->fw_ver, sizeof(attr->fw_ver)) + 1;
if (fw_size < size)
/* Should not release Rx queues but return immediately. */
return -rte_errno;
}
- DRV_LOG(DEBUG, "Port %u device_attr.max_qp_wr is %d.",
- dev->data->port_id, priv->sh->device_attr.max_qp_wr);
- DRV_LOG(DEBUG, "Port %u device_attr.max_sge is %d.",
- dev->data->port_id, priv->sh->device_attr.max_sge);
+ DRV_LOG(DEBUG, "Port %u dev_cap.max_qp_wr is %d.",
+ dev->data->port_id, priv->sh->dev_cap.max_qp_wr);
+ DRV_LOG(DEBUG, "Port %u dev_cap.max_sge is %d.",
+ dev->data->port_id, priv->sh->dev_cap.max_sge);
for (i = 0; i != priv->rxqs_n; ++i) {
struct mlx5_rxq_priv *rxq = mlx5_rxq_ref(dev, i);
struct mlx5_rxq_ctrl *rxq_ctrl;
struct mlx5_priv *priv = txq_ctrl->priv;
unsigned int wqe_size;
- wqe_size = priv->sh->device_attr.max_qp_wr / desc;
+ wqe_size = priv->sh->dev_cap.max_qp_wr / desc;
if (!wqe_size)
return 0;
/*
" satisfied (%u) on port %u, try the smaller"
" Tx queue size (%d)",
txq_ctrl->txq.inlen_mode, max_inline,
- priv->dev_data->port_id,
- priv->sh->device_attr.max_qp_wr);
+ priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);
goto error;
}
if (txq_ctrl->txq.inlen_send > max_inline &&
" satisfied (%u) on port %u, try the smaller"
" Tx queue size (%d)",
txq_ctrl->txq.inlen_send, max_inline,
- priv->dev_data->port_id,
- priv->sh->device_attr.max_qp_wr);
+ priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);
goto error;
}
if (txq_ctrl->txq.inlen_empw > max_inline &&
" satisfied (%u) on port %u, try the smaller"
" Tx queue size (%d)",
txq_ctrl->txq.inlen_empw, max_inline,
- priv->dev_data->port_id,
- priv->sh->device_attr.max_qp_wr);
+ priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);
goto error;
}
if (txq_ctrl->txq.tso_en && max_inline < MLX5_MAX_TSO_HEADER) {
" satisfied (%u) on port %u, try the smaller"
" Tx queue size (%d)",
MLX5_MAX_TSO_HEADER, max_inline,
- priv->dev_data->port_id,
- priv->sh->device_attr.max_qp_wr);
+ priv->dev_data->port_id, priv->sh->dev_cap.max_qp_wr);
goto error;
}
if (txq_ctrl->txq.inlen_send > max_inline) {
if (txq_adjust_params(tmpl))
goto error;
if (txq_calc_wqebb_cnt(tmpl) >
- priv->sh->device_attr.max_qp_wr) {
+ priv->sh->dev_cap.max_qp_wr) {
DRV_LOG(ERR,
"port %u Tx WQEBB count (%d) exceeds the limit (%d),"
" try smaller queue size",
dev->data->port_id, txq_calc_wqebb_cnt(tmpl),
- priv->sh->device_attr.max_qp_wr);
+ priv->sh->dev_cap.max_qp_wr);
rte_errno = ENOMEM;
goto error;
}
}
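For instance, under a hypothetical limit of dev_cap.max_qp_wr = 32768, a deep Tx queue whose descriptors expand to more WQEBBs than that (e.g. because inline data raises the per-descriptor WQEBB count) takes this ENOMEM path, and the user must retry with a smaller queue size.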
/**
- * Get mlx5 device attributes.
+ * Get mlx5 device capabilities.
*
- * @param cdev
- * Pointer to mlx5 device.
- *
- * @param device_attr
- * Pointer to mlx5 device attributes.
+ * @param sh
+ * Pointer to shared device context.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx5_os_get_dev_attr(struct mlx5_common_device *cdev,
- struct mlx5_dev_attr *device_attr)
+mlx5_os_capabilities_prepare(struct mlx5_dev_ctx_shared *sh)
{
- struct mlx5_context *mlx5_ctx;
+ struct mlx5_hca_attr *hca_attr = &sh->cdev->config.hca_attr;
+ struct mlx5_context *mlx5_ctx = sh->cdev->ctx;
void *pv_iseg = NULL;
u32 cb_iseg = 0;
- if (!cdev || !cdev->ctx) {
- rte_errno = EINVAL;
- return -rte_errno;
- }
- mlx5_ctx = (struct mlx5_context *)cdev->ctx;
- memset(device_attr, 0, sizeof(*device_attr));
- device_attr->max_cq = 1 << cdev->config.hca_attr.log_max_cq;
- device_attr->max_qp = 1 << cdev->config.hca_attr.log_max_qp;
- device_attr->max_qp_wr = 1 << cdev->config.hca_attr.log_max_qp_sz;
- device_attr->max_cqe = 1 << cdev->config.hca_attr.log_max_cq_sz;
- device_attr->max_mr = 1 << cdev->config.hca_attr.log_max_mrw_sz;
- device_attr->max_pd = 1 << cdev->config.hca_attr.log_max_pd;
- device_attr->max_srq = 1 << cdev->config.hca_attr.log_max_srq;
- device_attr->max_srq_wr = 1 << cdev->config.hca_attr.log_max_srq_sz;
- device_attr->max_tso = 1 << cdev->config.hca_attr.max_lso_cap;
- if (cdev->config.hca_attr.rss_ind_tbl_cap) {
- device_attr->max_rwq_indirection_table_size =
- 1 << cdev->config.hca_attr.rss_ind_tbl_cap;
- }
- device_attr->sw_parsing_offloads =
- mlx5_get_supported_sw_parsing_offloads(&cdev->config.hca_attr);
- device_attr->tunnel_offloads_caps =
- mlx5_get_supported_tunneling_offloads(&cdev->config.hca_attr);
pv_iseg = mlx5_glue->query_hca_iseg(mlx5_ctx, &cb_iseg);
if (pv_iseg == NULL) {
- DRV_LOG(ERR, "Failed to get device hca_iseg");
+ DRV_LOG(ERR, "Failed to get device hca_iseg.");
rte_errno = errno;
return -rte_errno;
}
- snprintf(device_attr->fw_ver, 64, "%x.%x.%04x",
+ memset(&sh->dev_cap, 0, sizeof(struct mlx5_dev_cap));
+ sh->dev_cap.max_cq = 1 << hca_attr->log_max_cq;
+ sh->dev_cap.max_qp = 1 << hca_attr->log_max_qp;
+ sh->dev_cap.max_qp_wr = 1 << hca_attr->log_max_qp_sz;
+ sh->dev_cap.max_tso = 1 << hca_attr->max_lso_cap;
+ if (hca_attr->rss_ind_tbl_cap) {
+ sh->dev_cap.max_rwq_indirection_table_size =
+ 1 << hca_attr->rss_ind_tbl_cap;
+ }
+ sh->dev_cap.sw_parsing_offloads =
+ mlx5_get_supported_sw_parsing_offloads(hca_attr);
+ sh->dev_cap.tunnel_offloads_caps =
+ mlx5_get_supported_tunneling_offloads(hca_attr);
+ snprintf(sh->dev_cap.fw_ver, sizeof(sh->dev_cap.fw_ver), "%x.%x.%04x",
MLX5_GET(initial_seg, pv_iseg, fw_rev_major),
MLX5_GET(initial_seg, pv_iseg, fw_rev_minor),
MLX5_GET(initial_seg, pv_iseg, fw_rev_subminor));
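As a worked example of the "%x.%x.%04x" format: hypothetical initial-segment values fw_rev_major = 0x16, fw_rev_minor = 0x23 and fw_rev_subminor = 0x0fa2 yield the string "16.23.0fa2".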
goto error;
}
DRV_LOG(DEBUG, "MPW isn't supported");
- config->swp = sh->device_attr.sw_parsing_offloads &
+ config->swp = sh->dev_cap.sw_parsing_offloads &
(MLX5_SW_PARSING_CAP | MLX5_SW_PARSING_CSUM_CAP |
MLX5_SW_PARSING_TSO_CAP);
config->ind_table_max_size =
- sh->device_attr.max_rwq_indirection_table_size;
- config->tunnel_en = sh->device_attr.tunnel_offloads_caps &
+ sh->dev_cap.max_rwq_indirection_table_size;
+ config->tunnel_en = sh->dev_cap.tunnel_offloads_caps &
(MLX5_TUNNELED_OFFLOADS_VXLAN_CAP |
MLX5_TUNNELED_OFFLOADS_GRE_CAP |
MLX5_TUNNELED_OFFLOADS_GENEVE_CAP);
}
DRV_LOG(DEBUG, "counters are not supported");
config->ind_table_max_size =
- sh->device_attr.max_rwq_indirection_table_size;
+ sh->dev_cap.max_rwq_indirection_table_size;
/*
* Remove this check once DPDK supports larger/variable
* indirection tables.
DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
config->hw_padding = 0;
}
- config->tso = (sh->device_attr.max_tso > 0);
+ config->tso = (sh->dev_cap.max_tso > 0);
if (config->tso)
- config->tso_max_payload_sz = sh->device_attr.max_tso;
+ config->tso_max_payload_sz = sh->dev_cap.max_tso;
DRV_LOG(DEBUG, "%sMPS is %s.",
config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
config->mps == MLX5_MPW ? "legacy " : "",