mlx5_os_capabilities_prepare(struct mlx5_dev_ctx_shared *sh)
{
int err;
- struct ibv_context *ctx = sh->cdev->ctx;
+ struct mlx5_common_device *cdev = sh->cdev;
+ struct mlx5_hca_attr *hca_attr = &cdev->config.hca_attr;
struct ibv_device_attr_ex attr_ex = { .comp_mask = 0 };
struct mlx5dv_context dv_attr = { .comp_mask = 0 };
- err = mlx5_glue->query_device_ex(ctx, NULL, &attr_ex);
+ err = mlx5_glue->query_device_ex(cdev->ctx, NULL, &attr_ex);
if (err) {
rte_errno = errno;
return -rte_errno;
}
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
#endif
- err = mlx5_glue->dv_query_device(ctx, &dv_attr);
+ err = mlx5_glue->dv_query_device(cdev->ctx, &dv_attr);
if (err) {
rte_errno = errno;
return -rte_errno;
}
memset(&sh->dev_cap, 0, sizeof(struct mlx5_dev_cap));
- sh->dev_cap.device_cap_flags_ex = attr_ex.device_cap_flags_ex;
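+ /* The bus type tells a PCI VF apart from an auxiliary-bus SF. */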
+ if (mlx5_dev_is_pci(cdev->dev))
+ sh->dev_cap.vf = mlx5_dev_is_vf_pci(RTE_DEV_TO_PCI(cdev->dev));
+ else
+ sh->dev_cap.sf = 1;
sh->dev_cap.max_qp_wr = attr_ex.orig_attr.max_qp_wr;
sh->dev_cap.max_sge = attr_ex.orig_attr.max_sge;
sh->dev_cap.max_cq = attr_ex.orig_attr.max_cq;
sh->dev_cap.max_qp = attr_ex.orig_attr.max_qp;
- sh->dev_cap.raw_packet_caps = attr_ex.raw_packet_caps;
- sh->dev_cap.max_rwq_indirection_table_size =
- attr_ex.rss_caps.max_rwq_indirection_table_size;
- sh->dev_cap.max_tso = attr_ex.tso_caps.max_tso;
- sh->dev_cap.tso_supported_qpts = attr_ex.tso_caps.supported_qpts;
+#ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR
+ sh->dev_cap.dest_tir = 1;
+#endif
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) && defined(HAVE_MLX5DV_DR)
+ DRV_LOG(DEBUG, "DV flow is supported.");
+ sh->dev_cap.dv_flow_en = 1;
+#endif
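+ /*
+ * E-Switch DV flow additionally requires the device to be the
+ * E-Switch manager and the context to run in E-Switch mode.
+ */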
+#ifdef HAVE_MLX5DV_DR_ESWITCH
+ if (hca_attr->eswitch_manager && sh->dev_cap.dv_flow_en && sh->esw_mode)
+ sh->dev_cap.dv_esw_en = 1;
+#endif
+ /*
+ * Multi-packet send is supported by ConnectX-4 Lx PF as well
+ * as all ConnectX-5 devices.
+ */
+ if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
+ if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
+ DRV_LOG(DEBUG, "Enhanced MPW is supported.");
+ sh->dev_cap.mps = MLX5_MPW_ENHANCED;
+ } else {
+ DRV_LOG(DEBUG, "MPW is supported.");
+ sh->dev_cap.mps = MLX5_MPW;
+ }
+ } else {
+ DRV_LOG(DEBUG, "MPW isn't supported.");
+ sh->dev_cap.mps = MLX5_MPW_DISABLED;
+ }
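+ /*
+ * On 128B cache line systems, Rx CQE compression needs the explicit
+ * 128B compression capability; otherwise it is always available.
+ */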
+#if (RTE_CACHE_LINE_SIZE == 128)
+ if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)
+ sh->dev_cap.cqe_comp = 1;
+ DRV_LOG(DEBUG, "Rx CQE 128B compression is %ssupported.",
+ sh->dev_cap.cqe_comp ? "" : "not ");
+#else
+ sh->dev_cap.cqe_comp = 1;
+#endif
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+ sh->dev_cap.mpls_en =
+ ((dv_attr.tunnel_offloads_caps &
+ MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
+ (dv_attr.tunnel_offloads_caps &
+ MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
+ DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported.",
+ sh->dev_cap.mpls_en ? "" : "not ");
+#else
+ DRV_LOG(WARNING,
+ "MPLS over GRE/UDP tunnel offloading disabled due to old OFED/rdma-core version or firmware configuration");
+#endif
+#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
+ sh->dev_cap.hw_padding = !!attr_ex.rx_pad_end_addr_align;
+#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
+ sh->dev_cap.hw_padding = !!(attr_ex.device_cap_flags_ex &
+ IBV_DEVICE_PCI_WRITE_END_PADDING);
+#endif
+ sh->dev_cap.hw_csum =
+ !!(attr_ex.device_cap_flags_ex & IBV_DEVICE_RAW_IP_CSUM);
+ DRV_LOG(DEBUG, "Checksum offloading is %ssupported.",
+ sh->dev_cap.hw_csum ? "" : "not ");
+ sh->dev_cap.hw_vlan_strip = !!(attr_ex.raw_packet_caps &
+ IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
+ DRV_LOG(DEBUG, "VLAN stripping is %ssupported.",
+ (sh->dev_cap.hw_vlan_strip ? "" : "not "));
+ sh->dev_cap.hw_fcs_strip = !!(attr_ex.raw_packet_caps &
+ IBV_RAW_PACKET_CAP_SCATTER_FCS);
+#if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \
+ !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
+ DRV_LOG(DEBUG, "Counters are not supported.");
+#endif
+ /*
+ * DPDK doesn't support larger/variable indirection tables.
+ * Once DPDK supports it, take max size from device attr.
+ */
+ sh->dev_cap.ind_table_max_size =
+ RTE_MIN(attr_ex.rss_caps.max_rwq_indirection_table_size,
+ (unsigned int)RTE_ETH_RSS_RETA_SIZE_512);
+ DRV_LOG(DEBUG, "Maximum Rx indirection table size is %u",
+ sh->dev_cap.ind_table_max_size);
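+ /* TSO is usable only on raw packet (Ethernet) QPs. */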
+ sh->dev_cap.tso = (attr_ex.tso_caps.max_tso > 0 &&
+ (attr_ex.tso_caps.supported_qpts &
+ (1 << IBV_QPT_RAW_PACKET)));
+ if (sh->dev_cap.tso)
+ sh->dev_cap.tso_max_payload_sz = attr_ex.tso_caps.max_tso;
strlcpy(sh->dev_cap.fw_ver, attr_ex.orig_attr.fw_ver,
sizeof(sh->dev_cap.fw_ver));
- sh->dev_cap.flags = dv_attr.flags;
- sh->dev_cap.comp_mask = dv_attr.comp_mask;
#ifdef HAVE_IBV_MLX5_MOD_SWP
- sh->dev_cap.sw_parsing_offloads =
- dv_attr.sw_parsing_caps.sw_parsing_offloads;
+ if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
+ sh->dev_cap.swp = dv_attr.sw_parsing_caps.sw_parsing_offloads &
+ (MLX5_SW_PARSING_CAP |
+ MLX5_SW_PARSING_CSUM_CAP |
+ MLX5_SW_PARSING_TSO_CAP);
+ DRV_LOG(DEBUG, "SWP support: %u", sh->dev_cap.swp);
#endif
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
- sh->dev_cap.min_single_stride_log_num_of_bytes =
- dv_attr.striding_rq_caps.min_single_stride_log_num_of_bytes;
- sh->dev_cap.max_single_stride_log_num_of_bytes =
- dv_attr.striding_rq_caps.max_single_stride_log_num_of_bytes;
- sh->dev_cap.min_single_wqe_log_num_of_strides =
- dv_attr.striding_rq_caps.min_single_wqe_log_num_of_strides;
- sh->dev_cap.max_single_wqe_log_num_of_strides =
- dv_attr.striding_rq_caps.max_single_wqe_log_num_of_strides;
- sh->dev_cap.stride_supported_qpts =
- dv_attr.striding_rq_caps.supported_qpts;
+ if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
+ struct mlx5dv_striding_rq_caps *strd_rq_caps =
+ &dv_attr.striding_rq_caps;
+
+ sh->dev_cap.mprq.enabled = 1;
+ sh->dev_cap.mprq.log_min_stride_size =
+ strd_rq_caps->min_single_stride_log_num_of_bytes;
+ sh->dev_cap.mprq.log_max_stride_size =
+ strd_rq_caps->max_single_stride_log_num_of_bytes;
+ sh->dev_cap.mprq.log_min_stride_num =
+ strd_rq_caps->min_single_wqe_log_num_of_strides;
+ sh->dev_cap.mprq.log_max_stride_num =
+ strd_rq_caps->max_single_wqe_log_num_of_strides;
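+ /*
+ * The minimal stride WQE size comes from HCA attributes when DevX
+ * is available, otherwise from a built-in default.
+ */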
+ sh->dev_cap.mprq.log_min_stride_wqe_size =
+ cdev->config.devx ?
+ hca_attr->log_min_stride_wqe_sz :
+ MLX5_MPRQ_LOG_MIN_STRIDE_WQE_SIZE;
+ DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %u",
+ sh->dev_cap.mprq.log_min_stride_size);
+ DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %u",
+ sh->dev_cap.mprq.log_max_stride_size);
+ DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %u",
+ sh->dev_cap.mprq.log_min_stride_num);
+ DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %u",
+ sh->dev_cap.mprq.log_max_stride_num);
+ DRV_LOG(DEBUG, "\tmin_stride_wqe_log_size: %u",
+ sh->dev_cap.mprq.log_min_stride_wqe_size);
+ DRV_LOG(DEBUG, "\tsupported_qpts: %d",
+ strd_rq_caps->supported_qpts);
+ DRV_LOG(DEBUG, "Device supports Multi-Packet RQ.");
+ }
#endif
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
- sh->dev_cap.tunnel_offloads_caps = dv_attr.tunnel_offloads_caps;
+ if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
+ sh->dev_cap.tunnel_en = dv_attr.tunnel_offloads_caps &
+ (MLX5_TUNNELED_OFFLOADS_VXLAN_CAP |
+ MLX5_TUNNELED_OFFLOADS_GRE_CAP |
+ MLX5_TUNNELED_OFFLOADS_GENEVE_CAP);
+ }
+ if (sh->dev_cap.tunnel_en) {
+ DRV_LOG(DEBUG, "Tunnel offloading is supported for %s%s%s",
+ sh->dev_cap.tunnel_en &
+ MLX5_TUNNELED_OFFLOADS_VXLAN_CAP ? "[VXLAN]" : "",
+ sh->dev_cap.tunnel_en &
+ MLX5_TUNNELED_OFFLOADS_GRE_CAP ? "[GRE]" : "",
+ sh->dev_cap.tunnel_en &
+ MLX5_TUNNELED_OFFLOADS_GENEVE_CAP ? "[GENEVE]" : "");
+ } else {
+ DRV_LOG(DEBUG, "Tunnel offloading is not supported.");
+ }
+#else
+ DRV_LOG(WARNING,
+ "Tunnel offloading disabled due to old OFED/rdma-core version");
#endif
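+ /* The capabilities below can be queried only through DevX. */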
+ if (!sh->cdev->config.devx)
+ return 0;
+ /* Check capabilities for Packet Pacing. */
+ DRV_LOG(DEBUG, "Timestamp counter frequency %u kHz.",
+ hca_attr->dev_freq_khz);
+ DRV_LOG(DEBUG, "Packet pacing is %ssupported.",
+ hca_attr->qos.packet_pacing ? "" : "not ");
+ DRV_LOG(DEBUG, "Cross channel ops are %ssupported.",
+ hca_attr->cross_channel ? "" : "not ");
+ DRV_LOG(DEBUG, "WQE index ignore is %ssupported.",
+ hca_attr->wqe_index_ignore ? "" : "not ");
+ DRV_LOG(DEBUG, "Non-wire SQ feature is %ssupported.",
+ hca_attr->non_wire_sq ? "" : "not ");
+ DRV_LOG(DEBUG, "Static WQE SQ feature is %ssupported (%d)",
+ hca_attr->log_max_static_sq_wq ? "" : "not ",
+ hca_attr->log_max_static_sq_wq);
+ DRV_LOG(DEBUG, "WQE rate PP mode is %ssupported.",
+ hca_attr->qos.wqe_rate_pp ? "" : "not ");
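+ /* Packet pacing requires all of the checks below; a missing one clears it. */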
+ sh->dev_cap.txpp_en = hca_attr->qos.packet_pacing;
+ if (!hca_attr->cross_channel) {
+ DRV_LOG(DEBUG,
+ "Cross channel operations are required for packet pacing.");
+ sh->dev_cap.txpp_en = 0;
+ }
+ if (!hca_attr->wqe_index_ignore) {
+ DRV_LOG(DEBUG,
+ "WQE index ignore feature is required for packet pacing.");
+ sh->dev_cap.txpp_en = 0;
+ }
+ if (!hca_attr->non_wire_sq) {
+ DRV_LOG(DEBUG,
+ "Non-wire SQ feature is required for packet pacing.");
+ sh->dev_cap.txpp_en = 0;
+ }
+ if (!hca_attr->log_max_static_sq_wq) {
+ DRV_LOG(DEBUG,
+ "Static WQE SQ feature is required for packet pacing.");
+ sh->dev_cap.txpp_en = 0;
+ }
+ if (!hca_attr->qos.wqe_rate_pp) {
+ DRV_LOG(DEBUG,
+ "WQE rate mode is required for packet pacing.");
+ sh->dev_cap.txpp_en = 0;
+ }
+#ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
+ DRV_LOG(DEBUG,
+ "DevX does not provide UAR offset, can't create queues for packet pacing.");
+ sh->dev_cap.txpp_en = 0;
+#endif
+ /* Check for LRO support. */
+ if (sh->dev_cap.dest_tir && sh->dev_cap.dv_flow_en &&
+ hca_attr->lro_cap) {
+ /* TBD check tunnel lro caps. */
+ sh->dev_cap.lro_supported = 1;
+ DRV_LOG(DEBUG, "Device supports LRO.");
+ DRV_LOG(DEBUG,
+ "LRO minimal size of TCP segment required for coalescing is %d bytes.",
+ hca_attr->lro_min_mss_size);
+ }
+ sh->dev_cap.scatter_fcs_w_decap_disable =
+ hca_attr->scatter_fcs_w_decap_disable;
+ sh->dev_cap.rq_delay_drop_en = hca_attr->rq_delay_drop;
+ mlx5_rt_timestamp_config(sh, hca_attr);
return 0;
}
struct rte_eth_dev *eth_dev = NULL;
struct mlx5_priv *priv = NULL;
int err = 0;
- unsigned int hw_padding = 0;
- unsigned int mps;
- unsigned int mpls_en = 0;
- unsigned int swp = 0;
- unsigned int mprq = 0;
struct rte_ether_addr mac;
char name[RTE_ETH_NAME_MAX_LEN];
int own_domain_id = 0;
if (!sh)
return NULL;
/* Update final values for devargs before checking sibling config. */
-#if !defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_MLX5DV_DR)
- if (config->dv_flow_en) {
+ if (config->dv_flow_en && !sh->dev_cap.dv_flow_en) {
DRV_LOG(WARNING, "DV flow is not supported.");
config->dv_flow_en = 0;
}
-#endif
-#ifdef HAVE_MLX5DV_DR_ESWITCH
- if (!(hca_attr->eswitch_manager && config->dv_flow_en && sh->esw_mode))
+ if (config->dv_esw_en && !sh->dev_cap.dv_esw_en) {
+ DRV_LOG(WARNING, "E-Switch DV flow is not supported.");
config->dv_esw_en = 0;
-#else
- config->dv_esw_en = 0;
-#endif
+ }
if (config->dv_miss_info && config->dv_esw_en)
config->dv_xmeta_en = MLX5_XMETA_MODE_META16;
if (!config->dv_esw_en &&
err = mlx5_dev_check_sibling_config(sh, config, dpdk_dev);
if (err)
goto error;
-#ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR
- config->dest_tir = 1;
-#endif
- /*
- * Multi-packet send is supported by ConnectX-4 Lx PF as well
- * as all ConnectX-5 devices.
- */
- if (sh->dev_cap.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
- if (sh->dev_cap.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
- DRV_LOG(DEBUG, "enhanced MPW is supported");
- mps = MLX5_MPW_ENHANCED;
- } else {
- DRV_LOG(DEBUG, "MPW is supported");
- mps = MLX5_MPW;
- }
- } else {
- DRV_LOG(DEBUG, "MPW isn't supported");
- mps = MLX5_MPW_DISABLED;
- }
-#ifdef HAVE_IBV_MLX5_MOD_SWP
- if (sh->dev_cap.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
- swp = sh->dev_cap.sw_parsing_offloads;
- DRV_LOG(DEBUG, "SWP support: %u", swp);
-#endif
- config->swp = swp & (MLX5_SW_PARSING_CAP | MLX5_SW_PARSING_CSUM_CAP |
- MLX5_SW_PARSING_TSO_CAP);
-#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
- if (sh->dev_cap.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
- DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %d",
- sh->dev_cap.min_single_stride_log_num_of_bytes);
- DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %d",
- sh->dev_cap.max_single_stride_log_num_of_bytes);
- DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %d",
- sh->dev_cap.min_single_wqe_log_num_of_strides);
- DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %d",
- sh->dev_cap.max_single_wqe_log_num_of_strides);
- DRV_LOG(DEBUG, "\tsupported_qpts: %d",
- sh->dev_cap.stride_supported_qpts);
- DRV_LOG(DEBUG, "\tmin_stride_wqe_log_size: %d",
- config->mprq.log_min_stride_wqe_size);
- DRV_LOG(DEBUG, "device supports Multi-Packet RQ");
- mprq = 1;
- config->mprq.log_min_stride_size =
- sh->dev_cap.min_single_stride_log_num_of_bytes;
- config->mprq.log_max_stride_size =
- sh->dev_cap.max_single_stride_log_num_of_bytes;
- config->mprq.log_min_stride_num =
- sh->dev_cap.min_single_wqe_log_num_of_strides;
- config->mprq.log_max_stride_num =
- sh->dev_cap.max_single_wqe_log_num_of_strides;
- }
-#endif
-#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
- if (sh->dev_cap.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
- config->tunnel_en = sh->dev_cap.tunnel_offloads_caps &
- (MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN |
- MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE |
- MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE);
- }
- if (config->tunnel_en) {
- DRV_LOG(DEBUG, "tunnel offloading is supported for %s%s%s",
- config->tunnel_en &
- MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN ? "[VXLAN]" : "",
- config->tunnel_en &
- MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE ? "[GRE]" : "",
- config->tunnel_en &
- MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GENEVE ? "[GENEVE]" : ""
- );
- } else {
- DRV_LOG(DEBUG, "tunnel offloading is not supported");
- }
-#else
- DRV_LOG(WARNING,
- "tunnel offloading disabled due to old OFED/rdma-core version");
-#endif
-#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
- mpls_en = ((sh->dev_cap.tunnel_offloads_caps &
- MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
- (sh->dev_cap.tunnel_offloads_caps &
- MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
- DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported",
- mpls_en ? "" : "not ");
-#else
- DRV_LOG(WARNING, "MPLS over GRE/UDP tunnel offloading disabled due to"
- " old OFED/rdma-core version or firmware configuration");
-#endif
- config->mpls_en = mpls_en;
nl_rdma = mlx5_nl_init(NETLINK_RDMA);
/* Check port status. */
if (spawn->phys_port <= UINT8_MAX) {
DRV_LOG(DEBUG, "dev_port-%u new domain_id=%u\n",
priv->dev_port, priv->domain_id);
}
- config->hw_csum = !!(sh->dev_cap.device_cap_flags_ex &
- IBV_DEVICE_RAW_IP_CSUM);
- DRV_LOG(DEBUG, "checksum offloading is %ssupported",
- (config->hw_csum ? "" : "not "));
-#if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \
- !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
- DRV_LOG(DEBUG, "counters are not supported");
-#endif
- config->ind_table_max_size =
- sh->dev_cap.max_rwq_indirection_table_size;
- /*
- * Remove this check once DPDK supports larger/variable
- * indirection tables.
- */
- if (config->ind_table_max_size > (unsigned int)RTE_ETH_RSS_RETA_SIZE_512)
- config->ind_table_max_size = RTE_ETH_RSS_RETA_SIZE_512;
- DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
- config->ind_table_max_size);
- config->hw_vlan_strip = !!(sh->dev_cap.raw_packet_caps &
- IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
- DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
- (config->hw_vlan_strip ? "" : "not "));
- config->hw_fcs_strip = !!(sh->dev_cap.raw_packet_caps &
- IBV_RAW_PACKET_CAP_SCATTER_FCS);
-#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
- hw_padding = !!sh->dev_cap.rx_pad_end_addr_align;
-#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
- hw_padding = !!(sh->dev_cap.device_cap_flags_ex &
- IBV_DEVICE_PCI_WRITE_END_PADDING);
-#endif
- if (config->hw_padding && !hw_padding) {
+ if (config->hw_padding && !sh->dev_cap.hw_padding) {
DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
config->hw_padding = 0;
} else if (config->hw_padding) {
DRV_LOG(DEBUG, "Rx end alignment padding is enabled");
}
- config->tso = (sh->dev_cap.max_tso > 0 &&
- (sh->dev_cap.tso_supported_qpts &
- (1 << IBV_QPT_RAW_PACKET)));
- if (config->tso)
- config->tso_max_payload_sz = sh->dev_cap.max_tso;
/*
* MPW is disabled by default, while the Enhanced MPW is enabled
* by default.
*/
if (config->mps == MLX5_ARG_UNSET)
- config->mps = (mps == MLX5_MPW_ENHANCED) ? MLX5_MPW_ENHANCED :
- MLX5_MPW_DISABLED;
+ config->mps = (sh->dev_cap.mps == MLX5_MPW_ENHANCED) ?
+ MLX5_MPW_ENHANCED : MLX5_MPW_DISABLED;
else
- config->mps = config->mps ? mps : MLX5_MPW_DISABLED;
+ config->mps = config->mps ? sh->dev_cap.mps : MLX5_MPW_DISABLED;
DRV_LOG(INFO, "%sMPS is %s",
config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
config->mps == MLX5_MPW ? "legacy " : "",
config->mps != MLX5_MPW_DISABLED ? "enabled" : "disabled");
if (sh->cdev->config.devx) {
sh->steering_format_version = hca_attr->steering_format_version;
- /* Check for LRO support. */
- if (config->dest_tir && hca_attr->lro_cap &&
- config->dv_flow_en) {
- /* TBD check tunnel lro caps. */
- config->lro.supported = hca_attr->lro_cap;
- DRV_LOG(DEBUG, "Device supports LRO");
+ /* LRO is supported only when DV flow is enabled. */
+ if (sh->dev_cap.lro_supported && !config->dv_flow_en)
+ sh->dev_cap.lro_supported = 0;
+ if (sh->dev_cap.lro_supported) {
/*
* If LRO timeout is not configured by application,
* use the minimal supported value.
*/
- if (!config->lro.timeout)
- config->lro.timeout =
+ if (!config->lro_timeout)
+ config->lro_timeout =
hca_attr->lro_timer_supported_periods[0];
DRV_LOG(DEBUG, "LRO session timeout set to %d usec",
- config->lro.timeout);
- DRV_LOG(DEBUG, "LRO minimal size of TCP segment "
- "required for coalescing is %d bytes",
- hca_attr->lro_min_mss_size);
+ config->lro_timeout);
}
#if defined(HAVE_MLX5DV_DR) && \
(defined(HAVE_MLX5_DR_CREATE_ACTION_FLOW_METER) || \
}
#endif
}
- if (config->cqe_comp && RTE_CACHE_LINE_SIZE == 128 &&
- !(sh->dev_cap.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)) {
- DRV_LOG(WARNING, "Rx CQE 128B compression is not supported");
+ if (config->cqe_comp && !sh->dev_cap.cqe_comp) {
+ DRV_LOG(WARNING, "Rx CQE 128B compression is not supported.");
config->cqe_comp = 0;
}
if (config->cqe_comp_fmt == MLX5_CQE_RESP_FORMAT_FTAG_STRIDX &&
}
DRV_LOG(DEBUG, "Rx CQE compression is %ssupported",
config->cqe_comp ? "" : "not ");
- if (config->tx_pp) {
- DRV_LOG(DEBUG, "Timestamp counter frequency %u kHz",
- hca_attr->dev_freq_khz);
- DRV_LOG(DEBUG, "Packet pacing is %ssupported",
- hca_attr->qos.packet_pacing ? "" : "not ");
- DRV_LOG(DEBUG, "Cross channel ops are %ssupported",
- hca_attr->cross_channel ? "" : "not ");
- DRV_LOG(DEBUG, "WQE index ignore is %ssupported",
- hca_attr->wqe_index_ignore ? "" : "not ");
- DRV_LOG(DEBUG, "Non-wire SQ feature is %ssupported",
- hca_attr->non_wire_sq ? "" : "not ");
- DRV_LOG(DEBUG, "Static WQE SQ feature is %ssupported (%d)",
- hca_attr->log_max_static_sq_wq ? "" : "not ",
- hca_attr->log_max_static_sq_wq);
- DRV_LOG(DEBUG, "WQE rate PP mode is %ssupported",
- hca_attr->qos.wqe_rate_pp ? "" : "not ");
- if (!sh->cdev->config.devx) {
- DRV_LOG(ERR, "DevX is required for packet pacing");
- err = ENODEV;
- goto error;
- }
- if (!hca_attr->qos.packet_pacing) {
- DRV_LOG(ERR, "Packet pacing is not supported");
- err = ENODEV;
- goto error;
- }
- if (!hca_attr->cross_channel) {
- DRV_LOG(ERR, "Cross channel operations are"
- " required for packet pacing");
- err = ENODEV;
- goto error;
- }
- if (!hca_attr->wqe_index_ignore) {
- DRV_LOG(ERR, "WQE index ignore feature is"
- " required for packet pacing");
- err = ENODEV;
- goto error;
- }
- if (!hca_attr->non_wire_sq) {
- DRV_LOG(ERR, "Non-wire SQ feature is"
- " required for packet pacing");
- err = ENODEV;
- goto error;
- }
- if (!hca_attr->log_max_static_sq_wq) {
- DRV_LOG(ERR, "Static WQE SQ feature is"
- " required for packet pacing");
- err = ENODEV;
- goto error;
- }
- if (!hca_attr->qos.wqe_rate_pp) {
- DRV_LOG(ERR, "WQE rate mode is required"
- " for packet pacing");
- err = ENODEV;
- goto error;
- }
-#ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
- DRV_LOG(ERR, "DevX does not provide UAR offset,"
- " can't create queues for packet pacing");
+ if (config->tx_pp && !sh->dev_cap.txpp_en) {
+ DRV_LOG(ERR, "Packet pacing is not supported.");
err = ENODEV;
goto error;
-#endif
}
if (config->std_delay_drop || config->hp_delay_drop) {
if (!hca_attr->rq_delay_drop) {
priv->dev_port);
}
}
- if (sh->cdev->config.devx)
- mlx5_rt_timestamp_config(sh, config, hca_attr);
/*
* If the HW has a bug working with tunnel packet decapsulation and
* scatter FCS, and decapsulation is needed, clear the hw_fcs_strip
* bit. Then RTE_ETH_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
*/
- if (hca_attr->scatter_fcs_w_decap_disable && config->decap_en)
+ if (sh->dev_cap.scatter_fcs_w_decap_disable && config->decap_en)
config->hw_fcs_strip = 0;
+ else
+ config->hw_fcs_strip = sh->dev_cap.hw_fcs_strip;
DRV_LOG(DEBUG, "FCS stripping configuration is %ssupported",
(config->hw_fcs_strip ? "" : "not "));
- if (config->mprq.enabled && !mprq) {
- DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
+ if (config->mprq.enabled && !sh->dev_cap.mprq.enabled) {
+ DRV_LOG(WARNING, "Multi-Packet RQ isn't supported.");
config->mprq.enabled = 0;
}
if (config->max_dump_files_num == 0)
eth_dev->rx_queue_count = mlx5_rx_queue_count;
/* Register MAC address. */
claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
- if (config->vf && config->vf_nl_en)
+ if (sh->dev_cap.vf && config->vf_nl_en)
mlx5_nl_mac_addr_sync(priv->nl_socket_route,
mlx5_ifindex(eth_dev),
eth_dev->data->mac_addrs,
if (mlx5_flex_item_port_init(eth_dev) < 0)
goto error;
}
- if (sh->cdev->config.devx && config->dv_flow_en && config->dest_tir) {
+ if (sh->cdev->config.devx && config->dv_flow_en &&
+ sh->dev_cap.dest_tir) {
priv->obj_ops = devx_obj_ops;
mlx5_queue_counter_id_prepare(eth_dev);
priv->obj_ops.lb_dummy_queue_create =
}
static void
-mlx5_os_config_default(struct mlx5_dev_config *config,
- struct mlx5_common_dev_config *cconf)
+mlx5_os_config_default(struct mlx5_dev_config *config)
{
memset(config, 0, sizeof(*config));
config->mps = MLX5_ARG_UNSET;
config->vf_nl_en = 1;
config->mprq.max_memcpy_len = MLX5_MPRQ_MEMCPY_DEFAULT_LEN;
config->mprq.min_rxqs_num = MLX5_MPRQ_MIN_RXQS;
- config->mprq.log_min_stride_wqe_size = cconf->devx ?
- cconf->hca_attr.log_min_stride_wqe_sz :
- MLX5_MPRQ_LOG_MIN_STRIDE_WQE_SIZE;
config->mprq.log_stride_num = MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM;
config->dv_esw_en = 1;
config->dv_flow_en = 1;
uint32_t restore;
/* Default configuration. */
- mlx5_os_config_default(&dev_config, &cdev->config);
- dev_config.vf = mlx5_dev_is_vf_pci(pci_dev);
+ mlx5_os_config_default(&dev_config);
list[i].eth_dev = mlx5_dev_spawn(cdev->dev, &list[i],
&dev_config, ð_da);
if (!list[i].eth_dev) {
if (ret != 0)
return ret;
/* Set default config data. */
- mlx5_os_config_default(&config, &cdev->config);
- config.sf = 1;
+ mlx5_os_config_default(&config);
/* Init spawn data. */
spawn.max_port = 1;
spawn.phys_port = 1;
mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
struct mlx5_priv *priv = dev->data->dev_private;
- const int vf = priv->config.vf;
+ const int vf = priv->sh->dev_cap.vf;
if (vf)
mlx5_nl_mac_addr_remove(priv->nl_socket_route,
uint32_t index)
{
struct mlx5_priv *priv = dev->data->dev_private;
- const int vf = priv->config.vf;
+ const int vf = priv->sh->dev_cap.vf;
int ret = 0;
if (vf)
/* Device capabilities structure which isn't changed in any stage. */
struct mlx5_dev_cap {
- uint64_t device_cap_flags_ex;
int max_cq; /* Maximum number of supported CQs */
int max_qp; /* Maximum number of supported QPs. */
int max_qp_wr; /* Maximum number of outstanding WR on any WQ. */
/* Maximum number of s/g per WR for SQ & RQ of QP for non RDMA Read
* operations.
*/
- uint32_t raw_packet_caps;
- uint32_t max_rwq_indirection_table_size;
+ int mps; /* Multi-packet send supported mode. */
+ uint32_t vf:1; /* This is a VF. */
+ uint32_t sf:1; /* This is an SF. */
+ uint32_t txpp_en:1; /* Tx packet pacing is supported. */
+ uint32_t mpls_en:1; /* MPLS over GRE/UDP is supported. */
+ uint32_t cqe_comp:1; /* CQE compression is supported. */
+ uint32_t hw_csum:1; /* Checksum offload is supported. */
+ uint32_t hw_padding:1; /* End alignment padding is supported. */
+ uint32_t dest_tir:1; /* Whether advanced DR API is available. */
+ uint32_t dv_esw_en:1; /* E-Switch DV flow is supported. */
+ uint32_t dv_flow_en:1; /* DV flow is supported. */
+ uint32_t swp:3; /* Tx generic tunnel checksum and TSO offload. */
+ uint32_t hw_vlan_strip:1; /* VLAN stripping is supported. */
+ uint32_t scatter_fcs_w_decap_disable:1;
+ /* HW has bug working with tunnel packet decap and scatter FCS. */
+ uint32_t hw_fcs_strip:1; /* FCS stripping is supported. */
+ uint32_t rt_timestamp:1; /* Realtime timestamp format. */
+ uint32_t lro_supported:1; /* Whether LRO is supported. */
+ uint32_t rq_delay_drop_en:1; /* Enable RxQ delay drop. */
+ uint32_t tunnel_en:3;
+ /* Whether tunnel stateless offloads are supported. */
+ uint32_t ind_table_max_size;
/* Maximum receive WQ indirection table size. */
- uint32_t max_tso; /* Maximum TCP payload for TSO. */
- uint32_t tso_supported_qpts;
- uint64_t flags;
- uint64_t comp_mask;
- uint32_t sw_parsing_offloads;
- uint32_t min_single_stride_log_num_of_bytes;
- uint32_t max_single_stride_log_num_of_bytes;
- uint32_t min_single_wqe_log_num_of_strides;
- uint32_t max_single_wqe_log_num_of_strides;
- uint32_t stride_supported_qpts;
- uint32_t tunnel_offloads_caps;
+ uint32_t tso:1; /* Whether TSO is supported. */
+ uint32_t tso_max_payload_sz; /* Maximum TCP payload for TSO. */
+ struct {
+ uint32_t enabled:1; /* Whether MPRQ is enabled. */
+ uint32_t log_min_stride_size; /* Log min size of a stride. */
+ uint32_t log_max_stride_size; /* Log max size of a stride. */
+ uint32_t log_min_stride_num; /* Log min num of strides. */
+ uint32_t log_max_stride_num; /* Log max num of strides. */
+ uint32_t log_min_stride_wqe_size;
+ /* Log min WQE size, (size of single stride)*(num of strides).*/
+ } mprq; /* Capability for Multi-Packet RQ. */
char fw_ver[64]; /* Firmware version of this device. */
};
uint64_t imissed;
};
-#define MLX5_LRO_SUPPORTED(dev) \
- (((struct mlx5_priv *)((dev)->data->dev_private))->config.lro.supported)
-
/* Maximal size of coalesced segment for LRO is set in chunks of 256 Bytes. */
#define MLX5_LRO_SEG_CHUNK_SIZE 256u
/* Maximal number of segments to split. */
#define MLX5_MAX_RXQ_NSEG (1u << MLX5_MAX_LOG_RQ_SEGS)
-/* LRO configurations structure. */
-struct mlx5_lro_config {
- uint32_t supported:1; /* Whether LRO is supported. */
- uint32_t timeout; /* User configuration. */
-};
-
/*
* Device configuration structure.
*
* - User device parameters disabled features.
*/
struct mlx5_dev_config {
- unsigned int hw_csum:1; /* Checksum offload is supported. */
- unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */
unsigned int hw_vlan_insert:1; /* VLAN insertion in WQE is supported. */
unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */
unsigned int hw_padding:1; /* End alignment padding is supported. */
- unsigned int vf:1; /* This is a VF. */
- unsigned int sf:1; /* This is a SF. */
- unsigned int tunnel_en:3;
- /* Whether tunnel stateless offloads are supported. */
- unsigned int mpls_en:1; /* MPLS over GRE/UDP is enabled. */
unsigned int cqe_comp:1; /* CQE compression is enabled. */
unsigned int cqe_comp_fmt:3; /* CQE compression format. */
- unsigned int tso:1; /* Whether TSO is supported. */
unsigned int rx_vec_en:1; /* Rx vector is enabled. */
unsigned int l3_vxlan_en:1; /* Enable L3 VXLAN flow creation. */
unsigned int vf_nl_en:1; /* Enable Netlink requests in VF mode. */
unsigned int dv_xmeta_en:2; /* Enable extensive flow metadata. */
unsigned int lacp_by_user:1;
/* Enable user to manage LACP traffic. */
- unsigned int swp:3; /* Tx generic tunnel checksum and TSO offload. */
- unsigned int dest_tir:1; /* Whether advanced DR API is available. */
unsigned int reclaim_mode:2; /* Memory reclaim mode. */
- unsigned int rt_timestamp:1; /* realtime timestamp format. */
unsigned int decap_en:1; /* Whether decap will be used or not. */
unsigned int dv_miss_info:1; /* restore packet after partial hw miss */
unsigned int allow_duplicate_pattern:1;
unsigned int enabled:1; /* Whether MPRQ is enabled. */
unsigned int log_stride_num; /* Log number of strides. */
unsigned int log_stride_size; /* Log size of a stride. */
- unsigned int log_min_stride_size; /* Log min size of a stride.*/
- unsigned int log_max_stride_size; /* Log max size of a stride.*/
- unsigned int log_min_stride_num; /* Log min num of strides. */
- unsigned int log_max_stride_num; /* Log max num of strides. */
- unsigned int log_min_stride_wqe_size;
- /* Log min WQE size, (size of single stride)*(num of strides).*/
unsigned int max_memcpy_len;
/* Maximum packet size to memcpy Rx packets. */
unsigned int min_rxqs_num;
/* Rx queue count threshold to enable MPRQ. */
} mprq; /* Configurations for Multi-Packet RQ. */
int mps; /* Multi-packet send supported mode. */
- unsigned int tso_max_payload_sz; /* Maximum TCP payload for TSO. */
- unsigned int ind_table_max_size; /* Maximum indirection table size. */
unsigned int max_dump_files_num; /* Maximum dump files per queue. */
unsigned int log_hp_size; /* Single hairpin queue data size in total. */
+ unsigned int lro_timeout; /* LRO user configuration. */
int txqs_inline; /* Queue number threshold for inlining. */
int txq_inline_min; /* Minimal amount of data bytes to inline. */
int txq_inline_max; /* Max packet size for inlining with SEND. */
int txq_inline_mpw; /* Max packet size for inlining with eMPW. */
int tx_pp; /* Timestamp scheduling granularity in nanoseconds. */
int tx_skew; /* Tx scheduling skew between WQE and data on wire. */
- struct mlx5_lro_config lro; /* LRO configuration. */
};
port_id = mlx5_eth_find_next(port_id + 1, dev))
int mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs);
void mlx5_rt_timestamp_config(struct mlx5_dev_ctx_shared *sh,
- struct mlx5_dev_config *config,
struct mlx5_hca_attr *hca_attr);
struct mlx5_dev_ctx_shared *
mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
void *pv_iseg = NULL;
u32 cb_iseg = 0;
+ MLX5_ASSERT(sh->cdev->config.devx);
+ MLX5_ASSERT(mlx5_dev_is_pci(sh->cdev->dev));
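+ /* The Windows flow supports DevX-capable PCI devices only. */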
pv_iseg = mlx5_glue->query_hca_iseg(mlx5_ctx, &cb_iseg);
if (pv_iseg == NULL) {
DRV_LOG(ERR, "Failed to get device hca_iseg.");
return -rte_errno;
}
memset(&sh->dev_cap, 0, sizeof(struct mlx5_dev_cap));
+ sh->dev_cap.vf = mlx5_dev_is_vf_pci(RTE_DEV_TO_PCI(sh->cdev->dev));
sh->dev_cap.max_cq = 1 << hca_attr->log_max_cq;
sh->dev_cap.max_qp = 1 << hca_attr->log_max_qp;
sh->dev_cap.max_qp_wr = 1 << hca_attr->log_max_qp_sz;
- sh->dev_cap.max_tso = 1 << hca_attr->max_lso_cap;
+ sh->dev_cap.dv_flow_en = 1;
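+ /* Multi-packet send is always reported as unsupported here. */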
+ sh->dev_cap.mps = MLX5_MPW_DISABLED;
+ DRV_LOG(DEBUG, "MPW isn't supported.");
+ DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is no supported.");
+ sh->dev_cap.hw_csum = hca_attr->csum_cap;
+ DRV_LOG(DEBUG, "Checksum offloading is %ssupported.",
+ (sh->dev_cap.hw_csum ? "" : "not "));
+ sh->dev_cap.hw_vlan_strip = hca_attr->vlan_cap;
+ DRV_LOG(DEBUG, "VLAN stripping is %ssupported.",
+ (sh->dev_cap.hw_vlan_strip ? "" : "not "));
+ sh->dev_cap.hw_fcs_strip = hca_attr->scatter_fcs;
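+ /* max_lso_cap is the log of the maximal TSO payload size. */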
+ sh->dev_cap.tso = ((1 << hca_attr->max_lso_cap) > 0);
+ if (sh->dev_cap.tso)
+ sh->dev_cap.tso_max_payload_sz = 1 << hca_attr->max_lso_cap;
+ DRV_LOG(DEBUG, "Counters are not supported.");
if (hca_attr->rss_ind_tbl_cap) {
- sh->dev_cap.max_rwq_indirection_table_size =
- 1 << hca_attr->rss_ind_tbl_cap;
+ /*
+ * DPDK doesn't support larger/variable indirection tables.
+ * Once DPDK supports it, take max size from device attr.
+ */
+ sh->dev_cap.ind_table_max_size =
+ RTE_MIN(1 << hca_attr->rss_ind_tbl_cap,
+ (unsigned int)RTE_ETH_RSS_RETA_SIZE_512);
+ DRV_LOG(DEBUG, "Maximum Rx indirection table size is %u",
+ sh->dev_cap.ind_table_max_size);
+ }
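+ /* SW parsing and tunnel offload caps are taken from HCA attributes. */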
+ sh->dev_cap.swp = mlx5_get_supported_sw_parsing_offloads(hca_attr);
+ sh->dev_cap.tunnel_en = mlx5_get_supported_tunneling_offloads(hca_attr);
+ if (sh->dev_cap.tunnel_en) {
+ DRV_LOG(DEBUG, "Tunnel offloading is supported for %s%s%s",
+ sh->dev_cap.tunnel_en &
+ MLX5_TUNNELED_OFFLOADS_VXLAN_CAP ? "[VXLAN]" : "",
+ sh->dev_cap.tunnel_en &
+ MLX5_TUNNELED_OFFLOADS_GRE_CAP ? "[GRE]" : "",
+ sh->dev_cap.tunnel_en &
+ MLX5_TUNNELED_OFFLOADS_GENEVE_CAP ? "[GENEVE]" : "");
+ } else {
+ DRV_LOG(DEBUG, "Tunnel offloading is not supported.");
}
- sh->dev_cap.sw_parsing_offloads =
- mlx5_get_supported_sw_parsing_offloads(hca_attr);
- sh->dev_cap.tunnel_offloads_caps =
- mlx5_get_supported_tunneling_offloads(hca_attr);
snprintf(sh->dev_cap.fw_ver, 64, "%x.%x.%04x",
MLX5_GET(initial_seg, pv_iseg, fw_rev_major),
MLX5_GET(initial_seg, pv_iseg, fw_rev_minor),
MLX5_GET(initial_seg, pv_iseg, fw_rev_subminor));
+ DRV_LOG(DEBUG, "Packet pacing is not supported.");
+ mlx5_rt_timestamp_config(sh, hca_attr);
return 0;
}
{
const struct mlx5_switch_info *switch_info = &spawn->info;
struct mlx5_dev_ctx_shared *sh = NULL;
- struct mlx5_hca_attr *hca_attr;
struct rte_eth_dev *eth_dev = NULL;
struct mlx5_priv *priv = NULL;
int err = 0;
strerror(errno));
goto error;
}
- DRV_LOG(DEBUG, "MPW isn't supported");
- config->swp = sh->dev_cap.sw_parsing_offloads &
- (MLX5_SW_PARSING_CAP | MLX5_SW_PARSING_CSUM_CAP |
- MLX5_SW_PARSING_TSO_CAP);
- config->ind_table_max_size =
- sh->dev_cap.max_rwq_indirection_table_size;
- config->tunnel_en = sh->dev_cap.tunnel_offloads_caps &
- (MLX5_TUNNELED_OFFLOADS_VXLAN_CAP |
- MLX5_TUNNELED_OFFLOADS_GRE_CAP |
- MLX5_TUNNELED_OFFLOADS_GENEVE_CAP);
- if (config->tunnel_en) {
- DRV_LOG(DEBUG, "tunnel offloading is supported for %s%s%s",
- config->tunnel_en &
- MLX5_TUNNELED_OFFLOADS_VXLAN_CAP ? "[VXLAN]" : "",
- config->tunnel_en &
- MLX5_TUNNELED_OFFLOADS_GRE_CAP ? "[GRE]" : "",
- config->tunnel_en &
- MLX5_TUNNELED_OFFLOADS_GENEVE_CAP ? "[GENEVE]" : ""
- );
- } else {
- DRV_LOG(DEBUG, "tunnel offloading is not supported");
- }
- DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is no supported");
- config->mpls_en = 0;
/* Allocate private eth device data. */
priv = mlx5_malloc(MLX5_MEM_ZERO | MLX5_MEM_RTE,
sizeof(*priv),
}
own_domain_id = 1;
}
- DRV_LOG(DEBUG, "counters are not supported");
- config->ind_table_max_size =
- sh->dev_cap.max_rwq_indirection_table_size;
- /*
- * Remove this check once DPDK supports larger/variable
- * indirection tables.
- */
- if (config->ind_table_max_size > (unsigned int)RTE_ETH_RSS_RETA_SIZE_512)
- config->ind_table_max_size = RTE_ETH_RSS_RETA_SIZE_512;
- DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
- config->ind_table_max_size);
if (config->hw_padding) {
DRV_LOG(DEBUG, "Rx end alignment padding isn't supported");
config->hw_padding = 0;
}
- config->tso = (sh->dev_cap.max_tso > 0);
- if (config->tso)
- config->tso_max_payload_sz = sh->dev_cap.max_tso;
DRV_LOG(DEBUG, "%sMPS is %s.",
config->mps == MLX5_MPW_ENHANCED ? "enhanced " :
config->mps == MLX5_MPW ? "legacy " : "",
DRV_LOG(WARNING, "Rx CQE compression isn't supported.");
config->cqe_comp = 0;
}
- if (sh->cdev->config.devx) {
- hca_attr = &sh->cdev->config.hca_attr;
- config->hw_csum = hca_attr->csum_cap;
- DRV_LOG(DEBUG, "checksum offloading is %ssupported",
- (config->hw_csum ? "" : "not "));
- config->hw_vlan_strip = hca_attr->vlan_cap;
- DRV_LOG(DEBUG, "VLAN stripping is %ssupported",
- (config->hw_vlan_strip ? "" : "not "));
- config->hw_fcs_strip = hca_attr->scatter_fcs;
- mlx5_rt_timestamp_config(sh, config, hca_attr);
- }
+ config->hw_fcs_strip = sh->dev_cap.hw_fcs_strip;
if (config->mprq.enabled) {
DRV_LOG(WARNING, "Multi-Packet RQ isn't supported");
config->mprq.enabled = 0;
},
.dv_flow_en = 1,
.log_hp_size = MLX5_ARG_UNSET,
- .vf = mlx5_dev_is_vf_pci(pci_dev),
};
int ret;
uint32_t restore;