+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+	/* Ask the kernel to report Striding RQ (Multi-Packet RQ) caps too. */
+	dv_attr.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;
+#endif
+	/* Query mlx5-specific device attributes through the DV glue layer. */
+	err = mlx5_glue->dv_query_device(cdev->ctx, &dv_attr);
+	if (err) {
+		rte_errno = errno;
+		return -rte_errno;
+	}
+	/* Start from a clean slate; every capability below is opt-in. */
+	memset(&sh->dev_cap, 0, sizeof(struct mlx5_dev_cap));
+	/* Classify the device: PCI devices may be VFs, anything else is an SF. */
+	if (mlx5_dev_is_pci(cdev->dev))
+		sh->dev_cap.vf = mlx5_dev_is_vf_pci(RTE_DEV_TO_PCI(cdev->dev));
+	else
+		sh->dev_cap.sf = 1;
+	/* Basic verbs limits taken from the extended attribute query. */
+	sh->dev_cap.max_qp_wr = attr_ex.orig_attr.max_qp_wr;
+	sh->dev_cap.max_sge = attr_ex.orig_attr.max_sge;
+	sh->dev_cap.max_cq = attr_ex.orig_attr.max_cq;
+	sh->dev_cap.max_qp = attr_ex.orig_attr.max_qp;
+#ifdef HAVE_MLX5DV_DR_ACTION_DEST_DEVX_TIR
+	/* rdma-core can steer flows directly to a DevX TIR. */
+	sh->dev_cap.dest_tir = 1;
+#endif
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) && defined(HAVE_MLX5DV_DR)
+	DRV_LOG(DEBUG, "DV flow is supported.");
+	sh->dev_cap.dv_flow_en = 1;
+#endif
+#ifdef HAVE_MLX5DV_DR_ESWITCH
+	/* E-Switch offload needs eswitch-manager FW cap, DV flow and E-Switch mode. */
+	if (hca_attr->eswitch_manager && sh->dev_cap.dv_flow_en && sh->esw_mode)
+		sh->dev_cap.dv_esw_en = 1;
+#endif
+	/*
+	 * Multi-packet send is supported by ConnectX-4 Lx PF as well
+	 * as all ConnectX-5 devices.
+	 */
+	if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED) {
+		if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_ENHANCED_MPW) {
+			DRV_LOG(DEBUG, "Enhanced MPW is supported.");
+			sh->dev_cap.mps = MLX5_MPW_ENHANCED;
+		} else {
+			DRV_LOG(DEBUG, "MPW is supported.");
+			sh->dev_cap.mps = MLX5_MPW;
+		}
+	} else {
+		DRV_LOG(DEBUG, "MPW isn't supported.");
+		sh->dev_cap.mps = MLX5_MPW_DISABLED;
+	}
+#if (RTE_CACHE_LINE_SIZE == 128)
+	/*
+	 * On 128B cache-line systems Rx CQE compression is enabled only when
+	 * the device reports 128B compressed CQEs; on 64B cache-line systems
+	 * it is enabled unconditionally.
+	 */
+	if (dv_attr.flags & MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP)
+		sh->dev_cap.cqe_comp = 1;
+	DRV_LOG(DEBUG, "Rx CQE 128B compression is %ssupported.",
+		sh->dev_cap.cqe_comp ? "" : "not ");
+#else
+	sh->dev_cap.cqe_comp = 1;
+#endif
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+	/* MPLS offload requires both MPLS-over-GRE and MPLS-over-UDP caps. */
+	sh->dev_cap.mpls_en =
+		((dv_attr.tunnel_offloads_caps &
+		  MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_GRE) &&
+		 (dv_attr.tunnel_offloads_caps &
+		  MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CW_MPLS_OVER_UDP));
+	DRV_LOG(DEBUG, "MPLS over GRE/UDP tunnel offloading is %ssupported.",
+		sh->dev_cap.mpls_en ? "" : "not ");
+#else
+	DRV_LOG(WARNING,
+		"MPLS over GRE/UDP tunnel offloading disabled due to old OFED/rdma-core version or firmware configuration");
+#endif
+#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
+	/* Two rdma-core generations expose Rx end padding differently. */
+	sh->dev_cap.hw_padding = !!attr_ex.rx_pad_end_addr_align;
+#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
+	sh->dev_cap.hw_padding = !!(attr_ex.device_cap_flags_ex &
+				    IBV_DEVICE_PCI_WRITE_END_PADDING);
+#endif
+	sh->dev_cap.hw_csum =
+		!!(attr_ex.device_cap_flags_ex & IBV_DEVICE_RAW_IP_CSUM);
+	DRV_LOG(DEBUG, "Checksum offloading is %ssupported.",
+		sh->dev_cap.hw_csum ? "" : "not ");
+	sh->dev_cap.hw_vlan_strip = !!(attr_ex.raw_packet_caps &
+				       IBV_RAW_PACKET_CAP_CVLAN_STRIPPING);
+	DRV_LOG(DEBUG, "VLAN stripping is %ssupported.",
+		(sh->dev_cap.hw_vlan_strip ? "" : "not "));
+	sh->dev_cap.hw_fcs_strip = !!(attr_ex.raw_packet_caps &
+				      IBV_RAW_PACKET_CAP_SCATTER_FCS);
+#if !defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) && \
+	!defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
+	DRV_LOG(DEBUG, "Counters are not supported.");
+#endif
+	/*
+	 * DPDK doesn't support larger/variable indirection tables.
+	 * Once DPDK supports it, take max size from device attr.
+	 */
+	sh->dev_cap.ind_table_max_size =
+		RTE_MIN(attr_ex.rss_caps.max_rwq_indirection_table_size,
+			(unsigned int)RTE_ETH_RSS_RETA_SIZE_512);
+	DRV_LOG(DEBUG, "Maximum Rx indirection table size is %u",
+		sh->dev_cap.ind_table_max_size);
+	/* TSO is usable only on raw-packet QPs with a non-zero max segment. */
+	sh->dev_cap.tso = (attr_ex.tso_caps.max_tso > 0 &&
+			   (attr_ex.tso_caps.supported_qpts &
+			    (1 << IBV_QPT_RAW_PACKET)));
+	if (sh->dev_cap.tso)
+		sh->dev_cap.tso_max_payload_sz = attr_ex.tso_caps.max_tso;
+	strlcpy(sh->dev_cap.fw_ver, attr_ex.orig_attr.fw_ver,
+		sizeof(sh->dev_cap.fw_ver));
+#ifdef HAVE_IBV_MLX5_MOD_SWP
+	/* Software parsing (SWP): mask to the capabilities the PMD can use. */
+	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_SWP)
+		sh->dev_cap.swp = dv_attr.sw_parsing_caps.sw_parsing_offloads &
+				  (MLX5_SW_PARSING_CAP |
+				   MLX5_SW_PARSING_CSUM_CAP |
+				   MLX5_SW_PARSING_TSO_CAP);
+	DRV_LOG(DEBUG, "SWP support: %u", sh->dev_cap.swp);
+#endif
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
+	/* Multi-Packet RQ (Striding RQ) capabilities, if reported above. */
+	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_STRIDING_RQ) {
+		struct mlx5dv_striding_rq_caps *strd_rq_caps =
+				&dv_attr.striding_rq_caps;
+
+		sh->dev_cap.mprq.enabled = 1;
+		sh->dev_cap.mprq.log_min_stride_size =
+			strd_rq_caps->min_single_stride_log_num_of_bytes;
+		sh->dev_cap.mprq.log_max_stride_size =
+			strd_rq_caps->max_single_stride_log_num_of_bytes;
+		sh->dev_cap.mprq.log_min_stride_num =
+			strd_rq_caps->min_single_wqe_log_num_of_strides;
+		sh->dev_cap.mprq.log_max_stride_num =
+			strd_rq_caps->max_single_wqe_log_num_of_strides;
+		/* Prefer the FW-reported minimum WQE size when DevX is on. */
+		sh->dev_cap.mprq.log_min_stride_wqe_size =
+					cdev->config.devx ?
+					hca_attr->log_min_stride_wqe_sz :
+					MLX5_MPRQ_LOG_MIN_STRIDE_WQE_SIZE;
+		DRV_LOG(DEBUG, "\tmin_single_stride_log_num_of_bytes: %u",
+			sh->dev_cap.mprq.log_min_stride_size);
+		DRV_LOG(DEBUG, "\tmax_single_stride_log_num_of_bytes: %u",
+			sh->dev_cap.mprq.log_max_stride_size);
+		DRV_LOG(DEBUG, "\tmin_single_wqe_log_num_of_strides: %u",
+			sh->dev_cap.mprq.log_min_stride_num);
+		DRV_LOG(DEBUG, "\tmax_single_wqe_log_num_of_strides: %u",
+			sh->dev_cap.mprq.log_max_stride_num);
+		DRV_LOG(DEBUG, "\tmin_stride_wqe_log_size: %u",
+			sh->dev_cap.mprq.log_min_stride_wqe_size);
+		DRV_LOG(DEBUG, "\tsupported_qpts: %d",
+			strd_rq_caps->supported_qpts);
+		DRV_LOG(DEBUG, "Device supports Multi-Packet RQ.");
+	}
+#endif
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+	/* Tunnel offloads: keep only VXLAN/GRE/GENEVE bits the PMD handles. */
+	if (dv_attr.comp_mask & MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS) {
+		sh->dev_cap.tunnel_en = dv_attr.tunnel_offloads_caps &
+					(MLX5_TUNNELED_OFFLOADS_VXLAN_CAP |
+					 MLX5_TUNNELED_OFFLOADS_GRE_CAP |
+					 MLX5_TUNNELED_OFFLOADS_GENEVE_CAP);
+	}
+	if (sh->dev_cap.tunnel_en) {
+		DRV_LOG(DEBUG, "Tunnel offloading is supported for %s%s%s",
+			sh->dev_cap.tunnel_en &
+			MLX5_TUNNELED_OFFLOADS_VXLAN_CAP ? "[VXLAN]" : "",
+			sh->dev_cap.tunnel_en &
+			MLX5_TUNNELED_OFFLOADS_GRE_CAP ? "[GRE]" : "",
+			sh->dev_cap.tunnel_en &
+			MLX5_TUNNELED_OFFLOADS_GENEVE_CAP ? "[GENEVE]" : "");
+	} else {
+		DRV_LOG(DEBUG, "Tunnel offloading is not supported.");
+	}
+#else
+	DRV_LOG(WARNING,
+		"Tunnel offloading disabled due to old OFED/rdma-core version");
+#endif
+	/* Everything below depends on DevX (hca_attr, packet pacing, LRO). */
+	if (!sh->cdev->config.devx)
+		return 0;
+	/* Check capabilities for Packet Pacing. */
+	DRV_LOG(DEBUG, "Timestamp counter frequency %u kHz.",
+		hca_attr->dev_freq_khz);
+	DRV_LOG(DEBUG, "Packet pacing is %ssupported.",
+		hca_attr->qos.packet_pacing ? "" : "not ");
+	DRV_LOG(DEBUG, "Cross channel ops are %ssupported.",
+		hca_attr->cross_channel ? "" : "not ");
+	DRV_LOG(DEBUG, "WQE index ignore is %ssupported.",
+		hca_attr->wqe_index_ignore ? "" : "not ");
+	DRV_LOG(DEBUG, "Non-wire SQ feature is %ssupported.",
+		hca_attr->non_wire_sq ? "" : "not ");
+	DRV_LOG(DEBUG, "Static WQE SQ feature is %ssupported (%d)",
+		hca_attr->log_max_static_sq_wq ? "" : "not ",
+		hca_attr->log_max_static_sq_wq);
+	DRV_LOG(DEBUG, "WQE rate PP mode is %ssupported.",
+		hca_attr->qos.wqe_rate_pp ? "" : "not ");
+	/*
+	 * Packet pacing needs every one of the features checked below;
+	 * clear txpp_en as soon as any prerequisite is missing.
+	 */
+	sh->dev_cap.txpp_en = hca_attr->qos.packet_pacing;
+	if (!hca_attr->cross_channel) {
+		DRV_LOG(DEBUG,
+			"Cross channel operations are required for packet pacing.");
+		sh->dev_cap.txpp_en = 0;
+	}
+	if (!hca_attr->wqe_index_ignore) {
+		DRV_LOG(DEBUG,
+			"WQE index ignore feature is required for packet pacing.");
+		sh->dev_cap.txpp_en = 0;
+	}
+	if (!hca_attr->non_wire_sq) {
+		DRV_LOG(DEBUG,
+			"Non-wire SQ feature is required for packet pacing.");
+		sh->dev_cap.txpp_en = 0;
+	}
+	if (!hca_attr->log_max_static_sq_wq) {
+		DRV_LOG(DEBUG,
+			"Static WQE SQ feature is required for packet pacing.");
+		sh->dev_cap.txpp_en = 0;
+	}
+	if (!hca_attr->qos.wqe_rate_pp) {
+		DRV_LOG(DEBUG,
+			"WQE rate mode is required for packet pacing.");
+		sh->dev_cap.txpp_en = 0;
+	}
+#ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
+	DRV_LOG(DEBUG,
+		"DevX does not provide UAR offset, can't create queues for packet pacing.");
+	sh->dev_cap.txpp_en = 0;
+#endif
+	/* Check for LRO support. */
+	if (mlx5_devx_obj_ops_en(sh) && hca_attr->lro_cap) {
+		/* TBD check tunnel lro caps. */
+		sh->dev_cap.lro_supported = 1;
+		DRV_LOG(DEBUG, "Device supports LRO.");
+		DRV_LOG(DEBUG,
+			"LRO minimal size of TCP segment required for coalescing is %d bytes.",
+			hca_attr->lro_min_mss_size);
+	}
+	/* Miscellaneous DevX-only capabilities. */
+	sh->dev_cap.scatter_fcs_w_decap_disable =
+					hca_attr->scatter_fcs_w_decap_disable;
+	sh->dev_cap.rq_delay_drop_en = hca_attr->rq_delay_drop;
+	/* Derive the real-time timestamp configuration from FW attributes. */
+	mlx5_rt_timestamp_config(sh, hca_attr);
+	return 0;