attr->flow_hit_aso = !!(MLX5_GET64(cmd_hca_cap, hcattr,
general_obj_types) &
MLX5_GENERAL_OBJ_TYPES_CAP_FLOW_HIT_ASO);
+ attr->log_max_cq = MLX5_GET(cmd_hca_cap, hcattr, log_max_cq);
+ attr->log_max_qp = MLX5_GET(cmd_hca_cap, hcattr, log_max_qp);
+ attr->log_max_cq_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_cq_sz);
+ attr->log_max_qp_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_qp_sz);
+ attr->log_max_mrw_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_mrw_sz);
+ attr->log_max_pd = MLX5_GET(cmd_hca_cap, hcattr, log_max_pd);
+ attr->log_max_srq = MLX5_GET(cmd_hca_cap, hcattr, log_max_srq);
+ attr->log_max_srq_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_srq_sz);
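The eight reads above cache the device's log2 limits (CQ/QP counts and
entry sizes, protection domains, MR/MW sizes, SRQs) from the first HCA
capabilities page, so later resource creation can be validated without
extra DevX queries. A minimal sketch of one consumer, assuming a
populated struct mlx5_hca_attr and RTE_MIN from rte_common.h; the
helper name is hypothetical:

	/* Clamp a requested CQE count to the device limit (sketch only). */
	static inline uint32_t
	mlx5_clamp_cq_size(const struct mlx5_hca_attr *attr, uint32_t cqe_n)
	{
		uint32_t max_cqe = 1u << attr->log_max_cq_sz;

		return RTE_MIN(cqe_n, max_cqe);
	}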
if (attr->qos.sup) {
MLX5_SET(query_hca_cap_in, in, op_mod,
MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
attr->tunnel_stateless_gtp = MLX5_GET
(per_protocol_networking_offload_caps,
hcattr, tunnel_stateless_gtp);
+ attr->rss_ind_tbl_cap = MLX5_GET
+ (per_protocol_networking_offload_caps,
+ hcattr, rss_ind_tbl_cap);
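rss_ind_tbl_cap comes from the per-protocol networking offload
capabilities page; per the PRM it is the log2 of the maximum RSS
indirection (RQT) table size. A hedged one-liner showing the intended
use, assuming the attribute was queried as above:

	/* Largest indirection table the device accepts (sketch only). */
	uint32_t max_rqt_size = 1u << attr->rss_ind_tbl_cap;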
if (attr->wqe_inline_mode != MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
return 0;
if (attr->eth_virt) {
uint32_t log_max_ft_sampler_num:8;
struct mlx5_hca_qos_attr qos;
struct mlx5_hca_vdpa_attr vdpa;
+	uint32_t log_max_qp_sz;
+	uint32_t log_max_cq_sz;
+	uint32_t log_max_qp;
+	uint32_t log_max_cq;
+	uint32_t log_max_pd;
+	uint32_t log_max_mrw_sz;
+	uint32_t log_max_srq;
+	uint32_t log_max_srq_sz;
+ uint32_t rss_ind_tbl_cap;
};
struct mlx5_devx_wq_attr {
__rte_internal
int mlx5_devx_cmd_query_virtio_q_counters(struct mlx5_devx_obj *couners_obj,
struct mlx5_devx_virtio_q_couners_attr *attr);
-
__rte_internal
struct mlx5_devx_obj *mlx5_devx_cmd_create_flow_hit_aso_obj(void *ctx,
uint32_t pd);
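A usage sketch for the declaration above: the object is created on a
DevX context with a protection domain number and released like any
other DevX object. "ctx" and "pdn" are assumed to come from the
caller's probe path; error handling is reduced to the NULL check, and
the DevX helpers are expected to set rte_errno on failure:

	struct mlx5_devx_obj *aso_obj;

	aso_obj = mlx5_devx_cmd_create_flow_hit_aso_obj(ctx, pdn);
	if (aso_obj == NULL)
		return -rte_errno;
	/* ... use the ASO object, then release it. */
	mlx5_devx_cmd_destroy(aso_obj);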
#define MLX5_GET64(typ, p, fld) rte_be_to_cpu_64(*((rte_be64_t *)(p) + \
__mlx5_64_off(typ, fld)))
#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
+#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
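The divide-by-8 in MLX5_UN_SZ_BYTES (and in MLX5_FLD_SZ_BYTES above)
follows the mlx5_ifc convention: every u8 element of a *_bits layout
represents a single bit (note gre_c_present[0x1] below), so sizeof()
yields a bit count, not bytes. An illustrative, hypothetical layout:

	/* One u8 per bit: f spans 0x20 bits, i.e. 4 bytes on the wire. */
	struct mlx5_ifc_example_bits {
		u8 f[0x20];
	};
	/* MLX5_FLD_SZ_BYTES(example, f) == 0x20 / 8 == 4. */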
struct mlx5_ifc_fte_match_set_misc_bits {
u8 gre_c_present[0x1];