X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcommon%2Fmlx5%2Fmlx5_devx_cmds.c;h=88be7dea3d9d55122739d92d74abfbb1ec8e212c;hb=5b9e24ae1411b2dba2ec754587436208d6b24578;hp=b792ce1aa32702ac19bfd7bdc739ad0db3de59cf;hpb=01b8b5b618eb071a7c6c419bfd88a96e42178ba3;p=dpdk.git

diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c
index b792ce1aa3..88be7dea3d 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.c
+++ b/drivers/common/mlx5/mlx5_devx_cmds.c
@@ -53,8 +53,8 @@ mlx5_devx_cmd_register_read(void *ctx, uint16_t reg_id, uint32_t arg,
 	MLX5_SET(access_register_in, in, register_id, reg_id);
 	MLX5_SET(access_register_in, in, argument, arg);
 	rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out,
-					 MLX5_ST_SZ_DW(access_register_out) *
-					 sizeof(uint32_t) + dw_cnt);
+					 MLX5_ST_SZ_BYTES(access_register_out) +
+					 sizeof(uint32_t) * dw_cnt);
 	if (rc)
 		goto error;
 	status = MLX5_GET(access_register_out, out, status);
@@ -266,17 +266,17 @@ mlx5_devx_cmd_mkey_create(void *ctx,
 	MLX5_SET(mkc, mkc, qpn, 0xffffff);
 	MLX5_SET(mkc, mkc, pd, attr->pd);
 	MLX5_SET(mkc, mkc, mkey_7_0, attr->umem_id & 0xFF);
+	MLX5_SET(mkc, mkc, umr_en, attr->umr_en);
 	MLX5_SET(mkc, mkc, translations_octword_size, translation_size);
-	if (attr->relaxed_ordering == 1) {
-		MLX5_SET(mkc, mkc, relaxed_ordering_write, 0x1);
-		MLX5_SET(mkc, mkc, relaxed_ordering_read, 0x1);
-	}
+	MLX5_SET(mkc, mkc, relaxed_ordering_write,
+		 attr->relaxed_ordering_write);
+	MLX5_SET(mkc, mkc, relaxed_ordering_read, attr->relaxed_ordering_read);
 	MLX5_SET64(mkc, mkc, start_addr, attr->addr);
 	MLX5_SET64(mkc, mkc, len, attr->size);
 	mkey->obj = mlx5_glue->devx_obj_create(ctx, in, in_size_dw * 4, out,
 					       sizeof(out));
 	if (!mkey->obj) {
-		DRV_LOG(ERR, "Can't create %sdirect mkey - error %d\n",
+		DRV_LOG(ERR, "Can't create %sdirect mkey - error %d",
 			klm_num ? "an in" : "a ", errno);
 		rte_errno = errno;
 		mlx5_free(mkey);
@@ -308,7 +308,7 @@ mlx5_devx_get_out_command_status(void *out)
 	if (status) {
 		int syndrome = MLX5_GET(query_flow_counter_out, out, syndrome);
 
-		DRV_LOG(ERR, "Bad devX status %x, syndrome = %x", status,
+		DRV_LOG(ERR, "Bad DevX status %x, syndrome = %x", status,
 			syndrome);
 	}
 	return status;
@@ -374,8 +374,7 @@ mlx5_devx_cmd_query_nic_vport_context(void *ctx,
 	syndrome = MLX5_GET(query_nic_vport_context_out, out, syndrome);
 	if (status) {
 		DRV_LOG(DEBUG, "Failed to query NIC vport context, "
-			"status %x, syndrome = %x",
-			status, syndrome);
+			"status %x, syndrome = %x", status, syndrome);
 		return -1;
 	}
 	vctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
@@ -530,15 +529,13 @@ mlx5_devx_cmd_create_flex_parser(void *ctx,
 	void *sample = MLX5_ADDR_OF(parse_graph_flex, flex, sample_table);
 	void *in_arc = MLX5_ADDR_OF(parse_graph_flex, flex, input_arc);
 	void *out_arc = MLX5_ADDR_OF(parse_graph_flex, flex, output_arc);
-	struct mlx5_devx_obj *parse_flex_obj = NULL;
+	struct mlx5_devx_obj *parse_flex_obj = mlx5_malloc
+		(MLX5_MEM_ZERO, sizeof(*parse_flex_obj), 0, SOCKET_ID_ANY);
 	uint32_t i;
 
-	parse_flex_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*parse_flex_obj), 0,
-				     SOCKET_ID_ANY);
 	if (!parse_flex_obj) {
-		DRV_LOG(ERR, "Failed to allocate flex parser data");
+		DRV_LOG(ERR, "Failed to allocate flex parser data.");
 		rte_errno = ENOMEM;
-		mlx5_free(in);
 		return NULL;
 	}
 	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
@@ -664,8 +661,7 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
 	syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
 	if (status) {
 		DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
-			"status %x, syndrome = %x",
-			status, syndrome);
+			"status %x, syndrome = %x", status, syndrome);
 		return -1;
 	}
 	hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
@@ -685,17 +681,24 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
 		(cmd_hca_cap, hcattr, log_min_hairpin_wq_data_sz);
 	attr->vhca_id = MLX5_GET(cmd_hca_cap, hcattr, vhca_id);
 	attr->relaxed_ordering_write = MLX5_GET(cmd_hca_cap, hcattr,
-			relaxed_ordering_write);
+						relaxed_ordering_write);
 	attr->relaxed_ordering_read = MLX5_GET(cmd_hca_cap, hcattr,
-			relaxed_ordering_read);
+					       relaxed_ordering_read);
 	attr->access_register_user = MLX5_GET(cmd_hca_cap, hcattr,
-			access_register_user);
+					      access_register_user);
 	attr->eth_net_offloads = MLX5_GET(cmd_hca_cap, hcattr,
 					  eth_net_offloads);
 	attr->eth_virt = MLX5_GET(cmd_hca_cap, hcattr, eth_virt);
 	attr->flex_parser_protocols = MLX5_GET(cmd_hca_cap, hcattr,
 					       flex_parser_protocols);
+	attr->max_geneve_tlv_options = MLX5_GET(cmd_hca_cap, hcattr,
+			max_geneve_tlv_options);
+	attr->max_geneve_tlv_option_data_len = MLX5_GET(cmd_hca_cap, hcattr,
+			max_geneve_tlv_option_data_len);
 	attr->qos.sup = MLX5_GET(cmd_hca_cap, hcattr, qos);
+	attr->qos.flow_meter_aso_sup = !!(MLX5_GET64(cmd_hca_cap, hcattr,
+					  general_obj_types) &
+				MLX5_GENERAL_OBJ_TYPES_CAP_FLOW_METER_ASO);
 	attr->vdpa.valid = !!(MLX5_GET64(cmd_hca_cap, hcattr,
 			      general_obj_types) &
 			      MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q);
@@ -716,12 +719,47 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
 					 device_frequency_khz);
 	attr->scatter_fcs_w_decap_disable =
 		MLX5_GET(cmd_hca_cap, hcattr, scatter_fcs_w_decap_disable);
+	attr->roce = MLX5_GET(cmd_hca_cap, hcattr, roce);
+	attr->rq_ts_format = MLX5_GET(cmd_hca_cap, hcattr, rq_ts_format);
+	attr->sq_ts_format = MLX5_GET(cmd_hca_cap, hcattr, sq_ts_format);
 	attr->regex = MLX5_GET(cmd_hca_cap, hcattr, regexp);
 	attr->regexp_num_of_engines = MLX5_GET(cmd_hca_cap, hcattr,
 					       regexp_num_of_engines);
 	attr->flow_hit_aso = !!(MLX5_GET64(cmd_hca_cap, hcattr,
 				general_obj_types) &
 				MLX5_GENERAL_OBJ_TYPES_CAP_FLOW_HIT_ASO);
+	attr->geneve_tlv_opt = !!(MLX5_GET64(cmd_hca_cap, hcattr,
+				general_obj_types) &
+				MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT);
+	attr->log_max_cq = MLX5_GET(cmd_hca_cap, hcattr, log_max_cq);
+	attr->log_max_qp = MLX5_GET(cmd_hca_cap, hcattr, log_max_qp);
+	attr->log_max_cq_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_cq_sz);
+	attr->log_max_qp_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_qp_sz);
+	attr->log_max_mrw_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_mrw_sz);
+	attr->log_max_pd = MLX5_GET(cmd_hca_cap, hcattr, log_max_pd);
+	attr->log_max_srq = MLX5_GET(cmd_hca_cap, hcattr, log_max_srq);
+	attr->log_max_srq_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_srq_sz);
+	attr->reg_c_preserve =
+		MLX5_GET(cmd_hca_cap, hcattr, reg_c_preserve);
+	attr->mmo_dma_en = MLX5_GET(cmd_hca_cap, hcattr, dma_mmo);
+	attr->mmo_compress_en = MLX5_GET(cmd_hca_cap, hcattr, compress);
+	attr->mmo_decompress_en = MLX5_GET(cmd_hca_cap, hcattr, decompress);
+	attr->compress_min_block_size = MLX5_GET(cmd_hca_cap, hcattr,
+						 compress_min_block_size);
+	attr->log_max_mmo_dma = MLX5_GET(cmd_hca_cap, hcattr, log_dma_mmo_size);
+	attr->log_max_mmo_compress = MLX5_GET(cmd_hca_cap, hcattr,
+					      log_compress_mmo_size);
+	attr->log_max_mmo_decompress = MLX5_GET(cmd_hca_cap, hcattr,
+						log_decompress_mmo_size);
+	attr->cqe_compression = MLX5_GET(cmd_hca_cap, hcattr, cqe_compression);
+	attr->mini_cqe_resp_flow_tag = MLX5_GET(cmd_hca_cap, hcattr,
+						mini_cqe_resp_flow_tag);
+	attr->mini_cqe_resp_l3_l4_tag = MLX5_GET(cmd_hca_cap, hcattr,
+						 mini_cqe_resp_l3_l4_tag);
+	attr->umr_indirect_mkey_disabled =
+		MLX5_GET(cmd_hca_cap, hcattr, umr_indirect_mkey_disabled);
+	attr->umr_modify_entity_size_disabled =
+		MLX5_GET(cmd_hca_cap, hcattr, umr_modify_entity_size_disabled);
 	if (attr->qos.sup) {
 		MLX5_SET(query_hca_cap_in, in, op_mod,
 			 MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
@@ -732,23 +770,33 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
 			goto error;
 		if (status) {
 			DRV_LOG(DEBUG, "Failed to query devx QOS capabilities,"
-				" status %x, syndrome = %x",
-				status, syndrome);
+				" status %x, syndrome = %x", status, syndrome);
 			return -1;
 		}
 		hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
-		attr->qos.srtcm_sup =
-				MLX5_GET(qos_cap, hcattr, flow_meter_srtcm);
+		attr->qos.flow_meter_old =
+				MLX5_GET(qos_cap, hcattr, flow_meter_old);
 		attr->qos.log_max_flow_meter =
 				MLX5_GET(qos_cap, hcattr, log_max_flow_meter);
 		attr->qos.flow_meter_reg_c_ids =
 				MLX5_GET(qos_cap, hcattr, flow_meter_reg_id);
-		attr->qos.flow_meter_reg_share =
-				MLX5_GET(qos_cap, hcattr, flow_meter_reg_share);
+		attr->qos.flow_meter =
+				MLX5_GET(qos_cap, hcattr, flow_meter);
 		attr->qos.packet_pacing =
 				MLX5_GET(qos_cap, hcattr, packet_pacing);
 		attr->qos.wqe_rate_pp =
 				MLX5_GET(qos_cap, hcattr, wqe_rate_pp);
+		if (attr->qos.flow_meter_aso_sup) {
+			attr->qos.log_meter_aso_granularity =
+				MLX5_GET(qos_cap, hcattr,
+					log_meter_aso_granularity);
+			attr->qos.log_meter_aso_max_alloc =
+				MLX5_GET(qos_cap, hcattr,
+					log_meter_aso_max_alloc);
+			attr->qos.log_max_num_meter_aso =
+				MLX5_GET(qos_cap, hcattr,
+					log_max_num_meter_aso);
+		}
 	}
 	if (attr->vdpa.valid)
 		mlx5_devx_cmd_query_hca_vdpa_attr(ctx, &attr->vdpa);
@@ -763,17 +811,14 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
 		MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE |
 		MLX5_HCA_CAP_OPMOD_GET_CUR);
 
-	rc = mlx5_glue->devx_general_cmd(ctx,
-					 in, sizeof(in),
-					 out, sizeof(out));
+	rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
 	if (rc)
 		goto error;
 	status = MLX5_GET(query_hca_cap_out, out, status);
 	syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
 	if (status) {
 		DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
-			"status %x, syndrome = %x",
-			status, syndrome);
+			"status %x, syndrome = %x", status, syndrome);
 		attr->log_max_ft_sampler_num = 0;
 		return -1;
 	}
@@ -790,9 +835,7 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
 		MLX5_GET_HCA_CAP_OP_MOD_ETHERNET_OFFLOAD_CAPS |
 		MLX5_HCA_CAP_OPMOD_GET_CUR);
 
-	rc = mlx5_glue->devx_general_cmd(ctx,
-					 in, sizeof(in),
-					 out, sizeof(out));
+	rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
 	if (rc) {
 		attr->eth_net_offloads = 0;
 		goto error;
@@ -801,8 +844,7 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
 	syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
 	if (status) {
 		DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
-			"status %x, syndrome = %x",
-			status, syndrome);
+			"status %x, syndrome = %x", status, syndrome);
 		attr->eth_net_offloads = 0;
 		return -1;
 	}
@@ -836,9 +878,35 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
 	attr->tunnel_stateless_gtp = MLX5_GET
 					(per_protocol_networking_offload_caps,
 					 hcattr, tunnel_stateless_gtp);
-	if (attr->wqe_inline_mode != MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
-		return 0;
-	if (attr->eth_virt) {
+	attr->rss_ind_tbl_cap = MLX5_GET
+					(per_protocol_networking_offload_caps,
+					 hcattr, rss_ind_tbl_cap);
+	/* Query HCA attribute for ROCE. */
+	if (attr->roce) {
+		memset(in, 0, sizeof(in));
+		memset(out, 0, sizeof(out));
+		MLX5_SET(query_hca_cap_in, in, opcode,
+			 MLX5_CMD_OP_QUERY_HCA_CAP);
+		MLX5_SET(query_hca_cap_in, in, op_mod,
+			 MLX5_GET_HCA_CAP_OP_MOD_ROCE |
+			 MLX5_HCA_CAP_OPMOD_GET_CUR);
+		rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in),
+						 out, sizeof(out));
+		if (rc)
+			goto error;
+		status = MLX5_GET(query_hca_cap_out, out, status);
+		syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
+		if (status) {
+			DRV_LOG(DEBUG,
+				"Failed to query devx HCA ROCE capabilities, "
+				"status %x, syndrome = %x", status, syndrome);
+			return -1;
+		}
+		hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+		attr->qp_ts_format = MLX5_GET(roce_caps, hcattr, qp_ts_format);
+	}
+	if (attr->eth_virt &&
+	    attr->wqe_inline_mode == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT) {
 		rc = mlx5_devx_cmd_query_nic_vport_context(ctx, 0, attr);
 		if (rc) {
 			attr->eth_virt = 0;
@@ -918,7 +986,9 @@ devx_cmd_fill_wq_data(void *wq_ctx, struct mlx5_devx_wq_attr *wq_attr)
 	MLX5_SET(wq, wq_ctx, hw_counter, wq_attr->hw_counter);
 	MLX5_SET(wq, wq_ctx, sw_counter, wq_attr->sw_counter);
 	MLX5_SET(wq, wq_ctx, log_wq_stride, wq_attr->log_wq_stride);
-	MLX5_SET(wq, wq_ctx, log_wq_pg_sz, wq_attr->log_wq_pg_sz);
+	if (wq_attr->log_wq_pg_sz > MLX5_ADAPTER_PAGE_SHIFT)
+		MLX5_SET(wq, wq_ctx, log_wq_pg_sz,
+			 wq_attr->log_wq_pg_sz - MLX5_ADAPTER_PAGE_SHIFT);
 	MLX5_SET(wq, wq_ctx, log_wq_sz, wq_attr->log_wq_sz);
 	MLX5_SET(wq, wq_ctx, dbr_umem_valid, wq_attr->dbr_umem_valid);
 	MLX5_SET(wq, wq_ctx, wq_umem_valid, wq_attr->wq_umem_valid);
@@ -979,6 +1049,7 @@ mlx5_devx_cmd_create_rq(void *ctx,
 	MLX5_SET(rqc, rq_ctx, cqn, rq_attr->cqn);
 	MLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id);
 	MLX5_SET(rqc, rq_ctx, rmpn, rq_attr->rmpn);
+	MLX5_SET(sqc, rq_ctx, ts_format, rq_attr->ts_format);
 	wq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq);
 	wq_attr = &rq_attr->wq_attr;
 	devx_cmd_fill_wq_data(wq_ctx, wq_attr);
@@ -1351,6 +1422,7 @@ mlx5_devx_cmd_create_sq(void *ctx,
 		 sq_attr->packet_pacing_rate_limit_index);
 	MLX5_SET(sqc, sq_ctx, tis_lst_sz, sq_attr->tis_lst_sz);
 	MLX5_SET(sqc, sq_ctx, tis_num_0, sq_attr->tis_num);
+	MLX5_SET(sqc, sq_ctx, ts_format, sq_attr->ts_format);
 	wq_ctx = MLX5_ADDR_OF(sqc, sq_ctx, wq);
 	wq_attr = &sq_attr->wq_attr;
 	devx_cmd_fill_wq_data(wq_ctx, wq_attr);
@@ -1526,6 +1598,20 @@ mlx5_devx_cmd_flow_dump(void *fdb_domain __rte_unused,
 	return -ret;
 }
 
+int
+mlx5_devx_cmd_flow_single_dump(void *rule_info __rte_unused,
+			FILE *file __rte_unused)
+{
+	int ret = 0;
+#ifdef HAVE_MLX5_DR_FLOW_DUMP_RULE
+	if (rule_info)
+		ret = mlx5_glue->dr_dump_rule(file, rule_info);
+#else
+	ret = ENOTSUP;
+#endif
+	return -ret;
+}
+
 /*
  * Create CQ using DevX API.
  *
@@ -1560,20 +1646,20 @@ mlx5_devx_cmd_create_cq(void *ctx, struct mlx5_devx_cq_attr *attr)
 	} else {
 		MLX5_SET64(cqc, cqctx, dbr_addr, attr->db_addr);
 	}
-	MLX5_SET(cqc, cqctx, cqe_sz, attr->cqe_size);
+	MLX5_SET(cqc, cqctx, cqe_sz, (RTE_CACHE_LINE_SIZE == 128) ?
+		 MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B);
 	MLX5_SET(cqc, cqctx, cc, attr->use_first_only);
 	MLX5_SET(cqc, cqctx, oi, attr->overrun_ignore);
 	MLX5_SET(cqc, cqctx, log_cq_size, attr->log_cq_size);
-	MLX5_SET(cqc, cqctx, log_page_size, attr->log_page_size -
-		 MLX5_ADAPTER_PAGE_SHIFT);
+	if (attr->log_page_size > MLX5_ADAPTER_PAGE_SHIFT)
+		MLX5_SET(cqc, cqctx, log_page_size,
+			 attr->log_page_size - MLX5_ADAPTER_PAGE_SHIFT);
 	MLX5_SET(cqc, cqctx, c_eqn, attr->eqn);
 	MLX5_SET(cqc, cqctx, uar_page, attr->uar_page_id);
 	MLX5_SET(cqc, cqctx, cqe_comp_en, !!attr->cqe_comp_en);
-	MLX5_SET(cqc, cqctx, mini_cqe_res_format,
-		 attr->mini_cqe_res_format);
+	MLX5_SET(cqc, cqctx, mini_cqe_res_format, attr->mini_cqe_res_format);
 	MLX5_SET(cqc, cqctx, mini_cqe_res_format_ext,
 		 attr->mini_cqe_res_format_ext);
-	MLX5_SET(cqc, cqctx, cqe_sz, attr->cqe_size);
 	if (attr->q_umem_valid) {
 		MLX5_SET(create_cq_in, in, cq_umem_valid, attr->q_umem_valid);
 		MLX5_SET(create_cq_in, in, cq_umem_id, attr->q_umem_id);
@@ -1653,6 +1739,9 @@ mlx5_devx_cmd_create_virtq(void *ctx,
 	MLX5_SET64(virtio_q, virtctx, umem_3_offset, attr->umems[2].offset);
 	MLX5_SET(virtio_q, virtctx, counter_set_id, attr->counters_obj_id);
 	MLX5_SET(virtio_q, virtctx, pd, attr->pd);
+	MLX5_SET(virtio_q, virtctx, queue_period_mode, attr->hw_latency_mode);
+	MLX5_SET(virtio_q, virtctx, queue_period_us, attr->hw_max_latency_us);
+	MLX5_SET(virtio_q, virtctx, queue_max_count, attr->hw_max_pending_comp);
 	MLX5_SET(virtio_net_q, virtq, tisn_or_qpn, attr->tis_id);
 	virtq_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
 						    sizeof(out));
@@ -1797,11 +1886,13 @@ mlx5_devx_cmd_create_qp(void *ctx,
 	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
 	MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
 	MLX5_SET(qpc, qpc, pd, attr->pd);
+	MLX5_SET(qpc, qpc, ts_format, attr->ts_format);
 	if (attr->uar_index) {
 		MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
 		MLX5_SET(qpc, qpc, uar_page, attr->uar_index);
-		MLX5_SET(qpc, qpc, log_page_size, attr->log_page_size -
-			 MLX5_ADAPTER_PAGE_SHIFT);
+		if (attr->log_page_size > MLX5_ADAPTER_PAGE_SHIFT)
+			MLX5_SET(qpc, qpc, log_page_size,
+				 attr->log_page_size - MLX5_ADAPTER_PAGE_SHIFT);
 		if (attr->sq_size) {
 			MLX5_ASSERT(RTE_IS_POWER_OF_2(attr->sq_size));
 			MLX5_SET(qpc, qpc, cqn_snd, attr->cqn);
@@ -2046,3 +2137,193 @@ mlx5_devx_cmd_create_flow_hit_aso_obj(void *ctx, uint32_t pd)
 	flow_hit_aso_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
 	return flow_hit_aso_obj;
 }
+
+/*
+ * Create PD using DevX API.
+ *
+ * @param[in] ctx
+ *   Context returned from mlx5 open_device() glue function.
+ *
+ * @return
+ *   The DevX object created, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_alloc_pd(void *ctx)
+{
+	struct mlx5_devx_obj *ppd =
+		mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ppd), 0, SOCKET_ID_ANY);
+	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {0};
+	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {0};
+
+	if (!ppd) {
+		DRV_LOG(ERR, "Failed to allocate PD data.");
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
+	ppd->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
+				out, sizeof(out));
+	if (!ppd->obj) {
+		mlx5_free(ppd);
+		DRV_LOG(ERR, "Failed to allocate PD Obj using DevX.");
+		rte_errno = errno;
+		return NULL;
+	}
+	ppd->id = MLX5_GET(alloc_pd_out, out, pd);
+	return ppd;
+}
+
+/**
+ * Create general object of type GENEVE TLV option using DevX API.
+ *
+ * @param[in] ctx
+ *   Context returned from mlx5 open_device() glue function.
+ * @param [in] class
+ *   TLV option variable value of class
+ * @param [in] type
+ *   TLV option variable value of type
+ * @param [in] len
+ *   TLV option variable value of len
+ *
+ * @return
+ *   The DevX object created, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_geneve_tlv_option(void *ctx,
+		uint16_t class, uint8_t type, uint8_t len)
+{
+	uint32_t in[MLX5_ST_SZ_DW(create_geneve_tlv_option_in)] = {0};
+	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+	struct mlx5_devx_obj *geneve_tlv_opt_obj = mlx5_malloc(MLX5_MEM_ZERO,
+						   sizeof(*geneve_tlv_opt_obj),
+						   0, SOCKET_ID_ANY);
+
+	if (!geneve_tlv_opt_obj) {
+		DRV_LOG(ERR, "Failed to allocate geneve tlv option object.");
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	void *hdr = MLX5_ADDR_OF(create_geneve_tlv_option_in, in, hdr);
+	void *opt = MLX5_ADDR_OF(create_geneve_tlv_option_in, in,
+			geneve_tlv_opt);
+	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
+			MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
+			MLX5_OBJ_TYPE_GENEVE_TLV_OPT);
+	MLX5_SET(geneve_tlv_option, opt, option_class,
+			rte_be_to_cpu_16(class));
+	MLX5_SET(geneve_tlv_option, opt, option_type, type);
+	MLX5_SET(geneve_tlv_option, opt, option_data_length, len);
+	geneve_tlv_opt_obj->obj = mlx5_glue->devx_obj_create(ctx, in,
+					sizeof(in), out, sizeof(out));
+	if (!geneve_tlv_opt_obj->obj) {
+		rte_errno = errno;
+		DRV_LOG(ERR, "Failed to create Geneve tlv option "
+				"Obj using DevX.");
+		mlx5_free(geneve_tlv_opt_obj);
+		return NULL;
+	}
+	geneve_tlv_opt_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+	return geneve_tlv_opt_obj;
+}
+
+int
+mlx5_devx_cmd_wq_query(void *wq, uint32_t *counter_set_id)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	uint32_t in[MLX5_ST_SZ_DW(query_rq_in)] = {0};
+	uint32_t out[MLX5_ST_SZ_DW(query_rq_out)] = {0};
+	int rc;
+	void *rq_ctx;
+
+	MLX5_SET(query_rq_in, in, opcode, MLX5_CMD_OP_QUERY_RQ);
+	MLX5_SET(query_rq_in, in, rqn, ((struct ibv_wq *)wq)->wq_num);
+	rc = mlx5_glue->devx_wq_query(wq, in, sizeof(in), out, sizeof(out));
+	if (rc) {
+		rte_errno = errno;
+		DRV_LOG(ERR, "Failed to query WQ counter set ID using DevX - "
+			"rc = %d, errno = %d.", rc, errno);
+		return -rc;
+	};
+	rq_ctx = MLX5_ADDR_OF(query_rq_out, out, rq_context);
+	*counter_set_id = MLX5_GET(rqc, rq_ctx, counter_set_id);
+	return 0;
+#else
+	(void)wq;
+	(void)counter_set_id;
+	return -ENOTSUP;
+#endif
+}
+
+/*
+ * Allocate queue counters via devx interface.
+ *
+ * @param[in] ctx
+ *   Context returned from mlx5 open_device() glue function.
+ *
+ * @return
+ *   Pointer to counter object on success, a NULL value otherwise and
+ *   rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_queue_counter_alloc(void *ctx)
+{
+	struct mlx5_devx_obj *dcs = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*dcs), 0,
+						SOCKET_ID_ANY);
+	uint32_t in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
+	uint32_t out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
+
+	if (!dcs) {
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
+	dcs->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
+					      sizeof(out));
+	if (!dcs->obj) {
+		DRV_LOG(DEBUG, "Can't allocate q counter set by DevX - error "
+			"%d.", errno);
+		rte_errno = errno;
+		mlx5_free(dcs);
+		return NULL;
+	}
+	dcs->id = MLX5_GET(alloc_q_counter_out, out, counter_set_id);
+	return dcs;
+}
+
+/**
+ * Query queue counters values.
+ *
+ * @param[in] dcs
+ *   devx object of the queue counter set.
+ * @param[in] clear
+ *   Whether hardware should clear the counters after the query or not.
+ * @param[out] out_of_buffers
+ *   Number of dropped occurred due to lack of WQE for the associated QPs/RQs.
+ *
+ * @return
+ *   0 on success, a negative value otherwise.
+ */
+int
+mlx5_devx_cmd_queue_counter_query(struct mlx5_devx_obj *dcs, int clear,
+				  uint32_t *out_of_buffers)
+{
+	uint32_t out[MLX5_ST_SZ_BYTES(query_q_counter_out)] = {0};
+	uint32_t in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};
+	int rc;
+
+	MLX5_SET(query_q_counter_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_Q_COUNTER);
+	MLX5_SET(query_q_counter_in, in, op_mod, 0);
+	MLX5_SET(query_q_counter_in, in, counter_set_id, dcs->id);
+	MLX5_SET(query_q_counter_in, in, clear, !!clear);
+	rc = mlx5_glue->devx_obj_query(dcs->obj, in, sizeof(in), out,
+				       sizeof(out));
+	if (rc) {
+		DRV_LOG(ERR, "Failed to query devx q counter set - rc %d", rc);
+		rte_errno = rc;
+		return -rc;
+	}
+	*out_of_buffers = MLX5_GET(query_q_counter_out, out, out_of_buffer);
+	return 0;
+}
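
The hunks above that guard the log_page_size and log_wq_pg_sz writes (in devx_cmd_fill_wq_data, mlx5_devx_cmd_create_cq, and mlx5_devx_cmd_create_qp) all share one convention: the device field holds log2 of the page size rebased on MLX5_ADAPTER_PAGE_SHIFT (12, the 4 KiB adapter page defined in mlx5_prm.h), and the field's reset value of 0 already means 4 KiB, so the write is skipped for pages of 4 KiB or less, which also avoids an underflow. A minimal standalone sketch of the conversion, assuming only the MLX5_ADAPTER_PAGE_SHIFT value; the helper name is hypothetical:

#include <stdint.h>

#define MLX5_ADAPTER_PAGE_SHIFT 12	/* 4 KiB adapter page, per mlx5_prm.h */

/* Hypothetical helper mirroring the guarded MLX5_SET() calls above: a
 * 4 KiB host page (log 12) maps to 0 (the hardware default), a 64 KiB
 * page (log 16) maps to 4, and values at or below the adapter page
 * shift stay 0 instead of wrapping around. */
static inline uint32_t
devx_log_page_size_field(uint32_t log_page_size)
{
	return log_page_size > MLX5_ADAPTER_PAGE_SHIFT ?
	       log_page_size - MLX5_ADAPTER_PAGE_SHIFT : 0;
}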
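
The new mlx5_devx_cmd_create_geneve_tlv_option() takes the option class in network byte order (the helper converts it back with rte_be_to_cpu_16() before writing option_class) and the option data length in 4-byte words, matching the GENEVE option header layout. A hedged call sketch with illustrative values only; the wrapper function and option numbers are made up:

#include <rte_byteorder.h>
#include "mlx5_devx_cmds.h"	/* driver-internal header, for illustration */

static struct mlx5_devx_obj *
example_register_geneve_opt(void *ctx)
{
	/* Class 0x0102 passed big-endian via RTE_BE16(), type 0x42, and
	 * 2 DWs of option data; on failure NULL is returned and rte_errno
	 * is set by the helper. */
	return mlx5_devx_cmd_create_geneve_tlv_option(ctx, RTE_BE16(0x0102),
						      0x42, 2);
}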
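
The queue-counter helpers added at the end of the patch pair up as alloc/query: queues created with counter_set_id set to the allocated object's id feed the counter set, and the query returns the accumulated out_of_buffer drops. A hedged usage sketch, assuming a valid DevX ctx and relying on mlx5_devx_cmd_destroy(), the destructor this file already provides; the wrapper function name is hypothetical:

#include <stdio.h>
#include <stdint.h>
#include "mlx5_devx_cmds.h"	/* driver-internal header, for illustration */

static void
example_report_oob_drops(void *ctx)
{
	struct mlx5_devx_obj *dcs;
	uint32_t oob = 0;

	dcs = mlx5_devx_cmd_queue_counter_alloc(ctx);
	if (dcs == NULL)
		return;	/* rte_errno was set by the helper. */
	/* RQs created with rq_attr.counter_set_id = dcs->id feed this set. */
	if (mlx5_devx_cmd_queue_counter_query(dcs, 0 /* no clear */, &oob) == 0)
		printf("out_of_buffer drops: %u\n", oob);
	mlx5_devx_cmd_destroy(dcs);
}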