X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcommon%2Fmlx5%2Fmlx5_devx_cmds.c;h=7c81ae15a92d21faab7c1abc4bf4b24ac295bc0e;hb=972a1bf8120d971c04c065221c46c37c266b1f62;hp=fba485e72459dce6ba98db187498545c76883173;hpb=ffd5b302baabdc96fcd6deb58d8cd3ec39020c5c;p=dpdk.git

diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c b/drivers/common/mlx5/mlx5_devx_cmds.c
index fba485e724..7c81ae15a9 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.c
+++ b/drivers/common/mlx5/mlx5_devx_cmds.c
@@ -5,12 +5,75 @@
 #include
 #include
+#include
 
 #include "mlx5_prm.h"
 #include "mlx5_devx_cmds.h"
 #include "mlx5_common_utils.h"
+#include "mlx5_malloc.h"
 
+/**
+ * Perform read access to the registers. Reads data from the register
+ * and writes them to the specified buffer.
+ *
+ * @param[in] ctx
+ *   Context returned from mlx5 open_device() glue function.
+ * @param[in] reg_id
+ *   Register identifier according to the PRM.
+ * @param[in] arg
+ *   Register access auxiliary parameter according to the PRM.
+ * @param[out] data
+ *   Pointer to the buffer to store read data.
+ * @param[in] dw_cnt
+ *   Buffer size in double words.
+ *
+ * @return
+ *   0 on success, a negative value otherwise.
+ */
+int
+mlx5_devx_cmd_register_read(void *ctx, uint16_t reg_id, uint32_t arg,
+                            uint32_t *data, uint32_t dw_cnt)
+{
+        uint32_t in[MLX5_ST_SZ_DW(access_register_in)] = {0};
+        uint32_t out[MLX5_ST_SZ_DW(access_register_out) +
+                     MLX5_ACCESS_REGISTER_DATA_DWORD_MAX] = {0};
+        int status, rc;
+
+        MLX5_ASSERT(data && dw_cnt);
+        MLX5_ASSERT(dw_cnt <= MLX5_ACCESS_REGISTER_DATA_DWORD_MAX);
+        if (dw_cnt > MLX5_ACCESS_REGISTER_DATA_DWORD_MAX) {
+                DRV_LOG(ERR, "Not enough buffer for register read data");
+                return -1;
+        }
+        MLX5_SET(access_register_in, in, opcode,
+                 MLX5_CMD_OP_ACCESS_REGISTER_USER);
+        MLX5_SET(access_register_in, in, op_mod,
+                 MLX5_ACCESS_REGISTER_IN_OP_MOD_READ);
+        MLX5_SET(access_register_in, in, register_id, reg_id);
+        MLX5_SET(access_register_in, in, argument, arg);
+        rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out,
+                                         MLX5_ST_SZ_DW(access_register_out) *
+                                         sizeof(uint32_t) + dw_cnt);
+        if (rc)
+                goto error;
+        status = MLX5_GET(access_register_out, out, status);
+        if (status) {
+                int syndrome = MLX5_GET(access_register_out, out, syndrome);
+
+                DRV_LOG(DEBUG, "Failed to access NIC register 0x%X, "
+                        "status %x, syndrome = %x",
+                        reg_id, status, syndrome);
+                return -1;
+        }
+        memcpy(data, &out[MLX5_ST_SZ_DW(access_register_out)],
+               dw_cnt * sizeof(uint32_t));
+        return 0;
+error:
+        rc = (rc > 0) ? -rc : rc;
+        return rc;
+}
+
 /**
  * Allocate flow counters via devx interface.
  *
@@ -28,7 +91,8 @@ struct mlx5_devx_obj *
 mlx5_devx_cmd_flow_counter_alloc(void *ctx, uint32_t bulk_n_128)
 {
-        struct mlx5_devx_obj *dcs = rte_zmalloc("dcs", sizeof(*dcs), 0);
+        struct mlx5_devx_obj *dcs = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*dcs),
+                                                0, SOCKET_ID_ANY);
         uint32_t in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
         uint32_t out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
 
@@ -44,7 +108,7 @@ mlx5_devx_cmd_flow_counter_alloc(void *ctx, uint32_t bulk_n_128)
         if (!dcs->obj) {
                 DRV_LOG(ERR, "Can't allocate counters - error %d", errno);
                 rte_errno = errno;
-                rte_free(dcs);
+                mlx5_free(dcs);
                 return NULL;
         }
         dcs->id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
@@ -149,7 +213,8 @@ mlx5_devx_cmd_mkey_create(void *ctx,
         uint32_t in[in_size_dw];
         uint32_t out[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
         void *mkc;
-        struct mlx5_devx_obj *mkey = rte_zmalloc("mkey", sizeof(*mkey), 0);
+        struct mlx5_devx_obj *mkey = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mkey),
+                                                 0, SOCKET_ID_ANY);
         size_t pgsize;
         uint32_t translation_size;
 
@@ -158,7 +223,13 @@ mlx5_devx_cmd_mkey_create(void *ctx,
                 return NULL;
         }
         memset(in, 0, in_size_dw * 4);
-        pgsize = sysconf(_SC_PAGESIZE);
+        pgsize = rte_mem_page_size();
+        if (pgsize == (size_t)-1) {
+                mlx5_free(mkey);
+                DRV_LOG(ERR, "Failed to get page size");
+                rte_errno = ENOMEM;
+                return NULL;
+        }
         MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
         mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
         if (klm_num > 0) {
@@ -208,7 +279,7 @@ mlx5_devx_cmd_mkey_create(void *ctx,
                 DRV_LOG(ERR, "Can't create %sdirect mkey - error %d\n",
                         klm_num ? "an in" : "a ", errno);
                 rte_errno = errno;
-                rte_free(mkey);
+                mlx5_free(mkey);
                 return NULL;
         }
         mkey->id = MLX5_GET(create_mkey_out, out, mkey_index);
@@ -260,7 +331,7 @@ mlx5_devx_cmd_destroy(struct mlx5_devx_obj *obj)
         if (!obj)
                 return 0;
         ret = mlx5_glue->devx_obj_destroy(obj->obj);
-        rte_free(obj);
+        mlx5_free(obj);
         return ret;
 }
 
@@ -396,6 +467,168 @@ mlx5_devx_cmd_query_hca_vdpa_attr(void *ctx,
         }
 }
 
+int
+mlx5_devx_cmd_query_parse_samples(struct mlx5_devx_obj *flex_obj,
+                                  uint32_t ids[], uint32_t num)
+{
+        uint32_t in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
+        uint32_t out[MLX5_ST_SZ_DW(create_flex_parser_out)] = {0};
+        void *hdr = MLX5_ADDR_OF(create_flex_parser_out, in, hdr);
+        void *flex = MLX5_ADDR_OF(create_flex_parser_out, out, flex);
+        void *sample = MLX5_ADDR_OF(parse_graph_flex, flex, sample_table);
+        int ret;
+        uint32_t idx = 0;
+        uint32_t i;
+
+        if (num > MLX5_GRAPH_NODE_SAMPLE_NUM) {
+                rte_errno = EINVAL;
+                DRV_LOG(ERR, "Too many sample IDs to be fetched.");
+                return -rte_errno;
+        }
+        MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
+                 MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
+        MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
+                 MLX5_GENERAL_OBJ_TYPE_FLEX_PARSE_GRAPH);
+        MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, flex_obj->id);
+        ret = mlx5_glue->devx_obj_query(flex_obj->obj, in, sizeof(in),
+                                        out, sizeof(out));
+        if (ret) {
+                rte_errno = ret;
+                DRV_LOG(ERR, "Failed to query sample IDs with object %p.",
+                        (void *)flex_obj);
+                return -rte_errno;
+        }
+        for (i = 0; i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) {
+                void *s_off = (void *)((char *)sample + i *
+                              MLX5_ST_SZ_BYTES(parse_graph_flow_match_sample));
+                uint32_t en;
+
+                en = MLX5_GET(parse_graph_flow_match_sample, s_off,
+                              flow_match_sample_en);
+                if (!en)
+                        continue;
+                ids[idx++] = MLX5_GET(parse_graph_flow_match_sample, s_off,
+                                      flow_match_sample_field_id);
+        }
+        if (num != idx) {
+                rte_errno = EINVAL;
+                DRV_LOG(ERR, "Number of sample IDs are not as expected.");
+                return -rte_errno;
+        }
+        return ret;
+}
+
+
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_flex_parser(void *ctx,
+                                 struct mlx5_devx_graph_node_attr *data)
+{
+        uint32_t in[MLX5_ST_SZ_DW(create_flex_parser_in)] = {0};
+        uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+        void *hdr = MLX5_ADDR_OF(create_flex_parser_in, in, hdr);
+        void *flex = MLX5_ADDR_OF(create_flex_parser_in, in, flex);
+        void *sample = MLX5_ADDR_OF(parse_graph_flex, flex, sample_table);
+        void *in_arc = MLX5_ADDR_OF(parse_graph_flex, flex, input_arc);
+        void *out_arc = MLX5_ADDR_OF(parse_graph_flex, flex, output_arc);
+        struct mlx5_devx_obj *parse_flex_obj = NULL;
+        uint32_t i;
+
+        parse_flex_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*parse_flex_obj), 0,
+                                     SOCKET_ID_ANY);
+        if (!parse_flex_obj) {
+                DRV_LOG(ERR, "Failed to allocate flex parser data");
+                rte_errno = ENOMEM;
+                mlx5_free(in);
+                return NULL;
+        }
+        MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
+                 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+        MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
+                 MLX5_GENERAL_OBJ_TYPE_FLEX_PARSE_GRAPH);
+        MLX5_SET(parse_graph_flex, flex, header_length_mode,
+                 data->header_length_mode);
+        MLX5_SET(parse_graph_flex, flex, header_length_base_value,
+                 data->header_length_base_value);
+        MLX5_SET(parse_graph_flex, flex, header_length_field_offset,
+                 data->header_length_field_offset);
+        MLX5_SET(parse_graph_flex, flex, header_length_field_shift,
+                 data->header_length_field_shift);
+        MLX5_SET(parse_graph_flex, flex, header_length_field_mask,
+                 data->header_length_field_mask);
+        for (i = 0; i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) {
+                struct mlx5_devx_match_sample_attr *s = &data->sample[i];
+                void *s_off = (void *)((char *)sample + i *
+                              MLX5_ST_SZ_BYTES(parse_graph_flow_match_sample));
+
+                if (!s->flow_match_sample_en)
+                        continue;
+                MLX5_SET(parse_graph_flow_match_sample, s_off,
+                         flow_match_sample_en, !!s->flow_match_sample_en);
+                MLX5_SET(parse_graph_flow_match_sample, s_off,
+                         flow_match_sample_field_offset,
+                         s->flow_match_sample_field_offset);
+                MLX5_SET(parse_graph_flow_match_sample, s_off,
+                         flow_match_sample_offset_mode,
+                         s->flow_match_sample_offset_mode);
+                MLX5_SET(parse_graph_flow_match_sample, s_off,
+                         flow_match_sample_field_offset_mask,
+                         s->flow_match_sample_field_offset_mask);
+                MLX5_SET(parse_graph_flow_match_sample, s_off,
+                         flow_match_sample_field_offset_shift,
+                         s->flow_match_sample_field_offset_shift);
+                MLX5_SET(parse_graph_flow_match_sample, s_off,
+                         flow_match_sample_field_base_offset,
+                         s->flow_match_sample_field_base_offset);
+                MLX5_SET(parse_graph_flow_match_sample, s_off,
+                         flow_match_sample_tunnel_mode,
+                         s->flow_match_sample_tunnel_mode);
+        }
+        for (i = 0; i < MLX5_GRAPH_NODE_ARC_NUM; i++) {
+                struct mlx5_devx_graph_arc_attr *ia = &data->in[i];
+                struct mlx5_devx_graph_arc_attr *oa = &data->out[i];
+                void *in_off = (void *)((char *)in_arc + i *
+                              MLX5_ST_SZ_BYTES(parse_graph_arc));
+                void *out_off = (void *)((char *)out_arc + i *
+                              MLX5_ST_SZ_BYTES(parse_graph_arc));
+
+                if (ia->arc_parse_graph_node != 0) {
+                        MLX5_SET(parse_graph_arc, in_off,
+                                 compare_condition_value,
+                                 ia->compare_condition_value);
+                        MLX5_SET(parse_graph_arc, in_off, start_inner_tunnel,
+                                 ia->start_inner_tunnel);
+                        MLX5_SET(parse_graph_arc, in_off, arc_parse_graph_node,
+                                 ia->arc_parse_graph_node);
+                        MLX5_SET(parse_graph_arc, in_off,
+                                 parse_graph_node_handle,
+                                 ia->parse_graph_node_handle);
+                }
+                if (oa->arc_parse_graph_node != 0) {
+                        MLX5_SET(parse_graph_arc, out_off,
+                                 compare_condition_value,
+                                 oa->compare_condition_value);
+                        MLX5_SET(parse_graph_arc, out_off, start_inner_tunnel,
+                                 oa->start_inner_tunnel);
+                        MLX5_SET(parse_graph_arc, out_off, arc_parse_graph_node,
+                                 oa->arc_parse_graph_node);
+                        MLX5_SET(parse_graph_arc, out_off,
+                                 parse_graph_node_handle,
+                                 oa->parse_graph_node_handle);
+                }
+        }
+        parse_flex_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
+                                                         out, sizeof(out));
+        if (!parse_flex_obj->obj) {
+                rte_errno = errno;
+                DRV_LOG(ERR, "Failed to create FLEX PARSE GRAPH object "
+                        "by using DevX.");
+                mlx5_free(parse_flex_obj);
+                return NULL;
+        }
+        parse_flex_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+        return parse_flex_obj;
+}
+
 /**
  * Query HCA attributes.
  * Using those attributes we can check on run time if the device
  *
@@ -416,7 +649,7 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
         uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
         uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
         void *hcattr;
-        int status, syndrome, rc;
+        int status, syndrome, rc, i;
 
         MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
         MLX5_SET(query_hca_cap_in, in, op_mod,
@@ -455,6 +688,8 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
                                                 relaxed_ordering_write);
         attr->relaxed_ordering_read = MLX5_GET(cmd_hca_cap, hcattr,
                                                relaxed_ordering_read);
+        attr->access_register_user = MLX5_GET(cmd_hca_cap, hcattr,
+                                              access_register_user);
         attr->eth_net_offloads = MLX5_GET(cmd_hca_cap, hcattr,
                                           eth_net_offloads);
         attr->eth_virt = MLX5_GET(cmd_hca_cap, hcattr, eth_virt);
@@ -464,6 +699,25 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
         attr->vdpa.valid = !!(MLX5_GET64(cmd_hca_cap, hcattr,
                                          general_obj_types) &
                               MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q);
+        attr->vdpa.queue_counters_valid = !!(MLX5_GET64(cmd_hca_cap, hcattr,
+                                             general_obj_types) &
+                                  MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_Q_COUNTERS);
+        attr->parse_graph_flex_node = !!(MLX5_GET64(cmd_hca_cap, hcattr,
+                                         general_obj_types) &
+                              MLX5_GENERAL_OBJ_TYPES_CAP_PARSE_GRAPH_FLEX_NODE);
+        attr->wqe_index_ignore = MLX5_GET(cmd_hca_cap, hcattr,
+                                          wqe_index_ignore_cap);
+        attr->cross_channel = MLX5_GET(cmd_hca_cap, hcattr, cd);
+        attr->non_wire_sq = MLX5_GET(cmd_hca_cap, hcattr, non_wire_sq);
+        attr->log_max_static_sq_wq = MLX5_GET(cmd_hca_cap, hcattr,
+                                              log_max_static_sq_wq);
+        attr->dev_freq_khz = MLX5_GET(cmd_hca_cap, hcattr,
+                                      device_frequency_khz);
+        attr->scatter_fcs_w_decap_disable =
+                MLX5_GET(cmd_hca_cap, hcattr, scatter_fcs_w_decap_disable);
+        attr->regex = MLX5_GET(cmd_hca_cap, hcattr, regexp);
+        attr->regexp_num_of_engines = MLX5_GET(cmd_hca_cap, hcattr,
+                                               regexp_num_of_engines);
         if (attr->qos.sup) {
                 MLX5_SET(query_hca_cap_in, in, op_mod,
                          MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
@@ -484,9 +738,13 @@
                 attr->qos.log_max_flow_meter =
                         MLX5_GET(qos_cap, hcattr, log_max_flow_meter);
                 attr->qos.flow_meter_reg_c_ids =
-                                MLX5_GET(qos_cap, hcattr, flow_meter_reg_id);
+                        MLX5_GET(qos_cap, hcattr, flow_meter_reg_id);
                 attr->qos.flow_meter_reg_share =
-                                MLX5_GET(qos_cap, hcattr, flow_meter_reg_share);
+                        MLX5_GET(qos_cap, hcattr, flow_meter_reg_share);
+                attr->qos.packet_pacing =
+                        MLX5_GET(qos_cap, hcattr, packet_pacing);
+                attr->qos.wqe_rate_pp =
+                        MLX5_GET(qos_cap, hcattr, wqe_rate_pp);
         }
         if (attr->vdpa.valid)
                 mlx5_devx_cmd_query_hca_vdpa_attr(ctx, &attr->vdpa);
@@ -529,7 +787,7 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
         attr->lro_max_msg_sz_mode = MLX5_GET
                                         (per_protocol_networking_offload_caps,
                                          hcattr, lro_max_msg_sz_mode);
-        for (int i = 0 ; i < MLX5_LRO_NUM_SUPP_PERIODS ; i++) {
+        for (i = 0 ; i < MLX5_LRO_NUM_SUPP_PERIODS ; i++) {
                 attr->lro_timer_supported_periods[i] =
                         MLX5_GET(per_protocol_networking_offload_caps, hcattr,
                                  lro_timer_supported_periods[i]);
@@ -577,6 +835,7 @@ int
 mlx5_devx_cmd_qp_query_tis_td(void *qp, uint32_t tis_num,
                               uint32_t *tis_td)
 {
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
         uint32_t in[MLX5_ST_SZ_DW(query_tis_in)] = {0};
         uint32_t out[MLX5_ST_SZ_DW(query_tis_out)] = {0};
         int rc;
@@ -592,6 +851,12 @@ mlx5_devx_cmd_qp_query_tis_td(void *qp, uint32_t tis_num,
         tis_ctx = MLX5_ADDR_OF(query_tis_out, out, tis_context);
         *tis_td = MLX5_GET(tisc, tis_ctx, transport_domain);
         return 0;
+#else
+        (void)qp;
+        (void)tis_num;
+        (void)tis_td;
+        return -ENOTSUP;
+#endif
 }
 
 /**
@@ -661,7 +926,7 @@ mlx5_devx_cmd_create_rq(void *ctx,
         struct mlx5_devx_wq_attr *wq_attr;
         struct mlx5_devx_obj *rq = NULL;
 
-        rq = rte_calloc_socket(__func__, 1, sizeof(*rq), 0, socket);
+        rq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rq), 0, socket);
         if (!rq) {
                 DRV_LOG(ERR, "Failed to allocate RQ data");
                 rte_errno = ENOMEM;
@@ -689,7 +954,7 @@ mlx5_devx_cmd_create_rq(void *ctx,
         if (!rq->obj) {
                 DRV_LOG(ERR, "Failed to create RQ using DevX");
                 rte_errno = errno;
-                rte_free(rq);
+                mlx5_free(rq);
                 return NULL;
         }
         rq->id = MLX5_GET(create_rq_out, out, rqn);
@@ -766,7 +1031,7 @@ mlx5_devx_cmd_create_tir(void *ctx,
         void *tir_ctx, *outer, *inner, *rss_key;
         struct mlx5_devx_obj *tir = NULL;
 
-        tir = rte_calloc(__func__, 1, sizeof(*tir), 0);
+        tir = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tir), 0, SOCKET_ID_ANY);
         if (!tir) {
                 DRV_LOG(ERR, "Failed to allocate TIR data");
                 rte_errno = ENOMEM;
@@ -808,7 +1073,7 @@ mlx5_devx_cmd_create_tir(void *ctx,
         if (!tir->obj) {
                 DRV_LOG(ERR, "Failed to create TIR using DevX");
                 rte_errno = errno;
-                rte_free(tir);
+                mlx5_free(tir);
                 return NULL;
         }
         tir->id = MLX5_GET(create_tir_out, out, tirn);
@@ -838,17 +1103,17 @@ mlx5_devx_cmd_create_rqt(void *ctx,
         struct mlx5_devx_obj *rqt = NULL;
         int i;
 
-        in = rte_calloc(__func__, 1, inlen, 0);
+        in = mlx5_malloc(MLX5_MEM_ZERO, inlen, 0, SOCKET_ID_ANY);
         if (!in) {
                 DRV_LOG(ERR, "Failed to allocate RQT IN data");
                 rte_errno = ENOMEM;
                 return NULL;
         }
-        rqt = rte_calloc(__func__, 1, sizeof(*rqt), 0);
+        rqt = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt), 0, SOCKET_ID_ANY);
         if (!rqt) {
                 DRV_LOG(ERR, "Failed to allocate RQT data");
                 rte_errno = ENOMEM;
-                rte_free(in);
+                mlx5_free(in);
                 return NULL;
         }
         MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
@@ -859,11 +1124,11 @@ mlx5_devx_cmd_create_rqt(void *ctx,
         for (i = 0; i < rqt_attr->rqt_actual_size; i++)
                 MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]);
         rqt->obj = mlx5_glue->devx_obj_create(ctx, in, inlen, out, sizeof(out));
-        rte_free(in);
+        mlx5_free(in);
         if (!rqt->obj) {
                 DRV_LOG(ERR, "Failed to create RQT using DevX");
                 rte_errno = errno;
-                rte_free(rqt);
+                mlx5_free(rqt);
                 return NULL;
         }
         rqt->id = MLX5_GET(create_rqt_out, out, rqtn);
@@ -888,7 +1153,7 @@ mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt,
         uint32_t inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) +
                          rqt_attr->rqt_actual_size * sizeof(uint32_t);
         uint32_t out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0};
-        uint32_t *in = rte_calloc(__func__, 1, inlen, 0);
+        uint32_t *in = mlx5_malloc(MLX5_MEM_ZERO, inlen, 0, SOCKET_ID_ANY);
         void *rqt_ctx;
         int i;
         int ret;
@@ -908,7 +1173,7 @@ mlx5_devx_cmd_modify_rqt(struct mlx5_devx_obj *rqt,
         for (i = 0; i < rqt_attr->rqt_actual_size; i++)
                 MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]);
         ret = mlx5_glue->devx_obj_modify(rqt->obj, in, inlen, out, sizeof(out));
-        rte_free(in);
+        mlx5_free(in);
         if (ret) {
                 DRV_LOG(ERR, "Failed to modify RQT using DevX.");
                 rte_errno = errno;
@@ -941,7 +1206,7 @@ mlx5_devx_cmd_create_sq(void *ctx,
         struct mlx5_devx_wq_attr *wq_attr;
         struct mlx5_devx_obj *sq = NULL;
 
-        sq = rte_calloc(__func__, 1, sizeof(*sq), 0);
+        sq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*sq), 0, SOCKET_ID_ANY);
         if (!sq) {
                 DRV_LOG(ERR, "Failed to allocate SQ data");
                 rte_errno = ENOMEM;
@@ -961,6 +1226,8 @@ mlx5_devx_cmd_create_sq(void *ctx,
         MLX5_SET(sqc, sq_ctx, reg_umr, sq_attr->reg_umr);
         MLX5_SET(sqc, sq_ctx, allow_swp, sq_attr->allow_swp);
         MLX5_SET(sqc, sq_ctx, hairpin, sq_attr->hairpin);
+        MLX5_SET(sqc, sq_ctx, non_wire, sq_attr->non_wire);
+        MLX5_SET(sqc, sq_ctx, static_sq_wq, sq_attr->static_sq_wq);
         MLX5_SET(sqc, sq_ctx, user_index, sq_attr->user_index);
         MLX5_SET(sqc, sq_ctx, cqn, sq_attr->cqn);
         MLX5_SET(sqc, sq_ctx, packet_pacing_rate_limit_index,
@@ -975,7 +1242,7 @@ mlx5_devx_cmd_create_sq(void *ctx,
         if (!sq->obj) {
                 DRV_LOG(ERR, "Failed to create SQ using DevX");
                 rte_errno = errno;
-                rte_free(sq);
+                mlx5_free(sq);
                 return NULL;
         }
         sq->id = MLX5_GET(create_sq_out, out, sqn);
@@ -1014,7 +1281,7 @@ mlx5_devx_cmd_modify_sq(struct mlx5_devx_obj *sq,
         if (ret) {
                 DRV_LOG(ERR, "Failed to modify SQ using DevX");
                 rte_errno = errno;
-                return -errno;
+                return -rte_errno;
         }
         return ret;
 }
@@ -1039,7 +1306,7 @@ mlx5_devx_cmd_create_tis(void *ctx,
         struct mlx5_devx_obj *tis = NULL;
         void *tis_ctx;
 
-        tis = rte_calloc(__func__, 1, sizeof(*tis), 0);
+        tis = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tis), 0, SOCKET_ID_ANY);
         if (!tis) {
                 DRV_LOG(ERR, "Failed to allocate TIS object");
                 rte_errno = ENOMEM;
@@ -1059,7 +1326,7 @@ mlx5_devx_cmd_create_tis(void *ctx,
         if (!tis->obj) {
                 DRV_LOG(ERR, "Failed to create TIS using DevX");
                 rte_errno = errno;
-                rte_free(tis);
+                mlx5_free(tis);
                 return NULL;
         }
         tis->id = MLX5_GET(create_tis_out, out, tisn);
@@ -1081,7 +1348,7 @@ mlx5_devx_cmd_create_td(void *ctx)
         uint32_t out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
         struct mlx5_devx_obj *td = NULL;
 
-        td = rte_calloc(__func__, 1, sizeof(*td), 0);
+        td = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*td), 0, SOCKET_ID_ANY);
         if (!td) {
                 DRV_LOG(ERR, "Failed to allocate TD object");
                 rte_errno = ENOMEM;
@@ -1094,7 +1361,7 @@ mlx5_devx_cmd_create_td(void *ctx)
         if (!td->obj) {
                 DRV_LOG(ERR, "Failed to create TIS using DevX");
                 rte_errno = errno;
-                rte_free(td);
+                mlx5_free(td);
                 return NULL;
         }
         td->id = MLX5_GET(alloc_transport_domain_out, out,
@@ -1158,8 +1425,9 @@ mlx5_devx_cmd_create_cq(void *ctx, struct mlx5_devx_cq_attr *attr)
 {
         uint32_t in[MLX5_ST_SZ_DW(create_cq_in)] = {0};
         uint32_t out[MLX5_ST_SZ_DW(create_cq_out)] = {0};
-        struct mlx5_devx_obj *cq_obj = rte_zmalloc(__func__, sizeof(*cq_obj),
-                                                   0);
+        struct mlx5_devx_obj *cq_obj = mlx5_malloc(MLX5_MEM_ZERO,
+                                                   sizeof(*cq_obj),
+                                                   0, SOCKET_ID_ANY);
         void *cqctx = MLX5_ADDR_OF(create_cq_in, in, cq_context);
 
         if (!cq_obj) {
@@ -1175,6 +1443,7 @@ mlx5_devx_cmd_create_cq(void *ctx, struct mlx5_devx_cq_attr *attr)
         } else {
                 MLX5_SET64(cqc, cqctx, dbr_addr, attr->db_addr);
         }
+        MLX5_SET(cqc, cqctx, cqe_sz, attr->cqe_size);
         MLX5_SET(cqc, cqctx, cc, attr->use_first_only);
         MLX5_SET(cqc, cqctx, oi, attr->overrun_ignore);
         MLX5_SET(cqc, cqctx, log_cq_size, attr->log_cq_size);
@@ -1182,6 +1451,9 @@ mlx5_devx_cmd_create_cq(void *ctx, struct mlx5_devx_cq_attr *attr)
                  MLX5_ADAPTER_PAGE_SHIFT);
         MLX5_SET(cqc, cqctx, c_eqn, attr->eqn);
         MLX5_SET(cqc, cqctx, uar_page, attr->uar_page_id);
+        MLX5_SET(cqc, cqctx, cqe_comp_en, attr->cqe_comp_en);
+        MLX5_SET(cqc, cqctx, mini_cqe_res_format, attr->mini_cqe_res_format);
+        MLX5_SET(cqc, cqctx, cqe_sz, attr->cqe_size);
         if (attr->q_umem_valid) {
                 MLX5_SET(create_cq_in, in, cq_umem_valid, attr->q_umem_valid);
                 MLX5_SET(create_cq_in, in, cq_umem_id, attr->q_umem_id);
@@ -1193,7 +1465,7 @@ mlx5_devx_cmd_create_cq(void *ctx, struct mlx5_devx_cq_attr *attr)
         if (!cq_obj->obj) {
                 rte_errno = errno;
                 DRV_LOG(ERR, "Failed to create CQ using DevX errno=%d.", errno);
-                rte_free(cq_obj);
+                mlx5_free(cq_obj);
                 return NULL;
         }
         cq_obj->id = MLX5_GET(create_cq_out, out, cqn);
@@ -1217,8 +1489,9 @@ mlx5_devx_cmd_create_virtq(void *ctx,
 {
         uint32_t in[MLX5_ST_SZ_DW(create_virtq_in)] = {0};
         uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
-        struct mlx5_devx_obj *virtq_obj = rte_zmalloc(__func__,
-                                                      sizeof(*virtq_obj), 0);
+        struct mlx5_devx_obj *virtq_obj = mlx5_malloc(MLX5_MEM_ZERO,
+                                                      sizeof(*virtq_obj),
+                                                      0, SOCKET_ID_ANY);
         void *virtq = MLX5_ADDR_OF(create_virtq_in, in, virtq);
         void *hdr = MLX5_ADDR_OF(create_virtq_in, in, hdr);
         void *virtctx = MLX5_ADDR_OF(virtio_net_q, virtq, virtio_q_context);
@@ -1258,13 +1531,15 @@ mlx5_devx_cmd_create_virtq(void *ctx,
         MLX5_SET(virtio_q, virtctx, umem_3_id, attr->umems[2].id);
         MLX5_SET(virtio_q, virtctx, umem_3_size, attr->umems[2].size);
         MLX5_SET64(virtio_q, virtctx, umem_3_offset, attr->umems[2].offset);
+        MLX5_SET(virtio_q, virtctx, counter_set_id, attr->counters_obj_id);
+        MLX5_SET(virtio_q, virtctx, pd, attr->pd);
         MLX5_SET(virtio_net_q, virtq, tisn_or_qpn, attr->tis_id);
         virtq_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
                                                     sizeof(out));
         if (!virtq_obj->obj) {
                 rte_errno = errno;
                 DRV_LOG(ERR, "Failed to create VIRTQ Obj using DevX.");
-                rte_free(virtq_obj);
+                mlx5_free(virtq_obj);
                 return NULL;
         }
         virtq_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
@@ -1325,7 +1600,7 @@ mlx5_devx_cmd_modify_virtq(struct mlx5_devx_obj *virtq_obj,
         if (ret) {
                 DRV_LOG(ERR, "Failed to modify VIRTQ using DevX.");
                 rte_errno = errno;
-                return -errno;
+                return -rte_errno;
         }
         return ret;
 }
@@ -1386,8 +1661,9 @@ mlx5_devx_cmd_create_qp(void *ctx,
 {
         uint32_t in[MLX5_ST_SZ_DW(create_qp_in)] = {0};
         uint32_t out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
-        struct mlx5_devx_obj *qp_obj = rte_zmalloc(__func__, sizeof(*qp_obj),
-                                                   0);
+        struct mlx5_devx_obj *qp_obj = mlx5_malloc(MLX5_MEM_ZERO,
+                                                   sizeof(*qp_obj),
+                                                   0, SOCKET_ID_ANY);
         void *qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
 
         if (!qp_obj) {
@@ -1442,7 +1718,7 @@ mlx5_devx_cmd_create_qp(void *ctx,
         if (!qp_obj->obj) {
                 rte_errno = errno;
                 DRV_LOG(ERR, "Failed to create QP Obj using DevX.");
-                rte_free(qp_obj);
+                mlx5_free(qp_obj);
                 return NULL;
         }
         qp_obj->id = MLX5_GET(create_qp_out, out, qpn);
@@ -1528,7 +1804,77 @@ mlx5_devx_cmd_modify_qp_state(struct mlx5_devx_obj *qp, uint32_t qp_st_mod_op,
         if (ret) {
                 DRV_LOG(ERR, "Failed to modify QP using DevX.");
                 rte_errno = errno;
+                return -rte_errno;
+        }
+        return ret;
+}
+
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_virtio_q_counters(void *ctx)
+{
+        uint32_t in[MLX5_ST_SZ_DW(create_virtio_q_counters_in)] = {0};
+        uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+        struct mlx5_devx_obj *couners_obj = mlx5_malloc(MLX5_MEM_ZERO,
+                                                        sizeof(*couners_obj), 0,
+                                                        SOCKET_ID_ANY);
+        void *hdr = MLX5_ADDR_OF(create_virtio_q_counters_in, in, hdr);
+
+        if (!couners_obj) {
+                DRV_LOG(ERR, "Failed to allocate virtio queue counters data.");
+                rte_errno = ENOMEM;
+                return NULL;
+        }
+        MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
+                 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+        MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
+                 MLX5_GENERAL_OBJ_TYPE_VIRTIO_Q_COUNTERS);
+        couners_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
+                                                      sizeof(out));
+        if (!couners_obj->obj) {
+                rte_errno = errno;
+                DRV_LOG(ERR, "Failed to create virtio queue counters Obj using"
+                        " DevX.");
+                mlx5_free(couners_obj);
+                return NULL;
+        }
+        couners_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+        return couners_obj;
+}
+
+int
+mlx5_devx_cmd_query_virtio_q_counters(struct mlx5_devx_obj *couners_obj,
+                                      struct mlx5_devx_virtio_q_couners_attr
+                                      *attr)
+{
+        uint32_t in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
+        uint32_t out[MLX5_ST_SZ_DW(query_virtio_q_counters_out)] = {0};
+        void *hdr = MLX5_ADDR_OF(query_virtio_q_counters_out, in, hdr);
+        void *virtio_q_counters = MLX5_ADDR_OF(query_virtio_q_counters_out, out,
+                                               virtio_q_counters);
+        int ret;
+
+        MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
+                 MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
+        MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
+                 MLX5_GENERAL_OBJ_TYPE_VIRTIO_Q_COUNTERS);
+        MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, couners_obj->id);
+        ret = mlx5_glue->devx_obj_query(couners_obj->obj, in, sizeof(in), out,
+                                        sizeof(out));
+        if (ret) {
+                DRV_LOG(ERR, "Failed to query virtio q counters using DevX.");
+                rte_errno = errno;
                 return -errno;
         }
+        attr->received_desc = MLX5_GET64(virtio_q_counters, virtio_q_counters,
+                                         received_desc);
+        attr->completed_desc = MLX5_GET64(virtio_q_counters, virtio_q_counters,
+                                          completed_desc);
+        attr->error_cqes = MLX5_GET(virtio_q_counters, virtio_q_counters,
+                                    error_cqes);
+        attr->bad_desc_errors = MLX5_GET(virtio_q_counters, virtio_q_counters,
+                                         bad_desc_errors);
+        attr->exceed_max_chain = MLX5_GET(virtio_q_counters, virtio_q_counters,
+                                          exceed_max_chain);
+        attr->invalid_buffer = MLX5_GET(virtio_q_counters, virtio_q_counters,
+                                        invalid_buffer);
         return ret;
 }
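
Usage sketch (appended for illustration, not part of the patch above): how a PMD-internal caller might use the new mlx5_devx_cmd_register_read() helper. The DevX context is assumed to come from mlx5_glue->open_device() as stated in the doxygen comment; "reg_id" is a placeholder for a PRM register identifier, and the header locations of MLX5_ACCESS_REGISTER_DATA_DWORD_MAX are an assumption here.

/* Sketch only: read up to the maximum number of dwords from a NIC register. */
#include <stdint.h>
#include <stdio.h>

#include "mlx5_prm.h"        /* assumed to provide MLX5_ACCESS_REGISTER_DATA_DWORD_MAX */
#include "mlx5_devx_cmds.h"  /* mlx5_devx_cmd_register_read() */

static int
example_read_register(void *devx_ctx, uint16_t reg_id)
{
        uint32_t data[MLX5_ACCESS_REGISTER_DATA_DWORD_MAX] = {0};
        int ret;

        /* dw_cnt must not exceed MLX5_ACCESS_REGISTER_DATA_DWORD_MAX,
         * otherwise the helper logs an error and returns -1.
         */
        ret = mlx5_devx_cmd_register_read(devx_ctx, reg_id, 0, data,
                                          MLX5_ACCESS_REGISTER_DATA_DWORD_MAX);
        if (ret) {
                printf("register 0x%x read failed: %d\n", reg_id, ret);
                return ret;
        }
        printf("register 0x%x first dword: 0x%08x\n",
               reg_id, (unsigned int)data[0]);
        return 0;
}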
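A second sketch, for the flex parser sample query added in this patch: mlx5_devx_cmd_query_parse_samples() expects "num" to equal the number of samples that were enabled when the graph node was created with mlx5_devx_cmd_create_flex_parser(); otherwise it sets rte_errno = EINVAL. The value 2 below is only an example, and MLX5_GRAPH_NODE_SAMPLE_NUM is assumed to come from mlx5_prm.h.

/* Sketch only: fetch the sample field IDs of an existing flex parser node. */
#include <rte_errno.h>

#include "mlx5_prm.h"
#include "mlx5_devx_cmds.h"

static int
example_flex_parser_samples(struct mlx5_devx_obj *flex_node)
{
        uint32_t ids[MLX5_GRAPH_NODE_SAMPLE_NUM];
        uint32_t num = 2; /* number of samples enabled at creation time (example) */

        if (mlx5_devx_cmd_query_parse_samples(flex_node, ids, num))
                return -rte_errno;
        DRV_LOG(DEBUG, "sample[0] field id = 0x%x", ids[0]);
        return 0;
}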
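A last sketch, for the virtio queue counters pair added at the end of the patch: a counters object is created, its ID is passed to the virtq through the counters_obj_id field set by this patch, and the counters are read back later. The struct name mlx5_devx_virtq_attr and the exact field types of struct mlx5_devx_virtio_q_couners_attr are assumptions based on mlx5_devx_cmds.h; error handling is trimmed.

/* Sketch only: attach a virtio queue counter set to a virtq and query it. */
#include <inttypes.h>

#include "mlx5_devx_cmds.h"

static void
example_virtq_counters(void *devx_ctx, struct mlx5_devx_virtq_attr *virtq_attr)
{
        struct mlx5_devx_obj *counters;
        struct mlx5_devx_obj *virtq;
        struct mlx5_devx_virtio_q_couners_attr stats = {0};

        counters = mlx5_devx_cmd_create_virtio_q_counters(devx_ctx);
        if (!counters)
                return;
        /* Attach the counter set to the queue before creating it. */
        virtq_attr->counters_obj_id = counters->id;
        virtq = mlx5_devx_cmd_create_virtq(devx_ctx, virtq_attr);
        if (!virtq) {
                mlx5_devx_cmd_destroy(counters);
                return;
        }
        /* ... run traffic, then read the counters back ... */
        if (!mlx5_devx_cmd_query_virtio_q_counters(counters, &stats))
                DRV_LOG(INFO, "received_desc=%" PRIu64 " error_cqes=%u",
                        stats.received_desc, stats.error_cqes);
        mlx5_devx_cmd_destroy(virtq);
        mlx5_devx_cmd_destroy(counters);
}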