MLX5_SET(access_register_in, in, register_id, reg_id);
MLX5_SET(access_register_in, in, argument, arg);
rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out,
- MLX5_ST_SZ_DW(access_register_out) *
- sizeof(uint32_t) + dw_cnt);
+ MLX5_ST_SZ_BYTES(access_register_out) +
+ sizeof(uint32_t) * dw_cnt);
if (rc)
goto error;
status = MLX5_GET(access_register_out, out, status);
attr->log_max_pd = MLX5_GET(cmd_hca_cap, hcattr, log_max_pd);
attr->log_max_srq = MLX5_GET(cmd_hca_cap, hcattr, log_max_srq);
attr->log_max_srq_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_srq_sz);
+ attr->reg_c_preserve =
+ MLX5_GET(cmd_hca_cap, hcattr, reg_c_preserve);
+ attr->mmo_dma_en = MLX5_GET(cmd_hca_cap, hcattr, dma_mmo);
+ attr->mmo_compress_en = MLX5_GET(cmd_hca_cap, hcattr, compress);
+ attr->mmo_decompress_en = MLX5_GET(cmd_hca_cap, hcattr, decompress);
+ attr->compress_min_block_size = MLX5_GET(cmd_hca_cap, hcattr,
+ compress_min_block_size);
+ attr->log_max_mmo_dma = MLX5_GET(cmd_hca_cap, hcattr, log_dma_mmo_size);
+ attr->log_max_mmo_compress = MLX5_GET(cmd_hca_cap, hcattr,
+ log_compress_mmo_size);
+ attr->log_max_mmo_decompress = MLX5_GET(cmd_hca_cap, hcattr,
+ log_decompress_mmo_size);
+ attr->cqe_compression = MLX5_GET(cmd_hca_cap, hcattr, cqe_compression);
+ attr->mini_cqe_resp_flow_tag = MLX5_GET(cmd_hca_cap, hcattr,
+ mini_cqe_resp_flow_tag);
+ attr->mini_cqe_resp_l3_l4_tag = MLX5_GET(cmd_hca_cap, hcattr,
+ mini_cqe_resp_l3_l4_tag);
if (attr->qos.sup) {
MLX5_SET(query_hca_cap_in, in, op_mod,
MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
return -1;
}
hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
- attr->qos.srtcm_sup =
- MLX5_GET(qos_cap, hcattr, flow_meter_srtcm);
+ attr->qos.flow_meter_old =
+ MLX5_GET(qos_cap, hcattr, flow_meter_old);
attr->qos.log_max_flow_meter =
MLX5_GET(qos_cap, hcattr, log_max_flow_meter);
attr->qos.flow_meter_reg_c_ids =
MLX5_GET(qos_cap, hcattr, flow_meter_reg_id);
- attr->qos.flow_meter_reg_share =
- MLX5_GET(qos_cap, hcattr, flow_meter_reg_share);
+ attr->qos.flow_meter =
+ MLX5_GET(qos_cap, hcattr, flow_meter);
attr->qos.packet_pacing =
MLX5_GET(qos_cap, hcattr, packet_pacing);
attr->qos.wqe_rate_pp =
ppd->id = MLX5_GET(alloc_pd_out, out, pd);
return ppd;
}
+
+/**
+ * Create a general object of type GENEVE TLV option using DevX API.
+ *
+ * @param[in] ctx
+ *   Context returned from mlx5 open_device() glue function.
+ * @param[in] class
+ *   TLV option variable value of class.
+ * @param[in] type
+ *   TLV option variable value of type.
+ * @param[in] len
+ *   TLV option variable value of len.
+ *
+ * @return
+ *   The DevX object created, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_geneve_tlv_option(void *ctx,
+		uint16_t class, uint8_t type, uint8_t len)
+{
+	uint32_t in[MLX5_ST_SZ_DW(create_geneve_tlv_option_in)] = {0};
+	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+	void *hdr = MLX5_ADDR_OF(create_geneve_tlv_option_in, in, hdr);
+	void *opt = MLX5_ADDR_OF(create_geneve_tlv_option_in, in,
+				 geneve_tlv_opt);
+	struct mlx5_devx_obj *obj;
+
+	obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*obj), 0, SOCKET_ID_ANY);
+	if (obj == NULL) {
+		DRV_LOG(ERR, "Failed to allocate geneve tlv option object.");
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	/* Build the CREATE_GENERAL_OBJECT command header and option body. */
+	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
+		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
+		 MLX5_OBJ_TYPE_GENEVE_TLV_OPT);
+	/* NOTE(review): class looks like it arrives big-endian — confirm
+	 * against callers; it is converted to CPU order for the PRM field.
+	 */
+	MLX5_SET(geneve_tlv_option, opt, option_class,
+		 rte_be_to_cpu_16(class));
+	MLX5_SET(geneve_tlv_option, opt, option_type, type);
+	MLX5_SET(geneve_tlv_option, opt, option_data_length, len);
+	obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
+					      out, sizeof(out));
+	if (obj->obj == NULL) {
+		rte_errno = errno;
+		DRV_LOG(ERR, "Failed to create Geneve tlv option "
+			"Obj using DevX.");
+		mlx5_free(obj);
+		return NULL;
+	}
+	obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+	return obj;
+}
+
+/**
+ * Query the WQ counter set ID of an RQ-backed work queue using DevX API.
+ *
+ * @param[in] wq
+ *   Pointer to the ibv_wq to query.
+ * @param[out] counter_set_id
+ *   Where the queried counter set ID is stored.
+ *
+ * @return
+ *   0 on success, a negative value otherwise (-ENOTSUP when built
+ *   without HAVE_IBV_FLOW_DV_SUPPORT).
+ */
+int
+mlx5_devx_cmd_wq_query(void *wq, uint32_t *counter_set_id)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	uint32_t in[MLX5_ST_SZ_DW(query_rq_in)] = {0};
+	uint32_t out[MLX5_ST_SZ_DW(query_rq_out)] = {0};
+	int rc;
+	void *rq_ctx;
+
+	MLX5_SET(query_rq_in, in, opcode, MLX5_CMD_OP_QUERY_RQ);
+	MLX5_SET(query_rq_in, in, rqn, ((struct ibv_wq *)wq)->wq_num);
+	rc = mlx5_glue->devx_wq_query(wq, in, sizeof(in), out, sizeof(out));
+	if (rc) {
+		/* Save errno before logging in case DRV_LOG clobbers it. */
+		rte_errno = errno;
+		DRV_LOG(ERR, "Failed to query WQ counter set ID using DevX - "
+			"rc = %d, errno = %d.", rc, errno);
+		return -rc;
+	}
+	rq_ctx = MLX5_ADDR_OF(query_rq_out, out, rq_context);
+	*counter_set_id = MLX5_GET(rqc, rq_ctx, counter_set_id);
+	return 0;
+#else
+	(void)wq;
+	(void)counter_set_id;
+	return -ENOTSUP;
+#endif
+}
+
+/**
+ * Allocate queue counters via devx interface.
+ *
+ * @param[in] ctx
+ *   Context returned from mlx5 open_device() glue function.
+ *
+ * @return
+ *   Pointer to counter object on success, a NULL value otherwise and
+ *   rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_queue_counter_alloc(void *ctx)
+{
+	struct mlx5_devx_obj *dcs = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*dcs), 0,
+						SOCKET_ID_ANY);
+	uint32_t in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
+	uint32_t out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
+
+	if (!dcs) {
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
+	dcs->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
+					      sizeof(out));
+	if (!dcs->obj) {
+		/* Save errno before logging: DRV_LOG may clobber it. */
+		rte_errno = errno;
+		DRV_LOG(DEBUG, "Can't allocate q counter set by DevX - error "
+			"%d.", errno);
+		mlx5_free(dcs);
+		return NULL;
+	}
+	dcs->id = MLX5_GET(alloc_q_counter_out, out, counter_set_id);
+	return dcs;
+}
+
+/**
+ * Query queue counters values.
+ *
+ * @param[in] dcs
+ *   devx object of the queue counter set.
+ * @param[in] clear
+ *   Whether hardware should clear the counters after the query or not.
+ * @param[out] out_of_buffers
+ *   Number of drops occurred due to lack of WQE for the associated QPs/RQs.
+ *
+ * @return
+ *   0 on success, a negative value otherwise.
+ */
+int
+mlx5_devx_cmd_queue_counter_query(struct mlx5_devx_obj *dcs, int clear,
+				  uint32_t *out_of_buffers)
+{
+	/* Array of uint32_t: size it in dwords, not bytes. */
+	uint32_t out[MLX5_ST_SZ_DW(query_q_counter_out)] = {0};
+	uint32_t in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};
+	int rc;
+
+	MLX5_SET(query_q_counter_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_Q_COUNTER);
+	MLX5_SET(query_q_counter_in, in, op_mod, 0);
+	MLX5_SET(query_q_counter_in, in, counter_set_id, dcs->id);
+	MLX5_SET(query_q_counter_in, in, clear, !!clear);
+	rc = mlx5_glue->devx_obj_query(dcs->obj, in, sizeof(in), out,
+				       sizeof(out));
+	if (rc) {
+		DRV_LOG(ERR, "Failed to query devx q counter set - rc %d", rc);
+		rte_errno = rc;
+		return -rc;
+	}
+	*out_of_buffers = MLX5_GET(query_q_counter_out, out, out_of_buffer);
+	return 0;
+}