#include <rte_errno.h>
#include <rte_malloc.h>
+#include <rte_eal_paging.h>
#include "mlx5_prm.h"
#include "mlx5_devx_cmds.h"
#include "mlx5_common_utils.h"
+#include "mlx5_malloc.h"
+/**
+ * Perform read access to the NIC registers. Reads data from the register
+ * and writes it to the specified buffer.
+ *
+ * @param[in] ctx
+ * Context returned from mlx5 open_device() glue function.
+ * @param[in] reg_id
+ * Register identifier according to the PRM.
+ * @param[in] arg
+ * Register access auxiliary parameter according to the PRM.
+ * @param[out] data
+ * Pointer to the buffer to store read data.
+ * @param[in] dw_cnt
+ * Buffer size in double words.
+ *
+ * @return
+ * 0 on success, a negative value otherwise.
+ */
+int
+mlx5_devx_cmd_register_read(void *ctx, uint16_t reg_id, uint32_t arg,
+ uint32_t *data, uint32_t dw_cnt)
+{
+ uint32_t in[MLX5_ST_SZ_DW(access_register_in)] = {0};
+ uint32_t out[MLX5_ST_SZ_DW(access_register_out) +
+ MLX5_ACCESS_REGISTER_DATA_DWORD_MAX] = {0};
+ int status, rc;
+
+ MLX5_ASSERT(data && dw_cnt);
+ MLX5_ASSERT(dw_cnt <= MLX5_ACCESS_REGISTER_DATA_DWORD_MAX);
+ if (dw_cnt > MLX5_ACCESS_REGISTER_DATA_DWORD_MAX) {
+ DRV_LOG(ERR, "Not enough buffer for register read data");
+ return -1;
+ }
+ MLX5_SET(access_register_in, in, opcode,
+ MLX5_CMD_OP_ACCESS_REGISTER_USER);
+ MLX5_SET(access_register_in, in, op_mod,
+ MLX5_ACCESS_REGISTER_IN_OP_MOD_READ);
+ MLX5_SET(access_register_in, in, register_id, reg_id);
+ MLX5_SET(access_register_in, in, argument, arg);
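+ /* Output length: response header plus dw_cnt data dwords, in bytes. */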
+ rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out,
+ MLX5_ST_SZ_BYTES(access_register_out) +
+ dw_cnt * sizeof(uint32_t));
+ if (rc)
+ goto error;
+ status = MLX5_GET(access_register_out, out, status);
+ if (status) {
+ int syndrome = MLX5_GET(access_register_out, out, syndrome);
+
+ DRV_LOG(DEBUG, "Failed to access NIC register 0x%X, "
+ "status %x, syndrome = %x",
+ reg_id, status, syndrome);
+ return -1;
+ }
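+ /* The register data follows the fixed response header in the output. */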
+ memcpy(data, &out[MLX5_ST_SZ_DW(access_register_out)],
+ dw_cnt * sizeof(uint32_t));
+ return 0;
+error:
+ rc = (rc > 0) ? -rc : rc;
+ return rc;
+}
+
/**
* Allocate flow counters via devx interface.
*
* @param[in] ctx
- * ibv contexts returned from mlx5dv_open_device.
+ * Context returned from mlx5 open_device() glue function.
* @param dcs
* Pointer to counters properties structure to be filled by the routine.
* @param bulk_n_128
* rte_errno is set.
*/
struct mlx5_devx_obj *
-mlx5_devx_cmd_flow_counter_alloc(struct ibv_context *ctx, uint32_t bulk_n_128)
+mlx5_devx_cmd_flow_counter_alloc(void *ctx, uint32_t bulk_n_128)
{
- struct mlx5_devx_obj *dcs = rte_zmalloc("dcs", sizeof(*dcs), 0);
+ struct mlx5_devx_obj *dcs = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*dcs),
+ 0, SOCKET_ID_ANY);
uint32_t in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
uint32_t out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
if (!dcs->obj) {
DRV_LOG(ERR, "Can't allocate counters - error %d", errno);
rte_errno = errno;
- rte_free(dcs);
+ mlx5_free(dcs);
return NULL;
}
dcs->id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
int clear, uint32_t n_counters,
uint64_t *pkts, uint64_t *bytes,
uint32_t mkey, void *addr,
- struct mlx5dv_devx_cmd_comp *cmd_comp,
+ void *cmd_comp,
uint64_t async_id)
{
int out_len = MLX5_ST_SZ_BYTES(query_flow_counter_out) +
* Create a new mkey.
*
* @param[in] ctx
- * ibv contexts returned from mlx5dv_open_device.
+ * Context returned from mlx5 open_device() glue function.
* @param[in] attr
* Attributes of the requested mkey.
*
* is set.
*/
struct mlx5_devx_obj *
-mlx5_devx_cmd_mkey_create(struct ibv_context *ctx,
+mlx5_devx_cmd_mkey_create(void *ctx,
struct mlx5_devx_mkey_attr *attr)
{
struct mlx5_klm *klm_array = attr->klm_array;
uint32_t in[in_size_dw];
uint32_t out[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
void *mkc;
- struct mlx5_devx_obj *mkey = rte_zmalloc("mkey", sizeof(*mkey), 0);
+ struct mlx5_devx_obj *mkey = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*mkey),
+ 0, SOCKET_ID_ANY);
size_t pgsize;
uint32_t translation_size;
return NULL;
}
memset(in, 0, in_size_dw * 4);
- pgsize = sysconf(_SC_PAGESIZE);
+ pgsize = rte_mem_page_size();
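+ /* rte_mem_page_size() reports failure as (size_t)-1. */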
+ if (pgsize == (size_t)-1) {
+ mlx5_free(mkey);
+ DRV_LOG(ERR, "Failed to get page size");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
if (klm_num > 0) {
DRV_LOG(ERR, "Can't create %sdirect mkey - error %d\n",
klm_num ? "an in" : "a ", errno);
rte_errno = errno;
- rte_free(mkey);
+ mlx5_free(mkey);
return NULL;
}
mkey->id = MLX5_GET(create_mkey_out, out, mkey_index);
if (!obj)
return 0;
ret = mlx5_glue->devx_obj_destroy(obj->obj);
- rte_free(obj);
+ mlx5_free(obj);
return ret;
}
* 0 on success, a negative value otherwise.
*/
static int
-mlx5_devx_cmd_query_nic_vport_context(struct ibv_context *ctx,
+mlx5_devx_cmd_query_nic_vport_context(void *ctx,
unsigned int vport,
struct mlx5_hca_attr *attr)
{
* Query NIC vDPA attributes.
*
* @param[in] ctx
- * ibv contexts returned from mlx5dv_open_device.
+ * Context returned from mlx5 open_device() glue function.
* @param[out] vdpa_attr
* vDPA Attributes structure to fill.
*/
static void
-mlx5_devx_cmd_query_hca_vdpa_attr(struct ibv_context *ctx,
+mlx5_devx_cmd_query_hca_vdpa_attr(void *ctx,
struct mlx5_hca_vdpa_attr *vdpa_attr)
{
uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
}
}
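+
+/**
+ * Query the sample fields of a flex parser graph node.
+ *
+ * Scans all sample slots of the node and stores the field IDs of the
+ * enabled ones in the caller-provided array.
+ *
+ * @param[in] flex_obj
+ *   Pointer to the flex parser DevX object.
+ * @param[out] ids
+ *   Array to store the sample field IDs, must hold at least @p num entries.
+ * @param[in] num
+ *   Expected number of enabled sample fields.
+ *
+ * @return
+ *   0 on success, a negative value otherwise and rte_errno is set.
+ */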
+int
+mlx5_devx_cmd_query_parse_samples(struct mlx5_devx_obj *flex_obj,
+ uint32_t ids[], uint32_t num)
+{
+ uint32_t in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
+ uint32_t out[MLX5_ST_SZ_DW(create_flex_parser_out)] = {0};
+ void *hdr = MLX5_ADDR_OF(create_flex_parser_out, in, hdr);
+ void *flex = MLX5_ADDR_OF(create_flex_parser_out, out, flex);
+ void *sample = MLX5_ADDR_OF(parse_graph_flex, flex, sample_table);
+ int ret;
+ uint32_t idx = 0;
+ uint32_t i;
+
+ if (num > MLX5_GRAPH_NODE_SAMPLE_NUM) {
+ rte_errno = EINVAL;
+ DRV_LOG(ERR, "Too many sample IDs to be fetched.");
+ return -rte_errno;
+ }
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
+ MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
+ MLX5_GENERAL_OBJ_TYPE_FLEX_PARSE_GRAPH);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, flex_obj->id);
+ ret = mlx5_glue->devx_obj_query(flex_obj->obj, in, sizeof(in),
+ out, sizeof(out));
+ if (ret) {
+ rte_errno = ret;
+ DRV_LOG(ERR, "Failed to query sample IDs with object %p.",
+ (void *)flex_obj);
+ return -rte_errno;
+ }
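+ /* Collect the field IDs of all enabled sample slots. */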
+ for (i = 0; i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) {
+ void *s_off = (void *)((char *)sample + i *
+ MLX5_ST_SZ_BYTES(parse_graph_flow_match_sample));
+ uint32_t en;
+
+ en = MLX5_GET(parse_graph_flow_match_sample, s_off,
+ flow_match_sample_en);
+ if (!en)
+ continue;
+ ids[idx++] = MLX5_GET(parse_graph_flow_match_sample, s_off,
+ flow_match_sample_field_id);
+ }
+ if (num != idx) {
+ rte_errno = EINVAL;
+ DRV_LOG(ERR, "Number of sample IDs are not as expected.");
+ return -rte_errno;
+ }
+ return ret;
+}
+
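+/**
+ * Create a flex parser graph node using DevX API.
+ *
+ * @param[in] ctx
+ *   Context returned from mlx5 open_device() glue function.
+ * @param[in] data
+ *   Pointer to the flex parser graph node attributes.
+ *
+ * @return
+ *   The DevX object created, NULL otherwise and rte_errno is set.
+ */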
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_flex_parser(void *ctx,
+ struct mlx5_devx_graph_node_attr *data)
+{
+ uint32_t in[MLX5_ST_SZ_DW(create_flex_parser_in)] = {0};
+ uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ void *hdr = MLX5_ADDR_OF(create_flex_parser_in, in, hdr);
+ void *flex = MLX5_ADDR_OF(create_flex_parser_in, in, flex);
+ void *sample = MLX5_ADDR_OF(parse_graph_flex, flex, sample_table);
+ void *in_arc = MLX5_ADDR_OF(parse_graph_flex, flex, input_arc);
+ void *out_arc = MLX5_ADDR_OF(parse_graph_flex, flex, output_arc);
+ struct mlx5_devx_obj *parse_flex_obj = NULL;
+ uint32_t i;
+
+ parse_flex_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*parse_flex_obj), 0,
+ SOCKET_ID_ANY);
+ if (!parse_flex_obj) {
+ DRV_LOG(ERR, "Failed to allocate flex parser data");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
+ MLX5_GENERAL_OBJ_TYPE_FLEX_PARSE_GRAPH);
+ MLX5_SET(parse_graph_flex, flex, header_length_mode,
+ data->header_length_mode);
+ MLX5_SET(parse_graph_flex, flex, header_length_base_value,
+ data->header_length_base_value);
+ MLX5_SET(parse_graph_flex, flex, header_length_field_offset,
+ data->header_length_field_offset);
+ MLX5_SET(parse_graph_flex, flex, header_length_field_shift,
+ data->header_length_field_shift);
+ MLX5_SET(parse_graph_flex, flex, header_length_field_mask,
+ data->header_length_field_mask);
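+ /* Program only the samples with flow_match_sample_en set. */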
+ for (i = 0; i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) {
+ struct mlx5_devx_match_sample_attr *s = &data->sample[i];
+ void *s_off = (void *)((char *)sample + i *
+ MLX5_ST_SZ_BYTES(parse_graph_flow_match_sample));
+
+ if (!s->flow_match_sample_en)
+ continue;
+ MLX5_SET(parse_graph_flow_match_sample, s_off,
+ flow_match_sample_en, !!s->flow_match_sample_en);
+ MLX5_SET(parse_graph_flow_match_sample, s_off,
+ flow_match_sample_field_offset,
+ s->flow_match_sample_field_offset);
+ MLX5_SET(parse_graph_flow_match_sample, s_off,
+ flow_match_sample_offset_mode,
+ s->flow_match_sample_offset_mode);
+ MLX5_SET(parse_graph_flow_match_sample, s_off,
+ flow_match_sample_field_offset_mask,
+ s->flow_match_sample_field_offset_mask);
+ MLX5_SET(parse_graph_flow_match_sample, s_off,
+ flow_match_sample_field_offset_shift,
+ s->flow_match_sample_field_offset_shift);
+ MLX5_SET(parse_graph_flow_match_sample, s_off,
+ flow_match_sample_field_base_offset,
+ s->flow_match_sample_field_base_offset);
+ MLX5_SET(parse_graph_flow_match_sample, s_off,
+ flow_match_sample_tunnel_mode,
+ s->flow_match_sample_tunnel_mode);
+ }
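+ /* An arc with a zero arc_parse_graph_node is considered unused. */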
+ for (i = 0; i < MLX5_GRAPH_NODE_ARC_NUM; i++) {
+ struct mlx5_devx_graph_arc_attr *ia = &data->in[i];
+ struct mlx5_devx_graph_arc_attr *oa = &data->out[i];
+ void *in_off = (void *)((char *)in_arc + i *
+ MLX5_ST_SZ_BYTES(parse_graph_arc));
+ void *out_off = (void *)((char *)out_arc + i *
+ MLX5_ST_SZ_BYTES(parse_graph_arc));
+
+ if (ia->arc_parse_graph_node != 0) {
+ MLX5_SET(parse_graph_arc, in_off,
+ compare_condition_value,
+ ia->compare_condition_value);
+ MLX5_SET(parse_graph_arc, in_off, start_inner_tunnel,
+ ia->start_inner_tunnel);
+ MLX5_SET(parse_graph_arc, in_off, arc_parse_graph_node,
+ ia->arc_parse_graph_node);
+ MLX5_SET(parse_graph_arc, in_off,
+ parse_graph_node_handle,
+ ia->parse_graph_node_handle);
+ }
+ if (oa->arc_parse_graph_node != 0) {
+ MLX5_SET(parse_graph_arc, out_off,
+ compare_condition_value,
+ oa->compare_condition_value);
+ MLX5_SET(parse_graph_arc, out_off, start_inner_tunnel,
+ oa->start_inner_tunnel);
+ MLX5_SET(parse_graph_arc, out_off, arc_parse_graph_node,
+ oa->arc_parse_graph_node);
+ MLX5_SET(parse_graph_arc, out_off,
+ parse_graph_node_handle,
+ oa->parse_graph_node_handle);
+ }
+ }
+ parse_flex_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
+ out, sizeof(out));
+ if (!parse_flex_obj->obj) {
+ rte_errno = errno;
+ DRV_LOG(ERR, "Failed to create FLEX PARSE GRAPH object "
+ "by using DevX.");
+ mlx5_free(parse_flex_obj);
+ return NULL;
+ }
+ parse_flex_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ return parse_flex_obj;
+}
+
/**
* Query HCA attributes.
* Using those attributes we can check on run time if the device
* is having the required capabilities.
*
* @param[in] ctx
- * ibv contexts returned from mlx5dv_open_device.
+ * Context returned from mlx5 open_device() glue function.
* @param[out] attr
* Attributes device values.
*
* 0 on success, a negative value otherwise.
*/
int
-mlx5_devx_cmd_query_hca_attr(struct ibv_context *ctx,
+mlx5_devx_cmd_query_hca_attr(void *ctx,
struct mlx5_hca_attr *attr)
{
uint32_t in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {0};
uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
void *hcattr;
- int status, syndrome, rc;
+ int status, syndrome, rc, i;
MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, op_mod,
attr->log_max_hairpin_num_packets = MLX5_GET
(cmd_hca_cap, hcattr, log_min_hairpin_wq_data_sz);
attr->vhca_id = MLX5_GET(cmd_hca_cap, hcattr, vhca_id);
+ attr->relaxed_ordering_write = MLX5_GET(cmd_hca_cap, hcattr,
+ relaxed_ordering_write);
+ attr->relaxed_ordering_read = MLX5_GET(cmd_hca_cap, hcattr,
+ relaxed_ordering_read);
+ attr->access_register_user = MLX5_GET(cmd_hca_cap, hcattr,
+ access_register_user);
attr->eth_net_offloads = MLX5_GET(cmd_hca_cap, hcattr,
eth_net_offloads);
attr->eth_virt = MLX5_GET(cmd_hca_cap, hcattr, eth_virt);
attr->vdpa.valid = !!(MLX5_GET64(cmd_hca_cap, hcattr,
general_obj_types) &
MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q);
+ attr->vdpa.queue_counters_valid = !!(MLX5_GET64(cmd_hca_cap, hcattr,
+ general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_Q_COUNTERS);
+ attr->parse_graph_flex_node = !!(MLX5_GET64(cmd_hca_cap, hcattr,
+ general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_PARSE_GRAPH_FLEX_NODE);
+ attr->wqe_index_ignore = MLX5_GET(cmd_hca_cap, hcattr,
+ wqe_index_ignore_cap);
+ attr->cross_channel = MLX5_GET(cmd_hca_cap, hcattr, cd);
+ attr->non_wire_sq = MLX5_GET(cmd_hca_cap, hcattr, non_wire_sq);
+ attr->log_max_static_sq_wq = MLX5_GET(cmd_hca_cap, hcattr,
+ log_max_static_sq_wq);
+ attr->dev_freq_khz = MLX5_GET(cmd_hca_cap, hcattr,
+ device_frequency_khz);
+ attr->scatter_fcs_w_decap_disable =
+ MLX5_GET(cmd_hca_cap, hcattr, scatter_fcs_w_decap_disable);
+ attr->regex = MLX5_GET(cmd_hca_cap, hcattr, regexp);
+ attr->regexp_num_of_engines = MLX5_GET(cmd_hca_cap, hcattr,
+ regexp_num_of_engines);
if (attr->qos.sup) {
MLX5_SET(query_hca_cap_in, in, op_mod,
MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
attr->qos.log_max_flow_meter =
MLX5_GET(qos_cap, hcattr, log_max_flow_meter);
attr->qos.flow_meter_reg_c_ids =
- MLX5_GET(qos_cap, hcattr, flow_meter_reg_id);
+ MLX5_GET(qos_cap, hcattr, flow_meter_reg_id);
attr->qos.flow_meter_reg_share =
- MLX5_GET(qos_cap, hcattr, flow_meter_reg_share);
+ MLX5_GET(qos_cap, hcattr, flow_meter_reg_share);
+ attr->qos.packet_pacing =
+ MLX5_GET(qos_cap, hcattr, packet_pacing);
+ attr->qos.wqe_rate_pp =
+ MLX5_GET(qos_cap, hcattr, wqe_rate_pp);
}
if (attr->vdpa.valid)
mlx5_devx_cmd_query_hca_vdpa_attr(ctx, &attr->vdpa);
if (!attr->eth_net_offloads)
return 0;
+ /* Query Flow Sampler Capability From Flow Table Properties Layout. */
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+ MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE |
+ MLX5_HCA_CAP_OPMOD_GET_CUR);
+
+ rc = mlx5_glue->devx_general_cmd(ctx,
+ in, sizeof(in),
+ out, sizeof(out));
+ if (rc)
+ goto error;
+ status = MLX5_GET(query_hca_cap_out, out, status);
+ syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
+ if (status) {
+ DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
+ "status %x, syndrome = %x",
+ status, syndrome);
+ attr->log_max_ft_sampler_num = 0;
+ return -1;
+ }
+ hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+ attr->log_max_ft_sampler_num =
+ MLX5_GET(flow_table_nic_cap,
+ hcattr, flow_table_properties.log_max_ft_sampler_num);
+
/* Query HCA offloads for Ethernet protocol. */
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
attr->lro_max_msg_sz_mode = MLX5_GET
(per_protocol_networking_offload_caps,
hcattr, lro_max_msg_sz_mode);
- for (int i = 0 ; i < MLX5_LRO_NUM_SUPP_PERIODS ; i++) {
+ for (i = 0 ; i < MLX5_LRO_NUM_SUPP_PERIODS ; i++) {
attr->lro_timer_supported_periods[i] =
MLX5_GET(per_protocol_networking_offload_caps, hcattr,
lro_timer_supported_periods[i]);
}
+ attr->lro_min_mss_size = MLX5_GET(per_protocol_networking_offload_caps,
+ hcattr, lro_min_mss_size);
attr->tunnel_stateless_geneve_rx =
MLX5_GET(per_protocol_networking_offload_caps,
hcattr, tunnel_stateless_geneve_rx);
* 0 on success, a negative value otherwise.
*/
int
-mlx5_devx_cmd_qp_query_tis_td(struct ibv_qp *qp, uint32_t tis_num,
+mlx5_devx_cmd_qp_query_tis_td(void *qp, uint32_t tis_num,
uint32_t *tis_td)
{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
uint32_t in[MLX5_ST_SZ_DW(query_tis_in)] = {0};
uint32_t out[MLX5_ST_SZ_DW(query_tis_out)] = {0};
int rc;
tis_ctx = MLX5_ADDR_OF(query_tis_out, out, tis_context);
*tis_td = MLX5_GET(tisc, tis_ctx, transport_domain);
return 0;
+#else
+ (void)qp;
+ (void)tis_num;
+ (void)tis_td;
+ return -ENOTSUP;
+#endif
}
/**
* Create RQ using DevX API.
*
* @param[in] ctx
- * ibv_context returned from mlx5dv_open_device.
+ * Context returned from mlx5 open_device() glue function.
* @param [in] rq_attr
* Pointer to create RQ attributes structure.
* @param [in] socket
* The DevX object created, NULL otherwise and rte_errno is set.
*/
struct mlx5_devx_obj *
-mlx5_devx_cmd_create_rq(struct ibv_context *ctx,
+mlx5_devx_cmd_create_rq(void *ctx,
struct mlx5_devx_create_rq_attr *rq_attr,
int socket)
{
struct mlx5_devx_wq_attr *wq_attr;
struct mlx5_devx_obj *rq = NULL;
- rq = rte_calloc_socket(__func__, 1, sizeof(*rq), 0, socket);
+ rq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rq), 0, socket);
if (!rq) {
DRV_LOG(ERR, "Failed to allocate RQ data");
rte_errno = ENOMEM;
if (!rq->obj) {
DRV_LOG(ERR, "Failed to create RQ using DevX");
rte_errno = errno;
- rte_free(rq);
+ mlx5_free(rq);
return NULL;
}
rq->id = MLX5_GET(create_rq_out, out, rqn);
* Create TIR using DevX API.
*
* @param[in] ctx
- * ibv_context returned from mlx5dv_open_device.
+ * Context returned from mlx5 open_device() glue function.
* @param [in] tir_attr
* Pointer to TIR attributes structure.
*
* The DevX object created, NULL otherwise and rte_errno is set.
*/
struct mlx5_devx_obj *
-mlx5_devx_cmd_create_tir(struct ibv_context *ctx,
+mlx5_devx_cmd_create_tir(void *ctx,
struct mlx5_devx_tir_attr *tir_attr)
{
uint32_t in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
void *tir_ctx, *outer, *inner, *rss_key;
struct mlx5_devx_obj *tir = NULL;
- tir = rte_calloc(__func__, 1, sizeof(*tir), 0);
+ tir = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tir), 0, SOCKET_ID_ANY);
if (!tir) {
DRV_LOG(ERR, "Failed to allocate TIR data");
rte_errno = ENOMEM;
if (!tir->obj) {
DRV_LOG(ERR, "Failed to create TIR using DevX");
rte_errno = errno;
- rte_free(tir);
+ mlx5_free(tir);
return NULL;
}
tir->id = MLX5_GET(create_tir_out, out, tirn);
* Create RQT using DevX API.
*
* @param[in] ctx
- * ibv_context returned from mlx5dv_open_device.
+ * Context returned from mlx5 open_device() glue function.
* @param [in] rqt_attr
* Pointer to RQT attributes structure.
*
* The DevX object created, NULL otherwise and rte_errno is set.
*/
struct mlx5_devx_obj *
-mlx5_devx_cmd_create_rqt(struct ibv_context *ctx,
+mlx5_devx_cmd_create_rqt(void *ctx,
struct mlx5_devx_rqt_attr *rqt_attr)
{
uint32_t *in = NULL;
struct mlx5_devx_obj *rqt = NULL;
int i;
- in = rte_calloc(__func__, 1, inlen, 0);
+ in = mlx5_malloc(MLX5_MEM_ZERO, inlen, 0, SOCKET_ID_ANY);
if (!in) {
DRV_LOG(ERR, "Failed to allocate RQT IN data");
rte_errno = ENOMEM;
return NULL;
}
- rqt = rte_calloc(__func__, 1, sizeof(*rqt), 0);
+ rqt = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt), 0, SOCKET_ID_ANY);
if (!rqt) {
DRV_LOG(ERR, "Failed to allocate RQT data");
rte_errno = ENOMEM;
- rte_free(in);
+ mlx5_free(in);
return NULL;
}
MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
for (i = 0; i < rqt_attr->rqt_actual_size; i++)
MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]);
rqt->obj = mlx5_glue->devx_obj_create(ctx, in, inlen, out, sizeof(out));
- rte_free(in);
+ mlx5_free(in);
if (!rqt->obj) {
DRV_LOG(ERR, "Failed to create RQT using DevX");
rte_errno = errno;
- rte_free(rqt);
+ mlx5_free(rqt);
return NULL;
}
rqt->id = MLX5_GET(create_rqt_out, out, rqtn);
uint32_t inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) +
rqt_attr->rqt_actual_size * sizeof(uint32_t);
uint32_t out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0};
- uint32_t *in = rte_calloc(__func__, 1, inlen, 0);
+ uint32_t *in = mlx5_malloc(MLX5_MEM_ZERO, inlen, 0, SOCKET_ID_ANY);
void *rqt_ctx;
int i;
int ret;
for (i = 0; i < rqt_attr->rqt_actual_size; i++)
MLX5_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]);
ret = mlx5_glue->devx_obj_modify(rqt->obj, in, inlen, out, sizeof(out));
- rte_free(in);
+ mlx5_free(in);
if (ret) {
DRV_LOG(ERR, "Failed to modify RQT using DevX.");
rte_errno = errno;
* Create SQ using DevX API.
*
* @param[in] ctx
- * ibv_context returned from mlx5dv_open_device.
+ * Context returned from mlx5 open_device() glue function.
* @param [in] sq_attr
* Pointer to SQ attributes structure.
* @param [in] socket
* The DevX object created, NULL otherwise and rte_errno is set.
**/
struct mlx5_devx_obj *
-mlx5_devx_cmd_create_sq(struct ibv_context *ctx,
+mlx5_devx_cmd_create_sq(void *ctx,
struct mlx5_devx_create_sq_attr *sq_attr)
{
uint32_t in[MLX5_ST_SZ_DW(create_sq_in)] = {0};
struct mlx5_devx_wq_attr *wq_attr;
struct mlx5_devx_obj *sq = NULL;
- sq = rte_calloc(__func__, 1, sizeof(*sq), 0);
+ sq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*sq), 0, SOCKET_ID_ANY);
if (!sq) {
DRV_LOG(ERR, "Failed to allocate SQ data");
rte_errno = ENOMEM;
MLX5_SET(sqc, sq_ctx, reg_umr, sq_attr->reg_umr);
MLX5_SET(sqc, sq_ctx, allow_swp, sq_attr->allow_swp);
MLX5_SET(sqc, sq_ctx, hairpin, sq_attr->hairpin);
+ MLX5_SET(sqc, sq_ctx, non_wire, sq_attr->non_wire);
+ MLX5_SET(sqc, sq_ctx, static_sq_wq, sq_attr->static_sq_wq);
MLX5_SET(sqc, sq_ctx, user_index, sq_attr->user_index);
MLX5_SET(sqc, sq_ctx, cqn, sq_attr->cqn);
MLX5_SET(sqc, sq_ctx, packet_pacing_rate_limit_index,
if (!sq->obj) {
DRV_LOG(ERR, "Failed to create SQ using DevX");
rte_errno = errno;
- rte_free(sq);
+ mlx5_free(sq);
return NULL;
}
sq->id = MLX5_GET(create_sq_out, out, sqn);
if (ret) {
DRV_LOG(ERR, "Failed to modify SQ using DevX");
rte_errno = errno;
- return -errno;
+ return -rte_errno;
}
return ret;
}
* Create TIS using DevX API.
*
* @param[in] ctx
- * ibv_context returned from mlx5dv_open_device.
+ * Context returned from mlx5 open_device() glue function.
* @param [in] tis_attr
* Pointer to TIS attributes structure.
*
* The DevX object created, NULL otherwise and rte_errno is set.
*/
struct mlx5_devx_obj *
-mlx5_devx_cmd_create_tis(struct ibv_context *ctx,
+mlx5_devx_cmd_create_tis(void *ctx,
struct mlx5_devx_tis_attr *tis_attr)
{
uint32_t in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
struct mlx5_devx_obj *tis = NULL;
void *tis_ctx;
- tis = rte_calloc(__func__, 1, sizeof(*tis), 0);
+ tis = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*tis), 0, SOCKET_ID_ANY);
if (!tis) {
DRV_LOG(ERR, "Failed to allocate TIS object");
rte_errno = ENOMEM;
if (!tis->obj) {
DRV_LOG(ERR, "Failed to create TIS using DevX");
rte_errno = errno;
- rte_free(tis);
+ mlx5_free(tis);
return NULL;
}
tis->id = MLX5_GET(create_tis_out, out, tisn);
* Create transport domain using DevX API.
*
* @param[in] ctx
- * ibv_context returned from mlx5dv_open_device.
- *
+ * Context returned from mlx5 open_device() glue function.
+ *
* @return
* The DevX object created, NULL otherwise and rte_errno is set.
*/
struct mlx5_devx_obj *
-mlx5_devx_cmd_create_td(struct ibv_context *ctx)
+mlx5_devx_cmd_create_td(void *ctx)
{
uint32_t in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};
uint32_t out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
struct mlx5_devx_obj *td = NULL;
- td = rte_calloc(__func__, 1, sizeof(*td), 0);
+ td = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*td), 0, SOCKET_ID_ANY);
if (!td) {
DRV_LOG(ERR, "Failed to allocate TD object");
rte_errno = ENOMEM;
if (!td->obj) {
DRV_LOG(ERR, "Failed to create TIS using DevX");
rte_errno = errno;
- rte_free(td);
+ mlx5_free(td);
return NULL;
}
td->id = MLX5_GET(alloc_transport_domain_out, out,
* Create CQ using DevX API.
*
* @param[in] ctx
- * ibv_context returned from mlx5dv_open_device.
+ * Context returned from mlx5 open_device() glue function.
* @param [in] attr
* Pointer to CQ attributes structure.
*
* The DevX object created, NULL otherwise and rte_errno is set.
*/
struct mlx5_devx_obj *
-mlx5_devx_cmd_create_cq(struct ibv_context *ctx, struct mlx5_devx_cq_attr *attr)
+mlx5_devx_cmd_create_cq(void *ctx, struct mlx5_devx_cq_attr *attr)
{
uint32_t in[MLX5_ST_SZ_DW(create_cq_in)] = {0};
uint32_t out[MLX5_ST_SZ_DW(create_cq_out)] = {0};
- struct mlx5_devx_obj *cq_obj = rte_zmalloc(__func__, sizeof(*cq_obj),
- 0);
+ struct mlx5_devx_obj *cq_obj = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(*cq_obj),
+ 0, SOCKET_ID_ANY);
void *cqctx = MLX5_ADDR_OF(create_cq_in, in, cq_context);
if (!cq_obj) {
} else {
MLX5_SET64(cqc, cqctx, dbr_addr, attr->db_addr);
}
+ MLX5_SET(cqc, cqctx, cqe_sz, attr->cqe_size);
MLX5_SET(cqc, cqctx, cc, attr->use_first_only);
MLX5_SET(cqc, cqctx, oi, attr->overrun_ignore);
MLX5_SET(cqc, cqctx, log_cq_size, attr->log_cq_size);
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET(cqc, cqctx, c_eqn, attr->eqn);
MLX5_SET(cqc, cqctx, uar_page, attr->uar_page_id);
+ MLX5_SET(cqc, cqctx, cqe_comp_en, attr->cqe_comp_en);
+ MLX5_SET(cqc, cqctx, mini_cqe_res_format, attr->mini_cqe_res_format);
if (attr->q_umem_valid) {
MLX5_SET(create_cq_in, in, cq_umem_valid, attr->q_umem_valid);
MLX5_SET(create_cq_in, in, cq_umem_id, attr->q_umem_id);
if (!cq_obj->obj) {
rte_errno = errno;
DRV_LOG(ERR, "Failed to create CQ using DevX errno=%d.", errno);
- rte_free(cq_obj);
+ mlx5_free(cq_obj);
return NULL;
}
cq_obj->id = MLX5_GET(create_cq_out, out, cqn);
* Create VIRTQ using DevX API.
*
* @param[in] ctx
- * ibv_context returned from mlx5dv_open_device.
+ * Context returned from mlx5 open_device() glue function.
* @param [in] attr
* Pointer to VIRTQ attributes structure.
*
* The DevX object created, NULL otherwise and rte_errno is set.
*/
struct mlx5_devx_obj *
-mlx5_devx_cmd_create_virtq(struct ibv_context *ctx,
+mlx5_devx_cmd_create_virtq(void *ctx,
struct mlx5_devx_virtq_attr *attr)
{
uint32_t in[MLX5_ST_SZ_DW(create_virtq_in)] = {0};
uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
- struct mlx5_devx_obj *virtq_obj = rte_zmalloc(__func__,
- sizeof(*virtq_obj), 0);
+ struct mlx5_devx_obj *virtq_obj = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(*virtq_obj),
+ 0, SOCKET_ID_ANY);
void *virtq = MLX5_ADDR_OF(create_virtq_in, in, virtq);
void *hdr = MLX5_ADDR_OF(create_virtq_in, in, hdr);
void *virtctx = MLX5_ADDR_OF(virtio_net_q, virtq, virtio_q_context);
MLX5_SET(virtio_q, virtctx, umem_3_id, attr->umems[2].id);
MLX5_SET(virtio_q, virtctx, umem_3_size, attr->umems[2].size);
MLX5_SET64(virtio_q, virtctx, umem_3_offset, attr->umems[2].offset);
+ MLX5_SET(virtio_q, virtctx, counter_set_id, attr->counters_obj_id);
+ MLX5_SET(virtio_q, virtctx, pd, attr->pd);
MLX5_SET(virtio_net_q, virtq, tisn_or_qpn, attr->tis_id);
virtq_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
sizeof(out));
if (!virtq_obj->obj) {
rte_errno = errno;
DRV_LOG(ERR, "Failed to create VIRTQ Obj using DevX.");
- rte_free(virtq_obj);
+ mlx5_free(virtq_obj);
return NULL;
}
virtq_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
if (ret) {
DRV_LOG(ERR, "Failed to modify VIRTQ using DevX.");
rte_errno = errno;
- return -errno;
+ return -rte_errno;
}
return ret;
}
* Create QP using DevX API.
*
* @param[in] ctx
- * ibv_context returned from mlx5dv_open_device.
+ * Context returned from mlx5 open_device() glue function.
* @param [in] attr
* Pointer to QP attributes structure.
*
* The DevX object created, NULL otherwise and rte_errno is set.
*/
struct mlx5_devx_obj *
-mlx5_devx_cmd_create_qp(struct ibv_context *ctx,
+mlx5_devx_cmd_create_qp(void *ctx,
struct mlx5_devx_qp_attr *attr)
{
uint32_t in[MLX5_ST_SZ_DW(create_qp_in)] = {0};
uint32_t out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
- struct mlx5_devx_obj *qp_obj = rte_zmalloc(__func__, sizeof(*qp_obj),
- 0);
+ struct mlx5_devx_obj *qp_obj = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(*qp_obj),
+ 0, SOCKET_ID_ANY);
void *qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
if (!qp_obj) {
if (!qp_obj->obj) {
rte_errno = errno;
DRV_LOG(ERR, "Failed to create QP Obj using DevX.");
- rte_free(qp_obj);
+ mlx5_free(qp_obj);
return NULL;
}
qp_obj->id = MLX5_GET(create_qp_out, out, qpn);
if (ret) {
DRV_LOG(ERR, "Failed to modify QP using DevX.");
rte_errno = errno;
+ return -rte_errno;
+ }
+ return ret;
+}
+
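+/**
+ * Create virtio queue counters object using DevX API.
+ *
+ * @param[in] ctx
+ *   Context returned from mlx5 open_device() glue function.
+ *
+ * @return
+ *   The DevX object created, NULL otherwise and rte_errno is set.
+ */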
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_virtio_q_counters(void *ctx)
+{
+ uint32_t in[MLX5_ST_SZ_DW(create_virtio_q_counters_in)] = {0};
+ uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+ struct mlx5_devx_obj *counters_obj = mlx5_malloc(MLX5_MEM_ZERO,
+ sizeof(*counters_obj), 0,
+ SOCKET_ID_ANY);
+ void *hdr = MLX5_ADDR_OF(create_virtio_q_counters_in, in, hdr);
+
+ if (!counters_obj) {
+ DRV_LOG(ERR, "Failed to allocate virtio queue counters data.");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
+ MLX5_GENERAL_OBJ_TYPE_VIRTIO_Q_COUNTERS);
+ counters_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
+ sizeof(out));
+ if (!counters_obj->obj) {
+ rte_errno = errno;
+ DRV_LOG(ERR, "Failed to create virtio queue counters Obj using"
+ " DevX.");
+ mlx5_free(counters_obj);
+ return NULL;
+ }
+ counters_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ return counters_obj;
+}
+
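+/**
+ * Query virtio queue counters using DevX API.
+ *
+ * @param[in] counters_obj
+ *   Pointer to the virtio queue counters DevX object.
+ * @param[out] attr
+ *   Pointer to the counters attributes structure to fill.
+ *
+ * @return
+ *   0 on success, a negative value otherwise and rte_errno is set.
+ */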
+int
+mlx5_devx_cmd_query_virtio_q_counters(struct mlx5_devx_obj *counters_obj,
+ struct mlx5_devx_virtio_q_couners_attr *attr)
+{
+ uint32_t in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
+ uint32_t out[MLX5_ST_SZ_DW(query_virtio_q_counters_out)] = {0};
+ void *hdr = MLX5_ADDR_OF(query_virtio_q_counters_out, in, hdr);
+ void *virtio_q_counters = MLX5_ADDR_OF(query_virtio_q_counters_out, out,
+ virtio_q_counters);
+ int ret;
+
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
+ MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
+ MLX5_GENERAL_OBJ_TYPE_VIRTIO_Q_COUNTERS);
+ MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_id, counters_obj->id);
+ ret = mlx5_glue->devx_obj_query(counters_obj->obj, in, sizeof(in), out,
+ sizeof(out));
+ if (ret) {
+ DRV_LOG(ERR, "Failed to query virtio q counters using DevX.");
+ rte_errno = errno;
return -errno;
}
+ attr->received_desc = MLX5_GET64(virtio_q_counters, virtio_q_counters,
+ received_desc);
+ attr->completed_desc = MLX5_GET64(virtio_q_counters, virtio_q_counters,
+ completed_desc);
+ attr->error_cqes = MLX5_GET(virtio_q_counters, virtio_q_counters,
+ error_cqes);
+ attr->bad_desc_errors = MLX5_GET(virtio_q_counters, virtio_q_counters,
+ bad_desc_errors);
+ attr->exceed_max_chain = MLX5_GET(virtio_q_counters, virtio_q_counters,
+ exceed_max_chain);
+ attr->invalid_buffer = MLX5_GET(virtio_q_counters, virtio_q_counters,
+ invalid_buffer);
return ret;
}