-// SPDX-License-Identifier: BSD-3-Clause
-/* Copyright 2018 Mellanox Technologies, Ltd */
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 Mellanox Technologies, Ltd
+ */
#include <unistd.h>
#include "mlx5_prm.h"
#include "mlx5_devx_cmds.h"
-#include "mlx5_common_utils.h"
+#include "mlx5_common_log.h"
#include "mlx5_malloc.h"
-
/**
* Perform read access to the registers. Reads data from register
* and writes ones to the specified buffer.
MLX5_SET(access_register_in, in, register_id, reg_id);
MLX5_SET(access_register_in, in, argument, arg);
rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out,
- MLX5_ST_SZ_DW(access_register_out) *
- sizeof(uint32_t) + dw_cnt);
+ MLX5_ST_SZ_BYTES(access_register_out) +
+ sizeof(uint32_t) * dw_cnt);
if (rc)
goto error;
status = MLX5_GET(access_register_out, out, status);
if (status) {
int syndrome = MLX5_GET(access_register_out, out, syndrome);
- DRV_LOG(DEBUG, "Failed to access NIC register 0x%X, "
+ DRV_LOG(DEBUG, "Failed to read access NIC register 0x%X, "
"status %x, syndrome = %x",
reg_id, status, syndrome);
return -1;
return rc;
}
+/**
+ * Perform write access to the registers.
+ *
+ * @param[in] ctx
+ *   Context returned from mlx5 open_device() glue function.
+ * @param[in] reg_id
+ *   Register identifier according to the PRM.
+ * @param[in] arg
+ *   Register access auxiliary parameter according to the PRM.
+ * @param[out] data
+ *   Pointer to the buffer containing data to write.
+ * @param[in] dw_cnt
+ *   Buffer size in double words (32bit units).
+ *
+ * @return
+ *   0 on success, a negative value otherwise.
+ */
+int
+mlx5_devx_cmd_register_write(void *ctx, uint16_t reg_id, uint32_t arg,
+			     uint32_t *data, uint32_t dw_cnt)
+{
+	uint32_t in[MLX5_ST_SZ_DW(access_register_in) +
+		    MLX5_ACCESS_REGISTER_DATA_DWORD_MAX] = {0};
+	uint32_t out[MLX5_ST_SZ_DW(access_register_out)] = {0};
+	int status, rc;
+	void *ptr;
+
+	MLX5_ASSERT(data && dw_cnt);
+	MLX5_ASSERT(dw_cnt <= MLX5_ACCESS_REGISTER_DATA_DWORD_MAX);
+	if (dw_cnt > MLX5_ACCESS_REGISTER_DATA_DWORD_MAX) {
+		DRV_LOG(ERR, "Data to write exceeds max size");
+		return -1;
+	}
+	MLX5_SET(access_register_in, in, opcode,
+		 MLX5_CMD_OP_ACCESS_REGISTER_USER);
+	MLX5_SET(access_register_in, in, op_mod,
+		 MLX5_ACCESS_REGISTER_IN_OP_MOD_WRITE);
+	MLX5_SET(access_register_in, in, register_id, reg_id);
+	MLX5_SET(access_register_in, in, argument, arg);
+	ptr = MLX5_ADDR_OF(access_register_in, in, register_data);
+	memcpy(ptr, data, dw_cnt * sizeof(uint32_t));
+	/*
+	 * Issue the command exactly once, with the input length trimmed to
+	 * the command header plus the actual payload (not the full max-sized
+	 * stack buffer).
+	 */
+	rc = mlx5_glue->devx_general_cmd(ctx, in,
+					 MLX5_ST_SZ_BYTES(access_register_in) +
+					 dw_cnt * sizeof(uint32_t),
+					 out, sizeof(out));
+	if (rc)
+		goto error;
+	status = MLX5_GET(access_register_out, out, status);
+	if (status) {
+		int syndrome = MLX5_GET(access_register_out, out, syndrome);
+
+		DRV_LOG(DEBUG, "Failed to write access NIC register 0x%X, "
+			"status %x, syndrome = %x",
+			reg_id, status, syndrome);
+		return -1;
+	}
+	return 0;
+error:
+	rc = (rc > 0) ? -rc : rc;
+	return rc;
+}
+
/**
* Allocate flow counters via devx interface.
*
MLX5_SET(create_mkey_in, in, pg_access, attr->pg_access);
MLX5_SET(mkc, mkc, lw, 0x1);
MLX5_SET(mkc, mkc, lr, 0x1);
+ if (attr->set_remote_rw) {
+ MLX5_SET(mkc, mkc, rw, 0x1);
+ MLX5_SET(mkc, mkc, rr, 0x1);
+ }
MLX5_SET(mkc, mkc, qpn, 0xffffff);
MLX5_SET(mkc, mkc, pd, attr->pd);
MLX5_SET(mkc, mkc, mkey_7_0, attr->umem_id & 0xFF);
+ MLX5_SET(mkc, mkc, umr_en, attr->umr_en);
MLX5_SET(mkc, mkc, translations_octword_size, translation_size);
MLX5_SET(mkc, mkc, relaxed_ordering_write,
- attr->relaxed_ordering_write);
- MLX5_SET(mkc, mkc, relaxed_ordering_read,
- attr->relaxed_ordering_read);
+ attr->relaxed_ordering_write);
+ MLX5_SET(mkc, mkc, relaxed_ordering_read, attr->relaxed_ordering_read);
MLX5_SET64(mkc, mkc, start_addr, attr->addr);
MLX5_SET64(mkc, mkc, len, attr->size);
+ MLX5_SET(mkc, mkc, crypto_en, attr->crypto_en);
+ if (attr->crypto_en) {
+ MLX5_SET(mkc, mkc, bsf_en, attr->crypto_en);
+ MLX5_SET(mkc, mkc, bsf_octword_size, 4);
+ }
mkey->obj = mlx5_glue->devx_obj_create(ctx, in, in_size_dw * 4, out,
sizeof(out));
if (!mkey->obj) {
- DRV_LOG(ERR, "Can't create %sdirect mkey - error %d\n",
+ DRV_LOG(ERR, "Can't create %sdirect mkey - error %d",
klm_num ? "an in" : "a ", errno);
rte_errno = errno;
mlx5_free(mkey);
if (status) {
int syndrome = MLX5_GET(query_flow_counter_out, out, syndrome);
- DRV_LOG(ERR, "Bad devX status %x, syndrome = %x", status,
+ DRV_LOG(ERR, "Bad DevX status %x, syndrome = %x", status,
syndrome);
}
return status;
syndrome = MLX5_GET(query_nic_vport_context_out, out, syndrome);
if (status) {
DRV_LOG(DEBUG, "Failed to query NIC vport context, "
- "status %x, syndrome = %x",
- status, syndrome);
+ "status %x, syndrome = %x", status, syndrome);
return -1;
}
vctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
return parse_flex_obj;
}
+/*
+ * Check whether the NIC receive flow table supports all packet-integrity
+ * match bits: inner/outer l3_ok, l4_ok and ipv4/l4 checksum_ok.
+ * Returns non-zero only when every one of the eight capability bits is set.
+ */
+static int
+mlx5_devx_query_pkt_integrity_match(void *hcattr)
+{
+	return MLX5_GET(flow_table_nic_cap, hcattr,
+			ft_field_support_2_nic_receive.inner_l3_ok) &&
+	       MLX5_GET(flow_table_nic_cap, hcattr,
+			ft_field_support_2_nic_receive.inner_l4_ok) &&
+	       MLX5_GET(flow_table_nic_cap, hcattr,
+			ft_field_support_2_nic_receive.outer_l3_ok) &&
+	       MLX5_GET(flow_table_nic_cap, hcattr,
+			ft_field_support_2_nic_receive.outer_l4_ok) &&
+	       MLX5_GET(flow_table_nic_cap, hcattr,
+			ft_field_support_2_nic_receive
+				.inner_ipv4_checksum_ok) &&
+	       MLX5_GET(flow_table_nic_cap, hcattr,
+			ft_field_support_2_nic_receive.inner_l4_checksum_ok) &&
+	       MLX5_GET(flow_table_nic_cap, hcattr,
+			ft_field_support_2_nic_receive
+				.outer_ipv4_checksum_ok) &&
+	       MLX5_GET(flow_table_nic_cap, hcattr,
+			ft_field_support_2_nic_receive.outer_l4_checksum_ok);
+}
+
/**
* Query HCA attributes.
* Using those attributes we can check on run time if the device
uint32_t out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {0};
void *hcattr;
int status, syndrome, rc, i;
+ uint64_t general_obj_types_supported = 0;
MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, op_mod,
syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
if (status) {
DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
- "status %x, syndrome = %x",
- status, syndrome);
+ "status %x, syndrome = %x", status, syndrome);
return -1;
}
hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
(cmd_hca_cap, hcattr, log_min_hairpin_wq_data_sz);
attr->vhca_id = MLX5_GET(cmd_hca_cap, hcattr, vhca_id);
attr->relaxed_ordering_write = MLX5_GET(cmd_hca_cap, hcattr,
- relaxed_ordering_write);
+ relaxed_ordering_write);
attr->relaxed_ordering_read = MLX5_GET(cmd_hca_cap, hcattr,
- relaxed_ordering_read);
+ relaxed_ordering_read);
attr->access_register_user = MLX5_GET(cmd_hca_cap, hcattr,
- access_register_user);
+ access_register_user);
attr->eth_net_offloads = MLX5_GET(cmd_hca_cap, hcattr,
eth_net_offloads);
attr->eth_virt = MLX5_GET(cmd_hca_cap, hcattr, eth_virt);
attr->flex_parser_protocols = MLX5_GET(cmd_hca_cap, hcattr,
flex_parser_protocols);
+ attr->max_geneve_tlv_options = MLX5_GET(cmd_hca_cap, hcattr,
+ max_geneve_tlv_options);
+ attr->max_geneve_tlv_option_data_len = MLX5_GET(cmd_hca_cap, hcattr,
+ max_geneve_tlv_option_data_len);
attr->qos.sup = MLX5_GET(cmd_hca_cap, hcattr, qos);
+ attr->qos.flow_meter_aso_sup = !!(MLX5_GET64(cmd_hca_cap, hcattr,
+ general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_FLOW_METER_ASO);
attr->vdpa.valid = !!(MLX5_GET64(cmd_hca_cap, hcattr,
general_obj_types) &
MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q);
device_frequency_khz);
attr->scatter_fcs_w_decap_disable =
MLX5_GET(cmd_hca_cap, hcattr, scatter_fcs_w_decap_disable);
+ attr->roce = MLX5_GET(cmd_hca_cap, hcattr, roce);
+ attr->rq_ts_format = MLX5_GET(cmd_hca_cap, hcattr, rq_ts_format);
+ attr->sq_ts_format = MLX5_GET(cmd_hca_cap, hcattr, sq_ts_format);
+ attr->steering_format_version =
+ MLX5_GET(cmd_hca_cap, hcattr, steering_format_version);
attr->regex = MLX5_GET(cmd_hca_cap, hcattr, regexp);
attr->regexp_num_of_engines = MLX5_GET(cmd_hca_cap, hcattr,
regexp_num_of_engines);
- attr->flow_hit_aso = !!(MLX5_GET64(cmd_hca_cap, hcattr,
- general_obj_types) &
+ /* Read the general_obj_types bitmap and extract the relevant bits. */
+ general_obj_types_supported = MLX5_GET64(cmd_hca_cap, hcattr,
+ general_obj_types);
+ attr->vdpa.valid = !!(general_obj_types_supported &
+ MLX5_GENERAL_OBJ_TYPES_CAP_VIRTQ_NET_Q);
+ attr->vdpa.queue_counters_valid =
+ !!(general_obj_types_supported &
+ MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_Q_COUNTERS);
+ attr->parse_graph_flex_node =
+ !!(general_obj_types_supported &
+ MLX5_GENERAL_OBJ_TYPES_CAP_PARSE_GRAPH_FLEX_NODE);
+ attr->flow_hit_aso = !!(general_obj_types_supported &
MLX5_GENERAL_OBJ_TYPES_CAP_FLOW_HIT_ASO);
+ attr->geneve_tlv_opt = !!(general_obj_types_supported &
+ MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT);
+ attr->dek = !!(general_obj_types_supported &
+ MLX5_GENERAL_OBJ_TYPES_CAP_DEK);
+ attr->import_kek = !!(general_obj_types_supported &
+ MLX5_GENERAL_OBJ_TYPES_CAP_IMPORT_KEK);
+ attr->credential = !!(general_obj_types_supported &
+ MLX5_GENERAL_OBJ_TYPES_CAP_CREDENTIAL);
+ attr->crypto_login = !!(general_obj_types_supported &
+ MLX5_GENERAL_OBJ_TYPES_CAP_CRYPTO_LOGIN);
+ /* Add reading of other GENERAL_OBJ_TYPES_CAP bits above this line. */
attr->log_max_cq = MLX5_GET(cmd_hca_cap, hcattr, log_max_cq);
attr->log_max_qp = MLX5_GET(cmd_hca_cap, hcattr, log_max_qp);
attr->log_max_cq_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_cq_sz);
attr->log_max_pd = MLX5_GET(cmd_hca_cap, hcattr, log_max_pd);
attr->log_max_srq = MLX5_GET(cmd_hca_cap, hcattr, log_max_srq);
attr->log_max_srq_sz = MLX5_GET(cmd_hca_cap, hcattr, log_max_srq_sz);
+ attr->reg_c_preserve =
+ MLX5_GET(cmd_hca_cap, hcattr, reg_c_preserve);
+ attr->mmo_dma_en = MLX5_GET(cmd_hca_cap, hcattr, dma_mmo);
+ attr->mmo_compress_en = MLX5_GET(cmd_hca_cap, hcattr, compress);
+ attr->mmo_decompress_en = MLX5_GET(cmd_hca_cap, hcattr, decompress);
+ attr->compress_min_block_size = MLX5_GET(cmd_hca_cap, hcattr,
+ compress_min_block_size);
+ attr->log_max_mmo_dma = MLX5_GET(cmd_hca_cap, hcattr, log_dma_mmo_size);
+ attr->log_max_mmo_compress = MLX5_GET(cmd_hca_cap, hcattr,
+ log_compress_mmo_size);
+ attr->log_max_mmo_decompress = MLX5_GET(cmd_hca_cap, hcattr,
+ log_decompress_mmo_size);
+ attr->cqe_compression = MLX5_GET(cmd_hca_cap, hcattr, cqe_compression);
+ attr->mini_cqe_resp_flow_tag = MLX5_GET(cmd_hca_cap, hcattr,
+ mini_cqe_resp_flow_tag);
+ attr->mini_cqe_resp_l3_l4_tag = MLX5_GET(cmd_hca_cap, hcattr,
+ mini_cqe_resp_l3_l4_tag);
+ attr->umr_indirect_mkey_disabled =
+ MLX5_GET(cmd_hca_cap, hcattr, umr_indirect_mkey_disabled);
+ attr->umr_modify_entity_size_disabled =
+ MLX5_GET(cmd_hca_cap, hcattr, umr_modify_entity_size_disabled);
+ attr->crypto = MLX5_GET(cmd_hca_cap, hcattr, crypto);
+ if (attr->crypto)
+ attr->aes_xts = MLX5_GET(cmd_hca_cap, hcattr, aes_xts);
+ attr->ct_offload = !!(MLX5_GET64(cmd_hca_cap, hcattr,
+ general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_CONN_TRACK_OFFLOAD);
if (attr->qos.sup) {
MLX5_SET(query_hca_cap_in, in, op_mod,
MLX5_GET_HCA_CAP_OP_MOD_QOS_CAP |
goto error;
if (status) {
DRV_LOG(DEBUG, "Failed to query devx QOS capabilities,"
- " status %x, syndrome = %x",
- status, syndrome);
+ " status %x, syndrome = %x", status, syndrome);
return -1;
}
hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
- attr->qos.srtcm_sup =
- MLX5_GET(qos_cap, hcattr, flow_meter_srtcm);
+ attr->qos.flow_meter_old =
+ MLX5_GET(qos_cap, hcattr, flow_meter_old);
attr->qos.log_max_flow_meter =
MLX5_GET(qos_cap, hcattr, log_max_flow_meter);
attr->qos.flow_meter_reg_c_ids =
MLX5_GET(qos_cap, hcattr, flow_meter_reg_id);
- attr->qos.flow_meter_reg_share =
- MLX5_GET(qos_cap, hcattr, flow_meter_reg_share);
+ attr->qos.flow_meter =
+ MLX5_GET(qos_cap, hcattr, flow_meter);
attr->qos.packet_pacing =
MLX5_GET(qos_cap, hcattr, packet_pacing);
attr->qos.wqe_rate_pp =
MLX5_GET(qos_cap, hcattr, wqe_rate_pp);
+ if (attr->qos.flow_meter_aso_sup) {
+ attr->qos.log_meter_aso_granularity =
+ MLX5_GET(qos_cap, hcattr,
+ log_meter_aso_granularity);
+ attr->qos.log_meter_aso_max_alloc =
+ MLX5_GET(qos_cap, hcattr,
+ log_meter_aso_max_alloc);
+ attr->qos.log_max_num_meter_aso =
+ MLX5_GET(qos_cap, hcattr,
+ log_max_num_meter_aso);
+ }
}
if (attr->vdpa.valid)
mlx5_devx_cmd_query_hca_vdpa_attr(ctx, &attr->vdpa);
MLX5_GET_HCA_CAP_OP_MOD_NIC_FLOW_TABLE |
MLX5_HCA_CAP_OPMOD_GET_CUR);
- rc = mlx5_glue->devx_general_cmd(ctx,
- in, sizeof(in),
- out, sizeof(out));
+ rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
if (rc)
goto error;
status = MLX5_GET(query_hca_cap_out, out, status);
syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
if (status) {
DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
- "status %x, syndrome = %x",
- status, syndrome);
+ "status %x, syndrome = %x", status, syndrome);
attr->log_max_ft_sampler_num = 0;
return -1;
}
hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
- attr->log_max_ft_sampler_num =
- MLX5_GET(flow_table_nic_cap,
- hcattr, flow_table_properties.log_max_ft_sampler_num);
-
+ attr->log_max_ft_sampler_num = MLX5_GET
+ (flow_table_nic_cap, hcattr,
+ flow_table_properties_nic_receive.log_max_ft_sampler_num);
+ attr->flow.tunnel_header_0_1 = MLX5_GET
+ (flow_table_nic_cap, hcattr,
+ ft_field_support_2_nic_receive.tunnel_header_0_1);
+ attr->pkt_integrity_match = mlx5_devx_query_pkt_integrity_match(hcattr);
+ attr->inner_ipv4_ihl = MLX5_GET
+ (flow_table_nic_cap, hcattr,
+ ft_field_support_2_nic_receive.inner_ipv4_ihl);
+ attr->outer_ipv4_ihl = MLX5_GET
+ (flow_table_nic_cap, hcattr,
+ ft_field_support_2_nic_receive.outer_ipv4_ihl);
/* Query HCA offloads for Ethernet protocol. */
memset(in, 0, sizeof(in));
memset(out, 0, sizeof(out));
MLX5_GET_HCA_CAP_OP_MOD_ETHERNET_OFFLOAD_CAPS |
MLX5_HCA_CAP_OPMOD_GET_CUR);
- rc = mlx5_glue->devx_general_cmd(ctx,
- in, sizeof(in),
- out, sizeof(out));
+ rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
if (rc) {
attr->eth_net_offloads = 0;
goto error;
syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
if (status) {
DRV_LOG(DEBUG, "Failed to query devx HCA capabilities, "
- "status %x, syndrome = %x",
- status, syndrome);
+ "status %x, syndrome = %x", status, syndrome);
attr->eth_net_offloads = 0;
return -1;
}
hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
attr->wqe_vlan_insert = MLX5_GET(per_protocol_networking_offload_caps,
hcattr, wqe_vlan_insert);
+ attr->csum_cap = MLX5_GET(per_protocol_networking_offload_caps,
+ hcattr, csum_cap);
attr->lro_cap = MLX5_GET(per_protocol_networking_offload_caps, hcattr,
lro_cap);
attr->tunnel_lro_gre = MLX5_GET(per_protocol_networking_offload_caps,
attr->rss_ind_tbl_cap = MLX5_GET
(per_protocol_networking_offload_caps,
hcattr, rss_ind_tbl_cap);
- if (attr->wqe_inline_mode != MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
- return 0;
- if (attr->eth_virt) {
+ /* Query HCA attribute for ROCE. */
+ if (attr->roce) {
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+ MLX5_SET(query_hca_cap_in, in, opcode,
+ MLX5_CMD_OP_QUERY_HCA_CAP);
+ MLX5_SET(query_hca_cap_in, in, op_mod,
+ MLX5_GET_HCA_CAP_OP_MOD_ROCE |
+ MLX5_HCA_CAP_OPMOD_GET_CUR);
+ rc = mlx5_glue->devx_general_cmd(ctx, in, sizeof(in),
+ out, sizeof(out));
+ if (rc)
+ goto error;
+ status = MLX5_GET(query_hca_cap_out, out, status);
+ syndrome = MLX5_GET(query_hca_cap_out, out, syndrome);
+ if (status) {
+ DRV_LOG(DEBUG,
+ "Failed to query devx HCA ROCE capabilities, "
+ "status %x, syndrome = %x", status, syndrome);
+ return -1;
+ }
+ hcattr = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+ attr->qp_ts_format = MLX5_GET(roce_caps, hcattr, qp_ts_format);
+ }
+ if (attr->eth_virt &&
+ attr->wqe_inline_mode == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT) {
rc = mlx5_devx_cmd_query_nic_vport_context(ctx, 0, attr);
if (rc) {
attr->eth_virt = 0;
MLX5_SET(wq, wq_ctx, hw_counter, wq_attr->hw_counter);
MLX5_SET(wq, wq_ctx, sw_counter, wq_attr->sw_counter);
MLX5_SET(wq, wq_ctx, log_wq_stride, wq_attr->log_wq_stride);
- MLX5_SET(wq, wq_ctx, log_wq_pg_sz, wq_attr->log_wq_pg_sz);
+ if (wq_attr->log_wq_pg_sz > MLX5_ADAPTER_PAGE_SHIFT)
+ MLX5_SET(wq, wq_ctx, log_wq_pg_sz,
+ wq_attr->log_wq_pg_sz - MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET(wq, wq_ctx, log_wq_sz, wq_attr->log_wq_sz);
MLX5_SET(wq, wq_ctx, dbr_umem_valid, wq_attr->dbr_umem_valid);
MLX5_SET(wq, wq_ctx, wq_umem_valid, wq_attr->wq_umem_valid);
MLX5_SET(rqc, rq_ctx, cqn, rq_attr->cqn);
MLX5_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id);
MLX5_SET(rqc, rq_ctx, rmpn, rq_attr->rmpn);
+ MLX5_SET(sqc, rq_ctx, ts_format, rq_attr->ts_format);
wq_ctx = MLX5_ADDR_OF(rqc, rq_ctx, wq);
wq_attr = &rq_attr->wq_attr;
devx_cmd_fill_wq_data(wq_ctx, wq_attr);
sq_attr->packet_pacing_rate_limit_index);
MLX5_SET(sqc, sq_ctx, tis_lst_sz, sq_attr->tis_lst_sz);
MLX5_SET(sqc, sq_ctx, tis_num_0, sq_attr->tis_num);
+ MLX5_SET(sqc, sq_ctx, ts_format, sq_attr->ts_format);
wq_ctx = MLX5_ADDR_OF(sqc, sq_ctx, wq);
wq_attr = &sq_attr->wq_attr;
devx_cmd_fill_wq_data(wq_ctx, wq_attr);
return -ret;
}
+/*
+ * Dump a single flow rule via the rdma-core dump API.
+ * Returns 0 on success (or when rule_info is NULL), a negative errno-style
+ * value otherwise; -ENOTSUP when the glue lacks dump support.
+ */
+int
+mlx5_devx_cmd_flow_single_dump(void *rule_info __rte_unused,
+			FILE *file __rte_unused)
+{
+#ifdef HAVE_MLX5_DR_FLOW_DUMP_RULE
+	int ret = 0;
+
+	if (rule_info)
+		ret = mlx5_glue->dr_dump_rule(file, rule_info);
+	return -ret;
+#else
+	return -ENOTSUP;
+#endif
+}
+
/*
* Create CQ using DevX API.
*
} else {
MLX5_SET64(cqc, cqctx, dbr_addr, attr->db_addr);
}
- MLX5_SET(cqc, cqctx, cqe_sz, attr->cqe_size);
+ MLX5_SET(cqc, cqctx, cqe_sz, (RTE_CACHE_LINE_SIZE == 128) ?
+ MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B);
MLX5_SET(cqc, cqctx, cc, attr->use_first_only);
MLX5_SET(cqc, cqctx, oi, attr->overrun_ignore);
MLX5_SET(cqc, cqctx, log_cq_size, attr->log_cq_size);
- MLX5_SET(cqc, cqctx, log_page_size, attr->log_page_size -
- MLX5_ADAPTER_PAGE_SHIFT);
+ if (attr->log_page_size > MLX5_ADAPTER_PAGE_SHIFT)
+ MLX5_SET(cqc, cqctx, log_page_size,
+ attr->log_page_size - MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET(cqc, cqctx, c_eqn, attr->eqn);
MLX5_SET(cqc, cqctx, uar_page, attr->uar_page_id);
MLX5_SET(cqc, cqctx, cqe_comp_en, !!attr->cqe_comp_en);
- MLX5_SET(cqc, cqctx, mini_cqe_res_format,
- attr->mini_cqe_res_format);
+ MLX5_SET(cqc, cqctx, mini_cqe_res_format, attr->mini_cqe_res_format);
MLX5_SET(cqc, cqctx, mini_cqe_res_format_ext,
attr->mini_cqe_res_format_ext);
- MLX5_SET(cqc, cqctx, cqe_sz, attr->cqe_size);
if (attr->q_umem_valid) {
MLX5_SET(create_cq_in, in, cq_umem_valid, attr->q_umem_valid);
MLX5_SET(create_cq_in, in, cq_umem_id, attr->q_umem_id);
MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
MLX5_SET(qpc, qpc, pd, attr->pd);
+ MLX5_SET(qpc, qpc, ts_format, attr->ts_format);
if (attr->uar_index) {
MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
MLX5_SET(qpc, qpc, uar_page, attr->uar_index);
- MLX5_SET(qpc, qpc, log_page_size, attr->log_page_size -
- MLX5_ADAPTER_PAGE_SHIFT);
+ if (attr->log_page_size > MLX5_ADAPTER_PAGE_SHIFT)
+ MLX5_SET(qpc, qpc, log_page_size,
+ attr->log_page_size - MLX5_ADAPTER_PAGE_SHIFT);
if (attr->sq_size) {
MLX5_ASSERT(RTE_IS_POWER_OF_2(attr->sq_size));
MLX5_SET(qpc, qpc, cqn_snd, attr->cqn);
ppd->id = MLX5_GET(alloc_pd_out, out, pd);
return ppd;
}
+
+/**
+ * Create general object of type FLOW_METER_ASO using DevX API.
+ *
+ * @param[in] ctx
+ * Context returned from mlx5 open_device() glue function.
+ * @param [in] pd
+ * PD value to associate the FLOW_METER_ASO object with.
+ * @param [in] log_obj_size
+ * log_obj_size define to allocate number of 2 * meters
+ * in one FLOW_METER_ASO object.
+ *
+ * @return
+ * The DevX object created, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_flow_meter_aso_obj(void *ctx, uint32_t pd,
+					uint32_t log_obj_size)
+{
+	uint32_t in[MLX5_ST_SZ_DW(create_flow_meter_aso_in)] = {0};
+	/* Zero-initialize: obj_id is read back from here after creation. */
+	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+	struct mlx5_devx_obj *flow_meter_aso_obj;
+	void *ptr;
+
+	flow_meter_aso_obj = mlx5_malloc(MLX5_MEM_ZERO,
+					 sizeof(*flow_meter_aso_obj),
+					 0, SOCKET_ID_ANY);
+	if (!flow_meter_aso_obj) {
+		DRV_LOG(ERR, "Failed to allocate FLOW_METER_ASO object data");
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	/* Command header: create a FLOW_METER_ASO general object range. */
+	ptr = MLX5_ADDR_OF(create_flow_meter_aso_in, in, hdr);
+	MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode,
+		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+	MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type,
+		 MLX5_GENERAL_OBJ_TYPE_FLOW_METER_ASO);
+	MLX5_SET(general_obj_in_cmd_hdr, ptr, log_obj_range,
+		 log_obj_size);
+	/* Object body: associate the ASO access with the given PD. */
+	ptr = MLX5_ADDR_OF(create_flow_meter_aso_in, in, flow_meter_aso);
+	MLX5_SET(flow_meter_aso, ptr, access_pd, pd);
+	flow_meter_aso_obj->obj = mlx5_glue->devx_obj_create(
+						ctx, in, sizeof(in),
+						out, sizeof(out));
+	if (!flow_meter_aso_obj->obj) {
+		rte_errno = errno;
+		DRV_LOG(ERR, "Failed to create FLOW_METER_ASO obj using DevX.");
+		mlx5_free(flow_meter_aso_obj);
+		return NULL;
+	}
+	flow_meter_aso_obj->id = MLX5_GET(general_obj_out_cmd_hdr,
+					  out, obj_id);
+	return flow_meter_aso_obj;
+}
+
+/*
+ * Create general object of type CONN_TRACK_OFFLOAD using DevX API.
+ *
+ * @param[in] ctx
+ * Context returned from mlx5 open_device() glue function.
+ * @param [in] pd
+ * PD value to associate the CONN_TRACK_OFFLOAD ASO object with.
+ * @param [in] log_obj_size
+ * log_obj_size to allocate its power of 2 * objects
+ * in one CONN_TRACK_OFFLOAD bulk allocation.
+ *
+ * @return
+ * The DevX object created, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_conn_track_offload_obj(void *ctx, uint32_t pd,
+					    uint32_t log_obj_size)
+{
+	uint32_t in[MLX5_ST_SZ_DW(create_conn_track_aso_in)] = {0};
+	/* Zero-initialize: obj_id is read back from here after creation. */
+	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+	struct mlx5_devx_obj *ct_aso_obj;
+	void *ptr;
+
+	ct_aso_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ct_aso_obj),
+				 0, SOCKET_ID_ANY);
+	if (!ct_aso_obj) {
+		DRV_LOG(ERR, "Failed to allocate CONN_TRACK_OFFLOAD object.");
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	/* Command header: create a CONN_TRACK_OFFLOAD general object range. */
+	ptr = MLX5_ADDR_OF(create_conn_track_aso_in, in, hdr);
+	MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode,
+		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+	MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type,
+		 MLX5_GENERAL_OBJ_TYPE_CONN_TRACK_OFFLOAD);
+	MLX5_SET(general_obj_in_cmd_hdr, ptr, log_obj_range, log_obj_size);
+	/* Object body: associate the ASO access with the given PD. */
+	ptr = MLX5_ADDR_OF(create_conn_track_aso_in, in, conn_track_offload);
+	MLX5_SET(conn_track_offload, ptr, conn_track_aso_access_pd, pd);
+	ct_aso_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
+						     out, sizeof(out));
+	if (!ct_aso_obj->obj) {
+		rte_errno = errno;
+		DRV_LOG(ERR, "Failed to create CONN_TRACK_OFFLOAD obj by using DevX.");
+		mlx5_free(ct_aso_obj);
+		return NULL;
+	}
+	ct_aso_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+	return ct_aso_obj;
+}
+
+/**
+ * Create general object of type GENEVE TLV option using DevX API.
+ *
+ * @param[in] ctx
+ * Context returned from mlx5 open_device() glue function.
+ * @param [in] class
+ * TLV option variable value of class
+ * @param [in] type
+ * TLV option variable value of type
+ * @param [in] len
+ * TLV option variable value of len
+ *
+ * @return
+ * The DevX object created, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_geneve_tlv_option(void *ctx,
+		uint16_t class, uint8_t type, uint8_t len)
+{
+	uint32_t in[MLX5_ST_SZ_DW(create_geneve_tlv_option_in)] = {0};
+	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+	struct mlx5_devx_obj *obj;
+	void *hdr;
+	void *opt;
+
+	obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*obj), 0, SOCKET_ID_ANY);
+	if (!obj) {
+		DRV_LOG(ERR, "Failed to allocate geneve tlv option object.");
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	hdr = MLX5_ADDR_OF(create_geneve_tlv_option_in, in, hdr);
+	opt = MLX5_ADDR_OF(create_geneve_tlv_option_in, in,
+			   geneve_tlv_opt);
+	/* Command header: create a GENEVE_TLV_OPT general object. */
+	MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode,
+		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+	MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type,
+		 MLX5_GENERAL_OBJ_TYPE_GENEVE_TLV_OPT);
+	/* Option body: class arrives big-endian and is converted to CPU order. */
+	MLX5_SET(geneve_tlv_option, opt, option_class,
+		 rte_be_to_cpu_16(class));
+	MLX5_SET(geneve_tlv_option, opt, option_type, type);
+	MLX5_SET(geneve_tlv_option, opt, option_data_length, len);
+	obj->obj = mlx5_glue->devx_obj_create(ctx, in,
+		sizeof(in), out, sizeof(out));
+	if (!obj->obj) {
+		rte_errno = errno;
+		DRV_LOG(ERR, "Failed to create Geneve tlv option "
+				"Obj using DevX.");
+		mlx5_free(obj);
+		return NULL;
+	}
+	obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+	return obj;
+}
+
+/**
+ * Query the counter set ID attached to a verbs WQ (RQ) via DevX.
+ *
+ * @param[in] wq
+ *   Pointer to the verbs WQ (struct ibv_wq) to query.
+ * @param[out] counter_set_id
+ *   Where the RQ context's counter_set_id is stored on success.
+ *
+ * @return
+ *   0 on success, a negative value otherwise (-ENOTSUP without DV support).
+ */
+int
+mlx5_devx_cmd_wq_query(void *wq, uint32_t *counter_set_id)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+	uint32_t in[MLX5_ST_SZ_DW(query_rq_in)] = {0};
+	uint32_t out[MLX5_ST_SZ_DW(query_rq_out)] = {0};
+	int rc;
+	void *rq_ctx;
+
+	MLX5_SET(query_rq_in, in, opcode, MLX5_CMD_OP_QUERY_RQ);
+	MLX5_SET(query_rq_in, in, rqn, ((struct ibv_wq *)wq)->wq_num);
+	rc = mlx5_glue->devx_wq_query(wq, in, sizeof(in), out, sizeof(out));
+	if (rc) {
+		rte_errno = errno;
+		DRV_LOG(ERR, "Failed to query WQ counter set ID using DevX - "
+			"rc = %d, errno = %d.", rc, errno);
+		return -rc;
+	}
+	rq_ctx = MLX5_ADDR_OF(query_rq_out, out, rq_context);
+	*counter_set_id = MLX5_GET(rqc, rq_ctx, counter_set_id);
+	return 0;
+#else
+	(void)wq;
+	(void)counter_set_id;
+	return -ENOTSUP;
+#endif
+}
+
+/*
+ * Allocate queue counters via devx interface.
+ *
+ * @param[in] ctx
+ * Context returned from mlx5 open_device() glue function.
+ *
+ * @return
+ * Pointer to counter object on success, a NULL value otherwise and
+ * rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_queue_counter_alloc(void *ctx)
+{
+	uint32_t in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
+	uint32_t out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
+	struct mlx5_devx_obj *dcs;
+
+	dcs = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*dcs), 0, SOCKET_ID_ANY);
+	if (!dcs) {
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	/* ALLOC_Q_COUNTER takes no parameters beyond the opcode. */
+	MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
+	dcs->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
+					      sizeof(out));
+	if (!dcs->obj) {
+		DRV_LOG(DEBUG, "Can't allocate q counter set by DevX - error "
+			"%d.", errno);
+		rte_errno = errno;
+		mlx5_free(dcs);
+		return NULL;
+	}
+	dcs->id = MLX5_GET(alloc_q_counter_out, out, counter_set_id);
+	return dcs;
+}
+
+/**
+ * Query queue counters values.
+ *
+ * @param[in] dcs
+ * devx object of the queue counter set.
+ * @param[in] clear
+ * Whether hardware should clear the counters after the query or not.
+ * @param[out] out_of_buffers
+ * Number of dropped occurred due to lack of WQE for the associated QPs/RQs.
+ *
+ * @return
+ * 0 on success, a negative value otherwise.
+ */
+int
+mlx5_devx_cmd_queue_counter_query(struct mlx5_devx_obj *dcs, int clear,
+				  uint32_t *out_of_buffers)
+{
+	/*
+	 * Size the output buffer in dwords (the array element type), not in
+	 * bytes - using MLX5_ST_SZ_BYTES here would over-allocate 4x.
+	 */
+	uint32_t out[MLX5_ST_SZ_DW(query_q_counter_out)] = {0};
+	uint32_t in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};
+	int rc;
+
+	MLX5_SET(query_q_counter_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_Q_COUNTER);
+	MLX5_SET(query_q_counter_in, in, op_mod, 0);
+	MLX5_SET(query_q_counter_in, in, counter_set_id, dcs->id);
+	MLX5_SET(query_q_counter_in, in, clear, !!clear);
+	rc = mlx5_glue->devx_obj_query(dcs->obj, in, sizeof(in), out,
+				       sizeof(out));
+	if (rc) {
+		DRV_LOG(ERR, "Failed to query devx q counter set - rc %d", rc);
+		rte_errno = rc;
+		return -rc;
+	}
+	*out_of_buffers = MLX5_GET(query_q_counter_out, out, out_of_buffer);
+	return 0;
+}
+
+/**
+ * Create general object of type DEK using DevX API.
+ *
+ * @param[in] ctx
+ * Context returned from mlx5 open_device() glue function.
+ * @param [in] attr
+ * Pointer to DEK attributes structure.
+ *
+ * @return
+ * The DevX object created, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_dek_obj(void *ctx, struct mlx5_devx_dek_attr *attr)
+{
+	uint32_t in[MLX5_ST_SZ_DW(create_dek_in)] = {0};
+	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+	struct mlx5_devx_obj *dek_obj = NULL;
+	void *ptr = NULL, *key_addr = NULL;
+
+	dek_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*dek_obj),
+			      0, SOCKET_ID_ANY);
+	if (dek_obj == NULL) {
+		DRV_LOG(ERR, "Failed to allocate DEK object data");
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	/* Command header: create a DEK general object. */
+	ptr = MLX5_ADDR_OF(create_dek_in, in, hdr);
+	MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode,
+		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+	MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type,
+		 MLX5_GENERAL_OBJ_TYPE_DEK);
+	/* DEK body: key attributes from the caller-provided structure. */
+	ptr = MLX5_ADDR_OF(create_dek_in, in, dek);
+	MLX5_SET(dek, ptr, key_size, attr->key_size);
+	MLX5_SET(dek, ptr, has_keytag, attr->has_keytag);
+	MLX5_SET(dek, ptr, key_purpose, attr->key_purpose);
+	MLX5_SET(dek, ptr, pd, attr->pd);
+	MLX5_SET64(dek, ptr, opaque, attr->opaque);
+	/*
+	 * The full key field is always copied, regardless of key_size;
+	 * presumably attr->key is MLX5_CRYPTO_KEY_MAX_SIZE bytes - confirm
+	 * against the attribute structure definition.
+	 */
+	key_addr = MLX5_ADDR_OF(dek, ptr, key);
+	memcpy(key_addr, (void *)(attr->key), MLX5_CRYPTO_KEY_MAX_SIZE);
+	dek_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
+						  out, sizeof(out));
+	if (dek_obj->obj == NULL) {
+		rte_errno = errno;
+		DRV_LOG(ERR, "Failed to create DEK obj using DevX.");
+		mlx5_free(dek_obj);
+		return NULL;
+	}
+	dek_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+	return dek_obj;
+}
+
+/**
+ * Create general object of type IMPORT_KEK using DevX API.
+ *
+ * @param[in] ctx
+ * Context returned from mlx5 open_device() glue function.
+ * @param [in] attr
+ * Pointer to IMPORT_KEK attributes structure.
+ *
+ * @return
+ * The DevX object created, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_import_kek_obj(void *ctx,
+				    struct mlx5_devx_import_kek_attr *attr)
+{
+	uint32_t in[MLX5_ST_SZ_DW(create_import_kek_in)] = {0};
+	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+	struct mlx5_devx_obj *import_kek_obj = NULL;
+	void *ptr = NULL, *key_addr = NULL;
+
+	import_kek_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*import_kek_obj),
+				     0, SOCKET_ID_ANY);
+	if (import_kek_obj == NULL) {
+		DRV_LOG(ERR, "Failed to allocate IMPORT_KEK object data");
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	/* Command header: create an IMPORT_KEK general object. */
+	ptr = MLX5_ADDR_OF(create_import_kek_in, in, hdr);
+	MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode,
+		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+	MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type,
+		 MLX5_GENERAL_OBJ_TYPE_IMPORT_KEK);
+	/* KEK body: key size plus the full-width key material copy. */
+	ptr = MLX5_ADDR_OF(create_import_kek_in, in, import_kek);
+	MLX5_SET(import_kek, ptr, key_size, attr->key_size);
+	key_addr = MLX5_ADDR_OF(import_kek, ptr, key);
+	memcpy(key_addr, (void *)(attr->key), MLX5_CRYPTO_KEY_MAX_SIZE);
+	import_kek_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
+							 out, sizeof(out));
+	if (import_kek_obj->obj == NULL) {
+		rte_errno = errno;
+		DRV_LOG(ERR, "Failed to create IMPORT_KEK object using DevX.");
+		mlx5_free(import_kek_obj);
+		return NULL;
+	}
+	import_kek_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+	return import_kek_obj;
+}
+
+/**
+ * Create general object of type CREDENTIAL using DevX API.
+ *
+ * @param[in] ctx
+ * Context returned from mlx5 open_device() glue function.
+ * @param [in] attr
+ * Pointer to CREDENTIAL attributes structure.
+ *
+ * @return
+ * The DevX object created, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_credential_obj(void *ctx,
+				    struct mlx5_devx_credential_attr *attr)
+{
+	uint32_t in[MLX5_ST_SZ_DW(create_credential_in)] = {0};
+	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+	struct mlx5_devx_obj *credential_obj = NULL;
+	void *ptr = NULL, *credential_addr = NULL;
+
+	credential_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*credential_obj),
+				     0, SOCKET_ID_ANY);
+	if (credential_obj == NULL) {
+		DRV_LOG(ERR, "Failed to allocate CREDENTIAL object data");
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	/* Command header: create a CREDENTIAL general object. */
+	ptr = MLX5_ADDR_OF(create_credential_in, in, hdr);
+	MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode,
+		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+	MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type,
+		 MLX5_GENERAL_OBJ_TYPE_CREDENTIAL);
+	/* Credential body: role plus the fixed-size credential blob. */
+	ptr = MLX5_ADDR_OF(create_credential_in, in, credential);
+	MLX5_SET(credential, ptr, credential_role, attr->credential_role);
+	credential_addr = MLX5_ADDR_OF(credential, ptr, credential);
+	memcpy(credential_addr, (void *)(attr->credential),
+	       MLX5_CRYPTO_CREDENTIAL_SIZE);
+	credential_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
+							 out, sizeof(out));
+	if (credential_obj->obj == NULL) {
+		rte_errno = errno;
+		DRV_LOG(ERR, "Failed to create CREDENTIAL object using DevX.");
+		mlx5_free(credential_obj);
+		return NULL;
+	}
+	credential_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+	return credential_obj;
+}
+
+/**
+ * Create general object of type CRYPTO_LOGIN using DevX API.
+ *
+ * @param[in] ctx
+ * Context returned from mlx5 open_device() glue function.
+ * @param [in] attr
+ * Pointer to CRYPTO_LOGIN attributes structure.
+ *
+ * @return
+ * The DevX object created, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_crypto_login_obj(void *ctx,
+				      struct mlx5_devx_crypto_login_attr *attr)
+{
+	uint32_t in[MLX5_ST_SZ_DW(create_crypto_login_in)] = {0};
+	uint32_t out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
+	struct mlx5_devx_obj *crypto_login_obj = NULL;
+	void *ptr = NULL, *credential_addr = NULL;
+
+	crypto_login_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*crypto_login_obj),
+				       0, SOCKET_ID_ANY);
+	if (crypto_login_obj == NULL) {
+		DRV_LOG(ERR, "Failed to allocate CRYPTO_LOGIN object data");
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+	/* Command header: create a CRYPTO_LOGIN general object. */
+	ptr = MLX5_ADDR_OF(create_crypto_login_in, in, hdr);
+	MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode,
+		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+	MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type,
+		 MLX5_GENERAL_OBJ_TYPE_CRYPTO_LOGIN);
+	/*
+	 * Login body: pointers to previously created CREDENTIAL and
+	 * IMPORT_KEK objects, plus the credential blob itself.
+	 */
+	ptr = MLX5_ADDR_OF(create_crypto_login_in, in, crypto_login);
+	MLX5_SET(crypto_login, ptr, credential_pointer,
+		 attr->credential_pointer);
+	MLX5_SET(crypto_login, ptr, session_import_kek_ptr,
+		 attr->session_import_kek_ptr);
+	credential_addr = MLX5_ADDR_OF(crypto_login, ptr, credential);
+	memcpy(credential_addr, (void *)(attr->credential),
+	       MLX5_CRYPTO_CREDENTIAL_SIZE);
+	crypto_login_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in),
+							   out, sizeof(out));
+	if (crypto_login_obj->obj == NULL) {
+		rte_errno = errno;
+		DRV_LOG(ERR, "Failed to create CRYPTO_LOGIN obj using DevX.");
+		mlx5_free(crypto_login_obj);
+		return NULL;
+	}
+	crypto_login_obj->id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+	return crypto_login_obj;
+}