#endif
return -ret;
}
+
+/*
+ * Create CQ using DevX API.
+ *
+ * @param[in] ctx
+ * ibv_context returned from mlx5dv_open_device.
+ * @param[in] attr
+ * Pointer to CQ attributes structure.
+ *
+ * @return
+ * The DevX object created, NULL otherwise and rte_errno is set.
+ */
+struct mlx5_devx_obj *
+mlx5_devx_cmd_create_cq(struct ibv_context *ctx, struct mlx5_devx_cq_attr *attr)
+{
+ uint32_t in[MLX5_ST_SZ_DW(create_cq_in)] = {0};
+ uint32_t out[MLX5_ST_SZ_DW(create_cq_out)] = {0};
+ struct mlx5_devx_obj *cq_obj = rte_zmalloc(__func__, sizeof(*cq_obj),
+ 0);
+ void *cqctx = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+
+ if (!cq_obj) {
+ DRV_LOG(ERR, "Failed to allocate CQ object memory.");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+ MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
+ if (attr->db_umem_valid) {
+ MLX5_SET(cqc, cqctx, dbr_umem_valid, attr->db_umem_valid);
+ MLX5_SET(cqc, cqctx, dbr_umem_id, attr->db_umem_id);
+ MLX5_SET64(cqc, cqctx, dbr_addr, attr->db_umem_offset);
+ } else {
+ MLX5_SET64(cqc, cqctx, dbr_addr, attr->db_addr);
+ }
+ MLX5_SET(cqc, cqctx, cc, attr->use_first_only);
+ MLX5_SET(cqc, cqctx, oi, attr->overrun_ignore);
+ MLX5_SET(cqc, cqctx, log_cq_size, attr->log_cq_size);
+ MLX5_SET(cqc, cqctx, log_page_size, attr->log_page_size);
+ MLX5_SET(cqc, cqctx, c_eqn, attr->eqn);
+ MLX5_SET(cqc, cqctx, uar_page, attr->uar_page_id);
+ if (attr->q_umem_valid) {
+ MLX5_SET(create_cq_in, in, cq_umem_valid, attr->q_umem_valid);
+ MLX5_SET(create_cq_in, in, cq_umem_id, attr->q_umem_id);
+ MLX5_SET64(create_cq_in, in, cq_umem_offset,
+ attr->q_umem_offset);
+ }
+ cq_obj->obj = mlx5_glue->devx_obj_create(ctx, in, sizeof(in), out,
+ sizeof(out));
+ if (!cq_obj->obj) {
+ rte_errno = errno;
+ DRV_LOG(ERR, "Failed to create CQ using DevX errno=%d.", errno);
+ rte_free(cq_obj);
+ return NULL;
+ }
+ cq_obj->id = MLX5_GET(create_cq_out, out, cqn);
+ return cq_obj;
+}
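For context, a minimal caller sketch (not part of the patch). The names ctx, uar_page, eqn, cq_umem_id, db_umem_id and db_offset are placeholders for resources the caller is assumed to have obtained through the usual DevX/umem setup, and the attribute values are illustrative only:

	/* Illustrative only: a 256-entry CQ whose ring and doorbell record
	 * both live in DevX-registered umems.
	 */
	struct mlx5_devx_cq_attr cq_attr = {
		.log_cq_size = 8,           /* 2^8 = 256 CQEs. */
		.log_page_size = 0,         /* Assuming the PRM convention of
					     * log2(page size) relative to the
					     * 4KB adapter page. */
		.uar_page_id = uar_page,    /* Placeholder: DevX UAR page index. */
		.eqn = eqn,                 /* Placeholder: EQ to attach the CQ to. */
		.q_umem_valid = 1,
		.q_umem_id = cq_umem_id,    /* Placeholder: umem holding the CQE ring. */
		.q_umem_offset = 0,
		.db_umem_valid = 1,
		.db_umem_id = db_umem_id,   /* Placeholder: umem holding the doorbell record. */
		.db_umem_offset = db_offset,
	};
	struct mlx5_devx_obj *cq = mlx5_devx_cmd_create_cq(ctx, &cq_attr);

	if (cq == NULL)
		DRV_LOG(ERR, "CQ creation failed, rte_errno=%d.", rte_errno);
	/* On teardown the object is released with the existing
	 * mlx5_devx_cmd_destroy(). */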
};
+/* CQ attributes structure, used by CQ operations. */
+struct mlx5_devx_cq_attr {
+ uint32_t q_umem_valid:1;
+ uint32_t db_umem_valid:1;
+ uint32_t use_first_only:1;
+ uint32_t overrun_ignore:1;
+ uint32_t log_cq_size:5;
+ uint32_t log_page_size:5;
+ uint32_t uar_page_id;
+ uint32_t q_umem_id;
+ uint64_t q_umem_offset;
+ uint32_t db_umem_id;
+ uint64_t db_umem_offset;
+ uint32_t eqn;
+ uint64_t db_addr;
+};
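As the branch in mlx5_devx_cmd_create_cq() above shows, the doorbell record can be described in either of two ways; a short sketch of both initializations, where dbr_umem_id, dbr_offset and dbr_addr are placeholder values:

	/* Doorbell record inside a DevX-registered umem. */
	attr.db_umem_valid = 1;
	attr.db_umem_id = dbr_umem_id;    /* Placeholder: id from umem registration. */
	attr.db_umem_offset = dbr_offset; /* Placeholder: record offset in that umem. */

	/* Alternatively, pass the doorbell record address directly. */
	attr.db_umem_valid = 0;
	attr.db_addr = dbr_addr;          /* Placeholder: doorbell record address. */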
+
/* mlx5_devx_cmds.c */
struct mlx5_devx_obj *mlx5_devx_cmd_flow_counter_alloc(struct ibv_context *ctx,
struct mlx5_devx_obj *mlx5_devx_cmd_create_td(struct ibv_context *ctx);
int mlx5_devx_cmd_flow_dump(void *fdb_domain, void *rx_domain, void *tx_domain,
FILE *file);
+struct mlx5_devx_obj *mlx5_devx_cmd_create_cq(struct ibv_context *ctx,
+ struct mlx5_devx_cq_attr *attr);
#endif /* RTE_PMD_MLX5_DEVX_CMDS_H_ */
enum {
MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
MLX5_CMD_OP_CREATE_MKEY = 0x200,
+ MLX5_CMD_OP_CREATE_CQ = 0x400,
MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754,
MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN = 0x816,
MLX5_CMD_OP_CREATE_TIR = 0x900,
u8 reserved_at_8[0x60]; // 14h-1Ch
};
+struct mlx5_ifc_cqc_bits {
+ u8 status[0x4];
+ u8 as_notify[0x1];
+ u8 initiator_src_dct[0x1];
+ u8 dbr_umem_valid[0x1];
+ u8 reserved_at_7[0x1];
+ u8 cqe_sz[0x3];
+ u8 cc[0x1];
+ u8 reserved_at_c[0x1];
+ u8 scqe_break_moderation_en[0x1];
+ u8 oi[0x1];
+ u8 cq_period_mode[0x2];
+ u8 cqe_comp_en[0x1];
+ u8 mini_cqe_res_format[0x2];
+ u8 st[0x4];
+ u8 reserved_at_18[0x8];
+ u8 dbr_umem_id[0x20];
+ u8 reserved_at_40[0x14];
+ u8 page_offset[0x6];
+ u8 reserved_at_5a[0x6];
+ u8 reserved_at_60[0x3];
+ u8 log_cq_size[0x5];
+ u8 uar_page[0x18];
+ u8 reserved_at_80[0x4];
+ u8 cq_period[0xc];
+ u8 cq_max_count[0x10];
+ u8 reserved_at_a0[0x18];
+ u8 c_eqn[0x8];
+ u8 reserved_at_c0[0x3];
+ u8 log_page_size[0x5];
+ u8 reserved_at_c8[0x18];
+ u8 reserved_at_e0[0x20];
+ u8 reserved_at_100[0x8];
+ u8 last_notified_index[0x18];
+ u8 reserved_at_120[0x8];
+ u8 last_solicit_index[0x18];
+ u8 reserved_at_140[0x8];
+ u8 consumer_counter[0x18];
+ u8 reserved_at_160[0x8];
+ u8 producer_counter[0x18];
+ u8 local_partition_id[0xc];
+ u8 process_id[0x14];
+ u8 reserved_at_1a0[0x20];
+ u8 dbr_addr[0x40];
+};
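The command payload is built against this layout with the MLX5_SET()/MLX5_GET() helpers already used elsewhere in mlx5_prm.h; a small illustrative sketch of how a field is addressed (buffer and values are made up for the example):

	/* Requires mlx5_prm.h for the MLX5_* helpers. */
	uint32_t cqc_buf[MLX5_ST_SZ_DW(cqc)] = {0};

	/* log_cq_size starts at bit 0x63 of the cqc, so the macros place it
	 * at bits 28:24 of the fourth big-endian dword of the buffer. */
	MLX5_SET(cqc, cqc_buf, log_cq_size, 8);
	MLX5_SET(cqc, cqc_buf, oi, 1);    /* Overrun-ignore flag in the first dword. */
	/* MLX5_GET(cqc, cqc_buf, log_cq_size) now reads back 8. */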
+
+struct mlx5_ifc_create_cq_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x8];
+ u8 cqn[0x18];
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_cq_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0x40];
+ struct mlx5_ifc_cqc_bits cq_context;
+ u8 cq_umem_offset[0x40];
+ u8 cq_umem_id[0x20];
+ u8 cq_umem_valid[0x1];
+ u8 reserved_at_2e1[0x1f];
+ u8 reserved_at_300[0x580];
+ u8 pas[];
+};
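As a sanity check on the bit accounting above, the layouts sum to 0x200 bits for the CQ context, 0x80 bits for the output mailbox and 0x880 bits (before the variable PAS array) for the input mailbox. A hedged compile-time sketch, assuming rte_common.h is included for RTE_BUILD_BUG_ON and that the checks are placed inside a function body:

	/* Each u8 array element in the _bits structs stands for one bit, so
	 * sizeof() yields the width in bits and MLX5_ST_SZ_DW() the mailbox
	 * size in 32-bit words. Illustrative checks, not part of the patch. */
	RTE_BUILD_BUG_ON(sizeof(struct mlx5_ifc_cqc_bits) != 0x200);  /* 64B CQ context. */
	RTE_BUILD_BUG_ON(MLX5_ST_SZ_DW(create_cq_out) != 0x80 / 32);  /* 16B output mailbox. */
	RTE_BUILD_BUG_ON(MLX5_ST_SZ_DW(create_cq_in) != 0x880 / 32);  /* 272B input mailbox. */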
+
/* CQE format mask. */
#define MLX5E_CQE_FORMAT_MASK 0xc
DPDK_20.02 {
global:
+ mlx5_devx_cmd_create_cq;
mlx5_devx_cmd_create_rq;
mlx5_devx_cmd_create_rqt;
mlx5_devx_cmd_create_sq;