+/**
+ * Create a DevX CQ object for an Rx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param cqe_n
+ * Number of CQEs in CQ.
+ * @param idx
+ * Queue index in DPDK Rx queue array.
+ * @param rxq_obj
+ * Pointer to Rx queue object data.
+ *
+ * @return
+ * The DevX CQ object initialised on success, NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_devx_obj *
+mlx5_devx_cq_new(struct rte_eth_dev *dev, unsigned int cqe_n, uint16_t idx,
+ struct mlx5_rxq_obj *rxq_obj)
+{
+ struct mlx5_devx_obj *cq_obj = NULL;
+ struct mlx5_devx_cq_attr cq_attr = { 0 };
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ size_t page_size = rte_mem_page_size();
+ uint32_t lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
+ uint32_t eqn = 0;
+ void *buf = NULL;
+ uint16_t event_nums[1] = {0};
+ uint32_t log_cqe_n;
+ uint32_t cq_size;
+ int ret = 0;
+
+ if (page_size == (size_t)-1) {
+ DRV_LOG(ERR, "Failed to get page_size.");
+ goto error;
+ }
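+ /*
+ * CQE compression cannot be combined with HW timestamping or LRO,
+ * hence it is enabled only when neither feature is requested.
+ */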
+ if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
+ !rxq_data->lro) {
+ cq_attr.cqe_comp_en = MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
+#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
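+ /*
+ * With Multi-Packet RQ the mini-CQE carries the checksum and stride
+ * index, otherwise it carries the RSS hash result.
+ */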
+ cq_attr.mini_cqe_res_format =
+ mlx5_rxq_mprq_enabled(rxq_data) ?
+ MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
+ MLX5DV_CQE_RES_FORMAT_HASH;
+#else
+ cq_attr.mini_cqe_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
+#endif
+ /*
+ * For vectorized Rx the CQE count must not be doubled, so that
+ * cq_ci and rq_ci stay aligned; double it only for the scalar path.
+ */
+ if (mlx5_rxq_check_vec_support(rxq_data) < 0)
+ cqe_n *= 2;
+ } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
+ DRV_LOG(DEBUG,
+ "port %u Rx CQE compression is disabled for HW"
+ " timestamp",
+ dev->data->port_id);
+ } else if (priv->config.cqe_comp && rxq_data->lro) {
+ DRV_LOG(DEBUG,
+ "port %u Rx CQE compression is disabled for LRO",
+ dev->data->port_id);
+ }
+#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
+ if (priv->config.cqe_pad)
+ cq_attr.cqe_size = MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
+#endif
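+ /* Round the CQE count up to the next power of two, as required by HW. */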
+ log_cqe_n = log2above(cqe_n);
+ cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n);
+ /* Query the EQN for this core. */
+ if (mlx5_glue->devx_query_eqn(priv->sh->ctx, lcore, &eqn)) {
+ DRV_LOG(ERR, "Failed to query EQN for CQ.");
+ goto error;
+ }
+ cq_attr.eqn = eqn;
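+ /*
+ * Allocate the CQE ring, page aligned and on the same NUMA socket as
+ * the queue, so it can be registered as umem below.
+ */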
+ buf = rte_calloc_socket(__func__, 1, cq_size, page_size,
+ rxq_ctrl->socket);
+ if (!buf) {
+ DRV_LOG(ERR, "Failed to allocate memory for CQ.");
+ goto error;
+ }
+ rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)buf;
+ rxq_ctrl->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, buf,
+ cq_size,
+ IBV_ACCESS_LOCAL_WRITE);
+ if (!rxq_ctrl->cq_umem) {
+ DRV_LOG(ERR, "Failed to register umem for CQ.");
+ goto error;
+ }
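+ /*
+ * Fill the remaining CQ attributes: UAR page, CQE ring umem and
+ * doorbell record umem.
+ */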
+ cq_attr.uar_page_id = priv->sh->devx_rx_uar->page_id;
+ cq_attr.q_umem_id = rxq_ctrl->cq_umem->umem_id;
+ cq_attr.q_umem_valid = 1;
+ cq_attr.log_cq_size = log_cqe_n;
+ cq_attr.log_page_size = rte_log2_u32(page_size);
+ cq_attr.db_umem_offset = rxq_ctrl->cq_dbr_offset;
+ cq_attr.db_umem_id = rxq_ctrl->cq_dbr_umem_id;
+ cq_attr.db_umem_valid = rxq_ctrl->cq_dbr_umem_id_valid;
+ cq_obj = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr);
+ if (!cq_obj)
+ goto error;
+ rxq_data->cqe_n = log_cqe_n;
+ rxq_data->cqn = cq_obj->id;
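+ /*
+ * If an event channel was created for this queue, subscribe the new
+ * CQ to it; event number 0 is assumed to denote a CQ completion event.
+ */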
+ if (rxq_obj->devx_channel) {
+ ret = mlx5_glue->devx_subscribe_devx_event
+ (rxq_obj->devx_channel,
+ cq_obj->obj,
+ sizeof(event_nums),
+ event_nums,
+ (uint64_t)(uintptr_t)cq_obj);
+ if (ret) {
+ DRV_LOG(ERR, "Fail to subscribe CQ to event channel.");
+ rte_errno = errno;
+ goto error;
+ }
+ }
+ /* Initialise CQ to 1's to mark HW ownership for all CQEs. */
+ memset((void *)(uintptr_t)rxq_data->cqes, 0xFF, cq_size);
+ return cq_obj;
+error:
+ rxq_release_devx_cq_resources(rxq_ctrl);
+ return NULL;
+}
+
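+/*
+ * Usage sketch (illustrative; field names assumed from struct
+ * mlx5_rxq_obj): the caller on the DevX Rx queue creation path is
+ * expected to store the returned object and fail the queue setup when
+ * NULL is returned, e.g.:
+ *
+ *     tmpl->devx_cq = mlx5_devx_cq_new(dev, cqe_n, idx, tmpl);
+ *     if (!tmpl->devx_cq) {
+ *             DRV_LOG(ERR, "Failed to create CQ using DevX.");
+ *             goto error;
+ *     }
+ */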