} else {
MLX5_SET64(cqc, cqctx, dbr_addr, attr->db_addr);
}
- MLX5_SET(cqc, cqctx, cqe_sz, attr->cqe_size);
+ MLX5_SET(cqc, cqctx, cqe_sz, (RTE_CACHE_LINE_SIZE == 128) ?
+ MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B);
MLX5_SET(cqc, cqctx, cc, attr->use_first_only);
MLX5_SET(cqc, cqctx, oi, attr->overrun_ignore);
MLX5_SET(cqc, cqctx, log_cq_size, attr->log_cq_size);
MLX5_SET(cqc, cqctx, mini_cqe_res_format, attr->mini_cqe_res_format);
MLX5_SET(cqc, cqctx, mini_cqe_res_format_ext,
attr->mini_cqe_res_format_ext);
- MLX5_SET(cqc, cqctx, cqe_sz, attr->cqe_size);
if (attr->q_umem_valid) {
MLX5_SET(create_cq_in, in, cq_umem_valid, attr->q_umem_valid);
MLX5_SET(create_cq_in, in, cq_umem_id, attr->q_umem_id);
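/*
 * The hunk above moves the CQE size decision into
 * mlx5_devx_cmd_create_cq() itself: instead of trusting a per-caller
 * attr->cqe_size, the value is derived once from the build-time cache
 * line size. Below is a minimal standalone sketch of that selection;
 * RTE_CACHE_LINE_SIZE (normally from rte_config.h) and the
 * MLX5_CQE_SIZE_* encodings are stubbed here only so the snippet
 * compiles on its own.
 */
#include <stdio.h>

#ifndef RTE_CACHE_LINE_SIZE
#define RTE_CACHE_LINE_SIZE 64	/* stub; the real value comes from the build */
#endif
#define MLX5_CQE_SIZE_64B 0x0	/* stub PRM encodings */
#define MLX5_CQE_SIZE_128B 0x1

int main(void)
{
	/* Same ternary as the new code in mlx5_devx_cmd_create_cq():
	 * 128B CQEs only when the CPU cache line is 128 bytes.
	 */
	unsigned int cqe_sz = (RTE_CACHE_LINE_SIZE == 128) ?
			      MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;

	printf("cqe_sz CQC field: %u\n", cqe_sz);
	return 0;
}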
uint32_t cqe_comp_en:1;
uint32_t mini_cqe_res_format:2;
uint32_t mini_cqe_res_format_ext:2;
- uint32_t cqe_size:3;
uint32_t log_cq_size:5;
uint32_t log_page_size:5;
uint32_t uar_page_id;
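/*
 * With cqe_size dropped from struct mlx5_devx_cq_attr, the attribute
 * block carries only what callers still control. A sketch of the struct
 * after this hunk, abridged to the fields visible above (the full
 * definition lives in the mlx5 common code); the _sketch suffix marks
 * it as an illustration, not the real type.
 */
#include <stdint.h>

struct mlx5_devx_cq_attr_sketch {
	uint32_t cqe_comp_en:1;
	uint32_t mini_cqe_res_format:2;
	uint32_t mini_cqe_res_format_ext:2;
	/* cqe_size:3 removed: the devx layer now decides it. */
	uint32_t log_cq_size:5;
	uint32_t log_page_size:5;
	uint32_t uar_page_id;
	/* remaining fields unchanged by this patch */
};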
"Port %u Rx CQE compression is disabled for LRO.",
dev->data->port_id);
}
- if (priv->config.cqe_pad)
- cq_attr.cqe_size = MLX5_CQE_SIZE_128B;
log_cqe_n = log2above(cqe_n);
cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n);
buf = rte_calloc_socket(__func__, 1, cq_size, page_size,
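/*
 * The cqe_pad knob removed above used to force 128B Rx CQEs explicitly.
 * It became redundant because struct mlx5_cqe in the mlx5 PRM header
 * already grows to 128 bytes via leading padding whenever the cache
 * line is 128 bytes, which is also why the sizeof()-based checks
 * removed in the following hunks were equivalent to testing
 * RTE_CACHE_LINE_SIZE. A toy model of that size relationship (stub
 * struct with an assumed layout, not the real CQE definition):
 */
#include <assert.h>
#include <stdint.h>

#ifndef RTE_CACHE_LINE_SIZE
#define RTE_CACHE_LINE_SIZE 64	/* stub; normally set by the build */
#endif

struct toy_cqe {	/* stand-in for struct mlx5_cqe */
#if (RTE_CACHE_LINE_SIZE == 128)
	uint8_t padding[64];	/* pads the CQE up to the cache line */
#endif
	uint8_t data[64];	/* the 64 bytes of real CQE fields */
};

int main(void)
{
	/* sizeof(CQE) is 128 exactly when RTE_CACHE_LINE_SIZE is 128. */
	assert(sizeof(struct toy_cqe) ==
	       (RTE_CACHE_LINE_SIZE == 128 ? 128 : 64));
	return 0;
}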
DRV_LOG(ERR, "Failed to allocate CQ door-bell.");
goto error;
}
- cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
- MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar);
cq_attr.eqn = priv->sh->eqn;
cq_attr.q_umem_valid = 1;
goto error;
}
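/*
 * Tx CQ creation above drops the same sizeof(struct mlx5_cqe) check;
 * the devx layer now picks the CQE size on its own. The two hunks that
 * follow repeat this cleanup for the Rearm Queue and Clock Queue (the
 * Tx packet pacing infrastructure).
 */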
/* Create completion queue object for Rearm Queue. */
- cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
- MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
cq_attr.eqn = sh->eqn;
cq_attr.q_umem_valid = 1;
goto error;
}
/* Create completion queue object for Clock Queue. */
- cq_attr.cqe_size = (sizeof(struct mlx5_cqe) == 128) ?
- MLX5_CQE_SIZE_128B : MLX5_CQE_SIZE_64B;
cq_attr.use_first_only = 1;
cq_attr.overrun_ignore = 1;
cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar);
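/*
 * After this change every CQ creator shown here (Rx, Tx, Rearm Queue,
 * Clock Queue) inherits the cache-line-derived CQE size from
 * mlx5_devx_cmd_create_cq(); no caller sets cq_attr.cqe_size anymore.
 */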