mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
struct mlx5_devx_wq_attr *wq_attr)
{
- wq_attr->end_padding_mode = priv->config.cqe_pad ?
+ wq_attr->end_padding_mode = priv->config.hw_padding ?
MLX5_WQ_END_PAD_MODE_ALIGN :
MLX5_WQ_END_PAD_MODE_NONE;
wq_attr->pd = priv->sh->pdn;
if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
!rxq_data->lro) {
cq_attr.cqe_comp_en = 1u;
- /*
- * Select CSUM miniCQE format only for non-vectorized MPRQ
- * Rx burst, use HASH miniCQE format for everything else.
- */
- if (mlx5_rxq_check_vec_support(rxq_data) < 0 &&
- mlx5_rxq_mprq_enabled(rxq_data))
- cq_attr.mini_cqe_res_format =
- MLX5_CQE_RESP_FORMAT_CSUM_STRIDX;
- else
- cq_attr.mini_cqe_res_format =
- MLX5_CQE_RESP_FORMAT_HASH;
+ rxq_data->mcqe_format = priv->config.cqe_comp_fmt;
+ rxq_data->byte_mask = UINT32_MAX;
+ switch (priv->config.cqe_comp_fmt) {
+ case MLX5_CQE_RESP_FORMAT_HASH:
+ /* fallthrough */
+ case MLX5_CQE_RESP_FORMAT_CSUM:
+ /*
+ * Select CSUM miniCQE format only for non-vectorized
+ * MPRQ Rx burst, use HASH miniCQE format for others.
+ */
+ if (mlx5_rxq_check_vec_support(rxq_data) < 0 &&
+ mlx5_rxq_mprq_enabled(rxq_data))
+ cq_attr.mini_cqe_res_format =
+ MLX5_CQE_RESP_FORMAT_CSUM_STRIDX;
+ else
+ cq_attr.mini_cqe_res_format =
+ MLX5_CQE_RESP_FORMAT_HASH;
+ rxq_data->mcqe_format = cq_attr.mini_cqe_res_format;
+ break;
+ case MLX5_CQE_RESP_FORMAT_FTAG_STRIDX:
+ rxq_data->byte_mask = MLX5_LEN_WITH_MARK_MASK;
+ /* fallthrough */
+ case MLX5_CQE_RESP_FORMAT_CSUM_STRIDX:
+ cq_attr.mini_cqe_res_format = priv->config.cqe_comp_fmt;
+ break;
+ case MLX5_CQE_RESP_FORMAT_L34H_STRIDX:
+ cq_attr.mini_cqe_res_format = 0;
+ cq_attr.mini_cqe_res_format_ext = 1;
+ break;
+ }
+ DRV_LOG(DEBUG,
+ "Port %u Rx CQE compression is enabled, format %d.",
+ dev->data->port_id, priv->config.cqe_comp_fmt);
/*
* For vectorized Rx, it must not be doubled in order to
* make cq_ci and rq_ci aligned.
static void
mlx5_txq_release_devx_sq_resources(struct mlx5_txq_obj *txq_obj)
{
+ /*
+ * Release all DevX SQ resources held by @txq_obj: the DevX SQ
+ * object, its registered umem, the host buffer and the doorbell
+ * record. Each pointer is reset to NULL after release so the
+ * routine is idempotent — a repeated call (e.g. from an error
+ * path that already ran cleanup) cannot double-free.
+ */
- if (txq_obj->sq_devx)
+ if (txq_obj->sq_devx) {
claim_zero(mlx5_devx_cmd_destroy(txq_obj->sq_devx));
- if (txq_obj->sq_umem)
+ txq_obj->sq_devx = NULL;
+ }
+ if (txq_obj->sq_umem) {
claim_zero(mlx5_glue->devx_umem_dereg(txq_obj->sq_umem));
- if (txq_obj->sq_buf)
+ txq_obj->sq_umem = NULL;
+ }
+ if (txq_obj->sq_buf) {
mlx5_free(txq_obj->sq_buf);
- if (txq_obj->sq_dbrec_page)
+ txq_obj->sq_buf = NULL;
+ }
+ if (txq_obj->sq_dbrec_page) {
+ /* Doorbell record is carved from a shared page; give the slot back. */
claim_zero(mlx5_release_dbr(&txq_obj->txq_ctrl->priv->dbrpgs,
mlx5_os_get_umem_id
(txq_obj->sq_dbrec_page->umem),
txq_obj->sq_dbrec_offset));
+ txq_obj->sq_dbrec_page = NULL;
+ }
}
/**
txq_ctrl->uar_mmap_offset =
mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar);
txq_uar_init(txq_ctrl);
+ dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
return 0;
error:
ret = rte_errno; /* Save rte_errno before cleanup. */