+/**
+ * Handle the next CQE.
+ *
+ * Polls one completion queue entry, and when it is software-owned,
+ * advances the TX queue state (wqe_pi, cq_ci) and rings the CQ
+ * doorbell record so the HW can reuse the entry.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ *
+ * @return
+ * The last Tx buffer element to free. If no new completion is
+ * owned by software, the current txq->elts_tail is returned so the
+ * caller frees nothing.
+ */
+static __rte_always_inline uint16_t
+mlx5_tx_cqe_handle(struct mlx5_txq_data *txq)
+{
+ const unsigned int cqe_n = 1 << txq->cqe_n;
+ const unsigned int cqe_cnt = cqe_n - 1;
+ uint16_t last_elts;
+ /* Union views of the same CQE slot: regular vs. error layout. */
+ union {
+ volatile struct mlx5_cqe *cqe;
+ volatile struct mlx5_err_cqe *err_cqe;
+ } u = {
+ .cqe = &(*txq->cqes)[txq->cq_ci & cqe_cnt],
+ };
+ int ret = check_cqe(u.cqe, cqe_n, txq->cq_ci);
+
+ if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
+ if (unlikely(ret == MLX5_CQE_STATUS_ERR))
+ /* Error CQE: delegate recovery to the error handler. */
+ last_elts = mlx5_tx_error_cqe_handle(txq, u.err_cqe);
+ else
+ /* Do not release buffers. */
+ return txq->elts_tail;
+ } else {
+ uint16_t new_wqe_pi = rte_be_to_cpu_16(u.cqe->wqe_counter);
+ volatile struct mlx5_wqe_ctrl *ctrl =
+ (volatile struct mlx5_wqe_ctrl *)
+ tx_mlx5_wqe(txq, new_wqe_pi);
+
+ /* Release completion burst buffers. */
+ /*
+ * NOTE(review): ctrl3 appears to carry the elts index that
+ * was stashed in the WQE control segment at post time —
+ * confirm against the WQE builder.
+ */
+ last_elts = ctrl->ctrl3;
+ txq->wqe_pi = new_wqe_pi;
+ txq->cq_ci++;
+ }
+ /* Order CQE processing before the doorbell record update below. */
+ rte_compiler_barrier();
+ /* Ring CQ doorbell record with the new consumer index. */
+ *txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
+ return last_elts;
+}
+