/* Start processing. */
mlx5_tx_complete(txq);
max_elts = (elts_n - (elts_head - txq->elts_tail));
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
if (unlikely(!max_wqe))
return 0;
/* Save elts_head in unused "immediate" field of WQE. */
last_wqe->ctrl3 = txq->elts_head;
txq->elts_comp = 0;
+#ifndef NDEBUG
+ ++txq->cq_pi;
+#endif
} else {
txq->elts_comp = comp;
}
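
A side note on what the new assertion expresses: cq_pi and cq_ci are free-running 16-bit counters, so the number of free CQE slots is the ring size minus the difference of the two, and the arithmetic survives counter wraparound when the difference is reduced back to 16 bits (the sketch below makes that truncation explicit with a cast). A minimal standalone illustration; ring_log2/pi/ci are this sketch's names, not driver fields:

    #include <assert.h>
    #include <stdint.h>

    /* Free slots in a 2^ring_log2 ring with free-running indices. */
    static uint16_t
    ring_free_slots(unsigned int ring_log2, uint16_t pi, uint16_t ci)
    {
    	return (1u << ring_log2) - (uint16_t)(pi - ci);
    }

    int
    main(void)
    {
    	/* 16-entry ring; the producer index has wrapped past 65535. */
    	uint16_t pi = 3;	/* was 65539 before truncation */
    	uint16_t ci = 65533;

    	/* 6 entries are in flight, so 10 slots remain free. */
    	assert(ring_free_slots(4, pi, ci) == 10);
    	return 0;
    }
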
/* Start processing. */
mlx5_tx_complete(txq);
max_elts = (elts_n - (elts_head - txq->elts_tail));
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
if (unlikely(!max_wqe))
return 0;
/* Save elts_head in unused "immediate" field of WQE. */
wqe->ctrl[3] = elts_head;
txq->elts_comp = 0;
+#ifndef NDEBUG
+ ++txq->cq_pi;
+#endif
} else {
txq->elts_comp = comp;
}
/* Start processing. */
mlx5_tx_complete(txq);
max_elts = (elts_n - (elts_head - txq->elts_tail));
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
do {
struct rte_mbuf *buf = *(pkts++);
uintptr_t addr;
/* Save elts_head in unused "immediate" field of WQE. */
wqe->ctrl[3] = elts_head;
txq->elts_comp = 0;
+#ifndef NDEBUG
+ ++txq->cq_pi;
+#endif
} else {
txq->elts_comp = comp;
}
wqe->ctrl[3] = elts_head;
txq->elts_comp = 0;
txq->mpw_comp = txq->wqe_ci;
- txq->cq_pi++;
+#ifndef NDEBUG
+ ++txq->cq_pi;
+#endif
} else {
txq->elts_comp += j;
}
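
For context, these hunks all maintain the same handshake: when a completion is requested, the otherwise unused "immediate" word of the control segment (ctrl[3], or ctrl3 in the first hunk) is stamped with the current elts_head, and the completion path later reads that value back to learn how far it may free transmitted mbufs. A toy model of that round-trip; the struct and function names are invented here, and unlike the real driver this sketch skips the big-endian conversion:

    #include <stdint.h>
    #include <stdio.h>

    struct toy_wqe { uint32_t ctrl[4]; };

    /* Producer side: ask for a CQE and piggy-back the ring position. */
    static void
    toy_request_completion(struct toy_wqe *wqe, uint16_t elts_head)
    {
    	wqe->ctrl[2] = 8;		/* "generate a CQE" flag */
    	wqe->ctrl[3] = elts_head;	/* free everything up to here */
    }

    /* Completion side: recover the stamped position as new elts_tail. */
    static uint16_t
    toy_on_completion(const struct toy_wqe *wqe)
    {
    	return (uint16_t)wqe->ctrl[3];
    }

    int
    main(void)
    {
    	struct toy_wqe wqe = {{0}};

    	toy_request_completion(&wqe, 42);
    	printf("may free up to index %u\n",
    	       (unsigned int)toy_on_completion(&wqe));
    	return 0;
    }
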
uint16_t elts_comp; /* Counter since last completion request. */
uint16_t mpw_comp; /* WQ index since last completion request. */
uint16_t cq_ci; /* Consumer index for completion queue. */
+#ifndef NDEBUG
uint16_t cq_pi; /* Producer index for completion queue. */
+#endif
uint16_t wqe_ci; /* Consumer index for work queue. */
uint16_t wqe_pi; /* Producer index for work queue. */
uint16_t elts_n:4; /* (*elts)[] length (in log2). */
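
Worth noting: compiling cq_pi out under NDEBUG changes the structure's size and the offsets of every field after it, so debug and release objects must never exchange this struct. That is safe for a PMD-internal type, but it is the kind of change that bites when a header is shared. Illustration with a hypothetical struct:

    #include <stdint.h>

    /* Hypothetical struct, for illustration only. */
    struct toy_txq {
    	uint16_t cq_ci;		/* always present */
    #ifndef NDEBUG
    	uint16_t cq_pi;		/* compiled in only with assertions */
    #endif
    	uint16_t wqe_ci;	/* offset shifts when cq_pi vanishes */
    };

    int
    main(void)
    {
    	/* 6 bytes with assertions enabled, 4 without. */
    	return (int)sizeof(struct toy_txq);
    }
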
assert(elts_n > pkts_n);
mlx5_tx_complete(txq);
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
if (unlikely(!pkts_n))
return 0;
for (n = 0; n < pkts_n; ++n) {
wqe->ctrl[2] = rte_cpu_to_be_32(8);
wqe->ctrl[3] = txq->elts_head;
txq->elts_comp = 0;
+#ifndef NDEBUG
++txq->cq_pi;
+#endif
}
#ifdef MLX5_PMD_SOFT_COUNTERS
txq->stats.opackets += n;
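
About the literal written to wqe->ctrl[2]: the value 8 is, as far as I can tell, the CQ_UPDATE ("always report a CQE") flag of the control segment's fm_ce_se field, and the device expects it big-endian, hence rte_cpu_to_be_32(). A named constant would state the intent; the macro name below is this sketch's, not necessarily the PMD's:

    #include <stdint.h>
    #include <rte_byteorder.h>

    #define TOY_WQE_CTRL_CQ_UPDATE (2u << 2)	/* == 8 */

    /* Request a CQE for this WQE; ctrl2 points into the WQE's
     * control segment, which the device reads big-endian. */
    static inline void
    toy_request_cqe(volatile uint32_t *ctrl2)
    {
    	*ctrl2 = rte_cpu_to_be_32(TOY_WQE_CTRL_CQ_UPDATE);
    }
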
assert(elts_n > pkts_n);
mlx5_tx_complete(txq);
max_elts = (elts_n - (elts_head - txq->elts_tail));
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
if (unlikely(!pkts_n))
} else {
/* Request a completion. */
txq->elts_comp = 0;
+#ifndef NDEBUG
++txq->cq_pi;
+#endif
comp_req = 8;
}
/* Fill CTRL in the header. */
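
The comp_req logic above is the usual completion batching: a CQE is requested only once enough descriptors have accumulated since the last request (MLX5_TX_COMP_THRESH in the PMD), so one completion amortizes the cost of freeing many mbufs. A compact model of the policy, with a threshold and names local to this sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define TOY_COMP_THRESH 32	/* stand-in for MLX5_TX_COMP_THRESH */

    /* Returns the flag for the control segment: 8 ("generate a CQE")
     * when a completion is due, 0 otherwise. */
    static uint32_t
    toy_comp_flags(uint16_t *elts_comp, uint16_t just_sent)
    {
    	if ((uint16_t)(*elts_comp + just_sent) >= TOY_COMP_THRESH) {
    		*elts_comp = 0;		/* restart the window */
    		return 8;
    	}
    	*elts_comp += just_sent;	/* keep accumulating */
    	return 0;
    }

    int
    main(void)
    {
    	uint16_t comp = 0;
    	unsigned int i, cqes = 0;

    	for (i = 0; i < 100; ++i)	/* 100 bursts of 4 packets */
    		cqes += toy_comp_flags(&comp, 4) ? 1 : 0;
    	printf("%u completions for 400 packets\n", cqes);	/* 12 */
    	return 0;
    }
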
assert(elts_n > pkts_n);
mlx5_tx_complete(txq);
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
if (unlikely(!pkts_n))
return 0;
for (n = 0; n < pkts_n; ++n) {
wqe->ctrl[2] = rte_cpu_to_be_32(8);
wqe->ctrl[3] = txq->elts_head;
txq->elts_comp = 0;
+#ifndef NDEBUG
++txq->cq_pi;
+#endif
}
#ifdef MLX5_PMD_SOFT_COUNTERS
txq->stats.opackets += n;
assert(elts_n > pkts_n);
mlx5_tx_complete(txq);
max_elts = (elts_n - (elts_head - txq->elts_tail));
+ /* A CQE slot must always be available. */
+ assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
assert(pkts_n <= MLX5_DSEG_MAX - nb_dword_in_hdr);
} else {
/* Request a completion. */
txq->elts_comp = 0;
+#ifndef NDEBUG
++txq->cq_pi;
+#endif
comp_req = 8;
}
/* Fill CTRL in the header. */
(volatile struct mlx5_cqe (*)[])
(uintptr_t)cq_info.buf;
txq_data->cq_ci = 0;
+#ifndef NDEBUG
txq_data->cq_pi = 0;
+#endif
txq_data->wqe_ci = 0;
txq_data->wqe_pi = 0;
txq_ibv->qp = tmpl.qp;
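
The initialization hunk keeps the debug-only counter in lockstep with cq_ci: all indices start at zero, so the free-slot arithmetic asserted above begins from an empty ring. A hypothetical reset helper capturing the same rule (the struct is this sketch's, not the PMD's):

    #include <stdint.h>

    struct toy_txq {
    	uint16_t cq_ci;
    #ifndef NDEBUG
    	uint16_t cq_pi;
    #endif
    	uint16_t wqe_ci;
    	uint16_t wqe_pi;
    };

    static void
    toy_txq_reset(struct toy_txq *txq)
    {
    	txq->cq_ci = 0;
    #ifndef NDEBUG
    	txq->cq_pi = 0;	/* zero CQEs outstanding at start */
    #endif
    	txq->wqe_ci = 0;
    	txq->wqe_pi = 0;
    }
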