uint16_t head = txq->elts_head;
unsigned int part;
- part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc->pkts_sent -
- (MLX5_TXOFF_CONFIG(MULTI) ? loc->pkts_copy : 0);
+ part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc->pkts_sent - loc->pkts_copy;
head += part;
if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
(MLX5_TXOFF_CONFIG(INLINE) &&
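
For context (not part of the patch): with inlining disabled the burst routine tracks loc->pkts_sent, the packets already encoded into WQEs, and loc->pkts_copy, the packets whose mbuf pointers have already been stored into the elts ring. The simplified sketch below, using hypothetical names, illustrates the bookkeeping invariant the new 'part' computation relies on; it is illustrative only, not driver code.

/* Illustrative sketch only -- simplified, hypothetical names. */
struct burst_state {
	unsigned int pkts_sent;  /* packets already encoded into WQEs */
	unsigned int pkts_copy;  /* packets whose mbufs are already stored in elts */
};

/*
 * Single-segment mbufs still pending a store into elts. With inlining
 * disabled this is simply pkts_sent - pkts_copy, whether or not
 * multi-segment support (the MULTI offload) is compiled in -- which is
 * why the MULTI conditional could be dropped from the 'part' computation.
 */
static unsigned int
pending_elts(const struct burst_state *st)
{
	return st->pkts_sent - st->pkts_copy;
}
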
assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
+ if (unlikely(!pkts_n))
+ return 0;
+ loc.pkts_sent = 0;
+ loc.pkts_copy = 0;
+ loc.wqe_last = NULL;
+
+send_loop:
+ loc.pkts_loop = loc.pkts_sent;
/*
* Check if there are some CQEs, if any:
* - process encountered errors
* - free related mbufs
* - doorbell the NIC about processed CQEs
*/
- if (unlikely(!pkts_n))
- return 0;
- rte_prefetch0(*pkts);
+ rte_prefetch0(*(pkts + loc.pkts_sent));
mlx5_tx_handle_completion(txq, olx);
/*
* Calculate the number of available resources - elts and WQEs.
loc.wqe_free = txq->wqe_s -
(uint16_t)(txq->wqe_ci - txq->wqe_pi);
if (unlikely(!loc.elts_free || !loc.wqe_free))
- return 0;
- loc.pkts_sent = 0;
- loc.pkts_copy = 0;
- loc.wqe_last = NULL;
+ return loc.pkts_sent;
for (;;) {
/*
* Fetch the packet from array. Usually this is
*/
assert(MLX5_TXOFF_CONFIG(INLINE) || loc.pkts_sent >= loc.pkts_copy);
/* Take a shortcut if nothing is sent. */
- if (unlikely(loc.pkts_sent == 0))
- return 0;
+ if (unlikely(loc.pkts_sent == loc.pkts_loop))
+ return loc.pkts_sent;
/*
* Ring QP doorbell immediately after WQE building completion
* to improve latencies. The pure software related data treatment
*/
mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, 0);
/* Not all of the mbufs may be stored into elts yet. */
- part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent -
- (MLX5_TXOFF_CONFIG(MULTI) ? loc.pkts_copy : 0);
+ part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
/*
* There are some single-segment mbufs not stored in elts.
* inlined mbufs.
*/
mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
+ loc.pkts_copy = loc.pkts_sent;
}
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Increment sent packets counter. */
#endif
assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
+ if (pkts_n > loc.pkts_sent) {
+ /*
+ * If the burst size is large there might not be enough CQEs
+ * fetched from the completion queue and not enough resources
+ * freed to send all the packets.
+ */
+ goto send_loop;
+ }
return loc.pkts_sent;
}
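
Taken together, the patch turns the burst routine into a loop that keeps draining completions and encoding packets until either the whole burst is sent or no forward progress is made. The sketch below summarizes that control flow; the helper names (drain_completions, encode_packets, and so on) are stand-ins for illustration, not the mlx5 driver's API.

#include <stdint.h>
#include <stdbool.h>

/* Hypothetical stand-ins for the driver's internal steps. */
static void drain_completions(void) { /* process CQEs, free mbufs */ }
static bool resources_available(void) { return true; /* elts/WQEs free? */ }
static uint16_t encode_packets(uint16_t n) { return n; /* build WQEs */ }
static void ring_doorbell(void) { /* notify the NIC */ }

static uint16_t
tx_burst_sketch(uint16_t pkts_n)
{
	uint16_t sent = 0;

	if (pkts_n == 0)
		return 0;
	for (;;) {
		uint16_t loop_start = sent;   /* mirrors loc.pkts_loop */

		drain_completions();
		if (!resources_available())
			return sent;          /* was "return 0" before the patch */
		sent += encode_packets(pkts_n - sent);
		if (sent == loop_start)
			return sent;          /* no forward progress, stop */
		ring_doorbell();
		if (sent >= pkts_n)
			return sent;          /* whole burst done */
		/* Large burst: go around again to drain more completions. */
	}
}
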