* Pointer to the error CQE.
*
* @return
- * The last Tx buffer element to free.
+ *   Negative value if queue recovery failed,
+ * the last Tx buffer element to free otherwise.
*/
-uint16_t
+int
mlx5_tx_error_cqe_handle(struct mlx5_txq_data *restrict txq,
volatile struct mlx5_err_cqe *err_cqe)
{
return txq->elts_head;
}
/* Recovering failed - try again later on the same WQE. */
+ return -1;
} else {
txq->cq_ci++;
}
(pkts_n - part) * sizeof(struct rte_mbuf *));
}
+/**
+ * Update completion queue consuming index via doorbell
+ * and flush the completed data buffers.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param last_cqe
+ *   Pointer to the last valid CQE. If not NULL, update txq->wqe_pi
+ *   and flush the completed data buffers.
+ * @param itail
+ *   If not negative, flush the data buffers up to this index.
+ * @param olx
+ * Configured Tx offloads mask. It is fully defined at
+ * compile time and may be used for optimization.
+ */
+static __rte_always_inline void
+mlx5_tx_comp_flush(struct mlx5_txq_data *restrict txq,
+ volatile struct mlx5_cqe *last_cqe,
+ int itail,
+ unsigned int olx __rte_unused)
+{
+ uint16_t tail;
+
+ if (likely(last_cqe != NULL)) {
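+		/*
+		 * The CQE provides the index of the last completed WQE,
+		 * the control segment of that WQE holds the tail index
+		 * of the buffers to free.
+		 */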
+ txq->wqe_pi = rte_be_to_cpu_16(last_cqe->wqe_counter);
+ tail = ((volatile struct mlx5_wqe_cseg *)
+ (txq->wqes + (txq->wqe_pi & txq->wqe_m)))->misc;
+ } else if (itail >= 0) {
+ tail = (uint16_t)itail;
+ } else {
+ return;
+ }
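+	/* Update the CQ consumer index via the doorbell record. */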
+ rte_compiler_barrier();
+ *txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
+ if (likely(tail != txq->elts_tail)) {
+ mlx5_tx_free_elts(txq, tail, olx);
+ assert(tail == txq->elts_tail);
+ }
+}
+
/**
* Manage TX completions. This routine checks the CQ for
* arrived CQEs, deduces the last accomplished WQE in SQ,
unsigned int olx __rte_unused)
{
unsigned int count = MLX5_TX_COMP_MAX_CQE;
- bool update = false;
- uint16_t tail = txq->elts_tail;
+ volatile struct mlx5_cqe *last_cqe = NULL;
int ret;
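+	/*
+	 * The ret value holds either a CQE status or a tail index
+	 * returned by the error handler, the negative statuses keep
+	 * these two cases distinguishable.
+	 */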
+	static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be a negative value");
+	static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be a negative value");
do {
volatile struct mlx5_cqe *cqe;
assert(ret == MLX5_CQE_STATUS_HW_OWN);
break;
}
- /* Some error occurred, try to restart. */
+ /*
+ * Some error occurred, try to restart.
+	 * There is no barrier after the WQE related doorbell
+	 * write, make sure all writes are completed here
+	 * before the possible SQ reset.
+ */
rte_wmb();
- tail = mlx5_tx_error_cqe_handle
+ ret = mlx5_tx_error_cqe_handle
(txq, (volatile struct mlx5_err_cqe *)cqe);
- if (likely(tail != txq->elts_tail)) {
- mlx5_tx_free_elts(txq, tail, olx);
- assert(tail == txq->elts_tail);
- }
- /* Allow flushing all CQEs from the queue. */
- count = txq->cqe_s;
- } else {
- volatile struct mlx5_wqe_cseg *cseg;
-
- /* Normal transmit completion. */
- ++txq->cq_ci;
- rte_cio_rmb();
- txq->wqe_pi = rte_be_to_cpu_16(cqe->wqe_counter);
- cseg = (volatile struct mlx5_wqe_cseg *)
- (txq->wqes + (txq->wqe_pi & txq->wqe_m));
- tail = cseg->misc;
+ /*
+		 * Flush the buffers and update the consuming
+		 * index if recovery succeeded, otherwise just
+		 * try to recover later.
+ */
+ last_cqe = NULL;
+ break;
}
+ /* Normal transmit completion. */
+ ++txq->cq_ci;
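+		/* Remember the valid CQE, the flush derives wqe_pi from it. */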
+ last_cqe = cqe;
#ifndef NDEBUG
if (txq->cq_pi)
--txq->cq_pi;
#endif
- update = true;
/*
* We have to restrict the amount of processed CQEs
* in one tx_burst routine call. The CQ may be large
* latency.
*/
} while (--count);
- if (likely(tail != txq->elts_tail)) {
- /* Free data buffers from elts. */
- mlx5_tx_free_elts(txq, tail, olx);
- assert(tail == txq->elts_tail);
- }
- if (likely(update)) {
- /* Update the consumer index. */
- rte_compiler_barrier();
- *txq->cq_db =
- rte_cpu_to_be_32(txq->cq_ci);
- }
+ mlx5_tx_comp_flush(txq, last_cqe, ret, olx);
}
/**
* Pointer to TX queue structure.
* @param loc
* Pointer to burst routine local context.
+ * @param multi
+ *   Routine is called from the multi-segment sending loop,
+ *   do not correct elts_head according to pkts_copy.
* @param olx
* Configured Tx offloads mask. It is fully defined at
* compile time and may be used for optimization.
static __rte_always_inline void
mlx5_tx_request_completion(struct mlx5_txq_data *restrict txq,
struct mlx5_txq_local *restrict loc,
+ bool multi,
unsigned int olx)
{
uint16_t head = txq->elts_head;
unsigned int part;
- part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc->pkts_sent -
- (MLX5_TXOFF_CONFIG(MULTI) ? loc->pkts_copy : 0);
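+	/* Account for the packets sent but not yet stored in the elts. */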
+ part = (MLX5_TXOFF_CONFIG(INLINE) || multi) ?
+ 0 : loc->pkts_sent - loc->pkts_copy;
head += part;
if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
(MLX5_TXOFF_CONFIG(INLINE) &&
memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
buf += MLX5_DSEG_MIN_INLINE_SIZE;
pdst += MLX5_DSEG_MIN_INLINE_SIZE;
+ len -= MLX5_DSEG_MIN_INLINE_SIZE;
/* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
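+	/* Wrap around the WQE ring end before storing the VLAN tag. */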
+ if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
+ pdst = (uint8_t *)txq->wqes;
*(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
loc->mbuf->vlan_tci);
pdst += sizeof(struct rte_vlan_hdr);
- if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
- pdst = (uint8_t *)txq->wqes;
/*
* The WQEBB space availability is checked by caller.
* Here we should be aware of WQE ring buffer wraparound only.
txq->wqe_ci += (ds + 3) / 4;
loc->wqe_free -= (ds + 3) / 4;
/* Request CQE generation if limits are reached. */
- mlx5_tx_request_completion(txq, loc, olx);
+ mlx5_tx_request_completion(txq, loc, true, olx);
return MLX5_TXCMP_CODE_MULTI;
}
txq->wqe_ci += (ds + 3) / 4;
loc->wqe_free -= (ds + 3) / 4;
/* Request CQE generation if limits are reached. */
- mlx5_tx_request_completion(txq, loc, olx);
+ mlx5_tx_request_completion(txq, loc, true, olx);
return MLX5_TXCMP_CODE_MULTI;
}
txq->wqe_ci += (ds + 3) / 4;
loc->wqe_free -= (ds + 3) / 4;
/* Request CQE generation if limits are reached. */
- mlx5_tx_request_completion(txq, loc, olx);
+ mlx5_tx_request_completion(txq, loc, true, olx);
return MLX5_TXCMP_CODE_MULTI;
}
continue;
/* Here ends the series of multi-segment packets. */
if (MLX5_TXOFF_CONFIG(TSO) &&
- unlikely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
+ unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
return MLX5_TXCMP_CODE_TSO;
return MLX5_TXCMP_CODE_SINGLE;
}
++loc->pkts_sent;
--pkts_n;
/* Request CQE generation if limits are reached. */
- mlx5_tx_request_completion(txq, loc, olx);
+ mlx5_tx_request_completion(txq, loc, false, olx);
if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
return MLX5_TXCMP_CODE_EXIT;
loc->mbuf = *pkts++;
if (MLX5_TXOFF_CONFIG(MULTI) &&
unlikely(NB_SEGS(loc->mbuf) > 1))
return MLX5_TXCMP_CODE_MULTI;
- if (unlikely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
+ if (likely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
return MLX5_TXCMP_CODE_SINGLE;
/* Continue with the next TSO packet. */
}
txq->wqe_ci += (ds + 3) / 4;
loc->wqe_free -= (ds + 3) / 4;
/* Request CQE generation if limits are reached. */
- mlx5_tx_request_completion(txq, loc, olx);
+ mlx5_tx_request_completion(txq, loc, false, olx);
}
/*
txq->wqe_ci += (len + 3) / 4;
loc->wqe_free -= (len + 3) / 4;
/* Request CQE generation if limits are reached. */
- mlx5_tx_request_completion(txq, loc, olx);
+ mlx5_tx_request_completion(txq, loc, false, olx);
}
/**
loc->wqe_free -= (2 + part + 3) / 4;
pkts_n -= part;
/* Request CQE generation if limits are reached. */
- mlx5_tx_request_completion(txq, loc, olx);
+ mlx5_tx_request_completion(txq, loc, false, olx);
if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
return MLX5_TXCMP_CODE_EXIT;
loc->mbuf = *pkts++;
++loc->pkts_sent;
--pkts_n;
/* Request CQE generation if limits are reached. */
- mlx5_tx_request_completion(txq, loc, olx);
+ mlx5_tx_request_completion(txq, loc, false, olx);
if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
return MLX5_TXCMP_CODE_EXIT;
loc->mbuf = *pkts++;
assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
+ if (unlikely(!pkts_n))
+ return 0;
+ loc.pkts_sent = 0;
+ loc.pkts_copy = 0;
+ loc.wqe_last = NULL;
+
+send_loop:
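+	/* Record how many packets were sent before this loop iteration. */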
+ loc.pkts_loop = loc.pkts_sent;
/*
* Check if there are some CQEs, if any:
* - process an encountered errors
* - free related mbufs
* - doorbell the NIC about processed CQEs
*/
- if (unlikely(!pkts_n))
- return 0;
- rte_prefetch0(*pkts);
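+	/* Prefetch the first packet to be sent in this iteration. */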
+ rte_prefetch0(*(pkts + loc.pkts_sent));
mlx5_tx_handle_completion(txq, olx);
/*
* Calculate the number of available resources - elts and WQEs.
loc.wqe_free = txq->wqe_s -
(uint16_t)(txq->wqe_ci - txq->wqe_pi);
if (unlikely(!loc.elts_free || !loc.wqe_free))
- return 0;
- loc.pkts_sent = 0;
- loc.pkts_copy = 0;
- loc.wqe_last = NULL;
+ return loc.pkts_sent;
for (;;) {
/*
* Fetch the packet from array. Usually this is
*/
assert(MLX5_TXOFF_CONFIG(INLINE) || loc.pkts_sent >= loc.pkts_copy);
/* Take a shortcut if nothing is sent. */
- if (unlikely(loc.pkts_sent == 0))
- return 0;
+ if (unlikely(loc.pkts_sent == loc.pkts_loop))
+ return loc.pkts_sent;
/*
* Ring QP doorbell immediately after WQE building completion
* to improve latencies. The pure software related data treatment
*/
mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, 0);
/* Not all of the mbufs may be stored into elts yet. */
- part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent -
- (MLX5_TXOFF_CONFIG(MULTI) ? loc.pkts_copy : 0);
+ part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
/*
* There are some single-segment mbufs not stored in elts.
* inlined mbufs.
*/
mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
+ loc.pkts_copy = loc.pkts_sent;
}
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Increment sent packets counter. */
#endif
assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
+ if (pkts_n > loc.pkts_sent) {
+ /*
+		 * If the burst size is large there might be not enough
+		 * CQEs fetched from the completion queue and not enough
+		 * resources freed to send all the packets.
+ */
+ goto send_loop;
+ }
return loc.pkts_sent;
}