mlx5_queue_state_modify(struct rte_eth_dev *dev,
struct mlx5_mp_arg_queue_state_modify *sm);
+static inline void
+mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp,
+ volatile struct mlx5_cqe *restrict cqe,
+ uint32_t phcsum);
+
+static inline void
+mlx5_lro_update_hdr(uint8_t *restrict padd,
+ volatile struct mlx5_cqe *restrict cqe,
+ uint32_t len);
+
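The two forward declarations above introduce the LRO header fixup helpers. Once the NIC has coalesced several TCP segments into one buffer, the leading L3/L4 headers no longer describe the merged packet, so the driver must patch them in software. Below is a hedged, IPv4-only sketch of that kind of fixup; the function name is hypothetical, and the real helpers also patch the TCP flags/checksum using data taken from the CQE.

#include <rte_byteorder.h>
#include <rte_ether.h>
#include <rte_ip.h>

/*
 * Hypothetical IPv4-only illustration: after LRO coalescing, the IP
 * total length must be rewritten to cover the merged payload and the
 * header checksum recomputed.
 */
static void
lro_fixup_ipv4_sketch(uint8_t *padd, uint32_t len)
{
	struct rte_ether_hdr *eth = (struct rte_ether_hdr *)padd;
	struct rte_ipv4_hdr *ip = (struct rte_ipv4_hdr *)(eth + 1);

	ip->total_length = rte_cpu_to_be_16((uint16_t)(len - sizeof(*eth)));
	ip->hdr_checksum = 0;
	ip->hdr_checksum = rte_ipv4_cksum(ip);
}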
uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
[0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
};
if (rxq->crc_present)
len -= RTE_ETHER_CRC_LEN;
PKT_LEN(pkt) = len;
+ if (cqe->lro_num_seg > 1) {
+ mlx5_lro_update_hdr
+ (rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
+ len);
+ pkt->ol_flags |= PKT_RX_LRO;
+ pkt->tso_segsz = len / cqe->lro_num_seg;
+ }
}
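From the application side, the two fields set above are all that is needed to recognize a coalesced packet. A hedged usage sketch, using only standard rte_mbuf fields (the helper name is hypothetical):

#include <stdio.h>
#include <rte_mbuf.h>

/* Hypothetical application-side check: PKT_RX_LRO marks a coalesced
 * packet and tso_segsz carries the size of the original segments. */
static void
print_lro_info(const struct rte_mbuf *m)
{
	if (m->ol_flags & PKT_RX_LRO)
		printf("LRO packet: %u bytes, original segment size %u\n",
		       (unsigned int)m->pkt_len,
		       (unsigned int)m->tso_segsz);
}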
DATA_LEN(rep) = DATA_LEN(seg);
PKT_LEN(rep) = PKT_LEN(seg);
len -= RTE_ETHER_CRC_LEN;
offset = strd_idx * strd_sz + strd_shift;
addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset);
- /* Initialize the offload flag. */
- pkt->ol_flags = 0;
/*
* Memcpy packets to the target mbuf if:
 * - The size of the packet is smaller than mprq_max_memcpy_len.
continue;
}
rte_memcpy(rte_pktmbuf_mtod(pkt, void *), addr, len);
+ DATA_LEN(pkt) = len;
} else {
rte_iova_t buf_iova;
struct rte_mbuf_ext_shared_info *shinfo;
++rxq->stats.idropped;
continue;
}
+ DATA_LEN(pkt) = len;
+ /*
+ * An LRO packet may consume all the stride memory; in this
+ * case packet head-room space is not guaranteed, so an empty
+ * mbuf must be added for the head-room.
+ */
+ if (!rxq->strd_headroom_en) {
+ struct rte_mbuf *headroom_mbuf =
+ rte_pktmbuf_alloc(rxq->mp);
+
+ if (unlikely(headroom_mbuf == NULL)) {
+ rte_pktmbuf_free_seg(pkt);
+ ++rxq->stats.rx_nombuf;
+ break;
+ }
+ PORT(pkt) = rxq->port_id;
+ NEXT(headroom_mbuf) = pkt;
+ pkt = headroom_mbuf;
+ NB_SEGS(pkt) = 2;
+ }
}
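For reference, a minimal sketch of the chain built in the branch above, written with the plain rte_mbuf fields that the driver's NEXT()/NB_SEGS()/PORT() macros are assumed to wrap; the helper name is hypothetical:

#include <rte_mbuf.h>

/* Hypothetical helper: prepend an empty segment so the consumer still
 * gets head-room even though the data segment has none. On failure the
 * caller is expected to free `pkt` and account the allocation miss. */
static struct rte_mbuf *
prepend_empty_headroom(struct rte_mempool *mp, struct rte_mbuf *pkt)
{
	struct rte_mbuf *head = rte_pktmbuf_alloc(mp);

	if (head == NULL)
		return NULL;
	head->next = pkt;	/* empty head-room segment -> data segment */
	head->nb_segs = 2;	/* segment count lives in the head mbuf */
	head->pkt_len = pkt->pkt_len;
	return head;
}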
rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
if (lro_num_seg > 1) {
pkt->tso_segsz = strd_sz;
}
PKT_LEN(pkt) = len;
- DATA_LEN(pkt) = len;
PORT(pkt) = rxq->port_id;
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Increment bytes counter. */
mlx5_tx_handle_completion(struct mlx5_txq_data *restrict txq,
unsigned int olx __rte_unused)
{
+ unsigned int count = MLX5_TX_COMP_MAX_CQE;
bool update = false;
+ uint16_t tail = txq->elts_tail;
int ret;
do {
- volatile struct mlx5_wqe_cseg *cseg;
volatile struct mlx5_cqe *cqe;
- uint16_t tail;
cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
if (likely(ret != MLX5_CQE_STATUS_ERR)) {
/* No new CQEs in completion queue. */
assert(ret == MLX5_CQE_STATUS_HW_OWN);
- if (likely(update)) {
- /* Update the consumer index. */
- rte_compiler_barrier();
- *txq->cq_db =
- rte_cpu_to_be_32(txq->cq_ci);
- }
- return;
+ break;
}
/* Some error occurred, try to restart. */
rte_wmb();
tail = mlx5_tx_error_cqe_handle
(txq, (volatile struct mlx5_err_cqe *)cqe);
+ if (likely(tail != txq->elts_tail)) {
+ mlx5_tx_free_elts(txq, tail, olx);
+ assert(tail == txq->elts_tail);
+ }
+ /* Allow flushing all CQEs from the queue. */
+ count = txq->cqe_s;
} else {
+ volatile struct mlx5_wqe_cseg *cseg;
+
/* Normal transmit completion. */
++txq->cq_ci;
rte_cio_rmb();
if (txq->cq_pi)
--txq->cq_pi;
#endif
- if (likely(tail != txq->elts_tail)) {
- /* Free data buffers from elts. */
- mlx5_tx_free_elts(txq, tail, olx);
- assert(tail == txq->elts_tail);
- }
update = true;
- } while (true);
+ /*
+ * We have to restrict the number of CQEs processed in
+ * one tx_burst routine call. The CQ may be large and
+ * many CQEs may be updated by the NIC in one
+ * transaction. Freeing buffers is time consuming, so
+ * multiple iterations may introduce significant
+ * latency.
+ */
+ } while (--count);
+ if (likely(tail != txq->elts_tail)) {
+ /* Free data buffers from elts. */
+ mlx5_tx_free_elts(txq, tail, olx);
+ assert(tail == txq->elts_tail);
+ }
+ if (likely(update)) {
+ /* Update the consumer index. */
+ rte_compiler_barrier();
+ *txq->cq_db =
+ rte_cpu_to_be_32(txq->cq_ci);
+ }
}
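The reworked handler thus bounds the work done per invocation and batches both the elts free and the doorbell write after the loop. A self-contained sketch of the same bounded-drain pattern on a generic queue (all names hypothetical, DRAIN_BUDGET standing in for MLX5_TX_COMP_MAX_CQE):

#include <stdbool.h>
#include <stdint.h>

#define DRAIN_BUDGET 2u	/* stand-in for MLX5_TX_COMP_MAX_CQE */

struct cq_sketch {
	uint16_t ci;		/* consumer index */
	uint16_t produced;	/* entries published by the producer */
};

/* Consume at most DRAIN_BUDGET entries per call; report whether the
 * consumer index moved so the caller can write the doorbell once. */
static bool
drain_bounded(struct cq_sketch *q)
{
	unsigned int budget = DRAIN_BUDGET;
	bool update = false;

	do {
		if (q->ci == q->produced)
			break;		/* no new entries, stop early */
		++q->ci;		/* handle one completion */
		update = true;
	} while (--budget);	/* leftovers wait for the next call */
	return update;
}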
/**
*
* @param txq
* Pointer to TX queue structure.
- * @param n_mbuf
- * Number of mbuf not stored yet in elts array.
* @param loc
* Pointer to burst routine local context.
* @param olx
*/
static __rte_always_inline void
mlx5_tx_request_completion(struct mlx5_txq_data *restrict txq,
- unsigned int n_mbuf,
struct mlx5_txq_local *restrict loc,
- unsigned int olx __rte_unused)
+ unsigned int olx)
{
- uint16_t head = txq->elts_head + n_mbuf;
+ uint16_t head = txq->elts_head;
+ unsigned int part;
+ part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc->pkts_sent -
+ (MLX5_TXOFF_CONFIG(MULTI) ? loc->pkts_copy : 0);
+ head += part;
if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
- (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres) {
+ (MLX5_TXOFF_CONFIG(INLINE) &&
+ (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) {
volatile struct mlx5_wqe *last = loc->wqe_last;
txq->elts_comp = head;
- txq->wqe_comp = txq->wqe_ci;
+ if (MLX5_TXOFF_CONFIG(INLINE))
+ txq->wqe_comp = txq->wqe_ci;
/* Request unconditional completion on last WQE. */
last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
MLX5_COMP_MODE_OFFSET);
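The threshold test above relies on unsigned 16-bit wrap-around: the cast makes the distance between head and elts_comp come out modulo 2^16, so the comparison stays correct after the indices wrap. A tiny standalone illustration with made-up values:

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint16_t head = 5;	/* index wrapped past 65535 back to 5 */
	uint16_t comp = 65530;	/* last completion-requested position */

	/* 5 - 65530 == -65525 == 11 (mod 65536): the cast yields the
	 * true distance despite the wrap. */
	assert((uint16_t)(head - comp) == 11);
	return 0;
}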
wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
txq->wqe_ci += (ds + 3) / 4;
loc->wqe_free -= (ds + 3) / 4;
+ /* Request CQE generation if limits are reached. */
+ mlx5_tx_request_completion(txq, loc, olx);
return MLX5_TXCMP_CODE_MULTI;
}
} while (true);
txq->wqe_ci += (ds + 3) / 4;
loc->wqe_free -= (ds + 3) / 4;
+ /* Request CQE generation if limits are reached. */
+ mlx5_tx_request_completion(txq, loc, olx);
return MLX5_TXCMP_CODE_MULTI;
}
wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
txq->wqe_ci += (ds + 3) / 4;
loc->wqe_free -= (ds + 3) / 4;
+ /* Request CQE generation if limits are reached. */
+ mlx5_tx_request_completion(txq, loc, olx);
return MLX5_TXCMP_CODE_MULTI;
}
--loc->elts_free;
++loc->pkts_sent;
--pkts_n;
+ /* Request CQE generation if limits are reached. */
+ mlx5_tx_request_completion(txq, loc, olx);
if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
return MLX5_TXCMP_CODE_EXIT;
loc->mbuf = *pkts++;
struct mlx5_txq_local *restrict loc,
unsigned int ds,
unsigned int slen,
- unsigned int olx __rte_unused)
+ unsigned int olx)
{
assert(!MLX5_TXOFF_CONFIG(INLINE));
#ifdef MLX5_PMD_SOFT_COUNTERS
loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
txq->wqe_ci += (ds + 3) / 4;
loc->wqe_free -= (ds + 3) / 4;
+ /* Request CQE generation if limits are reached. */
+ mlx5_tx_request_completion(txq, loc, olx);
}
/*
loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
txq->wqe_ci += (len + 3) / 4;
loc->wqe_free -= (len + 3) / 4;
+ /* Request CQE generation if limits are reached. */
+ mlx5_tx_request_completion(txq, loc, olx);
}
/**
if (unlikely(!loc->elts_free ||
!loc->wqe_free))
return MLX5_TXCMP_CODE_EXIT;
+ pkts_n -= part;
goto next_empw;
}
/* Packet attributes match, continue the same eMPW. */
txq->wqe_ci += (2 + part + 3) / 4;
loc->wqe_free -= (2 + part + 3) / 4;
pkts_n -= part;
+ /* Request CQE generation if limits are reached. */
+ mlx5_tx_request_completion(txq, loc, olx);
if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
return MLX5_TXCMP_CODE_EXIT;
loc->mbuf = *pkts++;
struct mlx5_wqe_dseg *restrict dseg;
struct mlx5_wqe_eseg *restrict eseg;
enum mlx5_txcmp_code ret;
- unsigned int room, part;
+ unsigned int room, part, nlim;
unsigned int slen = 0;
-next_empw:
+ /*
+ * Limit the number of packets in one WQE
+ * to improve the latency of CQE generation.
+ */
+ nlim = RTE_MIN(pkts_n, MLX5_EMPW_MAX_PACKETS);
/* Check whether we have the minimal number of WQEs. */
if (unlikely(loc->wqe_free <
((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
mlx5_tx_idone_empw(txq, loc, part, slen, olx);
return MLX5_TXCMP_CODE_EXIT;
}
- /* Check if we have minimal room left. */
- if (room < MLX5_WQE_DSEG_SIZE) {
- part -= room;
- mlx5_tx_idone_empw(txq, loc, part, slen, olx);
- goto next_empw;
- }
loc->mbuf = *pkts++;
if (likely(pkts_n > 1))
rte_prefetch0(*pkts);
mlx5_tx_idone_empw(txq, loc, part, slen, olx);
return MLX5_TXCMP_CODE_ERROR;
}
+ /* Check if we have minimal room left. */
+ nlim--;
+ if (unlikely(!nlim || room < MLX5_WQE_DSEG_SIZE))
+ break;
/*
* Check whether packet parameters coincide
* within the assumed eMPW batch:
if (unlikely(!loc->elts_free ||
!loc->wqe_free))
return MLX5_TXCMP_CODE_EXIT;
- goto next_empw;
+ /* Continue the loop with a new eMPW session. */
}
assert(false);
}
}
++loc->pkts_sent;
--pkts_n;
+ /* Request CQE generation if limits are reached. */
+ mlx5_tx_request_completion(txq, loc, olx);
if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
return MLX5_TXCMP_CODE_EXIT;
loc->mbuf = *pkts++;
/* Take a shortcut if nothing is sent. */
if (unlikely(loc.pkts_sent == 0))
return 0;
- /* Not all of the mbufs may be stored into elts yet. */
- part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
- mlx5_tx_request_completion(txq, part, &loc, olx);
/*
* Ring QP doorbell immediately after WQE building completion
* to improve latencies. The pure software related data treatment
* can be completed after the doorbell. Tx CQEs for this SQ are
* processed in this thread only by the polling.
*/
mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, 0);
+ /* Not all of the mbufs may be stored into elts yet. */
+ part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent -
+ (MLX5_TXOFF_CONFIG(MULTI) ? loc.pkts_copy : 0);
if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
/*
* There are some single-segment mbufs not stored in elts.
- * It can be only if last packet was single-segment.
+ * This can happen only if the last packet was single-segment.
* The copying is gathered into one place because it is
* a good opportunity to optimize it with SIMD.
* Unfortunately if inlining is enabled the gaps in
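The batched copy mentioned above stores the trailing single-segment mbuf pointers into the elts ring in one place. A hedged sketch of such a batched store with ring wrap-around handling (names hypothetical, not necessarily matching the driver's own copy routine):

#include <stdint.h>
#include <string.h>

/* Copy `n` pointers into a power-of-two ring in at most two memcpy
 * calls, splitting only when the span crosses the wrap point. */
static void
ring_copy_sketch(void **ring, uint16_t mask, uint16_t head,
		 void * const *bufs, uint16_t n)
{
	uint16_t pos = head & mask;
	unsigned int room = (unsigned int)mask + 1 - pos;

	if (n <= room) {
		memcpy(&ring[pos], bufs, n * sizeof(*bufs));
	} else {
		memcpy(&ring[pos], bufs, room * sizeof(*bufs));
		memcpy(&ring[0], &bufs[room],
		       (n - room) * sizeof(*bufs));
	}
}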
DRV_LOG(DEBUG, "\tEMPW (Enhanced MPW)");
return txoff_func[m].func;
}
-
-