* Copyright 2015-2019 Mellanox Technologies, Ltd
*/
-#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <rte_branch_prediction.h>
#include <rte_ether.h>
#include <rte_cycles.h>
+#include <rte_flow.h>
+#include <mlx5_devx_cmds.h>
+#include <mlx5_prm.h>
+#include <mlx5_common.h>
+
+#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
-#include "mlx5_defs.h"
-#include "mlx5_prm.h"
/* TX burst subroutines return codes. */
enum mlx5_txcmp_code {
#define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported.*/
#define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */
#define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported.*/
+#define MLX5_TXOFF_CONFIG_MPW (1u << 9) /* Legacy MPW supported.*/
/* The most common offloads groups. */
#define MLX5_TXOFF_CONFIG_NONE 0
mlx5_queue_state_modify(struct rte_eth_dev *dev,
struct mlx5_mp_arg_queue_state_modify *sm);
+static inline void
+mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp,
+ volatile struct mlx5_cqe *restrict cqe,
+ uint32_t phcsum);
+
+static inline void
+mlx5_lro_update_hdr(uint8_t *restrict padd,
+ volatile struct mlx5_cqe *restrict cqe,
+ uint32_t len);
+
uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
[0xff] = RTE_PTYPE_ALL_MASK, /* Last entry for errored packet. */
};
uint8_t mlx5_cksum_table[1 << 10] __rte_cache_aligned;
uint8_t mlx5_swp_types_table[1 << 10] __rte_cache_aligned;
+uint64_t rte_net_mlx5_dynf_inline_mask;
+#define PKT_TX_DYNF_NOINLINE rte_net_mlx5_dynf_inline_mask
+
/**
* Build a table to translate Rx completion flags to packet type.
*
return RTE_ETH_RX_DESC_AVAIL;
}
+/**
+ * DPDK callback to get the RX queue information
+ *
+ * @param dev
+ * Pointer to the device structure.
+ *
+ * @param rx_queue_id
+ * Rx queue identifier.
+ *
+ * @param qinfo
+ * Pointer to the RX queue information structure.
+ *
+ * @return
+ * None.
+ */
+
+void
+mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_rxq_data *rxq = (*priv->rxqs)[rx_queue_id];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+
+ if (!rxq)
+ return;
+ qinfo->mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
+ rxq->mprq_mp : rxq->mp;
+ qinfo->conf.rx_thresh.pthresh = 0;
+ qinfo->conf.rx_thresh.hthresh = 0;
+ qinfo->conf.rx_thresh.wthresh = 0;
+ qinfo->conf.rx_free_thresh = rxq->rq_repl_thresh;
+ qinfo->conf.rx_drop_en = 1;
+ qinfo->conf.rx_deferred_start = rxq_ctrl ? 0 : 1;
+ qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = 1 << rxq->elts_n;
+}
+
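+/*
+ * Illustrative application-side usage of the queue info callback below,
+ * reached through the generic ethdev API (port_id and queue_id are
+ * hypothetical values):
+ *
+ *     struct rte_eth_rxq_info qinfo;
+ *
+ *     if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) == 0)
+ *         printf("Rx descriptors: %u, scattered Rx: %u\n",
+ *                qinfo.nb_desc, qinfo.scattered_rx);
+ */
+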
+/**
+ * DPDK callback to get the RX packet burst mode information
+ *
+ * @param dev
+ * Pointer to the device structure.
+ *
+ * @param rx_queue_id
+ * Rx queue identifier.
+ *
+ * @param mode
+ * Pointer to the burst mode information.
+ *
+ * @return
+ * 0 on success, -EINVAL on failure.
+ */
+
+int
+mlx5_rx_burst_mode_get(struct rte_eth_dev *dev,
+ uint16_t rx_queue_id __rte_unused,
+ struct rte_eth_burst_mode *mode)
+{
+ eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
+
+ if (pkt_burst == mlx5_rx_burst) {
+ snprintf(mode->info, sizeof(mode->info), "%s", "Scalar");
+ } else if (pkt_burst == mlx5_rx_burst_mprq) {
+ snprintf(mode->info, sizeof(mode->info), "%s", "Multi-Packet RQ");
+ } else if (pkt_burst == mlx5_rx_burst_vec) {
+#if defined RTE_ARCH_X86_64
+ snprintf(mode->info, sizeof(mode->info), "%s", "Vector SSE");
+#elif defined RTE_ARCH_ARM64
+ snprintf(mode->info, sizeof(mode->info), "%s", "Vector Neon");
+#elif defined RTE_ARCH_PPC_64
+ snprintf(mode->info, sizeof(mode->info), "%s", "Vector AltiVec");
+#else
+ return -EINVAL;
+#endif
+ } else {
+ return -EINVAL;
+ }
+ return 0;
+}
+
/**
* DPDK callback to get the number of used descriptors in a RX queue
*
MKSTR(path, "%s/%s", MLX5_SYSTEM_LOG_DIR, fname);
fd = fopen(path, "a+");
if (!fd) {
- DRV_LOG(WARNING, "cannot open %s for debug dump\n",
- path);
+ DRV_LOG(WARNING, "cannot open %s for debug dump", path);
MKSTR(path2, "./%s", fname);
fd = fopen(path2, "a+");
if (!fd) {
- DRV_LOG(ERR, "cannot open %s for debug dump\n",
- path2);
+ DRV_LOG(ERR, "cannot open %s for debug dump", path2);
return;
}
- DRV_LOG(INFO, "New debug dump in file %s\n", path2);
+ DRV_LOG(INFO, "New debug dump in file %s", path2);
} else {
- DRV_LOG(INFO, "New debug dump in file %s\n", path);
+ DRV_LOG(INFO, "New debug dump in file %s", path);
}
if (hex_title)
rte_hexdump(fd, hex_title, buf, hex_len);
* Pointer to the error CQE.
*
* @return
- * The last Tx buffer element to free.
+ * Negative value if queue recovery failed, otherwise 0 when
+ * the error completion entry was handled successfully.
*/
-uint16_t
+static int
mlx5_tx_error_cqe_handle(struct mlx5_txq_data *restrict txq,
volatile struct mlx5_err_cqe *err_cqe)
{
*/
txq->stats.oerrors += ((txq->wqe_ci & wqe_m) -
new_wqe_pi) & wqe_m;
- if (tx_recover_qp(txq_ctrl) == 0) {
- txq->cq_ci++;
- /* Release all the remaining buffers. */
- return txq->elts_head;
+ if (tx_recover_qp(txq_ctrl)) {
+ /* Recovering failed - retry later on the same WQE. */
+ return -1;
}
- /* Recovering failed - try again later on the same WQE. */
- } else {
- txq->cq_ci++;
+ /* Release all the remaining buffers. */
+ txq_free_elts(txq_ctrl);
}
- /* Do not release buffers. */
- return txq->elts_tail;
+ return 0;
}
/**
byte_count = DATA_LEN(buf);
}
/* scat->addr must be able to store a pointer. */
- assert(sizeof(scat->addr) >= sizeof(uintptr_t));
+ MLX5_ASSERT(sizeof(scat->addr) >= sizeof(uintptr_t));
*scat = (struct mlx5_wqe_data_seg){
.addr = rte_cpu_to_be_64(addr),
.byte_count = rte_cpu_to_be_32(byte_count),
&rq_attr);
}
if (ret) {
- DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s\n",
+ DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s",
sm->state, strerror(errno));
rte_errno = errno;
return ret;
.qp_state = IBV_QPS_RESET,
.port_num = (uint8_t)priv->ibv_port,
};
- struct ibv_qp *qp = txq_ctrl->ibv->qp;
+ struct ibv_qp *qp = txq_ctrl->obj->qp;
ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
if (ret) {
DRV_LOG(ERR, "Cannot change the Tx QP state to RESET "
- "%s\n", strerror(errno));
+ "%s", strerror(errno));
rte_errno = errno;
return ret;
}
ret = mlx5_glue->modify_qp(qp, &mod,
(IBV_QP_STATE | IBV_QP_PORT));
if (ret) {
- DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s\n",
+ DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s",
strerror(errno));
rte_errno = errno;
return ret;
mod.qp_state = IBV_QPS_RTR;
ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
if (ret) {
- DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s\n",
+ DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s",
strerror(errno));
rte_errno = errno;
return ret;
mod.qp_state = IBV_QPS_RTS;
ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
if (ret) {
- DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s\n",
+ DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s",
strerror(errno));
rte_errno = errno;
return ret;
*
* @param[in] rxq
* Pointer to RX queue structure.
- * @param[in] mbuf_prepare
- * Whether to prepare mbufs for the RQ.
+ * @param[in] vec
+ * 1 when called from a vectorized Rx burst; mbufs need to be prepared for the RQ.
+ * 0 when called from non-vectorized Rx burst.
*
* @return
* -1 in case of recovery error, otherwise the CQE status.
*/
int
-mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t mbuf_prepare)
+mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
{
const uint16_t cqe_n = 1 << rxq->cqe_n;
const uint16_t cqe_mask = cqe_n - 1;
if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv),
&sm))
return -1;
- if (mbuf_prepare) {
+ if (vec) {
const uint16_t q_mask = wqe_n - 1;
uint16_t elt_idx;
struct rte_mbuf **elt;
return -1;
}
}
+ for (i = 0; i < (int)wqe_n; ++i) {
+ elt = &(*rxq->elts)[i];
+ DATA_LEN(*elt) =
+ (uint16_t)((*elt)->buf_len -
+ rte_pktmbuf_headroom(*elt));
+ }
+ /* Padding with a fake mbuf for vec Rx. */
+ for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
+ (*rxq->elts)[wqe_n + i] =
+ &rxq->fake_mbuf;
}
mlx5_rxq_initialize(rxq);
rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
}
}
+ if (rte_flow_dynf_metadata_avail() && cqe->flow_table_metadata) {
+ pkt->ol_flags |= PKT_RX_DYNF_METADATA;
+ *RTE_FLOW_DYNF_METADATA(pkt) = cqe->flow_table_metadata;
+ }
if (rxq->csum)
pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
if (rxq->vlan_strip &&
break;
}
while (pkt != seg) {
- assert(pkt != (*rxq->elts)[idx]);
+ MLX5_ASSERT(pkt != (*rxq->elts)[idx]);
rep = NEXT(pkt);
NEXT(pkt) = NULL;
NB_SEGS(pkt) = 1;
break;
}
pkt = seg;
- assert(len >= (rxq->crc_present << 2));
- pkt->ol_flags = 0;
+ MLX5_ASSERT(len >= (rxq->crc_present << 2));
+ pkt->ol_flags &= EXT_ATTACHED_MBUF;
/* If compressed, take hash result from mini-CQE. */
rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ?
cqe->rx_hash_res :
if (rxq->crc_present)
len -= RTE_ETHER_CRC_LEN;
PKT_LEN(pkt) = len;
+ if (cqe->lro_num_seg > 1) {
+ mlx5_lro_update_hdr
+ (rte_pktmbuf_mtod(pkt, uint8_t *), cqe,
+ len);
+ pkt->ol_flags |= PKT_RX_LRO;
+ pkt->tso_segsz = len / cqe->lro_num_seg;
+ }
}
DATA_LEN(rep) = DATA_LEN(seg);
PKT_LEN(rep) = PKT_LEN(seg);
&((volatile struct mlx5_wqe_mprq *)rxq->wqes)[rq_idx].dseg;
void *addr;
- assert(rep != NULL);
+ MLX5_ASSERT(rep != NULL);
/* Replace MPRQ buf. */
(*rxq->mprq_bufs)[rq_idx] = rep;
/* Replace WQE. */
byte_cnt = ret;
strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
MLX5_MPRQ_STRIDE_NUM_SHIFT;
- assert(strd_cnt);
+ MLX5_ASSERT(strd_cnt);
consumed_strd += strd_cnt;
if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
continue;
/* mini-CQE for MPRQ doesn't have hash result. */
strd_idx = rte_be_to_cpu_16(mcqe->stride_idx);
}
- assert(strd_idx < strd_n);
- assert(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) & wq_mask));
+ MLX5_ASSERT(strd_idx < strd_n);
+ MLX5_ASSERT(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) &
+ wq_mask));
lro_num_seg = cqe->lro_num_seg;
/*
* Currently configured to receive a packet per stride. But if
break;
}
len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
- assert((int)len >= (rxq->crc_present << 2));
+ MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
if (rxq->crc_present)
len -= RTE_ETHER_CRC_LEN;
offset = strd_idx * strd_sz + strd_shift;
addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset);
- /* Initialize the offload flag. */
- pkt->ol_flags = 0;
/*
* Memcpy packets to the target mbuf if:
* - The size of packet is smaller than mprq_max_memcpy_len.
continue;
}
rte_memcpy(rte_pktmbuf_mtod(pkt, void *), addr, len);
+ DATA_LEN(pkt) = len;
} else {
rte_iova_t buf_iova;
struct rte_mbuf_ext_shared_info *shinfo;
/* Increment the refcnt of the whole chunk. */
rte_atomic16_add_return(&buf->refcnt, 1);
- assert((uint16_t)rte_atomic16_read(&buf->refcnt) <=
- strd_n + 1);
+ MLX5_ASSERT((uint16_t)rte_atomic16_read(&buf->refcnt) <=
+ strd_n + 1);
buf_addr = RTE_PTR_SUB(addr, headroom_sz);
/*
* MLX5 device doesn't use iova but it is necessary in a
buf_len, shinfo);
/* Set mbuf head-room. */
pkt->data_off = headroom_sz;
- assert(pkt->ol_flags == EXT_ATTACHED_MBUF);
+ MLX5_ASSERT(pkt->ol_flags == EXT_ATTACHED_MBUF);
/*
* Prevent potential overflow due to MTU change through
* kernel interface.
++rxq->stats.idropped;
continue;
}
+ DATA_LEN(pkt) = len;
+ /*
+ * An LRO packet may consume all the stride memory, in this
+ * case packet head-room space is not guaranteed, so an empty
+ * mbuf must be added for the head-room.
+ */
+ if (!rxq->strd_headroom_en) {
+ struct rte_mbuf *headroom_mbuf =
+ rte_pktmbuf_alloc(rxq->mp);
+
+ if (unlikely(headroom_mbuf == NULL)) {
+ rte_pktmbuf_free_seg(pkt);
+ ++rxq->stats.rx_nombuf;
+ break;
+ }
+ PORT(pkt) = rxq->port_id;
+ NEXT(headroom_mbuf) = pkt;
+ pkt = headroom_mbuf;
+ NB_SEGS(pkt) = 2;
+ }
}
rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
if (lro_num_seg > 1) {
pkt->tso_segsz = strd_sz;
}
PKT_LEN(pkt) = len;
- DATA_LEN(pkt) = len;
PORT(pkt) = rxq->port_id;
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Increment bytes counter. */
* copying pointers to temporary array
* for rte_mempool_put_bulk() calls.
*/
- assert(pkts);
- assert(pkts_n);
+ MLX5_ASSERT(pkts);
+ MLX5_ASSERT(pkts_n);
for (;;) {
for (;;) {
/*
*/
mbuf = rte_pktmbuf_prefree_seg(*pkts);
if (likely(mbuf != NULL)) {
- assert(mbuf == *pkts);
+ MLX5_ASSERT(mbuf == *pkts);
if (likely(n_free != 0)) {
if (unlikely(pool != mbuf->pool))
/* From different pool. */
* This loop is implemented to avoid multiple
* inlining of rte_mempool_put_bulk().
*/
- assert(pool);
- assert(p_free);
- assert(n_free);
+ MLX5_ASSERT(pool);
+ MLX5_ASSERT(p_free);
+ MLX5_ASSERT(n_free);
/*
* Free the array of pre-freed mbufs
* belonging to the same memory pool.
{
uint16_t n_elts = tail - txq->elts_tail;
- assert(n_elts);
- assert(n_elts <= txq->elts_s);
+ MLX5_ASSERT(n_elts);
+ MLX5_ASSERT(n_elts <= txq->elts_s);
/*
* Implement a loop to support ring buffer wraparound
* with single inlining of mlx5_tx_free_mbuf().
part = txq->elts_s - (txq->elts_tail & txq->elts_m);
part = RTE_MIN(part, n_elts);
- assert(part);
- assert(part <= txq->elts_s);
+ MLX5_ASSERT(part);
+ MLX5_ASSERT(part <= txq->elts_s);
mlx5_tx_free_mbuf(&txq->elts[txq->elts_tail & txq->elts_m],
part, olx);
txq->elts_tail += part;
unsigned int part;
struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
- assert(pkts);
- assert(pkts_n);
+ MLX5_ASSERT(pkts);
+ MLX5_ASSERT(pkts_n);
part = txq->elts_s - (txq->elts_head & txq->elts_m);
- assert(part);
- assert(part <= txq->elts_s);
+ MLX5_ASSERT(part);
+ MLX5_ASSERT(part <= txq->elts_s);
/* This code is a good candidate for vectorizing with SIMD. */
rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
(void *)pkts,
(pkts_n - part) * sizeof(struct rte_mbuf *));
}
+/**
+ * Update completion queue consuming index via doorbell
+ * and flush the completed data buffers.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param last_cqe
+ * Pointer to the last valid CQE; if not NULL, update txq->wqe_pi
+ * and flush the completed data buffers.
+ * @param olx
+ * Configured Tx offloads mask. It is fully defined at
+ * compile time and may be used for optimization.
+ */
+static __rte_always_inline void
+mlx5_tx_comp_flush(struct mlx5_txq_data *restrict txq,
+ volatile struct mlx5_cqe *last_cqe,
+ unsigned int olx __rte_unused)
+{
+ if (likely(last_cqe != NULL)) {
+ uint16_t tail;
+
+ txq->wqe_pi = rte_be_to_cpu_16(last_cqe->wqe_counter);
+ tail = txq->fcqs[(txq->cq_ci - 1) & txq->cqe_m];
+ if (likely(tail != txq->elts_tail)) {
+ mlx5_tx_free_elts(txq, tail, olx);
+ MLX5_ASSERT(tail == txq->elts_tail);
+ }
+ }
+}
+
/**
* Manage TX completions. This routine checks the CQ for
* arrived CQEs, deduces the last accomplished WQE in SQ,
mlx5_tx_handle_completion(struct mlx5_txq_data *restrict txq,
unsigned int olx __rte_unused)
{
- bool update = false;
+ unsigned int count = MLX5_TX_COMP_MAX_CQE;
+ volatile struct mlx5_cqe *last_cqe = NULL;
+ uint16_t ci = txq->cq_ci;
int ret;
+ static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
+ static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value");
do {
- volatile struct mlx5_wqe_cseg *cseg;
volatile struct mlx5_cqe *cqe;
- uint16_t tail;
- cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
- ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
+ cqe = &txq->cqes[ci & txq->cqe_m];
+ ret = check_cqe(cqe, txq->cqe_s, ci);
if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
if (likely(ret != MLX5_CQE_STATUS_ERR)) {
/* No new CQEs in completion queue. */
- assert(ret == MLX5_CQE_STATUS_HW_OWN);
- if (likely(update)) {
- /* Update the consumer index. */
- rte_compiler_barrier();
- *txq->cq_db =
- rte_cpu_to_be_32(txq->cq_ci);
- }
- return;
+ MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
+ break;
}
- /* Some error occurred, try to restart. */
+ /*
+ * Some error occurred, try to restart.
+ * There is no barrier after the WQE related doorbell
+ * is written, make sure all writes are completed
+ * here, before we might perform the SQ reset.
+ */
rte_wmb();
- tail = mlx5_tx_error_cqe_handle
+ txq->cq_ci = ci;
+ ret = mlx5_tx_error_cqe_handle
(txq, (volatile struct mlx5_err_cqe *)cqe);
- } else {
- /* Normal transmit completion. */
- ++txq->cq_ci;
- rte_cio_rmb();
- txq->wqe_pi = rte_be_to_cpu_16(cqe->wqe_counter);
- cseg = (volatile struct mlx5_wqe_cseg *)
- (txq->wqes + (txq->wqe_pi & txq->wqe_m));
- tail = cseg->misc;
- }
-#ifndef NDEBUG
- if (txq->cq_pi)
- --txq->cq_pi;
-#endif
- if (likely(tail != txq->elts_tail)) {
- /* Free data buffers from elts. */
- mlx5_tx_free_elts(txq, tail, olx);
- assert(tail == txq->elts_tail);
+ if (unlikely(ret < 0)) {
+ /*
+ * Some error occurred during queue error
+ * handling, do not advance the index
+ * here to allow a retry on the next call.
+ */
+ return;
+ }
+ /*
+ * We are going to fetch all entries with
+ * MLX5_CQE_SYNDROME_WR_FLUSH_ERR status.
+ * The send queue is supposed to be empty.
+ */
+ ++ci;
+ txq->cq_pi = ci;
+ last_cqe = NULL;
+ continue;
}
- update = true;
+ /* Normal transmit completion. */
+ MLX5_ASSERT(ci != txq->cq_pi);
+ MLX5_ASSERT((txq->fcqs[ci & txq->cqe_m] >> 16) ==
+ cqe->wqe_counter);
+ ++ci;
+ last_cqe = cqe;
+ /*
+ * We have to restrict the number of processed CQEs
+ * in one tx_burst routine call. The CQ may be large
+ * and many CQEs may be updated by the NIC in one
+ * transaction. Freeing buffers is time consuming,
+ * so multiple iterations may introduce significant
+ * latency.
+ */
+ if (likely(--count == 0))
+ break;
} while (true);
+ if (likely(ci != txq->cq_ci)) {
+ /*
+ * Update completion queue consuming index
+ * and ring doorbell to notify hardware.
+ */
+ rte_compiler_barrier();
+ txq->cq_ci = ci;
+ *txq->cq_db = rte_cpu_to_be_32(ci);
+ mlx5_tx_comp_flush(txq, last_cqe, olx);
+ }
}
/**
*
* @param txq
* Pointer to TX queue structure.
- * @param n_mbuf
- * Number of mbuf not stored yet in elts array.
* @param loc
* Pointer to burst routine local context.
* @param olx
*/
static __rte_always_inline void
mlx5_tx_request_completion(struct mlx5_txq_data *restrict txq,
- unsigned int n_mbuf,
struct mlx5_txq_local *restrict loc,
- unsigned int olx __rte_unused)
+ unsigned int olx)
{
- uint16_t head = txq->elts_head + n_mbuf;
+ uint16_t head = txq->elts_head;
+ unsigned int part;
+ part = MLX5_TXOFF_CONFIG(INLINE) ?
+ 0 : loc->pkts_sent - loc->pkts_copy;
+ head += part;
if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
- (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres) {
+ (MLX5_TXOFF_CONFIG(INLINE) &&
+ (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) {
volatile struct mlx5_wqe *last = loc->wqe_last;
txq->elts_comp = head;
- txq->wqe_comp = txq->wqe_ci;
+ if (MLX5_TXOFF_CONFIG(INLINE))
+ txq->wqe_comp = txq->wqe_ci;
/* Request unconditional completion on last WQE. */
last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
MLX5_COMP_MODE_OFFSET);
- /* Save elts_head in unused "immediate" field of WQE. */
- last->cseg.misc = head;
- /*
- * A CQE slot must always be available. Count the
- * issued CEQ "always" request instead of production
- * index due to here can be CQE with errors and
- * difference with ci may become inconsistent.
- */
- assert(txq->cqe_s > ++txq->cq_pi);
+ /* Save elts_head in the dedicated free-on-completion queue. */
+#ifdef RTE_LIBRTE_MLX5_DEBUG
+ txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head |
+ (last->cseg.opcode >> 8) << 16;
+#else
+ txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
+#endif
+ /* A CQE slot must always be available. */
+ MLX5_ASSERT((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
}
}
{
struct mlx5_wqe_cseg *restrict cs = &wqe->cseg;
+ /* For legacy MPW replace the EMPW by TSO with modifier. */
+ if (MLX5_TXOFF_CONFIG(MPW) && opcode == MLX5_OPCODE_ENHANCED_MPSW)
+ opcode = MLX5_OPCODE_TSO | MLX5_OPC_MOD_MPW << 24;
cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
/* Fill metadata field if needed. */
es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
- loc->mbuf->ol_flags & PKT_TX_METADATA ?
- loc->mbuf->tx_metadata : 0 : 0;
+ loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
+ *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
/* Engage VLAN tag insertion feature if requested. */
if (MLX5_TXOFF_CONFIG(VLAN) &&
loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
* We should get here only if the device supports
* this feature correctly.
*/
- assert(txq->vlan_en);
+ MLX5_ASSERT(txq->vlan_en);
es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
loc->mbuf->vlan_tci);
} else {
es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
/* Fill metadata field if needed. */
es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
- loc->mbuf->ol_flags & PKT_TX_METADATA ?
- loc->mbuf->tx_metadata : 0 : 0;
+ loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
+ *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
(sizeof(uint16_t) +
sizeof(rte_v128u32_t)),
loc->mbuf->vlan_tci);
pdst += sizeof(struct rte_vlan_hdr);
/* Copy the rest two bytes from packet data. */
- assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
+ MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
*(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
} else {
/* Fill the gap in the title WQEBB with inline data. */
es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
/* Fill metadata field if needed. */
es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
- loc->mbuf->ol_flags & PKT_TX_METADATA ?
- loc->mbuf->tx_metadata : 0 : 0;
+ loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
+ *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
(sizeof(uint16_t) +
sizeof(rte_v128u32_t)),
loc->mbuf->vlan_tci);
pdst += sizeof(struct rte_vlan_hdr);
/* Copy the rest two bytes from packet data. */
- assert(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
+ MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
*(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
psrc += sizeof(uint16_t);
} else {
psrc += sizeof(rte_v128u32_t);
}
pdst = (uint8_t *)(es + 2);
- assert(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
- assert(pdst < (uint8_t *)txq->wqes_end);
+ MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
+ MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
if (!inlen) {
- assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
+ MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
return (struct mlx5_wqe_dseg *)pdst;
}
/*
* Pointer to burst routine local context.
* @param len
* Length of data to be copied.
+ * @param must
+ * Length of data to be copied ignoring the no-inline hint.
* @param olx
* Configured Tx offloads mask. It is fully defined at
* compile time and may be used for optimization.
+ *
+ * @return
+ * Number of data bytes actually copied. This is always greater than or
+ * equal to the must parameter and might be less than len if the no-inline
+ * hint flag is encountered.
*/
-static __rte_always_inline void
+static __rte_always_inline unsigned int
mlx5_tx_mseg_memcpy(uint8_t *pdst,
struct mlx5_txq_local *restrict loc,
unsigned int len,
+ unsigned int must,
unsigned int olx __rte_unused)
{
struct rte_mbuf *mbuf;
- unsigned int part, dlen;
+ unsigned int part, dlen, copy = 0;
uint8_t *psrc;
- assert(len);
+ MLX5_ASSERT(len);
+ MLX5_ASSERT(must <= len);
do {
/* Allow zero length packets, must check first. */
dlen = rte_pktmbuf_data_len(loc->mbuf);
loc->mbuf = mbuf->next;
rte_pktmbuf_free_seg(mbuf);
loc->mbuf_off = 0;
- assert(loc->mbuf_nseg > 1);
- assert(loc->mbuf);
+ MLX5_ASSERT(loc->mbuf_nseg > 1);
+ MLX5_ASSERT(loc->mbuf);
--loc->mbuf_nseg;
+ if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
+ unsigned int diff;
+
+ if (copy >= must) {
+ /*
+ * We already copied the minimal
+ * requested amount of data.
+ */
+ return copy;
+ }
+ diff = must - copy;
+ if (diff <= rte_pktmbuf_data_len(loc->mbuf)) {
+ /*
+ * Copy only the minimal required
+ * part of the data buffer.
+ */
+ len = diff;
+ }
+ }
continue;
}
dlen -= loc->mbuf_off;
loc->mbuf_off);
part = RTE_MIN(len, dlen);
rte_memcpy(pdst, psrc, part);
+ copy += part;
loc->mbuf_off += part;
len -= part;
if (!len) {
loc->mbuf = mbuf->next;
rte_pktmbuf_free_seg(mbuf);
loc->mbuf_off = 0;
- assert(loc->mbuf_nseg >= 1);
+ MLX5_ASSERT(loc->mbuf_nseg >= 1);
--loc->mbuf_nseg;
}
- return;
+ return copy;
}
pdst += part;
} while (true);
struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
uint32_t csum;
uint8_t *pdst;
- unsigned int part;
+ unsigned int part, tlen = 0;
/*
* Calculate and set check sum flags first, uint32_t field
es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
/* Fill metadata field if needed. */
es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
- loc->mbuf->ol_flags & PKT_TX_METADATA ?
- loc->mbuf->tx_metadata : 0 : 0;
+ loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
+ *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
(sizeof(uint16_t) +
sizeof(rte_v128u32_t)),
sizeof(struct rte_vlan_hdr) +
2 * RTE_ETHER_ADDR_LEN),
"invalid Ethernet Segment data size");
- assert(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
- es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
+ MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
pdst = (uint8_t *)&es->inline_data;
if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
/* Implement VLAN tag insertion as part inline data. */
- mlx5_tx_mseg_memcpy(pdst, loc, 2 * RTE_ETHER_ADDR_LEN, olx);
+ mlx5_tx_mseg_memcpy(pdst, loc,
+ 2 * RTE_ETHER_ADDR_LEN,
+ 2 * RTE_ETHER_ADDR_LEN, olx);
pdst += 2 * RTE_ETHER_ADDR_LEN;
*(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
((RTE_ETHER_TYPE_VLAN << 16) |
loc->mbuf->vlan_tci);
pdst += sizeof(struct rte_vlan_hdr);
- inlen -= 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
+ tlen += 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
}
- assert(pdst < (uint8_t *)txq->wqes_end);
+ MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
/*
* The WQEBB space availability is checked by caller.
* Here we should be aware of WQE ring buffer wraparound only.
*/
part = (uint8_t *)txq->wqes_end - pdst;
- part = RTE_MIN(part, inlen);
- assert(part);
+ part = RTE_MIN(part, inlen - tlen);
+ MLX5_ASSERT(part);
do {
- mlx5_tx_mseg_memcpy(pdst, loc, part, olx);
- inlen -= part;
- if (likely(!inlen)) {
- pdst += part;
+ unsigned int copy;
+
+ /*
+ * Copying may be interrupted inside the routine
+ * if the no-inline hint flag is encountered.
+ */
+ copy = tlen >= txq->inlen_mode ? 0 : (txq->inlen_mode - tlen);
+ copy = mlx5_tx_mseg_memcpy(pdst, loc, part, copy, olx);
+ tlen += copy;
+ if (likely(inlen <= tlen) || copy < part) {
+ es->inline_hdr_sz = rte_cpu_to_be_16(tlen);
+ pdst += copy;
pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
return (struct mlx5_wqe_dseg *)pdst;
}
pdst = (uint8_t *)txq->wqes;
- part = inlen;
+ part = inlen - tlen;
} while (true);
}
unsigned int olx __rte_unused)
{
- assert(len);
+ MLX5_ASSERT(len);
dseg->bcount = rte_cpu_to_be_32(len);
dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
{
uintptr_t dst, src;
- assert(len);
+ MLX5_ASSERT(len);
if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
dseg->bcount = rte_cpu_to_be_32(len);
dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
/* Unrolled implementation of generic rte_memcpy. */
dst = (uintptr_t)&dseg->inline_data[0];
src = (uintptr_t)buf;
+ if (len & 0x08) {
#ifdef RTE_ARCH_STRICT_ALIGN
- memcpy(dst, src, len);
+ MLX5_ASSERT(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
+ *(uint32_t *)dst = *(unaligned_uint32_t *)src;
+ dst += sizeof(uint32_t);
+ src += sizeof(uint32_t);
+ *(uint32_t *)dst = *(unaligned_uint32_t *)src;
+ dst += sizeof(uint32_t);
+ src += sizeof(uint32_t);
#else
- if (len & 0x08) {
- *(uint64_t *)dst = *(uint64_t *)src;
+ *(uint64_t *)dst = *(unaligned_uint64_t *)src;
dst += sizeof(uint64_t);
src += sizeof(uint64_t);
+#endif
}
if (len & 0x04) {
- *(uint32_t *)dst = *(uint32_t *)src;
+ *(uint32_t *)dst = *(unaligned_uint32_t *)src;
dst += sizeof(uint32_t);
src += sizeof(uint32_t);
}
if (len & 0x02) {
- *(uint16_t *)dst = *(uint16_t *)src;
+ *(uint16_t *)dst = *(unaligned_uint16_t *)src;
dst += sizeof(uint16_t);
src += sizeof(uint16_t);
}
if (len & 0x01)
*(uint8_t *)dst = *(uint8_t *)src;
-#endif
}
/**
unsigned int part;
uint8_t *pdst;
- assert(len > MLX5_ESEG_MIN_INLINE_SIZE);
+ MLX5_ASSERT(len > MLX5_ESEG_MIN_INLINE_SIZE);
static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
(2 * RTE_ETHER_ADDR_LEN),
"invalid Data Segment data size");
memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
buf += MLX5_DSEG_MIN_INLINE_SIZE;
pdst += MLX5_DSEG_MIN_INLINE_SIZE;
+ len -= MLX5_DSEG_MIN_INLINE_SIZE;
/* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
- assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
+ MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
+ if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
+ pdst = (uint8_t *)txq->wqes;
*(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
loc->mbuf->vlan_tci);
pdst += sizeof(struct rte_vlan_hdr);
- if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
- pdst = (uint8_t *)txq->wqes;
/*
* The WQEBB space availability is checked by caller.
* Here we should be aware of WQE ring buffer wraparound only.
struct mlx5_wqe_dseg *restrict dseg;
unsigned int ds;
- assert((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
+ MLX5_ASSERT((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
loc->mbuf_nseg = NB_SEGS(loc->mbuf);
loc->mbuf_off = 0;
* Non-zero offset means some data
* remains in the packet.
*/
- assert(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
- assert(rte_pktmbuf_data_len(loc->mbuf));
+ MLX5_ASSERT(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
+ MLX5_ASSERT(rte_pktmbuf_data_len(loc->mbuf));
dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
loc->mbuf_off);
dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
dseg = (struct mlx5_wqe_dseg *)txq->wqes;
mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
/* Store the mbuf to be freed on completion. */
- assert(loc->elts_free);
+ MLX5_ASSERT(loc->elts_free);
txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
--loc->elts_free;
++dseg;
(txq, loc, dseg,
rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
rte_pktmbuf_data_len(loc->mbuf), olx);
- assert(loc->elts_free);
+ MLX5_ASSERT(loc->elts_free);
txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
--loc->elts_free;
++dseg;
inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
inlen > (dlen + vlan)))
return MLX5_TXCMP_CODE_ERROR;
- assert(inlen >= txq->inlen_mode);
+ MLX5_ASSERT(inlen >= txq->inlen_mode);
/*
* Check whether there are enough free WQEBBs:
* - Control Segment
struct mlx5_wqe *restrict wqe;
unsigned int ds, nseg;
- assert(NB_SEGS(loc->mbuf) > 1);
+ MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
/*
* No inline at all, it means the CPU cycles saving
* is prioritized at configuration, we should not
struct mlx5_wqe *restrict wqe;
unsigned int ds, inlen, dlen, vlan = 0;
- assert(MLX5_TXOFF_CONFIG(INLINE));
- assert(NB_SEGS(loc->mbuf) > 1);
+ MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
+ MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
/*
* First calculate data length to be inlined
* to estimate the required space for WQE.
/* Check against minimal length. */
if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
return MLX5_TXCMP_CODE_ERROR;
- assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
- if (inlen > txq->inlen_send) {
+ MLX5_ASSERT(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
+ if (inlen > txq->inlen_send ||
+ loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
struct rte_mbuf *mbuf;
unsigned int nxlen;
uintptr_t start;
* inlining is required.
*/
if (txq->inlen_mode) {
- assert(txq->inlen_mode >= MLX5_ESEG_MIN_INLINE_SIZE);
- assert(txq->inlen_mode <= txq->inlen_send);
+ MLX5_ASSERT(txq->inlen_mode >=
+ MLX5_ESEG_MIN_INLINE_SIZE);
+ MLX5_ASSERT(txq->inlen_mode <= txq->inlen_send);
inlen = txq->inlen_mode;
} else {
- if (!vlan || txq->vlan_en) {
+ if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE ||
+ !vlan || txq->vlan_en) {
/*
* VLAN insertion will be done inside by HW.
* It is not utmost effective - VLAN flag is
do {
smlen = nxlen;
mbuf = NEXT(mbuf);
- assert(mbuf);
+ MLX5_ASSERT(mbuf);
nxlen = rte_pktmbuf_data_len(mbuf);
nxlen += smlen;
} while (unlikely(nxlen < inlen));
inlen = nxlen;
mbuf = NEXT(mbuf);
/* This should not be the end of the packet. */
- assert(mbuf);
+ MLX5_ASSERT(mbuf);
nxlen = inlen + rte_pktmbuf_data_len(mbuf);
} while (unlikely(nxlen < txq->inlen_send));
}
* Estimate the number of Data Segments conservatively,
* supposing no mbufs are being freed during inlining.
*/
- assert(inlen <= txq->inlen_send);
+ MLX5_ASSERT(inlen <= txq->inlen_send);
ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
MLX5_ESEG_MIN_INLINE_SIZE +
MLX5_WSEG_SIZE +
struct mlx5_txq_local *restrict loc,
unsigned int olx)
{
- assert(loc->elts_free && loc->wqe_free);
- assert(pkts_n > loc->pkts_sent);
+ MLX5_ASSERT(loc->elts_free && loc->wqe_free);
+ MLX5_ASSERT(pkts_n > loc->pkts_sent);
pkts += loc->pkts_sent + 1;
pkts_n -= loc->pkts_sent;
for (;;) {
enum mlx5_txcmp_code ret;
- assert(NB_SEGS(loc->mbuf) > 1);
+ MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
/*
* Estimate the number of free elts quickly but
* conservatively. Some segment may be fully inlined
continue;
/* Here ends the series of multi-segment packets. */
if (MLX5_TXOFF_CONFIG(TSO) &&
- unlikely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
+ unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
return MLX5_TXCMP_CODE_TSO;
return MLX5_TXCMP_CODE_SINGLE;
}
- assert(false);
+ MLX5_ASSERT(false);
}
/**
struct mlx5_txq_local *restrict loc,
unsigned int olx)
{
- assert(loc->elts_free && loc->wqe_free);
- assert(pkts_n > loc->pkts_sent);
+ MLX5_ASSERT(loc->elts_free && loc->wqe_free);
+ MLX5_ASSERT(pkts_n > loc->pkts_sent);
pkts += loc->pkts_sent + 1;
pkts_n -= loc->pkts_sent;
for (;;) {
unsigned int ds, dlen, hlen, ntcp, vlan = 0;
uint8_t *dptr;
- assert(NB_SEGS(loc->mbuf) == 1);
+ MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
dlen = rte_pktmbuf_data_len(loc->mbuf);
if (MLX5_TXOFF_CONFIG(VLAN) &&
loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
if (MLX5_TXOFF_CONFIG(MULTI) &&
unlikely(NB_SEGS(loc->mbuf) > 1))
return MLX5_TXCMP_CODE_MULTI;
- if (unlikely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
+ if (likely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
return MLX5_TXCMP_CODE_SINGLE;
/* Continue with the next TSO packet. */
}
- assert(false);
+ MLX5_ASSERT(false);
}
/**
/**
* Check the next packet attributes to match with the eMPW batch ones.
+ * In addition, for legacy MPW the packet length is also checked.
*
* @param txq
* Pointer to TX queue structure.
* Pointer to Ethernet Segment of eMPW batch.
* @param loc
* Pointer to burst routine local context.
+ * @param dlen
+ * Length of previous packet in MPW descriptor.
* @param olx
* Configured Tx offloads mask. It is fully defined at
* compile time and may be used for optimization.
mlx5_tx_match_empw(struct mlx5_txq_data *restrict txq __rte_unused,
struct mlx5_wqe_eseg *restrict es,
struct mlx5_txq_local *restrict loc,
+ uint32_t dlen,
unsigned int olx)
{
uint8_t swp_flags = 0;
return false;
/* Fill metadata field if needed. */
if (MLX5_TXOFF_CONFIG(METADATA) &&
- es->metadata != (loc->mbuf->ol_flags & PKT_TX_METADATA ?
- loc->mbuf->tx_metadata : 0))
+ es->metadata != (loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
+ *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0))
+ return false;
+ /* Legacy MPW can only send packets with the same length. */
+ if (MLX5_TXOFF_CONFIG(MPW) &&
+ dlen != rte_pktmbuf_data_len(loc->mbuf))
return false;
/* There must be no VLAN packets in eMPW loop. */
if (MLX5_TXOFF_CONFIG(VLAN))
- assert(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
+ MLX5_ASSERT(!(loc->mbuf->ol_flags & PKT_TX_VLAN_PKT));
return true;
}
unsigned int slen,
unsigned int olx __rte_unused)
{
- assert(!MLX5_TXOFF_CONFIG(INLINE));
+ MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Update sent data bytes counter. */
txq->stats.obytes += slen;
unsigned int slen,
unsigned int olx __rte_unused)
{
- assert(MLX5_TXOFF_CONFIG(INLINE));
- assert((len % MLX5_WSEG_SIZE) == 0);
+ MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
+ MLX5_ASSERT((len % MLX5_WSEG_SIZE) == 0);
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Update sent data bytes counter. */
txq->stats.obytes += slen;
* and sends single-segment packet with eMPW opcode
* without data inlining.
*/
- assert(!MLX5_TXOFF_CONFIG(INLINE));
- assert(MLX5_TXOFF_CONFIG(EMPW));
- assert(loc->elts_free && loc->wqe_free);
- assert(pkts_n > loc->pkts_sent);
+ MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
+ MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
+ MLX5_ASSERT(loc->elts_free && loc->wqe_free);
+ MLX5_ASSERT(pkts_n > loc->pkts_sent);
static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
pkts += loc->pkts_sent + 1;
pkts_n -= loc->pkts_sent;
unsigned int slen = 0;
next_empw:
- part = RTE_MIN(pkts_n, MLX5_EMPW_MAX_PACKETS);
+ MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
+ part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
+ MLX5_MPW_MAX_PACKETS :
+ MLX5_EMPW_MAX_PACKETS);
if (unlikely(loc->elts_free < part)) {
/* We do not have enough elts to store all mbufs. */
if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS))
eseg = &loc->wqe_last->eseg;
dseg = &loc->wqe_last->dseg[0];
loop = part;
+ /* Store the packet length for legacy MPW. */
+ if (MLX5_TXOFF_CONFIG(MPW))
+ eseg->mss = rte_cpu_to_be_16
+ (rte_pktmbuf_data_len(loc->mbuf));
for (;;) {
uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
#ifdef MLX5_PMD_SOFT_COUNTERS
return MLX5_TXCMP_CODE_EXIT;
return MLX5_TXCMP_CODE_MULTI;
}
+ MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
if (ret == MLX5_TXCMP_CODE_TSO) {
part -= loop;
mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
return MLX5_TXCMP_CODE_SINGLE;
}
if (ret != MLX5_TXCMP_CODE_EMPW) {
- assert(false);
+ MLX5_ASSERT(false);
part -= loop;
mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
return MLX5_TXCMP_CODE_ERROR;
* - check sum settings
* - metadata value
* - software parser settings
+ * - packet length (legacy MPW only)
*/
- if (!mlx5_tx_match_empw(txq, eseg, loc, olx)) {
- assert(loop);
+ if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) {
+ MLX5_ASSERT(loop);
part -= loop;
mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
if (unlikely(!loc->elts_free ||
!loc->wqe_free))
return MLX5_TXCMP_CODE_EXIT;
+ pkts_n -= part;
goto next_empw;
}
/* Packet attributes match, continue the same eMPW. */
dseg = (struct mlx5_wqe_dseg *)txq->wqes;
}
/* eMPW is built successfully, update loop parameters. */
- assert(!loop);
- assert(pkts_n >= part);
+ MLX5_ASSERT(!loop);
+ MLX5_ASSERT(pkts_n >= part);
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Update sent data bytes counter. */
txq->stats.obytes += slen;
return ret;
/* Continue sending eMPW batches. */
}
- assert(false);
+ MLX5_ASSERT(false);
}
/**
* and sends single-segment packet with eMPW opcode
* with data inlining.
*/
- assert(MLX5_TXOFF_CONFIG(INLINE));
- assert(MLX5_TXOFF_CONFIG(EMPW));
- assert(loc->elts_free && loc->wqe_free);
- assert(pkts_n > loc->pkts_sent);
+ MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
+ MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
+ MLX5_ASSERT(loc->elts_free && loc->wqe_free);
+ MLX5_ASSERT(pkts_n > loc->pkts_sent);
static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
pkts += loc->pkts_sent + 1;
pkts_n -= loc->pkts_sent;
struct mlx5_wqe_dseg *restrict dseg;
struct mlx5_wqe_eseg *restrict eseg;
enum mlx5_txcmp_code ret;
- unsigned int room, part;
+ unsigned int room, part, nlim;
unsigned int slen = 0;
-next_empw:
+ MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
+ /*
+ * Limit the number of packets in one WQE
+ * to improve CQE latency generation.
+ */
+ nlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
+ MLX5_MPW_INLINE_MAX_PACKETS :
+ MLX5_EMPW_MAX_PACKETS);
/* Check whether we have the minimal amount of WQEs */
if (unlikely(loc->wqe_free <
((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
olx & ~MLX5_TXOFF_CONFIG_VLAN);
eseg = &loc->wqe_last->eseg;
dseg = &loc->wqe_last->dseg[0];
+ /* Store the packet length for legacy MPW. */
+ if (MLX5_TXOFF_CONFIG(MPW))
+ eseg->mss = rte_cpu_to_be_16
+ (rte_pktmbuf_data_len(loc->mbuf));
room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE,
loc->wqe_free) * MLX5_WQE_SIZE -
MLX5_WQE_CSEG_SIZE -
uint8_t *dptr = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
unsigned int tlen;
- assert(room >= MLX5_WQE_DSEG_SIZE);
- assert((room % MLX5_WQE_DSEG_SIZE) == 0);
- assert((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
+ MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
+ MLX5_ASSERT((room % MLX5_WQE_DSEG_SIZE) == 0);
+ MLX5_ASSERT((uintptr_t)dseg < (uintptr_t)txq->wqes_end);
/*
* Some Tx offloads may cause an error if
* packet is not long enough, check against
return MLX5_TXCMP_CODE_ERROR;
}
/* Inline or not inline - that's the Question. */
- if (dlen > txq->inlen_empw)
+ if (dlen > txq->inlen_empw ||
+ loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE)
goto pointer_empw;
/* Inline entire packet, optional VLAN insertion. */
tlen = sizeof(dseg->bcount) + dlen;
* mlx5_tx_able_to_empw() and packet
* fits into inline length guaranteed.
*/
- assert((dlen + sizeof(struct rte_vlan_hdr)) <=
- txq->inlen_empw);
+ MLX5_ASSERT((dlen +
+ sizeof(struct rte_vlan_hdr)) <=
+ txq->inlen_empw);
tlen += sizeof(struct rte_vlan_hdr);
if (room < tlen)
break;
dptr, dlen, olx);
}
tlen = RTE_ALIGN(tlen, MLX5_WSEG_SIZE);
- assert(room >= tlen);
+ MLX5_ASSERT(room >= tlen);
room -= tlen;
/*
* Packet data are completely inlined,
* Not inlinable VLAN packets are
* processed outside of this routine.
*/
- assert(room >= MLX5_WQE_DSEG_SIZE);
+ MLX5_ASSERT(room >= MLX5_WQE_DSEG_SIZE);
if (MLX5_TXOFF_CONFIG(VLAN))
- assert(!(loc->mbuf->ol_flags &
- PKT_TX_VLAN_PKT));
+ MLX5_ASSERT(!(loc->mbuf->ol_flags &
+ PKT_TX_VLAN_PKT));
mlx5_tx_dseg_ptr(txq, loc, dseg, dptr, dlen, olx);
/* We have to store mbuf in elts.*/
txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
mlx5_tx_idone_empw(txq, loc, part, slen, olx);
return MLX5_TXCMP_CODE_EXIT;
}
- /* Check if we have minimal room left. */
- if (room < MLX5_WQE_DSEG_SIZE) {
- part -= room;
- mlx5_tx_idone_empw(txq, loc, part, slen, olx);
- goto next_empw;
- }
loc->mbuf = *pkts++;
if (likely(pkts_n > 1))
rte_prefetch0(*pkts);
return MLX5_TXCMP_CODE_EXIT;
return MLX5_TXCMP_CODE_MULTI;
}
+ MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
if (ret == MLX5_TXCMP_CODE_TSO) {
part -= room;
mlx5_tx_idone_empw(txq, loc, part, slen, olx);
return MLX5_TXCMP_CODE_SINGLE;
}
if (ret != MLX5_TXCMP_CODE_EMPW) {
- assert(false);
+ MLX5_ASSERT(false);
part -= room;
mlx5_tx_idone_empw(txq, loc, part, slen, olx);
return MLX5_TXCMP_CODE_ERROR;
}
+ /* Check if we have minimal room left. */
+ nlim--;
+ if (unlikely(!nlim || room < MLX5_WQE_DSEG_SIZE))
+ break;
/*
* Check whether packet parameters coincide
* within assumed eMPW batch:
* - check sum settings
* - metadata value
* - software parser settings
+ * - packet length (legacy MPW only)
*/
- if (!mlx5_tx_match_empw(txq, eseg, loc, olx))
+ if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx))
break;
/* Packet attributes match, continue the same eMPW. */
if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
* We get here to close an existing eMPW
* session and start the new one.
*/
- assert(pkts_n);
+ MLX5_ASSERT(pkts_n);
part -= room;
if (unlikely(!part))
return MLX5_TXCMP_CODE_EXIT;
if (unlikely(!loc->elts_free ||
!loc->wqe_free))
return MLX5_TXCMP_CODE_EXIT;
- goto next_empw;
+ /* Continue the loop with new eMPW session. */
}
- assert(false);
+ MLX5_ASSERT(false);
}
/**
* Subroutine is the part of mlx5_tx_burst_single()
* and sends single-segment packet with SEND opcode.
*/
- assert(loc->elts_free && loc->wqe_free);
- assert(pkts_n > loc->pkts_sent);
+ MLX5_ASSERT(loc->elts_free && loc->wqe_free);
+ MLX5_ASSERT(pkts_n > loc->pkts_sent);
pkts += loc->pkts_sent + 1;
pkts_n -= loc->pkts_sent;
for (;;) {
struct mlx5_wqe *restrict wqe;
enum mlx5_txcmp_code ret;
- assert(NB_SEGS(loc->mbuf) == 1);
+ MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
if (MLX5_TXOFF_CONFIG(INLINE)) {
unsigned int inlen, vlan = 0;
* Otherwise we would do extra check for data
* size to avoid crashes due to length overflow.
*/
- assert(txq->inlen_send >= MLX5_ESEG_MIN_INLINE_SIZE);
+ MLX5_ASSERT(txq->inlen_send >=
+ MLX5_ESEG_MIN_INLINE_SIZE);
if (inlen <= txq->inlen_send) {
unsigned int seg_n, wqe_n;
/* Check against minimal length. */
if (inlen <= MLX5_ESEG_MIN_INLINE_SIZE)
return MLX5_TXCMP_CODE_ERROR;
+ if (loc->mbuf->ol_flags &
+ PKT_TX_DYNF_NOINLINE) {
+ /*
+ * The hint flag not to inline packet
+ * data is set. Check whether we can
+ * follow the hint.
+ */
+ if ((!MLX5_TXOFF_CONFIG(EMPW) &&
+ txq->inlen_mode) ||
+ (MLX5_TXOFF_CONFIG(MPW) &&
+ txq->inlen_mode)) {
+ /*
+ * The hardware requires the
+ * minimal inline data header.
+ */
+ goto single_min_inline;
+ }
+ if (MLX5_TXOFF_CONFIG(VLAN) &&
+ vlan && !txq->vlan_en) {
+ /*
+ * We must insert VLAN tag
+ * by software means.
+ */
+ goto single_part_inline;
+ }
+ goto single_no_inline;
+ }
/*
* Completely inlined packet data WQE:
* - Control Segment, SEND opcode
* free the packet immediately.
*/
rte_pktmbuf_free_seg(loc->mbuf);
- } else if (!MLX5_TXOFF_CONFIG(EMPW) &&
- txq->inlen_mode) {
+ } else if ((!MLX5_TXOFF_CONFIG(EMPW) ||
+ MLX5_TXOFF_CONFIG(MPW)) &&
+ txq->inlen_mode) {
/*
* If minimal inlining is requested the eMPW
* feature should be disabled due to data is
* We should check the free space in
* WQE ring buffer to inline partially.
*/
- assert(txq->inlen_send >= txq->inlen_mode);
- assert(inlen > txq->inlen_mode);
- assert(txq->inlen_mode >=
- MLX5_ESEG_MIN_INLINE_SIZE);
+single_min_inline:
+ MLX5_ASSERT(txq->inlen_send >= txq->inlen_mode);
+ MLX5_ASSERT(inlen > txq->inlen_mode);
+ MLX5_ASSERT(txq->inlen_mode >=
+ MLX5_ESEG_MIN_INLINE_SIZE);
/*
* Check whether there are enough free WQEBBs:
* - Control Segment
txq->wqe_ci += (ds + 3) / 4;
loc->wqe_free -= (ds + 3) / 4;
/* We have to store mbuf in elts.*/
- assert(MLX5_TXOFF_CONFIG(INLINE));
+ MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
txq->elts[txq->elts_head++ & txq->elts_m] =
loc->mbuf;
--loc->elts_free;
* We also get here if VLAN insertion is not
* supported by HW, the inline is enabled.
*/
+single_part_inline:
wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
loc->wqe_last = wqe;
mlx5_tx_cseg_init(txq, loc, wqe, 4,
* comparing with txq->inlen_send. We should
* not get overflow here.
*/
- assert(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
+ MLX5_ASSERT(inlen > MLX5_ESEG_MIN_INLINE_SIZE);
dlen = inlen - MLX5_ESEG_MIN_INLINE_SIZE;
mlx5_tx_dseg_ptr(txq, loc, &wqe->dseg[1],
dptr, dlen, olx);
++txq->wqe_ci;
--loc->wqe_free;
/* We have to store mbuf in elts.*/
- assert(MLX5_TXOFF_CONFIG(INLINE));
+ MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
txq->elts[txq->elts_head++ & txq->elts_m] =
loc->mbuf;
--loc->elts_free;
* - Ethernet Segment, optional VLAN, no inline
* - Data Segment, pointer type
*/
+single_no_inline:
wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
loc->wqe_last = wqe;
mlx5_tx_cseg_init(txq, loc, wqe, 3,
* if no inlining is configured, this is done
* by calling routine in a batch copy.
*/
- assert(!MLX5_TXOFF_CONFIG(INLINE));
+ MLX5_ASSERT(!MLX5_TXOFF_CONFIG(INLINE));
--loc->elts_free;
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Update sent data bytes counter. */
if (unlikely(ret != MLX5_TXCMP_CODE_SINGLE))
return ret;
}
- assert(false);
+ MLX5_ASSERT(false);
}
static __rte_always_inline enum mlx5_txcmp_code
ret = mlx5_tx_able_to_empw(txq, loc, olx, false);
if (ret == MLX5_TXCMP_CODE_SINGLE)
goto ordinary_send;
- assert(ret == MLX5_TXCMP_CODE_EMPW);
+ MLX5_ASSERT(ret == MLX5_TXCMP_CODE_EMPW);
for (;;) {
/* Optimize for inline/no inline eMPW send. */
ret = (MLX5_TXOFF_CONFIG(INLINE)) ?
if (ret != MLX5_TXCMP_CODE_SINGLE)
return ret;
/* The resources to send one packet should remain. */
- assert(loc->elts_free && loc->wqe_free);
+ MLX5_ASSERT(loc->elts_free && loc->wqe_free);
ordinary_send:
ret = mlx5_tx_burst_single_send(txq, pkts, pkts_n, loc, olx);
- assert(ret != MLX5_TXCMP_CODE_SINGLE);
+ MLX5_ASSERT(ret != MLX5_TXCMP_CODE_SINGLE);
if (ret != MLX5_TXCMP_CODE_EMPW)
return ret;
/* The resources to send one packet should remain. */
- assert(loc->elts_free && loc->wqe_free);
+ MLX5_ASSERT(loc->elts_free && loc->wqe_free);
}
}
enum mlx5_txcmp_code ret;
unsigned int part;
- assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
- assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
+ MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
+ MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
+ if (unlikely(!pkts_n))
+ return 0;
+ loc.pkts_sent = 0;
+ loc.pkts_copy = 0;
+ loc.wqe_last = NULL;
+
+send_loop:
+ loc.pkts_loop = loc.pkts_sent;
/*
* Check if there are some CQEs, if any:
* - process an encountered errors
* - free related mbufs
* - doorbell the NIC about processed CQEs
*/
- if (unlikely(!pkts_n))
- return 0;
- rte_prefetch0(*pkts);
+ rte_prefetch0(*(pkts + loc.pkts_sent));
mlx5_tx_handle_completion(txq, olx);
/*
* Calculate the number of available resources - elts and WQEs.
* - data inlining into WQEs, one packet may require multiple
* WQEBBs, the WQEs become the limiting factor.
*/
- assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
+ MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
loc.elts_free = txq->elts_s -
(uint16_t)(txq->elts_head - txq->elts_tail);
- assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
+ MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
loc.wqe_free = txq->wqe_s -
(uint16_t)(txq->wqe_ci - txq->wqe_pi);
if (unlikely(!loc.elts_free || !loc.wqe_free))
- return 0;
- loc.pkts_sent = 0;
- loc.pkts_copy = 0;
- loc.wqe_last = NULL;
+ goto burst_exit;
for (;;) {
/*
* Fetch the packet from array. Usually this is
* per WQE, do it in dedicated routine.
*/
enter_send_multi:
- assert(loc.pkts_sent >= loc.pkts_copy);
+ MLX5_ASSERT(loc.pkts_sent >= loc.pkts_copy);
part = loc.pkts_sent - loc.pkts_copy;
if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
/*
part, olx);
loc.pkts_copy = loc.pkts_sent;
}
- assert(pkts_n > loc.pkts_sent);
+ MLX5_ASSERT(pkts_n > loc.pkts_sent);
ret = mlx5_tx_burst_mseg(txq, pkts, pkts_n, &loc, olx);
if (!MLX5_TXOFF_CONFIG(INLINE))
loc.pkts_copy = loc.pkts_sent;
goto enter_send_tso;
}
/* We must not get here. Something is going wrong. */
- assert(false);
+ MLX5_ASSERT(false);
txq->stats.oerrors++;
break;
}
* in dedicated branch.
*/
enter_send_tso:
- assert(NB_SEGS(loc.mbuf) == 1);
- assert(pkts_n > loc.pkts_sent);
+ MLX5_ASSERT(NB_SEGS(loc.mbuf) == 1);
+ MLX5_ASSERT(pkts_n > loc.pkts_sent);
ret = mlx5_tx_burst_tso(txq, pkts, pkts_n, &loc, olx);
/*
* These returned code checks are supposed
goto enter_send_multi;
}
/* We must not get here. Something is going wrong. */
- assert(false);
+ MLX5_ASSERT(false);
txq->stats.oerrors++;
break;
}
* offloads are requested at SQ configuration time).
*/
enter_send_single:
- assert(pkts_n > loc.pkts_sent);
+ MLX5_ASSERT(pkts_n > loc.pkts_sent);
ret = mlx5_tx_burst_single(txq, pkts, pkts_n, &loc, olx);
/*
* These returned code checks are supposed
goto enter_send_tso;
}
/* We must not get here. Something is going wrong. */
- assert(false);
+ MLX5_ASSERT(false);
txq->stats.oerrors++;
break;
}
* - doorbell the hardware
* - copy the rest of mbufs to elts (if any)
*/
- assert(MLX5_TXOFF_CONFIG(INLINE) || loc.pkts_sent >= loc.pkts_copy);
+ MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE) ||
+ loc.pkts_sent >= loc.pkts_copy);
/* Take a shortcut if nothing is sent. */
- if (unlikely(loc.pkts_sent == 0))
- return 0;
- /* Not all of the mbufs may be stored into elts yet. */
- part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
- mlx5_tx_request_completion(txq, part, &loc, olx);
+ if (unlikely(loc.pkts_sent == loc.pkts_loop))
+ goto burst_exit;
+ /* Request CQE generation if limits are reached. */
+ mlx5_tx_request_completion(txq, &loc, olx);
/*
* Ring QP doorbell immediately after WQE building completion
* to improve latencies. The pure software related data treatment
* can be completed after doorbell. Tx CQEs for this SQ are
* processed in this thread only by the polling.
+ *
+ * The rdma core library can map the doorbell register in two ways,
+ * depending on the environment variable "MLX5_SHUT_UP_BF":
+ *
+ * - as regular cached memory, the variable is either missing or
+ * set to zero. This type of mapping may cause significant
+ * doorbell register writing latency and requires an explicit
+ * memory write barrier to mitigate this issue and prevent
+ * write combining.
+ *
+ * - as non-cached memory, the variable is present and set to
+ * a non-zero value. This type of mapping may cause a performance
+ * impact under heavy loading conditions but the explicit write
+ * memory barrier is not required and it may improve core
+ * performance.
+ *
+ * - the legacy behaviour (prior to the 19.08 release) was to use
+ * some heuristics to decide whether a write memory barrier should
+ * be performed. This behaviour is supported by specifying
+ * tx_db_nc=2; the write barrier is skipped if the application
+ * provides the full recommended burst of packets, assuming
+ * the next packets are coming and the write barrier
+ * will be issued on the next burst (after descriptor writing,
+ * at least).
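+ *
+ * For example (hypothetical device address), the legacy heuristic
+ * mode can be selected with the mlx5 devargs: -w 0000:03:00.0,tx_db_nc=2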
*/
- mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, 0);
+ mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, !txq->db_nc &&
+ (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
+ /* Not all of the mbufs may be stored into elts yet. */
+ part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
/*
* There are some single-segment mbufs not stored in elts.
- * It can be only if last packet was single-segment.
+ * This can only happen if the last packet was single-segment.
* The copying is gathered into one place because it is
* a good opportunity to optimize that with SIMD.
* Unfortunately if inlining is enabled the gaps in
* inlined mbufs.
*/
mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
+ loc.pkts_copy = loc.pkts_sent;
+ }
+ MLX5_ASSERT(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
+ MLX5_ASSERT(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
+ if (pkts_n > loc.pkts_sent) {
+ /*
+ * If the burst size is large there might not be enough CQEs
+ * fetched from the completion queue and not enough resources
+ * freed to send all the packets.
+ */
+ goto send_loop;
}
+burst_exit:
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Increment sent packets counter. */
txq->stats.opackets += loc.pkts_sent;
#endif
- assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
- assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
return loc.pkts_sent;
}
MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
MLX5_TXOFF_CONFIG_METADATA)
+/*
+ * Generate routines with Legacy Multi-Packet Write support.
+ * This mode is supported by ConnectX-4 Lx only and imposes
+ * offload limitations; the following are not supported:
+ * - ACL/Flows (metadata becomes meaningless)
+ * - WQE Inline headers
+ * - SRIOV (E-Switch offloads)
+ * - VLAN insertion
+ * - tunnel encapsulation/decapsulation
+ * - TSO
+ */
+MLX5_TXOFF_DECL(none_mpw,
+ MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
+ MLX5_TXOFF_CONFIG_MPW)
+
+MLX5_TXOFF_DECL(mci_mpw,
+ MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
+ MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
+ MLX5_TXOFF_CONFIG_MPW)
+
+MLX5_TXOFF_DECL(mc_mpw,
+ MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
+ MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
+
+MLX5_TXOFF_DECL(i_mpw,
+ MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
+ MLX5_TXOFF_CONFIG_MPW)
+
/*
* Array of declared and compiled Tx burst function and corresponding
* supported offloads set. The array is used to select the Tx burst
MLX5_TXOFF_CONFIG_INLINE |
MLX5_TXOFF_CONFIG_METADATA)
-
MLX5_TXOFF_INFO(mtv,
MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
MLX5_TXOFF_CONFIG_VLAN |
MLX5_TXOFF_INFO(iv,
MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
MLX5_TXOFF_CONFIG_METADATA)
+
+MLX5_TXOFF_INFO(none_mpw,
+ MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
+ MLX5_TXOFF_CONFIG_MPW)
+
+MLX5_TXOFF_INFO(mci_mpw,
+ MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
+ MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
+ MLX5_TXOFF_CONFIG_MPW)
+
+MLX5_TXOFF_INFO(mc_mpw,
+ MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
+ MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
+
+MLX5_TXOFF_INFO(i_mpw,
+ MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
+ MLX5_TXOFF_CONFIG_MPW)
};
/**
"invalid WQE Data Segment size");
static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
"invalid WQE size");
- assert(priv);
+ MLX5_ASSERT(priv);
if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
/* We should support Multi-Segment Packets. */
olx |= MLX5_TXOFF_CONFIG_MULTI;
if (config->mps == MLX5_MPW_ENHANCED &&
config->txq_inline_min <= 0) {
/*
- * The NIC supports Enhanced Multi-Packet Write.
- * We do not support legacy MPW due to its
- * hardware related problems, so we just ignore
- * legacy MLX5_MPW settings. There should be no
- * minimal required inline data.
+ * The NIC supports Enhanced Multi-Packet Write
+ * and does not require minimal inline data.
*/
olx |= MLX5_TXOFF_CONFIG_EMPW;
}
- if (tx_offloads & DEV_TX_OFFLOAD_MATCH_METADATA) {
+ if (rte_flow_dynf_metadata_avail()) {
/* We should support Flow metadata. */
olx |= MLX5_TXOFF_CONFIG_METADATA;
}
+ if (config->mps == MLX5_MPW) {
+ /*
+ * The NIC supports Legacy Multi-Packet Write.
+ * The MLX5_TXOFF_CONFIG_MPW controls the
+ * descriptor building method in combination
+ * with MLX5_TXOFF_CONFIG_EMPW.
+ */
+ if (!(olx & (MLX5_TXOFF_CONFIG_TSO |
+ MLX5_TXOFF_CONFIG_SWP |
+ MLX5_TXOFF_CONFIG_VLAN |
+ MLX5_TXOFF_CONFIG_METADATA)))
+ olx |= MLX5_TXOFF_CONFIG_EMPW |
+ MLX5_TXOFF_CONFIG_MPW;
+ }
/*
* Scan the routines table to find the minimal
* satisfying routine with requested offloads.
DRV_LOG(DEBUG, "\tVLANI (VLAN insertion)");
if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_METADATA)
DRV_LOG(DEBUG, "\tMETAD (tx Flow metadata)");
- if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_EMPW)
- DRV_LOG(DEBUG, "\tEMPW (Enhanced MPW)");
+ if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_EMPW) {
+ if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MPW)
+ DRV_LOG(DEBUG, "\tMPW (Legacy MPW)");
+ else
+ DRV_LOG(DEBUG, "\tEMPW (Enhanced MPW)");
+ }
return txoff_func[m].func;
}
+/**
+ * DPDK callback to get the TX queue information
+ *
+ * @param dev
+ * Pointer to the device structure.
+ *
+ * @param tx_queue_id
+ * Tx queue identifier.
+ *
+ * @param qinfo
+ * Pointer to the TX queue information structure.
+ *
+ * @return
+ * None.
+ */
+
+void
+mlx5_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_txq_data *txq = (*priv->txqs)[tx_queue_id];
+ struct mlx5_txq_ctrl *txq_ctrl =
+ container_of(txq, struct mlx5_txq_ctrl, txq);
+
+ if (!txq)
+ return;
+ qinfo->nb_desc = txq->elts_s;
+ qinfo->conf.tx_thresh.pthresh = 0;
+ qinfo->conf.tx_thresh.hthresh = 0;
+ qinfo->conf.tx_thresh.wthresh = 0;
+ qinfo->conf.tx_rs_thresh = 0;
+ qinfo->conf.tx_free_thresh = 0;
+ qinfo->conf.tx_deferred_start = txq_ctrl ? 0 : 1;
+ qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
+}
+
+/**
+ * DPDK callback to get the TX packet burst mode information
+ *
+ * @param dev
+ * Pointer to the device structure.
+ *
+ * @param tx_queue_id
+ * Tx queue identifier.
+ *
+ * @param mode
+ * Pointer to the burst mode information.
+ *
+ * @return
+ * 0 on success, -EINVAL on failure.
+ */
+int
+mlx5_tx_burst_mode_get(struct rte_eth_dev *dev,
+ uint16_t tx_queue_id __rte_unused,
+ struct rte_eth_burst_mode *mode)
+{
+ eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
+ unsigned int i, olx;
+
+ for (i = 0; i < RTE_DIM(txoff_func); i++) {
+ if (pkt_burst == txoff_func[i].func) {
+ olx = txoff_func[i].olx;
+ snprintf(mode->info, sizeof(mode->info),
+ "%s%s%s%s%s%s%s%s",
+ (olx & MLX5_TXOFF_CONFIG_EMPW) ?
+ ((olx & MLX5_TXOFF_CONFIG_MPW) ?
+ "Legacy MPW" : "Enhanced MPW") : "No MPW",
+ (olx & MLX5_TXOFF_CONFIG_MULTI) ?
+ " + MULTI" : "",
+ (olx & MLX5_TXOFF_CONFIG_TSO) ?
+ " + TSO" : "",
+ (olx & MLX5_TXOFF_CONFIG_SWP) ?
+ " + SWP" : "",
+ (olx & MLX5_TXOFF_CONFIG_CSUM) ?
+ " + CSUM" : "",
+ (olx & MLX5_TXOFF_CONFIG_INLINE) ?
+ " + INLINE" : "",
+ (olx & MLX5_TXOFF_CONFIG_VLAN) ?
+ " + VLAN" : "",
+ (olx & MLX5_TXOFF_CONFIG_METADATA) ?
+ " + METADATA" : "");
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
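+
+/*
+ * Illustrative application-side query of the Tx burst mode string built
+ * above, via the generic ethdev API (port_id and queue_id are hypothetical
+ * values):
+ *
+ *     struct rte_eth_burst_mode mode;
+ *
+ *     if (rte_eth_tx_burst_mode_get(port_id, queue_id, &mode) == 0)
+ *         printf("Tx burst mode: %s\n", mode.info);
+ */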