#include <rte_branch_prediction.h>
#include <rte_ether.h>
#include <rte_cycles.h>
+#include <rte_flow.h>
+#include <mlx5_devx_cmds.h>
+#include <mlx5_prm.h>
+
+#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
-#include "mlx5_defs.h"
-#include "mlx5_prm.h"
/* TX burst subroutines return codes. */
enum mlx5_txcmp_code {
#define MLX5_TXOFF_CONFIG_VLAN (1u << 5) /* VLAN insertion supported.*/
#define MLX5_TXOFF_CONFIG_METADATA (1u << 6) /* Flow metadata. */
#define MLX5_TXOFF_CONFIG_EMPW (1u << 8) /* Enhanced MPW supported.*/
+#define MLX5_TXOFF_CONFIG_MPW (1u << 9) /* Legacy MPW supported.*/
/* The most common offloads groups. */
#define MLX5_TXOFF_CONFIG_NONE 0
MKSTR(path, "%s/%s", MLX5_SYSTEM_LOG_DIR, fname);
fd = fopen(path, "a+");
if (!fd) {
- DRV_LOG(WARNING, "cannot open %s for debug dump\n",
- path);
+ DRV_LOG(WARNING, "cannot open %s for debug dump", path);
MKSTR(path2, "./%s", fname);
fd = fopen(path2, "a+");
if (!fd) {
- DRV_LOG(ERR, "cannot open %s for debug dump\n",
- path2);
+ DRV_LOG(ERR, "cannot open %s for debug dump", path2);
return;
}
- DRV_LOG(INFO, "New debug dump in file %s\n", path2);
+ DRV_LOG(INFO, "New debug dump in file %s", path2);
} else {
- DRV_LOG(INFO, "New debug dump in file %s\n", path);
+ DRV_LOG(INFO, "New debug dump in file %s", path);
}
if (hex_title)
rte_hexdump(fd, hex_title, buf, hex_len);
* Pointer to the error CQE.
*
* @return
- * The last Tx buffer element to free.
+ *   Negative value if queue recovery failed, otherwise
+ *   zero when the error completion entry was handled successfully.
*/
-uint16_t
+static int
mlx5_tx_error_cqe_handle(struct mlx5_txq_data *restrict txq,
volatile struct mlx5_err_cqe *err_cqe)
{
*/
txq->stats.oerrors += ((txq->wqe_ci & wqe_m) -
new_wqe_pi) & wqe_m;
- if (tx_recover_qp(txq_ctrl) == 0) {
- txq->cq_ci++;
- /* Release all the remaining buffers. */
- return txq->elts_head;
+ if (tx_recover_qp(txq_ctrl)) {
+ /* Recovering failed - retry later on the same WQE. */
+ return -1;
}
- /* Recovering failed - try again later on the same WQE. */
- } else {
- txq->cq_ci++;
+ /* Release all the remaining buffers. */
+ txq_free_elts(txq_ctrl);
}
- /* Do not release buffers. */
- return txq->elts_tail;
+ return 0;
}
/**
&rq_attr);
}
if (ret) {
- DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s\n",
+ DRV_LOG(ERR, "Cannot change Rx WQ state to %u - %s",
sm->state, strerror(errno));
rte_errno = errno;
return ret;
.qp_state = IBV_QPS_RESET,
.port_num = (uint8_t)priv->ibv_port,
};
- struct ibv_qp *qp = txq_ctrl->ibv->qp;
+ struct ibv_qp *qp = txq_ctrl->obj->qp;
ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
if (ret) {
DRV_LOG(ERR, "Cannot change the Tx QP state to RESET "
- "%s\n", strerror(errno));
+ "%s", strerror(errno));
rte_errno = errno;
return ret;
}
ret = mlx5_glue->modify_qp(qp, &mod,
(IBV_QP_STATE | IBV_QP_PORT));
if (ret) {
- DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s\n",
+ DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s",
strerror(errno));
rte_errno = errno;
return ret;
mod.qp_state = IBV_QPS_RTR;
ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
if (ret) {
- DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s\n",
+ DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s",
strerror(errno));
rte_errno = errno;
return ret;
mod.qp_state = IBV_QPS_RTS;
ret = mlx5_glue->modify_qp(qp, &mod, IBV_QP_STATE);
if (ret) {
- DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s\n",
+ DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s",
strerror(errno));
rte_errno = errno;
return ret;
*
* @param[in] rxq
* Pointer to RX queue structure.
- * @param[in] mbuf_prepare
- * Whether to prepare mbufs for the RQ.
+ * @param[in] vec
+ *   1 when called from a vectorized Rx burst, mbufs for the RQ have to be prepared.
+ *   0 when called from a non-vectorized Rx burst.
*
* @return
* -1 in case of recovery error, otherwise the CQE status.
*/
int
-mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t mbuf_prepare)
+mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec)
{
const uint16_t cqe_n = 1 << rxq->cqe_n;
const uint16_t cqe_mask = cqe_n - 1;
if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv),
&sm))
return -1;
- if (mbuf_prepare) {
+ if (vec) {
const uint16_t q_mask = wqe_n - 1;
uint16_t elt_idx;
struct rte_mbuf **elt;
return -1;
}
}
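+			/* Restore the default data length of all Rx ring entries. */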
+ for (i = 0; i < (int)wqe_n; ++i) {
+ elt = &(*rxq->elts)[i];
+ DATA_LEN(*elt) =
+ (uint16_t)((*elt)->buf_len -
+ rte_pktmbuf_headroom(*elt));
+ }
+ /* Padding with a fake mbuf for vec Rx. */
+ for (i = 0; i < MLX5_VPMD_DESCS_PER_LOOP; ++i)
+ (*rxq->elts)[wqe_n + i] =
+ &rxq->fake_mbuf;
}
mlx5_rxq_initialize(rxq);
rxq->err_state = MLX5_RXQ_ERR_STATE_NO_ERROR;
pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
}
}
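+	/* Copy the flow metadata from the CQE into the mbuf dynamic field if enabled. */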
+ if (rte_flow_dynf_metadata_avail() && cqe->flow_table_metadata) {
+ pkt->ol_flags |= PKT_RX_DYNF_METADATA;
+ *RTE_FLOW_DYNF_METADATA(pkt) = cqe->flow_table_metadata;
+ }
if (rxq->csum)
pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
if (rxq->vlan_strip &&
}
pkt = seg;
assert(len >= (rxq->crc_present << 2));
- pkt->ol_flags = 0;
+ pkt->ol_flags &= EXT_ATTACHED_MBUF;
/* If compressed, take hash result from mini-CQE. */
rss_hash_res = rte_be_to_cpu_32(mcqe == NULL ?
cqe->rx_hash_res :
(pkts_n - part) * sizeof(struct rte_mbuf *));
}
+/**
+ * Update completion queue consuming index via doorbell
+ * and flush the completed data buffers.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param last_cqe
+ *   Pointer to the last valid CQE, if not NULL update txq->wqe_pi
+ *   and flush the completed data buffers.
+ * @param olx
+ * Configured Tx offloads mask. It is fully defined at
+ * compile time and may be used for optimization.
+ */
+static __rte_always_inline void
+mlx5_tx_comp_flush(struct mlx5_txq_data *restrict txq,
+ volatile struct mlx5_cqe *last_cqe,
+ unsigned int olx __rte_unused)
+{
+ if (likely(last_cqe != NULL)) {
+ uint16_t tail;
+
+ txq->wqe_pi = rte_be_to_cpu_16(last_cqe->wqe_counter);
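+		/* Fetch the elts_head value stored for this completion in fcqs. */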
+ tail = txq->fcqs[(txq->cq_ci - 1) & txq->cqe_m];
+ if (likely(tail != txq->elts_tail)) {
+ mlx5_tx_free_elts(txq, tail, olx);
+ assert(tail == txq->elts_tail);
+ }
+ }
+}
+
/**
* Manage TX completions. This routine checks the CQ for
* arrived CQEs, deduces the last accomplished WQE in SQ,
unsigned int olx __rte_unused)
{
unsigned int count = MLX5_TX_COMP_MAX_CQE;
- bool update = false;
- uint16_t tail = txq->elts_tail;
+ volatile struct mlx5_cqe *last_cqe = NULL;
+ uint16_t ci = txq->cq_ci;
int ret;
+ static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
+ static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value");
do {
volatile struct mlx5_cqe *cqe;
- cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
- ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
+ cqe = &txq->cqes[ci & txq->cqe_m];
+ ret = check_cqe(cqe, txq->cqe_s, ci);
if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
if (likely(ret != MLX5_CQE_STATUS_ERR)) {
/* No new CQEs in completion queue. */
assert(ret == MLX5_CQE_STATUS_HW_OWN);
break;
}
- /* Some error occurred, try to restart. */
+ /*
+ * Some error occurred, try to restart.
+			 * There is no barrier after the WQE related doorbell
+			 * is written, make sure all writes are completed
+			 * here before the SQ may be reset.
+ */
rte_wmb();
- tail = mlx5_tx_error_cqe_handle
+ txq->cq_ci = ci;
+ ret = mlx5_tx_error_cqe_handle
(txq, (volatile struct mlx5_err_cqe *)cqe);
- if (likely(tail != txq->elts_tail)) {
- mlx5_tx_free_elts(txq, tail, olx);
- assert(tail == txq->elts_tail);
+ if (unlikely(ret < 0)) {
+ /*
+			 * An error occurred while handling the queue
+			 * error, do not advance the index here to
+			 * allow a retry on the next call.
+ */
+ return;
}
- /* Allow flushing all CQEs from the queue. */
- count = txq->cqe_s;
- } else {
- volatile struct mlx5_wqe_cseg *cseg;
-
- /* Normal transmit completion. */
- ++txq->cq_ci;
- rte_cio_rmb();
- txq->wqe_pi = rte_be_to_cpu_16(cqe->wqe_counter);
- cseg = (volatile struct mlx5_wqe_cseg *)
- (txq->wqes + (txq->wqe_pi & txq->wqe_m));
- tail = cseg->misc;
+ /*
+ * We are going to fetch all entries with
+ * MLX5_CQE_SYNDROME_WR_FLUSH_ERR status.
+ * The send queue is supposed to be empty.
+ */
+ ++ci;
+ txq->cq_pi = ci;
+ last_cqe = NULL;
+ continue;
}
-#ifndef NDEBUG
- if (txq->cq_pi)
- --txq->cq_pi;
-#endif
- update = true;
- /*
- * We have to restrict the amount of processed CQEs
- * in one tx_burst routine call. The CQ may be large
- * and many CQEs may be updated by the NIC in one
- * transaction. Buffers freeing is time consuming,
- * multiple iterations may introduce significant
- * latency.
- */
- } while (--count);
- if (likely(tail != txq->elts_tail)) {
- /* Free data buffers from elts. */
- mlx5_tx_free_elts(txq, tail, olx);
- assert(tail == txq->elts_tail);
- }
- if (likely(update)) {
- /* Update the consumer index. */
+ /* Normal transmit completion. */
+ assert(ci != txq->cq_pi);
+ assert((txq->fcqs[ci & txq->cqe_m] >> 16) == cqe->wqe_counter);
+ ++ci;
+ last_cqe = cqe;
+ /*
+ * We have to restrict the amount of processed CQEs
+ * in one tx_burst routine call. The CQ may be large
+ * and many CQEs may be updated by the NIC in one
+ * transaction. Buffers freeing is time consuming,
+ * multiple iterations may introduce significant
+ * latency.
+ */
+ if (likely(--count == 0))
+ break;
+ } while (true);
+ if (likely(ci != txq->cq_ci)) {
+ /*
+ * Update completion queue consuming index
+ * and ring doorbell to notify hardware.
+ */
rte_compiler_barrier();
- *txq->cq_db =
- rte_cpu_to_be_32(txq->cq_ci);
+ txq->cq_ci = ci;
+ *txq->cq_db = rte_cpu_to_be_32(ci);
+ mlx5_tx_comp_flush(txq, last_cqe, olx);
}
}
uint16_t head = txq->elts_head;
unsigned int part;
- part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc->pkts_sent -
- (MLX5_TXOFF_CONFIG(MULTI) ? loc->pkts_copy : 0);
+ part = MLX5_TXOFF_CONFIG(INLINE) ?
+ 0 : loc->pkts_sent - loc->pkts_copy;
head += part;
if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
(MLX5_TXOFF_CONFIG(INLINE) &&
/* Request unconditional completion on last WQE. */
last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
MLX5_COMP_MODE_OFFSET);
- /* Save elts_head in unused "immediate" field of WQE. */
- last->cseg.misc = head;
- /*
- * A CQE slot must always be available. Count the
- * issued CEQ "always" request instead of production
- * index due to here can be CQE with errors and
- * difference with ci may become inconsistent.
- */
- assert(txq->cqe_s > ++txq->cq_pi);
+		/* Save elts_head in the dedicated free-on-completion queue. */
+#ifdef NDEBUG
+ txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
+#else
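+		/*
+		 * Store the WQE opcode field in the upper 16 bits as well,
+		 * it is cross-checked against the CQE in the completion
+		 * handling routine (debug builds only).
+		 */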
+ txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head |
+ (last->cseg.opcode >> 8) << 16;
+#endif
+ /* A CQE slot must always be available. */
+ assert((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
}
}
{
struct mlx5_wqe_cseg *restrict cs = &wqe->cseg;
+ /* For legacy MPW replace the EMPW by TSO with modifier. */
+ if (MLX5_TXOFF_CONFIG(MPW) && opcode == MLX5_OPCODE_ENHANCED_MPSW)
+ opcode = MLX5_OPCODE_TSO | MLX5_OPC_MOD_MPW << 24;
cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
/* Fill metadata field if needed. */
es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
- loc->mbuf->ol_flags & PKT_TX_METADATA ?
- loc->mbuf->tx_metadata : 0 : 0;
+ loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
+ *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
/* Engage VLAN tag insertion feature if requested. */
if (MLX5_TXOFF_CONFIG(VLAN) &&
loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
/* Fill metadata field if needed. */
es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
- loc->mbuf->ol_flags & PKT_TX_METADATA ?
- loc->mbuf->tx_metadata : 0 : 0;
+ loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
+ *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
(sizeof(uint16_t) +
sizeof(rte_v128u32_t)),
es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
/* Fill metadata field if needed. */
es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
- loc->mbuf->ol_flags & PKT_TX_METADATA ?
- loc->mbuf->tx_metadata : 0 : 0;
+ loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
+ *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
(sizeof(uint16_t) +
sizeof(rte_v128u32_t)),
es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
/* Fill metadata field if needed. */
es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
- loc->mbuf->ol_flags & PKT_TX_METADATA ?
- loc->mbuf->tx_metadata : 0 : 0;
+ loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
+ *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
(sizeof(uint16_t) +
sizeof(rte_v128u32_t)),
/* Unrolled implementation of generic rte_memcpy. */
dst = (uintptr_t)&dseg->inline_data[0];
src = (uintptr_t)buf;
+ if (len & 0x08) {
#ifdef RTE_ARCH_STRICT_ALIGN
- memcpy(dst, src, len);
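+		/*
+		 * The destination is only 32-bit aligned here (checked below),
+		 * copy 8 bytes as two 32-bit stores on strict-alignment hosts.
+		 */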
+ assert(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
+ *(uint32_t *)dst = *(unaligned_uint32_t *)src;
+ dst += sizeof(uint32_t);
+ src += sizeof(uint32_t);
+ *(uint32_t *)dst = *(unaligned_uint32_t *)src;
+ dst += sizeof(uint32_t);
+ src += sizeof(uint32_t);
#else
- if (len & 0x08) {
- *(uint64_t *)dst = *(uint64_t *)src;
+ *(uint64_t *)dst = *(unaligned_uint64_t *)src;
dst += sizeof(uint64_t);
src += sizeof(uint64_t);
+#endif
}
if (len & 0x04) {
- *(uint32_t *)dst = *(uint32_t *)src;
+ *(uint32_t *)dst = *(unaligned_uint32_t *)src;
dst += sizeof(uint32_t);
src += sizeof(uint32_t);
}
if (len & 0x02) {
- *(uint16_t *)dst = *(uint16_t *)src;
+ *(uint16_t *)dst = *(unaligned_uint16_t *)src;
dst += sizeof(uint16_t);
src += sizeof(uint16_t);
}
if (len & 0x01)
*(uint8_t *)dst = *(uint8_t *)src;
-#endif
}
/**
memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
buf += MLX5_DSEG_MIN_INLINE_SIZE;
pdst += MLX5_DSEG_MIN_INLINE_SIZE;
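+	/* Account for the part of the packet data already copied above. */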
+ len -= MLX5_DSEG_MIN_INLINE_SIZE;
/* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
assert(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
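+	/* Wrap to the start of the WQE ring before storing the VLAN header if needed. */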
+ if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
+ pdst = (uint8_t *)txq->wqes;
*(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
loc->mbuf->vlan_tci);
pdst += sizeof(struct rte_vlan_hdr);
- if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
- pdst = (uint8_t *)txq->wqes;
/*
* The WQEBB space availability is checked by caller.
* Here we should be aware of WQE ring buffer wraparound only.
wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
txq->wqe_ci += (ds + 3) / 4;
loc->wqe_free -= (ds + 3) / 4;
- /* Request CQE generation if limits are reached. */
- mlx5_tx_request_completion(txq, loc, olx);
return MLX5_TXCMP_CODE_MULTI;
}
} while (true);
txq->wqe_ci += (ds + 3) / 4;
loc->wqe_free -= (ds + 3) / 4;
- /* Request CQE generation if limits are reached. */
- mlx5_tx_request_completion(txq, loc, olx);
return MLX5_TXCMP_CODE_MULTI;
}
wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
txq->wqe_ci += (ds + 3) / 4;
loc->wqe_free -= (ds + 3) / 4;
- /* Request CQE generation if limits are reached. */
- mlx5_tx_request_completion(txq, loc, olx);
return MLX5_TXCMP_CODE_MULTI;
}
continue;
/* Here ends the series of multi-segment packets. */
if (MLX5_TXOFF_CONFIG(TSO) &&
- unlikely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
+ unlikely(loc->mbuf->ol_flags & PKT_TX_TCP_SEG))
return MLX5_TXCMP_CODE_TSO;
return MLX5_TXCMP_CODE_SINGLE;
}
--loc->elts_free;
++loc->pkts_sent;
--pkts_n;
- /* Request CQE generation if limits are reached. */
- mlx5_tx_request_completion(txq, loc, olx);
if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
return MLX5_TXCMP_CODE_EXIT;
loc->mbuf = *pkts++;
if (MLX5_TXOFF_CONFIG(MULTI) &&
unlikely(NB_SEGS(loc->mbuf) > 1))
return MLX5_TXCMP_CODE_MULTI;
- if (unlikely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
+ if (likely(!(loc->mbuf->ol_flags & PKT_TX_TCP_SEG)))
return MLX5_TXCMP_CODE_SINGLE;
/* Continue with the next TSO packet. */
}
/**
* Check the next packet attributes to match with the eMPW batch ones.
+ * In addition, for legacy MPW the packet length is checked as well.
*
* @param txq
* Pointer to TX queue structure.
* Pointer to Ethernet Segment of eMPW batch.
* @param loc
* Pointer to burst routine local context.
+ * @param dlen
+ * Length of previous packet in MPW descriptor.
* @param olx
* Configured Tx offloads mask. It is fully defined at
* compile time and may be used for optimization.
mlx5_tx_match_empw(struct mlx5_txq_data *restrict txq __rte_unused,
struct mlx5_wqe_eseg *restrict es,
struct mlx5_txq_local *restrict loc,
+ uint32_t dlen,
unsigned int olx)
{
uint8_t swp_flags = 0;
return false;
/* Fill metadata field if needed. */
if (MLX5_TXOFF_CONFIG(METADATA) &&
- es->metadata != (loc->mbuf->ol_flags & PKT_TX_METADATA ?
- loc->mbuf->tx_metadata : 0))
+ es->metadata != (loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
+ *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0))
+ return false;
+	/* Legacy MPW can only send packets of the same length. */
+ if (MLX5_TXOFF_CONFIG(MPW) &&
+ dlen != rte_pktmbuf_data_len(loc->mbuf))
return false;
/* There must be no VLAN packets in eMPW loop. */
if (MLX5_TXOFF_CONFIG(VLAN))
struct mlx5_txq_local *restrict loc,
unsigned int ds,
unsigned int slen,
- unsigned int olx)
+ unsigned int olx __rte_unused)
{
assert(!MLX5_TXOFF_CONFIG(INLINE));
#ifdef MLX5_PMD_SOFT_COUNTERS
loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
txq->wqe_ci += (ds + 3) / 4;
loc->wqe_free -= (ds + 3) / 4;
- /* Request CQE generation if limits are reached. */
- mlx5_tx_request_completion(txq, loc, olx);
}
/*
loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
txq->wqe_ci += (len + 3) / 4;
loc->wqe_free -= (len + 3) / 4;
- /* Request CQE generation if limits are reached. */
- mlx5_tx_request_completion(txq, loc, olx);
}
/**
unsigned int slen = 0;
next_empw:
- part = RTE_MIN(pkts_n, MLX5_EMPW_MAX_PACKETS);
+ assert(NB_SEGS(loc->mbuf) == 1);
+ part = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
+ MLX5_MPW_MAX_PACKETS :
+ MLX5_EMPW_MAX_PACKETS);
if (unlikely(loc->elts_free < part)) {
		/* We do not have enough elts to store all mbufs. */
if (unlikely(loc->elts_free < MLX5_EMPW_MIN_PACKETS))
eseg = &loc->wqe_last->eseg;
dseg = &loc->wqe_last->dseg[0];
loop = part;
+ /* Store the packet length for legacy MPW. */
+ if (MLX5_TXOFF_CONFIG(MPW))
+ eseg->mss = rte_cpu_to_be_16
+ (rte_pktmbuf_data_len(loc->mbuf));
for (;;) {
uint32_t dlen = rte_pktmbuf_data_len(loc->mbuf);
#ifdef MLX5_PMD_SOFT_COUNTERS
return MLX5_TXCMP_CODE_EXIT;
return MLX5_TXCMP_CODE_MULTI;
}
+ assert(NB_SEGS(loc->mbuf) == 1);
if (ret == MLX5_TXCMP_CODE_TSO) {
part -= loop;
mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
* - check sum settings
* - metadata value
* - software parser settings
+			 * - packet length (legacy MPW only)
*/
- if (!mlx5_tx_match_empw(txq, eseg, loc, olx)) {
+ if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx)) {
assert(loop);
part -= loop;
mlx5_tx_sdone_empw(txq, loc, part, slen, olx);
txq->wqe_ci += (2 + part + 3) / 4;
loc->wqe_free -= (2 + part + 3) / 4;
pkts_n -= part;
- /* Request CQE generation if limits are reached. */
- mlx5_tx_request_completion(txq, loc, olx);
if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
return MLX5_TXCMP_CODE_EXIT;
loc->mbuf = *pkts++;
unsigned int room, part, nlim;
unsigned int slen = 0;
+ assert(NB_SEGS(loc->mbuf) == 1);
/*
* Limits the amount of packets in one WQE
* to improve CQE latency generation.
*/
- nlim = RTE_MIN(pkts_n, MLX5_EMPW_MAX_PACKETS);
+ nlim = RTE_MIN(pkts_n, MLX5_TXOFF_CONFIG(MPW) ?
+ MLX5_MPW_INLINE_MAX_PACKETS :
+ MLX5_EMPW_MAX_PACKETS);
	/* Check whether we have the minimal amount of WQEs */
if (unlikely(loc->wqe_free <
((2 + MLX5_EMPW_MIN_PACKETS + 3) / 4)))
olx & ~MLX5_TXOFF_CONFIG_VLAN);
eseg = &loc->wqe_last->eseg;
dseg = &loc->wqe_last->dseg[0];
+ /* Store the packet length for legacy MPW. */
+ if (MLX5_TXOFF_CONFIG(MPW))
+ eseg->mss = rte_cpu_to_be_16
+ (rte_pktmbuf_data_len(loc->mbuf));
room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE,
loc->wqe_free) * MLX5_WQE_SIZE -
MLX5_WQE_CSEG_SIZE -
return MLX5_TXCMP_CODE_EXIT;
return MLX5_TXCMP_CODE_MULTI;
}
+ assert(NB_SEGS(loc->mbuf) == 1);
if (ret == MLX5_TXCMP_CODE_TSO) {
part -= room;
mlx5_tx_idone_empw(txq, loc, part, slen, olx);
* - check sum settings
* - metadata value
* - software parser settings
+			 * - packet length (legacy MPW only)
*/
- if (!mlx5_tx_match_empw(txq, eseg, loc, olx))
+ if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx))
break;
/* Packet attributes match, continue the same eMPW. */
if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
* free the packet immediately.
*/
rte_pktmbuf_free_seg(loc->mbuf);
- } else if (!MLX5_TXOFF_CONFIG(EMPW) &&
- txq->inlen_mode) {
+ } else if ((!MLX5_TXOFF_CONFIG(EMPW) ||
+ MLX5_TXOFF_CONFIG(MPW)) &&
+ txq->inlen_mode) {
/*
* If minimal inlining is requested the eMPW
* feature should be disabled due to data is
}
++loc->pkts_sent;
--pkts_n;
- /* Request CQE generation if limits are reached. */
- mlx5_tx_request_completion(txq, loc, olx);
if (unlikely(!pkts_n || !loc->elts_free || !loc->wqe_free))
return MLX5_TXCMP_CODE_EXIT;
loc->mbuf = *pkts++;
assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
+ if (unlikely(!pkts_n))
+ return 0;
+ loc.pkts_sent = 0;
+ loc.pkts_copy = 0;
+ loc.wqe_last = NULL;
+
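+	/*
+	 * The burst may take several iterations, we return to this
+	 * point if not all the packets were sent due to a lack of
+	 * free elts or WQEs (see the check at the loop bottom).
+	 */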
+send_loop:
+ loc.pkts_loop = loc.pkts_sent;
/*
* Check if there are some CQEs, if any:
* - process an encountered errors
* - free related mbufs
* - doorbell the NIC about processed CQEs
*/
- if (unlikely(!pkts_n))
- return 0;
- rte_prefetch0(*pkts);
+ rte_prefetch0(*(pkts + loc.pkts_sent));
mlx5_tx_handle_completion(txq, olx);
/*
* Calculate the number of available resources - elts and WQEs.
loc.wqe_free = txq->wqe_s -
(uint16_t)(txq->wqe_ci - txq->wqe_pi);
if (unlikely(!loc.elts_free || !loc.wqe_free))
- return 0;
- loc.pkts_sent = 0;
- loc.pkts_copy = 0;
- loc.wqe_last = NULL;
+ goto burst_exit;
for (;;) {
/*
* Fetch the packet from array. Usually this is
*/
assert(MLX5_TXOFF_CONFIG(INLINE) || loc.pkts_sent >= loc.pkts_copy);
/* Take a shortcut if nothing is sent. */
- if (unlikely(loc.pkts_sent == 0))
- return 0;
+ if (unlikely(loc.pkts_sent == loc.pkts_loop))
+ goto burst_exit;
+ /* Request CQE generation if limits are reached. */
+ mlx5_tx_request_completion(txq, &loc, olx);
/*
* Ring QP doorbell immediately after WQE building completion
* to improve latencies. The pure software related data treatment
* can be completed after doorbell. Tx CQEs for this SQ are
* processed in this thread only by the polling.
+ *
+	 * The rdma core library can map the doorbell register in two ways,
+	 * depending on the environment variable "MLX5_SHUT_UP_BF":
+	 *
+	 * - as regular cached memory, the variable is either missing or
+	 *   set to zero. This type of mapping may cause significant
+	 *   doorbell register writing latency and requires an explicit
+	 *   memory write barrier to mitigate this issue and prevent
+	 *   write combining.
+	 *
+	 * - as non-cached memory, the variable is present and set to
+	 *   a non-zero value. This type of mapping may impact performance
+	 *   under heavy load conditions but the explicit write memory
+	 *   barrier is not required and this may improve core performance.
+	 *
+	 * - the legacy behaviour (prior to the 19.08 release) was to use
+	 *   some heuristics to decide whether the write memory barrier
+	 *   should be performed. This behaviour is selected by specifying
+	 *   tx_db_nc=2; the write barrier is skipped if the application
+	 *   provides the full recommended burst of packets, on the
+	 *   assumption that more packets are coming and the write barrier
+	 *   will be issued on the next burst (after descriptor writing,
+	 *   at least).
*/
- mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, 0);
+ mlx5_tx_dbrec_cond_wmb(txq, loc.wqe_last, !txq->db_nc &&
+ (!txq->db_heu || pkts_n % MLX5_TX_DEFAULT_BURST));
/* Not all of the mbufs may be stored into elts yet. */
- part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent -
- (MLX5_TXOFF_CONFIG(MULTI) ? loc.pkts_copy : 0);
+ part = MLX5_TXOFF_CONFIG(INLINE) ? 0 : loc.pkts_sent - loc.pkts_copy;
if (!MLX5_TXOFF_CONFIG(INLINE) && part) {
/*
* There are some single-segment mbufs not stored in elts.
* inlined mbufs.
*/
mlx5_tx_copy_elts(txq, pkts + loc.pkts_copy, part, olx);
+ loc.pkts_copy = loc.pkts_sent;
+ }
+ assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
+ assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
+ if (pkts_n > loc.pkts_sent) {
+ /*
+		 * If the burst size is large there might not be enough
+		 * CQEs fetched from the completion queue and not enough
+		 * resources freed to send all the packets.
+ */
+ goto send_loop;
}
+burst_exit:
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Increment sent packets counter. */
txq->stats.opackets += loc.pkts_sent;
#endif
- assert(txq->elts_s >= (uint16_t)(txq->elts_head - txq->elts_tail));
- assert(txq->wqe_s >= (uint16_t)(txq->wqe_ci - txq->wqe_pi));
return loc.pkts_sent;
}
MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
MLX5_TXOFF_CONFIG_METADATA)
+/*
+ * Generate routines with Legacy Multi-Packet Write support.
+ * This mode is supported by ConnectX-4 Lx only and imposes
+ * offload limitations; the following are not supported:
+ *   - ACL/Flows (metadata becomes meaningless)
+ * - WQE Inline headers
+ * - SRIOV (E-Switch offloads)
+ * - VLAN insertion
+ * - tunnel encapsulation/decapsulation
+ * - TSO
+ */
+MLX5_TXOFF_DECL(none_mpw,
+ MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
+ MLX5_TXOFF_CONFIG_MPW)
+
+MLX5_TXOFF_DECL(mci_mpw,
+ MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
+ MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
+ MLX5_TXOFF_CONFIG_MPW)
+
+MLX5_TXOFF_DECL(mc_mpw,
+ MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
+ MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
+
+MLX5_TXOFF_DECL(i_mpw,
+ MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
+ MLX5_TXOFF_CONFIG_MPW)
+
/*
* Array of declared and compiled Tx burst function and corresponding
* supported offloads set. The array is used to select the Tx burst
MLX5_TXOFF_CONFIG_INLINE |
MLX5_TXOFF_CONFIG_METADATA)
-
MLX5_TXOFF_INFO(mtv,
MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_TSO |
MLX5_TXOFF_CONFIG_VLAN |
MLX5_TXOFF_INFO(iv,
MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_VLAN |
MLX5_TXOFF_CONFIG_METADATA)
+
+MLX5_TXOFF_INFO(none_mpw,
+ MLX5_TXOFF_CONFIG_NONE | MLX5_TXOFF_CONFIG_EMPW |
+ MLX5_TXOFF_CONFIG_MPW)
+
+MLX5_TXOFF_INFO(mci_mpw,
+ MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
+ MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
+ MLX5_TXOFF_CONFIG_MPW)
+
+MLX5_TXOFF_INFO(mc_mpw,
+ MLX5_TXOFF_CONFIG_MULTI | MLX5_TXOFF_CONFIG_CSUM |
+ MLX5_TXOFF_CONFIG_EMPW | MLX5_TXOFF_CONFIG_MPW)
+
+MLX5_TXOFF_INFO(i_mpw,
+ MLX5_TXOFF_CONFIG_INLINE | MLX5_TXOFF_CONFIG_EMPW |
+ MLX5_TXOFF_CONFIG_MPW)
};
/**
if (config->mps == MLX5_MPW_ENHANCED &&
config->txq_inline_min <= 0) {
/*
- * The NIC supports Enhanced Multi-Packet Write.
- * We do not support legacy MPW due to its
- * hardware related problems, so we just ignore
- * legacy MLX5_MPW settings. There should be no
- * minimal required inline data.
+ * The NIC supports Enhanced Multi-Packet Write
+ * and does not require minimal inline data.
*/
olx |= MLX5_TXOFF_CONFIG_EMPW;
}
- if (tx_offloads & DEV_TX_OFFLOAD_MATCH_METADATA) {
+ if (rte_flow_dynf_metadata_avail()) {
/* We should support Flow metadata. */
olx |= MLX5_TXOFF_CONFIG_METADATA;
}
+ if (config->mps == MLX5_MPW) {
+ /*
+ * The NIC supports Legacy Multi-Packet Write.
+ * The MLX5_TXOFF_CONFIG_MPW controls the
+ * descriptor building method in combination
+ * with MLX5_TXOFF_CONFIG_EMPW.
+ */
+ if (!(olx & (MLX5_TXOFF_CONFIG_TSO |
+ MLX5_TXOFF_CONFIG_SWP |
+ MLX5_TXOFF_CONFIG_VLAN |
+ MLX5_TXOFF_CONFIG_METADATA)))
+ olx |= MLX5_TXOFF_CONFIG_EMPW |
+ MLX5_TXOFF_CONFIG_MPW;
+ }
/*
* Scan the routines table to find the minimal
* satisfying routine with requested offloads.
DRV_LOG(DEBUG, "\tVLANI (VLAN insertion)");
if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_METADATA)
DRV_LOG(DEBUG, "\tMETAD (tx Flow metadata)");
- if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_EMPW)
- DRV_LOG(DEBUG, "\tEMPW (Enhanced MPW)");
+ if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_EMPW) {
+ if (txoff_func[m].olx & MLX5_TXOFF_CONFIG_MPW)
+ DRV_LOG(DEBUG, "\tMPW (Legacy MPW)");
+ else
+ DRV_LOG(DEBUG, "\tEMPW (Enhanced MPW)");
+ }
return txoff_func[m].func;
}