#include "mlx5_defs.h"
#include "mlx5.h"
+#include "mlx5_mr.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_autoconf.h"
struct mlx5_mp_arg_queue_state_modify *sm);
static inline void
-mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp,
- volatile struct mlx5_cqe *restrict cqe,
+mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp,
+ volatile struct mlx5_cqe *__rte_restrict cqe,
uint32_t phcsum);
static inline void
-mlx5_lro_update_hdr(uint8_t *restrict padd,
- volatile struct mlx5_cqe *restrict cqe,
+mlx5_lro_update_hdr(uint8_t *__rte_restrict padd,
+ volatile struct mlx5_cqe *__rte_restrict cqe,
uint32_t len);
uint32_t mlx5_ptype_table[] __rte_cache_aligned = {
* Software Parser flags are set by pointer.
*/
static __rte_always_inline uint32_t
-txq_mbuf_to_swp(struct mlx5_txq_local *restrict loc,
+txq_mbuf_to_swp(struct mlx5_txq_local *__rte_restrict loc,
uint8_t *swp_flags,
unsigned int olx)
{
* the error completion entry is handled successfully.
*/
static int
-mlx5_tx_error_cqe_handle(struct mlx5_txq_data *restrict txq,
+mlx5_tx_error_cqe_handle(struct mlx5_txq_data *__rte_restrict txq,
volatile struct mlx5_err_cqe *err_cqe)
{
if (err_cqe->syndrome != MLX5_CQE_SYNDROME_WR_FLUSH_ERR) {
container_of(txq, struct mlx5_txq_ctrl, txq);
struct ibv_qp_attr mod = {
.qp_state = IBV_QPS_RESET,
- .port_num = (uint8_t)priv->ibv_port,
+ .port_num = (uint8_t)priv->dev_port,
};
struct ibv_qp *qp = txq_ctrl->obj->qp;
mlx5_queue_state_modify(struct rte_eth_dev *dev,
struct mlx5_mp_arg_queue_state_modify *sm)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
int ret = 0;
switch (rte_eal_process_type()) {
ret = mlx5_queue_state_modify_primary(dev, sm);
break;
case RTE_PROC_SECONDARY:
- ret = mlx5_mp_req_queue_state_modify(dev, sm);
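+		/*
+		 * A secondary process cannot modify the queue state itself;
+		 * forward the request to the primary over the per-port
+		 * multi-process channel addressed by priv->mp_id.
+		 */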
+ ret = mlx5_mp_req_queue_state_modify(&priv->mp_id, sm);
break;
default:
break;
pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
}
}
- if (rte_flow_dynf_metadata_avail() && cqe->flow_table_metadata) {
- pkt->ol_flags |= PKT_RX_DYNF_METADATA;
- *RTE_FLOW_DYNF_METADATA(pkt) = cqe->flow_table_metadata;
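+		/*
+		 * Store the flow metadata in the registered dynamic mbuf
+		 * field; the field offset and the ol_flags bit are cached
+		 * per Rx queue.
+		 */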
+ if (rxq->dynf_meta && cqe->flow_table_metadata) {
+ pkt->ol_flags |= rxq->flow_meta_mask;
+ *RTE_MBUF_DYNFIELD(pkt, rxq->flow_meta_offset, uint32_t *) =
+ cqe->flow_table_metadata;
}
if (rxq->csum)
pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
* The L3 pseudo-header checksum.
*/
static inline void
-mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *restrict tcp,
- volatile struct mlx5_cqe *restrict cqe,
+mlx5_lro_update_tcp_hdr(struct rte_tcp_hdr *__rte_restrict tcp,
+ volatile struct mlx5_cqe *__rte_restrict cqe,
uint32_t phcsum)
{
uint8_t l4_type = (rte_be_to_cpu_16(cqe->hdr_type_etc) &
if (cqe->lro_tcppsh_abort_dupack & MLX5_CQE_LRO_PUSH_MASK)
tcp->tcp_flags |= RTE_TCP_PSH_FLAG;
tcp->cksum = 0;
- csum += rte_raw_cksum(tcp, (tcp->data_off & 0xF) * 4);
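+	/*
+	 * The TCP header length is kept in the upper four bits of
+	 * data_off, in units of 32-bit words.
+	 */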
+ csum += rte_raw_cksum(tcp, (tcp->data_off >> 4) * 4);
csum = ((csum & 0xffff0000) >> 16) + (csum & 0xffff);
csum = (~csum) & 0xffff;
if (csum == 0)
* The packet length.
*/
static inline void
-mlx5_lro_update_hdr(uint8_t *restrict padd,
- volatile struct mlx5_cqe *restrict cqe,
+mlx5_lro_update_hdr(uint8_t *__rte_restrict padd,
+ volatile struct mlx5_cqe *__rte_restrict cqe,
uint32_t len)
{
union {
unsigned int i = 0;
uint32_t rq_ci = rxq->rq_ci;
uint16_t consumed_strd = rxq->consumed_strd;
- uint16_t headroom_sz = rxq->strd_headroom_en * RTE_PKTMBUF_HEADROOM;
struct mlx5_mprq_buf *buf = (*rxq->mprq_bufs)[rq_ci & wq_mask];
while (i < pkts_n) {
struct rte_mbuf *pkt;
void *addr;
int ret;
- unsigned int len;
+ uint32_t len;
uint16_t strd_cnt;
uint16_t strd_idx;
uint32_t offset;
uint32_t byte_cnt;
+ int32_t hdrm_overlap;
volatile struct mlx5_mini_cqe8 *mcqe = NULL;
uint32_t rss_hash_res = 0;
- uint8_t lro_num_seg;
if (consumed_strd == strd_n) {
/* Replace WQE only if the buffer is still in use. */
MLX5_ASSERT(strd_idx < strd_n);
MLX5_ASSERT(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) &
wq_mask));
- lro_num_seg = cqe->lro_num_seg;
- /*
- * Currently configured to receive a packet per a stride. But if
- * MTU is adjusted through kernel interface, device could
- * consume multiple strides without raising an error. In this
- * case, the packet should be dropped because it is bigger than
- * the max_rx_pkt_len.
- */
- if (unlikely(!lro_num_seg && strd_cnt > 1)) {
- ++rxq->stats.idropped;
- continue;
- }
pkt = rte_pktmbuf_alloc(rxq->mp);
if (unlikely(pkt == NULL)) {
++rxq->stats.rx_nombuf;
len -= RTE_ETHER_CRC_LEN;
offset = strd_idx * strd_sz + strd_shift;
addr = RTE_PTR_ADD(mlx5_mprq_buf_addr(buf, strd_n), offset);
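+		/*
+		 * Bytes by which the packet data plus the mbuf headroom
+		 * exceed the strides consumed by the packet; a positive
+		 * value means the tail spills into the area holding the
+		 * next packet's headroom (e.g. with 2048-byte strides, a
+		 * 2000-byte packet in a single stride and a 128-byte
+		 * headroom overlaps by 80 bytes).
+		 */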
+ hdrm_overlap = len + RTE_PKTMBUF_HEADROOM - strd_cnt * strd_sz;
/*
* Memcpy packets to the target mbuf if:
* - The size of packet is smaller than mprq_max_memcpy_len.
* - Out of buffer in the Mempool for Multi-Packet RQ.
+ * - The packet's tail overlaps the next packet's headroom and scatter is off.
*/
- if (len <= rxq->mprq_max_memcpy_len || rxq->mprq_repl == NULL) {
- /*
- * When memcpy'ing packet due to out-of-buffer, the
- * packet must be smaller than the target mbuf.
- */
- if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
+ if (len <= rxq->mprq_max_memcpy_len ||
+ rxq->mprq_repl == NULL ||
+ (hdrm_overlap > 0 && !rxq->strd_scatter_en)) {
+ if (likely(rte_pktmbuf_tailroom(pkt) >= len)) {
+ rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
+ addr, len);
+ DATA_LEN(pkt) = len;
+ } else if (rxq->strd_scatter_en) {
+ struct rte_mbuf *prev = pkt;
+ uint32_t seg_len =
+ RTE_MIN(rte_pktmbuf_tailroom(pkt), len);
+ uint32_t rem_len = len - seg_len;
+
+ rte_memcpy(rte_pktmbuf_mtod(pkt, void *),
+ addr, seg_len);
+ DATA_LEN(pkt) = seg_len;
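+				/*
+				 * Copy the remainder into a chain of newly
+				 * allocated mbufs linked to the first one.
+				 */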
+ while (rem_len) {
+ struct rte_mbuf *next =
+ rte_pktmbuf_alloc(rxq->mp);
+
+ if (unlikely(next == NULL)) {
+ rte_pktmbuf_free(pkt);
+ ++rxq->stats.rx_nombuf;
+ goto out;
+ }
+ NEXT(prev) = next;
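+					/*
+					 * Chained segments start at offset
+					 * zero to use the whole data room.
+					 */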
+ SET_DATA_OFF(next, 0);
+ addr = RTE_PTR_ADD(addr, seg_len);
+ seg_len = RTE_MIN
+ (rte_pktmbuf_tailroom(next),
+ rem_len);
+ rte_memcpy
+ (rte_pktmbuf_mtod(next, void *),
+ addr, seg_len);
+ DATA_LEN(next) = seg_len;
+ rem_len -= seg_len;
+ prev = next;
+ ++NB_SEGS(pkt);
+ }
+ } else {
rte_pktmbuf_free_seg(pkt);
++rxq->stats.idropped;
continue;
}
- rte_memcpy(rte_pktmbuf_mtod(pkt, void *), addr, len);
- DATA_LEN(pkt) = len;
} else {
rte_iova_t buf_iova;
struct rte_mbuf_ext_shared_info *shinfo;
rte_atomic16_add_return(&buf->refcnt, 1);
MLX5_ASSERT((uint16_t)rte_atomic16_read(&buf->refcnt) <=
strd_n + 1);
- buf_addr = RTE_PTR_SUB(addr, headroom_sz);
+ buf_addr = RTE_PTR_SUB(addr, RTE_PKTMBUF_HEADROOM);
/*
* MLX5 device doesn't use iova but it is necessary in a
* case where the Rx packet is transmitted via a
rte_pktmbuf_attach_extbuf(pkt, buf_addr, buf_iova,
buf_len, shinfo);
/* Set mbuf head-room. */
- pkt->data_off = headroom_sz;
+ SET_DATA_OFF(pkt, RTE_PKTMBUF_HEADROOM);
MLX5_ASSERT(pkt->ol_flags == EXT_ATTACHED_MBUF);
- /*
- * Prevent potential overflow due to MTU change through
- * kernel interface.
- */
- if (unlikely(rte_pktmbuf_tailroom(pkt) < len)) {
- rte_pktmbuf_free_seg(pkt);
- ++rxq->stats.idropped;
- continue;
- }
+ MLX5_ASSERT(rte_pktmbuf_tailroom(pkt) >=
+ len - (hdrm_overlap > 0 ? hdrm_overlap : 0));
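+			/*
+			 * The attached length is trimmed below if the tail
+			 * overlaps the next packet's headroom and has to be
+			 * copied out into an extra segment.
+			 */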
DATA_LEN(pkt) = len;
/*
- * LRO packet may consume all the stride memory, in this
- * case packet head-room space is not guaranteed so must
- * to add an empty mbuf for the head-room.
+ * Copy the last fragment of a packet (up to headroom
+ * size bytes) in case there is a stride overlap with
+ * the next packet's headroom. Allocate a separate mbuf
+ * to store this fragment and link it. Scatter is on.
*/
- if (!rxq->strd_headroom_en) {
- struct rte_mbuf *headroom_mbuf =
- rte_pktmbuf_alloc(rxq->mp);
+ if (hdrm_overlap > 0) {
+ MLX5_ASSERT(rxq->strd_scatter_en);
+ struct rte_mbuf *seg =
+ rte_pktmbuf_alloc(rxq->mp);
- if (unlikely(headroom_mbuf == NULL)) {
+ if (unlikely(seg == NULL)) {
rte_pktmbuf_free_seg(pkt);
++rxq->stats.rx_nombuf;
break;
}
- PORT(pkt) = rxq->port_id;
- NEXT(headroom_mbuf) = pkt;
- pkt = headroom_mbuf;
+ SET_DATA_OFF(seg, 0);
+ rte_memcpy(rte_pktmbuf_mtod(seg, void *),
+ RTE_PTR_ADD(addr, len - hdrm_overlap),
+ hdrm_overlap);
+ DATA_LEN(seg) = hdrm_overlap;
+ DATA_LEN(pkt) = len - hdrm_overlap;
+ NEXT(pkt) = seg;
NB_SEGS(pkt) = 2;
}
}
rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
- if (lro_num_seg > 1) {
+ if (cqe->lro_num_seg > 1) {
mlx5_lro_update_hdr(addr, cqe, len);
pkt->ol_flags |= PKT_RX_LRO;
- pkt->tso_segsz = strd_sz;
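+			/*
+			 * Approximate the original MSS as the aggregated
+			 * length divided by the number of coalesced segments.
+			 */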
+ pkt->tso_segsz = len / cqe->lro_num_seg;
}
PKT_LEN(pkt) = len;
PORT(pkt) = rxq->port_id;
*(pkts++) = pkt;
++i;
}
+out:
/* Update the consumer indexes. */
rxq->consumed_strd = consumed_strd;
rte_cio_wmb();
* compile time and may be used for optimization.
*/
static __rte_always_inline void
-mlx5_tx_free_mbuf(struct rte_mbuf **restrict pkts,
+mlx5_tx_free_mbuf(struct rte_mbuf **__rte_restrict pkts,
unsigned int pkts_n,
unsigned int olx __rte_unused)
{
* compile time and may be used for optimization.
*/
static __rte_always_inline void
-mlx5_tx_free_elts(struct mlx5_txq_data *restrict txq,
+mlx5_tx_free_elts(struct mlx5_txq_data *__rte_restrict txq,
uint16_t tail,
unsigned int olx __rte_unused)
{
* compile time and may be used for optimization.
*/
static __rte_always_inline void
-mlx5_tx_copy_elts(struct mlx5_txq_data *restrict txq,
- struct rte_mbuf **restrict pkts,
+mlx5_tx_copy_elts(struct mlx5_txq_data *__rte_restrict txq,
+ struct rte_mbuf **__rte_restrict pkts,
unsigned int pkts_n,
unsigned int olx __rte_unused)
{
* compile time and may be used for optimization.
*/
static __rte_always_inline void
-mlx5_tx_comp_flush(struct mlx5_txq_data *restrict txq,
+mlx5_tx_comp_flush(struct mlx5_txq_data *__rte_restrict txq,
volatile struct mlx5_cqe *last_cqe,
unsigned int olx __rte_unused)
{
* routine smaller, simple and faster - from experiments.
*/
static void
-mlx5_tx_handle_completion(struct mlx5_txq_data *restrict txq,
+mlx5_tx_handle_completion(struct mlx5_txq_data *__rte_restrict txq,
unsigned int olx __rte_unused)
{
unsigned int count = MLX5_TX_COMP_MAX_CQE;
volatile struct mlx5_cqe *last_cqe = NULL;
- uint16_t ci = txq->cq_ci;
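+	/* Ring the CQ doorbell only if at least one CQE was consumed. */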
+ bool ring_doorbell = false;
int ret;
static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
do {
volatile struct mlx5_cqe *cqe;
- cqe = &txq->cqes[ci & txq->cqe_m];
- ret = check_cqe(cqe, txq->cqe_s, ci);
+ cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
+ ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
if (likely(ret != MLX5_CQE_STATUS_ERR)) {
/* No new CQEs in completion queue. */
* here, before we might perform SQ reset.
*/
rte_wmb();
- txq->cq_ci = ci;
ret = mlx5_tx_error_cqe_handle
(txq, (volatile struct mlx5_err_cqe *)cqe);
if (unlikely(ret < 0)) {
* MLX5_CQE_SYNDROME_WR_FLUSH_ERR status.
* The send queue is supposed to be empty.
*/
- ++ci;
- txq->cq_pi = ci;
+ ring_doorbell = true;
+ ++txq->cq_ci;
+ txq->cq_pi = txq->cq_ci;
last_cqe = NULL;
continue;
}
/* Normal transmit completion. */
- MLX5_ASSERT(ci != txq->cq_pi);
- MLX5_ASSERT((txq->fcqs[ci & txq->cqe_m] >> 16) ==
+ MLX5_ASSERT(txq->cq_ci != txq->cq_pi);
+ MLX5_ASSERT((txq->fcqs[txq->cq_ci & txq->cqe_m] >> 16) ==
cqe->wqe_counter);
- ++ci;
+ ring_doorbell = true;
+ ++txq->cq_ci;
last_cqe = cqe;
/*
* We have to restrict the amount of processed CQEs
if (likely(--count == 0))
break;
} while (true);
- if (likely(ci != txq->cq_ci)) {
- /*
- * Update completion queue consuming index
- * and ring doorbell to notify hardware.
- */
+ if (likely(ring_doorbell)) {
+ /* Ring doorbell to notify hardware. */
rte_compiler_barrier();
- txq->cq_ci = ci;
- *txq->cq_db = rte_cpu_to_be_32(ci);
+ *txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
mlx5_tx_comp_flush(txq, last_cqe, olx);
}
}
* compile time and may be used for optimization.
*/
static __rte_always_inline void
-mlx5_tx_request_completion(struct mlx5_txq_data *restrict txq,
- struct mlx5_txq_local *restrict loc,
+mlx5_tx_request_completion(struct mlx5_txq_data *__rte_restrict txq,
+ struct mlx5_txq_local *__rte_restrict loc,
unsigned int olx)
{
uint16_t head = txq->elts_head;
(uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) {
volatile struct mlx5_wqe *last = loc->wqe_last;
+ MLX5_ASSERT(last);
txq->elts_comp = head;
if (MLX5_TXOFF_CONFIG(INLINE))
txq->wqe_comp = txq->wqe_ci;
int
mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
- struct mlx5_txq_data *restrict txq = tx_queue;
+ struct mlx5_txq_data *__rte_restrict txq = tx_queue;
uint16_t used;
mlx5_tx_handle_completion(txq, 0);
* compile time and may be used for optimization.
*/
static __rte_always_inline void
-mlx5_tx_cseg_init(struct mlx5_txq_data *restrict txq,
- struct mlx5_txq_local *restrict loc __rte_unused,
- struct mlx5_wqe *restrict wqe,
+mlx5_tx_cseg_init(struct mlx5_txq_data *__rte_restrict txq,
+ struct mlx5_txq_local *__rte_restrict loc __rte_unused,
+ struct mlx5_wqe *__rte_restrict wqe,
unsigned int ds,
unsigned int opcode,
unsigned int olx __rte_unused)
{
- struct mlx5_wqe_cseg *restrict cs = &wqe->cseg;
+ struct mlx5_wqe_cseg *__rte_restrict cs = &wqe->cseg;
/* For legacy MPW replace the EMPW by TSO with modifier. */
if (MLX5_TXOFF_CONFIG(MPW) && opcode == MLX5_OPCODE_ENHANCED_MPSW)
* compile time and may be used for optimization.
*/
static __rte_always_inline void
-mlx5_tx_eseg_none(struct mlx5_txq_data *restrict txq __rte_unused,
- struct mlx5_txq_local *restrict loc,
- struct mlx5_wqe *restrict wqe,
+mlx5_tx_eseg_none(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
+ struct mlx5_txq_local *__rte_restrict loc,
+ struct mlx5_wqe *__rte_restrict wqe,
unsigned int olx)
{
- struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
+ struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
uint32_t csum;
/*
* compile time and may be used for optimization.
*/
static __rte_always_inline void
-mlx5_tx_eseg_dmin(struct mlx5_txq_data *restrict txq __rte_unused,
- struct mlx5_txq_local *restrict loc,
- struct mlx5_wqe *restrict wqe,
+mlx5_tx_eseg_dmin(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
+ struct mlx5_txq_local *__rte_restrict loc,
+ struct mlx5_wqe *__rte_restrict wqe,
unsigned int vlan,
unsigned int olx)
{
- struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
+ struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
uint32_t csum;
uint8_t *psrc, *pdst;
* Pointer to the next Data Segment (aligned and wrapped around).
*/
static __rte_always_inline struct mlx5_wqe_dseg *
-mlx5_tx_eseg_data(struct mlx5_txq_data *restrict txq,
- struct mlx5_txq_local *restrict loc,
- struct mlx5_wqe *restrict wqe,
+mlx5_tx_eseg_data(struct mlx5_txq_data *__rte_restrict txq,
+ struct mlx5_txq_local *__rte_restrict loc,
+ struct mlx5_wqe *__rte_restrict wqe,
unsigned int vlan,
unsigned int inlen,
unsigned int tso,
unsigned int olx)
{
- struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
+ struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
uint32_t csum;
uint8_t *psrc, *pdst;
unsigned int part;
*/
static __rte_always_inline unsigned int
mlx5_tx_mseg_memcpy(uint8_t *pdst,
- struct mlx5_txq_local *restrict loc,
+ struct mlx5_txq_local *__rte_restrict loc,
unsigned int len,
unsigned int must,
unsigned int olx __rte_unused)
* wrapping check on its own).
*/
static __rte_always_inline struct mlx5_wqe_dseg *
-mlx5_tx_eseg_mdat(struct mlx5_txq_data *restrict txq,
- struct mlx5_txq_local *restrict loc,
- struct mlx5_wqe *restrict wqe,
+mlx5_tx_eseg_mdat(struct mlx5_txq_data *__rte_restrict txq,
+ struct mlx5_txq_local *__rte_restrict loc,
+ struct mlx5_wqe *__rte_restrict wqe,
unsigned int vlan,
unsigned int inlen,
unsigned int tso,
unsigned int olx)
{
- struct mlx5_wqe_eseg *restrict es = &wqe->eseg;
+ struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
uint32_t csum;
uint8_t *pdst;
unsigned int part, tlen = 0;
* compile time and may be used for optimization.
*/
static __rte_always_inline void
-mlx5_tx_dseg_ptr(struct mlx5_txq_data *restrict txq,
- struct mlx5_txq_local *restrict loc,
- struct mlx5_wqe_dseg *restrict dseg,
+mlx5_tx_dseg_ptr(struct mlx5_txq_data *__rte_restrict txq,
+ struct mlx5_txq_local *__rte_restrict loc,
+ struct mlx5_wqe_dseg *__rte_restrict dseg,
uint8_t *buf,
unsigned int len,
unsigned int olx __rte_unused)
* compile time and may be used for optimization.
*/
static __rte_always_inline void
-mlx5_tx_dseg_iptr(struct mlx5_txq_data *restrict txq,
- struct mlx5_txq_local *restrict loc,
- struct mlx5_wqe_dseg *restrict dseg,
+mlx5_tx_dseg_iptr(struct mlx5_txq_data *__rte_restrict txq,
+ struct mlx5_txq_local *__rte_restrict loc,
+ struct mlx5_wqe_dseg *__rte_restrict dseg,
uint8_t *buf,
unsigned int len,
unsigned int olx __rte_unused)
* last packet in the eMPW session.
*/
static __rte_always_inline struct mlx5_wqe_dseg *
-mlx5_tx_dseg_empw(struct mlx5_txq_data *restrict txq,
- struct mlx5_txq_local *restrict loc __rte_unused,
- struct mlx5_wqe_dseg *restrict dseg,
+mlx5_tx_dseg_empw(struct mlx5_txq_data *__rte_restrict txq,
+ struct mlx5_txq_local *__rte_restrict loc __rte_unused,
+ struct mlx5_wqe_dseg *__rte_restrict dseg,
uint8_t *buf,
unsigned int len,
unsigned int olx __rte_unused)
* Ring buffer wraparound check is needed.
*/
static __rte_always_inline struct mlx5_wqe_dseg *
-mlx5_tx_dseg_vlan(struct mlx5_txq_data *restrict txq,
- struct mlx5_txq_local *restrict loc __rte_unused,
- struct mlx5_wqe_dseg *restrict dseg,
+mlx5_tx_dseg_vlan(struct mlx5_txq_data *__rte_restrict txq,
+ struct mlx5_txq_local *__rte_restrict loc __rte_unused,
+ struct mlx5_wqe_dseg *__rte_restrict dseg,
uint8_t *buf,
unsigned int len,
unsigned int olx __rte_unused)
* Actual size of built WQE in segments.
*/
static __rte_always_inline unsigned int
-mlx5_tx_mseg_build(struct mlx5_txq_data *restrict txq,
- struct mlx5_txq_local *restrict loc,
- struct mlx5_wqe *restrict wqe,
+mlx5_tx_mseg_build(struct mlx5_txq_data *__rte_restrict txq,
+ struct mlx5_txq_local *__rte_restrict loc,
+ struct mlx5_wqe *__rte_restrict wqe,
unsigned int vlan,
unsigned int inlen,
unsigned int tso,
unsigned int olx __rte_unused)
{
- struct mlx5_wqe_dseg *restrict dseg;
+ struct mlx5_wqe_dseg *__rte_restrict dseg;
unsigned int ds;
MLX5_ASSERT((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
* Local context variables partially updated.
*/
static __rte_always_inline enum mlx5_txcmp_code
-mlx5_tx_packet_multi_tso(struct mlx5_txq_data *restrict txq,
- struct mlx5_txq_local *restrict loc,
+mlx5_tx_packet_multi_tso(struct mlx5_txq_data *__rte_restrict txq,
+ struct mlx5_txq_local *__rte_restrict loc,
unsigned int olx)
{
- struct mlx5_wqe *restrict wqe;
+ struct mlx5_wqe *__rte_restrict wqe;
unsigned int ds, dlen, inlen, ntcp, vlan = 0;
/*
* Local context variables partially updated.
*/
static __rte_always_inline enum mlx5_txcmp_code
-mlx5_tx_packet_multi_send(struct mlx5_txq_data *restrict txq,
- struct mlx5_txq_local *restrict loc,
+mlx5_tx_packet_multi_send(struct mlx5_txq_data *__rte_restrict txq,
+ struct mlx5_txq_local *__rte_restrict loc,
unsigned int olx)
{
- struct mlx5_wqe_dseg *restrict dseg;
- struct mlx5_wqe *restrict wqe;
+ struct mlx5_wqe_dseg *__rte_restrict dseg;
+ struct mlx5_wqe *__rte_restrict wqe;
unsigned int ds, nseg;
MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
* Local context variables partially updated.
*/
static __rte_always_inline enum mlx5_txcmp_code
-mlx5_tx_packet_multi_inline(struct mlx5_txq_data *restrict txq,
- struct mlx5_txq_local *restrict loc,
+mlx5_tx_packet_multi_inline(struct mlx5_txq_data *__rte_restrict txq,
+ struct mlx5_txq_local *__rte_restrict loc,
unsigned int olx)
{
- struct mlx5_wqe *restrict wqe;
+ struct mlx5_wqe *__rte_restrict wqe;
unsigned int ds, inlen, dlen, vlan = 0;
MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
* Local context variables updated.
*/
static __rte_always_inline enum mlx5_txcmp_code
-mlx5_tx_burst_mseg(struct mlx5_txq_data *restrict txq,
- struct rte_mbuf **restrict pkts,
+mlx5_tx_burst_mseg(struct mlx5_txq_data *__rte_restrict txq,
+ struct rte_mbuf **__rte_restrict pkts,
unsigned int pkts_n,
- struct mlx5_txq_local *restrict loc,
+ struct mlx5_txq_local *__rte_restrict loc,
unsigned int olx)
{
MLX5_ASSERT(loc->elts_free && loc->wqe_free);
* Local context variables updated.
*/
static __rte_always_inline enum mlx5_txcmp_code
-mlx5_tx_burst_tso(struct mlx5_txq_data *restrict txq,
- struct rte_mbuf **restrict pkts,
+mlx5_tx_burst_tso(struct mlx5_txq_data *__rte_restrict txq,
+ struct rte_mbuf **__rte_restrict pkts,
unsigned int pkts_n,
- struct mlx5_txq_local *restrict loc,
+ struct mlx5_txq_local *__rte_restrict loc,
unsigned int olx)
{
MLX5_ASSERT(loc->elts_free && loc->wqe_free);
pkts += loc->pkts_sent + 1;
pkts_n -= loc->pkts_sent;
for (;;) {
- struct mlx5_wqe_dseg *restrict dseg;
- struct mlx5_wqe *restrict wqe;
+ struct mlx5_wqe_dseg *__rte_restrict dseg;
+ struct mlx5_wqe *__rte_restrict wqe;
unsigned int ds, dlen, hlen, ntcp, vlan = 0;
uint8_t *dptr;
* MLX5_TXCMP_CODE_EMPW - single-segment packet, use MPW.
*/
static __rte_always_inline enum mlx5_txcmp_code
-mlx5_tx_able_to_empw(struct mlx5_txq_data *restrict txq,
- struct mlx5_txq_local *restrict loc,
+mlx5_tx_able_to_empw(struct mlx5_txq_data *__rte_restrict txq,
+ struct mlx5_txq_local *__rte_restrict loc,
unsigned int olx,
bool newp)
{
* false - no match, eMPW should be restarted.
*/
static __rte_always_inline bool
-mlx5_tx_match_empw(struct mlx5_txq_data *restrict txq __rte_unused,
- struct mlx5_wqe_eseg *restrict es,
- struct mlx5_txq_local *restrict loc,
+mlx5_tx_match_empw(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
+ struct mlx5_wqe_eseg *__rte_restrict es,
+ struct mlx5_txq_local *__rte_restrict loc,
uint32_t dlen,
unsigned int olx)
{
* false - no match, eMPW should be restarted.
*/
static __rte_always_inline void
-mlx5_tx_sdone_empw(struct mlx5_txq_data *restrict txq,
- struct mlx5_txq_local *restrict loc,
+mlx5_tx_sdone_empw(struct mlx5_txq_data *__rte_restrict txq,
+ struct mlx5_txq_local *__rte_restrict loc,
unsigned int ds,
unsigned int slen,
unsigned int olx __rte_unused)
* Total size of descriptor/data in bytes.
* @param slen
* Accumulated statistics, data bytes sent.
+ * @param wqem
+ * The base WQE for the eMPW/MPW descriptor.
* @param olx
* Configured Tx offloads mask. It is fully defined at
* compile time and may be used for optimization.
* false - no match, eMPW should be restarted.
*/
static __rte_always_inline void
-mlx5_tx_idone_empw(struct mlx5_txq_data *restrict txq,
- struct mlx5_txq_local *restrict loc,
+mlx5_tx_idone_empw(struct mlx5_txq_data *__rte_restrict txq,
+ struct mlx5_txq_local *__rte_restrict loc,
unsigned int len,
unsigned int slen,
+ struct mlx5_wqe *__rte_restrict wqem,
unsigned int olx __rte_unused)
{
- struct mlx5_wqe_dseg *dseg = &loc->wqe_last->dseg[0];
+ struct mlx5_wqe_dseg *dseg = &wqem->dseg[0];
MLX5_ASSERT(MLX5_TXOFF_CONFIG(INLINE));
#ifdef MLX5_PMD_SOFT_COUNTERS
MLX5_ASSERT((len % MLX5_WSEG_SIZE) == 0);
len = len / MLX5_WSEG_SIZE + 2;
}
- loc->wqe_last->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
+ wqem->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | len);
txq->wqe_ci += (len + 3) / 4;
loc->wqe_free -= (len + 3) / 4;
+ loc->wqe_last = wqem;
}
/**
* No VLAN insertion is supported.
*/
static __rte_always_inline enum mlx5_txcmp_code
-mlx5_tx_burst_empw_simple(struct mlx5_txq_data *restrict txq,
- struct rte_mbuf **restrict pkts,
+mlx5_tx_burst_empw_simple(struct mlx5_txq_data *__rte_restrict txq,
+ struct rte_mbuf **__rte_restrict pkts,
unsigned int pkts_n,
- struct mlx5_txq_local *restrict loc,
+ struct mlx5_txq_local *__rte_restrict loc,
unsigned int olx)
{
/*
pkts += loc->pkts_sent + 1;
pkts_n -= loc->pkts_sent;
for (;;) {
- struct mlx5_wqe_dseg *restrict dseg;
- struct mlx5_wqe_eseg *restrict eseg;
+ struct mlx5_wqe_dseg *__rte_restrict dseg;
+ struct mlx5_wqe_eseg *__rte_restrict eseg;
enum mlx5_txcmp_code ret;
unsigned int part, loop;
unsigned int slen = 0;
* with inlining, optionally supports VLAN insertion.
*/
static __rte_always_inline enum mlx5_txcmp_code
-mlx5_tx_burst_empw_inline(struct mlx5_txq_data *restrict txq,
- struct rte_mbuf **restrict pkts,
+mlx5_tx_burst_empw_inline(struct mlx5_txq_data *__rte_restrict txq,
+ struct rte_mbuf **__rte_restrict pkts,
unsigned int pkts_n,
- struct mlx5_txq_local *restrict loc,
+ struct mlx5_txq_local *__rte_restrict loc,
unsigned int olx)
{
/*
pkts += loc->pkts_sent + 1;
pkts_n -= loc->pkts_sent;
for (;;) {
- struct mlx5_wqe_dseg *restrict dseg;
- struct mlx5_wqe_eseg *restrict eseg;
+ struct mlx5_wqe_dseg *__rte_restrict dseg;
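+		/*
+		 * Title WQE of the eMPW session being built; loc->wqe_last
+		 * is updated to point at it only when the session is closed.
+		 */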
+ struct mlx5_wqe *__rte_restrict wqem;
enum mlx5_txcmp_code ret;
unsigned int room, part, nlim;
unsigned int slen = 0;
return MLX5_TXCMP_CODE_EXIT;
if (likely(pkts_n > 1))
rte_prefetch0(*pkts);
- loc->wqe_last = txq->wqes + (txq->wqe_ci & txq->wqe_m);
+ wqem = txq->wqes + (txq->wqe_ci & txq->wqe_m);
/*
* Build eMPW title WQEBB:
* - Control Segment, eMPW opcode, zero DS
* - Ethernet Segment, no inline
*/
- mlx5_tx_cseg_init(txq, loc, loc->wqe_last, 0,
+ mlx5_tx_cseg_init(txq, loc, wqem, 0,
MLX5_OPCODE_ENHANCED_MPSW, olx);
- mlx5_tx_eseg_none(txq, loc, loc->wqe_last,
+ mlx5_tx_eseg_none(txq, loc, wqem,
olx & ~MLX5_TXOFF_CONFIG_VLAN);
- eseg = &loc->wqe_last->eseg;
- dseg = &loc->wqe_last->dseg[0];
+ dseg = &wqem->dseg[0];
/* Store the packet length for legacy MPW. */
if (MLX5_TXOFF_CONFIG(MPW))
- eseg->mss = rte_cpu_to_be_16
- (rte_pktmbuf_data_len(loc->mbuf));
+ wqem->eseg.mss = rte_cpu_to_be_16
+ (rte_pktmbuf_data_len(loc->mbuf));
room = RTE_MIN(MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE,
loc->wqe_free) * MLX5_WQE_SIZE -
MLX5_WQE_CSEG_SIZE -
* We have some successfully built
* packet Data Segments to send.
*/
- mlx5_tx_idone_empw(txq, loc, part, slen, olx);
+ mlx5_tx_idone_empw(txq, loc, part,
+ slen, wqem, olx);
return MLX5_TXCMP_CODE_ERROR;
}
/* Inline or not inline - that's the Question. */
loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE)
goto pointer_empw;
if (MLX5_TXOFF_CONFIG(MPW)) {
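+			/*
+			 * Packets longer than the configured inline length
+			 * are sent by pointer, no data is inlined into the
+			 * legacy MPW session.
+			 */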
+ if (dlen > txq->inlen_send)
+ goto pointer_empw;
tlen = dlen;
if (part == room) {
/* Open new inline MPW session. */
* No pointer and inline descriptor
* intermix for legacy MPW sessions.
*/
- if (loc->wqe_last->dseg[0].bcount)
+ if (wqem->dseg[0].bcount)
break;
}
} else {
*/
if (MLX5_TXOFF_CONFIG(MPW) &&
part != room &&
- loc->wqe_last->dseg[0].bcount == RTE_BE32(0))
+ wqem->dseg[0].bcount == RTE_BE32(0))
break;
/*
* Not inlinable VLAN packets are
* continue build descriptors.
*/
part -= room;
- mlx5_tx_idone_empw(txq, loc, part, slen, olx);
+ mlx5_tx_idone_empw(txq, loc, part,
+ slen, wqem, olx);
return MLX5_TXCMP_CODE_EXIT;
}
loc->mbuf = *pkts++;
*/
if (ret == MLX5_TXCMP_CODE_MULTI) {
part -= room;
- mlx5_tx_idone_empw(txq, loc, part, slen, olx);
+ mlx5_tx_idone_empw(txq, loc, part,
+ slen, wqem, olx);
if (unlikely(!loc->elts_free ||
!loc->wqe_free))
return MLX5_TXCMP_CODE_EXIT;
MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
if (ret == MLX5_TXCMP_CODE_TSO) {
part -= room;
- mlx5_tx_idone_empw(txq, loc, part, slen, olx);
+ mlx5_tx_idone_empw(txq, loc, part,
+ slen, wqem, olx);
if (unlikely(!loc->elts_free ||
!loc->wqe_free))
return MLX5_TXCMP_CODE_EXIT;
}
if (ret == MLX5_TXCMP_CODE_SINGLE) {
part -= room;
- mlx5_tx_idone_empw(txq, loc, part, slen, olx);
+ mlx5_tx_idone_empw(txq, loc, part,
+ slen, wqem, olx);
if (unlikely(!loc->elts_free ||
!loc->wqe_free))
return MLX5_TXCMP_CODE_EXIT;
if (ret != MLX5_TXCMP_CODE_EMPW) {
MLX5_ASSERT(false);
part -= room;
- mlx5_tx_idone_empw(txq, loc, part, slen, olx);
+ mlx5_tx_idone_empw(txq, loc, part,
+ slen, wqem, olx);
return MLX5_TXCMP_CODE_ERROR;
}
/* Check if we have minimal room left. */
* - software parser settings
* - packets length (legacy MPW only)
*/
- if (!mlx5_tx_match_empw(txq, eseg, loc, dlen, olx))
+ if (!mlx5_tx_match_empw(txq, &wqem->eseg,
+ loc, dlen, olx))
break;
/* Packet attributes match, continue the same eMPW. */
if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
part -= room;
if (unlikely(!part))
return MLX5_TXCMP_CODE_EXIT;
- mlx5_tx_idone_empw(txq, loc, part, slen, olx);
+ mlx5_tx_idone_empw(txq, loc, part, slen, wqem, olx);
if (unlikely(!loc->elts_free ||
!loc->wqe_free))
return MLX5_TXCMP_CODE_EXIT;
* Data inlining and VLAN insertion are supported.
*/
static __rte_always_inline enum mlx5_txcmp_code
-mlx5_tx_burst_single_send(struct mlx5_txq_data *restrict txq,
- struct rte_mbuf **restrict pkts,
+mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq,
+ struct rte_mbuf **__rte_restrict pkts,
unsigned int pkts_n,
- struct mlx5_txq_local *restrict loc,
+ struct mlx5_txq_local *__rte_restrict loc,
unsigned int olx)
{
/*
pkts += loc->pkts_sent + 1;
pkts_n -= loc->pkts_sent;
for (;;) {
- struct mlx5_wqe *restrict wqe;
+ struct mlx5_wqe *__rte_restrict wqe;
enum mlx5_txcmp_code ret;
MLX5_ASSERT(NB_SEGS(loc->mbuf) == 1);
* not contain inlined data for eMPW due to
* segment shared for all packets.
*/
- struct mlx5_wqe_dseg *restrict dseg;
+ struct mlx5_wqe_dseg *__rte_restrict dseg;
unsigned int ds;
uint8_t *dptr;
}
static __rte_always_inline enum mlx5_txcmp_code
-mlx5_tx_burst_single(struct mlx5_txq_data *restrict txq,
- struct rte_mbuf **restrict pkts,
+mlx5_tx_burst_single(struct mlx5_txq_data *__rte_restrict txq,
+ struct rte_mbuf **__rte_restrict pkts,
unsigned int pkts_n,
- struct mlx5_txq_local *restrict loc,
+ struct mlx5_txq_local *__rte_restrict loc,
unsigned int olx)
{
enum mlx5_txcmp_code ret;
* Number of packets successfully transmitted (<= pkts_n).
*/
static __rte_always_inline uint16_t
-mlx5_tx_burst_tmpl(struct mlx5_txq_data *restrict txq,
- struct rte_mbuf **restrict pkts,
+mlx5_tx_burst_tmpl(struct mlx5_txq_data *__rte_restrict txq,
+ struct rte_mbuf **__rte_restrict pkts,
uint16_t pkts_n,
unsigned int olx)
{
/*
* Calculate the number of available resources - elts and WQEs.
* There are two possible different scenarios:
- * - no data inlining into WQEs, one WQEBB may contains upto
+ * - no data inlining into WQEs, one WQEBB may contain up to
* four packets, in this case elts become scarce resource
* - data inlining into WQEs, one packet may require multiple
* WQEBBs, the WQEs become the limiting factor.
/*
* Generate routines with Legacy Multi-Packet Write support.
- * This mode is supported by ConnectX-4LX only and imposes
+ * This mode is supported by ConnectX-4 Lx only and imposes
* offload limitations, not supported:
* - ACL/Flows (metadata are becoming meaningless)
* - WQE Inline headers
/* Does not meet requested offloads at all. */
continue;
}
+ if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_MPW)
+ /* Do not enable legacy MPW if not configured. */
+ continue;
if ((olx ^ tmp) & MLX5_TXOFF_CONFIG_EMPW)
/* Do not enable eMPW if not configured. */
continue;