+ cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask];
+ ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe);
+ if (!ret)
+ break;
+ byte_cnt = ret;
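+ /*
+ * byte_cnt packs three fields: the consumed stride count
+ * (MLX5_MPRQ_STRIDE_NUM_MASK), the filler flag
+ * (MLX5_MPRQ_FILLER_MASK) and the packet byte length
+ * (MLX5_MPRQ_LEN_MASK), all decoded below.
+ */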
+ strd_cnt = (byte_cnt & MLX5_MPRQ_STRIDE_NUM_MASK) >>
+ MLX5_MPRQ_STRIDE_NUM_SHIFT;
+ MLX5_ASSERT(strd_cnt);
+ consumed_strd += strd_cnt;
+ if (byte_cnt & MLX5_MPRQ_FILLER_MASK)
+ continue;
+ if (mcqe == NULL) {
+ rss_hash_res = rte_be_to_cpu_32(cqe->rx_hash_res);
+ strd_idx = rte_be_to_cpu_16(cqe->wqe_counter);
+ } else {
+ /* mini-CQE for MPRQ doesn't have hash result. */
+ strd_idx = rte_be_to_cpu_16(mcqe->stride_idx);
+ }
+ MLX5_ASSERT(strd_idx < strd_n);
+ MLX5_ASSERT(!((rte_be_to_cpu_16(cqe->wqe_id) ^ rq_ci) &
+ wq_mask));
+ pkt = rte_pktmbuf_alloc(rxq->mp);
+ if (unlikely(pkt == NULL)) {
+ ++rxq->stats.rx_nombuf;
+ break;
+ }
+ len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT;
+ MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
+ if (rxq->crc_present)
+ len -= RTE_ETHER_CRC_LEN;
+ rxq_code = mprq_buf_to_pkt(rxq, pkt, len, buf,
+ strd_idx, strd_cnt);
+ if (unlikely(rxq_code != MLX5_RXQ_CODE_EXIT)) {
+ rte_pktmbuf_free_seg(pkt);
+ if (rxq_code == MLX5_RXQ_CODE_DROPPED) {
+ ++rxq->stats.idropped;
+ continue;
+ }
+ if (rxq_code == MLX5_RXQ_CODE_NOMBUF) {
+ ++rxq->stats.rx_nombuf;
+ break;
+ }
+ }
+ rxq_cq_to_mbuf(rxq, pkt, cqe, rss_hash_res);
+ if (cqe->lro_num_seg > 1) {
+ mlx5_lro_update_hdr(rte_pktmbuf_mtod(pkt, uint8_t *),
+ cqe, len);
+ pkt->ol_flags |= PKT_RX_LRO;
+ pkt->tso_segsz = len / cqe->lro_num_seg;
+ }
+ PKT_LEN(pkt) = len;
+ PORT(pkt) = rxq->port_id;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment bytes counter. */
+ rxq->stats.ibytes += PKT_LEN(pkt);
+#endif
+ /* Return packet. */
+ *(pkts++) = pkt;
+ ++i;
+ }
+ /* Update the consumer indexes. */
+ rxq->consumed_strd = consumed_strd;
+ rte_io_wmb();
+ *rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
+ if (rq_ci != rxq->rq_ci) {
+ rxq->rq_ci = rq_ci;
+ rte_io_wmb();
+ *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci);
+ }
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Increment packets counter. */
+ rxq->stats.ipackets += i;
+#endif
+ return i;
+}
+
+/**
+ * Dummy DPDK callback for TX.
+ *
+ * This function is used to temporarily replace the real callback during
+ * unsafe control operations on the queue, or in case of error.
+ *
+ * @param dpdk_txq
+ * Generic pointer to TX queue structure.
+ * @param[in] pkts
+ * Packets to transmit.
+ * @param pkts_n
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted (<= pkts_n).
+ */
+uint16_t
+removed_tx_burst(void *dpdk_txq __rte_unused,
+ struct rte_mbuf **pkts __rte_unused,
+ uint16_t pkts_n __rte_unused)
+{
+ rte_mb();
+ return 0;
+}
+
+/**
+ * Dummy DPDK callback for RX.
+ *
+ * This function is used to temporarily replace the real callback during
+ * unsafe control operations on the queue, or in case of error.
+ *
+ * @param dpdk_rxq
+ * Generic pointer to RX queue structure.
+ * @param[out] pkts
+ * Array to store received packets.
+ * @param pkts_n
+ * Maximum number of packets in array.
+ *
+ * @return
+ * Number of packets successfully received (<= pkts_n).
+ */
+uint16_t
+removed_rx_burst(void *dpdk_rxq __rte_unused,
+ struct rte_mbuf **pkts __rte_unused,
+ uint16_t pkts_n __rte_unused)
+{
+ rte_mb();
+ return 0;
+}
+
+/*
+ * Vectorized Rx/Tx routines are not compiled in when the required vector
+ * instructions are not supported on a target architecture. The following null
+ * stubs are needed for linkage when the real implementations are not compiled
+ * in from another file (e.g. mlx5_rxtx_vec_sse.c for x86).
+ */
+
+__rte_weak uint16_t
+mlx5_rx_burst_vec(void *dpdk_rxq __rte_unused,
+ struct rte_mbuf **pkts __rte_unused,
+ uint16_t pkts_n __rte_unused)
+{
+ return 0;
+}
+
+__rte_weak uint16_t
+mlx5_rx_burst_mprq_vec(void *dpdk_rxq __rte_unused,
+ struct rte_mbuf **pkts __rte_unused,
+ uint16_t pkts_n __rte_unused)
+{
+ return 0;
+}
+
+__rte_weak int
+mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
+{
+ return -ENOTSUP;
+}
+
+__rte_weak int
+mlx5_check_vec_rx_support(struct rte_eth_dev *dev __rte_unused)
+{
+ return -ENOTSUP;
+}
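+
+/*
+ * A minimal sketch of the override mechanism (illustrative, not part of
+ * this patch): when a vector-enabled file is compiled in, its ordinary
+ * (strong) definition of the same symbol takes precedence at link time
+ * over the weak stub, e.g.:
+ *
+ *	uint16_t
+ *	mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts,
+ *			  uint16_t pkts_n)
+ *	{
+ *		return vectorized_rx(dpdk_rxq, pkts, pkts_n);
+ *	}
+ *
+ * where vectorized_rx() is a hypothetical stand-in for the real
+ * vectorized receive path.
+ */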
+
+/**
+ * Free the mbufs from the linear array of pointers.
+ *
+ * @param pkts
+ * Pointer to the array of packets to be freed.
+ * @param pkts_n
+ * Number of packets to be freed.
+ * @param olx
+ * Configured Tx offloads mask. It is fully defined at
+ * compile time and may be used for optimization.
+ */
+static __rte_always_inline void
+mlx5_tx_free_mbuf(struct rte_mbuf **__rte_restrict pkts,
+ unsigned int pkts_n,
+ unsigned int olx __rte_unused)
+{
+ struct rte_mempool *pool = NULL;
+ struct rte_mbuf **p_free = NULL;
+ struct rte_mbuf *mbuf;
+ unsigned int n_free = 0;
+
+ /*
+ * The implemented algorithm eliminates
+ * copying pointers to a temporary array
+ * for rte_mempool_put_bulk() calls.
+ */
+ MLX5_ASSERT(pkts);
+ MLX5_ASSERT(pkts_n);
+ for (;;) {
+ for (;;) {
+ /*
+ * Decrement mbuf reference counter, detach
+ * indirect and external buffers if needed.
+ */
+ mbuf = rte_pktmbuf_prefree_seg(*pkts);
+ if (likely(mbuf != NULL)) {
+ MLX5_ASSERT(mbuf == *pkts);
+ if (likely(n_free != 0)) {
+ if (unlikely(pool != mbuf->pool))
+ /* From different pool. */
+ break;
+ } else {
+ /* Start new scan array. */
+ pool = mbuf->pool;
+ p_free = pkts;
+ }
+ ++n_free;
+ ++pkts;
+ --pkts_n;
+ if (unlikely(pkts_n == 0)) {
+ mbuf = NULL;
+ break;
+ }
+ } else {
+ /*
+ * This happens if mbuf is still referenced.
+ * We can't put it back to the pool, skip.
+ */
+ ++pkts;
+ --pkts_n;
+ if (unlikely(n_free != 0))
+ /* There is some array to free. */
+ break;
+ if (unlikely(pkts_n == 0))
+ /* Last mbuf, nothing to free. */
+ return;
+ }
+ }
+ for (;;) {
+ /*
+ * This loop is implemented to avoid multiple
+ * inlining of rte_mempool_put_bulk().
+ */
+ MLX5_ASSERT(pool);
+ MLX5_ASSERT(p_free);
+ MLX5_ASSERT(n_free);
+ /*
+ * Free the array of pre-freed mbufs
+ * belonging to the same memory pool.
+ */
+ rte_mempool_put_bulk(pool, (void *)p_free, n_free);
+ if (unlikely(mbuf != NULL)) {
+ /* There is the request to start new scan. */
+ pool = mbuf->pool;
+ p_free = pkts++;
+ n_free = 1;
+ --pkts_n;
+ if (likely(pkts_n != 0))
+ break;
+ /*
+ * This is the last mbuf to be freed.
+ * Do one more loop iteration to complete.
+ * This is rare case of the last unique mbuf.
+ */
+ mbuf = NULL;
+ continue;
+ }
+ if (likely(pkts_n == 0))
+ return;
+ n_free = 0;
+ break;
+ }
+ }
+}
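+
+/*
+ * For reference, a simplified sketch of the approach this routine
+ * avoids (illustrative only, the scratch[] array and its size are
+ * hypothetical): staging pointers in a temporary array before each
+ * rte_mempool_put_bulk() call:
+ *
+ *	struct rte_mbuf *scratch[256];
+ *	unsigned int n = 0;
+ *
+ *	while (pkts_n--) {
+ *		struct rte_mbuf *m = rte_pktmbuf_prefree_seg(*pkts++);
+ *
+ *		if (m != NULL && m->pool == pool)
+ *			scratch[n++] = m;
+ *	}
+ *	rte_mempool_put_bulk(pool, (void **)scratch, n);
+ *
+ * The routine above instead frees maximal runs of consecutive mbufs
+ * sharing one mempool directly from the pkts[] array itself.
+ */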
+
+/**
+ * Free the mbufs from the elts ring buffer up to the new tail.
+ *
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param tail
+ * Index in elts to free up to, becomes new elts tail.
+ * @param olx
+ * Configured Tx offloads mask. It is fully defined at
+ * compile time and may be used for optimization.
+ */
+static __rte_always_inline void
+mlx5_tx_free_elts(struct mlx5_txq_data *__rte_restrict txq,
+ uint16_t tail,
+ unsigned int olx __rte_unused)
+{
+ uint16_t n_elts = tail - txq->elts_tail;
+
+ MLX5_ASSERT(n_elts);
+ MLX5_ASSERT(n_elts <= txq->elts_s);
+ /*
+ * Implement a loop to support ring buffer wraparound
+ * with single inlining of mlx5_tx_free_mbuf().
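+ * For example, with elts_s = 256 (elts_m = 255), elts_tail = 250 and
+ * tail = 260 (wrapped), the first pass frees part = 256 - 250 = 6
+ * mbufs at indexes 250..255 and the second pass frees the remaining
+ * n_elts = 4 mbufs at indexes 0..3.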
+ */
+ do {
+ unsigned int part;
+
+ part = txq->elts_s - (txq->elts_tail & txq->elts_m);
+ part = RTE_MIN(part, n_elts);
+ MLX5_ASSERT(part);
+ MLX5_ASSERT(part <= txq->elts_s);
+ mlx5_tx_free_mbuf(&txq->elts[txq->elts_tail & txq->elts_m],
+ part, olx);
+ txq->elts_tail += part;
+ n_elts -= part;
+ } while (n_elts);
+}
+
+/**
+ * Store the mbuf being sent into elts ring buffer.
+ * On Tx completion these mbufs will be freed.
+ *
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param pkts
+ * Pointer to array of packets to be stored.
+ * @param pkts_n
+ * Number of packets to be stored.
+ * @param olx
+ * Configured Tx offloads mask. It is fully defined at
+ * compile time and may be used for optimization.
+ */
+static __rte_always_inline void
+mlx5_tx_copy_elts(struct mlx5_txq_data *__rte_restrict txq,
+ struct rte_mbuf **__rte_restrict pkts,
+ unsigned int pkts_n,
+ unsigned int olx __rte_unused)
+{
+ unsigned int part;
+ struct rte_mbuf **elts = (struct rte_mbuf **)txq->elts;
+
+ MLX5_ASSERT(pkts);
+ MLX5_ASSERT(pkts_n);
+ part = txq->elts_s - (txq->elts_head & txq->elts_m);
+ MLX5_ASSERT(part);
+ MLX5_ASSERT(part <= txq->elts_s);
+ /* This code is a good candidate for vectorizing with SIMD. */
+ rte_memcpy((void *)(elts + (txq->elts_head & txq->elts_m)),
+ (void *)pkts,
+ RTE_MIN(part, pkts_n) * sizeof(struct rte_mbuf *));
+ txq->elts_head += pkts_n;
+ if (unlikely(part < pkts_n))
+ /* The copy is wrapping around the elts array. */
+ rte_memcpy((void *)elts, (void *)(pkts + part),
+ (pkts_n - part) * sizeof(struct rte_mbuf *));
+}
+
+/**
+ * Update completion queue consuming index via doorbell
+ * and flush the completed data buffers.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param last_cqe
+ * Pointer to the last valid CQE. If not NULL, update
+ * txq->wqe_pi and flush the completed data buffers.
+ * @param olx
+ * Configured Tx offloads mask. It is fully defined at
+ * compile time and may be used for optimization.
+ */
+static __rte_always_inline void
+mlx5_tx_comp_flush(struct mlx5_txq_data *__rte_restrict txq,
+ volatile struct mlx5_cqe *last_cqe,
+ unsigned int olx __rte_unused)
+{
+ if (likely(last_cqe != NULL)) {
+ uint16_t tail;
+
+ txq->wqe_pi = rte_be_to_cpu_16(last_cqe->wqe_counter);
+ tail = txq->fcqs[(txq->cq_ci - 1) & txq->cqe_m];
+ if (likely(tail != txq->elts_tail)) {
+ mlx5_tx_free_elts(txq, tail, olx);
+ MLX5_ASSERT(tail == txq->elts_tail);
+ }
+ }
+}
+
+/**
+ * Manage TX completions. This routine checks the CQ for
+ * arrived CQEs, deduces the last completed WQE in the SQ,
+ * updates the SQ producer index and frees all completed mbufs.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param olx
+ * Configured Tx offloads mask. It is fully defined at
+ * compile time and may be used for optimization.
+ *
+ * NOTE: not inlined intentionally, this makes the tx_burst
+ * routine smaller, simpler and faster - per experiments.
+ */
+static void
+mlx5_tx_handle_completion(struct mlx5_txq_data *__rte_restrict txq,
+ unsigned int olx __rte_unused)
+{
+ unsigned int count = MLX5_TX_COMP_MAX_CQE;
+ volatile struct mlx5_cqe *last_cqe = NULL;
+ bool ring_doorbell = false;
+ int ret;
+
+ static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
+ static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value");
+ do {
+ volatile struct mlx5_cqe *cqe;
+
+ cqe = &txq->cqes[txq->cq_ci & txq->cqe_m];
+ ret = check_cqe(cqe, txq->cqe_s, txq->cq_ci);
+ if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
+ if (likely(ret != MLX5_CQE_STATUS_ERR)) {
+ /* No new CQEs in completion queue. */
+ MLX5_ASSERT(ret == MLX5_CQE_STATUS_HW_OWN);
+ break;
+ }
+ /*
+ * Some error occurred, try to restart.
+ * There is no barrier after the WQE-related doorbell
+ * is written, so make sure all writes are completed
+ * here before we might perform an SQ reset.
+ */
+ rte_wmb();
+ ret = mlx5_tx_error_cqe_handle
+ (txq, (volatile struct mlx5_err_cqe *)cqe);
+ if (unlikely(ret < 0)) {
+ /*
+ * Some error occurred during queue error
+ * handling, do not advance the index here,
+ * allowing a retry on the next call.
+ */
+ return;
+ }
+ /*
+ * We are going to fetch all entries with
+ * MLX5_CQE_SYNDROME_WR_FLUSH_ERR status.
+ * The send queue is supposed to be empty.
+ */
+ ring_doorbell = true;
+ ++txq->cq_ci;
+ txq->cq_pi = txq->cq_ci;
+ last_cqe = NULL;
+ continue;
+ }
+ /* Normal transmit completion. */
+ MLX5_ASSERT(txq->cq_ci != txq->cq_pi);
+ MLX5_ASSERT((txq->fcqs[txq->cq_ci & txq->cqe_m] >> 16) ==
+ cqe->wqe_counter);
+ ring_doorbell = true;
+ ++txq->cq_ci;
+ last_cqe = cqe;
+ /*
+ * We have to restrict the amount of processed CQEs
+ * in one tx_burst routine call. The CQ may be large
+ * and many CQEs may be updated by the NIC in one
+ * transaction. Buffers freeing is time consuming,
+ * multiple iterations may introduce significant
+ * latency.
+ */
+ if (likely(--count == 0))
+ break;
+ } while (true);
+ if (likely(ring_doorbell)) {
+ /* Ring doorbell to notify hardware. */
+ rte_compiler_barrier();
+ *txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
+ mlx5_tx_comp_flush(txq, last_cqe, olx);
+ }
+}
+
+/**
+ * Check if the completion request flag should be set in the last WQE.
+ * Both pushed mbufs and WQEs are monitored and the completion request
+ * flag is set if either threshold is reached.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param loc
+ * Pointer to burst routine local context.
+ * @param olx
+ * Configured Tx offloads mask. It is fully defined at
+ * compile time and may be used for optimization.
+ */
+static __rte_always_inline void
+mlx5_tx_request_completion(struct mlx5_txq_data *__rte_restrict txq,
+ struct mlx5_txq_local *__rte_restrict loc,
+ unsigned int olx)
+{
+ uint16_t head = txq->elts_head;
+ unsigned int part;
+
+ part = MLX5_TXOFF_CONFIG(INLINE) ?
+ 0 : loc->pkts_sent - loc->pkts_copy;
+ head += part;
+ if ((uint16_t)(head - txq->elts_comp) >= MLX5_TX_COMP_THRESH ||
+ (MLX5_TXOFF_CONFIG(INLINE) &&
+ (uint16_t)(txq->wqe_ci - txq->wqe_comp) >= txq->wqe_thres)) {
+ volatile struct mlx5_wqe *last = loc->wqe_last;
+
+ MLX5_ASSERT(last);
+ txq->elts_comp = head;
+ if (MLX5_TXOFF_CONFIG(INLINE))
+ txq->wqe_comp = txq->wqe_ci;
+ /* Request unconditional completion on last WQE. */
+ last->cseg.flags = RTE_BE32(MLX5_COMP_ALWAYS <<
+ MLX5_COMP_MODE_OFFSET);
+ /* Save elts_head in dedicated free on completion queue. */
+#ifdef RTE_LIBRTE_MLX5_DEBUG
+ txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head |
+ (last->cseg.opcode >> 8) << 16;
+#else
+ txq->fcqs[txq->cq_pi++ & txq->cqe_m] = head;
+#endif
+ /* A CQE slot must always be available. */
+ MLX5_ASSERT((txq->cq_pi - txq->cq_ci) <= txq->cqe_s);
+ }
+}
+
+/**
+ * DPDK callback to check the status of a tx descriptor.
+ *
+ * @param tx_queue
+ * The tx queue.
+ * @param[in] offset
+ * The index of the descriptor in the ring.
+ *
+ * @return
+ * The status of the tx descriptor.
+ */
+int
+mlx5_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ struct mlx5_txq_data *__rte_restrict txq = tx_queue;
+ uint16_t used;
+
+ mlx5_tx_handle_completion(txq, 0);
+ used = txq->elts_head - txq->elts_tail;
+ if (offset < used)
+ return RTE_ETH_TX_DESC_FULL;
+ return RTE_ETH_TX_DESC_DONE;
+}
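+
+/*
+ * Usage sketch (illustrative, not part of this patch): applications
+ * reach this callback through the generic ethdev API, e.g. to poll
+ * the 32nd descriptor of Tx queue 0 on port 0:
+ *
+ *	int status = rte_eth_tx_descriptor_status(0, 0, 32);
+ *
+ *	if (status == RTE_ETH_TX_DESC_FULL)
+ *		printf("descriptor still in flight\n");
+ *	else if (status == RTE_ETH_TX_DESC_DONE)
+ *		printf("descriptor completed\n");
+ */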
+
+/**
+ * Build the Control Segment with specified opcode:
+ * - MLX5_OPCODE_SEND
+ * - MLX5_OPCODE_ENHANCED_MPSW
+ * - MLX5_OPCODE_TSO
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param loc
+ * Pointer to burst routine local context.
+ * @param wqe
+ * Pointer to WQE to fill with built Control Segment.
+ * @param ds
+ * Supposed length of WQE in segments.
+ * @param opcode
+ * SQ WQE opcode to put into Control Segment.
+ * @param olx
+ * Configured Tx offloads mask. It is fully defined at
+ * compile time and may be used for optimization.
+ */
+static __rte_always_inline void
+mlx5_tx_cseg_init(struct mlx5_txq_data *__rte_restrict txq,
+ struct mlx5_txq_local *__rte_restrict loc __rte_unused,
+ struct mlx5_wqe *__rte_restrict wqe,
+ unsigned int ds,
+ unsigned int opcode,
+ unsigned int olx __rte_unused)
+{
+ struct mlx5_wqe_cseg *__rte_restrict cs = &wqe->cseg;
+
+ /* For legacy MPW replace the EMPW by TSO with modifier. */
+ if (MLX5_TXOFF_CONFIG(MPW) && opcode == MLX5_OPCODE_ENHANCED_MPSW)
+ opcode = MLX5_OPCODE_TSO | MLX5_OPC_MOD_MPW << 24;
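+ /*
+ * The opcode dword below is composed as: bits 7:0 - opcode,
+ * bits 23:8 - WQE index (wqe_ci), bits 31:24 - opcode modifier
+ * (e.g. MLX5_OPC_MOD_MPW for legacy MPW).
+ */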
+ cs->opcode = rte_cpu_to_be_32((txq->wqe_ci << 8) | opcode);
+ cs->sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
+ cs->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
+ MLX5_COMP_MODE_OFFSET);
+ cs->misc = RTE_BE32(0);
+}
+
+/**
+ * Build the Synchronize Queue Segment with specified completion index.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param loc
+ * Pointer to burst routine local context.
+ * @param wqe
+ * Pointer to WQE to fill with built Synchronize Queue Segment.
+ * @param wci
+ * Completion index in Clock Queue to wait.
+ * @param olx
+ * Configured Tx offloads mask. It is fully defined at
+ * compile time and may be used for optimization.
+ */
+static __rte_always_inline void
+mlx5_tx_wseg_init(struct mlx5_txq_data *__rte_restrict txq,
+ struct mlx5_txq_local *__rte_restrict loc __rte_unused,
+ struct mlx5_wqe *__rte_restrict wqe,
+ unsigned int wci,
+ unsigned int olx __rte_unused)
+{
+ struct mlx5_wqe_qseg *qs;
+
+ qs = RTE_PTR_ADD(wqe, MLX5_WSEG_SIZE);
+ qs->max_index = rte_cpu_to_be_32(wci);
+ qs->qpn_cqn = rte_cpu_to_be_32(txq->sh->txpp.clock_queue.cq->id);
+ qs->reserved0 = RTE_BE32(0);
+ qs->reserved1 = RTE_BE32(0);
+}
+
+/**
+ * Build the Ethernet Segment without inlined data.
+ * Supports Software Parser, Checksums and VLAN
+ * insertion Tx offload features.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param loc
+ * Pointer to burst routine local context.
+ * @param wqe
+ * Pointer to WQE to fill with built Ethernet Segment.
+ * @param olx
+ * Configured Tx offloads mask. It is fully defined at
+ * compile time and may be used for optimization.
+ */
+static __rte_always_inline void
+mlx5_tx_eseg_none(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
+ struct mlx5_txq_local *__rte_restrict loc,
+ struct mlx5_wqe *__rte_restrict wqe,
+ unsigned int olx)
+{
+ struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
+ uint32_t csum;
+
+ /*
+ * Calculate and set checksum flags first, the dword field
+ * in the segment may be shared with Software Parser flags.
+ */
+ csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
+ es->flags = rte_cpu_to_le_32(csum);
+ /*
+ * Calculate and set Software Parser offsets and flags.
+ * These flags are set for custom UDP and IP tunnel packets.
+ */
+ es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
+ /* Fill metadata field if needed. */
+ es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
+ loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
+ *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
+ /* Engage VLAN tag insertion feature if requested. */
+ if (MLX5_TXOFF_CONFIG(VLAN) &&
+ loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+ /*
+ * We should get here only if the device supports
+ * this feature correctly.
+ */
+ MLX5_ASSERT(txq->vlan_en);
+ es->inline_hdr = rte_cpu_to_be_32(MLX5_ETH_WQE_VLAN_INSERT |
+ loc->mbuf->vlan_tci);
+ } else {
+ es->inline_hdr = RTE_BE32(0);
+ }
+}
+
+/**
+ * Build the Ethernet Segment with minimal inlined data
+ * of MLX5_ESEG_MIN_INLINE_SIZE bytes. This is used
+ * to fill the gap in single WQEBB WQEs.
+ * Supports Software Parser, Checksums and VLAN
+ * insertion Tx offload features.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param loc
+ * Pointer to burst routine local context.
+ * @param wqe
+ * Pointer to WQE to fill with built Ethernet Segment.
+ * @param vlan
+ * Length of VLAN tag insertion if any.
+ * @param olx
+ * Configured Tx offloads mask. It is fully defined at
+ * compile time and may be used for optimization.
+ */
+static __rte_always_inline void
+mlx5_tx_eseg_dmin(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
+ struct mlx5_txq_local *__rte_restrict loc,
+ struct mlx5_wqe *__rte_restrict wqe,
+ unsigned int vlan,
+ unsigned int olx)
+{
+ struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
+ uint32_t csum;
+ uint8_t *psrc, *pdst;
+
+ /*
+ * Calculate and set checksum flags first, the dword field
+ * in the segment may be shared with Software Parser flags.
+ */
+ csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
+ es->flags = rte_cpu_to_le_32(csum);
+ /*
+ * Calculate and set Software Parser offsets and flags.
+ * These flags are set for custom UDP and IP tunnel packets.
+ */
+ es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
+ /* Fill metadata field if needed. */
+ es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
+ loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
+ *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
+ static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
+ (sizeof(uint16_t) +
+ sizeof(rte_v128u32_t)),
+ "invalid Ethernet Segment data size");
+ static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
+ (sizeof(uint16_t) +
+ sizeof(struct rte_vlan_hdr) +
+ 2 * RTE_ETHER_ADDR_LEN),
+ "invalid Ethernet Segment data size");
+ psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
+ es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
+ es->inline_data = *(unaligned_uint16_t *)psrc;
+ psrc += sizeof(uint16_t);
+ pdst = (uint8_t *)(es + 1);
+ if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
+ /* Implement VLAN tag insertion as part of the inline data. */
+ memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
+ pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
+ psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
+ /* Insert VLAN ethertype + VLAN tag. */
+ *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
+ ((RTE_ETHER_TYPE_VLAN << 16) |
+ loc->mbuf->vlan_tci);
+ pdst += sizeof(struct rte_vlan_hdr);
+ /* Copy the remaining two bytes of packet data. */
+ MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
+ *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
+ } else {
+ /* Fill the gap in the title WQEBB with inline data. */
+ rte_mov16(pdst, psrc);
+ }
+}
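+
+/*
+ * Per the static asserts above, MLX5_ESEG_MIN_INLINE_SIZE is
+ * 2 + 16 = 18 bytes: 2 bytes stored in es->inline_data plus 16 bytes
+ * following the Ethernet Segment. With VLAN insertion the same 18
+ * bytes are 12 bytes of MAC addresses, the 4-byte VLAN header and
+ * 2 more bytes of packet data.
+ */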
+
+/**
+ * Build the Ethernet Segment with entire packet
+ * data inlining. Checks the boundary of WQEBB and
+ * ring buffer wrapping, supports Software Parser,
+ * Checksums and VLAN insertion Tx offload features.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param loc
+ * Pointer to burst routine local context.
+ * @param wqe
+ * Pointer to WQE to fill with built Ethernet Segment.
+ * @param vlan
+ * Length of VLAN tag insertion if any.
+ * @param inlen
+ * Length of data to inline (VLAN included, if any).
+ * @param tso
+ * TSO flag, set mss field from the packet.
+ * @param olx
+ * Configured Tx offloads mask. It is fully defined at
+ * compile time and may be used for optimization.
+ *
+ * @return
+ * Pointer to the next Data Segment (aligned and wrapped around).
+ */
+static __rte_always_inline struct mlx5_wqe_dseg *
+mlx5_tx_eseg_data(struct mlx5_txq_data *__rte_restrict txq,
+ struct mlx5_txq_local *__rte_restrict loc,
+ struct mlx5_wqe *__rte_restrict wqe,
+ unsigned int vlan,
+ unsigned int inlen,
+ unsigned int tso,
+ unsigned int olx)
+{
+ struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
+ uint32_t csum;
+ uint8_t *psrc, *pdst;
+ unsigned int part;
+
+ /*
+ * Calculate and set checksum flags first, the dword field
+ * in the segment may be shared with Software Parser flags.
+ */
+ csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
+ if (tso) {
+ csum <<= 24;
+ csum |= loc->mbuf->tso_segsz;
+ es->flags = rte_cpu_to_be_32(csum);
+ } else {
+ es->flags = rte_cpu_to_le_32(csum);
+ }
+ /*
+ * Calculate and set Software Parser offsets and flags.
+ * These flags are set for custom UDP and IP tunnel packets.
+ */
+ es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
+ /* Fill metadata field if needed. */
+ es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
+ loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
+ *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
+ static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
+ (sizeof(uint16_t) +
+ sizeof(rte_v128u32_t)),
+ "invalid Ethernet Segment data size");
+ static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
+ (sizeof(uint16_t) +
+ sizeof(struct rte_vlan_hdr) +
+ 2 * RTE_ETHER_ADDR_LEN),
+ "invalid Ethernet Segment data size");
+ psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
+ es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
+ es->inline_data = *(unaligned_uint16_t *)psrc;
+ psrc += sizeof(uint16_t);
+ pdst = (uint8_t *)(es + 1);
+ if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
+ /* Implement VLAN tag insertion as part of the inline data. */
+ memcpy(pdst, psrc, 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t));
+ pdst += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
+ psrc += 2 * RTE_ETHER_ADDR_LEN - sizeof(uint16_t);
+ /* Insert VLAN ethertype + VLAN tag. */
+ *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
+ ((RTE_ETHER_TYPE_VLAN << 16) |
+ loc->mbuf->vlan_tci);
+ pdst += sizeof(struct rte_vlan_hdr);
+ /* Copy the remaining two bytes of packet data. */
+ MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, sizeof(uint16_t)));
+ *(uint16_t *)pdst = *(unaligned_uint16_t *)psrc;
+ psrc += sizeof(uint16_t);
+ } else {
+ /* Fill the gap in the title WQEBB with inline data. */
+ rte_mov16(pdst, psrc);
+ psrc += sizeof(rte_v128u32_t);
+ }
+ pdst = (uint8_t *)(es + 2);
+ MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
+ MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
+ inlen -= MLX5_ESEG_MIN_INLINE_SIZE;
+ if (!inlen) {
+ MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
+ return (struct mlx5_wqe_dseg *)pdst;
+ }
+ /*
+ * The WQEBB space availability is checked by caller.
+ * Here we should be aware of WQE ring buffer wraparound only.
+ */
+ part = (uint8_t *)txq->wqes_end - pdst;
+ part = RTE_MIN(part, inlen);
+ do {
+ rte_memcpy(pdst, psrc, part);
+ inlen -= part;
+ if (likely(!inlen)) {
+ /*
+ * If return value is not used by the caller
+ * the code below will be optimized out.
+ */
+ pdst += part;
+ pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
+ if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
+ pdst = (uint8_t *)txq->wqes;
+ return (struct mlx5_wqe_dseg *)pdst;
+ }
+ pdst = (uint8_t *)txq->wqes;
+ psrc += part;
+ part = inlen;
+ } while (true);
+}
+
+/**
+ * Copy data from a chain of mbufs to the specified linear buffer.
+ * If the data from an mbuf is copied completely, the mbuf is freed.
+ * The local structure is used to keep the byte stream state.
+ *
+ * @param pdst
+ * Pointer to the destination linear buffer.
+ * @param loc
+ * Pointer to burst routine local context.
+ * @param len
+ * Length of data to be copied.
+ * @param must
+ * Minimal length of data to be copied, ignoring the no-inline hint.
+ * @param olx
+ * Configured Tx offloads mask. It is fully defined at
+ * compile time and may be used for optimization.
+ *
+ * @return
+ * Number of actually copied data bytes. This is always greater than
+ * or equal to the must parameter and might be less than len if the
+ * no-inline hint flag is encountered.
+ */
+static __rte_always_inline unsigned int
+mlx5_tx_mseg_memcpy(uint8_t *pdst,
+ struct mlx5_txq_local *__rte_restrict loc,
+ unsigned int len,
+ unsigned int must,
+ unsigned int olx __rte_unused)
+{
+ struct rte_mbuf *mbuf;
+ unsigned int part, dlen, copy = 0;
+ uint8_t *psrc;
+
+ MLX5_ASSERT(len);
+ MLX5_ASSERT(must <= len);
+ do {
+ /* Allow zero length packets, must check first. */
+ dlen = rte_pktmbuf_data_len(loc->mbuf);
+ if (dlen <= loc->mbuf_off) {
+ /* Exhausted packet, just free. */
+ mbuf = loc->mbuf;
+ loc->mbuf = mbuf->next;
+ rte_pktmbuf_free_seg(mbuf);
+ loc->mbuf_off = 0;
+ MLX5_ASSERT(loc->mbuf_nseg > 1);
+ MLX5_ASSERT(loc->mbuf);
+ --loc->mbuf_nseg;
+ if (loc->mbuf->ol_flags & PKT_TX_DYNF_NOINLINE) {
+ unsigned int diff;
+
+ if (copy >= must) {
+ /*
+ * We already copied the minimal
+ * requested amount of data.
+ */
+ return copy;
+ }
+ diff = must - copy;
+ if (diff <= rte_pktmbuf_data_len(loc->mbuf)) {
+ /*
+ * Copy only the minimal required
+ * part of the data buffer.
+ */
+ len = diff;
+ }
+ }
+ continue;
+ }
+ dlen -= loc->mbuf_off;
+ psrc = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
+ loc->mbuf_off);
+ part = RTE_MIN(len, dlen);
+ rte_memcpy(pdst, psrc, part);
+ copy += part;
+ loc->mbuf_off += part;
+ len -= part;
+ if (!len) {
+ if (loc->mbuf_off >= rte_pktmbuf_data_len(loc->mbuf)) {
+ /* Exhausted packet, just free. */
+ mbuf = loc->mbuf;
+ loc->mbuf = mbuf->next;
+ rte_pktmbuf_free_seg(mbuf);
+ loc->mbuf_off = 0;
+ MLX5_ASSERT(loc->mbuf_nseg >= 1);
+ --loc->mbuf_nseg;
+ }
+ return copy;
+ }
+ pdst += part;
+ } while (true);
+}
+
+/**
+ * Build the Ethernet Segment with inlined data from
+ * multi-segment packet. Checks the boundary of WQEBB
+ * and ring buffer wrapping, supports Software Parser,
+ * Checksums and VLAN insertion Tx offload features.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param loc
+ * Pointer to burst routine local context.
+ * @param wqe
+ * Pointer to WQE to fill with built Ethernet Segment.
+ * @param vlan
+ * Length of VLAN tag insertion if any.
+ * @param inlen
+ * Length of data to inline (VLAN included, if any).
+ * @param tso
+ * TSO flag, set mss field from the packet.
+ * @param olx
+ * Configured Tx offloads mask. It is fully defined at
+ * compile time and may be used for optimization.
+ *
+ * @return
+ * Pointer to the next Data Segment (aligned and
+ * possibly NOT wrapped around - the caller should
+ * do the wrapping check on its own).
+ */
+static __rte_always_inline struct mlx5_wqe_dseg *
+mlx5_tx_eseg_mdat(struct mlx5_txq_data *__rte_restrict txq,
+ struct mlx5_txq_local *__rte_restrict loc,
+ struct mlx5_wqe *__rte_restrict wqe,
+ unsigned int vlan,
+ unsigned int inlen,
+ unsigned int tso,
+ unsigned int olx)
+{
+ struct mlx5_wqe_eseg *__rte_restrict es = &wqe->eseg;
+ uint32_t csum;
+ uint8_t *pdst;
+ unsigned int part, tlen = 0;
+
+ /*
+ * Calculate and set checksum flags first, the dword field
+ * in the segment may be shared with Software Parser flags.
+ */
+ csum = MLX5_TXOFF_CONFIG(CSUM) ? txq_ol_cksum_to_cs(loc->mbuf) : 0;
+ if (tso) {
+ csum <<= 24;
+ csum |= loc->mbuf->tso_segsz;
+ es->flags = rte_cpu_to_be_32(csum);
+ } else {
+ es->flags = rte_cpu_to_le_32(csum);
+ }
+ /*
+ * Calculate and set Software Parser offsets and flags.
+ * These flags are set for custom UDP and IP tunnel packets.
+ */
+ es->swp_offs = txq_mbuf_to_swp(loc, &es->swp_flags, olx);
+ /* Fill metadata field if needed. */
+ es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
+ loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
+ *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
+ static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
+ (sizeof(uint16_t) +
+ sizeof(rte_v128u32_t)),
+ "invalid Ethernet Segment data size");
+ static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
+ (sizeof(uint16_t) +
+ sizeof(struct rte_vlan_hdr) +
+ 2 * RTE_ETHER_ADDR_LEN),
+ "invalid Ethernet Segment data size");
+ MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
+ pdst = (uint8_t *)&es->inline_data;
+ if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
+ /* Implement VLAN tag insertion as part of the inline data. */
+ mlx5_tx_mseg_memcpy(pdst, loc,
+ 2 * RTE_ETHER_ADDR_LEN,
+ 2 * RTE_ETHER_ADDR_LEN, olx);
+ pdst += 2 * RTE_ETHER_ADDR_LEN;
+ *(unaligned_uint32_t *)pdst = rte_cpu_to_be_32
+ ((RTE_ETHER_TYPE_VLAN << 16) |
+ loc->mbuf->vlan_tci);
+ pdst += sizeof(struct rte_vlan_hdr);
+ tlen += 2 * RTE_ETHER_ADDR_LEN + sizeof(struct rte_vlan_hdr);
+ }
+ MLX5_ASSERT(pdst < (uint8_t *)txq->wqes_end);
+ /*
+ * The WQEBB space availability is checked by caller.
+ * Here we should be aware of WQE ring buffer wraparound only.
+ */
+ part = (uint8_t *)txq->wqes_end - pdst;
+ part = RTE_MIN(part, inlen - tlen);
+ MLX5_ASSERT(part);
+ do {
+ unsigned int copy;
+
+ /*
+ * Copying may be interrupted inside the routine
+ * if the no-inline hint flag is encountered.
+ */
+ copy = tlen >= txq->inlen_mode ? 0 : (txq->inlen_mode - tlen);
+ copy = mlx5_tx_mseg_memcpy(pdst, loc, part, copy, olx);
+ tlen += copy;
+ if (likely(inlen <= tlen) || copy < part) {
+ es->inline_hdr_sz = rte_cpu_to_be_16(tlen);
+ pdst += copy;
+ pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
+ return (struct mlx5_wqe_dseg *)pdst;
+ }
+ pdst = (uint8_t *)txq->wqes;
+ part = inlen - tlen;
+ } while (true);
+}
+
+/**
+ * Build the Data Segment of pointer type.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param loc
+ * Pointer to burst routine local context.
+ * @param dseg
+ * Pointer to WQE to fill with built Data Segment.
+ * @param buf
+ * Data buffer to point.
+ * @param len
+ * Data buffer length.
+ * @param olx
+ * Configured Tx offloads mask. It is fully defined at
+ * compile time and may be used for optimization.
+ */
+static __rte_always_inline void
+mlx5_tx_dseg_ptr(struct mlx5_txq_data *__rte_restrict txq,
+ struct mlx5_txq_local *__rte_restrict loc,
+ struct mlx5_wqe_dseg *__rte_restrict dseg,
+ uint8_t *buf,
+ unsigned int len,
+ unsigned int olx __rte_unused)
+
+{
+ MLX5_ASSERT(len);
+ dseg->bcount = rte_cpu_to_be_32(len);
+ dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
+ dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
+}
+
+/**
+ * Build the Data Segment of pointer type, or inline
+ * the data if its length does not exceed the minimal
+ * Data Segment inline size.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param loc
+ * Pointer to burst routine local context.
+ * @param dseg
+ * Pointer to WQE to fill with built Data Segment.
+ * @param buf
+ * Data buffer to point.
+ * @param len
+ * Data buffer length.
+ * @param olx
+ * Configured Tx offloads mask. It is fully defined at
+ * compile time and may be used for optimization.
+ */
+static __rte_always_inline void
+mlx5_tx_dseg_iptr(struct mlx5_txq_data *__rte_restrict txq,
+ struct mlx5_txq_local *__rte_restrict loc,
+ struct mlx5_wqe_dseg *__rte_restrict dseg,
+ uint8_t *buf,
+ unsigned int len,
+ unsigned int olx __rte_unused)
+
+{
+ uintptr_t dst, src;
+
+ MLX5_ASSERT(len);
+ if (len > MLX5_DSEG_MIN_INLINE_SIZE) {
+ dseg->bcount = rte_cpu_to_be_32(len);
+ dseg->lkey = mlx5_tx_mb2mr(txq, loc->mbuf);
+ dseg->pbuf = rte_cpu_to_be_64((uintptr_t)buf);
+
+ return;
+ }
+ dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
+ /* Unrolled implementation of generic rte_memcpy. */
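+ /*
+ * For example, len = 11 (0b1011) is copied as one 8-byte,
+ * one 2-byte and one 1-byte move. len never exceeds
+ * MLX5_DSEG_MIN_INLINE_SIZE here, so each branch below
+ * runs at most once.
+ */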
+ dst = (uintptr_t)&dseg->inline_data[0];
+ src = (uintptr_t)buf;
+ if (len & 0x08) {
+#ifdef RTE_ARCH_STRICT_ALIGN
+ MLX5_ASSERT(dst == RTE_PTR_ALIGN(dst, sizeof(uint32_t)));
+ *(uint32_t *)dst = *(unaligned_uint32_t *)src;
+ dst += sizeof(uint32_t);
+ src += sizeof(uint32_t);
+ *(uint32_t *)dst = *(unaligned_uint32_t *)src;
+ dst += sizeof(uint32_t);
+ src += sizeof(uint32_t);
+#else
+ *(uint64_t *)dst = *(unaligned_uint64_t *)src;
+ dst += sizeof(uint64_t);
+ src += sizeof(uint64_t);
+#endif
+ }
+ if (len & 0x04) {
+ *(uint32_t *)dst = *(unaligned_uint32_t *)src;
+ dst += sizeof(uint32_t);
+ src += sizeof(uint32_t);
+ }
+ if (len & 0x02) {
+ *(uint16_t *)dst = *(unaligned_uint16_t *)src;
+ dst += sizeof(uint16_t);
+ src += sizeof(uint16_t);
+ }
+ if (len & 0x01)
+ *(uint8_t *)dst = *(uint8_t *)src;
+}
+
+/**
+ * Build the Data Segment of inlined data from single
+ * segment packet, no VLAN insertion.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param loc
+ * Pointer to burst routine local context.
+ * @param dseg
+ * Pointer to WQE to fill with built Data Segment.
+ * @param buf
+ * Data buffer to point.
+ * @param len
+ * Data buffer length.
+ * @param olx
+ * Configured Tx offloads mask. It is fully defined at
+ * compile time and may be used for optimization.
+ *
+ * @return
+ * Pointer to the next Data Segment after inlined data.
+ * Ring buffer wraparound check is needed. We do not
+ * do it here because it may not be needed for the
+ * last packet in the eMPW session.
+ */
+static __rte_always_inline struct mlx5_wqe_dseg *
+mlx5_tx_dseg_empw(struct mlx5_txq_data *__rte_restrict txq,
+ struct mlx5_txq_local *__rte_restrict loc __rte_unused,
+ struct mlx5_wqe_dseg *__rte_restrict dseg,
+ uint8_t *buf,
+ unsigned int len,
+ unsigned int olx __rte_unused)
+{
+ unsigned int part;
+ uint8_t *pdst;
+
+ if (!MLX5_TXOFF_CONFIG(MPW)) {
+ /* Store the descriptor byte counter for eMPW sessions. */
+ dseg->bcount = rte_cpu_to_be_32(len | MLX5_ETH_WQE_DATA_INLINE);
+ pdst = &dseg->inline_data[0];
+ } else {
+ /* The entire legacy MPW session counter is stored on close. */
+ pdst = (uint8_t *)dseg;
+ }
+ /*
+ * The WQEBB space availability is checked by caller.
+ * Here we should be aware of WQE ring buffer wraparound only.
+ */
+ part = (uint8_t *)txq->wqes_end - pdst;
+ part = RTE_MIN(part, len);
+ do {
+ rte_memcpy(pdst, buf, part);
+ len -= part;
+ if (likely(!len)) {
+ pdst += part;
+ if (!MLX5_TXOFF_CONFIG(MPW))
+ pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
+ /* Note: no final wraparound check here. */
+ return (struct mlx5_wqe_dseg *)pdst;
+ }
+ pdst = (uint8_t *)txq->wqes;
+ buf += part;
+ part = len;
+ } while (true);
+}
+
+/**
+ * Build the Data Segment of inlined data from single
+ * segment packet with VLAN insertion.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param loc
+ * Pointer to burst routine local context.
+ * @param dseg
+ * Pointer to the dseg to fill with built Data Segment.
+ * @param buf
+ * Data buffer to point.
+ * @param len
+ * Data buffer length.
+ * @param olx
+ * Configured Tx offloads mask. It is fully defined at
+ * compile time and may be used for optimization.
+ *
+ * @return
+ * Pointer to the next Data Segment after inlined data.
+ * Ring buffer wraparound check is needed.
+ */
+static __rte_always_inline struct mlx5_wqe_dseg *
+mlx5_tx_dseg_vlan(struct mlx5_txq_data *__rte_restrict txq,
+ struct mlx5_txq_local *__rte_restrict loc __rte_unused,
+ struct mlx5_wqe_dseg *__rte_restrict dseg,
+ uint8_t *buf,
+ unsigned int len,
+ unsigned int olx __rte_unused)
+
+{
+ unsigned int part;
+ uint8_t *pdst;
+
+ MLX5_ASSERT(len > MLX5_ESEG_MIN_INLINE_SIZE);
+ static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
+ (2 * RTE_ETHER_ADDR_LEN),
+ "invalid Data Segment data size");
+ if (!MLX5_TXOFF_CONFIG(MPW)) {
+ /* Store the descriptor byte counter for eMPW sessions. */
+ dseg->bcount = rte_cpu_to_be_32
+ ((len + sizeof(struct rte_vlan_hdr)) |
+ MLX5_ETH_WQE_DATA_INLINE);
+ pdst = &dseg->inline_data[0];
+ } else {
+ /* The entire legacy MPW session counter is stored on close. */
+ pdst = (uint8_t *)dseg;
+ }
+ memcpy(pdst, buf, MLX5_DSEG_MIN_INLINE_SIZE);
+ buf += MLX5_DSEG_MIN_INLINE_SIZE;
+ pdst += MLX5_DSEG_MIN_INLINE_SIZE;
+ len -= MLX5_DSEG_MIN_INLINE_SIZE;
+ /* Insert VLAN ethertype + VLAN tag. Pointer is aligned. */
+ MLX5_ASSERT(pdst == RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE));
+ if (unlikely(pdst >= (uint8_t *)txq->wqes_end))
+ pdst = (uint8_t *)txq->wqes;
+ *(uint32_t *)pdst = rte_cpu_to_be_32((RTE_ETHER_TYPE_VLAN << 16) |
+ loc->mbuf->vlan_tci);
+ pdst += sizeof(struct rte_vlan_hdr);
+ /*
+ * The WQEBB space availability is checked by caller.
+ * Here we should be aware of WQE ring buffer wraparound only.
+ */
+ part = (uint8_t *)txq->wqes_end - pdst;
+ part = RTE_MIN(part, len);
+ do {
+ rte_memcpy(pdst, buf, part);
+ len -= part;
+ if (likely(!len)) {
+ pdst += part;
+ if (!MLX5_TXOFF_CONFIG(MPW))
+ pdst = RTE_PTR_ALIGN(pdst, MLX5_WSEG_SIZE);
+ /* Note: no final wraparound check here. */
+ return (struct mlx5_wqe_dseg *)pdst;
+ }
+ pdst = (uint8_t *)txq->wqes;
+ buf += part;
+ part = len;
+ } while (true);
+}
+
+/**
+ * Build the Ethernet Segment with optionally inlined data with
+ * VLAN insertion and following Data Segments (if any) from
+ * multi-segment packet. Used by ordinary send and TSO.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param loc
+ * Pointer to burst routine local context.
+ * @param wqe
+ * Pointer to WQE to fill with built Ethernet/Data Segments.
+ * @param vlan
+ * Length of VLAN header to insert, 0 means no VLAN insertion.
+ * @param inlen
+ * Data length to inline. For TSO this parameter specifies the
+ * exact value, for ordinary send it can be aligned by the
+ * caller to provide better WQE space saving and data buffer
+ * start address alignment. This length includes the VLAN
+ * header being inserted.
+ * @param tso
+ * Zero means ordinary send, inlined data can be extended,
+ * otherwise this is TSO, inlined data length is fixed.
+ * @param olx
+ * Configured Tx offloads mask. It is fully defined at
+ * compile time and may be used for optimization.
+ *
+ * @return
+ * Actual size of built WQE in segments.
+ */
+static __rte_always_inline unsigned int
+mlx5_tx_mseg_build(struct mlx5_txq_data *__rte_restrict txq,
+ struct mlx5_txq_local *__rte_restrict loc,
+ struct mlx5_wqe *__rte_restrict wqe,
+ unsigned int vlan,
+ unsigned int inlen,
+ unsigned int tso,
+ unsigned int olx __rte_unused)
+{
+ struct mlx5_wqe_dseg *__rte_restrict dseg;
+ unsigned int ds;
+
+ MLX5_ASSERT((rte_pktmbuf_pkt_len(loc->mbuf) + vlan) >= inlen);
+ loc->mbuf_nseg = NB_SEGS(loc->mbuf);
+ loc->mbuf_off = 0;
+
+ dseg = mlx5_tx_eseg_mdat(txq, loc, wqe, vlan, inlen, tso, olx);
+ if (!loc->mbuf_nseg)
+ goto dseg_done;
+ /*
+ * There are still some mbufs remaining, not inlined.
+ * The first mbuf may be partially inlined and we
+ * must process the possible non-zero data offset.
+ */
+ if (loc->mbuf_off) {
+ unsigned int dlen;
+ uint8_t *dptr;
+
+ /*
+ * Exhausted packets must have been dropped earlier.
+ * A non-zero offset means there is some data
+ * remaining in the packet.
+ */
+ MLX5_ASSERT(loc->mbuf_off < rte_pktmbuf_data_len(loc->mbuf));
+ MLX5_ASSERT(rte_pktmbuf_data_len(loc->mbuf));
+ dptr = rte_pktmbuf_mtod_offset(loc->mbuf, uint8_t *,
+ loc->mbuf_off);
+ dlen = rte_pktmbuf_data_len(loc->mbuf) - loc->mbuf_off;
+ /*
+ * Build the pointer/minimal data Data Segment.
+ * Do ring buffer wrapping check in advance.
+ */
+ if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
+ dseg = (struct mlx5_wqe_dseg *)txq->wqes;
+ mlx5_tx_dseg_iptr(txq, loc, dseg, dptr, dlen, olx);
+ /* Store the mbuf to be freed on completion. */
+ MLX5_ASSERT(loc->elts_free);
+ txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
+ --loc->elts_free;
+ ++dseg;
+ if (--loc->mbuf_nseg == 0)
+ goto dseg_done;
+ loc->mbuf = loc->mbuf->next;
+ loc->mbuf_off = 0;
+ }
+ do {
+ if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
+ struct rte_mbuf *mbuf;
+
+ /* Zero length segment found, just skip. */
+ mbuf = loc->mbuf;
+ loc->mbuf = loc->mbuf->next;
+ rte_pktmbuf_free_seg(mbuf);
+ if (--loc->mbuf_nseg == 0)
+ break;
+ } else {
+ if ((uintptr_t)dseg >= (uintptr_t)txq->wqes_end)
+ dseg = (struct mlx5_wqe_dseg *)txq->wqes;
+ mlx5_tx_dseg_iptr
+ (txq, loc, dseg,
+ rte_pktmbuf_mtod(loc->mbuf, uint8_t *),
+ rte_pktmbuf_data_len(loc->mbuf), olx);
+ MLX5_ASSERT(loc->elts_free);
+ txq->elts[txq->elts_head++ & txq->elts_m] = loc->mbuf;
+ --loc->elts_free;
+ ++dseg;
+ if (--loc->mbuf_nseg == 0)
+ break;
+ loc->mbuf = loc->mbuf->next;
+ }
+ } while (true);
+
+dseg_done:
+ /* Calculate actual segments used from the dseg pointer. */
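+ /*
+ * Example (assuming MLX5_WQE_SIZE == 64 and MLX5_WSEG_SIZE == 16):
+ * with a 256-WQEBB ring, wqe at byte offset 16320 and dseg wrapped
+ * to byte offset 32, ds = (32 - 16320 + 256 * 64) / 16 = 6 segments.
+ */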
+ if ((uintptr_t)wqe < (uintptr_t)dseg)
+ ds = ((uintptr_t)dseg - (uintptr_t)wqe) / MLX5_WSEG_SIZE;
+ else
+ ds = (((uintptr_t)dseg - (uintptr_t)wqe) +
+ txq->wqe_s * MLX5_WQE_SIZE) / MLX5_WSEG_SIZE;
+ return ds;
+}
+
+/**
+ * The routine checks the timestamp flag in the current packet
+ * and pushes a WAIT WQE into the queue if scheduling is required.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param loc
+ * Pointer to burst routine local context.
+ * @param olx
+ * Configured Tx offloads mask. It is fully defined at
+ * compile time and may be used for optimization.
+ *
+ * @return
+ * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
+ * MLX5_TXCMP_CODE_SINGLE - continue processing with the packet.
+ * MLX5_TXCMP_CODE_MULTI - the WAIT WQE was inserted, continue processing.
+ * Local context variables partially updated.
+ */
+static __rte_always_inline enum mlx5_txcmp_code
+mlx5_tx_schedule_send(struct mlx5_txq_data *__rte_restrict txq,
+ struct mlx5_txq_local *__rte_restrict loc,
+ unsigned int olx)
+{
+ if (MLX5_TXOFF_CONFIG(TXPP) &&
+ loc->mbuf->ol_flags & txq->ts_mask) {
+ struct mlx5_wqe *wqe;
+ uint64_t ts;
+ int32_t wci;
+
+ /*
+ * Estimate the required space quickly and roughly.
+ * We would like to ensure the packet can be pushed
+ * to the queue and we won't get an orphan WAIT WQE.
+ */
+ if (loc->wqe_free <= MLX5_WQE_SIZE_MAX / MLX5_WQE_SIZE ||
+ loc->elts_free < NB_SEGS(loc->mbuf))
+ return MLX5_TXCMP_CODE_EXIT;
+ /* Convert the timestamp into completion to wait. */
+ ts = *RTE_MBUF_DYNFIELD(loc->mbuf, txq->ts_offset, uint64_t *);
+ wci = mlx5_txpp_convert_tx_ts(txq->sh, ts);
+ if (unlikely(wci < 0))
+ return MLX5_TXCMP_CODE_SINGLE;
+ /* Build the WAIT WQE with specified completion. */
+ wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
+ mlx5_tx_cseg_init(txq, loc, wqe, 2, MLX5_OPCODE_WAIT, olx);
+ mlx5_tx_wseg_init(txq, loc, wqe, wci, olx);
+ ++txq->wqe_ci;
+ --loc->wqe_free;
+ return MLX5_TXCMP_CODE_MULTI;
+ }
+ return MLX5_TXCMP_CODE_SINGLE;
+}
+
+/**
+ * Tx one packet function for multi-segment TSO. Supports all
+ * types of Tx offloads, uses MLX5_OPCODE_TSO to build WQEs,
+ * sends one packet per WQE.
+ *
+ * This routine is responsible for storing the processed mbuf
+ * into the elts ring buffer and updating elts_head.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param loc
+ * Pointer to burst routine local context.
+ * @param olx
+ * Configured Tx offloads mask. It is fully defined at
+ * compile time and may be used for optimization.
+ *
+ * @return
+ * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
+ * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
+ * Local context variables partially updated.
+ */
+static __rte_always_inline enum mlx5_txcmp_code
+mlx5_tx_packet_multi_tso(struct mlx5_txq_data *__rte_restrict txq,
+ struct mlx5_txq_local *__rte_restrict loc,
+ unsigned int olx)
+{
+ struct mlx5_wqe *__rte_restrict wqe;
+ unsigned int ds, dlen, inlen, ntcp, vlan = 0;
+
+ if (MLX5_TXOFF_CONFIG(TXPP)) {
+ enum mlx5_txcmp_code wret;
+
+ /* Generate WAIT for scheduling if requested. */
+ wret = mlx5_tx_schedule_send(txq, loc, olx);
+ if (wret == MLX5_TXCMP_CODE_EXIT)
+ return MLX5_TXCMP_CODE_EXIT;
+ if (wret == MLX5_TXCMP_CODE_ERROR)
+ return MLX5_TXCMP_CODE_ERROR;
+ }
+ /*
+ * Calculate data length to be inlined to estimate
+ * the required space in WQE ring buffer.
+ */
+ dlen = rte_pktmbuf_pkt_len(loc->mbuf);
+ if (MLX5_TXOFF_CONFIG(VLAN) && loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
+ vlan = sizeof(struct rte_vlan_hdr);
+ inlen = loc->mbuf->l2_len + vlan +
+ loc->mbuf->l3_len + loc->mbuf->l4_len;
+ if (unlikely((!inlen || !loc->mbuf->tso_segsz)))
+ return MLX5_TXCMP_CODE_ERROR;
+ if (loc->mbuf->ol_flags & PKT_TX_TUNNEL_MASK)
+ inlen += loc->mbuf->outer_l2_len + loc->mbuf->outer_l3_len;
+ /* Packet must contain all TSO headers. */
+ if (unlikely(inlen > MLX5_MAX_TSO_HEADER ||
+ inlen <= MLX5_ESEG_MIN_INLINE_SIZE ||
+ inlen > (dlen + vlan)))
+ return MLX5_TXCMP_CODE_ERROR;
+ MLX5_ASSERT(inlen >= txq->inlen_mode);
+ /*
+ * Check whether there are enough free WQEBBs:
+ * - Control Segment
+ * - Ethernet Segment
+ * - First Segment of inlined Ethernet data
+ * - ... data continued ...
+ * - Data Segments of pointer/min inline type
+ */
+ ds = NB_SEGS(loc->mbuf) + 2 + (inlen -
+ MLX5_ESEG_MIN_INLINE_SIZE +
+ MLX5_WSEG_SIZE +
+ MLX5_WSEG_SIZE - 1) / MLX5_WSEG_SIZE;
+ if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
+ return MLX5_TXCMP_CODE_EXIT;
+ /* Check for maximal WQE size. */
+ if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
+ return MLX5_TXCMP_CODE_ERROR;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Update sent data bytes/packets counters. */
+ ntcp = (dlen - (inlen - vlan) + loc->mbuf->tso_segsz - 1) /
+ loc->mbuf->tso_segsz;
+ /*
+ * One packet is accounted at the end of
+ * mlx5_tx_burst via the loc->pkts_sent field,
+ * hence the decrement below.
+ */
+ --ntcp;
+ txq->stats.opackets += ntcp;
+ txq->stats.obytes += dlen + vlan + ntcp * inlen;
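+ /*
+ * E.g. dlen = 9000, inlen = 54, tso_segsz = 1400: the 8946-byte
+ * payload yields 7 TCP segments; opackets grows by 6 (one is
+ * accounted via loc->pkts_sent) and obytes by 9000 + 6 * 54,
+ * since headers are replicated in each additional segment.
+ */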
+#endif
+ wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
+ loc->wqe_last = wqe;
+ mlx5_tx_cseg_init(txq, loc, wqe, 0, MLX5_OPCODE_TSO, olx);
+ ds = mlx5_tx_mseg_build(txq, loc, wqe, vlan, inlen, 1, olx);
+ wqe->cseg.sq_ds = rte_cpu_to_be_32(txq->qp_num_8s | ds);
+ txq->wqe_ci += (ds + 3) / 4;
+ loc->wqe_free -= (ds + 3) / 4;
+ return MLX5_TXCMP_CODE_MULTI;
+}
+
+/**
+ * Tx one packet function for multi-segment SEND. Supports all
+ * types of Tx offloads, uses MLX5_OPCODE_SEND to build WQEs,
+ * sends one packet per WQE, without any data inlining in
+ * Ethernet Segment.
+ *
+ * This routine is responsible for storing the processed mbuf
+ * into the elts ring buffer and updating elts_head.
+ *
+ * @param txq
+ * Pointer to TX queue structure.
+ * @param loc
+ * Pointer to burst routine local context.
+ * @param olx
+ * Configured Tx offloads mask. It is fully defined at
+ * compile time and may be used for optimization.
+ *
+ * @return
+ * MLX5_TXCMP_CODE_EXIT - sending is done or impossible.
+ * MLX5_TXCMP_CODE_ERROR - some unrecoverable error occurred.
+ * Local context variables partially updated.
+ */
+static __rte_always_inline enum mlx5_txcmp_code
+mlx5_tx_packet_multi_send(struct mlx5_txq_data *__rte_restrict txq,
+ struct mlx5_txq_local *__rte_restrict loc,
+ unsigned int olx)
+{
+ struct mlx5_wqe_dseg *__rte_restrict dseg;
+ struct mlx5_wqe *__rte_restrict wqe;
+ unsigned int ds, nseg;
+
+ MLX5_ASSERT(NB_SEGS(loc->mbuf) > 1);
+ if (MLX5_TXOFF_CONFIG(TXPP)) {
+ enum mlx5_txcmp_code wret;
+
+ /* Generate WAIT for scheduling if requested. */
+ wret = mlx5_tx_schedule_send(txq, loc, olx);
+ if (wret == MLX5_TXCMP_CODE_EXIT)
+ return MLX5_TXCMP_CODE_EXIT;
+ if (wret == MLX5_TXCMP_CODE_ERROR)
+ return MLX5_TXCMP_CODE_ERROR;
+ }
+ /*
+ * No inlining at all: saving CPU cycles was
+ * prioritized at configuration time, we should not
+ * copy any packet data into the WQE.
+ */
+ nseg = NB_SEGS(loc->mbuf);
+ ds = 2 + nseg;
+ if (unlikely(loc->wqe_free < ((ds + 3) / 4)))
+ return MLX5_TXCMP_CODE_EXIT;
+ /* Check for maximal WQE size. */
+ if (unlikely((MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE) < ((ds + 3) / 4)))
+ return MLX5_TXCMP_CODE_ERROR;
+ /*
+ * Some Tx offloads may cause an error if the
+ * packet is not long enough, check against the
+ * assumed minimal length.
+ */
+ if (rte_pktmbuf_pkt_len(loc->mbuf) <= MLX5_ESEG_MIN_INLINE_SIZE)
+ return MLX5_TXCMP_CODE_ERROR;
+#ifdef MLX5_PMD_SOFT_COUNTERS
+ /* Update sent data bytes counter. */
+ txq->stats.obytes += rte_pktmbuf_pkt_len(loc->mbuf);
+ if (MLX5_TXOFF_CONFIG(VLAN) &&
+ loc->mbuf->ol_flags & PKT_TX_VLAN_PKT)
+ txq->stats.obytes += sizeof(struct rte_vlan_hdr);
+#endif
+ /*
+ * SEND WQE, one WQEBB:
+ * - Control Segment, SEND opcode
+ * - Ethernet Segment, optional VLAN, no inline
+ * - Data Segments, pointer only type
+ */
+ wqe = txq->wqes + (txq->wqe_ci & txq->wqe_m);
+ loc->wqe_last = wqe;
+ mlx5_tx_cseg_init(txq, loc, wqe, ds, MLX5_OPCODE_SEND, olx);
+ mlx5_tx_eseg_none(txq, loc, wqe, olx);
+ dseg = &wqe->dseg[0];
+ do {
+ if (unlikely(!rte_pktmbuf_data_len(loc->mbuf))) {
+ struct rte_mbuf *mbuf;
+