-/**
- * Fill in buffer descriptors in a multi-packet send descriptor.
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param dseg
- * Pointer to buffer descriptor to be written.
- * @param pkts
- * Pointer to array of packets to be sent.
- * @param n
- * Number of buffer descriptors to be filled, one per packet.
- */
-static inline void
-txq_wr_dseg_v(struct mlx5_txq_data *txq, uint8_t *dseg,
- struct rte_mbuf **pkts, unsigned int n)
-{
- unsigned int pos;
- uintptr_t addr;
- const uint8x16_t dseg_shuf_m = {
- 3, 2, 1, 0, /* length, bswap32 */
- 4, 5, 6, 7, /* lkey */
- 15, 14, 13, 12, /* addr, bswap64 */
- 11, 10, 9, 8
- };
-#ifdef MLX5_PMD_SOFT_COUNTERS
- uint32_t tx_byte = 0;
-#endif
-
- for (pos = 0; pos < n; ++pos, dseg += MLX5_WQE_DWORD_SIZE) {
- uint8x16_t desc;
- struct rte_mbuf *pkt = pkts[pos];
-
- addr = rte_pktmbuf_mtod(pkt, uintptr_t);
- desc = vreinterpretq_u8_u32((uint32x4_t) {
- DATA_LEN(pkt),
- mlx5_tx_mb2mr(txq, pkt),
- addr,
- addr >> 32 });
- desc = vqtbl1q_u8(desc, dseg_shuf_m);
- vst1q_u8(dseg, desc);
-#ifdef MLX5_PMD_SOFT_COUNTERS
- tx_byte += DATA_LEN(pkt);
-#endif
- }
-#ifdef MLX5_PMD_SOFT_COUNTERS
- txq->stats.obytes += tx_byte;
-#endif
-}
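
For reference, the shuffle above amounts to the scalar stores below. This is only a sketch, assuming the struct mlx5_wqe_data_seg layout (byte_count, lkey, addr) from the mlx5 PRM headers and that mlx5_tx_mb2mr() already returns the lkey in big-endian byte order:

static inline void
txq_wr_dseg_scalar(struct mlx5_txq_data *txq, uint8_t *dseg,
		   struct rte_mbuf *pkt)
{
	struct mlx5_wqe_data_seg *ds = (struct mlx5_wqe_data_seg *)dseg;

	/* Data length of the segment, byte-swapped to big-endian. */
	ds->byte_count = rte_cpu_to_be_32(DATA_LEN(pkt));
	/* Memory region key, assumed to be returned already in big-endian. */
	ds->lkey = mlx5_tx_mb2mr(txq, pkt);
	/* Buffer address, byte-swapped to big-endian. */
	ds->addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(pkt, uintptr_t));
}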
-
-/**
- * Send multi-segmented packets until a single-segment packet is encountered
- * in the pkts list.
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param pkts
- * Pointer to array of packets to be sent.
- * @param pkts_n
- * Number of packets to be sent.
- *
- * @return
- * Number of packets successfully transmitted (<= pkts_n).
- */
-static uint16_t
-txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
- uint16_t pkts_n)
-{
- uint16_t elts_head = txq->elts_head;
- const uint16_t elts_n = 1 << txq->elts_n;
- const uint16_t elts_m = elts_n - 1;
- const uint16_t wq_n = 1 << txq->wqe_n;
- const uint16_t wq_mask = wq_n - 1;
- const unsigned int nb_dword_per_wqebb =
- MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
- const unsigned int nb_dword_in_hdr =
- sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
- unsigned int n;
- volatile struct mlx5_wqe *wqe = NULL;
-
- assert(elts_n > pkts_n);
- mlx5_tx_complete(txq);
- /* A CQE slot must always be available. */
- assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
- if (unlikely(!pkts_n))
- return 0;
- for (n = 0; n < pkts_n; ++n) {
- struct rte_mbuf *buf = pkts[n];
- unsigned int segs_n = buf->nb_segs;
- unsigned int ds = nb_dword_in_hdr;
- unsigned int len = PKT_LEN(buf);
- uint16_t wqe_ci = txq->wqe_ci;
- const uint8x16_t ctrl_shuf_m = {
- 3, 2, 1, 0, /* bswap32 */
- 7, 6, 5, 4, /* bswap32 */
- 11, 10, 9, 8, /* bswap32 */
- 12, 13, 14, 15
- };
- uint8_t cs_flags;
- uint16_t max_elts;
- uint16_t max_wqe;
- uint8x16_t *t_wqe;
- uint8_t *dseg;
- uint8x16_t ctrl;
-
- assert(segs_n);
- max_elts = elts_n - (elts_head - txq->elts_tail);
- max_wqe = wq_n - (txq->wqe_ci - txq->wqe_pi);
- /*
- * An MPW session consumes at most 2 WQEs to
- * include MLX5_MPW_DSEG_MAX pointers.
- */
- if (segs_n == 1 ||
- max_elts < segs_n || max_wqe < 2)
- break;
- wqe = &((volatile struct mlx5_wqe64 *)
- txq->wqes)[wqe_ci & wq_mask].hdr;
- cs_flags = txq_ol_cksum_to_cs(txq, buf);
- /* Title WQEBB pointer. */
- t_wqe = (uint8x16_t *)wqe;
- dseg = (uint8_t *)(wqe + 1);
- do {
- if (!(ds++ % nb_dword_per_wqebb)) {
- dseg = (uint8_t *)
- &((volatile struct mlx5_wqe64 *)
- txq->wqes)[++wqe_ci & wq_mask];
- }
- txq_wr_dseg_v(txq, dseg, &buf, 1);
- dseg += MLX5_WQE_DWORD_SIZE;
- (*txq->elts)[elts_head++ & elts_m] = buf;
- buf = buf->next;
- } while (--segs_n);
- ++wqe_ci;
- /* Fill CTRL in the header. */
- ctrl = vreinterpretq_u8_u32((uint32x4_t) {
- MLX5_OPC_MOD_MPW << 24 |
- txq->wqe_ci << 8 | MLX5_OPCODE_TSO,
- txq->qp_num_8s | ds, 0, 0});
- ctrl = vqtbl1q_u8(ctrl, ctrl_shuf_m);
- vst1q_u8((void *)t_wqe, ctrl);
- /* Fill ESEG in the header. */
- vst1q_u16((void *)(t_wqe + 1),
- (uint16x8_t) { 0, 0, cs_flags, rte_cpu_to_be_16(len),
- 0, 0, 0, 0 });
- txq->wqe_ci = wqe_ci;
- }
- if (!n)
- return 0;
- txq->elts_comp += (uint16_t)(elts_head - txq->elts_head);
- txq->elts_head = elts_head;
- if (txq->elts_comp >= MLX5_TX_COMP_THRESH) {
- wqe->ctrl[2] = rte_cpu_to_be_32(8);
- wqe->ctrl[3] = txq->elts_head;
- txq->elts_comp = 0;
-#ifndef NDEBUG
- ++txq->cq_pi;
-#endif
- }
-#ifdef MLX5_PMD_SOFT_COUNTERS
- txq->stats.opackets += n;
-#endif
- mlx5_tx_dbrec(txq, wqe);
- return n;
-}
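
For readability, the ctrl vector and ctrl_shuf_m shuffle above are equivalent to the scalar stores sketched below; the first three control dwords are byte-swapped to big-endian while the last one is kept in host order. The helper name is hypothetical and used only for illustration:

/* Scalar sketch of the title control segment built by the vector code above. */
static inline void
mpw_fill_ctrl_scalar(struct mlx5_txq_data *txq, volatile struct mlx5_wqe *wqe,
		     unsigned int ds)
{
	/* Opcode modifier, title WQE index and opcode, big-endian. */
	wqe->ctrl[0] = rte_cpu_to_be_32(MLX5_OPC_MOD_MPW << 24 |
					txq->wqe_ci << 8 | MLX5_OPCODE_TSO);
	/* QP number and number of 16-byte descriptors, big-endian. */
	wqe->ctrl[1] = rte_cpu_to_be_32(txq->qp_num_8s | ds);
	/* Completion fields, overwritten when a completion is requested. */
	wqe->ctrl[2] = 0;
	wqe->ctrl[3] = 0;
}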
-
-/**
- * Send a burst of packets with Enhanced MPW. If a multi-segment packet is
- * encountered, return so that txq_scatter_v() can process it. All packets in
- * the pkts list must be single-segment packets with the same offload flags,
- * as verified by txq_count_contig_single_seg() and txq_calc_offload().
- *
- * @param txq
- * Pointer to TX queue structure.
- * @param pkts
- * Pointer to array of packets to be sent.
- * @param pkts_n
- * Number of packets to be sent (<= MLX5_VPMD_TX_MAX_BURST).
- * @param cs_flags
- * Checksum offload flags to be written in the descriptor.
- *
- * @return
- * Number of packets successfully transmitted (<= pkts_n).
- */
-static inline uint16_t
-txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
- uint8_t cs_flags)
-{
- struct rte_mbuf **elts;
- uint16_t elts_head = txq->elts_head;
- const uint16_t elts_n = 1 << txq->elts_n;
- const uint16_t elts_m = elts_n - 1;
- const unsigned int nb_dword_per_wqebb =
- MLX5_WQE_SIZE / MLX5_WQE_DWORD_SIZE;
- const unsigned int nb_dword_in_hdr =
- sizeof(struct mlx5_wqe) / MLX5_WQE_DWORD_SIZE;
- unsigned int n = 0;
- unsigned int pos;
- uint16_t max_elts;
- uint16_t max_wqe;
- uint32_t comp_req = 0;
- const uint16_t wq_n = 1 << txq->wqe_n;
- const uint16_t wq_mask = wq_n - 1;
- uint16_t wq_idx = txq->wqe_ci & wq_mask;
- volatile struct mlx5_wqe64 *wq =
- &((volatile struct mlx5_wqe64 *)txq->wqes)[wq_idx];
- volatile struct mlx5_wqe *wqe = (volatile struct mlx5_wqe *)wq;
- const uint8x16_t ctrl_shuf_m = {
- 3, 2, 1, 0, /* bswap32 */
- 7, 6, 5, 4, /* bswap32 */
- 11, 10, 9, 8, /* bswap32 */
- 12, 13, 14, 15
- };
- uint8x16_t *t_wqe;
- uint8_t *dseg;
- uint8x16_t ctrl;
-
- /* Make sure all packets can fit into a single WQE. */
- assert(elts_n > pkts_n);
- mlx5_tx_complete(txq);
- max_elts = (elts_n - (elts_head - txq->elts_tail));
- /* A CQE slot must always be available. */
- assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
- max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
- pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
- if (unlikely(!pkts_n))
- return 0;
- elts = &(*txq->elts)[elts_head & elts_m];
- /* Loop for available tailroom first. */
- n = RTE_MIN(elts_n - (elts_head & elts_m), pkts_n);
- for (pos = 0; pos < (n & -2); pos += 2)
- vst1q_u64((void *)&elts[pos], vld1q_u64((void *)&pkts[pos]));
- if (n & 1)
- elts[pos] = pkts[pos];
- /* Check if it crosses the end of the queue. */
- if (unlikely(n < pkts_n)) {
- elts = &(*txq->elts)[0];
- for (pos = 0; pos < pkts_n - n; ++pos)
- elts[pos] = pkts[n + pos];
- }
- txq->elts_head += pkts_n;
- /* Save title WQEBB pointer. */
- t_wqe = (uint8x16_t *)wqe;
- dseg = (uint8_t *)(wqe + 1);
- /* Calculate the number of entries to the end. */
- n = RTE_MIN(
- (wq_n - wq_idx) * nb_dword_per_wqebb - nb_dword_in_hdr,
- pkts_n);
- /* Fill DSEGs. */
- txq_wr_dseg_v(txq, dseg, pkts, n);
- /* Check if it crosses the end of the queue. */
- if (n < pkts_n) {
- dseg = (uint8_t *)txq->wqes;
- txq_wr_dseg_v(txq, dseg, &pkts[n], pkts_n - n);
- }
- if (txq->elts_comp + pkts_n < MLX5_TX_COMP_THRESH) {
- txq->elts_comp += pkts_n;
- } else {
- /* Request a completion. */
- txq->elts_comp = 0;
-#ifndef NDEBUG
- ++txq->cq_pi;
-#endif
- comp_req = 8;
- }
- /* Fill CTRL in the header. */
- ctrl = vreinterpretq_u8_u32((uint32x4_t) {
- MLX5_OPC_MOD_ENHANCED_MPSW << 24 |
- txq->wqe_ci << 8 | MLX5_OPCODE_ENHANCED_MPSW,
- txq->qp_num_8s | (pkts_n + 2),
- comp_req,
- txq->elts_head });
- ctrl = vqtbl1q_u8(ctrl, ctrl_shuf_m);
- vst1q_u8((void *)t_wqe, ctrl);
- /* Fill ESEG in the header. */
- vst1q_u8((void *)(t_wqe + 1),
- (uint8x16_t) { 0, 0, 0, 0,
- cs_flags, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0 });
-#ifdef MLX5_PMD_SOFT_COUNTERS
- txq->stats.opackets += pkts_n;
-#endif
- txq->wqe_ci += (nb_dword_in_hdr + pkts_n + (nb_dword_per_wqebb - 1)) /
- nb_dword_per_wqebb;
- /* Ring QP doorbell. */
- mlx5_tx_dbrec_cond_wmb(txq, wqe, pkts_n < MLX5_VPMD_TX_MAX_BURST);
- return pkts_n;
-}
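
To put the two paths together, here is a minimal caller sketch. The signatures of txq_count_contig_single_seg() and txq_calc_offload(), as well as the sketch name itself, are assumptions; only the helper names appear in the comments above:

static uint16_t
tx_burst_vec_sketch(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
		    uint16_t pkts_n)
{
	uint16_t nb_tx = 0;

	while (pkts_n > nb_tx) {
		uint8_t cs_flags = 0;
		uint16_t n;
		uint16_t ret;

		/* Drain multi-segment packets at the head of the list. */
		if (pkts[nb_tx]->nb_segs > 1)
			nb_tx += txq_scatter_v(txq, &pkts[nb_tx],
					       pkts_n - nb_tx);
		n = RTE_MIN((uint16_t)(pkts_n - nb_tx),
			    MLX5_VPMD_TX_MAX_BURST);
		/* Leading single-segment packets with identical offloads. */
		n = txq_count_contig_single_seg(&pkts[nb_tx], n);
		n = txq_calc_offload(txq, &pkts[nb_tx], n, &cs_flags);
		ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags);
		nb_tx += ret;
		if (!ret)
			break;
	}
	return nb_tx;
}

As a side note on sizing, assuming the usual 64-byte WQEBB and 16-byte descriptor units (nb_dword_per_wqebb = 4, nb_dword_in_hdr = 2), a burst of e.g. 6 single-segment packets consumes (2 + 6 + 3) / 4 = 2 WQEBBs, which is how wqe_ci is advanced at the end of txq_burst_v().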
-