++txq->wqe_ci;
}
+/**
+ * Write an inline WQE.
+ *
+ * @param txq
+ *   Pointer to TX queue structure.
+ * @param wqe
+ *   Pointer to the WQE to fill.
+ * @param addr
+ *   Buffer data address.
+ * @param length
+ *   Packet length.
+ */
+static inline void
+mlx5_wqe_write_inline(struct txq *txq, volatile union mlx5_wqe *wqe,
+		      uintptr_t addr, uint32_t length)
+{
+	uint32_t size;
+	uint16_t wqe_cnt = txq->wqe_n - 1;
+	uint16_t wqe_ci = txq->wqe_ci + 1;
+
+	/* Copy the first 16 bytes into inline header. */
+	rte_memcpy((void *)(uintptr_t)wqe->inl.eseg.inline_hdr_start,
+		   (void *)(uintptr_t)addr,
+		   MLX5_ETH_INLINE_HEADER_SIZE);
+	addr += MLX5_ETH_INLINE_HEADER_SIZE;
+	length -= MLX5_ETH_INLINE_HEADER_SIZE;
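+	/*
+	 * WQE size in units of 16 bytes: three units for the control and
+	 * Ethernet segments, plus the 4-byte inline byte count and the
+	 * remaining inline data, rounded up.
+	 */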
+	size = 3 + ((4 + length + 15) / 16);
+	wqe->inl.byte_cnt = htonl(length | MLX5_INLINE_SEG);
+	rte_memcpy((void *)(uintptr_t)&wqe->inl.data[0],
+		   (void *)addr, MLX5_WQE64_INL_DATA);
+	addr += MLX5_WQE64_INL_DATA;
+	length -= MLX5_WQE64_INL_DATA;
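+	/*
+	 * Data that does not fit in this WQE spills over into the
+	 * following WQEs, wrapping around at the end of the ring.
+	 */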
+	while (length) {
+		volatile union mlx5_wqe *wqe_next =
+			&(*txq->wqes)[wqe_ci & wqe_cnt];
+		uint32_t copy_bytes = (length > sizeof(*wqe)) ?
+				      sizeof(*wqe) :
+				      length;
+
+		rte_mov64((uint8_t *)(uintptr_t)&wqe_next->data[0],
+			  (uint8_t *)addr);
+		addr += copy_bytes;
+		length -= copy_bytes;
+		++wqe_ci;
+	}
+	assert(size < 64);
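+	/* Control segment: WQE index and opcode, QP number and WQE size. */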
+	wqe->inl.ctrl.data[0] = htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND);
+	wqe->inl.ctrl.data[1] = htonl(txq->qp_num_8s | size);
+	wqe->inl.ctrl.data[3] = 0;
+	wqe->inl.eseg.rsvd0 = 0;
+	wqe->inl.eseg.rsvd1 = 0;
+	wqe->inl.eseg.mss = 0;
+	wqe->inl.eseg.rsvd2 = 0;
+	wqe->inl.eseg.inline_hdr_sz = htons(MLX5_ETH_INLINE_HEADER_SIZE);
+	/* Increment consumer index. */
+	txq->wqe_ci = wqe_ci;
+}
+
+/**
+ * Write an inline WQE with VLAN insertion.
+ *
+ * @param txq
+ *   Pointer to TX queue structure.
+ * @param wqe
+ *   Pointer to the WQE to fill.
+ * @param addr
+ *   Buffer data address.
+ * @param length
+ *   Packet length.
+ * @param vlan_tci
+ *   VLAN field to insert in packet.
+ */
+static inline void
+mlx5_wqe_write_inline_vlan(struct txq *txq, volatile union mlx5_wqe *wqe,
+			   uintptr_t addr, uint32_t length, uint16_t vlan_tci)
+{
+	uint32_t size;
+	uint32_t wqe_cnt = txq->wqe_n - 1;
+	uint16_t wqe_ci = txq->wqe_ci + 1;
+	uint32_t vlan = htonl(0x81000000 | vlan_tci);
+
+	/*
+	 * Copy 12 bytes of source & destination MAC address.
+	 * Copy 4 bytes of VLAN.
+	 * Copy 2 bytes of Ether type.
+	 */
+	rte_memcpy((uint8_t *)(uintptr_t)wqe->inl.eseg.inline_hdr_start,
+		   (uint8_t *)addr, 12);
+	rte_memcpy((uint8_t *)(uintptr_t)wqe->inl.eseg.inline_hdr_start + 12,
+		   &vlan, sizeof(vlan));
+	rte_memcpy((uint8_t *)(uintptr_t)wqe->inl.eseg.inline_hdr_start + 16,
+		   ((uint8_t *)addr + 12), 2);
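+	/*
+	 * Advance past only the bytes actually read from the packet; the
+	 * VLAN tag was built from vlan_tci, not copied from the buffer.
+	 */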
+	addr += MLX5_ETH_VLAN_INLINE_HEADER_SIZE - sizeof(vlan);
+	length -= MLX5_ETH_VLAN_INLINE_HEADER_SIZE - sizeof(vlan);
+	size = (sizeof(wqe->inl.ctrl.ctrl) +
+		sizeof(wqe->inl.eseg) +
+		sizeof(wqe->inl.byte_cnt) +
+		length + 15) / 16;
+	wqe->inl.byte_cnt = htonl(length | MLX5_INLINE_SEG);
+	rte_memcpy((void *)(uintptr_t)&wqe->inl.data[0],
+		   (void *)addr, MLX5_WQE64_INL_DATA);
+	addr += MLX5_WQE64_INL_DATA;
+	length -= MLX5_WQE64_INL_DATA;
+	while (length) {
+		volatile union mlx5_wqe *wqe_next =
+			&(*txq->wqes)[wqe_ci & wqe_cnt];
+		uint32_t copy_bytes = (length > sizeof(*wqe)) ?
+				      sizeof(*wqe) :
+				      length;
+
+		rte_mov64((uint8_t *)(uintptr_t)&wqe_next->data[0],
+			  (uint8_t *)addr);
+		addr += copy_bytes;
+		length -= copy_bytes;
+		++wqe_ci;
+	}
+	assert(size < 64);
+	wqe->inl.ctrl.data[0] = htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND);
+	wqe->inl.ctrl.data[1] = htonl(txq->qp_num_8s | size);
+	wqe->inl.ctrl.data[3] = 0;
+	wqe->inl.eseg.rsvd0 = 0;
+	wqe->inl.eseg.rsvd1 = 0;
+	wqe->inl.eseg.mss = 0;
+	wqe->inl.eseg.rsvd2 = 0;
+	wqe->inl.eseg.inline_hdr_sz = htons(MLX5_ETH_VLAN_INLINE_HEADER_SIZE);
+	/* Increment consumer index. */
+	txq->wqe_ci = wqe_ci;
+}
+
/**
* Ring TX queue doorbell.
*
rte_prefetch0(cqe);
}
+/**
+ * Prefetch a WQE.
+ *
+ * @param txq
+ *   Pointer to TX queue structure.
+ * @param ci
+ *   WQE consumer index.
+ */
+static inline void
+tx_prefetch_wqe(struct txq *txq, uint16_t ci)
+{
+	volatile union mlx5_wqe *wqe;
+
+	wqe = &(*txq->wqes)[ci & (txq->wqe_n - 1)];
+	rte_prefetch0(wqe);
+}
+
/**
* DPDK callback for TX.
*
return i;
}
+/**
+ * DPDK callback for TX with inline support.
+ *
+ * @param dpdk_txq
+ *   Generic pointer to TX queue structure.
+ * @param[in] pkts
+ *   Packets to transmit.
+ * @param pkts_n
+ *   Number of packets in array.
+ *
+ * @return
+ *   Number of packets successfully transmitted (<= pkts_n).
+ */
+uint16_t
+mlx5_tx_burst_inline(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+{
+	struct txq *txq = (struct txq *)dpdk_txq;
+	uint16_t elts_head = txq->elts_head;
+	const unsigned int elts_n = txq->elts_n;
+	unsigned int i;
+	unsigned int max;
+	unsigned int comp;
+	volatile union mlx5_wqe *wqe;
+	struct rte_mbuf *buf;
+	unsigned int max_inline = txq->max_inline;
+
+	if (unlikely(!pkts_n))
+		return 0;
+	buf = pkts[0];
+	/* Prefetch first packet cacheline. */
+	tx_prefetch_cqe(txq, txq->cq_ci);
+	tx_prefetch_cqe(txq, txq->cq_ci + 1);
+	rte_prefetch0(buf);
+	/* Start processing. */
+	txq_complete(txq);
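+	/*
+	 * Compute the number of free entries in the elts ring; the masked
+	 * indexes may have wrapped, hence the correction below.
+	 */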
+	max = (elts_n - (elts_head - txq->elts_tail));
+	if (max > elts_n)
+		max -= elts_n;
+	assert(max >= 1);
+	assert(max <= elts_n);
+	/* Always leave one free entry in the ring. */
+	--max;
+	if (max == 0)
+		return 0;
+	if (max > pkts_n)
+		max = pkts_n;
+	for (i = 0; (i != max); ++i) {
+		unsigned int elts_head_next = (elts_head + 1) & (elts_n - 1);
+		uintptr_t addr;
+		uint32_t length;
+		uint32_t lkey;
+
+		wqe = &(*txq->wqes)[txq->wqe_ci & (txq->wqe_n - 1)];
+		tx_prefetch_wqe(txq, txq->wqe_ci);
+		tx_prefetch_wqe(txq, txq->wqe_ci + 1);
+		if (i + 1 < max)
+			rte_prefetch0(pkts[i + 1]);
+		/* Should we enable HW CKSUM offload */
+		if (buf->ol_flags &
+		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
+			wqe->inl.eseg.cs_flags =
+				MLX5_ETH_WQE_L3_CSUM |
+				MLX5_ETH_WQE_L4_CSUM;
+		} else {
+			wqe->inl.eseg.cs_flags = 0;
+		}
+		/* Retrieve buffer information. */
+		addr = rte_pktmbuf_mtod(buf, uintptr_t);
+		length = DATA_LEN(buf);
+		/* Update element. */
+		(*txq->elts)[elts_head] = buf;
+		/* Prefetch next buffer data. */
+		if (i + 1 < max)
+			rte_prefetch0(rte_pktmbuf_mtod(pkts[i + 1],
+						       volatile void *));
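+		/*
+		 * Packets that fit within the configured inline threshold
+		 * are copied into the WQE; larger ones are sent by address
+		 * and memory region key.
+		 */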
+		if (length <= max_inline) {
+			if (buf->ol_flags & PKT_TX_VLAN_PKT)
+				mlx5_wqe_write_inline_vlan(txq, wqe,
+							   addr, length,
+							   buf->vlan_tci);
+			else
+				mlx5_wqe_write_inline(txq, wqe, addr, length);
+		} else {
+			/* Retrieve Memory Region key for this memory pool. */
+			lkey = txq_mp2mr(txq, txq_mb2mp(buf));
+			if (buf->ol_flags & PKT_TX_VLAN_PKT)
+				mlx5_wqe_write_vlan(txq, wqe, addr, length,
+						    lkey, buf->vlan_tci);
+			else
+				mlx5_wqe_write(txq, wqe, addr, length, lkey);
+		}
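+		/*
+		 * No completion is requested for this WQE by default; the
+		 * last WQE of the burst may request one below.
+		 */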
+		wqe->inl.ctrl.data[2] = 0;
+		elts_head = elts_head_next;
+		/* Read the next buffer only if another iteration follows. */
+		if (i + 1 < max)
+			buf = pkts[i + 1];
+#ifdef MLX5_PMD_SOFT_COUNTERS
+		/* Increment sent bytes counter. */
+		txq->stats.obytes += length;
+#endif
+	}
+	/* Take a shortcut if nothing must be sent. */
+	if (unlikely(i == 0))
+		return 0;
+	/* Check whether completion threshold has been reached. */
+	comp = txq->elts_comp + i;
+	if (comp >= MLX5_TX_COMP_THRESH) {
+		/* Request completion on last WQE. */
+		wqe->inl.ctrl.data[2] = htonl(8);
+		/* Save elts_head in unused "immediate" field of WQE. */
+		wqe->inl.ctrl.data[3] = elts_head;
+		txq->elts_comp = 0;
+	} else {
+		txq->elts_comp = comp;
+	}
+#ifdef MLX5_PMD_SOFT_COUNTERS
+	/* Increment sent packets counter. */
+	txq->stats.opackets += i;
+#endif
+	/* Ring QP doorbell. */
+	mlx5_tx_dbrec(txq);
+	txq->elts_head = elts_head;
+	return i;
+}
+
/**
* Translate RX completion flags to packet type.
*