+af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+	struct pkt_tx_queue *txq = queue;
+	struct xsk_umem_info *umem = txq->umem;
+	struct rte_mbuf *mbuf;
+	unsigned long tx_bytes = 0;
+	int i;
+	uint32_t idx_tx;
+	uint16_t count = 0;
+	struct xdp_desc *desc;
+	uint64_t addr, offset;
+
+	/* Drain the completion queue first so the TX ring has room. */
+	pull_umem_cq(umem, nb_pkts);
+
+	for (i = 0; i < nb_pkts; i++) {
+		mbuf = bufs[i];
+
+		if (mbuf->pool == umem->mb_pool) {
+			/* Zero-copy path: the mbuf already lives inside the
+			 * UMEM, so just point a TX descriptor at its data.
+			 * Ownership passes to the kernel until completion.
+			 */
+			if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
+				kick_tx(txq);
+				goto out;
+			}
+			desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
+			desc->len = mbuf->pkt_len;
+			addr = (uint64_t)mbuf - (uint64_t)umem->buffer -
+					umem->mb_pool->header_size;
+			offset = rte_pktmbuf_mtod(mbuf, uint64_t) -
+					(uint64_t)mbuf +
+					umem->mb_pool->header_size;
+			offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
+			desc->addr = addr | offset;
+			count++;
+		} else {
+			/* Copy path: the mbuf belongs to a foreign pool, so
+			 * copy its payload into a freshly allocated UMEM mbuf
+			 * and free the original.
+			 */
+			struct rte_mbuf *local_mbuf =
+					rte_pktmbuf_alloc(umem->mb_pool);
+			void *pkt;
+
+			if (local_mbuf == NULL)
+				goto out;
+
+			if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
+				rte_pktmbuf_free(local_mbuf);
+				kick_tx(txq);
+				goto out;
+			}
+
+			desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
+			desc->len = mbuf->pkt_len;
+
+			addr = (uint64_t)local_mbuf - (uint64_t)umem->buffer -
+					umem->mb_pool->header_size;
+			offset = rte_pktmbuf_mtod(local_mbuf, uint64_t) -
+					(uint64_t)local_mbuf +
+					umem->mb_pool->header_size;
+			pkt = xsk_umem__get_data(umem->buffer, addr + offset);
+			offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
+			desc->addr = addr | offset;
+			rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *),
+					desc->len);
+			rte_pktmbuf_free(mbuf);
+			count++;
+		}
+
+		/* Use desc->len instead of mbuf->pkt_len: in the copy path
+		 * the original mbuf was just freed above, so dereferencing
+		 * it here would be a use-after-free. desc->len was assigned
+		 * from pkt_len before the free in both branches.
+		 */
+		tx_bytes += desc->len;
+	}
+
+	kick_tx(txq);
+
+out:
+	xsk_ring_prod__submit(&txq->tx, count);
+
+	txq->stats.tx_pkts += count;
+	txq->stats.tx_bytes += tx_bytes;
+	txq->stats.tx_dropped += nb_pkts - count;
+
+	return count;
+}
+#else
+static uint16_t
+af_xdp_tx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)