net/virtio: allow virtual address to fill vring descriptors
diff --git a/drivers/net/virtio/virtio_rxtx_simple.c b/drivers/net/virtio/virtio_rxtx_simple.c
index f6c00c1..7b50119 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.c
+++ b/drivers/net/virtio/virtio_rxtx_simple.c
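The hunks below make two related substitutions: descriptor addresses are now taken from MBUF_DATA_DMA_ADDR(mbuf, vq->offset) instead of open-coded buf_physaddr arithmetic, so the queue's offset field decides whether a physical or a virtual mbuf address gets written into the vring (the point of the subject line), and the fixed sizeof(struct virtio_net_hdr) becomes the negotiated vq->hw->vtnet_hdr_size, which is larger when the mergeable-rxbuf header format is in use. A minimal sketch of the offset-based lookup follows, under the assumption that the real macro (defined elsewhere in the driver) has roughly this shape:

/*
 * Sketch only: not necessarily the driver's actual definition.
 * 'offset' selects the mbuf field holding the buffer base address:
 * offsetof(struct rte_mbuf, buf_physaddr) when the device needs IOVAs,
 * offsetof(struct rte_mbuf, buf_addr) when plain virtual addresses are
 * usable (e.g. virtio-user); data_off then points at the packet data.
 */
#define MBUF_DATA_DMA_ADDR(mb, offset) \
	((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (offset))) + (mb)->data_off)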
@@ -80,10 +80,10 @@ virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
        vq->sw_ring[desc_idx] = cookie;
 
        start_dp = vq->vq_ring.desc;
-       start_dp[desc_idx].addr = (uint64_t)((uintptr_t)cookie->buf_physaddr +
-               RTE_PKTMBUF_HEADROOM - sizeof(struct virtio_net_hdr));
+       start_dp[desc_idx].addr = MBUF_DATA_DMA_ADDR(cookie, vq->offset) -
+                                 vq->hw->vtnet_hdr_size;
        start_dp[desc_idx].len = cookie->buf_len -
-               RTE_PKTMBUF_HEADROOM + sizeof(struct virtio_net_hdr);
+               RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
 
        vq->vq_free_cnt--;
        vq->vq_avail_idx++;
@@ -92,17 +92,18 @@ virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
 }
 
 static inline void
-virtio_rxq_rearm_vec(struct virtqueue *rxvq)
+virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
 {
        int i;
        uint16_t desc_idx;
        struct rte_mbuf **sw_ring;
        struct vring_desc *start_dp;
        int ret;
+       struct virtqueue *vq = rxvq->vq;
 
-       desc_idx = rxvq->vq_avail_idx & (rxvq->vq_nentries - 1);
-       sw_ring = &rxvq->sw_ring[desc_idx];
-       start_dp = &rxvq->vq_ring.desc[desc_idx];
+       desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
+       sw_ring = &vq->sw_ring[desc_idx];
+       start_dp = &vq->vq_ring.desc[desc_idx];
 
        ret = rte_mempool_get_bulk(rxvq->mpool, (void **)sw_ring,
                RTE_VIRTIO_VPMD_RX_REARM_THRESH);
@@ -119,15 +120,15 @@ virtio_rxq_rearm_vec(struct virtqueue *rxvq)
                *(uint64_t *)p = rxvq->mbuf_initializer;
 
                start_dp[i].addr =
-                       (uint64_t)((uintptr_t)sw_ring[i]->buf_physaddr +
-                       RTE_PKTMBUF_HEADROOM - sizeof(struct virtio_net_hdr));
+                       MBUF_DATA_DMA_ADDR(sw_ring[i], vq->offset) -
+                       vq->hw->vtnet_hdr_size;
                start_dp[i].len = sw_ring[i]->buf_len -
-                       RTE_PKTMBUF_HEADROOM + sizeof(struct virtio_net_hdr);
+                       RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
        }
 
-       rxvq->vq_avail_idx += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
-       rxvq->vq_free_cnt -= RTE_VIRTIO_VPMD_RX_REARM_THRESH;
-       vq_update_avail_idx(rxvq);
+       vq->vq_avail_idx += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
+       vq->vq_free_cnt -= RTE_VIRTIO_VPMD_RX_REARM_THRESH;
+       vq_update_avail_idx(vq);
 }
 
 /* virtio vPMD receive routine, only accept(nb_pkts >= RTE_VIRTIO_DESC_PER_LOOP)
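In the rearm path the function now receives the RX front-end (struct virtnet_rx) and reaches all vring state through rxvq->vq; the mempool, the mbuf initializer and the per-queue counters stay with the front-end. A rough sketch of the split these hunks assume; the real definition lives in the driver headers and carries more fields:

/* Illustrative layout only; everything beyond the fields used in this
 * file (vq, mpool, mbuf_initializer, stats.packets) is an assumption. */
struct virtnet_rx_sketch {
	struct virtqueue *vq;           /* backing vring: desc/avail/used state */
	struct rte_mempool *mpool;      /* mempool used to refill the RX ring   */
	uint64_t mbuf_initializer;      /* 8-byte rearm template for new mbufs  */
	struct {
		uint64_t packets;       /* incremented as rxvq->stats.packets   */
	} stats;
};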
@@ -143,7 +144,8 @@ uint16_t
 virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
        uint16_t nb_pkts)
 {
-       struct virtqueue *rxvq = rx_queue;
+       struct virtnet_rx *rxvq = rx_queue;
+       struct virtqueue *vq = rxvq->vq;
        uint16_t nb_used;
        uint16_t desc_idx;
        struct vring_used_elem *rused;
@@ -175,15 +177,15 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
        len_adjust = _mm_set_epi16(
                0, 0,
                0,
-               (uint16_t) -sizeof(struct virtio_net_hdr),
-               0, (uint16_t) -sizeof(struct virtio_net_hdr),
+               (uint16_t)-vq->hw->vtnet_hdr_size,
+               0, (uint16_t)-vq->hw->vtnet_hdr_size,
                0, 0);
 
        if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
                return 0;
 
-       nb_used = *(volatile uint16_t *)&rxvq->vq_ring.used->idx -
-               rxvq->vq_used_cons_idx;
+       nb_used = *(volatile uint16_t *)&vq->vq_ring.used->idx -
+               vq->vq_used_cons_idx;
 
        rte_compiler_barrier();
 
@@ -193,17 +195,17 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
        nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_VIRTIO_DESC_PER_LOOP);
        nb_used = RTE_MIN(nb_used, nb_pkts);
 
-       desc_idx = (uint16_t)(rxvq->vq_used_cons_idx & (rxvq->vq_nentries - 1));
-       rused = &rxvq->vq_ring.used->ring[desc_idx];
-       sw_ring  = &rxvq->sw_ring[desc_idx];
-       sw_ring_end = &rxvq->sw_ring[rxvq->vq_nentries];
+       desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
+       rused = &vq->vq_ring.used->ring[desc_idx];
+       sw_ring  = &vq->sw_ring[desc_idx];
+       sw_ring_end = &vq->sw_ring[vq->vq_nentries];
 
        _mm_prefetch((const void *)rused, _MM_HINT_T0);
 
-       if (rxvq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
+       if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
                virtio_rxq_rearm_vec(rxvq);
-               if (unlikely(virtqueue_kick_prepare(rxvq)))
-                       virtqueue_notify(rxvq);
+               if (unlikely(virtqueue_kick_prepare(vq)))
+                       virtqueue_notify(vq);
        }
 
        for (nb_pkts_received = 0;
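After a threshold-sized refill the host is kicked only when virtqueue_kick_prepare(vq) reports that a notification is wanted. A sketch of the usual check, assuming the classic (pre event-idx) notification suppression flag; the driver's real helper may differ:

/* Assumption: classic VRING_USED_F_NO_NOTIFY suppression, no event idx. */
static inline int
virtqueue_kick_prepare_sketch(struct virtqueue *vq)
{
	/* Notify unless the device asked not to be kicked. */
	return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
}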
@@ -286,14 +288,122 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                }
        }
 
-       rxvq->vq_used_cons_idx += nb_pkts_received;
-       rxvq->vq_free_cnt += nb_pkts_received;
-       rxvq->packets += nb_pkts_received;
+       vq->vq_used_cons_idx += nb_pkts_received;
+       vq->vq_free_cnt += nb_pkts_received;
+       rxvq->stats.packets += nb_pkts_received;
        return nb_pkts_received;
 }
 
+#define VIRTIO_TX_FREE_THRESH 32
+#define VIRTIO_TX_MAX_FREE_BUF_SZ 32
+#define VIRTIO_TX_FREE_NR 32
+/* TODO: vq->tx_free_cnt could track the number of free slots so the shift could be avoided */
+static inline void
+virtio_xmit_cleanup(struct virtqueue *vq)
+{
+       uint16_t i, desc_idx;
+       int nb_free = 0;
+       struct rte_mbuf *m, *free[VIRTIO_TX_MAX_FREE_BUF_SZ];
+
+       desc_idx = (uint16_t)(vq->vq_used_cons_idx &
+                  ((vq->vq_nentries >> 1) - 1));
+       m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
+       m = __rte_pktmbuf_prefree_seg(m);
+       if (likely(m != NULL)) {
+               free[0] = m;
+               nb_free = 1;
+               for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
+                       m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
+                       m = __rte_pktmbuf_prefree_seg(m);
+                       if (likely(m != NULL)) {
+                               if (likely(m->pool == free[0]->pool))
+                                       free[nb_free++] = m;
+                               else {
+                                       rte_mempool_put_bulk(free[0]->pool,
+                                               (void **)free, nb_free);
+                                       free[0] = m;
+                                       nb_free = 1;
+                               }
+                       }
+               }
+               rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+       } else {
+               for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
+                       m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
+                       m = __rte_pktmbuf_prefree_seg(m);
+                       if (m != NULL)
+                               rte_mempool_put(m->pool, m);
+               }
+       }
+
+       vq->vq_used_cons_idx += VIRTIO_TX_FREE_NR;
+       vq->vq_free_cnt += (VIRTIO_TX_FREE_NR << 1);
+}
+
+uint16_t
+virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
+       uint16_t nb_pkts)
+{
+       struct virtnet_tx *txvq = tx_queue;
+       struct virtqueue *vq = txvq->vq;
+       uint16_t nb_used;
+       uint16_t desc_idx;
+       struct vring_desc *start_dp;
+       uint16_t nb_tail, nb_commit;
+       int i;
+       uint16_t desc_idx_max = (vq->vq_nentries >> 1) - 1;
+
+       nb_used = VIRTQUEUE_NUSED(vq);
+       rte_compiler_barrier();
+
+       if (nb_used >= VIRTIO_TX_FREE_THRESH)
+               virtio_xmit_cleanup(vq);
+
+       nb_commit = nb_pkts = RTE_MIN((vq->vq_free_cnt >> 1), nb_pkts);
+       desc_idx = (uint16_t)(vq->vq_avail_idx & desc_idx_max);
+       start_dp = vq->vq_ring.desc;
+       nb_tail = (uint16_t) (desc_idx_max + 1 - desc_idx);
+
+       if (nb_commit >= nb_tail) {
+               for (i = 0; i < nb_tail; i++)
+                       vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
+               for (i = 0; i < nb_tail; i++) {
+                       start_dp[desc_idx].addr =
+                               MBUF_DATA_DMA_ADDR(*tx_pkts, vq->offset);
+                       start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
+                       tx_pkts++;
+                       desc_idx++;
+               }
+               nb_commit -= nb_tail;
+               desc_idx = 0;
+       }
+       for (i = 0; i < nb_commit; i++)
+               vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
+       for (i = 0; i < nb_commit; i++) {
+               start_dp[desc_idx].addr =
+                       MBUF_DATA_DMA_ADDR(*tx_pkts, vq->offset);
+               start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
+               tx_pkts++;
+               desc_idx++;
+       }
+
+       rte_compiler_barrier();
+
+       vq->vq_free_cnt -= (uint16_t)(nb_pkts << 1);
+       vq->vq_avail_idx += nb_pkts;
+       vq->vq_ring.avail->idx = vq->vq_avail_idx;
+       txvq->stats.packets += nb_pkts;
+
+       if (likely(nb_pkts)) {
+               if (unlikely(virtqueue_kick_prepare(vq)))
+                       virtqueue_notify(vq);
+       }
+
+       return nb_pkts;
+}
+
 int __attribute__((cold))
-virtio_rxq_vec_setup(struct virtqueue *rxq)
+virtio_rxq_vec_setup(struct virtnet_rx *rxq)
 {
        uintptr_t p;
        struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
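
A note on the simple TX path added above: each transmitted packet occupies a pair of descriptors, a data descriptor in the first half of the ring chained to a header descriptor that is presumably pre-initialized at queue setup time, which is why vq_free_cnt moves by nb_pkts << 1 while vq_avail_idx moves by nb_pkts, and why data descriptor indexes wrap at (vq_nentries >> 1) - 1. virtio_xmit_cleanup() then returns completed mbufs to their mempools in pool-grouped batches; the same pattern as a standalone sketch, with a hypothetical helper name and batch size:

#include <rte_common.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Sketch of the pool-grouped bulk free used by virtio_xmit_cleanup(). */
static inline void
tx_free_completed_sketch(struct rte_mbuf **mbufs, uint16_t n)
{
	struct rte_mbuf *m, *free[32];
	unsigned int nb_free = 0;
	uint16_t i;

	for (i = 0; i < n; i++) {
		m = __rte_pktmbuf_prefree_seg(mbufs[i]);
		if (m == NULL)
			continue;	/* mbuf still referenced elsewhere */
		/* Flush the batch when the pool changes or the array fills. */
		if (nb_free > 0 &&
		    (m->pool != free[0]->pool || nb_free == RTE_DIM(free))) {
			rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
			nb_free = 0;
		}
		free[nb_free++] = m;
	}
	if (nb_free > 0)
		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
}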