diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 15ef0b0..595f67c 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -91,6 +91,56 @@ is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t qp_nb)
        return (is_tx ^ (idx & 1)) == 0 && idx < qp_nb * VIRTIO_QNUM;
 }
 
+static inline void __attribute__((always_inline))
+do_flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                         uint16_t to, uint16_t from, uint16_t size)
+{
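+       /* batch-copy pending entries from the shadow ring into the
+        * guest-visible used ring, and log the write for live migration
+        */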
+       rte_memcpy(&vq->used->ring[to],
+                       &vq->shadow_used_ring[from],
+                       size * sizeof(struct vring_used_elem));
+       vhost_log_used_vring(dev, vq,
+                       offsetof(struct vring_used, ring[to]),
+                       size * sizeof(struct vring_used_elem));
+}
+
+static inline void __attribute__((always_inline))
+flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+       uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
+
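+       /* copy in one shot if the batch does not wrap past the ring end */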
+       if (used_idx + vq->shadow_used_idx <= vq->size) {
+               do_flush_shadow_used_ring(dev, vq, used_idx, 0,
+                                         vq->shadow_used_idx);
+       } else {
+               uint16_t size;
+
+               /* first flush the tail entries: interval [used_idx, vq->size) */
+               size = vq->size - used_idx;
+               do_flush_shadow_used_ring(dev, vq, used_idx, 0, size);
+
+               /* then the wrapped remainder at the ring head: [0, shadow_used_idx - size) */
+               do_flush_shadow_used_ring(dev, vq, 0, size,
+                                         vq->shadow_used_idx - size);
+       }
+       vq->last_used_idx += vq->shadow_used_idx;
+
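+       /* make the ring entries visible to the guest before updating used->idx */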
+       rte_smp_wmb();
+
+       *(volatile uint16_t *)&vq->used->idx += vq->shadow_used_idx;
+       vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
+               sizeof(vq->used->idx));
+}
+
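+/*
+ * Queue a used ring update in the shadow ring; flush_shadow_used_ring()
+ * later commits all pending updates to the guest in one batch.
+ */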
+static inline void __attribute__((always_inline))
+update_shadow_used_ring(struct vhost_virtqueue *vq,
+                        uint16_t desc_idx, uint16_t len)
+{
+       uint16_t i = vq->shadow_used_idx++;
+
+       vq->shadow_used_ring[i].id  = desc_idx;
+       vq->shadow_used_ring[i].len = len;
+}
+
 static void
 virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
 {
@@ -136,8 +186,8 @@ copy_virtio_net_hdr(struct virtio_net *dev, uint64_t desc_addr,
 }
 
 static inline int __attribute__((always_inline))
-copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
-                 struct rte_mbuf *m, uint16_t desc_idx)
+copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs,
+                 struct rte_mbuf *m, uint16_t desc_idx, uint32_t size)
 {
        uint32_t desc_avail, desc_offset;
        uint32_t mbuf_avail, mbuf_offset;
@@ -146,7 +196,7 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
        uint64_t desc_addr;
        struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
 
-       desc = &vq->desc[desc_idx];
+       desc = &descs[desc_idx];
        desc_addr = gpa_to_vva(dev, desc->addr);
        /*
         * Checking of 'desc_addr' placed outside of 'unlikely' macro to avoid
@@ -183,10 +233,10 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
                                /* Room in vring buffer is not enough */
                                return -1;
                        }
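+                       /* 'size' bounds either the vring or an indirect table */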
-                       if (unlikely(desc->next >= vq->size))
+                       if (unlikely(desc->next >= size))
                                return -1;
 
-                       desc = &vq->desc[desc->next];
+                       desc = &descs[desc->next];
                        desc_addr = gpa_to_vva(dev, desc->addr);
                        if (unlikely(!desc_addr))
                                return -1;
@@ -226,8 +276,9 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
        struct vhost_virtqueue *vq;
        uint16_t avail_idx, free_entries, start_idx;
        uint16_t desc_indexes[MAX_PKT_BURST];
+       struct vring_desc *descs;
        uint16_t used_idx;
-       uint32_t i;
+       uint32_t i, sz;
 
        LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
        if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
@@ -269,7 +320,22 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
                uint16_t desc_idx = desc_indexes[i];
                int err;
 
-               err = copy_mbuf_to_desc(dev, vq, pkts[i], desc_idx);
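+               /* an INDIRECT descriptor points to a table of descriptors
+                * that itself lives in guest memory
+                */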
+               if (vq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
+                       descs = (struct vring_desc *)(uintptr_t)gpa_to_vva(dev,
+                                       vq->desc[desc_idx].addr);
+                       if (unlikely(!descs)) {
+                               count = i;
+                               break;
+                       }
+
+                       /* the indirect desc's len gives the table size */
+                       sz = vq->desc[desc_idx].len / sizeof(*descs);
+                       desc_idx = 0;
+               } else {
+                       descs = vq->desc;
+                       sz = vq->size;
+               }
+
+               err = copy_mbuf_to_desc(dev, descs, pkts[i], desc_idx, sz);
                if (unlikely(err)) {
                        used_idx = (start_idx + i) & (vq->size - 1);
                        vq->used->ring[used_idx].len = dev->vhost_hlen;
@@ -300,33 +366,46 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
        return count;
 }
 
-static inline int
-fill_vec_buf(struct vhost_virtqueue *vq, uint32_t avail_idx,
-            uint32_t *allocated, uint32_t *vec_idx,
-            struct buf_vector *buf_vec)
+static inline int __attribute__((always_inline))
+fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                        uint32_t avail_idx, uint32_t *vec_idx,
+                        struct buf_vector *buf_vec, uint16_t *desc_chain_head,
+                        uint16_t *desc_chain_len)
 {
        uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
        uint32_t vec_id = *vec_idx;
-       uint32_t len    = *allocated;
+       uint32_t len    = 0;
+       struct vring_desc *descs = vq->desc;
+
+       *desc_chain_head = idx;
+
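+       /* the chain may start in an indirect descriptor table */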
+       if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
+               descs = (struct vring_desc *)(uintptr_t)
+                                       gpa_to_vva(dev, vq->desc[idx].addr);
+               if (unlikely(!descs))
+                       return -1;
+
+               idx = 0;
+       }
 
        while (1) {
                if (unlikely(vec_id >= BUF_VECTOR_MAX || idx >= vq->size))
                        return -1;
 
-               len += vq->desc[idx].len;
-               buf_vec[vec_id].buf_addr = vq->desc[idx].addr;
-               buf_vec[vec_id].buf_len  = vq->desc[idx].len;
+               len += descs[idx].len;
+               buf_vec[vec_id].buf_addr = descs[idx].addr;
+               buf_vec[vec_id].buf_len  = descs[idx].len;
                buf_vec[vec_id].desc_idx = idx;
                vec_id++;
 
-               if ((vq->desc[idx].flags & VRING_DESC_F_NEXT) == 0)
+               if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
                        break;
 
-               idx = vq->desc[idx].next;
+               idx = descs[idx].next;
        }
 
-       *allocated = len;
-       *vec_idx   = vec_id;
+       *desc_chain_len = len;
+       *vec_idx = vec_id;
 
        return 0;
 }
@@ -335,31 +414,34 @@ fill_vec_buf(struct vhost_virtqueue *vq, uint32_t avail_idx,
  * Returns -1 on fail, 0 on success
  */
 static inline int
-reserve_avail_buf_mergeable(struct vhost_virtqueue *vq, uint32_t size,
-                           uint16_t *end, struct buf_vector *buf_vec)
+reserve_avail_buf_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                               uint32_t size, struct buf_vector *buf_vec,
+                               uint16_t *num_buffers, uint16_t avail_head)
 {
        uint16_t cur_idx;
-       uint16_t avail_idx;
-       uint32_t allocated = 0;
        uint32_t vec_idx = 0;
        uint16_t tries = 0;
 
-       cur_idx  = vq->last_used_idx;
+       uint16_t head_idx = 0;
+       uint16_t len = 0;
 
-       while (1) {
-               avail_idx = *((volatile uint16_t *)&vq->avail->idx);
-               if (unlikely(cur_idx == avail_idx))
+       *num_buffers = 0;
+       cur_idx  = vq->last_avail_idx;
+
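+       /* grab descriptor chains from the avail ring until the packet fits */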
+       while (size > 0) {
+               if (unlikely(cur_idx == avail_head))
                        return -1;
 
-               if (unlikely(fill_vec_buf(vq, cur_idx, &allocated,
-                                         &vec_idx, buf_vec) < 0))
+               if (unlikely(fill_vec_buf(dev, vq, cur_idx, &vec_idx, buf_vec,
+                                               &head_idx, &len) < 0))
                        return -1;
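+               /* count only the bytes this packet will actually use */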
+               len = RTE_MIN(len, size);
+               update_shadow_used_ring(vq, head_idx, len);
+               size -= len;
 
                cur_idx++;
                tries++;
-
-               if (allocated >= size)
-                       break;
+               *num_buffers += 1;
 
                /*
                 * if we tried all available ring items, and still
@@ -370,77 +452,50 @@ reserve_avail_buf_mergeable(struct vhost_virtqueue *vq, uint32_t size,
                        return -1;
        }
 
-       *end = cur_idx;
        return 0;
 }
 
-static inline uint32_t __attribute__((always_inline))
-copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
-                           uint16_t end_idx, struct rte_mbuf *m,
-                           struct buf_vector *buf_vec)
+static inline int __attribute__((always_inline))
+copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
+                           struct buf_vector *buf_vec, uint16_t num_buffers)
 {
        struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
        uint32_t vec_idx = 0;
-       uint16_t start_idx = vq->last_used_idx;
-       uint16_t cur_idx = start_idx;
        uint64_t desc_addr;
-       uint32_t desc_chain_head;
-       uint32_t desc_chain_len;
        uint32_t mbuf_offset, mbuf_avail;
        uint32_t desc_offset, desc_avail;
        uint32_t cpy_len;
-       uint16_t desc_idx, used_idx;
+       uint64_t hdr_addr, hdr_phys_addr;
+       struct rte_mbuf *hdr_mbuf;
 
        if (unlikely(m == NULL))
-               return 0;
-
-       LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
-               dev->vid, cur_idx, end_idx);
+               return -1;
 
        desc_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
        if (buf_vec[vec_idx].buf_len < dev->vhost_hlen || !desc_addr)
-               return 0;
+               return -1;
 
-       rte_prefetch0((void *)(uintptr_t)desc_addr);
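+       /* remember the header location; it is written later, lazily */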
+       hdr_mbuf = m;
+       hdr_addr = desc_addr;
+       hdr_phys_addr = buf_vec[vec_idx].buf_addr;
+       rte_prefetch0((void *)(uintptr_t)hdr_addr);
 
-       virtio_hdr.num_buffers = end_idx - start_idx;
+       virtio_hdr.num_buffers = num_buffers;
        LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
-               dev->vid, virtio_hdr.num_buffers);
-
-       virtio_enqueue_offload(m, &virtio_hdr.hdr);
-       copy_virtio_net_hdr(dev, desc_addr, virtio_hdr);
-       vhost_log_write(dev, buf_vec[vec_idx].buf_addr, dev->vhost_hlen);
-       PRINT_PACKET(dev, (uintptr_t)desc_addr, dev->vhost_hlen, 0);
+               dev->vid, num_buffers);
 
        desc_avail  = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
        desc_offset = dev->vhost_hlen;
-       desc_chain_head = buf_vec[vec_idx].desc_idx;
-       desc_chain_len = desc_offset;
 
        mbuf_avail  = rte_pktmbuf_data_len(m);
        mbuf_offset = 0;
        while (mbuf_avail != 0 || m->next != NULL) {
                /* done with current desc buf, get the next one */
                if (desc_avail == 0) {
-                       desc_idx = buf_vec[vec_idx].desc_idx;
                        vec_idx++;
-
-                       if (!(vq->desc[desc_idx].flags & VRING_DESC_F_NEXT)) {
-                               /* Update used ring with desc information */
-                               used_idx = cur_idx++ & (vq->size - 1);
-                               vq->used->ring[used_idx].id = desc_chain_head;
-                               vq->used->ring[used_idx].len = desc_chain_len;
-                               vhost_log_used_vring(dev, vq,
-                                       offsetof(struct vring_used,
-                                                ring[used_idx]),
-                                       sizeof(vq->used->ring[used_idx]));
-                               desc_chain_head = buf_vec[vec_idx].desc_idx;
-                               desc_chain_len = 0;
-                       }
-
                        desc_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
                        if (unlikely(!desc_addr))
-                               return 0;
+                               return -1;
 
                        /* Prefetch buffer address. */
                        rte_prefetch0((void *)(uintptr_t)desc_addr);
@@ -456,6 +511,16 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        mbuf_avail  = rte_pktmbuf_data_len(m);
                }
 
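+               /* write the virtio header once, before the first data copy */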
+               if (hdr_addr) {
+                       virtio_enqueue_offload(hdr_mbuf, &virtio_hdr.hdr);
+                       copy_virtio_net_hdr(dev, hdr_addr, virtio_hdr);
+                       vhost_log_write(dev, hdr_phys_addr, dev->vhost_hlen);
+                       PRINT_PACKET(dev, (uintptr_t)hdr_addr,
+                                    dev->vhost_hlen, 0);
+
+                       hdr_addr = 0;
+               }
+
                cpy_len = RTE_MIN(desc_avail, mbuf_avail);
                rte_memcpy((void *)((uintptr_t)(desc_addr + desc_offset)),
                        rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
@@ -469,17 +534,9 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
                mbuf_offset += cpy_len;
                desc_avail  -= cpy_len;
                desc_offset += cpy_len;
-               desc_chain_len += cpy_len;
        }
 
-       used_idx = cur_idx & (vq->size - 1);
-       vq->used->ring[used_idx].id = desc_chain_head;
-       vq->used->ring[used_idx].len = desc_chain_len;
-       vhost_log_used_vring(dev, vq,
-               offsetof(struct vring_used, ring[used_idx]),
-               sizeof(vq->used->ring[used_idx]));
-
-       return end_idx - start_idx;
+       return 0;
 }
 
 static inline uint32_t __attribute__((always_inline))
@@ -487,9 +544,10 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
        struct rte_mbuf **pkts, uint32_t count)
 {
        struct vhost_virtqueue *vq;
-       uint32_t pkt_idx = 0, nr_used = 0;
-       uint16_t end;
+       uint32_t pkt_idx = 0;
+       uint16_t num_buffers;
        struct buf_vector buf_vec[BUF_VECTOR_MAX];
+       uint16_t avail_head;
 
        LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
        if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
@@ -506,28 +564,39 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
        if (count == 0)
                return 0;
 
+       rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
+
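+       /*
+        * Start the burst with an empty shadow used ring and a single
+        * snapshot of avail->idx to work against.
+        */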
+       vq->shadow_used_idx = 0;
+       avail_head = *((volatile uint16_t *)&vq->avail->idx);
        for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
                uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
 
-               if (unlikely(reserve_avail_buf_mergeable(vq, pkt_len,
-                                                        &end, buf_vec) < 0)) {
+               if (unlikely(reserve_avail_buf_mergeable(dev, vq,
+                                               pkt_len, buf_vec, &num_buffers,
+                                               avail_head) < 0)) {
                        LOG_DEBUG(VHOST_DATA,
                                "(%d) failed to get enough desc from vring\n",
                                dev->vid);
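+                       /* drop the shadow entries queued for this packet */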
+                       vq->shadow_used_idx -= num_buffers;
                        break;
                }
 
-               nr_used = copy_mbuf_to_desc_mergeable(dev, vq, end,
-                                                     pkts[pkt_idx], buf_vec);
-               rte_smp_wmb();
+               LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
+                       dev->vid, vq->last_avail_idx,
+                       vq->last_avail_idx + num_buffers);
 
-               *(volatile uint16_t *)&vq->used->idx += nr_used;
-               vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
-                       sizeof(vq->used->idx));
-               vq->last_used_idx += nr_used;
+               if (copy_mbuf_to_desc_mergeable(dev, pkts[pkt_idx],
+                                               buf_vec, num_buffers) < 0) {
+                       vq->shadow_used_idx -= num_buffers;
+                       break;
+               }
+
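+               /* the chains are consumed; advance past them in the avail ring */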
+               vq->last_avail_idx += num_buffers;
        }
 
-       if (likely(pkt_idx)) {
+       if (likely(vq->shadow_used_idx)) {
+               flush_shadow_used_ring(dev, vq);
+
                /* flush used->idx update before we read avail->flags. */
                rte_mb();