[dpdk.git] / lib / librte_vhost / virtio_net.c
index b4a0760..d6b7c7a 100644 (file)
@@ -39,7 +39,7 @@
 #include <rte_memcpy.h>
 #include <rte_ether.h>
 #include <rte_ip.h>
-#include <rte_virtio_net.h>
+#include <rte_vhost.h>
 #include <rte_tcp.h>
 #include <rte_udp.h>
 #include <rte_sctp.h>
 #include "vhost.h"
 
 #define MAX_PKT_BURST 32
-#define VHOST_LOG_PAGE 4096
-
-static inline void __attribute__((always_inline))
-vhost_log_page(uint8_t *log_base, uint64_t page)
-{
-       log_base[page / 8] |= 1 << (page % 8);
-}
-
-static inline void __attribute__((always_inline))
-vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
-{
-       uint64_t page;
-
-       if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
-                  !dev->log_base || !len))
-               return;
-
-       if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
-               return;
-
-       /* To make sure guest memory updates are committed before logging */
-       rte_smp_wmb();
-
-       page = addr / VHOST_LOG_PAGE;
-       while (page * VHOST_LOG_PAGE < addr + len) {
-               vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
-               page += 1;
-       }
-}
-
-static inline void __attribute__((always_inline))
-vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
-                    uint64_t offset, uint64_t len)
-{
-       vhost_log_write(dev, vq->log_guest_addr + offset, len);
-}
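
The helpers removed above (vhost_log_page()/vhost_log_write(), presumably relocated to a common header) implement dirty-page tracking for live migration: a bitmap with one bit per 4 KiB guest page. For reference, a minimal standalone sketch of that bitmap math (illustrative only, not the relocated DPDK code):

    #include <stdint.h>

    #define LOG_PAGE_SIZE 4096

    /* Mark the guest-physical range [addr, addr + len) dirty, one bit
     * per 4 KiB page, so migration only re-copies touched pages. */
    static void
    log_dirty_range(uint8_t *log_base, uint64_t addr, uint64_t len)
    {
            uint64_t page = addr / LOG_PAGE_SIZE;

            while (page * LOG_PAGE_SIZE < addr + len) {
                    log_base[page / 8] |= 1 << (page % 8);
                    page++;
            }
    }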
 
 static bool
-is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t qp_nb)
+is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
 {
-       return (is_tx ^ (idx & 1)) == 0 && idx < qp_nb * VIRTIO_QNUM;
+       return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
 }
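
Only the bound changes here (nr_vring counts individual vrings, where qp_nb counted queue pairs of VIRTIO_QNUM rings each); the parity test is unchanged: virtio-net rings alternate Rx (even index) and Tx (odd index). A few illustrative calls, assuming nr_vring == 4:

    /* ring 0 = RXQ0, ring 1 = TXQ0, ring 2 = RXQ1, ring 3 = TXQ1 */
    is_valid_virt_queue_idx(0, 0, 4);   /* true:  even index is an Rx ring */
    is_valid_virt_queue_idx(1, 1, 4);   /* true:  odd index is a Tx ring   */
    is_valid_virt_queue_idx(2, 1, 4);   /* false: even index cannot be Tx  */
    is_valid_virt_queue_idx(4, 0, 4);   /* false: out of range             */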
 
 static inline void __attribute__((always_inline))
@@ -186,8 +150,8 @@ copy_virtio_net_hdr(struct virtio_net *dev, uint64_t desc_addr,
 }
 
 static inline int __attribute__((always_inline))
-copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
-                 struct rte_mbuf *m, uint16_t desc_idx)
+copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs,
+                 struct rte_mbuf *m, uint16_t desc_idx, uint32_t size)
 {
        uint32_t desc_avail, desc_offset;
        uint32_t mbuf_avail, mbuf_offset;
@@ -195,9 +159,11 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
        struct vring_desc *desc;
        uint64_t desc_addr;
        struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
+       /* A counter to avoid a dead loop in the descriptor chain */
+       uint16_t nr_desc = 1;
 
-       desc = &vq->desc[desc_idx];
-       desc_addr = gpa_to_vva(dev, desc->addr);
+       desc = &descs[desc_idx];
+       desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
        /*
         * The 'desc_addr' check is placed outside the 'unlikely' macro to avoid
         * a performance issue with some versions of gcc (4.8.4 and 5.3.0) which
@@ -233,11 +199,11 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
                                /* Room in vring buffer is not enough */
                                return -1;
                        }
-                       if (unlikely(desc->next >= vq->size))
+                       if (unlikely(desc->next >= size || ++nr_desc > size))
                                return -1;
 
-                       desc = &vq->desc[desc->next];
-                       desc_addr = gpa_to_vva(dev, desc->addr);
+                       desc = &descs[desc->next];
+                       desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
                        if (unlikely(!desc_addr))
                                return -1;
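
The new descs/size parameters exist because the chain may now live in a guest-provided indirect table, and the guest fully controls every next field. The nr_desc counter added above bounds the walk: a valid chain visits each of the size entries at most once, so more than size hops proves a cycle. The guard in isolation (a reduced sketch of the loop above):

    uint16_t nr_desc = 1;

    while (desc->flags & VRING_DESC_F_NEXT) {
            /* Reject out-of-range links and chains longer than the
             * table itself; the latter can only mean a loop. */
            if (unlikely(desc->next >= size || ++nr_desc > size))
                    return -1;
            desc = &descs[desc->next];
    }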
 
@@ -276,11 +242,12 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
        struct vhost_virtqueue *vq;
        uint16_t avail_idx, free_entries, start_idx;
        uint16_t desc_indexes[MAX_PKT_BURST];
+       struct vring_desc *descs;
        uint16_t used_idx;
-       uint32_t i;
+       uint32_t i, sz;
 
        LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
-       if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
+       if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
                RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
                        dev->vid, __func__, queue_id);
                return 0;
@@ -319,7 +286,23 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
                uint16_t desc_idx = desc_indexes[i];
                int err;
 
-               err = copy_mbuf_to_desc(dev, vq, pkts[i], desc_idx);
+               if (vq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
+                       descs = (struct vring_desc *)(uintptr_t)
+                               rte_vhost_gpa_to_vva(dev->mem,
+                                       vq->desc[desc_idx].addr);
+                       if (unlikely(!descs)) {
+                               count = i;
+                               break;
+                       }
+
+                       sz = vq->desc[desc_idx].len / sizeof(*descs);
+                       desc_idx = 0;
+               } else {
+                       descs = vq->desc;
+                       sz = vq->size;
+               }
+
+               err = copy_mbuf_to_desc(dev, descs, pkts[i], desc_idx, sz);
                if (unlikely(err)) {
                        used_idx = (start_idx + i) & (vq->size - 1);
                        vq->used->ring[used_idx].len = dev->vhost_hlen;
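
With VRING_DESC_F_INDIRECT, the descriptor's buffer is not packet data but a second vring_desc table: its addr is translated like any other guest buffer, and its len gives the table size in bytes. The dispatch above, condensed (note that sz is taken from the indirect descriptor's len before desc_idx is reset to index the new table):

    struct vring_desc *descs;
    uint32_t sz;

    if (vq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
            /* The chain lives in a guest-supplied side table. */
            descs = (struct vring_desc *)(uintptr_t)
                    rte_vhost_gpa_to_vva(dev->mem, vq->desc[desc_idx].addr);
            sz = vq->desc[desc_idx].len / sizeof(struct vring_desc);
            desc_idx = 0;   /* chains start at slot 0 of that table */
    } else {
            /* Common case: the chain lives directly in the vring. */
            descs = vq->desc;
            sz = vq->size;
    }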
@@ -351,29 +334,41 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 }
 
 static inline int __attribute__((always_inline))
-fill_vec_buf(struct vhost_virtqueue *vq, uint32_t avail_idx,
-            uint32_t *vec_idx, struct buf_vector *buf_vec,
-            uint16_t *desc_chain_head, uint16_t *desc_chain_len)
+fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                        uint32_t avail_idx, uint32_t *vec_idx,
+                        struct buf_vector *buf_vec, uint16_t *desc_chain_head,
+                        uint16_t *desc_chain_len)
 {
        uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
        uint32_t vec_id = *vec_idx;
        uint32_t len    = 0;
+       struct vring_desc *descs = vq->desc;
 
        *desc_chain_head = idx;
+
+       if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
+               descs = (struct vring_desc *)(uintptr_t)
+                       rte_vhost_gpa_to_vva(dev->mem, vq->desc[idx].addr);
+               if (unlikely(!descs))
+                       return -1;
+
+               idx = 0;
+       }
+
        while (1) {
                if (unlikely(vec_id >= BUF_VECTOR_MAX || idx >= vq->size))
                        return -1;
 
-               len += vq->desc[idx].len;
-               buf_vec[vec_id].buf_addr = vq->desc[idx].addr;
-               buf_vec[vec_id].buf_len  = vq->desc[idx].len;
+               len += descs[idx].len;
+               buf_vec[vec_id].buf_addr = descs[idx].addr;
+               buf_vec[vec_id].buf_len  = descs[idx].len;
                buf_vec[vec_id].desc_idx = idx;
                vec_id++;
 
-               if ((vq->desc[idx].flags & VRING_DESC_F_NEXT) == 0)
+               if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
                        break;
 
-               idx = vq->desc[idx].next;
+               idx = descs[idx].next;
        }
 
        *desc_chain_len = len;
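
fill_vec_buf() flattens one descriptor chain, now direct or indirect, into the buf_vec[] scatter list, so copy_mbuf_to_desc_mergeable() never has to re-walk guest descriptors. A sketch of how a consumer iterates the result (vec_nr standing in for the entry count, an illustrative name):

    uint32_t vec_idx;

    for (vec_idx = 0; vec_idx < vec_nr; vec_idx++) {
            uint64_t dst = rte_vhost_gpa_to_vva(dev->mem,
                            buf_vec[vec_idx].buf_addr);

            if (unlikely(!dst))
                    return -1;
            /* ... copy up to buf_vec[vec_idx].buf_len bytes to dst ... */
    }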
@@ -386,11 +381,11 @@ fill_vec_buf(struct vhost_virtqueue *vq, uint32_t avail_idx,
  * Returns -1 on fail, 0 on success
  */
 static inline int
-reserve_avail_buf_mergeable(struct vhost_virtqueue *vq, uint32_t size,
-                           struct buf_vector *buf_vec, uint16_t *num_buffers)
+reserve_avail_buf_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                               uint32_t size, struct buf_vector *buf_vec,
+                               uint16_t *num_buffers, uint16_t avail_head)
 {
        uint16_t cur_idx;
-       uint16_t avail_idx;
        uint32_t vec_idx = 0;
        uint16_t tries = 0;
 
@@ -401,12 +396,11 @@ reserve_avail_buf_mergeable(struct vhost_virtqueue *vq, uint32_t size,
        cur_idx  = vq->last_avail_idx;
 
        while (size > 0) {
-               avail_idx = *((volatile uint16_t *)&vq->avail->idx);
-               if (unlikely(cur_idx == avail_idx))
+               if (unlikely(cur_idx == avail_head))
                        return -1;
 
-               if (unlikely(fill_vec_buf(vq, cur_idx, &vec_idx, buf_vec,
-                                         &head_idx, &len) < 0))
+               if (unlikely(fill_vec_buf(dev, vq, cur_idx, &vec_idx, buf_vec,
+                                               &head_idx, &len) < 0))
                        return -1;
                len = RTE_MIN(len, size);
                update_shadow_used_ring(vq, head_idx, len);
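
update_shadow_used_ring(), defined elsewhere in this file, is what lets reservation proceed without dirtying the shared used ring per descriptor: completions are staged in a private array and flushed to vq->used once per burst. A hedged sketch of the staging step (field names as I understand the vhost.h of this release; treat them as assumptions):

    static inline void
    update_shadow_used_ring(struct vhost_virtqueue *vq,
                            uint16_t desc_idx, uint16_t len)
    {
            uint16_t i = vq->shadow_used_idx++;

            /* Staged locally; the shared ring is written, and dirty
             * pages logged, in a single flush after the burst. */
            vq->shadow_used_ring[i].id  = desc_idx;
            vq->shadow_used_ring[i].len = len;
    }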
@@ -444,7 +438,7 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
        if (unlikely(m == NULL))
                return -1;
 
-       desc_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
+       desc_addr = rte_vhost_gpa_to_vva(dev->mem, buf_vec[vec_idx].buf_addr);
        if (buf_vec[vec_idx].buf_len < dev->vhost_hlen || !desc_addr)
                return -1;
 
@@ -466,7 +460,8 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
                /* done with current desc buf, get the next one */
                if (desc_avail == 0) {
                        vec_idx++;
-                       desc_addr = gpa_to_vva(dev, buf_vec[vec_idx].buf_addr);
+                       desc_addr = rte_vhost_gpa_to_vva(dev->mem,
+                                       buf_vec[vec_idx].buf_addr);
                        if (unlikely(!desc_addr))
                                return -1;
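
rte_vhost_gpa_to_vva(), which replaces the internal gpa_to_vva() throughout this patch, is part of the public API and takes the guest memory table directly rather than the device. Conceptually it is a linear walk over the memory regions; a sketch along the lines of the rte_vhost.h inline (field names are from that header, treat the details as illustrative):

    static uint64_t
    gpa_to_vva_sketch(struct rte_vhost_memory *mem, uint64_t gpa)
    {
            uint32_t i;

            for (i = 0; i < mem->nregions; i++) {
                    struct rte_vhost_mem_region *reg = &mem->regions[i];

                    /* Region covers gpa: rebase it into the host's
                     * mmap of that guest memory region. */
                    if (gpa >= reg->guest_phys_addr &&
                        gpa < reg->guest_phys_addr + reg->size)
                            return gpa - reg->guest_phys_addr +
                                    reg->host_user_addr;
            }

            return 0;       /* unmapped guest-physical address */
    }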
 
@@ -520,9 +515,10 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
        uint32_t pkt_idx = 0;
        uint16_t num_buffers;
        struct buf_vector buf_vec[BUF_VECTOR_MAX];
+       uint16_t avail_head;
 
        LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
-       if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
+       if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
                RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
                        dev->vid, __func__, queue_id);
                return 0;
@@ -536,12 +532,16 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
        if (count == 0)
                return 0;
 
+       rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
+
        vq->shadow_used_idx = 0;
+       avail_head = *((volatile uint16_t *)&vq->avail->idx);
        for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
                uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
 
-               if (unlikely(reserve_avail_buf_mergeable(vq, pkt_len, buf_vec,
-                                                        &num_buffers) < 0)) {
+               if (unlikely(reserve_avail_buf_mergeable(dev, vq,
+                                               pkt_len, buf_vec, &num_buffers,
+                                               avail_head) < 0)) {
                        LOG_DEBUG(VHOST_DATA,
                                "(%d) failed to get enough desc from vring\n",
                                dev->vid);
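
Reading vq->avail->idx once per burst (avail_head) replaces the volatile read that reserve_avail_buf_mergeable() previously performed per packet. Buffers the guest publishes mid-burst are simply picked up by the next burst. The pattern in isolation:

    /* One read of the guest-written index for the whole burst. */
    uint16_t avail_head = *((volatile uint16_t *)&vq->avail->idx);
    uint16_t cur_idx = vq->last_avail_idx;

    while (cur_idx != avail_head) {
            uint16_t head = vq->avail->ring[cur_idx & (vq->size - 1)];

            /* ... reserve and fill the chain starting at head ... */
            cur_idx++;
    }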
@@ -645,6 +645,7 @@ parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
        default:
                m->l3_len = 0;
                *l4_proto = 0;
+               *l4_hdr = NULL;
                break;
        }
 }
@@ -681,7 +682,7 @@ vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
                }
        }
 
-       if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+       if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                case VIRTIO_NET_HDR_GSO_TCPV4:
                case VIRTIO_NET_HDR_GSO_TCPV6:
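
These two hunks belong together: parse_ethernet() now reports "no recognized L4 header" explicitly instead of leaving *l4_hdr untouched, and the GSO path checks the pointer before following it. The shape at the call site (condensed from vhost_dequeue_offload() above):

    void *l4_hdr = NULL;
    uint16_t l4_proto = 0;

    parse_ethernet(m, &l4_proto, &l4_hdr);

    /* For an unknown ethertype, l4_hdr is now NULL rather than stale
     * stack garbage, so a guest-supplied GSO hint can no longer make
     * the host dereference a bogus TCP/UDP header pointer. */
    if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
            /* ... set m->ol_flags and m->l4_len from the header ... */
    }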
@@ -763,7 +764,7 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
                        (desc->flags & VRING_DESC_F_INDIRECT))
                return -1;
 
-       desc_addr = gpa_to_vva(dev, desc->addr);
+       desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
        if (unlikely(!desc_addr))
                return -1;
 
@@ -783,7 +784,7 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
                if (unlikely(desc->flags & VRING_DESC_F_INDIRECT))
                        return -1;
 
-               desc_addr = gpa_to_vva(dev, desc->addr);
+               desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
                if (unlikely(!desc_addr))
                        return -1;
 
@@ -847,7 +848,7 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
                        if (unlikely(desc->flags & VRING_DESC_F_INDIRECT))
                                return -1;
 
-                       desc_addr = gpa_to_vva(dev, desc->addr);
+                       desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
                        if (unlikely(!desc_addr))
                                return -1;
 
@@ -982,7 +983,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
        if (!dev)
                return 0;
 
-       if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->virt_qp_nb))) {
+       if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
                RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
                        dev->vid, __func__, queue_id);
                return 0;
@@ -1021,9 +1022,21 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
         * array, so it looks like the guest actually sent such a packet.
         *
         * Check user_send_rarp() for more information.
+        *
+        * broadcast_rarp shares a cacheline in the virtio_net structure
+        * with some fields that are accessed during enqueue and
+        * rte_atomic16_cmpset() causes a write if using cmpxchg. This could
+        * result in false sharing between enqueue and dequeue.
+        *
+        * Prevent unnecessary false sharing by reading broadcast_rarp first
+        * and only performing cmpset if the read indicates it is likely to
+        * be set.
         */
-       if (unlikely(rte_atomic16_cmpset((volatile uint16_t *)
-                                        &dev->broadcast_rarp.cnt, 1, 0))) {
+
+       if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) &&
+                       rte_atomic16_cmpset((volatile uint16_t *)
+                               &dev->broadcast_rarp.cnt, 1, 0))) {
                rarp_mbuf = rte_pktmbuf_alloc(mbuf_pool);
                if (rarp_mbuf == NULL) {
                        RTE_LOG(ERR, VHOST_DATA,
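
The comment block added above describes a pattern worth isolating: probe with a plain atomic load first, and only fall through to the cacheline-dirtying compare-and-set when the flag may actually be set. A generic sketch using the same rte_atomic16 API (the helper name is ours):

    #include <rte_atomic.h>
    #include <rte_branch_prediction.h>

    /* Returns 1 if the flag was set and this caller cleared it.
     * The plain read leaves the cacheline in Shared state on the fast
     * path; only a likely-successful cmpset pays for ownership. */
    static inline int
    test_and_clear16(rte_atomic16_t *flag)
    {
            if (likely(rte_atomic16_read(flag) == 0))
                    return 0;
            return rte_atomic16_cmpset((volatile uint16_t *)&flag->cnt,
                                       1, 0);
    }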
@@ -1078,7 +1091,8 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
                        rte_prefetch0(&vq->desc[desc_indexes[i + 1]]);
 
                if (vq->desc[desc_indexes[i]].flags & VRING_DESC_F_INDIRECT) {
-                       desc = (struct vring_desc *)(uintptr_t)gpa_to_vva(dev,
+                       desc = (struct vring_desc *)(uintptr_t)
+                               rte_vhost_gpa_to_vva(dev->mem,
                                        vq->desc[desc_indexes[i]].addr);
                        if (unlikely(!desc))
                                break;