vhost: support MTU protocol feature
[dpdk.git] / lib / librte_vhost / virtio_net.c
index eed0b1c..337470d 100644 (file)
@@ -186,8 +186,8 @@ copy_virtio_net_hdr(struct virtio_net *dev, uint64_t desc_addr,
 }
 
 static inline int __attribute__((always_inline))
-copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
-                 struct rte_mbuf *m, uint16_t desc_idx)
+copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs,
+                 struct rte_mbuf *m, uint16_t desc_idx, uint32_t size)
 {
        uint32_t desc_avail, desc_offset;
        uint32_t mbuf_avail, mbuf_offset;
@@ -195,8 +195,10 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
        struct vring_desc *desc;
        uint64_t desc_addr;
        struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
+       /* A counter to guard against a looped (malicious) desc chain */
+       uint16_t nr_desc = 1;
 
-       desc = &vq->desc[desc_idx];
+       desc = &descs[desc_idx];
        desc_addr = gpa_to_vva(dev, desc->addr);
        /*
         * Checking of 'desc_addr' placed outside of 'unlikely' macro to avoid
@@ -233,10 +235,10 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
                                /* Room in vring buffer is not enough */
                                return -1;
                        }
-                       if (unlikely(desc->next >= vq->size))
+                       if (unlikely(desc->next >= size || ++nr_desc > size))
                                return -1;
 
-                       desc = &vq->desc[desc->next];
+                       desc = &descs[desc->next];
                        desc_addr = gpa_to_vva(dev, desc->addr);
                        if (unlikely(!desc_addr))
                                return -1;
@@ -276,8 +278,9 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
        struct vhost_virtqueue *vq;
        uint16_t avail_idx, free_entries, start_idx;
        uint16_t desc_indexes[MAX_PKT_BURST];
+       struct vring_desc *descs;
        uint16_t used_idx;
-       uint32_t i;
+       uint32_t i, sz;
 
        LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
        if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
@@ -319,7 +322,22 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
                uint16_t desc_idx = desc_indexes[i];
                int err;
 
-               err = copy_mbuf_to_desc(dev, vq, pkts[i], desc_idx);
+               if (vq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
+                       descs = (struct vring_desc *)(uintptr_t)gpa_to_vva(dev,
+                                       vq->desc[desc_idx].addr);
+                       if (unlikely(!descs)) {
+                               count = i;
+                               break;
+                       }
+
+                       /* table size must be read from the head desc's len
+                        * before desc_idx is reset to index into the table
+                        */
+                       sz = vq->desc[desc_idx].len / sizeof(*descs);
+                       desc_idx = 0;
+               } else {
+                       descs = vq->desc;
+                       sz = vq->size;
+               }
+
+               err = copy_mbuf_to_desc(dev, descs, pkts[i], desc_idx, sz);
                if (unlikely(err)) {
                        used_idx = (start_idx + i) & (vq->size - 1);
                        vq->used->ring[used_idx].len = dev->vhost_hlen;
@@ -351,29 +369,41 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 }
 
 static inline int __attribute__((always_inline))
-fill_vec_buf(struct vhost_virtqueue *vq, uint32_t avail_idx,
-            uint32_t *vec_idx, struct buf_vector *buf_vec,
-            uint16_t *desc_chain_head, uint16_t *desc_chain_len)
+fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                        uint32_t avail_idx, uint32_t *vec_idx,
+                        struct buf_vector *buf_vec, uint16_t *desc_chain_head,
+                        uint16_t *desc_chain_len)
 {
        uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
        uint32_t vec_id = *vec_idx;
        uint32_t len    = 0;
+       struct vring_desc *descs = vq->desc;
 
        *desc_chain_head = idx;
+
+       if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
+               descs = (struct vring_desc *)(uintptr_t)
+                                       gpa_to_vva(dev, vq->desc[idx].addr);
+               if (unlikely(!descs))
+                       return -1;
+
+               idx = 0;
+       }
+
        while (1) {
                if (unlikely(vec_id >= BUF_VECTOR_MAX || idx >= vq->size))
                        return -1;
 
-               len += vq->desc[idx].len;
-               buf_vec[vec_id].buf_addr = vq->desc[idx].addr;
-               buf_vec[vec_id].buf_len  = vq->desc[idx].len;
+               len += descs[idx].len;
+               buf_vec[vec_id].buf_addr = descs[idx].addr;
+               buf_vec[vec_id].buf_len  = descs[idx].len;
                buf_vec[vec_id].desc_idx = idx;
                vec_id++;
 
-               if ((vq->desc[idx].flags & VRING_DESC_F_NEXT) == 0)
+               if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
                        break;
 
-               idx = vq->desc[idx].next;
+               idx = descs[idx].next;
        }
 
        *desc_chain_len = len;
@@ -386,9 +416,9 @@ fill_vec_buf(struct vhost_virtqueue *vq, uint32_t avail_idx,
  * Returns -1 on fail, 0 on success
  */
 static inline int
-reserve_avail_buf_mergeable(struct vhost_virtqueue *vq, uint32_t size,
-                           struct buf_vector *buf_vec, uint16_t *num_buffers,
-                           uint16_t avail_head)
+reserve_avail_buf_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                               uint32_t size, struct buf_vector *buf_vec,
+                               uint16_t *num_buffers, uint16_t avail_head)
 {
        uint16_t cur_idx;
        uint32_t vec_idx = 0;
@@ -404,8 +434,8 @@ reserve_avail_buf_mergeable(struct vhost_virtqueue *vq, uint32_t size,
                if (unlikely(cur_idx == avail_head))
                        return -1;
 
-               if (unlikely(fill_vec_buf(vq, cur_idx, &vec_idx, buf_vec,
-                                         &head_idx, &len) < 0))
+               if (unlikely(fill_vec_buf(dev, vq, cur_idx, &vec_idx, buf_vec,
+                                               &head_idx, &len) < 0))
                        return -1;
                len = RTE_MIN(len, size);
                update_shadow_used_ring(vq, head_idx, len);
@@ -543,8 +573,9 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
        for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
                uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
 
-               if (unlikely(reserve_avail_buf_mergeable(vq, pkt_len, buf_vec,
-                                       &num_buffers, avail_head) < 0)) {
+               if (unlikely(reserve_avail_buf_mergeable(dev, vq,
+                                               pkt_len, buf_vec, &num_buffers,
+                                               avail_head) < 0)) {
                        LOG_DEBUG(VHOST_DATA,
                                "(%d) failed to get enough desc from vring\n",
                                dev->vid);
@@ -648,6 +679,7 @@ parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
        default:
                m->l3_len = 0;
                *l4_proto = 0;
+               *l4_hdr = NULL;
                break;
        }
 }
@@ -684,7 +716,7 @@ vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
                }
        }
 
-       if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+       if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                case VIRTIO_NET_HDR_GSO_TCPV4:
                case VIRTIO_NET_HDR_GSO_TCPV6: