vhost: add packed ring batch dequeue
[dpdk.git] / lib / librte_vhost / virtio_net.c
index 4fb6552..7643520 100644
@@ -881,6 +881,88 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
        return pkt_idx;
 }
 
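+/*
+ * Try to enqueue a batch of PACKED_BATCH_SIZE mbufs into consecutive
+ * available descriptors in one shot. Returns -1 when any precondition
+ * fails so the caller can fall back to the single-packet path.
+ */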
+static __rte_unused int
+virtio_dev_rx_batch_packed(struct virtio_net *dev,
+                          struct vhost_virtqueue *vq,
+                          struct rte_mbuf **pkts)
+{
+       bool wrap_counter = vq->avail_wrap_counter;
+       struct vring_packed_desc *descs = vq->desc_packed;
+       uint16_t avail_idx = vq->last_avail_idx;
+       uint64_t desc_addrs[PACKED_BATCH_SIZE];
+       struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
+       uint32_t buf_offset = dev->vhost_hlen;
+       uint64_t lens[PACKED_BATCH_SIZE];
+       uint16_t i;
+
+       if (unlikely(avail_idx & PACKED_BATCH_MASK))
+               return -1;
+
+       if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
+               return -1;
+
+       vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+               if (unlikely(pkts[i]->next != NULL))
+                       return -1;
+               if (unlikely(!desc_is_avail(&descs[avail_idx + i],
+                                           wrap_counter)))
+                       return -1;
+       }
+
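+       /* Only read the descriptor fields once availability is confirmed. */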
+       rte_smp_rmb();
+
+       vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+               lens[i] = descs[avail_idx + i].len;
+
+       vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+               if (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset)))
+                       return -1;
+       }
+
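+       /*
+        * Translate guest IOVA to host VA; lens[i] is shrunk when the
+        * buffer is not contiguously mapped, which is rejected below.
+        */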
+       vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+               desc_addrs[i] = vhost_iova_to_vva(dev, vq,
+                                                 descs[avail_idx + i].addr,
+                                                 &lens[i],
+                                                 VHOST_ACCESS_RW);
+
+       vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+               if (unlikely(lens[i] != descs[avail_idx + i].len))
+                       return -1;
+       }
+
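+       /* Prefetch the buffers and locate the virtio-net headers. */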
+       vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+               rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
+               hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
+                                       (uintptr_t)desc_addrs[i];
+               lens[i] = pkts[i]->pkt_len + dev->vhost_hlen;
+       }
+
+       vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+               virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr);
+
+       vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
+
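+       /* Copy the packet data right after the virtio-net header. */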
+       vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+               rte_memcpy((void *)(uintptr_t)(desc_addrs[i] + buf_offset),
+                          rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
+                          pkts[i]->pkt_len);
+       }
+
+       return 0;
+}
+
 static __rte_unused int16_t
 virtio_dev_rx_single_packed(struct virtio_net *dev,
                            struct vhost_virtqueue *vq,
@@ -1565,6 +1647,193 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
        return i;
 }
 
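+/*
+ * Check that the next PACKED_BATCH_SIZE descriptors are available,
+ * translate their guest addresses and allocate one mbuf per
+ * descriptor. On success the buffer addresses are returned in
+ * desc_addrs[] and the buffer ids in ids[].
+ */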
+static __rte_always_inline int
+vhost_reserve_avail_batch_packed(struct virtio_net *dev,
+                                struct vhost_virtqueue *vq,
+                                struct rte_mempool *mbuf_pool,
+                                struct rte_mbuf **pkts,
+                                uint16_t avail_idx,
+                                uintptr_t *desc_addrs,
+                                uint16_t *ids)
+{
+       bool wrap = vq->avail_wrap_counter;
+       struct vring_packed_desc *descs = vq->desc_packed;
+       struct virtio_net_hdr *hdr;
+       uint64_t lens[PACKED_BATCH_SIZE];
+       uint64_t buf_lens[PACKED_BATCH_SIZE];
+       uint32_t buf_offset = dev->vhost_hlen;
+       uint16_t flags, i;
+
+       if (unlikely(avail_idx & PACKED_BATCH_MASK))
+               return -1;
+       if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
+               return -1;
+
+       vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+               flags = descs[avail_idx + i].flags;
+               if (unlikely((wrap != !!(flags & VRING_DESC_F_AVAIL)) ||
+                            (wrap == !!(flags & VRING_DESC_F_USED))  ||
+                            (flags & PACKED_DESC_SINGLE_DEQUEUE_FLAG)))
+                       return -1;
+       }
+
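+       /* Only read the descriptor fields once availability is confirmed. */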
+       rte_smp_rmb();
+
+       vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+               lens[i] = descs[avail_idx + i].len;
+
+       vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+               desc_addrs[i] = vhost_iova_to_vva(dev, vq,
+                                                 descs[avail_idx + i].addr,
+                                                 &lens[i], VHOST_ACCESS_RW);
+       }
+
+       vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+               if (unlikely((lens[i] != descs[avail_idx + i].len)))
+                       return -1;
+       }
+
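+       /* Allocate an mbuf sized for each descriptor's payload. */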
+       vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+               pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]);
+               if (!pkts[i])
+                       goto free_buf;
+       }
+
+       vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+               buf_lens[i] = pkts[i]->buf_len - pkts[i]->data_off;
+
+       vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+               if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
+                       goto free_buf;
+       }
+
+       vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+               pkts[i]->pkt_len = descs[avail_idx + i].len - buf_offset;
+               pkts[i]->data_len = pkts[i]->pkt_len;
+               ids[i] = descs[avail_idx + i].id;
+       }
+
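+       /* Convert virtio-net headers to mbuf offload metadata. */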
+       if (virtio_net_with_host_offload(dev)) {
+               vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+                       hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
+                       vhost_dequeue_offload(hdr, pkts[i]);
+               }
+       }
+
+       return 0;
+
+free_buf:
+       /* pkts[] past a failed allocation is uninitialized; stop at NULL. */
+       for (i = 0; i < PACKED_BATCH_SIZE; i++) {
+               if (!pkts[i])
+                       break;
+               rte_pktmbuf_free(pkts[i]);
+       }
+
+       return -1;
+}
+
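+/*
+ * Dequeue a full batch in one shot: reserve and map the descriptors,
+ * copy each payload into its mbuf and advance last_avail_idx.
+ */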
+static __rte_unused int
+virtio_dev_tx_batch_packed(struct virtio_net *dev,
+                          struct vhost_virtqueue *vq,
+                          struct rte_mempool *mbuf_pool,
+                          struct rte_mbuf **pkts)
+{
+       uint16_t avail_idx = vq->last_avail_idx;
+       uint32_t buf_offset = dev->vhost_hlen;
+       uintptr_t desc_addrs[PACKED_BATCH_SIZE];
+       uint16_t ids[PACKED_BATCH_SIZE];
+       uint16_t i;
+
+       if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
+                                            avail_idx, desc_addrs, ids))
+               return -1;
+
+       vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+               rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
+
+       vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
+               rte_memcpy(rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
+                          (void *)(uintptr_t)(desc_addrs[i] + buf_offset),
+                          pkts[i]->pkt_len);
+
+       vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
+
+       return 0;
+}
+
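+/*
+ * Dequeue a single (possibly chained) descriptor into one mbuf and
+ * return its buffer id and descriptor count for the used ring update.
+ */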
+static __rte_always_inline int
+vhost_dequeue_single_packed(struct virtio_net *dev,
+                           struct vhost_virtqueue *vq,
+                           struct rte_mempool *mbuf_pool,
+                           struct rte_mbuf **pkts,
+                           uint16_t *buf_id,
+                           uint16_t *desc_count)
+{
+       struct buf_vector buf_vec[BUF_VECTOR_MAX];
+       uint32_t buf_len;
+       uint16_t nr_vec = 0;
+       int err;
+
+       if (unlikely(fill_vec_buf_packed(dev, vq,
+                                        vq->last_avail_idx, desc_count,
+                                        buf_vec, &nr_vec,
+                                        buf_id, &buf_len,
+                                        VHOST_ACCESS_RO) < 0))
+               return -1;
+
+       *pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
+       if (unlikely(*pkts == NULL)) {
+               RTE_LOG(ERR, VHOST_DATA,
+                       "Failed to allocate memory for mbuf.\n");
+               return -1;
+       }
+
+       err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
+                               mbuf_pool);
+       if (unlikely(err)) {
+               rte_pktmbuf_free(*pkts);
+               return -1;
+       }
+
+       return 0;
+}
+
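+/* Fallback path: dequeue one descriptor chain, then advance last_avail_idx. */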
+static __rte_unused int
+virtio_dev_tx_single_packed(struct virtio_net *dev,
+                           struct vhost_virtqueue *vq,
+                           struct rte_mempool *mbuf_pool,
+                           struct rte_mbuf **pkts)
+{
+       uint16_t buf_id, desc_count;
+
+       if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
+                                       &desc_count))
+               return -1;
+
+       vq_inc_last_avail_packed(vq, desc_count);
+
+       return 0;
+}
+
 static __rte_noinline uint16_t
 virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
        struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)