vhost: cleanup async enqueue
lib/librte_vhost/virtio_net.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdbool.h>
#include <linux/virtio_net.h>

#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_vhost.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>
#include <rte_arp.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>
#include <rte_vhost_async.h>

#include "iotlb.h"
#include "vhost.h"

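/*
 * Copies of at most MAX_BATCH_LEN bytes are staged in vq->batch_copy_elems
 * and flushed in one pass by do_data_copy_enqueue()/do_data_copy_dequeue();
 * larger copies are performed immediately.
 */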
#define MAX_BATCH_LEN 256

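/*
 * Presumably the number of packets the async enqueue path accumulates
 * before kicking the copy engine; its use lies further down this file.
 */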
#define VHOST_ASYNC_BATCH_THRESHOLD 32

static __rte_always_inline bool
rxvq_is_mergeable(struct virtio_net *dev)
{
        return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
}

static __rte_always_inline bool
virtio_net_is_inorder(struct virtio_net *dev)
{
        return dev->features & (1ULL << VIRTIO_F_IN_ORDER);
}

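/*
 * Virtio-net vrings alternate directions: even indexes are guest RX
 * queues (which vhost enqueues into), odd indexes are guest TX queues,
 * so a queue index is valid when its parity matches is_tx and it is
 * below the number of vrings.
 */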
static bool
is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
{
        return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
}

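/*
 * Flush the copies batched in vq->batch_copy_elems: perform each pending
 * memcpy, mark the touched guest pages dirty for live migration, and
 * reset the batch counter.
 */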
static inline void
do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        struct batch_copy_elem *elem = vq->batch_copy_elems;
        uint16_t count = vq->batch_copy_nb_elems;
        int i;

        for (i = 0; i < count; i++) {
                rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
                vhost_log_cache_write_iova(dev, vq, elem[i].log_addr,
                                           elem[i].len);
                PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
        }

        vq->batch_copy_nb_elems = 0;
}

static inline void
do_data_copy_dequeue(struct vhost_virtqueue *vq)
{
        struct batch_copy_elem *elem = vq->batch_copy_elems;
        uint16_t count = vq->batch_copy_nb_elems;
        int i;

        for (i = 0; i < count; i++)
                rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);

        vq->batch_copy_nb_elems = 0;
}

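/*
 * Used ring updates are first accumulated in a shadow ring and written
 * back to the real used ring in one shot; a flush needs at most two
 * copies when the update range wraps past the end of the ring.
 */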
static __rte_always_inline void
do_flush_shadow_used_ring_split(struct virtio_net *dev,
                        struct vhost_virtqueue *vq,
                        uint16_t to, uint16_t from, uint16_t size)
{
        rte_memcpy(&vq->used->ring[to],
                        &vq->shadow_used_split[from],
                        size * sizeof(struct vring_used_elem));
        vhost_log_cache_used_vring(dev, vq,
                        offsetof(struct vring_used, ring[to]),
                        size * sizeof(struct vring_used_elem));
}

static __rte_always_inline void
flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        uint16_t used_idx = vq->last_used_idx & (vq->size - 1);

        if (used_idx + vq->shadow_used_idx <= vq->size) {
                do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
                                          vq->shadow_used_idx);
        } else {
                uint16_t size;

                /* update used ring interval [used_idx, vq->size] */
                size = vq->size - used_idx;
                do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);

                /* update the left half used ring interval [0, left_size] */
                do_flush_shadow_used_ring_split(dev, vq, 0, size,
                                          vq->shadow_used_idx - size);
        }
        vq->last_used_idx += vq->shadow_used_idx;

        vhost_log_cache_sync(dev, vq);

        __atomic_add_fetch(&vq->used->idx, vq->shadow_used_idx,
                           __ATOMIC_RELEASE);
        vq->shadow_used_idx = 0;
        vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
                sizeof(vq->used->idx));
}

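/*
 * Async variant of the flush above: descriptors are written to the used
 * ring, but used->idx is left alone and nothing is dirty-logged here,
 * since the data copies may still be in flight; the async completion
 * path presumably advances used->idx once the transfers finish.
 */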
static __rte_always_inline void
async_flush_shadow_used_ring_split(struct virtio_net *dev,
        struct vhost_virtqueue *vq)
{
        uint16_t used_idx = vq->last_used_idx & (vq->size - 1);

        if (used_idx + vq->shadow_used_idx <= vq->size) {
                do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
                                          vq->shadow_used_idx);
        } else {
                uint16_t size;

                /* update used ring interval [used_idx, vq->size] */
                size = vq->size - used_idx;
                do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);

                /* update the left half used ring interval [0, left_size] */
                do_flush_shadow_used_ring_split(dev, vq, 0, size,
                                          vq->shadow_used_idx - size);
        }

        vq->last_used_idx += vq->shadow_used_idx;
        vq->shadow_used_idx = 0;
}

static __rte_always_inline void
update_shadow_used_ring_split(struct vhost_virtqueue *vq,
                         uint16_t desc_idx, uint32_t len)
{
        uint16_t i = vq->shadow_used_idx++;

        vq->shadow_used_split[i].id  = desc_idx;
        vq->shadow_used_split[i].len = len;
}

static __rte_always_inline void
vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
                                  struct vhost_virtqueue *vq)
{
        int i;
        uint16_t used_idx = vq->last_used_idx;
        uint16_t head_idx = vq->last_used_idx;
        uint16_t head_flags = 0;

        /* Split loop in two to save memory barriers */
        for (i = 0; i < vq->shadow_used_idx; i++) {
                vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
                vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;

                used_idx += vq->shadow_used_packed[i].count;
                if (used_idx >= vq->size)
                        used_idx -= vq->size;
        }

        /* The ordering for storing desc flags needs to be enforced. */
        rte_atomic_thread_fence(__ATOMIC_RELEASE);

        for (i = 0; i < vq->shadow_used_idx; i++) {
                uint16_t flags;

                if (vq->shadow_used_packed[i].len)
                        flags = VRING_DESC_F_WRITE;
                else
                        flags = 0;

                if (vq->used_wrap_counter) {
                        flags |= VRING_DESC_F_USED;
                        flags |= VRING_DESC_F_AVAIL;
                } else {
                        flags &= ~VRING_DESC_F_USED;
                        flags &= ~VRING_DESC_F_AVAIL;
                }

                if (i > 0) {
                        vq->desc_packed[vq->last_used_idx].flags = flags;

                        vhost_log_cache_used_vring(dev, vq,
                                        vq->last_used_idx *
                                        sizeof(struct vring_packed_desc),
                                        sizeof(struct vring_packed_desc));
                } else {
                        head_idx = vq->last_used_idx;
                        head_flags = flags;
                }

                vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count);
        }

        vq->desc_packed[head_idx].flags = head_flags;

        vhost_log_cache_used_vring(dev, vq,
                                head_idx *
                                sizeof(struct vring_packed_desc),
                                sizeof(struct vring_packed_desc));

        vq->shadow_used_idx = 0;
        vhost_log_cache_sync(dev, vq);
}

static __rte_always_inline void
vhost_flush_dequeue_shadow_packed(struct virtio_net *dev,
                                  struct vhost_virtqueue *vq)
{
        struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];

        vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
        /* desc flags is the synchronization point for virtio packed vring */
        __atomic_store_n(&vq->desc_packed[vq->shadow_last_used_idx].flags,
                         used_elem->flags, __ATOMIC_RELEASE);

        vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
                                   sizeof(struct vring_packed_desc),
                                   sizeof(struct vring_packed_desc));
        vq->shadow_used_idx = 0;
        vhost_log_cache_sync(dev, vq);
}

static __rte_always_inline void
vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
                                 struct vhost_virtqueue *vq,
                                 uint64_t *lens,
                                 uint16_t *ids)
{
        uint16_t i;
        uint16_t flags;

        if (vq->shadow_used_idx) {
                do_data_copy_enqueue(dev, vq);
                vhost_flush_enqueue_shadow_packed(dev, vq);
        }

        flags = PACKED_DESC_ENQUEUE_USED_FLAG(vq->used_wrap_counter);

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                vq->desc_packed[vq->last_used_idx + i].id = ids[i];
                vq->desc_packed[vq->last_used_idx + i].len = lens[i];
        }

        rte_atomic_thread_fence(__ATOMIC_RELEASE);

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                vq->desc_packed[vq->last_used_idx + i].flags = flags;

        vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
                                   sizeof(struct vring_packed_desc),
                                   sizeof(struct vring_packed_desc) *
                                   PACKED_BATCH_SIZE);
        vhost_log_cache_sync(dev, vq);

        vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
}

static __rte_always_inline void
vhost_shadow_dequeue_batch_packed_inorder(struct vhost_virtqueue *vq,
                                          uint16_t id)
{
        vq->shadow_used_packed[0].id = id;

        if (!vq->shadow_used_idx) {
                vq->shadow_last_used_idx = vq->last_used_idx;
                vq->shadow_used_packed[0].flags =
                        PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
                vq->shadow_used_packed[0].len = 0;
                vq->shadow_used_packed[0].count = 1;
                vq->shadow_used_idx++;
        }

        vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
}

static __rte_always_inline void
vhost_shadow_dequeue_batch_packed(struct virtio_net *dev,
                                  struct vhost_virtqueue *vq,
                                  uint16_t *ids)
{
        uint16_t flags;
        uint16_t i;
        uint16_t begin;

        flags = PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);

        if (!vq->shadow_used_idx) {
                vq->shadow_last_used_idx = vq->last_used_idx;
                vq->shadow_used_packed[0].id  = ids[0];
                vq->shadow_used_packed[0].len = 0;
                vq->shadow_used_packed[0].count = 1;
                vq->shadow_used_packed[0].flags = flags;
                vq->shadow_used_idx++;
                begin = 1;
        } else
                begin = 0;

        vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE) {
                vq->desc_packed[vq->last_used_idx + i].id = ids[i];
                vq->desc_packed[vq->last_used_idx + i].len = 0;
        }

        rte_atomic_thread_fence(__ATOMIC_RELEASE);
        vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE)
                vq->desc_packed[vq->last_used_idx + i].flags = flags;

        vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
                                   sizeof(struct vring_packed_desc),
                                   sizeof(struct vring_packed_desc) *
                                   PACKED_BATCH_SIZE);
        vhost_log_cache_sync(dev, vq);

        vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
}

static __rte_always_inline void
vhost_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
                                   uint16_t buf_id,
                                   uint16_t count)
{
        uint16_t flags;

        flags = vq->desc_packed[vq->last_used_idx].flags;
        if (vq->used_wrap_counter) {
                flags |= VRING_DESC_F_USED;
                flags |= VRING_DESC_F_AVAIL;
        } else {
                flags &= ~VRING_DESC_F_USED;
                flags &= ~VRING_DESC_F_AVAIL;
        }

        if (!vq->shadow_used_idx) {
                vq->shadow_last_used_idx = vq->last_used_idx;

                vq->shadow_used_packed[0].id  = buf_id;
                vq->shadow_used_packed[0].len = 0;
                vq->shadow_used_packed[0].flags = flags;
                vq->shadow_used_idx++;
        } else {
                vq->desc_packed[vq->last_used_idx].id = buf_id;
                vq->desc_packed[vq->last_used_idx].len = 0;
                vq->desc_packed[vq->last_used_idx].flags = flags;
        }

        vq_inc_last_used_packed(vq, count);
}

static __rte_always_inline void
vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
                                           uint16_t buf_id,
                                           uint16_t count)
{
        uint16_t flags;

        vq->shadow_used_packed[0].id = buf_id;

        flags = vq->desc_packed[vq->last_used_idx].flags;
        if (vq->used_wrap_counter) {
                flags |= VRING_DESC_F_USED;
                flags |= VRING_DESC_F_AVAIL;
        } else {
                flags &= ~VRING_DESC_F_USED;
                flags &= ~VRING_DESC_F_AVAIL;
        }

        if (!vq->shadow_used_idx) {
                vq->shadow_last_used_idx = vq->last_used_idx;
                vq->shadow_used_packed[0].len = 0;
                vq->shadow_used_packed[0].flags = flags;
                vq->shadow_used_idx++;
        }

        vq_inc_last_used_packed(vq, count);
}

static __rte_always_inline void
vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
                                   struct vhost_virtqueue *vq,
                                   uint32_t len[],
                                   uint16_t id[],
                                   uint16_t count[],
                                   uint16_t num_buffers)
{
        uint16_t i;

        for (i = 0; i < num_buffers; i++) {
                /* enqueue shadow flush action aligned with batch num */
                if (!vq->shadow_used_idx)
                        vq->shadow_aligned_idx = vq->last_used_idx &
                                PACKED_BATCH_MASK;
                vq->shadow_used_packed[vq->shadow_used_idx].id  = id[i];
                vq->shadow_used_packed[vq->shadow_used_idx].len = len[i];
                vq->shadow_used_packed[vq->shadow_used_idx].count = count[i];
                vq->shadow_aligned_idx += count[i];
                vq->shadow_used_idx++;
        }

        if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
                do_data_copy_enqueue(dev, vq);
                vhost_flush_enqueue_shadow_packed(dev, vq);
        }
}

/* skip the write when the value is unchanged, to lessen cache issues */
#define ASSIGN_UNLESS_EQUAL(var, val) do {      \
        if ((var) != (val))                     \
                (var) = (val);                  \
} while (0)

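/*
 * Translate the mbuf TX offload flags into the virtio net header:
 * L4 checksum offload (TCP/UDP/SCTP) and GSO type/size for TSO/UFO.
 * The IPv4 header checksum cannot be deferred to the guest, so it is
 * computed in place below.
 */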
static __rte_always_inline void
virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
{
        uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;

        if (m_buf->ol_flags & PKT_TX_TCP_SEG)
                csum_l4 |= PKT_TX_TCP_CKSUM;

        if (csum_l4) {
                net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
                net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;

                switch (csum_l4) {
                case PKT_TX_TCP_CKSUM:
                        net_hdr->csum_offset = (offsetof(struct rte_tcp_hdr,
                                                cksum));
                        break;
                case PKT_TX_UDP_CKSUM:
                        net_hdr->csum_offset = (offsetof(struct rte_udp_hdr,
                                                dgram_cksum));
                        break;
                case PKT_TX_SCTP_CKSUM:
                        net_hdr->csum_offset = (offsetof(struct rte_sctp_hdr,
                                                cksum));
                        break;
                }
        } else {
                ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
                ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
                ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
        }

        /* IP cksum verification cannot be bypassed, so calculate it here */
        if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
                struct rte_ipv4_hdr *ipv4_hdr;

                ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
                                                   m_buf->l2_len);
                ipv4_hdr->hdr_checksum = 0;
                ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
        }

        if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
                if (m_buf->ol_flags & PKT_TX_IPV4)
                        net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else
                        net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
                net_hdr->gso_size = m_buf->tso_segsz;
                net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
                                        + m_buf->l4_len;
        } else if (m_buf->ol_flags & PKT_TX_UDP_SEG) {
                net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
                net_hdr->gso_size = m_buf->tso_segsz;
                net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
                        m_buf->l4_len;
        } else {
                ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
                ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
                ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
        }
}

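/*
 * Map one guest descriptor's IOVA range into host virtual addresses,
 * appending one buf_vec entry per host-contiguous chunk. Fails when a
 * mapping is missing or more than BUF_VECTOR_MAX entries are needed.
 */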
static __rte_always_inline int
map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
                struct buf_vector *buf_vec, uint16_t *vec_idx,
                uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
{
        uint16_t vec_id = *vec_idx;

        while (desc_len) {
                uint64_t desc_addr;
                uint64_t desc_chunck_len = desc_len;

                if (unlikely(vec_id >= BUF_VECTOR_MAX))
                        return -1;

                desc_addr = vhost_iova_to_vva(dev, vq,
                                desc_iova,
                                &desc_chunck_len,
                                perm);
                if (unlikely(!desc_addr))
                        return -1;

                rte_prefetch0((void *)(uintptr_t)desc_addr);

                buf_vec[vec_id].buf_iova = desc_iova;
                buf_vec[vec_id].buf_addr = desc_addr;
                buf_vec[vec_id].buf_len  = desc_chunck_len;

                desc_len -= desc_chunck_len;
                desc_iova += desc_chunck_len;
                vec_id++;
        }
        *vec_idx = vec_id;

        return 0;
}

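/*
 * Walk one descriptor chain of a split ring (following an indirect
 * table when present) and collect its buffers into buf_vec, returning
 * the head index and the total length of the chain.
 */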
static __rte_always_inline int
fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
                         uint32_t avail_idx, uint16_t *vec_idx,
                         struct buf_vector *buf_vec, uint16_t *desc_chain_head,
                         uint32_t *desc_chain_len, uint8_t perm)
{
        uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
        uint16_t vec_id = *vec_idx;
        uint32_t len    = 0;
        uint64_t dlen;
        uint32_t nr_descs = vq->size;
        uint32_t cnt    = 0;
        struct vring_desc *descs = vq->desc;
        struct vring_desc *idesc = NULL;

        if (unlikely(idx >= vq->size))
                return -1;

        *desc_chain_head = idx;

        if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
                dlen = vq->desc[idx].len;
                nr_descs = dlen / sizeof(struct vring_desc);
                if (unlikely(nr_descs > vq->size))
                        return -1;

                descs = (struct vring_desc *)(uintptr_t)
                        vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
                                                &dlen,
                                                VHOST_ACCESS_RO);
                if (unlikely(!descs))
                        return -1;

                if (unlikely(dlen < vq->desc[idx].len)) {
                        /*
                         * The indirect desc table is not contiguous
                         * in process VA space, so we have to copy it.
                         */
                        idesc = vhost_alloc_copy_ind_table(dev, vq,
                                        vq->desc[idx].addr, vq->desc[idx].len);
                        if (unlikely(!idesc))
                                return -1;

                        descs = idesc;
                }

                idx = 0;
        }

        while (1) {
                if (unlikely(idx >= nr_descs || cnt++ >= nr_descs)) {
                        free_ind_table(idesc);
                        return -1;
                }

                len += descs[idx].len;

                if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
                                                descs[idx].addr, descs[idx].len,
                                                perm))) {
                        free_ind_table(idesc);
                        return -1;
                }

                if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
                        break;

                idx = descs[idx].next;
        }

        *desc_chain_len = len;
        *vec_idx = vec_id;

        if (unlikely(!!idesc))
                free_ind_table(idesc);

        return 0;
}

/*
 * Returns -1 on fail, 0 on success
 */
static inline int
reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
                                uint32_t size, struct buf_vector *buf_vec,
                                uint16_t *num_buffers, uint16_t avail_head,
                                uint16_t *nr_vec)
{
        uint16_t cur_idx;
        uint16_t vec_idx = 0;
        uint16_t max_tries, tries = 0;

        uint16_t head_idx = 0;
        uint32_t len = 0;

        *num_buffers = 0;
        cur_idx  = vq->last_avail_idx;

        if (rxvq_is_mergeable(dev))
                max_tries = vq->size - 1;
        else
                max_tries = 1;

        while (size > 0) {
                if (unlikely(cur_idx == avail_head))
                        return -1;
                /*
                 * If we have tried all available ring items and still
                 * cannot get enough buffers, something abnormal has
                 * happened.
                 */
                if (unlikely(++tries > max_tries))
                        return -1;

                if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
                                                &vec_idx, buf_vec,
                                                &head_idx, &len,
                                                VHOST_ACCESS_RW) < 0))
                        return -1;
                len = RTE_MIN(len, size);
                update_shadow_used_ring_split(vq, head_idx, len);
                size -= len;

                cur_idx++;
                *num_buffers += 1;
        }

        *nr_vec = vec_idx;

        return 0;
}

static __rte_always_inline int
fill_vec_buf_packed_indirect(struct virtio_net *dev,
                        struct vhost_virtqueue *vq,
                        struct vring_packed_desc *desc, uint16_t *vec_idx,
                        struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
{
        uint16_t i;
        uint32_t nr_descs;
        uint16_t vec_id = *vec_idx;
        uint64_t dlen;
        struct vring_packed_desc *descs, *idescs = NULL;

        dlen = desc->len;
        descs = (struct vring_packed_desc *)(uintptr_t)
                vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
        if (unlikely(!descs))
                return -1;

        if (unlikely(dlen < desc->len)) {
                /*
                 * The indirect desc table is not contiguous
                 * in process VA space, so we have to copy it.
                 */
                idescs = vhost_alloc_copy_ind_table(dev,
                                vq, desc->addr, desc->len);
                if (unlikely(!idescs))
                        return -1;

                descs = idescs;
        }

        nr_descs = desc->len / sizeof(struct vring_packed_desc);
        if (unlikely(nr_descs >= vq->size)) {
                free_ind_table(idescs);
                return -1;
        }

        for (i = 0; i < nr_descs; i++) {
                if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
                        free_ind_table(idescs);
                        return -1;
                }

                *len += descs[i].len;
                if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
                                                descs[i].addr, descs[i].len,
                                                perm)))
                        return -1;
        }
        *vec_idx = vec_id;

        if (unlikely(!!idescs))
                free_ind_table(idescs);

        return 0;
}

static __rte_always_inline int
fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
                                uint16_t avail_idx, uint16_t *desc_count,
                                struct buf_vector *buf_vec, uint16_t *vec_idx,
                                uint16_t *buf_id, uint32_t *len, uint8_t perm)
{
        bool wrap_counter = vq->avail_wrap_counter;
        struct vring_packed_desc *descs = vq->desc_packed;
        uint16_t vec_id = *vec_idx;

        if (avail_idx < vq->last_avail_idx)
                wrap_counter ^= 1;

        /*
         * Perform a load-acquire barrier in desc_is_avail to
         * enforce the ordering between desc flags and desc
         * content.
         */
        if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
                return -1;

        *desc_count = 0;
        *len = 0;

        while (1) {
                if (unlikely(vec_id >= BUF_VECTOR_MAX))
                        return -1;

                if (unlikely(*desc_count >= vq->size))
                        return -1;

                *desc_count += 1;
                *buf_id = descs[avail_idx].id;

                if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
                        if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
                                                        &descs[avail_idx],
                                                        &vec_id, buf_vec,
                                                        len, perm) < 0))
                                return -1;
                } else {
                        *len += descs[avail_idx].len;

                        if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
                                                        descs[avail_idx].addr,
                                                        descs[avail_idx].len,
                                                        perm)))
                                return -1;
                }

                if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
                        break;

                if (++avail_idx >= vq->size) {
                        avail_idx -= vq->size;
                        wrap_counter ^= 1;
                }
        }

        *vec_idx = vec_id;

        return 0;
}

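/*
 * Slow path for writing the virtio net header when the first descriptor
 * is too small to hold it: spread the header across successive buf_vec
 * entries, dirty-logging each written chunk.
 */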
static __rte_noinline void
copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
                struct buf_vector *buf_vec,
                struct virtio_net_hdr_mrg_rxbuf *hdr)
{
        uint64_t len;
        uint64_t remain = dev->vhost_hlen;
        uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
        uint64_t iova = buf_vec->buf_iova;

        while (remain) {
                len = RTE_MIN(remain, buf_vec->buf_len);
                dst = buf_vec->buf_addr;
                rte_memcpy((void *)(uintptr_t)dst,
                                (void *)(uintptr_t)src, len);

                PRINT_PACKET(dev, (uintptr_t)dst, (uint32_t)len, 0);
                vhost_log_cache_write_iova(dev, vq, iova, len);

                remain -= len;
                iova += len;
                src += len;
                buf_vec++;
        }
}

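/*
 * Synchronous enqueue copy: write the virtio net header, then copy the
 * (possibly chained) mbuf data into the guest buffers described by
 * buf_vec, batching small copies through vq->batch_copy_elems.
 */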
static __rte_always_inline int
copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
                            struct rte_mbuf *m, struct buf_vector *buf_vec,
                            uint16_t nr_vec, uint16_t num_buffers)
{
        uint32_t vec_idx = 0;
        uint32_t mbuf_offset, mbuf_avail;
        uint32_t buf_offset, buf_avail;
        uint64_t buf_addr, buf_iova, buf_len;
        uint32_t cpy_len;
        uint64_t hdr_addr;
        struct rte_mbuf *hdr_mbuf;
        struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
        struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
        int error = 0;

        if (unlikely(m == NULL)) {
                error = -1;
                goto out;
        }

        buf_addr = buf_vec[vec_idx].buf_addr;
        buf_iova = buf_vec[vec_idx].buf_iova;
        buf_len = buf_vec[vec_idx].buf_len;

        if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
                error = -1;
                goto out;
        }

        hdr_mbuf = m;
        hdr_addr = buf_addr;
        if (unlikely(buf_len < dev->vhost_hlen))
                hdr = &tmp_hdr;
        else
                hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;

        VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
                dev->vid, num_buffers);

        if (unlikely(buf_len < dev->vhost_hlen)) {
                buf_offset = dev->vhost_hlen - buf_len;
                vec_idx++;
                buf_addr = buf_vec[vec_idx].buf_addr;
                buf_iova = buf_vec[vec_idx].buf_iova;
                buf_len = buf_vec[vec_idx].buf_len;
                buf_avail = buf_len - buf_offset;
        } else {
                buf_offset = dev->vhost_hlen;
                buf_avail = buf_len - dev->vhost_hlen;
        }

        mbuf_avail  = rte_pktmbuf_data_len(m);
        mbuf_offset = 0;
        while (mbuf_avail != 0 || m->next != NULL) {
                /* done with current buf, get the next one */
                if (buf_avail == 0) {
                        vec_idx++;
                        if (unlikely(vec_idx >= nr_vec)) {
                                error = -1;
                                goto out;
                        }

                        buf_addr = buf_vec[vec_idx].buf_addr;
                        buf_iova = buf_vec[vec_idx].buf_iova;
                        buf_len = buf_vec[vec_idx].buf_len;

                        buf_offset = 0;
                        buf_avail  = buf_len;
                }

                /* done with current mbuf, get the next one */
                if (mbuf_avail == 0) {
                        m = m->next;

                        mbuf_offset = 0;
                        mbuf_avail  = rte_pktmbuf_data_len(m);
                }

                if (hdr_addr) {
                        virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
                        if (rxvq_is_mergeable(dev))
                                ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
                                                num_buffers);

                        if (unlikely(hdr == &tmp_hdr)) {
                                copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
                        } else {
                                PRINT_PACKET(dev, (uintptr_t)hdr_addr,
                                                dev->vhost_hlen, 0);
                                vhost_log_cache_write_iova(dev, vq,
                                                buf_vec[0].buf_iova,
                                                dev->vhost_hlen);
                        }

                        hdr_addr = 0;
                }

                cpy_len = RTE_MIN(buf_avail, mbuf_avail);

                if (likely(cpy_len > MAX_BATCH_LEN ||
                                        vq->batch_copy_nb_elems >= vq->size)) {
                        rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
                                rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
                                cpy_len);
                        vhost_log_cache_write_iova(dev, vq,
                                                   buf_iova + buf_offset,
                                                   cpy_len);
                        PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
                                cpy_len, 0);
                } else {
                        batch_copy[vq->batch_copy_nb_elems].dst =
                                (void *)((uintptr_t)(buf_addr + buf_offset));
                        batch_copy[vq->batch_copy_nb_elems].src =
                                rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
                        batch_copy[vq->batch_copy_nb_elems].log_addr =
                                buf_iova + buf_offset;
                        batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
                        vq->batch_copy_nb_elems++;
                }

                mbuf_avail  -= cpy_len;
                mbuf_offset += cpy_len;
                buf_avail  -= cpy_len;
                buf_offset += cpy_len;
        }

out:

        return error;
}

static __rte_always_inline void
async_fill_vec(struct iovec *v, void *base, size_t len)
{
        v->iov_base = base;
        v->iov_len = len;
}

static __rte_always_inline void
async_fill_iter(struct rte_vhost_iov_iter *it, size_t count,
        struct iovec *vec, unsigned long nr_seg)
{
        it->offset = 0;
        it->count = count;

        if (count) {
                it->iov = vec;
                it->nr_segs = nr_seg;
        } else {
                it->iov = 0;
                it->nr_segs = 0;
        }
}

static __rte_always_inline void
async_fill_desc(struct rte_vhost_async_desc *desc,
        struct rte_vhost_iov_iter *src, struct rte_vhost_iov_iter *dst)
{
        desc->src = src;
        desc->dst = dst;
}

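/*
 * Async counterpart of copy_mbuf_to_desc(): segments of at least
 * vq->async_threshold bytes are handed to the copy engine as src/dst
 * iovec pairs (guest buffers addressed by host physical address), while
 * the remainder is copied by the CPU as in the synchronous path.
 */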
static __rte_always_inline int
async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        struct rte_mbuf *m, struct buf_vector *buf_vec,
                        uint16_t nr_vec, uint16_t num_buffers,
                        struct iovec *src_iovec, struct iovec *dst_iovec,
                        struct rte_vhost_iov_iter *src_it,
                        struct rte_vhost_iov_iter *dst_it)
{
        uint32_t vec_idx = 0;
        uint32_t mbuf_offset, mbuf_avail;
        uint32_t buf_offset, buf_avail;
        uint64_t buf_addr, buf_iova, buf_len;
        uint32_t cpy_len, cpy_threshold;
        uint64_t hdr_addr;
        struct rte_mbuf *hdr_mbuf;
        struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
        struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
        int error = 0;
        uint64_t mapped_len;

        uint32_t tlen = 0;
        int tvec_idx = 0;
        void *hpa;

        if (unlikely(m == NULL)) {
                error = -1;
                goto out;
        }

        cpy_threshold = vq->async_threshold;

        buf_addr = buf_vec[vec_idx].buf_addr;
        buf_iova = buf_vec[vec_idx].buf_iova;
        buf_len = buf_vec[vec_idx].buf_len;

        if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
                error = -1;
                goto out;
        }

        hdr_mbuf = m;
        hdr_addr = buf_addr;
        if (unlikely(buf_len < dev->vhost_hlen))
                hdr = &tmp_hdr;
        else
                hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;

        VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
                dev->vid, num_buffers);

        if (unlikely(buf_len < dev->vhost_hlen)) {
                buf_offset = dev->vhost_hlen - buf_len;
                vec_idx++;
                buf_addr = buf_vec[vec_idx].buf_addr;
                buf_iova = buf_vec[vec_idx].buf_iova;
                buf_len = buf_vec[vec_idx].buf_len;
                buf_avail = buf_len - buf_offset;
        } else {
                buf_offset = dev->vhost_hlen;
                buf_avail = buf_len - dev->vhost_hlen;
        }

        mbuf_avail  = rte_pktmbuf_data_len(m);
        mbuf_offset = 0;

        while (mbuf_avail != 0 || m->next != NULL) {
                /* done with current buf, get the next one */
                if (buf_avail == 0) {
                        vec_idx++;
                        if (unlikely(vec_idx >= nr_vec)) {
                                error = -1;
                                goto out;
                        }

                        buf_addr = buf_vec[vec_idx].buf_addr;
                        buf_iova = buf_vec[vec_idx].buf_iova;
                        buf_len = buf_vec[vec_idx].buf_len;

                        buf_offset = 0;
                        buf_avail  = buf_len;
                }

                /* done with current mbuf, get the next one */
                if (mbuf_avail == 0) {
                        m = m->next;

                        mbuf_offset = 0;
                        mbuf_avail  = rte_pktmbuf_data_len(m);
                }

                if (hdr_addr) {
                        virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
                        if (rxvq_is_mergeable(dev))
                                ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
                                                num_buffers);

                        if (unlikely(hdr == &tmp_hdr)) {
                                copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
                        } else {
                                PRINT_PACKET(dev, (uintptr_t)hdr_addr,
                                                dev->vhost_hlen, 0);
                                vhost_log_cache_write_iova(dev, vq,
                                                buf_vec[0].buf_iova,
                                                dev->vhost_hlen);
                        }

                        hdr_addr = 0;
                }

                cpy_len = RTE_MIN(buf_avail, mbuf_avail);

                while (unlikely(cpy_len && cpy_len >= cpy_threshold)) {
                        hpa = (void *)(uintptr_t)gpa_to_first_hpa(dev,
                                        buf_iova + buf_offset,
                                        cpy_len, &mapped_len);

                        if (unlikely(!hpa || mapped_len < cpy_threshold))
                                break;

                        async_fill_vec(src_iovec + tvec_idx,
                                (void *)(uintptr_t)rte_pktmbuf_iova_offset(m,
                                mbuf_offset), (size_t)mapped_len);

                        async_fill_vec(dst_iovec + tvec_idx,
                                        hpa, (size_t)mapped_len);

                        tlen += (uint32_t)mapped_len;
                        cpy_len -= (uint32_t)mapped_len;
                        mbuf_avail  -= (uint32_t)mapped_len;
                        mbuf_offset += (uint32_t)mapped_len;
                        buf_avail  -= (uint32_t)mapped_len;
                        buf_offset += (uint32_t)mapped_len;
                        tvec_idx++;
                }

                if (likely(cpy_len)) {
                        if (unlikely(vq->batch_copy_nb_elems >= vq->size)) {
                                rte_memcpy(
                                (void *)((uintptr_t)(buf_addr + buf_offset)),
                                rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
                                cpy_len);

                                PRINT_PACKET(dev,
                                        (uintptr_t)(buf_addr + buf_offset),
                                        cpy_len, 0);
                        } else {
                                batch_copy[vq->batch_copy_nb_elems].dst =
                                (void *)((uintptr_t)(buf_addr + buf_offset));
                                batch_copy[vq->batch_copy_nb_elems].src =
                                rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
                                batch_copy[vq->batch_copy_nb_elems].log_addr =
                                        buf_iova + buf_offset;
                                batch_copy[vq->batch_copy_nb_elems].len =
                                        cpy_len;
                                vq->batch_copy_nb_elems++;
                        }

                        mbuf_avail  -= cpy_len;
                        mbuf_offset += cpy_len;
                        buf_avail  -= cpy_len;
                        buf_offset += cpy_len;
                }
        }

out:
        if (tlen) {
                async_fill_iter(src_it, tlen, src_iovec, tvec_idx);
                async_fill_iter(dst_it, tlen, dst_iovec, tvec_idx);
        } else {
                src_it->count = 0;
        }

        return error;
}

static __rte_always_inline int
vhost_enqueue_single_packed(struct virtio_net *dev,
                            struct vhost_virtqueue *vq,
                            struct rte_mbuf *pkt,
                            struct buf_vector *buf_vec,
                            uint16_t *nr_descs)
{
        uint16_t nr_vec = 0;
        uint16_t avail_idx = vq->last_avail_idx;
        uint16_t max_tries, tries = 0;
        uint16_t buf_id = 0;
        uint32_t len = 0;
        uint16_t desc_count;
        uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
        uint16_t num_buffers = 0;
        uint32_t buffer_len[vq->size];
        uint16_t buffer_buf_id[vq->size];
        uint16_t buffer_desc_count[vq->size];

        if (rxvq_is_mergeable(dev))
                max_tries = vq->size - 1;
        else
                max_tries = 1;

        while (size > 0) {
                /*
                 * If we have tried all available ring items and still
                 * cannot get enough buffers, something abnormal has
                 * happened.
                 */
                if (unlikely(++tries > max_tries))
                        return -1;

                if (unlikely(fill_vec_buf_packed(dev, vq,
                                                avail_idx, &desc_count,
                                                buf_vec, &nr_vec,
                                                &buf_id, &len,
                                                VHOST_ACCESS_RW) < 0))
                        return -1;

                len = RTE_MIN(len, size);
                size -= len;

                buffer_len[num_buffers] = len;
                buffer_buf_id[num_buffers] = buf_id;
                buffer_desc_count[num_buffers] = desc_count;
                num_buffers += 1;

                *nr_descs += desc_count;
                avail_idx += desc_count;
                if (avail_idx >= vq->size)
                        avail_idx -= vq->size;
        }

        if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0)
                return -1;

        vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
                                           buffer_desc_count, num_buffers);

        return 0;
}

static __rte_noinline uint32_t
virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
        struct rte_mbuf **pkts, uint32_t count)
{
        uint32_t pkt_idx = 0;
        uint16_t num_buffers;
        struct buf_vector buf_vec[BUF_VECTOR_MAX];
        uint16_t avail_head;

        /*
         * The ordering between avail index and
         * desc reads needs to be enforced.
         */
        avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);

        rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);

        for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
                uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
                uint16_t nr_vec = 0;

                if (unlikely(reserve_avail_buf_split(dev, vq,
                                                pkt_len, buf_vec, &num_buffers,
                                                avail_head, &nr_vec) < 0)) {
                        VHOST_LOG_DATA(DEBUG,
                                "(%d) failed to get enough desc from vring\n",
                                dev->vid);
                        vq->shadow_used_idx -= num_buffers;
                        break;
                }

                VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
                        dev->vid, vq->last_avail_idx,
                        vq->last_avail_idx + num_buffers);

                if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
                                                buf_vec, nr_vec,
                                                num_buffers) < 0) {
                        vq->shadow_used_idx -= num_buffers;
                        break;
                }

                vq->last_avail_idx += num_buffers;
        }

        do_data_copy_enqueue(dev, vq);

        if (likely(vq->shadow_used_idx)) {
                flush_shadow_used_ring_split(dev, vq);
                vhost_vring_call_split(dev, vq);
        }

        return pkt_idx;
}

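/*
 * Fast path for the packed ring: enqueue a full batch of
 * PACKED_BATCH_SIZE single-mbuf packets at once, falling back (-1)
 * whenever any packet is chained, any descriptor is unavailable, or
 * any buffer is too small.
 */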
static __rte_always_inline int
virtio_dev_rx_batch_packed(struct virtio_net *dev,
                           struct vhost_virtqueue *vq,
                           struct rte_mbuf **pkts)
{
        bool wrap_counter = vq->avail_wrap_counter;
        struct vring_packed_desc *descs = vq->desc_packed;
        uint16_t avail_idx = vq->last_avail_idx;
        uint64_t desc_addrs[PACKED_BATCH_SIZE];
        struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
        uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        uint64_t lens[PACKED_BATCH_SIZE];
        uint16_t ids[PACKED_BATCH_SIZE];
        uint16_t i;

        if (unlikely(avail_idx & PACKED_BATCH_MASK))
                return -1;

        if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
                return -1;

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                if (unlikely(pkts[i]->next != NULL))
                        return -1;
                if (unlikely(!desc_is_avail(&descs[avail_idx + i],
                                            wrap_counter)))
                        return -1;
        }

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                lens[i] = descs[avail_idx + i].len;

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                if (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset)))
                        return -1;
        }

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                desc_addrs[i] = vhost_iova_to_vva(dev, vq,
                                                  descs[avail_idx + i].addr,
                                                  &lens[i],
                                                  VHOST_ACCESS_RW);

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                if (unlikely(!desc_addrs[i]))
                        return -1;
                if (unlikely(lens[i] != descs[avail_idx + i].len))
                        return -1;
        }

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
                hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
                                        (uintptr_t)desc_addrs[i];
                lens[i] = pkts[i]->pkt_len +
                        sizeof(struct virtio_net_hdr_mrg_rxbuf);
        }

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr);

        vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                rte_memcpy((void *)(uintptr_t)(desc_addrs[i] + buf_offset),
                           rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
                           pkts[i]->pkt_len);
        }

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                vhost_log_cache_write_iova(dev, vq, descs[avail_idx + i].addr,
                                           lens[i]);

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                ids[i] = descs[avail_idx + i].id;

        vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);

        return 0;
}

static __rte_always_inline int16_t
virtio_dev_rx_single_packed(struct virtio_net *dev,
                            struct vhost_virtqueue *vq,
                            struct rte_mbuf *pkt)
{
        struct buf_vector buf_vec[BUF_VECTOR_MAX];
        uint16_t nr_descs = 0;

        if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
                                                 &nr_descs) < 0)) {
                VHOST_LOG_DATA(DEBUG,
                                "(%d) failed to get enough desc from vring\n",
                                dev->vid);
                return -1;
        }

        VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
                        dev->vid, vq->last_avail_idx,
                        vq->last_avail_idx + nr_descs);

        vq_inc_last_avail_packed(vq, nr_descs);

        return 0;
}

static __rte_noinline uint32_t
virtio_dev_rx_packed(struct virtio_net *dev,
                     struct vhost_virtqueue *__rte_restrict vq,
                     struct rte_mbuf **__rte_restrict pkts,
                     uint32_t count)
{
        uint32_t pkt_idx = 0;
        uint32_t remained = count;

        do {
                rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);

                if (remained >= PACKED_BATCH_SIZE) {
                        if (!virtio_dev_rx_batch_packed(dev, vq,
                                                        &pkts[pkt_idx])) {
                                pkt_idx += PACKED_BATCH_SIZE;
                                remained -= PACKED_BATCH_SIZE;
                                continue;
                        }
                }

                if (virtio_dev_rx_single_packed(dev, vq, pkts[pkt_idx]))
                        break;
                pkt_idx++;
                remained--;

        } while (pkt_idx < count);

        if (vq->shadow_used_idx) {
                do_data_copy_enqueue(dev, vq);
                vhost_flush_enqueue_shadow_packed(dev, vq);
        }

        if (pkt_idx)
                vhost_vring_call_packed(dev, vq);

        return pkt_idx;
}

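/*
 * Common enqueue entry: validate the queue, take the access lock (and
 * the IOTLB read lock when vIOMMU is in use), then dispatch to the
 * packed or split ring implementation.
 */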
static __rte_always_inline uint32_t
virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
        struct rte_mbuf **pkts, uint32_t count)
{
        struct vhost_virtqueue *vq;
        uint32_t nb_tx = 0;

        VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
        if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
                VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
                        dev->vid, __func__, queue_id);
                return 0;
        }

        vq = dev->virtqueue[queue_id];

        rte_spinlock_lock(&vq->access_lock);

        if (unlikely(vq->enabled == 0))
                goto out_access_unlock;

        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
                vhost_user_iotlb_rd_lock(vq);

        if (unlikely(vq->access_ok == 0))
                if (unlikely(vring_translate(dev, vq) < 0))
                        goto out;

        count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
        if (count == 0)
                goto out;

        if (vq_is_packed(dev))
                nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
        else
                nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);

out:
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
                vhost_user_iotlb_rd_unlock(vq);

out_access_unlock:
        rte_spinlock_unlock(&vq->access_lock);

        return nb_tx;
}

1453 uint16_t
1454 rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
1455         struct rte_mbuf **__rte_restrict pkts, uint16_t count)
1456 {
1457         struct virtio_net *dev = get_device(vid);
1458
1459         if (!dev)
1460                 return 0;
1461
1462         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
1463                 VHOST_LOG_DATA(ERR,
1464                         "(%d) %s: built-in vhost net backend is disabled.\n",
1465                         dev->vid, __func__);
1466                 return 0;
1467         }
1468
1469         return virtio_dev_rx(dev, queue_id, pkts, count);
1470 }
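
/*
 * Usage sketch (illustrative only, not part of this file): a switching
 * core typically enqueues a burst received from a NIC into the guest.
 * "vid" and "port" are hypothetical; queue 0 is the first RX queue of
 * the device (enqueue requires an even queue index).
 *
 *	struct rte_mbuf *pkts[MAX_PKT_BURST];
 *	uint16_t nb_rx, i;
 *
 *	nb_rx = rte_eth_rx_burst(port, 0, pkts, MAX_PKT_BURST);
 *	(void)rte_vhost_enqueue_burst(vid, 0, pkts, nb_rx);
 *	// data is copied into guest buffers synchronously, so every
 *	// mbuf can be freed right away, enqueued or not
 *	for (i = 0; i < nb_rx; i++)
 *		rte_pktmbuf_free(pkts[i]);
 */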
1471
1472 static __rte_always_inline uint16_t
1473 virtio_dev_rx_async_get_info_idx(uint16_t pkts_idx,
1474         uint16_t vq_size, uint16_t n_inflight)
1475 {
1476         return pkts_idx > n_inflight ? (pkts_idx - n_inflight) :
1477                 (vq_size - n_inflight + pkts_idx) & (vq_size - 1);
1478 }
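
/*
 * Worked example of the wrap-around math above: with vq_size = 256,
 * pkts_idx = 10 and n_inflight = 20, the oldest in-flight packet sits
 * at slot (256 - 20 + 10) & 255 = 246, i.e. the in-flight window wraps
 * past the end of the ring back to the front.
 */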
1479
1480 static __rte_noinline uint32_t
1481 virtio_dev_rx_async_submit_split(struct virtio_net *dev,
1482         struct vhost_virtqueue *vq, uint16_t queue_id,
1483         struct rte_mbuf **pkts, uint32_t count)
1484 {
1485         uint32_t pkt_idx = 0, pkt_burst_idx = 0;
1486         uint16_t num_buffers;
1487         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1488         uint16_t avail_head;
1489
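	/*
	 * The iovec pool is split in halves: the first half holds source
	 * (mbuf) segments, the second half destination (guest) segments.
	 * Iterators come out of it_pool as interleaved src/dst pairs,
	 * hence the "+= 2" stride when moving to the next packet.
	 */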
1490         struct rte_vhost_iov_iter *it_pool = vq->it_pool;
1491         struct iovec *vec_pool = vq->vec_pool;
1492         struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
1493         struct iovec *src_iovec = vec_pool;
1494         struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
1495         struct rte_vhost_iov_iter *src_it = it_pool;
1496         struct rte_vhost_iov_iter *dst_it = it_pool + 1;
1497         uint16_t n_free_slot, slot_idx = 0;
1498         uint16_t segs_await = 0;
1499         struct async_inflight_info *pkts_info = vq->async_pkts_info;
1500         uint32_t n_pkts = 0, pkt_err = 0;
1501
1502         /*
1503          * The ordering between avail index and desc reads needs to be enforced.
1504          */
1505         avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
1506
1507         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1508
1509         for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
1510                 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
1511                 uint16_t nr_vec = 0;
1512
1513                 if (unlikely(reserve_avail_buf_split(dev, vq,
1514                                                 pkt_len, buf_vec, &num_buffers,
1515                                                 avail_head, &nr_vec) < 0)) {
1516                         VHOST_LOG_DATA(DEBUG,
1517                                 "(%d) failed to get enough desc from vring\n",
1518                                 dev->vid);
1519                         vq->shadow_used_idx -= num_buffers;
1520                         break;
1521                 }
1522
1523                 VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
1524                         dev->vid, vq->last_avail_idx,
1525                         vq->last_avail_idx + num_buffers);
1526
1527                 if (async_mbuf_to_desc(dev, vq, pkts[pkt_idx],
1528                                 buf_vec, nr_vec, num_buffers,
1529                                 src_iovec, dst_iovec, src_it, dst_it) < 0) {
1530                         vq->shadow_used_idx -= num_buffers;
1531                         break;
1532                 }
1533
1534                 slot_idx = (vq->async_pkts_idx + pkt_idx) & (vq->size - 1);
1535                 if (src_it->count) {
1536                         async_fill_desc(&tdes[pkt_burst_idx], src_it, dst_it);
1537                         pkt_burst_idx++;
1538                         pkts_info[slot_idx].descs = num_buffers;
1539                         pkts_info[slot_idx].segs = src_it->nr_segs;
1540                         segs_await += src_it->nr_segs; /* count before advancing */
1541                         src_iovec += src_it->nr_segs;
1542                         dst_iovec += dst_it->nr_segs;
1543                         src_it += 2;
1544                         dst_it += 2;
1545                 } else {
1546                         pkts_info[slot_idx].info = num_buffers;
1547                         vq->async_pkts_inflight_n++;
1548                 }
1549
1550                 vq->last_avail_idx += num_buffers;
1551
1552                 /*
1553                  * Conditions to trigger async device transfer:
1554                  * - the buffered packet count reaches the transfer threshold
1555                  * - unused async iov entries drop below BUF_VECTOR_MAX
1556                  */
1557                 if (pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
1558                         (VHOST_MAX_ASYNC_VEC / 2 - segs_await <
1559                         BUF_VECTOR_MAX)) {
1560                         n_pkts = vq->async_ops.transfer_data(dev->vid,
1561                                         queue_id, tdes, 0, pkt_burst_idx);
1562                         src_iovec = vec_pool;
1563                         dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
1564                         src_it = it_pool;
1565                         dst_it = it_pool + 1;
1566                         segs_await = 0;
1567                         vq->async_pkts_inflight_n += pkt_burst_idx;
1568
1569                         if (unlikely(n_pkts < pkt_burst_idx)) {
1570                                 /*
1571                                  * Record the number of failed packets here;
1572                                  * actual error handling happens when the
1573                                  * application polls for completions.
1574                                  */
1575                                 pkt_err = pkt_burst_idx - n_pkts;
1576                                 pkt_burst_idx = 0;
1577                                 break;
1578                         }
1579
1580                         pkt_burst_idx = 0;
1581                 }
1582         }
1583
1584         if (pkt_burst_idx) {
1585                 n_pkts = vq->async_ops.transfer_data(dev->vid,
1586                                 queue_id, tdes, 0, pkt_burst_idx);
1587                 vq->async_pkts_inflight_n += pkt_burst_idx;
1588
1589                 if (unlikely(n_pkts < pkt_burst_idx))
1590                         pkt_err = pkt_burst_idx - n_pkts;
1591         }
1592
1593         do_data_copy_enqueue(dev, vq);
1594
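	/*
	 * Async transfers at the tail of the burst failed: walk the slots
	 * backwards and reclaim their descriptors from the shadow used
	 * ring and the avail index, until the pkt_err failed packets have
	 * been accounted for.
	 */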
1595         while (unlikely(pkt_err && pkt_idx)) {
1596                 if (pkts_info[slot_idx].segs)
1597                         pkt_err--;
1598                 vq->last_avail_idx -= pkts_info[slot_idx].descs;
1599                 vq->shadow_used_idx -= pkts_info[slot_idx].descs;
1600                 vq->async_pkts_inflight_n--;
1601                 slot_idx = (slot_idx - 1) & (vq->size - 1);
1602                 pkt_idx--;
1603         }
1604
1605         n_free_slot = vq->size - vq->async_pkts_idx;
1606         if (n_free_slot > pkt_idx) {
1607                 rte_memcpy(&vq->async_pkts_pending[vq->async_pkts_idx],
1608                         pkts, pkt_idx * sizeof(uintptr_t));
1609                 vq->async_pkts_idx += pkt_idx;
1610         } else {
1611                 rte_memcpy(&vq->async_pkts_pending[vq->async_pkts_idx],
1612                         pkts, n_free_slot * sizeof(uintptr_t));
1613                 rte_memcpy(&vq->async_pkts_pending[0],
1614                         &pkts[n_free_slot],
1615                         (pkt_idx - n_free_slot) * sizeof(uintptr_t));
1616                 vq->async_pkts_idx = pkt_idx - n_free_slot;
1617         }
1618
1619         if (likely(vq->shadow_used_idx))
1620                 async_flush_shadow_used_ring_split(dev, vq);
1621
1622         return pkt_idx;
1623 }
1624
1625 uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
1626                 struct rte_mbuf **pkts, uint16_t count)
1627 {
1628         struct virtio_net *dev = get_device(vid);
1629         struct vhost_virtqueue *vq;
1630         uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0;
1631         uint16_t start_idx, pkts_idx, vq_size;
1632         uint16_t n_inflight;
1633         struct async_inflight_info *pkts_info;
1634
1635         if (!dev)
1636                 return 0;
1637
1638         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
1639         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
1640                 VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
1641                         dev->vid, __func__, queue_id);
1642                 return 0;
1643         }
1644
1645         vq = dev->virtqueue[queue_id];
1646
1647         if (unlikely(!vq->async_registered)) {
1648                 VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
1649                         dev->vid, __func__, queue_id);
1650                 return 0;
1651         }
1652
1653         rte_spinlock_lock(&vq->access_lock);
1654
1655         n_inflight = vq->async_pkts_inflight_n;
1656         pkts_idx = vq->async_pkts_idx;
1657         pkts_info = vq->async_pkts_info;
1658         vq_size = vq->size;
1659         start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
1660                 vq_size, vq->async_pkts_inflight_n);
1661
1662         if (count > vq->async_last_pkts_n)
1663                 n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
1664                         queue_id, 0, count - vq->async_last_pkts_n);
1665         n_pkts_cpl += vq->async_last_pkts_n;
1666
1667         rte_atomic_thread_fence(__ATOMIC_RELEASE);
1668
1669         while (likely((n_pkts_put < count) && n_inflight)) {
1670                 uint16_t info_idx = (start_idx + n_pkts_put) & (vq_size - 1);
1671                 if (n_pkts_cpl && pkts_info[info_idx].segs)
1672                         n_pkts_cpl--;
1673                 else if (!n_pkts_cpl && pkts_info[info_idx].segs)
1674                         break;
1675                 n_pkts_put++;
1676                 n_inflight--;
1677                 n_descs += pkts_info[info_idx].descs;
1678         }
1679
1680         vq->async_last_pkts_n = n_pkts_cpl;
1681
1682         if (n_pkts_put) {
1683                 vq->async_pkts_inflight_n = n_inflight;
1684                 if (likely(vq->enabled && vq->access_ok)) {
1685                         __atomic_add_fetch(&vq->used->idx,
1686                                         n_descs, __ATOMIC_RELEASE);
1687                         vhost_vring_call_split(dev, vq);
1688                 }
1689
1690                 if (start_idx + n_pkts_put <= vq_size) {
1691                         rte_memcpy(pkts, &vq->async_pkts_pending[start_idx],
1692                                 n_pkts_put * sizeof(uintptr_t));
1693                 } else {
1694                         rte_memcpy(pkts, &vq->async_pkts_pending[start_idx],
1695                                 (vq_size - start_idx) * sizeof(uintptr_t));
1696                         rte_memcpy(&pkts[vq_size - start_idx],
1697                                 vq->async_pkts_pending,
1698                                 (n_pkts_put + start_idx - vq_size) *
1699                                 sizeof(uintptr_t));
1700                 }
1701         }
1702
1703         rte_spinlock_unlock(&vq->access_lock);
1704
1705         return n_pkts_put;
1706 }
1707
1708 static __rte_always_inline uint32_t
1709 virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
1710         struct rte_mbuf **pkts, uint32_t count)
1711 {
1712         struct vhost_virtqueue *vq;
1713         uint32_t nb_tx = 0;
1714
1715         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
1716         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
1717                 VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
1718                         dev->vid, __func__, queue_id);
1719                 return 0;
1720         }
1721
1722         vq = dev->virtqueue[queue_id];
1723
1724         rte_spinlock_lock(&vq->access_lock);
1725
1726         if (unlikely(vq->enabled == 0 || !vq->async_registered))
1727                 goto out_access_unlock;
1728
1729         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1730                 vhost_user_iotlb_rd_lock(vq);
1731
1732         if (unlikely(vq->access_ok == 0))
1733                 if (unlikely(vring_translate(dev, vq) < 0))
1734                         goto out;
1735
1736         count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
1737         if (count == 0)
1738                 goto out;
1739
1740         /* TODO: packed queue not implemented */
1741         if (vq_is_packed(dev))
1742                 nb_tx = 0;
1743         else
1744                 nb_tx = virtio_dev_rx_async_submit_split(dev,
1745                                 vq, queue_id, pkts, count);
1746
1747 out:
1748         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1749                 vhost_user_iotlb_rd_unlock(vq);
1750
1751 out_access_unlock:
1752         rte_spinlock_unlock(&vq->access_lock);
1753
1754         return nb_tx;
1755 }
1756
1757 uint16_t
1758 rte_vhost_submit_enqueue_burst(int vid, uint16_t queue_id,
1759                 struct rte_mbuf **pkts, uint16_t count)
1760 {
1761         struct virtio_net *dev = get_device(vid);
1762
1763         if (!dev)
1764                 return 0;
1765
1766         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
1767                 VHOST_LOG_DATA(ERR,
1768                         "(%d) %s: built-in vhost net backend is disabled.\n",
1769                         dev->vid, __func__);
1770                 return 0;
1771         }
1772
1773         return virtio_dev_rx_async_submit(dev, queue_id, pkts, count);
1774 }
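
/*
 * Usage sketch for the async path (illustrative only): bursts are
 * submitted and their mbufs reaped later via the completion poll.
 * "vid" is hypothetical, and an async channel must have been registered
 * on the queue beforehand with rte_vhost_async_channel_register().
 *
 *	struct rte_mbuf *pkts[32], *done[32];
 *	uint16_t n_done, i;
 *
 *	(void)rte_vhost_submit_enqueue_burst(vid, 0, pkts, 32);
 *	// submitted mbufs stay owned by vhost until their copies finish;
 *	// poll to get them back, then free them
 *	n_done = rte_vhost_poll_enqueue_completed(vid, 0, done, 32);
 *	for (i = 0; i < n_done; i++)
 *		rte_pktmbuf_free(done[i]);
 */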
1775
1776 static inline bool
1777 virtio_net_with_host_offload(struct virtio_net *dev)
1778 {
1779         if (dev->features &
1780                         ((1ULL << VIRTIO_NET_F_CSUM) |
1781                          (1ULL << VIRTIO_NET_F_HOST_ECN) |
1782                          (1ULL << VIRTIO_NET_F_HOST_TSO4) |
1783                          (1ULL << VIRTIO_NET_F_HOST_TSO6) |
1784                          (1ULL << VIRTIO_NET_F_HOST_UFO)))
1785                 return true;
1786
1787         return false;
1788 }
1789
1790 static void
1791 parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
1792 {
1793         struct rte_ipv4_hdr *ipv4_hdr;
1794         struct rte_ipv6_hdr *ipv6_hdr;
1795         void *l3_hdr = NULL;
1796         struct rte_ether_hdr *eth_hdr;
1797         uint16_t ethertype;
1798
1799         eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1800
1801         m->l2_len = sizeof(struct rte_ether_hdr);
1802         ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
1803
1804         if (ethertype == RTE_ETHER_TYPE_VLAN) {
1805                 struct rte_vlan_hdr *vlan_hdr =
1806                         (struct rte_vlan_hdr *)(eth_hdr + 1);
1807
1808                 m->l2_len += sizeof(struct rte_vlan_hdr);
1809                 ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
1810         }
1811
1812         l3_hdr = (char *)eth_hdr + m->l2_len;
1813
1814         switch (ethertype) {
1815         case RTE_ETHER_TYPE_IPV4:
1816                 ipv4_hdr = l3_hdr;
1817                 *l4_proto = ipv4_hdr->next_proto_id;
1818                 m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
1819                 *l4_hdr = (char *)l3_hdr + m->l3_len;
1820                 m->ol_flags |= PKT_TX_IPV4;
1821                 break;
1822         case RTE_ETHER_TYPE_IPV6:
1823                 ipv6_hdr = l3_hdr;
1824                 *l4_proto = ipv6_hdr->proto;
1825                 m->l3_len = sizeof(struct rte_ipv6_hdr);
1826                 *l4_hdr = (char *)l3_hdr + m->l3_len;
1827                 m->ol_flags |= PKT_TX_IPV6;
1828                 break;
1829         default:
1830                 m->l3_len = 0;
1831                 *l4_proto = 0;
1832                 *l4_hdr = NULL;
1833                 break;
1834         }
1835 }
1836
1837 static __rte_always_inline void
1838 vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
1839 {
1840         uint16_t l4_proto = 0;
1841         void *l4_hdr = NULL;
1842         struct rte_tcp_hdr *tcp_hdr = NULL;
1843
1844         if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
1845                 return;
1846
1847         parse_ethernet(m, &l4_proto, &l4_hdr);
1848         if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1849                 if (hdr->csum_start == (m->l2_len + m->l3_len)) {
1850                         switch (hdr->csum_offset) {
1851                         case (offsetof(struct rte_tcp_hdr, cksum)):
1852                                 if (l4_proto == IPPROTO_TCP)
1853                                         m->ol_flags |= PKT_TX_TCP_CKSUM;
1854                                 break;
1855                         case (offsetof(struct rte_udp_hdr, dgram_cksum)):
1856                                 if (l4_proto == IPPROTO_UDP)
1857                                         m->ol_flags |= PKT_TX_UDP_CKSUM;
1858                                 break;
1859                         case (offsetof(struct rte_sctp_hdr, cksum)):
1860                                 if (l4_proto == IPPROTO_SCTP)
1861                                         m->ol_flags |= PKT_TX_SCTP_CKSUM;
1862                                 break;
1863                         default:
1864                                 break;
1865                         }
1866                 }
1867         }
1868
1869         if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1870                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1871                 case VIRTIO_NET_HDR_GSO_TCPV4:
1872                 case VIRTIO_NET_HDR_GSO_TCPV6:
1873                         tcp_hdr = l4_hdr;
1874                         m->ol_flags |= PKT_TX_TCP_SEG;
1875                         m->tso_segsz = hdr->gso_size;
1876                         m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
1877                         break;
1878                 case VIRTIO_NET_HDR_GSO_UDP:
1879                         m->ol_flags |= PKT_TX_UDP_SEG;
1880                         m->tso_segsz = hdr->gso_size;
1881                         m->l4_len = sizeof(struct rte_udp_hdr);
1882                         break;
1883                 default:
1884                         VHOST_LOG_DATA(WARNING,
1885                                 "unsupported gso type %u.\n", hdr->gso_type);
1886                         break;
1887                 }
1888         }
1889 }
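
/*
 * Worked example for the checksum path above: a TCP/IPv4 packet that the
 * guest left partially checksummed arrives with csum_start = l2_len +
 * l3_len and csum_offset = offsetof(struct rte_tcp_hdr, cksum) = 16,
 * which is translated into PKT_TX_TCP_CKSUM so the checksum can be
 * completed on transmit.
 */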
1890
1891 static __rte_noinline void
1892 copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
1893                 struct buf_vector *buf_vec)
1894 {
1895         uint64_t len;
1896         uint64_t remain = sizeof(struct virtio_net_hdr);
1897         uint64_t src;
1898         uint64_t dst = (uint64_t)(uintptr_t)hdr;
1899
1900         while (remain) {
1901                 len = RTE_MIN(remain, buf_vec->buf_len);
1902                 src = buf_vec->buf_addr;
1903                 rte_memcpy((void *)(uintptr_t)dst,
1904                                 (void *)(uintptr_t)src, len);
1905
1906                 remain -= len;
1907                 dst += len;
1908                 buf_vec++;
1909         }
1910 }
1911
1912 static __rte_always_inline int
1913 copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
1914                   struct buf_vector *buf_vec, uint16_t nr_vec,
1915                   struct rte_mbuf *m, struct rte_mempool *mbuf_pool)
1916 {
1917         uint32_t buf_avail, buf_offset;
1918         uint64_t buf_addr, buf_len;
1919         uint32_t mbuf_avail, mbuf_offset;
1920         uint32_t cpy_len;
1921         struct rte_mbuf *cur = m, *prev = m;
1922         struct virtio_net_hdr tmp_hdr;
1923         struct virtio_net_hdr *hdr = NULL;
1924         /* Bounds the walk so a malformed desc chain cannot loop forever */
1925         uint16_t vec_idx = 0;
1926         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
1927         int error = 0;
1928
1929         buf_addr = buf_vec[vec_idx].buf_addr;
1930         buf_len = buf_vec[vec_idx].buf_len;
1931
1932         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
1933                 error = -1;
1934                 goto out;
1935         }
1936
1937         if (virtio_net_with_host_offload(dev)) {
1938                 if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
1939                         /*
1940                          * No luck, the virtio-net header doesn't fit
1941                          * in a contiguous virtual area.
1942                          */
1943                         copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec);
1944                         hdr = &tmp_hdr;
1945                 } else {
1946                         hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
1947                 }
1948         }
1949
1950         /*
1951          * A virtio driver normally uses at least 2 desc buffers
1952          * for Tx: the first for storing the header, and the others
1953          * for storing the data.
1954          */
1955         if (unlikely(buf_len < dev->vhost_hlen)) {
1956                 buf_offset = dev->vhost_hlen - buf_len;
1957                 vec_idx++;
1958                 buf_addr = buf_vec[vec_idx].buf_addr;
1959                 buf_len = buf_vec[vec_idx].buf_len;
1960                 buf_avail  = buf_len - buf_offset;
1961         } else if (buf_len == dev->vhost_hlen) {
1962                 if (unlikely(++vec_idx >= nr_vec))
1963                         goto out;
1964                 buf_addr = buf_vec[vec_idx].buf_addr;
1965                 buf_len = buf_vec[vec_idx].buf_len;
1966
1967                 buf_offset = 0;
1968                 buf_avail = buf_len;
1969         } else {
1970                 buf_offset = dev->vhost_hlen;
1971                 buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
1972         }
1973
1974         PRINT_PACKET(dev,
1975                         (uintptr_t)(buf_addr + buf_offset),
1976                         (uint32_t)buf_avail, 0);
1977
1978         mbuf_offset = 0;
1979         mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
1980         while (1) {
1981                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
1982
1983                 if (likely(cpy_len > MAX_BATCH_LEN ||
1984                                         vq->batch_copy_nb_elems >= vq->size ||
1985                                         (hdr && cur == m))) {
1986                         rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
1987                                                 mbuf_offset),
1988                                         (void *)((uintptr_t)(buf_addr +
1989                                                         buf_offset)), cpy_len);
1990                 } else {
1991                         batch_copy[vq->batch_copy_nb_elems].dst =
1992                                 rte_pktmbuf_mtod_offset(cur, void *,
1993                                                 mbuf_offset);
1994                         batch_copy[vq->batch_copy_nb_elems].src =
1995                                 (void *)((uintptr_t)(buf_addr + buf_offset));
1996                         batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
1997                         vq->batch_copy_nb_elems++;
1998                 }
1999
2000                 mbuf_avail  -= cpy_len;
2001                 mbuf_offset += cpy_len;
2002                 buf_avail -= cpy_len;
2003                 buf_offset += cpy_len;
2004
2005                 /* This buf has reached its end, get the next one */
2006                 if (buf_avail == 0) {
2007                         if (++vec_idx >= nr_vec)
2008                                 break;
2009
2010                         buf_addr = buf_vec[vec_idx].buf_addr;
2011                         buf_len = buf_vec[vec_idx].buf_len;
2012
2013                         buf_offset = 0;
2014                         buf_avail  = buf_len;
2015
2016                         PRINT_PACKET(dev, (uintptr_t)buf_addr,
2017                                         (uint32_t)buf_avail, 0);
2018                 }
2019
2020                 /*
2021                  * This mbuf has reached its end, get a new one
2022                  * to hold more data.
2023                  */
2024                 if (mbuf_avail == 0) {
2025                         cur = rte_pktmbuf_alloc(mbuf_pool);
2026                         if (unlikely(cur == NULL)) {
2027                                 VHOST_LOG_DATA(ERR,
2028                                         "Failed to allocate memory for mbuf.\n");
2029                                 error = -1;
2030                                 goto out;
2031                         }
2032
2033                         prev->next = cur;
2034                         prev->data_len = mbuf_offset;
2035                         m->nb_segs += 1;
2036                         m->pkt_len += mbuf_offset;
2037                         prev = cur;
2038
2039                         mbuf_offset = 0;
2040                         mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
2041                 }
2042         }
2043
2044         prev->data_len = mbuf_offset;
2045         m->pkt_len    += mbuf_offset;
2046
2047         if (hdr)
2048                 vhost_dequeue_offload(hdr, m);
2049
2050 out:
2051
2052         return error;
2053 }
2054
2055 static void
2056 virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)
2057 {
2058         rte_free(opaque);
2059 }
2060
2061 static int
2062 virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
2063 {
2064         struct rte_mbuf_ext_shared_info *shinfo = NULL;
2065         uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
2066         uint16_t buf_len;
2067         rte_iova_t iova;
2068         void *buf;
2069
2070         total_len += sizeof(*shinfo) + sizeof(uintptr_t);
2071         total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
2072
2073         if (unlikely(total_len > UINT16_MAX))
2074                 return -ENOSPC;
2075
2076         buf_len = total_len;
2077         buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
2078         if (unlikely(buf == NULL))
2079                 return -ENOMEM;
2080
2081         /* Initialize shinfo */
2082         shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
2083                                                 virtio_dev_extbuf_free, buf);
2084         if (unlikely(shinfo == NULL)) {
2085                 rte_free(buf);
2086                 VHOST_LOG_DATA(ERR, "Failed to init shinfo\n");
2087                 return -1;
2088         }
2089
2090         iova = rte_malloc_virt2iova(buf);
2091         rte_pktmbuf_attach_extbuf(pkt, buf, iova, buf_len, shinfo);
2092         rte_pktmbuf_reset_headroom(pkt);
2093
2094         return 0;
2095 }
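
/*
 * Layout of the externally attached buffer above (a single rte_malloc
 * allocation, capped at UINT16_MAX bytes):
 *
 *	+----------+---------------------+------------------------+
 *	| headroom | packet data         | shinfo (at the tail)   |
 *	+----------+---------------------+------------------------+
 *
 * rte_pktmbuf_ext_shinfo_init_helper() places the shared info at the
 * end of the buffer and shrinks buf_len accordingly; the buffer is
 * released through virtio_dev_extbuf_free() when the last mbuf
 * reference is gone.
 */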
2096
2097 /*
2098  * Allocate a pktmbuf that satisfies the host's buffer requirements.
2099  */
2100 static __rte_always_inline struct rte_mbuf *
2101 virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
2102                          uint32_t data_len)
2103 {
2104         struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp);
2105
2106         if (unlikely(pkt == NULL)) {
2107                 VHOST_LOG_DATA(ERR,
2108                         "Failed to allocate memory for mbuf.\n");
2109                 return NULL;
2110         }
2111
2112         if (rte_pktmbuf_tailroom(pkt) >= data_len)
2113                 return pkt;
2114
2115         /* attach an external buffer if supported */
2116         if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
2117                 return pkt;
2118
2119         /* check if chained buffers are allowed */
2120         if (!dev->linearbuf)
2121                 return pkt;
2122
2123         /* Data doesn't fit into the buffer and the host supports
2124          * only linear buffers
2125          */
2126         rte_pktmbuf_free(pkt);
2127
2128         return NULL;
2129 }
2130
2131 static __rte_noinline uint16_t
2132 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
2133         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
2134 {
2135         uint16_t i;
2136         uint16_t free_entries;
2137         uint16_t dropped = 0;
2138         static bool allocerr_warned;
2139
2140         /*
2141          * The ordering between avail index and
2142          * desc reads needs to be enforced.
2143          */
2144         free_entries = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE) -
2145                         vq->last_avail_idx;
2146         if (free_entries == 0)
2147                 return 0;
2148
2149         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
2150
2151         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
2152
2153         count = RTE_MIN(count, MAX_PKT_BURST);
2154         count = RTE_MIN(count, free_entries);
2155         VHOST_LOG_DATA(DEBUG, "(%d) about to dequeue %u buffers\n",
2156                         dev->vid, count);
2157
2158         for (i = 0; i < count; i++) {
2159                 struct buf_vector buf_vec[BUF_VECTOR_MAX];
2160                 uint16_t head_idx;
2161                 uint32_t buf_len;
2162                 uint16_t nr_vec = 0;
2163                 int err;
2164
2165                 if (unlikely(fill_vec_buf_split(dev, vq,
2166                                                 vq->last_avail_idx + i,
2167                                                 &nr_vec, buf_vec,
2168                                                 &head_idx, &buf_len,
2169                                                 VHOST_ACCESS_RO) < 0))
2170                         break;
2171
2172                 update_shadow_used_ring_split(vq, head_idx, 0);
2173
2174                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
2175                 if (unlikely(pkts[i] == NULL)) {
2176                         /*
2177                          * mbuf allocation fails for jumbo packets when external
2178                          * buffer allocation is not allowed and a linear buffer
2179                          * is required. Drop this packet.
2180                          */
2181                         if (!allocerr_warned) {
2182                                 VHOST_LOG_DATA(ERR,
2183                                         "Failed mbuf alloc of size %d from %s on %s.\n",
2184                                         buf_len, mbuf_pool->name, dev->ifname);
2185                                 allocerr_warned = true;
2186                         }
2187                         dropped += 1;
2188                         i++;
2189                         break;
2190                 }
2191
2192                 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
2193                                 mbuf_pool);
2194                 if (unlikely(err)) {
2195                         rte_pktmbuf_free(pkts[i]);
2196                         if (!allocerr_warned) {
2197                                 VHOST_LOG_DATA(ERR,
2198                                         "Failed to copy desc to mbuf on %s.\n",
2199                                         dev->ifname);
2200                                 allocerr_warned = true;
2201                         }
2202                         dropped += 1;
2203                         i++;
2204                         break;
2205                 }
2206         }
2207
2208         vq->last_avail_idx += i;
2209
2210         do_data_copy_dequeue(vq);
2211         if (unlikely(i < count))
2212                 vq->shadow_used_idx = i;
2213         if (likely(vq->shadow_used_idx)) {
2214                 flush_shadow_used_ring_split(dev, vq);
2215                 vhost_vring_call_split(dev, vq);
2216         }
2217
2218         return (i - dropped);
2219 }
2220
2221 static __rte_always_inline int
2222 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
2223                                  struct vhost_virtqueue *vq,
2224                                  struct rte_mempool *mbuf_pool,
2225                                  struct rte_mbuf **pkts,
2226                                  uint16_t avail_idx,
2227                                  uintptr_t *desc_addrs,
2228                                  uint16_t *ids)
2229 {
2230         bool wrap = vq->avail_wrap_counter;
2231         struct vring_packed_desc *descs = vq->desc_packed;
2232         struct virtio_net_hdr *hdr;
2233         uint64_t lens[PACKED_BATCH_SIZE];
2234         uint64_t buf_lens[PACKED_BATCH_SIZE];
2235         uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2236         uint16_t flags, i;
2237
2238         if (unlikely(avail_idx & PACKED_BATCH_MASK))
2239                 return -1;
2240         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
2241                 return -1;
2242
2243         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2244                 flags = descs[avail_idx + i].flags;
2245                 if (unlikely((wrap != !!(flags & VRING_DESC_F_AVAIL)) ||
2246                              (wrap == !!(flags & VRING_DESC_F_USED))  ||
2247                              (flags & PACKED_DESC_SINGLE_DEQUEUE_FLAG)))
2248                         return -1;
2249         }
2250
2251         rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
2252
2253         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2254                 lens[i] = descs[avail_idx + i].len;
2255
2256         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2257                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
2258                                                   descs[avail_idx + i].addr,
2259                                                   &lens[i], VHOST_ACCESS_RW);
2260         }
2261
2262         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2263                 if (unlikely(!desc_addrs[i]))
2264                         return -1;
2265                 if (unlikely((lens[i] != descs[avail_idx + i].len)))
2266                         return -1;
2267         }
2268
2269         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2270                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]);
2271                 if (!pkts[i])
2272                         goto free_buf;
2273         }
2274
2275         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2276                 buf_lens[i] = pkts[i]->buf_len - pkts[i]->data_off;
2277
2278         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2279                 if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
2280                         goto free_buf;
2281         }
2282
2283         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2284                 pkts[i]->pkt_len = descs[avail_idx + i].len - buf_offset;
2285                 pkts[i]->data_len = pkts[i]->pkt_len;
2286                 ids[i] = descs[avail_idx + i].id;
2287         }
2288
2289         if (virtio_net_with_host_offload(dev)) {
2290                 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2291                         hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
2292                         vhost_dequeue_offload(hdr, pkts[i]);
2293                 }
2294         }
2295
2296         return 0;
2297
2298 free_buf:
2299         for (i = 0; i < PACKED_BATCH_SIZE; i++)
2300                 rte_pktmbuf_free(pkts[i]);
2301
2302         return -1;
2303 }
2304
2305 static __rte_always_inline int
2306 virtio_dev_tx_batch_packed(struct virtio_net *dev,
2307                            struct vhost_virtqueue *vq,
2308                            struct rte_mempool *mbuf_pool,
2309                            struct rte_mbuf **pkts)
2310 {
2311         uint16_t avail_idx = vq->last_avail_idx;
2312         uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2313         uintptr_t desc_addrs[PACKED_BATCH_SIZE];
2314         uint16_t ids[PACKED_BATCH_SIZE];
2315         uint16_t i;
2316
2317         if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
2318                                              avail_idx, desc_addrs, ids))
2319                 return -1;
2320
2321         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2322                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
2323
2324         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2325                 rte_memcpy(rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
2326                            (void *)(uintptr_t)(desc_addrs[i] + buf_offset),
2327                            pkts[i]->pkt_len);
2328
2329         if (virtio_net_is_inorder(dev))
2330                 vhost_shadow_dequeue_batch_packed_inorder(vq,
2331                         ids[PACKED_BATCH_SIZE - 1]);
2332         else
2333                 vhost_shadow_dequeue_batch_packed(dev, vq, ids);
2334
2335         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
2336
2337         return 0;
2338 }
2339
2340 static __rte_always_inline int
2341 vhost_dequeue_single_packed(struct virtio_net *dev,
2342                             struct vhost_virtqueue *vq,
2343                             struct rte_mempool *mbuf_pool,
2344                             struct rte_mbuf **pkts,
2345                             uint16_t *buf_id,
2346                             uint16_t *desc_count)
2347 {
2348         struct buf_vector buf_vec[BUF_VECTOR_MAX];
2349         uint32_t buf_len;
2350         uint16_t nr_vec = 0;
2351         int err;
2352         static bool allocerr_warned;
2353
2354         if (unlikely(fill_vec_buf_packed(dev, vq,
2355                                          vq->last_avail_idx, desc_count,
2356                                          buf_vec, &nr_vec,
2357                                          buf_id, &buf_len,
2358                                          VHOST_ACCESS_RO) < 0))
2359                 return -1;
2360
2361         *pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
2362         if (unlikely(*pkts == NULL)) {
2363                 if (!allocerr_warned) {
2364                         VHOST_LOG_DATA(ERR,
2365                                 "Failed mbuf alloc of size %d from %s on %s.\n",
2366                                 buf_len, mbuf_pool->name, dev->ifname);
2367                         allocerr_warned = true;
2368                 }
2369                 return -1;
2370         }
2371
2372         err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
2373                                 mbuf_pool);
2374         if (unlikely(err)) {
2375                 if (!allocerr_warned) {
2376                         VHOST_LOG_DATA(ERR,
2377                                 "Failed to copy desc to mbuf on %s.\n",
2378                                 dev->ifname);
2379                         allocerr_warned = true;
2380                 }
2381                 rte_pktmbuf_free(*pkts);
2382                 return -1;
2383         }
2384
2385         return 0;
2386 }
2387
2388 static __rte_always_inline int
2389 virtio_dev_tx_single_packed(struct virtio_net *dev,
2390                             struct vhost_virtqueue *vq,
2391                             struct rte_mempool *mbuf_pool,
2392                             struct rte_mbuf **pkts)
2393 {
2394
2395         uint16_t buf_id, desc_count = 0;
2396         int ret;
2397
2398         ret = vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
2399                                         &desc_count);
2400
2401         if (likely(desc_count > 0)) {
2402                 if (virtio_net_is_inorder(dev))
2403                         vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
2404                                                                    desc_count);
2405                 else
2406                         vhost_shadow_dequeue_single_packed(vq, buf_id,
2407                                         desc_count);
2408
2409                 vq_inc_last_avail_packed(vq, desc_count);
2410         }
2411
2412         return ret;
2413 }
2414
2415 static __rte_noinline uint16_t
2416 virtio_dev_tx_packed(struct virtio_net *dev,
2417                      struct vhost_virtqueue *__rte_restrict vq,
2418                      struct rte_mempool *mbuf_pool,
2419                      struct rte_mbuf **__rte_restrict pkts,
2420                      uint32_t count)
2421 {
2422         uint32_t pkt_idx = 0;
2423         uint32_t remained = count;
2424
2425         do {
2426                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
2427
2428                 if (remained >= PACKED_BATCH_SIZE) {
2429                         if (!virtio_dev_tx_batch_packed(dev, vq, mbuf_pool,
2430                                                         &pkts[pkt_idx])) {
2431                                 pkt_idx += PACKED_BATCH_SIZE;
2432                                 remained -= PACKED_BATCH_SIZE;
2433                                 continue;
2434                         }
2435                 }
2436
2437                 if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
2438                                                 &pkts[pkt_idx]))
2439                         break;
2440                 pkt_idx++;
2441                 remained--;
2442
2443         } while (remained);
2444
2445         if (vq->shadow_used_idx) {
2446                 do_data_copy_dequeue(vq);
2447
2448                 vhost_flush_dequeue_shadow_packed(dev, vq);
2449                 vhost_vring_call_packed(dev, vq);
2450         }
2451
2452         return pkt_idx;
2453 }
2454
2455 uint16_t
2456 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
2457         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
2458 {
2459         struct virtio_net *dev;
2460         struct rte_mbuf *rarp_mbuf = NULL;
2461         struct vhost_virtqueue *vq;
2462         int16_t success = 1;
2463
2464         dev = get_device(vid);
2465         if (!dev)
2466                 return 0;
2467
2468         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
2469                 VHOST_LOG_DATA(ERR,
2470                         "(%d) %s: built-in vhost net backend is disabled.\n",
2471                         dev->vid, __func__);
2472                 return 0;
2473         }
2474
2475         if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
2476                 VHOST_LOG_DATA(ERR,
2477                         "(%d) %s: invalid virtqueue idx %d.\n",
2478                         dev->vid, __func__, queue_id);
2479                 return 0;
2480         }
2481
2482         vq = dev->virtqueue[queue_id];
2483
2484         if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
2485                 return 0;
2486
2487         if (unlikely(vq->enabled == 0)) {
2488                 count = 0;
2489                 goto out_access_unlock;
2490         }
2491
2492         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2493                 vhost_user_iotlb_rd_lock(vq);
2494
2495         if (unlikely(vq->access_ok == 0))
2496                 if (unlikely(vring_translate(dev, vq) < 0)) {
2497                         count = 0;
2498                         goto out;
2499                 }
2500
2501         /*
2502          * Construct a RARP broadcast packet, and inject it into the "pkts"
2503          * array, to make it look like the guest actually sent such a packet.
2504          *
2505          * Check user_send_rarp() for more information.
2506          *
2507          * broadcast_rarp shares a cacheline in the virtio_net structure
2508          * with some fields that are accessed during enqueue and
2509          * __atomic_compare_exchange_n performs a write as part of the
2510          * compare and exchange. This could result in false sharing between
2511          * enqueue and dequeue.
2512          *
2513          * Prevent unnecessary false sharing by reading broadcast_rarp first
2514          * and only performing compare and exchange if the read indicates it
2515          * is likely to be set.
2516          */
2517         if (unlikely(__atomic_load_n(&dev->broadcast_rarp, __ATOMIC_ACQUIRE) &&
2518                         __atomic_compare_exchange_n(&dev->broadcast_rarp,
2519                         &success, 0, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED))) {
2520
2521                 rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
2522                 if (rarp_mbuf == NULL) {
2523                         VHOST_LOG_DATA(ERR, "Failed to make RARP packet.\n");
2524                         count = 0;
2525                         goto out;
2526                 }
2527                 count -= 1;
2528         }
2529
2530         if (vq_is_packed(dev))
2531                 count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count);
2532         else
2533                 count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count);
2534
2535 out:
2536         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2537                 vhost_user_iotlb_rd_unlock(vq);
2538
2539 out_access_unlock:
2540         rte_spinlock_unlock(&vq->access_lock);
2541
2542         if (unlikely(rarp_mbuf != NULL)) {
2543                 /*
2544                  * Inject it at the head of the "pkts" array, so that the
2545                  * switch's MAC learning table gets updated first.
2546                  */
2547                 memmove(&pkts[1], pkts, count * sizeof(struct rte_mbuf *));
2548                 pkts[0] = rarp_mbuf;
2549                 count += 1;
2550         }
2551
2552         return count;
2553 }
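
/*
 * Usage sketch (illustrative only): draining a guest TX queue and
 * forwarding the packets to a NIC port. "vid", "port" and "mbuf_pool"
 * are hypothetical; note the odd queue index, since the guest's TX
 * queue is the host's dequeue side.
 *
 *	struct rte_mbuf *pkts[MAX_PKT_BURST];
 *	uint16_t n, sent, i;
 *
 *	n = rte_vhost_dequeue_burst(vid, 1, mbuf_pool, pkts,
 *			MAX_PKT_BURST);
 *	sent = rte_eth_tx_burst(port, 0, pkts, n);
 *	for (i = sent; i < n; i++)
 *		rte_pktmbuf_free(pkts[i]);
 */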