vhost: fix batch dequeue potential buffer overflow
[dpdk.git] / lib / librte_vhost / virtio_net.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <stdbool.h>
7 #include <linux/virtio_net.h>
8
9 #include <rte_mbuf.h>
10 #include <rte_memcpy.h>
11 #include <rte_ether.h>
12 #include <rte_ip.h>
13 #include <rte_vhost.h>
14 #include <rte_tcp.h>
15 #include <rte_udp.h>
16 #include <rte_sctp.h>
17 #include <rte_arp.h>
18 #include <rte_spinlock.h>
19 #include <rte_malloc.h>
20 #include <rte_vhost_async.h>
21
22 #include "iotlb.h"
23 #include "vhost.h"
24
25 #define MAX_BATCH_LEN 256
26
27 #define VHOST_ASYNC_BATCH_THRESHOLD 32
28
29 static __rte_always_inline bool
30 rxvq_is_mergeable(struct virtio_net *dev)
31 {
32         return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
33 }
34
35 static __rte_always_inline bool
36 virtio_net_is_inorder(struct virtio_net *dev)
37 {
38         return dev->features & (1ULL << VIRTIO_F_IN_ORDER);
39 }
40
41 static bool
42 is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
43 {
44         return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
45 }
46
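/*
 * Flush the copies that were deferred into vq->batch_copy_elems.
 * The enqueue variant below also logs each write for live-migration
 * dirty-page tracking and dumps the data via PRINT_PACKET.
 */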
47 static inline void
48 do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
49 {
50         struct batch_copy_elem *elem = vq->batch_copy_elems;
51         uint16_t count = vq->batch_copy_nb_elems;
52         int i;
53
54         for (i = 0; i < count; i++) {
55                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
56                 vhost_log_cache_write_iova(dev, vq, elem[i].log_addr,
57                                            elem[i].len);
58                 PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
59         }
60
61         vq->batch_copy_nb_elems = 0;
62 }
63
64 static inline void
65 do_data_copy_dequeue(struct vhost_virtqueue *vq)
66 {
67         struct batch_copy_elem *elem = vq->batch_copy_elems;
68         uint16_t count = vq->batch_copy_nb_elems;
69         int i;
70
71         for (i = 0; i < count; i++)
72                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
73
74         vq->batch_copy_nb_elems = 0;
75 }
76
77 static __rte_always_inline void
78 do_flush_shadow_used_ring_split(struct virtio_net *dev,
79                         struct vhost_virtqueue *vq,
80                         uint16_t to, uint16_t from, uint16_t size)
81 {
82         rte_memcpy(&vq->used->ring[to],
83                         &vq->shadow_used_split[from],
84                         size * sizeof(struct vring_used_elem));
85         vhost_log_cache_used_vring(dev, vq,
86                         offsetof(struct vring_used, ring[to]),
87                         size * sizeof(struct vring_used_elem));
88 }
89
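/*
 * Copy the shadow used entries into the guest-visible used ring,
 * splitting the copy in two when it wraps past the end of the ring,
 * then publish the new used index with release semantics so the guest
 * never observes the index before the entries themselves.
 */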
90 static __rte_always_inline void
91 flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
92 {
93         uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
94
95         if (used_idx + vq->shadow_used_idx <= vq->size) {
96                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
97                                           vq->shadow_used_idx);
98         } else {
99                 uint16_t size;
100
101                 /* flush the used ring interval [used_idx, vq->size) first */
102                 size = vq->size - used_idx;
103                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);
104
105                 /* then flush the remainder that wrapped to the start of the ring */
106                 do_flush_shadow_used_ring_split(dev, vq, 0, size,
107                                           vq->shadow_used_idx - size);
108         }
109         vq->last_used_idx += vq->shadow_used_idx;
110
111         vhost_log_cache_sync(dev, vq);
112
113         __atomic_add_fetch(&vq->used->idx, vq->shadow_used_idx,
114                            __ATOMIC_RELEASE);
115         vq->shadow_used_idx = 0;
116         vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
117                 sizeof(vq->used->idx));
118 }
119
120 static __rte_always_inline void
121 update_shadow_used_ring_split(struct vhost_virtqueue *vq,
122                          uint16_t desc_idx, uint32_t len)
123 {
124         uint16_t i = vq->shadow_used_idx++;
125
126         vq->shadow_used_split[i].id  = desc_idx;
127         vq->shadow_used_split[i].len = len;
128 }
129
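/*
 * Flush shadowed used entries to the packed ring. Descriptor ids and
 * lengths are written first; after a release fence the flags are set,
 * with the head descriptor's flags written last so the guest cannot
 * observe a partially updated chain.
 */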
130 static __rte_always_inline void
131 vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
132                                   struct vhost_virtqueue *vq)
133 {
134         int i;
135         uint16_t used_idx = vq->last_used_idx;
136         uint16_t head_idx = vq->last_used_idx;
137         uint16_t head_flags = 0;
138
139         /* Split loop in two to save memory barriers */
140         for (i = 0; i < vq->shadow_used_idx; i++) {
141                 vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
142                 vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;
143
144                 used_idx += vq->shadow_used_packed[i].count;
145                 if (used_idx >= vq->size)
146                         used_idx -= vq->size;
147         }
148
149         /* The ordering for storing desc flags needs to be enforced. */
150         rte_atomic_thread_fence(__ATOMIC_RELEASE);
151
152         for (i = 0; i < vq->shadow_used_idx; i++) {
153                 uint16_t flags;
154
155                 if (vq->shadow_used_packed[i].len)
156                         flags = VRING_DESC_F_WRITE;
157                 else
158                         flags = 0;
159
160                 if (vq->used_wrap_counter) {
161                         flags |= VRING_DESC_F_USED;
162                         flags |= VRING_DESC_F_AVAIL;
163                 } else {
164                         flags &= ~VRING_DESC_F_USED;
165                         flags &= ~VRING_DESC_F_AVAIL;
166                 }
167
168                 if (i > 0) {
169                         vq->desc_packed[vq->last_used_idx].flags = flags;
170
171                         vhost_log_cache_used_vring(dev, vq,
172                                         vq->last_used_idx *
173                                         sizeof(struct vring_packed_desc),
174                                         sizeof(struct vring_packed_desc));
175                 } else {
176                         head_idx = vq->last_used_idx;
177                         head_flags = flags;
178                 }
179
180                 vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count);
181         }
182
183         vq->desc_packed[head_idx].flags = head_flags;
184
185         vhost_log_cache_used_vring(dev, vq,
186                                 head_idx *
187                                 sizeof(struct vring_packed_desc),
188                                 sizeof(struct vring_packed_desc));
189
190         vq->shadow_used_idx = 0;
191         vhost_log_cache_sync(dev, vq);
192 }
193
194 static __rte_always_inline void
195 vhost_flush_dequeue_shadow_packed(struct virtio_net *dev,
196                                   struct vhost_virtqueue *vq)
197 {
198         struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];
199
200         vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
201         /* The desc flags field is the synchronization point for the virtio packed vring */
202         __atomic_store_n(&vq->desc_packed[vq->shadow_last_used_idx].flags,
203                          used_elem->flags, __ATOMIC_RELEASE);
204
205         vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
206                                    sizeof(struct vring_packed_desc),
207                                    sizeof(struct vring_packed_desc));
208         vq->shadow_used_idx = 0;
209         vhost_log_cache_sync(dev, vq);
210 }
211
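/*
 * Mark a full batch of PACKED_BATCH_SIZE descriptors as used in place.
 * Any pending shadow entries are flushed first; then ids and lengths
 * are written, followed by the flags after a release fence.
 */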
212 static __rte_always_inline void
213 vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
214                                  struct vhost_virtqueue *vq,
215                                  uint64_t *lens,
216                                  uint16_t *ids)
217 {
218         uint16_t i;
219         uint16_t flags;
220
221         if (vq->shadow_used_idx) {
222                 do_data_copy_enqueue(dev, vq);
223                 vhost_flush_enqueue_shadow_packed(dev, vq);
224         }
225
226         flags = PACKED_DESC_ENQUEUE_USED_FLAG(vq->used_wrap_counter);
227
228         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
229                 vq->desc_packed[vq->last_used_idx + i].id = ids[i];
230                 vq->desc_packed[vq->last_used_idx + i].len = lens[i];
231         }
232
233         rte_atomic_thread_fence(__ATOMIC_RELEASE);
234
235         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
236                 vq->desc_packed[vq->last_used_idx + i].flags = flags;
237
238         vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
239                                    sizeof(struct vring_packed_desc),
240                                    sizeof(struct vring_packed_desc) *
241                                    PACKED_BATCH_SIZE);
242         vhost_log_cache_sync(dev, vq);
243
244         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
245 }
246
247 static __rte_always_inline void
248 vhost_shadow_dequeue_batch_packed_inorder(struct vhost_virtqueue *vq,
249                                           uint16_t id)
250 {
251         vq->shadow_used_packed[0].id = id;
252
253         if (!vq->shadow_used_idx) {
254                 vq->shadow_last_used_idx = vq->last_used_idx;
255                 vq->shadow_used_packed[0].flags =
256                         PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
257                 vq->shadow_used_packed[0].len = 0;
258                 vq->shadow_used_packed[0].count = 1;
259                 vq->shadow_used_idx++;
260         }
261
262         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
263 }
264
265 static __rte_always_inline void
266 vhost_shadow_dequeue_batch_packed(struct virtio_net *dev,
267                                   struct vhost_virtqueue *vq,
268                                   uint16_t *ids)
269 {
270         uint16_t flags;
271         uint16_t i;
272         uint16_t begin;
273
274         flags = PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
275
276         if (!vq->shadow_used_idx) {
277                 vq->shadow_last_used_idx = vq->last_used_idx;
278                 vq->shadow_used_packed[0].id  = ids[0];
279                 vq->shadow_used_packed[0].len = 0;
280                 vq->shadow_used_packed[0].count = 1;
281                 vq->shadow_used_packed[0].flags = flags;
282                 vq->shadow_used_idx++;
283                 begin = 1;
284         } else
285                 begin = 0;
286
287         vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE) {
288                 vq->desc_packed[vq->last_used_idx + i].id = ids[i];
289                 vq->desc_packed[vq->last_used_idx + i].len = 0;
290         }
291
292         rte_atomic_thread_fence(__ATOMIC_RELEASE);
293         vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE)
294                 vq->desc_packed[vq->last_used_idx + i].flags = flags;
295
296         vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
297                                    sizeof(struct vring_packed_desc),
298                                    sizeof(struct vring_packed_desc) *
299                                    PACKED_BATCH_SIZE);
300         vhost_log_cache_sync(dev, vq);
301
302         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
303 }
304
305 static __rte_always_inline void
306 vhost_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
307                                    uint16_t buf_id,
308                                    uint16_t count)
309 {
310         uint16_t flags;
311
312         flags = vq->desc_packed[vq->last_used_idx].flags;
313         if (vq->used_wrap_counter) {
314                 flags |= VRING_DESC_F_USED;
315                 flags |= VRING_DESC_F_AVAIL;
316         } else {
317                 flags &= ~VRING_DESC_F_USED;
318                 flags &= ~VRING_DESC_F_AVAIL;
319         }
320
321         if (!vq->shadow_used_idx) {
322                 vq->shadow_last_used_idx = vq->last_used_idx;
323
324                 vq->shadow_used_packed[0].id  = buf_id;
325                 vq->shadow_used_packed[0].len = 0;
326                 vq->shadow_used_packed[0].flags = flags;
327                 vq->shadow_used_idx++;
328         } else {
329                 vq->desc_packed[vq->last_used_idx].id = buf_id;
330                 vq->desc_packed[vq->last_used_idx].len = 0;
331                 vq->desc_packed[vq->last_used_idx].flags = flags;
332         }
333
334         vq_inc_last_used_packed(vq, count);
335 }
336
337 static __rte_always_inline void
338 vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
339                                            uint16_t buf_id,
340                                            uint16_t count)
341 {
342         uint16_t flags;
343
344         vq->shadow_used_packed[0].id = buf_id;
345
346         flags = vq->desc_packed[vq->last_used_idx].flags;
347         if (vq->used_wrap_counter) {
348                 flags |= VRING_DESC_F_USED;
349                 flags |= VRING_DESC_F_AVAIL;
350         } else {
351                 flags &= ~VRING_DESC_F_USED;
352                 flags &= ~VRING_DESC_F_AVAIL;
353         }
354
355         if (!vq->shadow_used_idx) {
356                 vq->shadow_last_used_idx = vq->last_used_idx;
357                 vq->shadow_used_packed[0].len = 0;
358                 vq->shadow_used_packed[0].flags = flags;
359                 vq->shadow_used_idx++;
360         }
361
362         vq_inc_last_used_packed(vq, count);
363 }
364
365 static __rte_always_inline void
366 vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
367                                    struct vhost_virtqueue *vq,
368                                    uint32_t len[],
369                                    uint16_t id[],
370                                    uint16_t count[],
371                                    uint16_t num_buffers)
372 {
373         uint16_t i;
374         for (i = 0; i < num_buffers; i++) {
375                 /* keep the enqueue shadow flush aligned to the batch size */
376                 if (!vq->shadow_used_idx)
377                         vq->shadow_aligned_idx = vq->last_used_idx &
378                                 PACKED_BATCH_MASK;
379                 vq->shadow_used_packed[vq->shadow_used_idx].id  = id[i];
380                 vq->shadow_used_packed[vq->shadow_used_idx].len = len[i];
381                 vq->shadow_used_packed[vq->shadow_used_idx].count = count[i];
382                 vq->shadow_aligned_idx += count[i];
383                 vq->shadow_used_idx++;
384         }
385
386         if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
387                 do_data_copy_enqueue(dev, vq);
388                 vhost_flush_enqueue_shadow_packed(dev, vq);
389         }
390 }
391
392 /* avoid the write operation when it is not necessary, to lessen cache pressure */
393 #define ASSIGN_UNLESS_EQUAL(var, val) do {      \
394         if ((var) != (val))                     \
395                 (var) = (val);                  \
396 } while (0)
397
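/*
 * Translate the mbuf Tx offload requests (L4 checksum, TSO/UFO) into
 * the virtio-net header fields the guest expects. The IPv4 header
 * checksum is computed in software since it cannot be requested through
 * the header; unused fields are cleared only when they differ
 * (ASSIGN_UNLESS_EQUAL) to avoid needless stores.
 */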
398 static __rte_always_inline void
399 virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
400 {
401         uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;
402
403         if (m_buf->ol_flags & PKT_TX_TCP_SEG)
404                 csum_l4 |= PKT_TX_TCP_CKSUM;
405
406         if (csum_l4) {
407                 net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
408                 net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
409
410                 switch (csum_l4) {
411                 case PKT_TX_TCP_CKSUM:
412                         net_hdr->csum_offset = (offsetof(struct rte_tcp_hdr,
413                                                 cksum));
414                         break;
415                 case PKT_TX_UDP_CKSUM:
416                         net_hdr->csum_offset = (offsetof(struct rte_udp_hdr,
417                                                 dgram_cksum));
418                         break;
419                 case PKT_TX_SCTP_CKSUM:
420                         net_hdr->csum_offset = (offsetof(struct rte_sctp_hdr,
421                                                 cksum));
422                         break;
423                 }
424         } else {
425                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
426                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
427                 ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
428         }
429
430         /* IP cksum cannot be offloaded through the virtio-net header, so compute it here */
431         if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
432                 struct rte_ipv4_hdr *ipv4_hdr;
433
434                 ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
435                                                    m_buf->l2_len);
436                 ipv4_hdr->hdr_checksum = 0;
437                 ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
438         }
439
440         if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
441                 if (m_buf->ol_flags & PKT_TX_IPV4)
442                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
443                 else
444                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
445                 net_hdr->gso_size = m_buf->tso_segsz;
446                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
447                                         + m_buf->l4_len;
448         } else if (m_buf->ol_flags & PKT_TX_UDP_SEG) {
449                 net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
450                 net_hdr->gso_size = m_buf->tso_segsz;
451                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
452                         m_buf->l4_len;
453         } else {
454                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
455                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
456                 ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
457         }
458 }
459
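/*
 * Translate one guest descriptor (IOVA + length) into host virtual
 * address chunks and append them to buf_vec. A single descriptor may
 * need several buf_vec entries when its guest range is not contiguous
 * in the host address space. Returns -1 if the translation fails or
 * BUF_VECTOR_MAX would be exceeded.
 */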
460 static __rte_always_inline int
461 map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
462                 struct buf_vector *buf_vec, uint16_t *vec_idx,
463                 uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
464 {
465         uint16_t vec_id = *vec_idx;
466
467         while (desc_len) {
468                 uint64_t desc_addr;
469                 uint64_t desc_chunck_len = desc_len;
470
471                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
472                         return -1;
473
474                 desc_addr = vhost_iova_to_vva(dev, vq,
475                                 desc_iova,
476                                 &desc_chunck_len,
477                                 perm);
478                 if (unlikely(!desc_addr))
479                         return -1;
480
481                 rte_prefetch0((void *)(uintptr_t)desc_addr);
482
483                 buf_vec[vec_id].buf_iova = desc_iova;
484                 buf_vec[vec_id].buf_addr = desc_addr;
485                 buf_vec[vec_id].buf_len  = desc_chunck_len;
486
487                 desc_len -= desc_chunck_len;
488                 desc_iova += desc_chunck_len;
489                 vec_id++;
490         }
491         *vec_idx = vec_id;
492
493         return 0;
494 }
495
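/*
 * Resolve the split-ring descriptor chain referenced by the given avail
 * ring slot into buf_vec. Indirect descriptor tables are followed (and
 * copied when not contiguous in host VA space), and the walk is bounded
 * to guard against looping or malicious descriptor chains.
 */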
496 static __rte_always_inline int
497 fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
498                          uint32_t avail_idx, uint16_t *vec_idx,
499                          struct buf_vector *buf_vec, uint16_t *desc_chain_head,
500                          uint32_t *desc_chain_len, uint8_t perm)
501 {
502         uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
503         uint16_t vec_id = *vec_idx;
504         uint32_t len    = 0;
505         uint64_t dlen;
506         uint32_t nr_descs = vq->size;
507         uint32_t cnt    = 0;
508         struct vring_desc *descs = vq->desc;
509         struct vring_desc *idesc = NULL;
510
511         if (unlikely(idx >= vq->size))
512                 return -1;
513
514         *desc_chain_head = idx;
515
516         if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
517                 dlen = vq->desc[idx].len;
518                 nr_descs = dlen / sizeof(struct vring_desc);
519                 if (unlikely(nr_descs > vq->size))
520                         return -1;
521
522                 descs = (struct vring_desc *)(uintptr_t)
523                         vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
524                                                 &dlen,
525                                                 VHOST_ACCESS_RO);
526                 if (unlikely(!descs))
527                         return -1;
528
529                 if (unlikely(dlen < vq->desc[idx].len)) {
530                         /*
531                          * The indirect desc table is not contiguous
532                          * in the process VA space, so we have to copy it.
533                          */
534                         idesc = vhost_alloc_copy_ind_table(dev, vq,
535                                         vq->desc[idx].addr, vq->desc[idx].len);
536                         if (unlikely(!idesc))
537                                 return -1;
538
539                         descs = idesc;
540                 }
541
542                 idx = 0;
543         }
544
545         while (1) {
546                 if (unlikely(idx >= nr_descs || cnt++ >= nr_descs)) {
547                         free_ind_table(idesc);
548                         return -1;
549                 }
550
551                 dlen = descs[idx].len;
552                 len += dlen;
553
554                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
555                                                 descs[idx].addr, dlen,
556                                                 perm))) {
557                         free_ind_table(idesc);
558                         return -1;
559                 }
560
561                 if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
562                         break;
563
564                 idx = descs[idx].next;
565         }
566
567         *desc_chain_len = len;
568         *vec_idx = vec_id;
569
570         if (unlikely(!!idesc))
571                 free_ind_table(idesc);
572
573         return 0;
574 }
575
576 /*
577  * Returns -1 on failure, 0 on success.
578  */
579 static inline int
580 reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
581                                 uint32_t size, struct buf_vector *buf_vec,
582                                 uint16_t *num_buffers, uint16_t avail_head,
583                                 uint16_t *nr_vec)
584 {
585         uint16_t cur_idx;
586         uint16_t vec_idx = 0;
587         uint16_t max_tries, tries = 0;
588
589         uint16_t head_idx = 0;
590         uint32_t len = 0;
591
592         *num_buffers = 0;
593         cur_idx  = vq->last_avail_idx;
594
595         if (rxvq_is_mergeable(dev))
596                 max_tries = vq->size - 1;
597         else
598                 max_tries = 1;
599
600         while (size > 0) {
601                 if (unlikely(cur_idx == avail_head))
602                         return -1;
603                 /*
604                  * if we have tried all available ring items and still
605                  * cannot get enough buffers, something abnormal has
606                  * happened.
607                  */
608                 if (unlikely(++tries > max_tries))
609                         return -1;
610
611                 if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
612                                                 &vec_idx, buf_vec,
613                                                 &head_idx, &len,
614                                                 VHOST_ACCESS_RW) < 0))
615                         return -1;
616                 len = RTE_MIN(len, size);
617                 update_shadow_used_ring_split(vq, head_idx, len);
618                 size -= len;
619
620                 cur_idx++;
621                 *num_buffers += 1;
622         }
623
624         *nr_vec = vec_idx;
625
626         return 0;
627 }
628
629 static __rte_always_inline int
630 fill_vec_buf_packed_indirect(struct virtio_net *dev,
631                         struct vhost_virtqueue *vq,
632                         struct vring_packed_desc *desc, uint16_t *vec_idx,
633                         struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
634 {
635         uint16_t i;
636         uint32_t nr_descs;
637         uint16_t vec_id = *vec_idx;
638         uint64_t dlen;
639         struct vring_packed_desc *descs, *idescs = NULL;
640
641         dlen = desc->len;
642         descs = (struct vring_packed_desc *)(uintptr_t)
643                 vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
644         if (unlikely(!descs))
645                 return -1;
646
647         if (unlikely(dlen < desc->len)) {
648                 /*
649                  * The indirect desc table is not contiguous
650                  * in the process VA space, so we have to copy it.
651                  */
652                 idescs = vhost_alloc_copy_ind_table(dev,
653                                 vq, desc->addr, desc->len);
654                 if (unlikely(!idescs))
655                         return -1;
656
657                 descs = idescs;
658         }
659
660         nr_descs =  desc->len / sizeof(struct vring_packed_desc);
661         if (unlikely(nr_descs >= vq->size)) {
662                 free_ind_table(idescs);
663                 return -1;
664         }
665
666         for (i = 0; i < nr_descs; i++) {
667                 if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
668                         free_ind_table(idescs);
669                         return -1;
670                 }
671
672                 dlen = descs[i].len;
673                 *len += dlen;
674                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
675                                                 descs[i].addr, dlen,
676                                                 perm)))
677                         return -1;
678         }
679         *vec_idx = vec_id;
680
681         if (unlikely(!!idescs))
682                 free_ind_table(idescs);
683
684         return 0;
685 }
686
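/*
 * Resolve the packed-ring descriptor chain that starts at avail_idx
 * into buf_vec, following indirect tables and handling ring wrap-around.
 * Returns the buffer id and the number of descriptors consumed.
 */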
687 static __rte_always_inline int
688 fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
689                                 uint16_t avail_idx, uint16_t *desc_count,
690                                 struct buf_vector *buf_vec, uint16_t *vec_idx,
691                                 uint16_t *buf_id, uint32_t *len, uint8_t perm)
692 {
693         bool wrap_counter = vq->avail_wrap_counter;
694         struct vring_packed_desc *descs = vq->desc_packed;
695         uint16_t vec_id = *vec_idx;
696         uint64_t dlen;
697
698         if (avail_idx < vq->last_avail_idx)
699                 wrap_counter ^= 1;
700
701         /*
702          * Perform a load-acquire barrier in desc_is_avail to
703          * enforce the ordering between desc flags and desc
704          * content.
705          */
706         if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
707                 return -1;
708
709         *desc_count = 0;
710         *len = 0;
711
712         while (1) {
713                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
714                         return -1;
715
716                 if (unlikely(*desc_count >= vq->size))
717                         return -1;
718
719                 *desc_count += 1;
720                 *buf_id = descs[avail_idx].id;
721
722                 if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
723                         if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
724                                                         &descs[avail_idx],
725                                                         &vec_id, buf_vec,
726                                                         len, perm) < 0))
727                                 return -1;
728                 } else {
729                         dlen = descs[avail_idx].len;
730                         *len += dlen;
731
732                         if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
733                                                         descs[avail_idx].addr,
734                                                         dlen,
735                                                         perm)))
736                                 return -1;
737                 }
738
739                 if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
740                         break;
741
742                 if (++avail_idx >= vq->size) {
743                         avail_idx -= vq->size;
744                         wrap_counter ^= 1;
745                 }
746         }
747
748         *vec_idx = vec_id;
749
750         return 0;
751 }
752
753 static __rte_noinline void
754 copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
755                 struct buf_vector *buf_vec,
756                 struct virtio_net_hdr_mrg_rxbuf *hdr)
757 {
758         uint64_t len;
759         uint64_t remain = dev->vhost_hlen;
760         uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
761         uint64_t iova = buf_vec->buf_iova;
762
763         while (remain) {
764                 len = RTE_MIN(remain,
765                                 buf_vec->buf_len);
766                 dst = buf_vec->buf_addr;
767                 rte_memcpy((void *)(uintptr_t)dst,
768                                 (void *)(uintptr_t)src,
769                                 len);
770
771                 PRINT_PACKET(dev, (uintptr_t)dst,
772                                 (uint32_t)len, 0);
773                 vhost_log_cache_write_iova(dev, vq,
774                                 iova, len);
775
776                 remain -= len;
777                 iova += len;
778                 src += len;
779                 buf_vec++;
780         }
781 }
782
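/*
 * Copy an mbuf chain into the guest buffers described by buf_vec.
 * The virtio-net header is written first, through a bounce buffer when
 * the first descriptor is smaller than the header. Payload copies no
 * larger than MAX_BATCH_LEN are deferred to the batch-copy array
 * (while room remains) and flushed later by do_data_copy_enqueue().
 */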
783 static __rte_always_inline int
784 copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
785                             struct rte_mbuf *m, struct buf_vector *buf_vec,
786                             uint16_t nr_vec, uint16_t num_buffers)
787 {
788         uint32_t vec_idx = 0;
789         uint32_t mbuf_offset, mbuf_avail;
790         uint32_t buf_offset, buf_avail;
791         uint64_t buf_addr, buf_iova, buf_len;
792         uint32_t cpy_len;
793         uint64_t hdr_addr;
794         struct rte_mbuf *hdr_mbuf;
795         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
796         struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
797         int error = 0;
798
799         if (unlikely(m == NULL)) {
800                 error = -1;
801                 goto out;
802         }
803
804         buf_addr = buf_vec[vec_idx].buf_addr;
805         buf_iova = buf_vec[vec_idx].buf_iova;
806         buf_len = buf_vec[vec_idx].buf_len;
807
808         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
809                 error = -1;
810                 goto out;
811         }
812
813         hdr_mbuf = m;
814         hdr_addr = buf_addr;
815         if (unlikely(buf_len < dev->vhost_hlen))
816                 hdr = &tmp_hdr;
817         else
818                 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
819
820         VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
821                 dev->vid, num_buffers);
822
823         if (unlikely(buf_len < dev->vhost_hlen)) {
824                 buf_offset = dev->vhost_hlen - buf_len;
825                 vec_idx++;
826                 buf_addr = buf_vec[vec_idx].buf_addr;
827                 buf_iova = buf_vec[vec_idx].buf_iova;
828                 buf_len = buf_vec[vec_idx].buf_len;
829                 buf_avail = buf_len - buf_offset;
830         } else {
831                 buf_offset = dev->vhost_hlen;
832                 buf_avail = buf_len - dev->vhost_hlen;
833         }
834
835         mbuf_avail  = rte_pktmbuf_data_len(m);
836         mbuf_offset = 0;
837         while (mbuf_avail != 0 || m->next != NULL) {
838                 /* done with current buf, get the next one */
839                 if (buf_avail == 0) {
840                         vec_idx++;
841                         if (unlikely(vec_idx >= nr_vec)) {
842                                 error = -1;
843                                 goto out;
844                         }
845
846                         buf_addr = buf_vec[vec_idx].buf_addr;
847                         buf_iova = buf_vec[vec_idx].buf_iova;
848                         buf_len = buf_vec[vec_idx].buf_len;
849
850                         buf_offset = 0;
851                         buf_avail  = buf_len;
852                 }
853
854                 /* done with current mbuf, get the next one */
855                 if (mbuf_avail == 0) {
856                         m = m->next;
857
858                         mbuf_offset = 0;
859                         mbuf_avail  = rte_pktmbuf_data_len(m);
860                 }
861
862                 if (hdr_addr) {
863                         virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
864                         if (rxvq_is_mergeable(dev))
865                                 ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
866                                                 num_buffers);
867
868                         if (unlikely(hdr == &tmp_hdr)) {
869                                 copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
870                         } else {
871                                 PRINT_PACKET(dev, (uintptr_t)hdr_addr,
872                                                 dev->vhost_hlen, 0);
873                                 vhost_log_cache_write_iova(dev, vq,
874                                                 buf_vec[0].buf_iova,
875                                                 dev->vhost_hlen);
876                         }
877
878                         hdr_addr = 0;
879                 }
880
881                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
882
883                 if (likely(cpy_len > MAX_BATCH_LEN ||
884                                         vq->batch_copy_nb_elems >= vq->size)) {
885                         rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
886                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
887                                 cpy_len);
888                         vhost_log_cache_write_iova(dev, vq,
889                                                    buf_iova + buf_offset,
890                                                    cpy_len);
891                         PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
892                                 cpy_len, 0);
893                 } else {
894                         batch_copy[vq->batch_copy_nb_elems].dst =
895                                 (void *)((uintptr_t)(buf_addr + buf_offset));
896                         batch_copy[vq->batch_copy_nb_elems].src =
897                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
898                         batch_copy[vq->batch_copy_nb_elems].log_addr =
899                                 buf_iova + buf_offset;
900                         batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
901                         vq->batch_copy_nb_elems++;
902                 }
903
904                 mbuf_avail  -= cpy_len;
905                 mbuf_offset += cpy_len;
906                 buf_avail  -= cpy_len;
907                 buf_offset += cpy_len;
908         }
909
910 out:
911
912         return error;
913 }
914
915 static __rte_always_inline void
916 async_fill_vec(struct iovec *v, void *base, size_t len)
917 {
918         v->iov_base = base;
919         v->iov_len = len;
920 }
921
922 static __rte_always_inline void
923 async_fill_iter(struct rte_vhost_iov_iter *it, size_t count,
924         struct iovec *vec, unsigned long nr_seg)
925 {
926         it->offset = 0;
927         it->count = count;
928
929         if (count) {
930                 it->iov = vec;
931                 it->nr_segs = nr_seg;
932         } else {
933                 it->iov = 0;
934                 it->nr_segs = 0;
935         }
936 }
937
938 static __rte_always_inline void
939 async_fill_desc(struct rte_vhost_async_desc *desc,
940         struct rte_vhost_iov_iter *src, struct rte_vhost_iov_iter *dst)
941 {
942         desc->src = src;
943         desc->dst = dst;
944 }
945
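/*
 * Async variant of copy_mbuf_to_desc(): segments of at least
 * vq->async_threshold bytes that map to contiguous host physical memory
 * are described as src/dst iovecs for the DMA engine instead of being
 * copied by the CPU; shorter or unmappable segments are copied
 * synchronously as in the regular path. On return, src_it/dst_it
 * describe the offloaded job (count is zero when nothing was offloaded).
 */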
946 static __rte_always_inline int
947 async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
948                         struct rte_mbuf *m, struct buf_vector *buf_vec,
949                         uint16_t nr_vec, uint16_t num_buffers,
950                         struct iovec *src_iovec, struct iovec *dst_iovec,
951                         struct rte_vhost_iov_iter *src_it,
952                         struct rte_vhost_iov_iter *dst_it)
953 {
954         uint32_t vec_idx = 0;
955         uint32_t mbuf_offset, mbuf_avail;
956         uint32_t buf_offset, buf_avail;
957         uint64_t buf_addr, buf_iova, buf_len;
958         uint32_t cpy_len, cpy_threshold;
959         uint64_t hdr_addr;
960         struct rte_mbuf *hdr_mbuf;
961         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
962         struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
963         int error = 0;
964         uint64_t mapped_len;
965
966         uint32_t tlen = 0;
967         int tvec_idx = 0;
968         void *hpa;
969
970         if (unlikely(m == NULL)) {
971                 error = -1;
972                 goto out;
973         }
974
975         cpy_threshold = vq->async_threshold;
976
977         buf_addr = buf_vec[vec_idx].buf_addr;
978         buf_iova = buf_vec[vec_idx].buf_iova;
979         buf_len = buf_vec[vec_idx].buf_len;
980
981         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
982                 error = -1;
983                 goto out;
984         }
985
986         hdr_mbuf = m;
987         hdr_addr = buf_addr;
988         if (unlikely(buf_len < dev->vhost_hlen))
989                 hdr = &tmp_hdr;
990         else
991                 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
992
993         VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
994                 dev->vid, num_buffers);
995
996         if (unlikely(buf_len < dev->vhost_hlen)) {
997                 buf_offset = dev->vhost_hlen - buf_len;
998                 vec_idx++;
999                 buf_addr = buf_vec[vec_idx].buf_addr;
1000                 buf_iova = buf_vec[vec_idx].buf_iova;
1001                 buf_len = buf_vec[vec_idx].buf_len;
1002                 buf_avail = buf_len - buf_offset;
1003         } else {
1004                 buf_offset = dev->vhost_hlen;
1005                 buf_avail = buf_len - dev->vhost_hlen;
1006         }
1007
1008         mbuf_avail  = rte_pktmbuf_data_len(m);
1009         mbuf_offset = 0;
1010
1011         while (mbuf_avail != 0 || m->next != NULL) {
1012                 /* done with current buf, get the next one */
1013                 if (buf_avail == 0) {
1014                         vec_idx++;
1015                         if (unlikely(vec_idx >= nr_vec)) {
1016                                 error = -1;
1017                                 goto out;
1018                         }
1019
1020                         buf_addr = buf_vec[vec_idx].buf_addr;
1021                         buf_iova = buf_vec[vec_idx].buf_iova;
1022                         buf_len = buf_vec[vec_idx].buf_len;
1023
1024                         buf_offset = 0;
1025                         buf_avail  = buf_len;
1026                 }
1027
1028                 /* done with current mbuf, get the next one */
1029                 if (mbuf_avail == 0) {
1030                         m = m->next;
1031
1032                         mbuf_offset = 0;
1033                         mbuf_avail  = rte_pktmbuf_data_len(m);
1034                 }
1035
1036                 if (hdr_addr) {
1037                         virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
1038                         if (rxvq_is_mergeable(dev))
1039                                 ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
1040                                                 num_buffers);
1041
1042                         if (unlikely(hdr == &tmp_hdr)) {
1043                                 copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
1044                         } else {
1045                                 PRINT_PACKET(dev, (uintptr_t)hdr_addr,
1046                                                 dev->vhost_hlen, 0);
1047                                 vhost_log_cache_write_iova(dev, vq,
1048                                                 buf_vec[0].buf_iova,
1049                                                 dev->vhost_hlen);
1050                         }
1051
1052                         hdr_addr = 0;
1053                 }
1054
1055                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
1056
1057                 while (unlikely(cpy_len && cpy_len >= cpy_threshold)) {
1058                         hpa = (void *)(uintptr_t)gpa_to_first_hpa(dev,
1059                                         buf_iova + buf_offset,
1060                                         cpy_len, &mapped_len);
1061
1062                         if (unlikely(!hpa || mapped_len < cpy_threshold))
1063                                 break;
1064
1065                         async_fill_vec(src_iovec + tvec_idx,
1066                                 (void *)(uintptr_t)rte_pktmbuf_iova_offset(m,
1067                                 mbuf_offset), (size_t)mapped_len);
1068
1069                         async_fill_vec(dst_iovec + tvec_idx,
1070                                         hpa, (size_t)mapped_len);
1071
1072                         tlen += (uint32_t)mapped_len;
1073                         cpy_len -= (uint32_t)mapped_len;
1074                         mbuf_avail  -= (uint32_t)mapped_len;
1075                         mbuf_offset += (uint32_t)mapped_len;
1076                         buf_avail  -= (uint32_t)mapped_len;
1077                         buf_offset += (uint32_t)mapped_len;
1078                         tvec_idx++;
1079                 }
1080
1081                 if (likely(cpy_len)) {
1082                         if (unlikely(vq->batch_copy_nb_elems >= vq->size)) {
1083                                 rte_memcpy(
1084                                 (void *)((uintptr_t)(buf_addr + buf_offset)),
1085                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
1086                                 cpy_len);
1087
1088                                 PRINT_PACKET(dev,
1089                                         (uintptr_t)(buf_addr + buf_offset),
1090                                         cpy_len, 0);
1091                         } else {
1092                                 batch_copy[vq->batch_copy_nb_elems].dst =
1093                                 (void *)((uintptr_t)(buf_addr + buf_offset));
1094                                 batch_copy[vq->batch_copy_nb_elems].src =
1095                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
1096                                 batch_copy[vq->batch_copy_nb_elems].log_addr =
1097                                         buf_iova + buf_offset;
1098                                 batch_copy[vq->batch_copy_nb_elems].len =
1099                                         cpy_len;
1100                                 vq->batch_copy_nb_elems++;
1101                         }
1102
1103                         mbuf_avail  -= cpy_len;
1104                         mbuf_offset += cpy_len;
1105                         buf_avail  -= cpy_len;
1106                         buf_offset += cpy_len;
1107                 }
1108
1109         }
1110
1111 out:
1112         if (tlen) {
1113                 async_fill_iter(src_it, tlen, src_iovec, tvec_idx);
1114                 async_fill_iter(dst_it, tlen, dst_iovec, tvec_idx);
1115         } else {
1116                 src_it->count = 0;
1117         }
1118
1119         return error;
1120 }
1121
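/*
 * Reserve enough packed-ring descriptors for a single packet (several
 * buffers may be needed when mergeable Rx buffers are negotiated),
 * copy the packet and record the used entries in the shadow ring.
 */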
1122 static __rte_always_inline int
1123 vhost_enqueue_single_packed(struct virtio_net *dev,
1124                             struct vhost_virtqueue *vq,
1125                             struct rte_mbuf *pkt,
1126                             struct buf_vector *buf_vec,
1127                             uint16_t *nr_descs)
1128 {
1129         uint16_t nr_vec = 0;
1130         uint16_t avail_idx = vq->last_avail_idx;
1131         uint16_t max_tries, tries = 0;
1132         uint16_t buf_id = 0;
1133         uint32_t len = 0;
1134         uint16_t desc_count;
1135         uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
1136         uint16_t num_buffers = 0;
1137         uint32_t buffer_len[vq->size];
1138         uint16_t buffer_buf_id[vq->size];
1139         uint16_t buffer_desc_count[vq->size];
1140
1141         if (rxvq_is_mergeable(dev))
1142                 max_tries = vq->size - 1;
1143         else
1144                 max_tries = 1;
1145
1146         while (size > 0) {
1147                 /*
1148                  * if we have tried all available ring items and still
1149                  * cannot get enough buffers, something abnormal has
1150                  * happened.
1151                  */
1152                 if (unlikely(++tries > max_tries))
1153                         return -1;
1154
1155                 if (unlikely(fill_vec_buf_packed(dev, vq,
1156                                                 avail_idx, &desc_count,
1157                                                 buf_vec, &nr_vec,
1158                                                 &buf_id, &len,
1159                                                 VHOST_ACCESS_RW) < 0))
1160                         return -1;
1161
1162                 len = RTE_MIN(len, size);
1163                 size -= len;
1164
1165                 buffer_len[num_buffers] = len;
1166                 buffer_buf_id[num_buffers] = buf_id;
1167                 buffer_desc_count[num_buffers] = desc_count;
1168                 num_buffers += 1;
1169
1170                 *nr_descs += desc_count;
1171                 avail_idx += desc_count;
1172                 if (avail_idx >= vq->size)
1173                         avail_idx -= vq->size;
1174         }
1175
1176         if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0)
1177                 return -1;
1178
1179         vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
1180                                            buffer_desc_count, num_buffers);
1181
1182         return 0;
1183 }
1184
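/*
 * Enqueue a burst of packets on a split ring: load the avail index with
 * acquire semantics, reserve buffers and copy each packet, then flush
 * the shadow used ring and notify the guest.
 */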
1185 static __rte_noinline uint32_t
1186 virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
1187         struct rte_mbuf **pkts, uint32_t count)
1188 {
1189         uint32_t pkt_idx = 0;
1190         uint16_t num_buffers;
1191         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1192         uint16_t avail_head;
1193
1194         /*
1195          * The ordering between avail index and
1196          * desc reads needs to be enforced.
1197          */
1198         avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
1199
1200         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1201
1202         for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
1203                 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
1204                 uint16_t nr_vec = 0;
1205
1206                 if (unlikely(reserve_avail_buf_split(dev, vq,
1207                                                 pkt_len, buf_vec, &num_buffers,
1208                                                 avail_head, &nr_vec) < 0)) {
1209                         VHOST_LOG_DATA(DEBUG,
1210                                 "(%d) failed to get enough desc from vring\n",
1211                                 dev->vid);
1212                         vq->shadow_used_idx -= num_buffers;
1213                         break;
1214                 }
1215
1216                 VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
1217                         dev->vid, vq->last_avail_idx,
1218                         vq->last_avail_idx + num_buffers);
1219
1220                 if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
1221                                                 buf_vec, nr_vec,
1222                                                 num_buffers) < 0) {
1223                         vq->shadow_used_idx -= num_buffers;
1224                         break;
1225                 }
1226
1227                 vq->last_avail_idx += num_buffers;
1228         }
1229
1230         do_data_copy_enqueue(dev, vq);
1231
1232         if (likely(vq->shadow_used_idx)) {
1233                 flush_shadow_used_ring_split(dev, vq);
1234                 vhost_vring_call_split(dev, vq);
1235         }
1236
1237         return pkt_idx;
1238 }
1239
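/*
 * Fast path that enqueues PACKED_BATCH_SIZE packets at once. It bails
 * out (returns -1) unless the avail index is batch-aligned, the batch
 * does not cross the ring end, every descriptor is available and large
 * enough, and every packet fits in a single mbuf segment.
 */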
1240 static __rte_always_inline int
1241 virtio_dev_rx_batch_packed(struct virtio_net *dev,
1242                            struct vhost_virtqueue *vq,
1243                            struct rte_mbuf **pkts)
1244 {
1245         bool wrap_counter = vq->avail_wrap_counter;
1246         struct vring_packed_desc *descs = vq->desc_packed;
1247         uint16_t avail_idx = vq->last_avail_idx;
1248         uint64_t desc_addrs[PACKED_BATCH_SIZE];
1249         struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
1250         uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1251         uint64_t lens[PACKED_BATCH_SIZE];
1252         uint16_t ids[PACKED_BATCH_SIZE];
1253         uint16_t i;
1254
1255         if (unlikely(avail_idx & PACKED_BATCH_MASK))
1256                 return -1;
1257
1258         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
1259                 return -1;
1260
1261         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1262                 if (unlikely(pkts[i]->next != NULL))
1263                         return -1;
1264                 if (unlikely(!desc_is_avail(&descs[avail_idx + i],
1265                                             wrap_counter)))
1266                         return -1;
1267         }
1268
1269         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1270                 lens[i] = descs[avail_idx + i].len;
1271
1272         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1273                 if (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset)))
1274                         return -1;
1275         }
1276
1277         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1278                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
1279                                                   descs[avail_idx + i].addr,
1280                                                   &lens[i],
1281                                                   VHOST_ACCESS_RW);
1282
1283         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1284                 if (unlikely(!desc_addrs[i]))
1285                         return -1;
1286                 if (unlikely(lens[i] != descs[avail_idx + i].len))
1287                         return -1;
1288         }
1289
1290         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1291                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
1292                 hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
1293                                         (uintptr_t)desc_addrs[i];
1294                 lens[i] = pkts[i]->pkt_len +
1295                         sizeof(struct virtio_net_hdr_mrg_rxbuf);
1296         }
1297
1298         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1299                 virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr);
1300
1301         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
1302
1303         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1304                 rte_memcpy((void *)(uintptr_t)(desc_addrs[i] + buf_offset),
1305                            rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
1306                            pkts[i]->pkt_len);
1307         }
1308
1309         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1310                 vhost_log_cache_write_iova(dev, vq, descs[avail_idx + i].addr,
1311                                            lens[i]);
1312
1313         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1314                 ids[i] = descs[avail_idx + i].id;
1315
1316         vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);
1317
1318         return 0;
1319 }
1320
1321 static __rte_always_inline int16_t
1322 virtio_dev_rx_single_packed(struct virtio_net *dev,
1323                             struct vhost_virtqueue *vq,
1324                             struct rte_mbuf *pkt)
1325 {
1326         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1327         uint16_t nr_descs = 0;
1328
1329         if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
1330                                                  &nr_descs) < 0)) {
1331                 VHOST_LOG_DATA(DEBUG,
1332                                 "(%d) failed to get enough desc from vring\n",
1333                                 dev->vid);
1334                 return -1;
1335         }
1336
1337         VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
1338                         dev->vid, vq->last_avail_idx,
1339                         vq->last_avail_idx + nr_descs);
1340
1341         vq_inc_last_avail_packed(vq, nr_descs);
1342
1343         return 0;
1344 }
1345
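/*
 * Packed-ring enqueue: try the batched fast path first, fall back to
 * per-packet enqueue, then flush the shadow ring and kick the guest.
 */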
1346 static __rte_noinline uint32_t
1347 virtio_dev_rx_packed(struct virtio_net *dev,
1348                      struct vhost_virtqueue *__rte_restrict vq,
1349                      struct rte_mbuf **__rte_restrict pkts,
1350                      uint32_t count)
1351 {
1352         uint32_t pkt_idx = 0;
1353         uint32_t remained = count;
1354
1355         do {
1356                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
1357
1358                 if (remained >= PACKED_BATCH_SIZE) {
1359                         if (!virtio_dev_rx_batch_packed(dev, vq,
1360                                                         &pkts[pkt_idx])) {
1361                                 pkt_idx += PACKED_BATCH_SIZE;
1362                                 remained -= PACKED_BATCH_SIZE;
1363                                 continue;
1364                         }
1365                 }
1366
1367                 if (virtio_dev_rx_single_packed(dev, vq, pkts[pkt_idx]))
1368                         break;
1369                 pkt_idx++;
1370                 remained--;
1371
1372         } while (pkt_idx < count);
1373
1374         if (vq->shadow_used_idx) {
1375                 do_data_copy_enqueue(dev, vq);
1376                 vhost_flush_enqueue_shadow_packed(dev, vq);
1377         }
1378
1379         if (pkt_idx)
1380                 vhost_vring_call_packed(dev, vq);
1381
1382         return pkt_idx;
1383 }
1384
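/*
 * Common enqueue entry point: validate the queue index, take the access
 * lock and (when an IOMMU is in use) the IOTLB read lock, translate the
 * ring if needed and dispatch to the packed or split implementation.
 */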
1385 static __rte_always_inline uint32_t
1386 virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
1387         struct rte_mbuf **pkts, uint32_t count)
1388 {
1389         struct vhost_virtqueue *vq;
1390         uint32_t nb_tx = 0;
1391
1392         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
1393         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
1394                 VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
1395                         dev->vid, __func__, queue_id);
1396                 return 0;
1397         }
1398
1399         vq = dev->virtqueue[queue_id];
1400
1401         rte_spinlock_lock(&vq->access_lock);
1402
1403         if (unlikely(!vq->enabled))
1404                 goto out_access_unlock;
1405
1406         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1407                 vhost_user_iotlb_rd_lock(vq);
1408
1409         if (unlikely(!vq->access_ok))
1410                 if (unlikely(vring_translate(dev, vq) < 0))
1411                         goto out;
1412
1413         count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
1414         if (count == 0)
1415                 goto out;
1416
1417         if (vq_is_packed(dev))
1418                 nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
1419         else
1420                 nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);
1421
1422 out:
1423         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1424                 vhost_user_iotlb_rd_unlock(vq);
1425
1426 out_access_unlock:
1427         rte_spinlock_unlock(&vq->access_lock);
1428
1429         return nb_tx;
1430 }
1431
1432 uint16_t
1433 rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
1434         struct rte_mbuf **__rte_restrict pkts, uint16_t count)
1435 {
1436         struct virtio_net *dev = get_device(vid);
1437
1438         if (!dev)
1439                 return 0;
1440
1441         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
1442                 VHOST_LOG_DATA(ERR,
1443                         "(%d) %s: built-in vhost net backend is disabled.\n",
1444                         dev->vid, __func__);
1445                 return 0;
1446         }
1447
1448         return virtio_dev_rx(dev, queue_id, pkts, count);
1449 }
1450
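/*
 * Given the slot where the next async packet will be recorded (pkts_idx)
 * and the number of packets still in flight, return the slot of the
 * oldest in-flight packet. vq_size is a power of two, so the index wraps
 * with a mask; e.g. with vq_size = 256, pkts_idx = 2 and n_inflight = 5,
 * the oldest entry sits at (256 - 5 + 2) & 255 = 253.
 */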
1451 static __rte_always_inline uint16_t
1452 virtio_dev_rx_async_get_info_idx(uint16_t pkts_idx,
1453         uint16_t vq_size, uint16_t n_inflight)
1454 {
1455         return pkts_idx > n_inflight ? (pkts_idx - n_inflight) :
1456                 (vq_size - n_inflight + pkts_idx) & (vq_size - 1);
1457 }
1458
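/*
 * Split-ring enqueue path using an async copy engine: descriptor chains
 * are reserved as usual, large copies are handed to the registered async
 * channel as iovec pairs, and packets copied entirely by the CPU are
 * returned right away in comp_pkts.
 */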
1459 static __rte_noinline uint32_t
1460 virtio_dev_rx_async_submit_split(struct virtio_net *dev,
1461         struct vhost_virtqueue *vq, uint16_t queue_id,
1462         struct rte_mbuf **pkts, uint32_t count,
1463         struct rte_mbuf **comp_pkts, uint32_t *comp_count)
1464 {
1465         uint32_t pkt_idx = 0, pkt_burst_idx = 0;
1466         uint16_t num_buffers;
1467         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1468         uint16_t avail_head;
1469
1470         struct rte_vhost_iov_iter *it_pool = vq->it_pool;
1471         struct iovec *vec_pool = vq->vec_pool;
1472         struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
1473         struct iovec *src_iovec = vec_pool;
1474         struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
1475         struct rte_vhost_iov_iter *src_it = it_pool;
1476         struct rte_vhost_iov_iter *dst_it = it_pool + 1;
1477         uint16_t slot_idx = 0;
1478         uint16_t segs_await = 0;
1479         struct async_inflight_info *pkts_info = vq->async_pkts_info;
1480         uint32_t n_pkts = 0, pkt_err = 0;
1481         uint32_t num_async_pkts = 0, num_done_pkts = 0;
1482         struct {
1483                 uint16_t pkt_idx;
1484                 uint16_t last_avail_idx;
1485         } async_pkts_log[MAX_PKT_BURST];
1486
1487         /*
1488          * The ordering between avail index and desc reads needs to be enforced.
1489          */
1490         avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
1491
1492         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1493
1494         for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
1495                 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
1496                 uint16_t nr_vec = 0;
1497
1498                 if (unlikely(reserve_avail_buf_split(dev, vq,
1499                                                 pkt_len, buf_vec, &num_buffers,
1500                                                 avail_head, &nr_vec) < 0)) {
1501                         VHOST_LOG_DATA(DEBUG,
1502                                 "(%d) failed to get enough desc from vring\n",
1503                                 dev->vid);
1504                         vq->shadow_used_idx -= num_buffers;
1505                         break;
1506                 }
1507
1508                 VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
1509                         dev->vid, vq->last_avail_idx,
1510                         vq->last_avail_idx + num_buffers);
1511
1512                 if (async_mbuf_to_desc(dev, vq, pkts[pkt_idx],
1513                                 buf_vec, nr_vec, num_buffers,
1514                                 src_iovec, dst_iovec, src_it, dst_it) < 0) {
1515                         vq->shadow_used_idx -= num_buffers;
1516                         break;
1517                 }
1518
1519                 slot_idx = (vq->async_pkts_idx + num_async_pkts) &
1520                         (vq->size - 1);
1521                 if (src_it->count) {
1522                         uint16_t from, to;
1523
1524                         async_fill_desc(&tdes[pkt_burst_idx++], src_it, dst_it);
1525                         pkts_info[slot_idx].descs = num_buffers;
1526                         pkts_info[slot_idx].mbuf = pkts[pkt_idx];
1527                         async_pkts_log[num_async_pkts].pkt_idx = pkt_idx;
1528                         async_pkts_log[num_async_pkts++].last_avail_idx =
1529                                 vq->last_avail_idx;
1530                         src_iovec += src_it->nr_segs;
1531                         dst_iovec += dst_it->nr_segs;
1532                         segs_await += src_it->nr_segs;
1533                         src_it += 2;
1534                         dst_it += 2;
1535
1536                         /*
1537                          * Move these entries out of the shadow used ring and
1538                          * keep them aside as DMA-occupied descriptors.
1539                          */
1540                         from = vq->shadow_used_idx - num_buffers;
1541                         to = vq->async_desc_idx & (vq->size - 1);
1542                         if (num_buffers + to <= vq->size) {
1543                                 rte_memcpy(&vq->async_descs_split[to],
1544                                                 &vq->shadow_used_split[from],
1545                                                 num_buffers *
1546                                                 sizeof(struct vring_used_elem));
1547                         } else {
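                                /*
                                 * The copy wraps past the end of
                                 * async_descs_split: fill the tail first,
                                 * then continue from slot 0.
                                 */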
1548                                 int size = vq->size - to;
1549
1550                                 rte_memcpy(&vq->async_descs_split[to],
1551                                                 &vq->shadow_used_split[from],
1552                                                 size *
1553                                                 sizeof(struct vring_used_elem));
1554                                 rte_memcpy(vq->async_descs_split,
1555                                                 &vq->shadow_used_split[from +
1556                                                 size], (num_buffers - size) *
1557                                            sizeof(struct vring_used_elem));
1558                         }
1559                         vq->async_desc_idx += num_buffers;
1560                         vq->shadow_used_idx -= num_buffers;
1561                 } else
1562                         comp_pkts[num_done_pkts++] = pkts[pkt_idx];
1563
1564                 vq->last_avail_idx += num_buffers;
1565
1566                 /*
1567                  * Conditions to trigger the async device transfer:
1568                  * - the number of buffered packets reaches the transfer threshold
1569                  * - fewer unused async iovecs remain than one packet may need
1570                  */
1571                 if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
1572                         ((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
1573                         BUF_VECTOR_MAX))) {
1574                         n_pkts = vq->async_ops.transfer_data(dev->vid,
1575                                         queue_id, tdes, 0, pkt_burst_idx);
1576                         src_iovec = vec_pool;
1577                         dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
1578                         src_it = it_pool;
1579                         dst_it = it_pool + 1;
1580                         segs_await = 0;
1581                         vq->async_pkts_inflight_n += n_pkts;
1582
1583                         if (unlikely(n_pkts < pkt_burst_idx)) {
1584                                 /*
1585                                  * Record the number of failed packets here; the
1586                                  * actual error handling happens when the
1587                                  * application polls for completions.
1588                                  */
1589                                 pkt_err = pkt_burst_idx - n_pkts;
1590                                 pkt_burst_idx = 0;
1591                                 break;
1592                         }
1593
1594                         pkt_burst_idx = 0;
1595                 }
1596         }
1597
1598         if (pkt_burst_idx) {
1599                 n_pkts = vq->async_ops.transfer_data(dev->vid,
1600                                 queue_id, tdes, 0, pkt_burst_idx);
1601                 vq->async_pkts_inflight_n += n_pkts;
1602
1603                 if (unlikely(n_pkts < pkt_burst_idx))
1604                         pkt_err = pkt_burst_idx - n_pkts;
1605         }
1606
1607         do_data_copy_enqueue(dev, vq);
1608
1609         if (unlikely(pkt_err)) {
1610                 uint16_t num_descs = 0;
1611
1612                 num_async_pkts -= pkt_err;
1613                 /* Count the descriptors used by the DMA-error packets. */
1614                 while (pkt_err-- > 0) {
1615                         num_descs += pkts_info[slot_idx & (vq->size - 1)].descs;
1616                         slot_idx--;
1617                 }
1618                 vq->async_desc_idx -= num_descs;
1619                 /* recover shadow used ring and available ring */
1620                 vq->shadow_used_idx -= (vq->last_avail_idx -
1621                                 async_pkts_log[num_async_pkts].last_avail_idx -
1622                                 num_descs);
1623                 vq->last_avail_idx =
1624                         async_pkts_log[num_async_pkts].last_avail_idx;
1625                 pkt_idx = async_pkts_log[num_async_pkts].pkt_idx;
1626                 num_done_pkts = pkt_idx - num_async_pkts;
1627         }
1628
1629         vq->async_pkts_idx += num_async_pkts;
1630         *comp_count = num_done_pkts;
1631
1632         if (likely(vq->shadow_used_idx)) {
1633                 flush_shadow_used_ring_split(dev, vq);
1634                 vhost_vring_call_split(dev, vq);
1635         }
1636
1637         return pkt_idx;
1638 }
1639
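/*
 * Poll the async channel for completed copies on the given virtqueue,
 * hand the corresponding mbufs back to the caller and write the completed
 * descriptors into the used ring.
 */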
1640 uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
1641                 struct rte_mbuf **pkts, uint16_t count)
1642 {
1643         struct virtio_net *dev = get_device(vid);
1644         struct vhost_virtqueue *vq;
1645         uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0;
1646         uint16_t start_idx, pkts_idx, vq_size;
1647         struct async_inflight_info *pkts_info;
1648         uint16_t from, i;
1649
1650         if (!dev)
1651                 return 0;
1652
1653         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
1654         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
1655                 VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
1656                         dev->vid, __func__, queue_id);
1657                 return 0;
1658         }
1659
1660         vq = dev->virtqueue[queue_id];
1661
1662         if (unlikely(!vq->async_registered)) {
1663                 VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
1664                         dev->vid, __func__, queue_id);
1665                 return 0;
1666         }
1667
1668         rte_spinlock_lock(&vq->access_lock);
1669
1670         pkts_idx = vq->async_pkts_idx & (vq->size - 1);
1671         pkts_info = vq->async_pkts_info;
1672         vq_size = vq->size;
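        /* Locate the oldest in-flight packet recorded in async_pkts_info. */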
1673         start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
1674                 vq_size, vq->async_pkts_inflight_n);
1675
1676         if (count > vq->async_last_pkts_n)
1677                 n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
1678                         queue_id, 0, count - vq->async_last_pkts_n);
1679         n_pkts_cpl += vq->async_last_pkts_n;
1680
1681         n_pkts_put = RTE_MIN(count, n_pkts_cpl);
1682         if (unlikely(n_pkts_put == 0)) {
1683                 vq->async_last_pkts_n = n_pkts_cpl;
1684                 goto done;
1685         }
1686
1687         for (i = 0; i < n_pkts_put; i++) {
1688                 from = (start_idx + i) & (vq_size - 1);
1689                 n_descs += pkts_info[from].descs;
1690                 pkts[i] = pkts_info[from].mbuf;
1691         }
1692         vq->async_last_pkts_n = n_pkts_cpl - n_pkts_put;
1693         vq->async_pkts_inflight_n -= n_pkts_put;
1694
1695         if (likely(vq->enabled && vq->access_ok)) {
1696                 uint16_t nr_left = n_descs;
1697                 uint16_t nr_copy;
1698                 uint16_t to;
1699
1700                 /* write back completed descriptors to used ring */
1701                 do {
1702                         from = vq->last_async_desc_idx & (vq->size - 1);
1703                         nr_copy = nr_left + from <= vq->size ? nr_left :
1704                                 vq->size - from;
1705                         to = vq->last_used_idx & (vq->size - 1);
1706
1707                         if (to + nr_copy <= vq->size) {
1708                                 rte_memcpy(&vq->used->ring[to],
1709                                                 &vq->async_descs_split[from],
1710                                                 nr_copy *
1711                                                 sizeof(struct vring_used_elem));
1712                         } else {
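                                /*
                                 * The write-back wraps past the end of the
                                 * used ring: copy the tail first, then the
                                 * remainder from slot 0.
                                 */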
1713                                 uint16_t size = vq->size - to;
1714
1715                                 rte_memcpy(&vq->used->ring[to],
1716                                                 &vq->async_descs_split[from],
1717                                                 size *
1718                                                 sizeof(struct vring_used_elem));
1719                                 rte_memcpy(vq->used->ring,
1720                                                 &vq->async_descs_split[from +
1721                                                 size], (nr_copy - size) *
1722                                                 sizeof(struct vring_used_elem));
1723                         }
1724
1725                         vq->last_async_desc_idx += nr_copy;
1726                         vq->last_used_idx += nr_copy;
1727                         nr_left -= nr_copy;
1728                 } while (nr_left > 0);
1729
1730                 __atomic_add_fetch(&vq->used->idx, n_descs, __ATOMIC_RELEASE);
1731                 vhost_vring_call_split(dev, vq);
1732         } else
1733                 vq->last_async_desc_idx += n_descs;
1734
1735 done:
1736         rte_spinlock_unlock(&vq->access_lock);
1737
1738         return n_pkts_put;
1739 }
1740
1741 static __rte_always_inline uint32_t
1742 virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
1743         struct rte_mbuf **pkts, uint32_t count,
1744         struct rte_mbuf **comp_pkts, uint32_t *comp_count)
1745 {
1746         struct vhost_virtqueue *vq;
1747         uint32_t nb_tx = 0;
1748
1749         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
1750         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
1751                 VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
1752                         dev->vid, __func__, queue_id);
1753                 return 0;
1754         }
1755
1756         vq = dev->virtqueue[queue_id];
1757
1758         rte_spinlock_lock(&vq->access_lock);
1759
1760         if (unlikely(!vq->enabled || !vq->async_registered))
1761                 goto out_access_unlock;
1762
1763         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1764                 vhost_user_iotlb_rd_lock(vq);
1765
1766         if (unlikely(!vq->access_ok))
1767                 if (unlikely(vring_translate(dev, vq) < 0))
1768                         goto out;
1769
1770         count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
1771         if (count == 0)
1772                 goto out;
1773
1774         /* TODO: packed queue not implemented */
1775         if (vq_is_packed(dev))
1776                 nb_tx = 0;
1777         else
1778                 nb_tx = virtio_dev_rx_async_submit_split(dev,
1779                                 vq, queue_id, pkts, count, comp_pkts,
1780                                 comp_count);
1781
1782 out:
1783         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1784                 vhost_user_iotlb_rd_unlock(vq);
1785
1786 out_access_unlock:
1787         rte_spinlock_unlock(&vq->access_lock);
1788
1789         return nb_tx;
1790 }
1791
1792 uint16_t
1793 rte_vhost_submit_enqueue_burst(int vid, uint16_t queue_id,
1794                 struct rte_mbuf **pkts, uint16_t count,
1795                 struct rte_mbuf **comp_pkts, uint32_t *comp_count)
1796 {
1797         struct virtio_net *dev = get_device(vid);
1798
1799         *comp_count = 0;
1800         if (!dev)
1801                 return 0;
1802
1803         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
1804                 VHOST_LOG_DATA(ERR,
1805                         "(%d) %s: built-in vhost net backend is disabled.\n",
1806                         dev->vid, __func__);
1807                 return 0;
1808         }
1809
1810         return virtio_dev_rx_async_submit(dev, queue_id, pkts, count, comp_pkts,
1811                         comp_count);
1812 }
1813
1814 static inline bool
1815 virtio_net_with_host_offload(struct virtio_net *dev)
1816 {
1817         if (dev->features &
1818                         ((1ULL << VIRTIO_NET_F_CSUM) |
1819                          (1ULL << VIRTIO_NET_F_HOST_ECN) |
1820                          (1ULL << VIRTIO_NET_F_HOST_TSO4) |
1821                          (1ULL << VIRTIO_NET_F_HOST_TSO6) |
1822                          (1ULL << VIRTIO_NET_F_HOST_UFO)))
1823                 return true;
1824
1825         return false;
1826 }
1827
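/*
 * Parse the Ethernet (and optional VLAN) header plus the IPv4/IPv6 header
 * of the mbuf to find the L4 protocol and header, filling in l2_len,
 * l3_len and the PKT_TX_IPV4/PKT_TX_IPV6 flags along the way.
 */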
1828 static void
1829 parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
1830 {
1831         struct rte_ipv4_hdr *ipv4_hdr;
1832         struct rte_ipv6_hdr *ipv6_hdr;
1833         void *l3_hdr = NULL;
1834         struct rte_ether_hdr *eth_hdr;
1835         uint16_t ethertype;
1836
1837         eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1838
1839         m->l2_len = sizeof(struct rte_ether_hdr);
1840         ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
1841
1842         if (ethertype == RTE_ETHER_TYPE_VLAN) {
1843                 struct rte_vlan_hdr *vlan_hdr =
1844                         (struct rte_vlan_hdr *)(eth_hdr + 1);
1845
1846                 m->l2_len += sizeof(struct rte_vlan_hdr);
1847                 ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
1848         }
1849
1850         l3_hdr = (char *)eth_hdr + m->l2_len;
1851
1852         switch (ethertype) {
1853         case RTE_ETHER_TYPE_IPV4:
1854                 ipv4_hdr = l3_hdr;
1855                 *l4_proto = ipv4_hdr->next_proto_id;
1856                 m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
1857                 *l4_hdr = (char *)l3_hdr + m->l3_len;
1858                 m->ol_flags |= PKT_TX_IPV4;
1859                 break;
1860         case RTE_ETHER_TYPE_IPV6:
1861                 ipv6_hdr = l3_hdr;
1862                 *l4_proto = ipv6_hdr->proto;
1863                 m->l3_len = sizeof(struct rte_ipv6_hdr);
1864                 *l4_hdr = (char *)l3_hdr + m->l3_len;
1865                 m->ol_flags |= PKT_TX_IPV6;
1866                 break;
1867         default:
1868                 m->l3_len = 0;
1869                 *l4_proto = 0;
1870                 *l4_hdr = NULL;
1871                 break;
1872         }
1873 }
1874
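/*
 * Translate the checksum and GSO hints of the guest-provided virtio-net
 * header into the corresponding mbuf Tx offload flags.
 */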
1875 static __rte_always_inline void
1876 vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
1877 {
1878         uint16_t l4_proto = 0;
1879         void *l4_hdr = NULL;
1880         struct rte_tcp_hdr *tcp_hdr = NULL;
1881
1882         if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
1883                 return;
1884
1885         parse_ethernet(m, &l4_proto, &l4_hdr);
1886         if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1887                 if (hdr->csum_start == (m->l2_len + m->l3_len)) {
1888                         switch (hdr->csum_offset) {
1889                         case (offsetof(struct rte_tcp_hdr, cksum)):
1890                                 if (l4_proto == IPPROTO_TCP)
1891                                         m->ol_flags |= PKT_TX_TCP_CKSUM;
1892                                 break;
1893                         case (offsetof(struct rte_udp_hdr, dgram_cksum)):
1894                                 if (l4_proto == IPPROTO_UDP)
1895                                         m->ol_flags |= PKT_TX_UDP_CKSUM;
1896                                 break;
1897                         case (offsetof(struct rte_sctp_hdr, cksum)):
1898                                 if (l4_proto == IPPROTO_SCTP)
1899                                         m->ol_flags |= PKT_TX_SCTP_CKSUM;
1900                                 break;
1901                         default:
1902                                 break;
1903                         }
1904                 }
1905         }
1906
1907         if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1908                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1909                 case VIRTIO_NET_HDR_GSO_TCPV4:
1910                 case VIRTIO_NET_HDR_GSO_TCPV6:
1911                         tcp_hdr = l4_hdr;
1912                         m->ol_flags |= PKT_TX_TCP_SEG;
1913                         m->tso_segsz = hdr->gso_size;
1914                         m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
1915                         break;
1916                 case VIRTIO_NET_HDR_GSO_UDP:
1917                         m->ol_flags |= PKT_TX_UDP_SEG;
1918                         m->tso_segsz = hdr->gso_size;
1919                         m->l4_len = sizeof(struct rte_udp_hdr);
1920                         break;
1921                 default:
1922                         VHOST_LOG_DATA(WARNING,
1923                                 "unsupported gso type %u.\n", hdr->gso_type);
1924                         break;
1925                 }
1926         }
1927 }
1928
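/*
 * Gather a virtio-net header that is scattered across several descriptor
 * buffers into a contiguous local copy.
 */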
1929 static __rte_noinline void
1930 copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
1931                 struct buf_vector *buf_vec)
1932 {
1933         uint64_t len;
1934         uint64_t remain = sizeof(struct virtio_net_hdr);
1935         uint64_t src;
1936         uint64_t dst = (uint64_t)(uintptr_t)hdr;
1937
1938         while (remain) {
1939                 len = RTE_MIN(remain, buf_vec->buf_len);
1940                 src = buf_vec->buf_addr;
1941                 rte_memcpy((void *)(uintptr_t)dst,
1942                                 (void *)(uintptr_t)src, len);
1943
1944                 remain -= len;
1945                 dst += len;
1946                 buf_vec++;
1947         }
1948 }
1949
1950 static __rte_always_inline int
1951 copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
1952                   struct buf_vector *buf_vec, uint16_t nr_vec,
1953                   struct rte_mbuf *m, struct rte_mempool *mbuf_pool)
1954 {
1955         uint32_t buf_avail, buf_offset;
1956         uint64_t buf_addr, buf_len;
1957         uint32_t mbuf_avail, mbuf_offset;
1958         uint32_t cpy_len;
1959         struct rte_mbuf *cur = m, *prev = m;
1960         struct virtio_net_hdr tmp_hdr;
1961         struct virtio_net_hdr *hdr = NULL;
1962         /* Counter to guard against a looping descriptor chain */
1963         uint16_t vec_idx = 0;
1964         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
1965         int error = 0;
1966
1967         buf_addr = buf_vec[vec_idx].buf_addr;
1968         buf_len = buf_vec[vec_idx].buf_len;
1969
1970         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
1971                 error = -1;
1972                 goto out;
1973         }
1974
1975         if (virtio_net_with_host_offload(dev)) {
1976                 if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
1977                         /*
1978                          * No luck, the virtio-net header doesn't fit
1979                          * in a contiguous virtual area.
1980                          */
1981                         copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec);
1982                         hdr = &tmp_hdr;
1983                 } else {
1984                         hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
1985                 }
1986         }
1987
1988         /*
1989          * A virtio driver normally uses at least 2 desc buffers
1990          * for Tx: the first for storing the header, and the others
1991          * for storing the data.
1992          */
1993         if (unlikely(buf_len < dev->vhost_hlen)) {
1994                 buf_offset = dev->vhost_hlen - buf_len;
1995                 vec_idx++;
1996                 buf_addr = buf_vec[vec_idx].buf_addr;
1997                 buf_len = buf_vec[vec_idx].buf_len;
1998                 buf_avail  = buf_len - buf_offset;
1999         } else if (buf_len == dev->vhost_hlen) {
2000                 if (unlikely(++vec_idx >= nr_vec))
2001                         goto out;
2002                 buf_addr = buf_vec[vec_idx].buf_addr;
2003                 buf_len = buf_vec[vec_idx].buf_len;
2004
2005                 buf_offset = 0;
2006                 buf_avail = buf_len;
2007         } else {
2008                 buf_offset = dev->vhost_hlen;
2009                 buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
2010         }
2011
2012         PRINT_PACKET(dev,
2013                         (uintptr_t)(buf_addr + buf_offset),
2014                         (uint32_t)buf_avail, 0);
2015
2016         mbuf_offset = 0;
2017         mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
2018         while (1) {
2019                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
2020
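                /*
                 * Copy immediately when the chunk is large, the batch array
                 * is full, or offloads are enabled and this is still the
                 * first mbuf (its headers are parsed before the batched
                 * copies are flushed); otherwise defer the copy to
                 * do_data_copy_dequeue().
                 */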
2021                 if (likely(cpy_len > MAX_BATCH_LEN ||
2022                                         vq->batch_copy_nb_elems >= vq->size ||
2023                                         (hdr && cur == m))) {
2024                         rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
2025                                                 mbuf_offset),
2026                                         (void *)((uintptr_t)(buf_addr +
2027                                                         buf_offset)), cpy_len);
2028                 } else {
2029                         batch_copy[vq->batch_copy_nb_elems].dst =
2030                                 rte_pktmbuf_mtod_offset(cur, void *,
2031                                                 mbuf_offset);
2032                         batch_copy[vq->batch_copy_nb_elems].src =
2033                                 (void *)((uintptr_t)(buf_addr + buf_offset));
2034                         batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
2035                         vq->batch_copy_nb_elems++;
2036                 }
2037
2038                 mbuf_avail  -= cpy_len;
2039                 mbuf_offset += cpy_len;
2040                 buf_avail -= cpy_len;
2041                 buf_offset += cpy_len;
2042
2043                 /* This buf has reached its end, get the next one */
2044                 if (buf_avail == 0) {
2045                         if (++vec_idx >= nr_vec)
2046                                 break;
2047
2048                         buf_addr = buf_vec[vec_idx].buf_addr;
2049                         buf_len = buf_vec[vec_idx].buf_len;
2050
2051                         buf_offset = 0;
2052                         buf_avail  = buf_len;
2053
2054                         PRINT_PACKET(dev, (uintptr_t)buf_addr,
2055                                         (uint32_t)buf_avail, 0);
2056                 }
2057
2058                 /*
2059                  * This mbuf has reached its end, get a new one
2060                  * to hold more data.
2061                  */
2062                 if (mbuf_avail == 0) {
2063                         cur = rte_pktmbuf_alloc(mbuf_pool);
2064                         if (unlikely(cur == NULL)) {
2065                                 VHOST_LOG_DATA(ERR, "Failed to "
2066                                         "allocate memory for mbuf.\n");
2067                                 error = -1;
2068                                 goto out;
2069                         }
2070
2071                         prev->next = cur;
2072                         prev->data_len = mbuf_offset;
2073                         m->nb_segs += 1;
2074                         m->pkt_len += mbuf_offset;
2075                         prev = cur;
2076
2077                         mbuf_offset = 0;
2078                         mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
2079                 }
2080         }
2081
2082         prev->data_len = mbuf_offset;
2083         m->pkt_len    += mbuf_offset;
2084
2085         if (hdr)
2086                 vhost_dequeue_offload(hdr, m);
2087
2088 out:
2089
2090         return error;
2091 }
2092
2093 static void
2094 virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)
2095 {
2096         rte_free(opaque);
2097 }
2098
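/*
 * Attach an externally allocated buffer (with its shared info stored at
 * the tail) to 'pkt', so that packets larger than the mempool data room
 * can still be received into a single mbuf.
 */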
2099 static int
2100 virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
2101 {
2102         struct rte_mbuf_ext_shared_info *shinfo = NULL;
2103         uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
2104         uint16_t buf_len;
2105         rte_iova_t iova;
2106         void *buf;
2107
2108         total_len += sizeof(*shinfo) + sizeof(uintptr_t);
2109         total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
2110
2111         if (unlikely(total_len > UINT16_MAX))
2112                 return -ENOSPC;
2113
2114         buf_len = total_len;
2115         buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
2116         if (unlikely(buf == NULL))
2117                 return -ENOMEM;
2118
2119         /* Initialize shinfo */
2120         shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
2121                                                 virtio_dev_extbuf_free, buf);
2122         if (unlikely(shinfo == NULL)) {
2123                 rte_free(buf);
2124                 VHOST_LOG_DATA(ERR, "Failed to init shinfo\n");
2125                 return -1;
2126         }
2127
2128         iova = rte_malloc_virt2iova(buf);
2129         rte_pktmbuf_attach_extbuf(pkt, buf, iova, buf_len, shinfo);
2130         rte_pktmbuf_reset_headroom(pkt);
2131
2132         return 0;
2133 }
2134
2135 /*
2136  * Allocate a pktmbuf large enough for 'data_len' bytes, as allowed by the host configuration.
2137  */
2138 static __rte_always_inline struct rte_mbuf *
2139 virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
2140                          uint32_t data_len)
2141 {
2142         struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp);
2143
2144         if (unlikely(pkt == NULL)) {
2145                 VHOST_LOG_DATA(ERR,
2146                         "Failed to allocate memory for mbuf.\n");
2147                 return NULL;
2148         }
2149
2150         if (rte_pktmbuf_tailroom(pkt) >= data_len)
2151                 return pkt;
2152
2153         /* attach an external buffer if supported */
2154         if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
2155                 return pkt;
2156
2157         /* check if chained buffers are allowed */
2158         if (!dev->linearbuf)
2159                 return pkt;
2160
2161         /* Data doesn't fit into the buffer and the host supports
2162          * only linear buffers
2163          */
2164         rte_pktmbuf_free(pkt);
2165
2166         return NULL;
2167 }
2168
2169 static __rte_noinline uint16_t
2170 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
2171         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
2172 {
2173         uint16_t i;
2174         uint16_t free_entries;
2175         uint16_t dropped = 0;
2176         static bool allocerr_warned;
2177
2178         /*
2179          * The ordering between avail index and
2180          * desc reads needs to be enforced.
2181          */
2182         free_entries = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE) -
2183                         vq->last_avail_idx;
2184         if (free_entries == 0)
2185                 return 0;
2186
2187         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
2188
2189         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
2190
2191         count = RTE_MIN(count, MAX_PKT_BURST);
2192         count = RTE_MIN(count, free_entries);
2193         VHOST_LOG_DATA(DEBUG, "(%d) about to dequeue %u buffers\n",
2194                         dev->vid, count);
2195
2196         for (i = 0; i < count; i++) {
2197                 struct buf_vector buf_vec[BUF_VECTOR_MAX];
2198                 uint16_t head_idx;
2199                 uint32_t buf_len;
2200                 uint16_t nr_vec = 0;
2201                 int err;
2202
2203                 if (unlikely(fill_vec_buf_split(dev, vq,
2204                                                 vq->last_avail_idx + i,
2205                                                 &nr_vec, buf_vec,
2206                                                 &head_idx, &buf_len,
2207                                                 VHOST_ACCESS_RO) < 0))
2208                         break;
2209
2210                 update_shadow_used_ring_split(vq, head_idx, 0);
2211
2212                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
2213                 if (unlikely(pkts[i] == NULL)) {
2214                         /*
2215                          * mbuf allocation fails for jumbo packets when external
2216                          * buffer allocation is not allowed and a linear buffer
2217                          * is required. Drop this packet.
2218                          */
2219                         if (!allocerr_warned) {
2220                                 VHOST_LOG_DATA(ERR,
2221                                         "Failed mbuf alloc of size %d from %s on %s.\n",
2222                                         buf_len, mbuf_pool->name, dev->ifname);
2223                                 allocerr_warned = true;
2224                         }
2225                         dropped += 1;
2226                         i++;
2227                         break;
2228                 }
2229
2230                 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
2231                                 mbuf_pool);
2232                 if (unlikely(err)) {
2233                         rte_pktmbuf_free(pkts[i]);
2234                         if (!allocerr_warned) {
2235                                 VHOST_LOG_DATA(ERR,
2236                                         "Failed to copy desc to mbuf on %s.\n",
2237                                         dev->ifname);
2238                                 allocerr_warned = true;
2239                         }
2240                         dropped += 1;
2241                         i++;
2242                         break;
2243                 }
2244         }
2245
2246         vq->last_avail_idx += i;
2247
2248         do_data_copy_dequeue(vq);
2249         if (unlikely(i < count))
2250                 vq->shadow_used_idx = i;
2251         if (likely(vq->shadow_used_idx)) {
2252                 flush_shadow_used_ring_split(dev, vq);
2253                 vhost_vring_call_split(dev, vq);
2254         }
2255
2256         return (i - dropped);
2257 }
2258
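/*
 * Check that a full batch of packed descriptors is available starting at
 * avail_idx, translate their addresses and allocate one mbuf per
 * descriptor; any failure rejects the whole batch so the caller can fall
 * back to the single-packet path.
 */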
2259 static __rte_always_inline int
2260 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
2261                                  struct vhost_virtqueue *vq,
2262                                  struct rte_mempool *mbuf_pool,
2263                                  struct rte_mbuf **pkts,
2264                                  uint16_t avail_idx,
2265                                  uintptr_t *desc_addrs,
2266                                  uint16_t *ids)
2267 {
2268         bool wrap = vq->avail_wrap_counter;
2269         struct vring_packed_desc *descs = vq->desc_packed;
2270         uint64_t lens[PACKED_BATCH_SIZE];
2271         uint64_t buf_lens[PACKED_BATCH_SIZE];
2272         uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2273         uint16_t flags, i;
2274
2275         if (unlikely(avail_idx & PACKED_BATCH_MASK))
2276                 return -1;
2277         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
2278                 return -1;
2279
2280         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2281                 flags = descs[avail_idx + i].flags;
2282                 if (unlikely((wrap != !!(flags & VRING_DESC_F_AVAIL)) ||
2283                              (wrap == !!(flags & VRING_DESC_F_USED))  ||
2284                              (flags & PACKED_DESC_SINGLE_DEQUEUE_FLAG)))
2285                         return -1;
2286         }
2287
2288         rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
2289
2290         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2291                 lens[i] = descs[avail_idx + i].len;
2292
2293         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2294                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
2295                                                   descs[avail_idx + i].addr,
2296                                                   &lens[i], VHOST_ACCESS_RW);
2297         }
2298
2299         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2300                 if (unlikely(!desc_addrs[i]))
2301                         return -1;
2302                 if (unlikely((lens[i] != descs[avail_idx + i].len)))
2303                         return -1;
2304         }
2305
2306         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2307                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]);
2308                 if (!pkts[i])
2309                         goto free_buf;
2310         }
2311
2312         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2313                 buf_lens[i] = pkts[i]->buf_len - pkts[i]->data_off;
2314
2315         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2316                 if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
2317                         goto free_buf;
2318         }
2319
2320         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2321                 pkts[i]->pkt_len = lens[i] - buf_offset;
2322                 pkts[i]->data_len = pkts[i]->pkt_len;
2323                 ids[i] = descs[avail_idx + i].id;
2324         }
2325
2326         return 0;
2327
2328 free_buf:
2329         for (i = 0; i < PACKED_BATCH_SIZE; i++)
2330                 rte_pktmbuf_free(pkts[i]);
2331
2332         return -1;
2333 }
2334
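/*
 * Dequeue PACKED_BATCH_SIZE descriptors at once: copy their payloads into
 * freshly allocated mbufs, apply the virtio-net header offload hints if
 * negotiated and record the batch in the shadow used ring.
 */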
2335 static __rte_always_inline int
2336 virtio_dev_tx_batch_packed(struct virtio_net *dev,
2337                            struct vhost_virtqueue *vq,
2338                            struct rte_mempool *mbuf_pool,
2339                            struct rte_mbuf **pkts)
2340 {
2341         uint16_t avail_idx = vq->last_avail_idx;
2342         uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2343         struct virtio_net_hdr *hdr;
2344         uintptr_t desc_addrs[PACKED_BATCH_SIZE];
2345         uint16_t ids[PACKED_BATCH_SIZE];
2346         uint16_t i;
2347
2348         if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
2349                                              avail_idx, desc_addrs, ids))
2350                 return -1;
2351
2352         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2353                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
2354
2355         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2356                 rte_memcpy(rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
2357                            (void *)(uintptr_t)(desc_addrs[i] + buf_offset),
2358                            pkts[i]->pkt_len);
2359
2360         if (virtio_net_with_host_offload(dev)) {
2361                 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2362                         hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
2363                         vhost_dequeue_offload(hdr, pkts[i]);
2364                 }
2365         }
2366
2367         if (virtio_net_is_inorder(dev))
2368                 vhost_shadow_dequeue_batch_packed_inorder(vq,
2369                         ids[PACKED_BATCH_SIZE - 1]);
2370         else
2371                 vhost_shadow_dequeue_batch_packed(dev, vq, ids);
2372
2373         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
2374
2375         return 0;
2376 }
2377
2378 static __rte_always_inline int
2379 vhost_dequeue_single_packed(struct virtio_net *dev,
2380                             struct vhost_virtqueue *vq,
2381                             struct rte_mempool *mbuf_pool,
2382                             struct rte_mbuf **pkts,
2383                             uint16_t *buf_id,
2384                             uint16_t *desc_count)
2385 {
2386         struct buf_vector buf_vec[BUF_VECTOR_MAX];
2387         uint32_t buf_len;
2388         uint16_t nr_vec = 0;
2389         int err;
2390         static bool allocerr_warned;
2391
2392         if (unlikely(fill_vec_buf_packed(dev, vq,
2393                                          vq->last_avail_idx, desc_count,
2394                                          buf_vec, &nr_vec,
2395                                          buf_id, &buf_len,
2396                                          VHOST_ACCESS_RO) < 0))
2397                 return -1;
2398
2399         *pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
2400         if (unlikely(*pkts == NULL)) {
2401                 if (!allocerr_warned) {
2402                         VHOST_LOG_DATA(ERR,
2403                                 "Failed mbuf alloc of size %d from %s on %s.\n",
2404                                 buf_len, mbuf_pool->name, dev->ifname);
2405                         allocerr_warned = true;
2406                 }
2407                 return -1;
2408         }
2409
2410         err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
2411                                 mbuf_pool);
2412         if (unlikely(err)) {
2413                 if (!allocerr_warned) {
2414                         VHOST_LOG_DATA(ERR,
2415                                 "Failed to copy desc to mbuf on %s.\n",
2416                                 dev->ifname);
2417                         allocerr_warned = true;
2418                 }
2419                 rte_pktmbuf_free(*pkts);
2420                 return -1;
2421         }
2422
2423         return 0;
2424 }
2425
2426 static __rte_always_inline int
2427 virtio_dev_tx_single_packed(struct virtio_net *dev,
2428                             struct vhost_virtqueue *vq,
2429                             struct rte_mempool *mbuf_pool,
2430                             struct rte_mbuf **pkts)
2431 {
2432
2433         uint16_t buf_id, desc_count = 0;
2434         int ret;
2435
2436         ret = vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
2437                                         &desc_count);
2438
2439         if (likely(desc_count > 0)) {
2440                 if (virtio_net_is_inorder(dev))
2441                         vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
2442                                                                    desc_count);
2443                 else
2444                         vhost_shadow_dequeue_single_packed(vq, buf_id,
2445                                         desc_count);
2446
2447                 vq_inc_last_avail_packed(vq, desc_count);
2448         }
2449
2450         return ret;
2451 }
2452
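/*
 * Dequeue a burst from a packed virtqueue: try the batched path first and
 * fall back to per-packet dequeue, flushing the shadow used ring once at
 * the end.
 */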
2453 static __rte_noinline uint16_t
2454 virtio_dev_tx_packed(struct virtio_net *dev,
2455                      struct vhost_virtqueue *__rte_restrict vq,
2456                      struct rte_mempool *mbuf_pool,
2457                      struct rte_mbuf **__rte_restrict pkts,
2458                      uint32_t count)
2459 {
2460         uint32_t pkt_idx = 0;
2461         uint32_t remained = count;
2462
2463         do {
2464                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
2465
2466                 if (remained >= PACKED_BATCH_SIZE) {
2467                         if (!virtio_dev_tx_batch_packed(dev, vq, mbuf_pool,
2468                                                         &pkts[pkt_idx])) {
2469                                 pkt_idx += PACKED_BATCH_SIZE;
2470                                 remained -= PACKED_BATCH_SIZE;
2471                                 continue;
2472                         }
2473                 }
2474
2475                 if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
2476                                                 &pkts[pkt_idx]))
2477                         break;
2478                 pkt_idx++;
2479                 remained--;
2480
2481         } while (remained);
2482
2483         if (vq->shadow_used_idx) {
2484                 do_data_copy_dequeue(vq);
2485
2486                 vhost_flush_dequeue_shadow_packed(dev, vq);
2487                 vhost_vring_call_packed(dev, vq);
2488         }
2489
2490         return pkt_idx;
2491 }
2492
2493 uint16_t
2494 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
2495         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
2496 {
2497         struct virtio_net *dev;
2498         struct rte_mbuf *rarp_mbuf = NULL;
2499         struct vhost_virtqueue *vq;
2500         int16_t success = 1;
2501
2502         dev = get_device(vid);
2503         if (!dev)
2504                 return 0;
2505
2506         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
2507                 VHOST_LOG_DATA(ERR,
2508                         "(%d) %s: built-in vhost net backend is disabled.\n",
2509                         dev->vid, __func__);
2510                 return 0;
2511         }
2512
2513         if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
2514                 VHOST_LOG_DATA(ERR,
2515                         "(%d) %s: invalid virtqueue idx %d.\n",
2516                         dev->vid, __func__, queue_id);
2517                 return 0;
2518         }
2519
2520         vq = dev->virtqueue[queue_id];
2521
2522         if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
2523                 return 0;
2524
2525         if (unlikely(!vq->enabled)) {
2526                 count = 0;
2527                 goto out_access_unlock;
2528         }
2529
2530         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2531                 vhost_user_iotlb_rd_lock(vq);
2532
2533         if (unlikely(!vq->access_ok))
2534                 if (unlikely(vring_translate(dev, vq) < 0)) {
2535                         count = 0;
2536                         goto out;
2537                 }
2538
2539         /*
2540          * Construct a RARP broadcast packet, and inject it into the "pkts"
2541          * array, to make it look like the guest actually sent such a packet.
2542          *
2543          * Check user_send_rarp() for more information.
2544          *
2545          * broadcast_rarp shares a cacheline in the virtio_net structure
2546          * with some fields that are accessed during enqueue and
2547          * __atomic_compare_exchange_n causes a write when it performs the
2548          * compare and exchange. This could result in false sharing between enqueue
2549          * and dequeue.
2550          *
2551          * Prevent unnecessary false sharing by reading broadcast_rarp first
2552          * and only performing compare and exchange if the read indicates it
2553          * is likely to be set.
2554          */
2555         if (unlikely(__atomic_load_n(&dev->broadcast_rarp, __ATOMIC_ACQUIRE) &&
2556                         __atomic_compare_exchange_n(&dev->broadcast_rarp,
2557                         &success, 0, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED))) {
2558
2559                 rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
2560                 if (rarp_mbuf == NULL) {
2561                         VHOST_LOG_DATA(ERR, "Failed to make RARP packet.\n");
2562                         count = 0;
2563                         goto out;
2564                 }
2565                 count -= 1;
2566         }
2567
2568         if (vq_is_packed(dev))
2569                 count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count);
2570         else
2571                 count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count);
2572
2573 out:
2574         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2575                 vhost_user_iotlb_rd_unlock(vq);
2576
2577 out_access_unlock:
2578         rte_spinlock_unlock(&vq->access_lock);
2579
2580         if (unlikely(rarp_mbuf != NULL)) {
2581                 /*
2582                  * Inject it at the head of the "pkts" array, so that the switch's
2583                  * MAC learning table gets updated first.
2584                  */
2585                 memmove(&pkts[1], pkts, count * sizeof(struct rte_mbuf *));
2586                 pkts[0] = rarp_mbuf;
2587                 count += 1;
2588         }
2589
2590         return count;
2591 }