vhost: fix split ring potential buffer overflow
dpdk.git: lib/librte_vhost/virtio_net.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <stdbool.h>
7 #include <linux/virtio_net.h>
8
9 #include <rte_mbuf.h>
10 #include <rte_memcpy.h>
11 #include <rte_ether.h>
12 #include <rte_ip.h>
13 #include <rte_vhost.h>
14 #include <rte_tcp.h>
15 #include <rte_udp.h>
16 #include <rte_sctp.h>
17 #include <rte_arp.h>
18 #include <rte_spinlock.h>
19 #include <rte_malloc.h>
20 #include <rte_vhost_async.h>
21
22 #include "iotlb.h"
23 #include "vhost.h"
24
25 #define MAX_BATCH_LEN 256
26
27 #define VHOST_ASYNC_BATCH_THRESHOLD 32
28
29 static  __rte_always_inline bool
30 rxvq_is_mergeable(struct virtio_net *dev)
31 {
32         return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
33 }
34
35 static  __rte_always_inline bool
36 virtio_net_is_inorder(struct virtio_net *dev)
37 {
38         return dev->features & (1ULL << VIRTIO_F_IN_ORDER);
39 }
40
41 static bool
42 is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
43 {
44         return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
45 }
46
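/*
 * Flush the enqueue copies batched in vq->batch_copy_elems and log the
 * written guest regions for dirty page tracking.
 */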
47 static inline void
48 do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
49 {
50         struct batch_copy_elem *elem = vq->batch_copy_elems;
51         uint16_t count = vq->batch_copy_nb_elems;
52         int i;
53
54         for (i = 0; i < count; i++) {
55                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
56                 vhost_log_cache_write_iova(dev, vq, elem[i].log_addr,
57                                            elem[i].len);
58                 PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
59         }
60
61         vq->batch_copy_nb_elems = 0;
62 }
63
64 static inline void
65 do_data_copy_dequeue(struct vhost_virtqueue *vq)
66 {
67         struct batch_copy_elem *elem = vq->batch_copy_elems;
68         uint16_t count = vq->batch_copy_nb_elems;
69         int i;
70
71         for (i = 0; i < count; i++)
72                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
73
74         vq->batch_copy_nb_elems = 0;
75 }
76
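/*
 * Copy 'size' shadow used entries starting at 'from' into the used ring
 * at index 'to', and log the modified used ring area.
 */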
77 static __rte_always_inline void
78 do_flush_shadow_used_ring_split(struct virtio_net *dev,
79                         struct vhost_virtqueue *vq,
80                         uint16_t to, uint16_t from, uint16_t size)
81 {
82         rte_memcpy(&vq->used->ring[to],
83                         &vq->shadow_used_split[from],
84                         size * sizeof(struct vring_used_elem));
85         vhost_log_cache_used_vring(dev, vq,
86                         offsetof(struct vring_used, ring[to]),
87                         size * sizeof(struct vring_used_elem));
88 }
89
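/*
 * Publish the shadow used entries to the split used ring, splitting the
 * copy in two when it wraps around, then update used->idx with release
 * semantics so the guest only sees completed entries.
 */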
90 static __rte_always_inline void
91 flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
92 {
93         uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
94
95         if (used_idx + vq->shadow_used_idx <= vq->size) {
96                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
97                                           vq->shadow_used_idx);
98         } else {
99                 uint16_t size;
100
101                 /* update the used ring interval [used_idx, vq->size) */
102                 size = vq->size - used_idx;
103                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);
104
105                 /* update the remaining (wrapped) interval [0, shadow_used_idx - size) */
106                 do_flush_shadow_used_ring_split(dev, vq, 0, size,
107                                           vq->shadow_used_idx - size);
108         }
109         vq->last_used_idx += vq->shadow_used_idx;
110
111         vhost_log_cache_sync(dev, vq);
112
113         __atomic_add_fetch(&vq->used->idx, vq->shadow_used_idx,
114                            __ATOMIC_RELEASE);
115         vq->shadow_used_idx = 0;
116         vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
117                 sizeof(vq->used->idx));
118 }
119
120 static __rte_always_inline void
121 update_shadow_used_ring_split(struct vhost_virtqueue *vq,
122                          uint16_t desc_idx, uint32_t len)
123 {
124         uint16_t i = vq->shadow_used_idx++;
125
126         vq->shadow_used_split[i].id  = desc_idx;
127         vq->shadow_used_split[i].len = len;
128 }
129
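/*
 * Write the shadow used entries back into the packed descriptor ring.
 * The head descriptor's flags are stored last, after a release fence,
 * so the guest cannot observe a partially updated batch.
 */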
130 static __rte_always_inline void
131 vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
132                                   struct vhost_virtqueue *vq)
133 {
134         int i;
135         uint16_t used_idx = vq->last_used_idx;
136         uint16_t head_idx = vq->last_used_idx;
137         uint16_t head_flags = 0;
138
139         /* Split loop in two to save memory barriers */
140         for (i = 0; i < vq->shadow_used_idx; i++) {
141                 vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
142                 vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;
143
144                 used_idx += vq->shadow_used_packed[i].count;
145                 if (used_idx >= vq->size)
146                         used_idx -= vq->size;
147         }
148
149         /* The ordering for storing desc flags needs to be enforced. */
150         rte_atomic_thread_fence(__ATOMIC_RELEASE);
151
152         for (i = 0; i < vq->shadow_used_idx; i++) {
153                 uint16_t flags;
154
155                 if (vq->shadow_used_packed[i].len)
156                         flags = VRING_DESC_F_WRITE;
157                 else
158                         flags = 0;
159
160                 if (vq->used_wrap_counter) {
161                         flags |= VRING_DESC_F_USED;
162                         flags |= VRING_DESC_F_AVAIL;
163                 } else {
164                         flags &= ~VRING_DESC_F_USED;
165                         flags &= ~VRING_DESC_F_AVAIL;
166                 }
167
168                 if (i > 0) {
169                         vq->desc_packed[vq->last_used_idx].flags = flags;
170
171                         vhost_log_cache_used_vring(dev, vq,
172                                         vq->last_used_idx *
173                                         sizeof(struct vring_packed_desc),
174                                         sizeof(struct vring_packed_desc));
175                 } else {
176                         head_idx = vq->last_used_idx;
177                         head_flags = flags;
178                 }
179
180                 vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count);
181         }
182
183         vq->desc_packed[head_idx].flags = head_flags;
184
185         vhost_log_cache_used_vring(dev, vq,
186                                 head_idx *
187                                 sizeof(struct vring_packed_desc),
188                                 sizeof(struct vring_packed_desc));
189
190         vq->shadow_used_idx = 0;
191         vhost_log_cache_sync(dev, vq);
192 }
193
194 static __rte_always_inline void
195 vhost_flush_dequeue_shadow_packed(struct virtio_net *dev,
196                                   struct vhost_virtqueue *vq)
197 {
198         struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];
199
200         vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
201         /* desc flags are the synchronization point for the virtio packed vring */
202         __atomic_store_n(&vq->desc_packed[vq->shadow_last_used_idx].flags,
203                          used_elem->flags, __ATOMIC_RELEASE);
204
205         vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
206                                    sizeof(struct vring_packed_desc),
207                                    sizeof(struct vring_packed_desc));
208         vq->shadow_used_idx = 0;
209         vhost_log_cache_sync(dev, vq);
210 }
211
212 static __rte_always_inline void
213 vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
214                                  struct vhost_virtqueue *vq,
215                                  uint64_t *lens,
216                                  uint16_t *ids)
217 {
218         uint16_t i;
219         uint16_t flags;
220
221         if (vq->shadow_used_idx) {
222                 do_data_copy_enqueue(dev, vq);
223                 vhost_flush_enqueue_shadow_packed(dev, vq);
224         }
225
226         flags = PACKED_DESC_ENQUEUE_USED_FLAG(vq->used_wrap_counter);
227
228         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
229                 vq->desc_packed[vq->last_used_idx + i].id = ids[i];
230                 vq->desc_packed[vq->last_used_idx + i].len = lens[i];
231         }
232
233         rte_atomic_thread_fence(__ATOMIC_RELEASE);
234
235         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
236                 vq->desc_packed[vq->last_used_idx + i].flags = flags;
237
238         vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
239                                    sizeof(struct vring_packed_desc),
240                                    sizeof(struct vring_packed_desc) *
241                                    PACKED_BATCH_SIZE);
242         vhost_log_cache_sync(dev, vq);
243
244         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
245 }
246
247 static __rte_always_inline void
248 vhost_shadow_dequeue_batch_packed_inorder(struct vhost_virtqueue *vq,
249                                           uint16_t id)
250 {
251         vq->shadow_used_packed[0].id = id;
252
253         if (!vq->shadow_used_idx) {
254                 vq->shadow_last_used_idx = vq->last_used_idx;
255                 vq->shadow_used_packed[0].flags =
256                         PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
257                 vq->shadow_used_packed[0].len = 0;
258                 vq->shadow_used_packed[0].count = 1;
259                 vq->shadow_used_idx++;
260         }
261
262         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
263 }
264
265 static __rte_always_inline void
266 vhost_shadow_dequeue_batch_packed(struct virtio_net *dev,
267                                   struct vhost_virtqueue *vq,
268                                   uint16_t *ids)
269 {
270         uint16_t flags;
271         uint16_t i;
272         uint16_t begin;
273
274         flags = PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
275
276         if (!vq->shadow_used_idx) {
277                 vq->shadow_last_used_idx = vq->last_used_idx;
278                 vq->shadow_used_packed[0].id  = ids[0];
279                 vq->shadow_used_packed[0].len = 0;
280                 vq->shadow_used_packed[0].count = 1;
281                 vq->shadow_used_packed[0].flags = flags;
282                 vq->shadow_used_idx++;
283                 begin = 1;
284         } else
285                 begin = 0;
286
287         vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE) {
288                 vq->desc_packed[vq->last_used_idx + i].id = ids[i];
289                 vq->desc_packed[vq->last_used_idx + i].len = 0;
290         }
291
292         rte_atomic_thread_fence(__ATOMIC_RELEASE);
293         vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE)
294                 vq->desc_packed[vq->last_used_idx + i].flags = flags;
295
296         vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
297                                    sizeof(struct vring_packed_desc),
298                                    sizeof(struct vring_packed_desc) *
299                                    PACKED_BATCH_SIZE);
300         vhost_log_cache_sync(dev, vq);
301
302         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
303 }
304
305 static __rte_always_inline void
306 vhost_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
307                                    uint16_t buf_id,
308                                    uint16_t count)
309 {
310         uint16_t flags;
311
312         flags = vq->desc_packed[vq->last_used_idx].flags;
313         if (vq->used_wrap_counter) {
314                 flags |= VRING_DESC_F_USED;
315                 flags |= VRING_DESC_F_AVAIL;
316         } else {
317                 flags &= ~VRING_DESC_F_USED;
318                 flags &= ~VRING_DESC_F_AVAIL;
319         }
320
321         if (!vq->shadow_used_idx) {
322                 vq->shadow_last_used_idx = vq->last_used_idx;
323
324                 vq->shadow_used_packed[0].id  = buf_id;
325                 vq->shadow_used_packed[0].len = 0;
326                 vq->shadow_used_packed[0].flags = flags;
327                 vq->shadow_used_idx++;
328         } else {
329                 vq->desc_packed[vq->last_used_idx].id = buf_id;
330                 vq->desc_packed[vq->last_used_idx].len = 0;
331                 vq->desc_packed[vq->last_used_idx].flags = flags;
332         }
333
334         vq_inc_last_used_packed(vq, count);
335 }
336
337 static __rte_always_inline void
338 vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
339                                            uint16_t buf_id,
340                                            uint16_t count)
341 {
342         uint16_t flags;
343
344         vq->shadow_used_packed[0].id = buf_id;
345
346         flags = vq->desc_packed[vq->last_used_idx].flags;
347         if (vq->used_wrap_counter) {
348                 flags |= VRING_DESC_F_USED;
349                 flags |= VRING_DESC_F_AVAIL;
350         } else {
351                 flags &= ~VRING_DESC_F_USED;
352                 flags &= ~VRING_DESC_F_AVAIL;
353         }
354
355         if (!vq->shadow_used_idx) {
356                 vq->shadow_last_used_idx = vq->last_used_idx;
357                 vq->shadow_used_packed[0].len = 0;
358                 vq->shadow_used_packed[0].flags = flags;
359                 vq->shadow_used_idx++;
360         }
361
362         vq_inc_last_used_packed(vq, count);
363 }
364
365 static __rte_always_inline void
366 vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
367                                    struct vhost_virtqueue *vq,
368                                    uint32_t len[],
369                                    uint16_t id[],
370                                    uint16_t count[],
371                                    uint16_t num_buffers)
372 {
373         uint16_t i;
374         for (i = 0; i < num_buffers; i++) {
375                 /* keep the shadow flush aligned with the packed batch size */
376                 if (!vq->shadow_used_idx)
377                         vq->shadow_aligned_idx = vq->last_used_idx &
378                                 PACKED_BATCH_MASK;
379                 vq->shadow_used_packed[vq->shadow_used_idx].id  = id[i];
380                 vq->shadow_used_packed[vq->shadow_used_idx].len = len[i];
381                 vq->shadow_used_packed[vq->shadow_used_idx].count = count[i];
382                 vq->shadow_aligned_idx += count[i];
383                 vq->shadow_used_idx++;
384         }
385
386         if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
387                 do_data_copy_enqueue(dev, vq);
388                 vhost_flush_enqueue_shadow_packed(dev, vq);
389         }
390 }
391
392 /* avoid the write operation when the value is unchanged, to lessen cache issues */
393 #define ASSIGN_UNLESS_EQUAL(var, val) do {      \
394         if ((var) != (val))                     \
395                 (var) = (val);                  \
396 } while (0)
397
398 static __rte_always_inline void
399 virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
400 {
401         uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;
402
403         if (m_buf->ol_flags & PKT_TX_TCP_SEG)
404                 csum_l4 |= PKT_TX_TCP_CKSUM;
405
406         if (csum_l4) {
407                 net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
408                 net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
409
410                 switch (csum_l4) {
411                 case PKT_TX_TCP_CKSUM:
412                         net_hdr->csum_offset = (offsetof(struct rte_tcp_hdr,
413                                                 cksum));
414                         break;
415                 case PKT_TX_UDP_CKSUM:
416                         net_hdr->csum_offset = (offsetof(struct rte_udp_hdr,
417                                                 dgram_cksum));
418                         break;
419                 case PKT_TX_SCTP_CKSUM:
420                         net_hdr->csum_offset = (offsetof(struct rte_sctp_hdr,
421                                                 cksum));
422                         break;
423                 }
424         } else {
425                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
426                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
427                 ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
428         }
429
430         /* IP cksum verification cannot be bypassed, so calculate it here */
431         if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
432                 struct rte_ipv4_hdr *ipv4_hdr;
433
434                 ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
435                                                    m_buf->l2_len);
436                 ipv4_hdr->hdr_checksum = 0;
437                 ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
438         }
439
440         if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
441                 if (m_buf->ol_flags & PKT_TX_IPV4)
442                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
443                 else
444                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
445                 net_hdr->gso_size = m_buf->tso_segsz;
446                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
447                                         + m_buf->l4_len;
448         } else if (m_buf->ol_flags & PKT_TX_UDP_SEG) {
449                 net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
450                 net_hdr->gso_size = m_buf->tso_segsz;
451                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
452                         m_buf->l4_len;
453         } else {
454                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
455                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
456                 ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
457         }
458 }
459
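/*
 * Translate a descriptor's guest IOVA range into host virtual addresses,
 * splitting it into several buf_vec entries when the range is not
 * contiguous in the host address space.
 */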
460 static __rte_always_inline int
461 map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
462                 struct buf_vector *buf_vec, uint16_t *vec_idx,
463                 uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
464 {
465         uint16_t vec_id = *vec_idx;
466
467         while (desc_len) {
468                 uint64_t desc_addr;
469                 uint64_t desc_chunck_len = desc_len;
470
471                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
472                         return -1;
473
474                 desc_addr = vhost_iova_to_vva(dev, vq,
475                                 desc_iova,
476                                 &desc_chunck_len,
477                                 perm);
478                 if (unlikely(!desc_addr))
479                         return -1;
480
481                 rte_prefetch0((void *)(uintptr_t)desc_addr);
482
483                 buf_vec[vec_id].buf_iova = desc_iova;
484                 buf_vec[vec_id].buf_addr = desc_addr;
485                 buf_vec[vec_id].buf_len  = desc_chunck_len;
486
487                 desc_len -= desc_chunck_len;
488                 desc_iova += desc_chunck_len;
489                 vec_id++;
490         }
491         *vec_idx = vec_id;
492
493         return 0;
494 }
495
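/*
 * Walk the split ring descriptor chain referenced by 'avail_idx',
 * following an indirect table if present, and fill buf_vec with the
 * translated buffers. Descriptor indices and chain length are checked
 * against the ring size to prevent out-of-bounds accesses.
 */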
496 static __rte_always_inline int
497 fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
498                          uint32_t avail_idx, uint16_t *vec_idx,
499                          struct buf_vector *buf_vec, uint16_t *desc_chain_head,
500                          uint32_t *desc_chain_len, uint8_t perm)
501 {
502         uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
503         uint16_t vec_id = *vec_idx;
504         uint32_t len    = 0;
505         uint64_t dlen;
506         uint32_t nr_descs = vq->size;
507         uint32_t cnt    = 0;
508         struct vring_desc *descs = vq->desc;
509         struct vring_desc *idesc = NULL;
510
511         if (unlikely(idx >= vq->size))
512                 return -1;
513
514         *desc_chain_head = idx;
515
516         if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
517                 dlen = vq->desc[idx].len;
518                 nr_descs = dlen / sizeof(struct vring_desc);
519                 if (unlikely(nr_descs > vq->size))
520                         return -1;
521
522                 descs = (struct vring_desc *)(uintptr_t)
523                         vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
524                                                 &dlen,
525                                                 VHOST_ACCESS_RO);
526                 if (unlikely(!descs))
527                         return -1;
528
529                 if (unlikely(dlen < vq->desc[idx].len)) {
530                         /*
531                          * The indirect desc table is not contiguous
532                          * in process VA space, so we have to copy it.
533                          */
534                         idesc = vhost_alloc_copy_ind_table(dev, vq,
535                                         vq->desc[idx].addr, vq->desc[idx].len);
536                         if (unlikely(!idesc))
537                                 return -1;
538
539                         descs = idesc;
540                 }
541
542                 idx = 0;
543         }
544
545         while (1) {
546                 if (unlikely(idx >= nr_descs || cnt++ >= nr_descs)) {
547                         free_ind_table(idesc);
548                         return -1;
549                 }
550
551                 dlen = descs[idx].len;
552                 len += dlen;
553
554                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
555                                                 descs[idx].addr, dlen,
556                                                 perm))) {
557                         free_ind_table(idesc);
558                         return -1;
559                 }
560
561                 if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
562                         break;
563
564                 idx = descs[idx].next;
565         }
566
567         *desc_chain_len = len;
568         *vec_idx = vec_id;
569
570         if (unlikely(!!idesc))
571                 free_ind_table(idesc);
572
573         return 0;
574 }
575
576 /*
577  * Returns -1 on failure, 0 on success
578  */
579 static inline int
580 reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
581                                 uint32_t size, struct buf_vector *buf_vec,
582                                 uint16_t *num_buffers, uint16_t avail_head,
583                                 uint16_t *nr_vec)
584 {
585         uint16_t cur_idx;
586         uint16_t vec_idx = 0;
587         uint16_t max_tries, tries = 0;
588
589         uint16_t head_idx = 0;
590         uint32_t len = 0;
591
592         *num_buffers = 0;
593         cur_idx  = vq->last_avail_idx;
594
595         if (rxvq_is_mergeable(dev))
596                 max_tries = vq->size - 1;
597         else
598                 max_tries = 1;
599
600         while (size > 0) {
601                 if (unlikely(cur_idx == avail_head))
602                         return -1;
603                 /*
604                  * If we have tried all available ring items and still
605                  * cannot get enough buffers, something abnormal has
606                  * happened.
607                  */
608                 if (unlikely(++tries > max_tries))
609                         return -1;
610
611                 if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
612                                                 &vec_idx, buf_vec,
613                                                 &head_idx, &len,
614                                                 VHOST_ACCESS_RW) < 0))
615                         return -1;
616                 len = RTE_MIN(len, size);
617                 update_shadow_used_ring_split(vq, head_idx, len);
618                 size -= len;
619
620                 cur_idx++;
621                 *num_buffers += 1;
622         }
623
624         *nr_vec = vec_idx;
625
626         return 0;
627 }
628
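/*
 * Map the buffers referenced by a packed ring indirect descriptor table
 * into buf_vec, copying the table first when it is not contiguous in
 * host virtual memory.
 */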
629 static __rte_always_inline int
630 fill_vec_buf_packed_indirect(struct virtio_net *dev,
631                         struct vhost_virtqueue *vq,
632                         struct vring_packed_desc *desc, uint16_t *vec_idx,
633                         struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
634 {
635         uint16_t i;
636         uint32_t nr_descs;
637         uint16_t vec_id = *vec_idx;
638         uint64_t dlen;
639         struct vring_packed_desc *descs, *idescs = NULL;
640
641         dlen = desc->len;
642         descs = (struct vring_packed_desc *)(uintptr_t)
643                 vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
644         if (unlikely(!descs))
645                 return -1;
646
647         if (unlikely(dlen < desc->len)) {
648                 /*
649                  * The indirect desc table is not contiguous
650                  * in process VA space, we have to copy it.
651                          * in process VA space, so we have to copy it.
652                 idescs = vhost_alloc_copy_ind_table(dev,
653                                 vq, desc->addr, desc->len);
654                 if (unlikely(!idescs))
655                         return -1;
656
657                 descs = idescs;
658         }
659
660         nr_descs =  desc->len / sizeof(struct vring_packed_desc);
661         if (unlikely(nr_descs >= vq->size)) {
662                 free_ind_table(idescs);
663                 return -1;
664         }
665
666         for (i = 0; i < nr_descs; i++) {
667                 if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
668                         free_ind_table(idescs);
669                         return -1;
670                 }
671
672                 *len += descs[i].len;
673                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
674                                                 descs[i].addr, descs[i].len,
675                                                 perm)))
676                         return -1;
677         }
678         *vec_idx = vec_id;
679
680         if (unlikely(!!idescs))
681                 free_ind_table(idescs);
682
683         return 0;
684 }
685
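/*
 * Collect the packed ring descriptor chain available at 'avail_idx' into
 * buf_vec, returning the buffer id, the number of descriptors consumed
 * and the total buffer length.
 */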
686 static __rte_always_inline int
687 fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
688                                 uint16_t avail_idx, uint16_t *desc_count,
689                                 struct buf_vector *buf_vec, uint16_t *vec_idx,
690                                 uint16_t *buf_id, uint32_t *len, uint8_t perm)
691 {
692         bool wrap_counter = vq->avail_wrap_counter;
693         struct vring_packed_desc *descs = vq->desc_packed;
694         uint16_t vec_id = *vec_idx;
695
696         if (avail_idx < vq->last_avail_idx)
697                 wrap_counter ^= 1;
698
699         /*
700          * Perform a load-acquire barrier in desc_is_avail to
701          * enforce the ordering between desc flags and desc
702          * content.
703          */
704         if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
705                 return -1;
706
707         *desc_count = 0;
708         *len = 0;
709
710         while (1) {
711                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
712                         return -1;
713
714                 if (unlikely(*desc_count >= vq->size))
715                         return -1;
716
717                 *desc_count += 1;
718                 *buf_id = descs[avail_idx].id;
719
720                 if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
721                         if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
722                                                         &descs[avail_idx],
723                                                         &vec_id, buf_vec,
724                                                         len, perm) < 0))
725                                 return -1;
726                 } else {
727                         *len += descs[avail_idx].len;
728
729                         if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
730                                                         descs[avail_idx].addr,
731                                                         descs[avail_idx].len,
732                                                         perm)))
733                                 return -1;
734                 }
735
736                 if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
737                         break;
738
739                 if (++avail_idx >= vq->size) {
740                         avail_idx -= vq->size;
741                         wrap_counter ^= 1;
742                 }
743         }
744
745         *vec_idx = vec_id;
746
747         return 0;
748 }
749
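/*
 * Copy the virtio-net header into the guest buffers when the first
 * buffer is too small to hold the whole header contiguously.
 */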
750 static __rte_noinline void
751 copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
752                 struct buf_vector *buf_vec,
753                 struct virtio_net_hdr_mrg_rxbuf *hdr)
754 {
755         uint64_t len;
756         uint64_t remain = dev->vhost_hlen;
757         uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
758         uint64_t iova = buf_vec->buf_iova;
759
760         while (remain) {
761                 len = RTE_MIN(remain,
762                                 buf_vec->buf_len);
763                 dst = buf_vec->buf_addr;
764                 rte_memcpy((void *)(uintptr_t)dst,
765                                 (void *)(uintptr_t)src,
766                                 len);
767
768                 PRINT_PACKET(dev, (uintptr_t)dst,
769                                 (uint32_t)len, 0);
770                 vhost_log_cache_write_iova(dev, vq,
771                                 iova, len);
772
773                 remain -= len;
774                 iova += len;
775                 src += len;
776                 buf_vec++;
777         }
778 }
779
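/*
 * Copy an mbuf chain into the guest buffers described by buf_vec,
 * filling in the virtio-net header first. Small copies are deferred to
 * the batch copy array and performed later by do_data_copy_enqueue().
 */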
780 static __rte_always_inline int
781 copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
782                             struct rte_mbuf *m, struct buf_vector *buf_vec,
783                             uint16_t nr_vec, uint16_t num_buffers)
784 {
785         uint32_t vec_idx = 0;
786         uint32_t mbuf_offset, mbuf_avail;
787         uint32_t buf_offset, buf_avail;
788         uint64_t buf_addr, buf_iova, buf_len;
789         uint32_t cpy_len;
790         uint64_t hdr_addr;
791         struct rte_mbuf *hdr_mbuf;
792         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
793         struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
794         int error = 0;
795
796         if (unlikely(m == NULL)) {
797                 error = -1;
798                 goto out;
799         }
800
801         buf_addr = buf_vec[vec_idx].buf_addr;
802         buf_iova = buf_vec[vec_idx].buf_iova;
803         buf_len = buf_vec[vec_idx].buf_len;
804
805         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
806                 error = -1;
807                 goto out;
808         }
809
810         hdr_mbuf = m;
811         hdr_addr = buf_addr;
812         if (unlikely(buf_len < dev->vhost_hlen))
813                 hdr = &tmp_hdr;
814         else
815                 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
816
817         VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
818                 dev->vid, num_buffers);
819
820         if (unlikely(buf_len < dev->vhost_hlen)) {
821                 buf_offset = dev->vhost_hlen - buf_len;
822                 vec_idx++;
823                 buf_addr = buf_vec[vec_idx].buf_addr;
824                 buf_iova = buf_vec[vec_idx].buf_iova;
825                 buf_len = buf_vec[vec_idx].buf_len;
826                 buf_avail = buf_len - buf_offset;
827         } else {
828                 buf_offset = dev->vhost_hlen;
829                 buf_avail = buf_len - dev->vhost_hlen;
830         }
831
832         mbuf_avail  = rte_pktmbuf_data_len(m);
833         mbuf_offset = 0;
834         while (mbuf_avail != 0 || m->next != NULL) {
835                 /* done with current buf, get the next one */
836                 if (buf_avail == 0) {
837                         vec_idx++;
838                         if (unlikely(vec_idx >= nr_vec)) {
839                                 error = -1;
840                                 goto out;
841                         }
842
843                         buf_addr = buf_vec[vec_idx].buf_addr;
844                         buf_iova = buf_vec[vec_idx].buf_iova;
845                         buf_len = buf_vec[vec_idx].buf_len;
846
847                         buf_offset = 0;
848                         buf_avail  = buf_len;
849                 }
850
851                 /* done with current mbuf, get the next one */
852                 if (mbuf_avail == 0) {
853                         m = m->next;
854
855                         mbuf_offset = 0;
856                         mbuf_avail  = rte_pktmbuf_data_len(m);
857                 }
858
859                 if (hdr_addr) {
860                         virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
861                         if (rxvq_is_mergeable(dev))
862                                 ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
863                                                 num_buffers);
864
865                         if (unlikely(hdr == &tmp_hdr)) {
866                                 copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
867                         } else {
868                                 PRINT_PACKET(dev, (uintptr_t)hdr_addr,
869                                                 dev->vhost_hlen, 0);
870                                 vhost_log_cache_write_iova(dev, vq,
871                                                 buf_vec[0].buf_iova,
872                                                 dev->vhost_hlen);
873                         }
874
875                         hdr_addr = 0;
876                 }
877
878                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
879
880                 if (likely(cpy_len > MAX_BATCH_LEN ||
881                                         vq->batch_copy_nb_elems >= vq->size)) {
882                         rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
883                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
884                                 cpy_len);
885                         vhost_log_cache_write_iova(dev, vq,
886                                                    buf_iova + buf_offset,
887                                                    cpy_len);
888                         PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
889                                 cpy_len, 0);
890                 } else {
891                         batch_copy[vq->batch_copy_nb_elems].dst =
892                                 (void *)((uintptr_t)(buf_addr + buf_offset));
893                         batch_copy[vq->batch_copy_nb_elems].src =
894                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
895                         batch_copy[vq->batch_copy_nb_elems].log_addr =
896                                 buf_iova + buf_offset;
897                         batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
898                         vq->batch_copy_nb_elems++;
899                 }
900
901                 mbuf_avail  -= cpy_len;
902                 mbuf_offset += cpy_len;
903                 buf_avail  -= cpy_len;
904                 buf_offset += cpy_len;
905         }
906
907 out:
908
909         return error;
910 }
911
912 static __rte_always_inline void
913 async_fill_vec(struct iovec *v, void *base, size_t len)
914 {
915         v->iov_base = base;
916         v->iov_len = len;
917 }
918
919 static __rte_always_inline void
920 async_fill_iter(struct rte_vhost_iov_iter *it, size_t count,
921         struct iovec *vec, unsigned long nr_seg)
922 {
923         it->offset = 0;
924         it->count = count;
925
926         if (count) {
927                 it->iov = vec;
928                 it->nr_segs = nr_seg;
929         } else {
930                 it->iov = 0;
931                 it->nr_segs = 0;
932         }
933 }
934
935 static __rte_always_inline void
936 async_fill_desc(struct rte_vhost_async_desc *desc,
937         struct rte_vhost_iov_iter *src, struct rte_vhost_iov_iter *dst)
938 {
939         desc->src = src;
940         desc->dst = dst;
941 }
942
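/*
 * Like copy_mbuf_to_desc(), but segments of at least vq->async_threshold
 * bytes are recorded as src/dst iovec entries for the async copy engine
 * instead of being copied by the CPU.
 */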
943 static __rte_always_inline int
944 async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
945                         struct rte_mbuf *m, struct buf_vector *buf_vec,
946                         uint16_t nr_vec, uint16_t num_buffers,
947                         struct iovec *src_iovec, struct iovec *dst_iovec,
948                         struct rte_vhost_iov_iter *src_it,
949                         struct rte_vhost_iov_iter *dst_it)
950 {
951         uint32_t vec_idx = 0;
952         uint32_t mbuf_offset, mbuf_avail;
953         uint32_t buf_offset, buf_avail;
954         uint64_t buf_addr, buf_iova, buf_len;
955         uint32_t cpy_len, cpy_threshold;
956         uint64_t hdr_addr;
957         struct rte_mbuf *hdr_mbuf;
958         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
959         struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
960         int error = 0;
961         uint64_t mapped_len;
962
963         uint32_t tlen = 0;
964         int tvec_idx = 0;
965         void *hpa;
966
967         if (unlikely(m == NULL)) {
968                 error = -1;
969                 goto out;
970         }
971
972         cpy_threshold = vq->async_threshold;
973
974         buf_addr = buf_vec[vec_idx].buf_addr;
975         buf_iova = buf_vec[vec_idx].buf_iova;
976         buf_len = buf_vec[vec_idx].buf_len;
977
978         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
979                 error = -1;
980                 goto out;
981         }
982
983         hdr_mbuf = m;
984         hdr_addr = buf_addr;
985         if (unlikely(buf_len < dev->vhost_hlen))
986                 hdr = &tmp_hdr;
987         else
988                 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
989
990         VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
991                 dev->vid, num_buffers);
992
993         if (unlikely(buf_len < dev->vhost_hlen)) {
994                 buf_offset = dev->vhost_hlen - buf_len;
995                 vec_idx++;
996                 buf_addr = buf_vec[vec_idx].buf_addr;
997                 buf_iova = buf_vec[vec_idx].buf_iova;
998                 buf_len = buf_vec[vec_idx].buf_len;
999                 buf_avail = buf_len - buf_offset;
1000         } else {
1001                 buf_offset = dev->vhost_hlen;
1002                 buf_avail = buf_len - dev->vhost_hlen;
1003         }
1004
1005         mbuf_avail  = rte_pktmbuf_data_len(m);
1006         mbuf_offset = 0;
1007
1008         while (mbuf_avail != 0 || m->next != NULL) {
1009                 /* done with current buf, get the next one */
1010                 if (buf_avail == 0) {
1011                         vec_idx++;
1012                         if (unlikely(vec_idx >= nr_vec)) {
1013                                 error = -1;
1014                                 goto out;
1015                         }
1016
1017                         buf_addr = buf_vec[vec_idx].buf_addr;
1018                         buf_iova = buf_vec[vec_idx].buf_iova;
1019                         buf_len = buf_vec[vec_idx].buf_len;
1020
1021                         buf_offset = 0;
1022                         buf_avail  = buf_len;
1023                 }
1024
1025                 /* done with current mbuf, get the next one */
1026                 if (mbuf_avail == 0) {
1027                         m = m->next;
1028
1029                         mbuf_offset = 0;
1030                         mbuf_avail  = rte_pktmbuf_data_len(m);
1031                 }
1032
1033                 if (hdr_addr) {
1034                         virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
1035                         if (rxvq_is_mergeable(dev))
1036                                 ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
1037                                                 num_buffers);
1038
1039                         if (unlikely(hdr == &tmp_hdr)) {
1040                                 copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
1041                         } else {
1042                                 PRINT_PACKET(dev, (uintptr_t)hdr_addr,
1043                                                 dev->vhost_hlen, 0);
1044                                 vhost_log_cache_write_iova(dev, vq,
1045                                                 buf_vec[0].buf_iova,
1046                                                 dev->vhost_hlen);
1047                         }
1048
1049                         hdr_addr = 0;
1050                 }
1051
1052                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
1053
1054                 while (unlikely(cpy_len && cpy_len >= cpy_threshold)) {
1055                         hpa = (void *)(uintptr_t)gpa_to_first_hpa(dev,
1056                                         buf_iova + buf_offset,
1057                                         cpy_len, &mapped_len);
1058
1059                         if (unlikely(!hpa || mapped_len < cpy_threshold))
1060                                 break;
1061
1062                         async_fill_vec(src_iovec + tvec_idx,
1063                                 (void *)(uintptr_t)rte_pktmbuf_iova_offset(m,
1064                                 mbuf_offset), (size_t)mapped_len);
1065
1066                         async_fill_vec(dst_iovec + tvec_idx,
1067                                         hpa, (size_t)mapped_len);
1068
1069                         tlen += (uint32_t)mapped_len;
1070                         cpy_len -= (uint32_t)mapped_len;
1071                         mbuf_avail  -= (uint32_t)mapped_len;
1072                         mbuf_offset += (uint32_t)mapped_len;
1073                         buf_avail  -= (uint32_t)mapped_len;
1074                         buf_offset += (uint32_t)mapped_len;
1075                         tvec_idx++;
1076                 }
1077
1078                 if (likely(cpy_len)) {
1079                         if (unlikely(vq->batch_copy_nb_elems >= vq->size)) {
1080                                 rte_memcpy(
1081                                 (void *)((uintptr_t)(buf_addr + buf_offset)),
1082                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
1083                                 cpy_len);
1084
1085                                 PRINT_PACKET(dev,
1086                                         (uintptr_t)(buf_addr + buf_offset),
1087                                         cpy_len, 0);
1088                         } else {
1089                                 batch_copy[vq->batch_copy_nb_elems].dst =
1090                                 (void *)((uintptr_t)(buf_addr + buf_offset));
1091                                 batch_copy[vq->batch_copy_nb_elems].src =
1092                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
1093                                 batch_copy[vq->batch_copy_nb_elems].log_addr =
1094                                         buf_iova + buf_offset;
1095                                 batch_copy[vq->batch_copy_nb_elems].len =
1096                                         cpy_len;
1097                                 vq->batch_copy_nb_elems++;
1098                         }
1099
1100                         mbuf_avail  -= cpy_len;
1101                         mbuf_offset += cpy_len;
1102                         buf_avail  -= cpy_len;
1103                         buf_offset += cpy_len;
1104                 }
1105
1106         }
1107
1108 out:
1109         if (tlen) {
1110                 async_fill_iter(src_it, tlen, src_iovec, tvec_idx);
1111                 async_fill_iter(dst_it, tlen, dst_iovec, tvec_idx);
1112         } else {
1113                 src_it->count = 0;
1114         }
1115
1116         return error;
1117 }
1118
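/*
 * Reserve enough packed ring descriptors for one packet, copy the packet
 * into the guest buffers and record the used entries in the shadow ring.
 */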
1119 static __rte_always_inline int
1120 vhost_enqueue_single_packed(struct virtio_net *dev,
1121                             struct vhost_virtqueue *vq,
1122                             struct rte_mbuf *pkt,
1123                             struct buf_vector *buf_vec,
1124                             uint16_t *nr_descs)
1125 {
1126         uint16_t nr_vec = 0;
1127         uint16_t avail_idx = vq->last_avail_idx;
1128         uint16_t max_tries, tries = 0;
1129         uint16_t buf_id = 0;
1130         uint32_t len = 0;
1131         uint16_t desc_count;
1132         uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
1133         uint16_t num_buffers = 0;
1134         uint32_t buffer_len[vq->size];
1135         uint16_t buffer_buf_id[vq->size];
1136         uint16_t buffer_desc_count[vq->size];
1137
1138         if (rxvq_is_mergeable(dev))
1139                 max_tries = vq->size - 1;
1140         else
1141                 max_tries = 1;
1142
1143         while (size > 0) {
1144                 /*
1145                  * If we have tried all available ring items and still
1146                  * cannot get enough buffers, something abnormal has
1147                  * happened.
1148                  */
1149                 if (unlikely(++tries > max_tries))
1150                         return -1;
1151
1152                 if (unlikely(fill_vec_buf_packed(dev, vq,
1153                                                 avail_idx, &desc_count,
1154                                                 buf_vec, &nr_vec,
1155                                                 &buf_id, &len,
1156                                                 VHOST_ACCESS_RW) < 0))
1157                         return -1;
1158
1159                 len = RTE_MIN(len, size);
1160                 size -= len;
1161
1162                 buffer_len[num_buffers] = len;
1163                 buffer_buf_id[num_buffers] = buf_id;
1164                 buffer_desc_count[num_buffers] = desc_count;
1165                 num_buffers += 1;
1166
1167                 *nr_descs += desc_count;
1168                 avail_idx += desc_count;
1169                 if (avail_idx >= vq->size)
1170                         avail_idx -= vq->size;
1171         }
1172
1173         if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0)
1174                 return -1;
1175
1176         vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
1177                                            buffer_desc_count, num_buffers);
1178
1179         return 0;
1180 }
1181
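/*
 * Enqueue a burst of packets into a split virtqueue. Returns the number
 * of packets actually placed in the ring.
 */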
1182 static __rte_noinline uint32_t
1183 virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
1184         struct rte_mbuf **pkts, uint32_t count)
1185 {
1186         uint32_t pkt_idx = 0;
1187         uint16_t num_buffers;
1188         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1189         uint16_t avail_head;
1190
1191         /*
1192          * The ordering between avail index and
1193          * desc reads needs to be enforced.
1194          */
1195         avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
1196
1197         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1198
1199         for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
1200                 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
1201                 uint16_t nr_vec = 0;
1202
1203                 if (unlikely(reserve_avail_buf_split(dev, vq,
1204                                                 pkt_len, buf_vec, &num_buffers,
1205                                                 avail_head, &nr_vec) < 0)) {
1206                         VHOST_LOG_DATA(DEBUG,
1207                                 "(%d) failed to get enough desc from vring\n",
1208                                 dev->vid);
1209                         vq->shadow_used_idx -= num_buffers;
1210                         break;
1211                 }
1212
1213                 VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
1214                         dev->vid, vq->last_avail_idx,
1215                         vq->last_avail_idx + num_buffers);
1216
1217                 if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
1218                                                 buf_vec, nr_vec,
1219                                                 num_buffers) < 0) {
1220                         vq->shadow_used_idx -= num_buffers;
1221                         break;
1222                 }
1223
1224                 vq->last_avail_idx += num_buffers;
1225         }
1226
1227         do_data_copy_enqueue(dev, vq);
1228
1229         if (likely(vq->shadow_used_idx)) {
1230                 flush_shadow_used_ring_split(dev, vq);
1231                 vhost_vring_call_split(dev, vq);
1232         }
1233
1234         return pkt_idx;
1235 }
1236
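/*
 * Fast path: enqueue PACKED_BATCH_SIZE single-segment packets at once
 * when an aligned, fully available batch of descriptors is present.
 * Returns -1 when the batch conditions are not met.
 */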
1237 static __rte_always_inline int
1238 virtio_dev_rx_batch_packed(struct virtio_net *dev,
1239                            struct vhost_virtqueue *vq,
1240                            struct rte_mbuf **pkts)
1241 {
1242         bool wrap_counter = vq->avail_wrap_counter;
1243         struct vring_packed_desc *descs = vq->desc_packed;
1244         uint16_t avail_idx = vq->last_avail_idx;
1245         uint64_t desc_addrs[PACKED_BATCH_SIZE];
1246         struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
1247         uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
1248         uint64_t lens[PACKED_BATCH_SIZE];
1249         uint16_t ids[PACKED_BATCH_SIZE];
1250         uint16_t i;
1251
1252         if (unlikely(avail_idx & PACKED_BATCH_MASK))
1253                 return -1;
1254
1255         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
1256                 return -1;
1257
1258         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1259                 if (unlikely(pkts[i]->next != NULL))
1260                         return -1;
1261                 if (unlikely(!desc_is_avail(&descs[avail_idx + i],
1262                                             wrap_counter)))
1263                         return -1;
1264         }
1265
1266         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1267                 lens[i] = descs[avail_idx + i].len;
1268
1269         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1270                 if (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset)))
1271                         return -1;
1272         }
1273
1274         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1275                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
1276                                                   descs[avail_idx + i].addr,
1277                                                   &lens[i],
1278                                                   VHOST_ACCESS_RW);
1279
1280         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1281                 if (unlikely(!desc_addrs[i]))
1282                         return -1;
1283                 if (unlikely(lens[i] != descs[avail_idx + i].len))
1284                         return -1;
1285         }
1286
1287         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1288                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
1289                 hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
1290                                         (uintptr_t)desc_addrs[i];
1291                 lens[i] = pkts[i]->pkt_len +
1292                         sizeof(struct virtio_net_hdr_mrg_rxbuf);
1293         }
1294
1295         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1296                 virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr);
1297
1298         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
1299
1300         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1301                 rte_memcpy((void *)(uintptr_t)(desc_addrs[i] + buf_offset),
1302                            rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
1303                            pkts[i]->pkt_len);
1304         }
1305
1306         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1307                 vhost_log_cache_write_iova(dev, vq, descs[avail_idx + i].addr,
1308                                            lens[i]);
1309
1310         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1311                 ids[i] = descs[avail_idx + i].id;
1312
1313         vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);
1314
1315         return 0;
1316 }
1317
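/*
 * Slow path: enqueue one packet into the packed ring, possibly spanning
 * several descriptors.
 */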
1318 static __rte_always_inline int16_t
1319 virtio_dev_rx_single_packed(struct virtio_net *dev,
1320                             struct vhost_virtqueue *vq,
1321                             struct rte_mbuf *pkt)
1322 {
1323         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1324         uint16_t nr_descs = 0;
1325
1326         if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
1327                                                  &nr_descs) < 0)) {
1328                 VHOST_LOG_DATA(DEBUG,
1329                                 "(%d) failed to get enough desc from vring\n",
1330                                 dev->vid);
1331                 return -1;
1332         }
1333
1334         VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
1335                         dev->vid, vq->last_avail_idx,
1336                         vq->last_avail_idx + nr_descs);
1337
1338         vq_inc_last_avail_packed(vq, nr_descs);
1339
1340         return 0;
1341 }
1342
1343 static __rte_noinline uint32_t
1344 virtio_dev_rx_packed(struct virtio_net *dev,
1345                      struct vhost_virtqueue *__rte_restrict vq,
1346                      struct rte_mbuf **__rte_restrict pkts,
1347                      uint32_t count)
1348 {
1349         uint32_t pkt_idx = 0;
1350         uint32_t remained = count;
1351
1352         do {
1353                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
1354
1355                 if (remained >= PACKED_BATCH_SIZE) {
1356                         if (!virtio_dev_rx_batch_packed(dev, vq,
1357                                                         &pkts[pkt_idx])) {
1358                                 pkt_idx += PACKED_BATCH_SIZE;
1359                                 remained -= PACKED_BATCH_SIZE;
1360                                 continue;
1361                         }
1362                 }
1363
1364                 if (virtio_dev_rx_single_packed(dev, vq, pkts[pkt_idx]))
1365                         break;
1366                 pkt_idx++;
1367                 remained--;
1368
1369         } while (pkt_idx < count);
1370
1371         if (vq->shadow_used_idx) {
1372                 do_data_copy_enqueue(dev, vq);
1373                 vhost_flush_enqueue_shadow_packed(dev, vq);
1374         }
1375
1376         if (pkt_idx)
1377                 vhost_vring_call_packed(dev, vq);
1378
1379         return pkt_idx;
1380 }
1381
1382 static __rte_always_inline uint32_t
1383 virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
1384         struct rte_mbuf **pkts, uint32_t count)
1385 {
1386         struct vhost_virtqueue *vq;
1387         uint32_t nb_tx = 0;
1388
1389         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
1390         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
1391                 VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
1392                         dev->vid, __func__, queue_id);
1393                 return 0;
1394         }
1395
1396         vq = dev->virtqueue[queue_id];
1397
1398         rte_spinlock_lock(&vq->access_lock);
1399
1400         if (unlikely(!vq->enabled))
1401                 goto out_access_unlock;
1402
1403         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1404                 vhost_user_iotlb_rd_lock(vq);
1405
1406         if (unlikely(!vq->access_ok))
1407                 if (unlikely(vring_translate(dev, vq) < 0))
1408                         goto out;
1409
1410         count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
1411         if (count == 0)
1412                 goto out;
1413
1414         if (vq_is_packed(dev))
1415                 nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
1416         else
1417                 nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);
1418
1419 out:
1420         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1421                 vhost_user_iotlb_rd_unlock(vq);
1422
1423 out_access_unlock:
1424         rte_spinlock_unlock(&vq->access_lock);
1425
1426         return nb_tx;
1427 }
1428
1429 uint16_t
1430 rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
1431         struct rte_mbuf **__rte_restrict pkts, uint16_t count)
1432 {
1433         struct virtio_net *dev = get_device(vid);
1434
1435         if (!dev)
1436                 return 0;
1437
1438         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
1439                 VHOST_LOG_DATA(ERR,
1440                         "(%d) %s: built-in vhost net backend is disabled.\n",
1441                         dev->vid, __func__);
1442                 return 0;
1443         }
1444
1445         return virtio_dev_rx(dev, queue_id, pkts, count);
1446 }
1447
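     /*
      * Map the current async packet index and the number of packets still
      * in flight to the slot holding the oldest in-flight packet's info
      * entry; vq_size is assumed to be a power of two here.
      */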
1448 static __rte_always_inline uint16_t
1449 virtio_dev_rx_async_get_info_idx(uint16_t pkts_idx,
1450         uint16_t vq_size, uint16_t n_inflight)
1451 {
1452         return pkts_idx > n_inflight ? (pkts_idx - n_inflight) :
1453                 (vq_size - n_inflight + pkts_idx) & (vq_size - 1);
1454 }
1455
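     /*
      * Async (DMA-assisted) enqueue for split rings: reserve guest buffers,
      * build per-packet source/destination iovec iterators, and hand bursts
      * of descriptors to the registered async channel via transfer_data().
      * Packets with nothing to offload are returned through comp_pkts as
      * already completed.
      */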
1456 static __rte_noinline uint32_t
1457 virtio_dev_rx_async_submit_split(struct virtio_net *dev,
1458         struct vhost_virtqueue *vq, uint16_t queue_id,
1459         struct rte_mbuf **pkts, uint32_t count,
1460         struct rte_mbuf **comp_pkts, uint32_t *comp_count)
1461 {
1462         uint32_t pkt_idx = 0, pkt_burst_idx = 0;
1463         uint16_t num_buffers;
1464         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1465         uint16_t avail_head;
1466
1467         struct rte_vhost_iov_iter *it_pool = vq->it_pool;
1468         struct iovec *vec_pool = vq->vec_pool;
1469         struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
1470         struct iovec *src_iovec = vec_pool;
1471         struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
1472         struct rte_vhost_iov_iter *src_it = it_pool;
1473         struct rte_vhost_iov_iter *dst_it = it_pool + 1;
1474         uint16_t slot_idx = 0;
1475         uint16_t segs_await = 0;
1476         struct async_inflight_info *pkts_info = vq->async_pkts_info;
1477         uint32_t n_pkts = 0, pkt_err = 0;
1478         uint32_t num_async_pkts = 0, num_done_pkts = 0;
1479         struct {
1480                 uint16_t pkt_idx;
1481                 uint16_t last_avail_idx;
1482         } async_pkts_log[MAX_PKT_BURST];
1483
1484         /*
1485          * The ordering between avail index and desc reads needs to be enforced.
1486          */
1487         avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
1488
1489         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1490
1491         for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
1492                 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
1493                 uint16_t nr_vec = 0;
1494
1495                 if (unlikely(reserve_avail_buf_split(dev, vq,
1496                                                 pkt_len, buf_vec, &num_buffers,
1497                                                 avail_head, &nr_vec) < 0)) {
1498                         VHOST_LOG_DATA(DEBUG,
1499                                 "(%d) failed to get enough desc from vring\n",
1500                                 dev->vid);
1501                         vq->shadow_used_idx -= num_buffers;
1502                         break;
1503                 }
1504
1505                 VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
1506                         dev->vid, vq->last_avail_idx,
1507                         vq->last_avail_idx + num_buffers);
1508
1509                 if (async_mbuf_to_desc(dev, vq, pkts[pkt_idx],
1510                                 buf_vec, nr_vec, num_buffers,
1511                                 src_iovec, dst_iovec, src_it, dst_it) < 0) {
1512                         vq->shadow_used_idx -= num_buffers;
1513                         break;
1514                 }
1515
1516                 slot_idx = (vq->async_pkts_idx + num_async_pkts) &
1517                         (vq->size - 1);
1518                 if (src_it->count) {
1519                         uint16_t from, to;
1520
1521                         async_fill_desc(&tdes[pkt_burst_idx++], src_it, dst_it);
1522                         pkts_info[slot_idx].descs = num_buffers;
1523                         pkts_info[slot_idx].mbuf = pkts[pkt_idx];
1524                         async_pkts_log[num_async_pkts].pkt_idx = pkt_idx;
1525                         async_pkts_log[num_async_pkts++].last_avail_idx =
1526                                 vq->last_avail_idx;
1527                         segs_await += src_it->nr_segs;
1528                         src_iovec += src_it->nr_segs;
1529                         dst_iovec += dst_it->nr_segs;
1530                         src_it += 2;
1531                         dst_it += 2;
1532
1533                         /**
1534                          * recover shadow used ring and keep DMA-occupied
1535                          * descriptors.
1536                          */
1537                         from = vq->shadow_used_idx - num_buffers;
1538                         to = vq->async_desc_idx & (vq->size - 1);
1539                         if (num_buffers + to <= vq->size) {
1540                                 rte_memcpy(&vq->async_descs_split[to],
1541                                                 &vq->shadow_used_split[from],
1542                                                 num_buffers *
1543                                                 sizeof(struct vring_used_elem));
1544                         } else {
1545                                 int size = vq->size - to;
1546
1547                                 rte_memcpy(&vq->async_descs_split[to],
1548                                                 &vq->shadow_used_split[from],
1549                                                 size *
1550                                                 sizeof(struct vring_used_elem));
1551                                 rte_memcpy(vq->async_descs_split,
1552                                                 &vq->shadow_used_split[from +
1553                                                 size], (num_buffers - size) *
1554                                            sizeof(struct vring_used_elem));
1555                         }
1556                         vq->async_desc_idx += num_buffers;
1557                         vq->shadow_used_idx -= num_buffers;
1558                 } else
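                             /*
                              * No segment was queued for the async channel:
                              * the packet data is expected to have been
                              * handled by the CPU copy path, so report it as
                              * completed right away.
                              */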
1559                         comp_pkts[num_done_pkts++] = pkts[pkt_idx];
1560
1561                 vq->last_avail_idx += num_buffers;
1562
1563                 /*
1564                  * Conditions to trigger the async device transfer:
1565                  * - the number of buffered packets reaches the transfer threshold
1566                  * - fewer than BUF_VECTOR_MAX unused async iov entries remain
1567                  */
1568                 if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
1569                         ((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
1570                         BUF_VECTOR_MAX))) {
1571                         n_pkts = vq->async_ops.transfer_data(dev->vid,
1572                                         queue_id, tdes, 0, pkt_burst_idx);
1573                         src_iovec = vec_pool;
1574                         dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
1575                         src_it = it_pool;
1576                         dst_it = it_pool + 1;
1577                         segs_await = 0;
1578                         vq->async_pkts_inflight_n += n_pkts;
1579
1580                         if (unlikely(n_pkts < pkt_burst_idx)) {
1581                                 /*
1582                                  * record the number of failed packets here;
1583                                  * the actual error processing is done when
1584                                  * the application polls for completion
1585                                  */
1586                                 pkt_err = pkt_burst_idx - n_pkts;
1587                                 pkt_burst_idx = 0;
1588                                 break;
1589                         }
1590
1591                         pkt_burst_idx = 0;
1592                 }
1593         }
1594
1595         if (pkt_burst_idx) {
1596                 n_pkts = vq->async_ops.transfer_data(dev->vid,
1597                                 queue_id, tdes, 0, pkt_burst_idx);
1598                 vq->async_pkts_inflight_n += n_pkts;
1599
1600                 if (unlikely(n_pkts < pkt_burst_idx))
1601                         pkt_err = pkt_burst_idx - n_pkts;
1602         }
1603
1604         do_data_copy_enqueue(dev, vq);
1605
1606         if (unlikely(pkt_err)) {
1607                 uint16_t num_descs = 0;
1608
1609                 num_async_pkts -= pkt_err;
1610                 /* calculate the sum of descriptors of DMA-error packets. */
1611                 while (pkt_err-- > 0) {
1612                         num_descs += pkts_info[slot_idx & (vq->size - 1)].descs;
1613                         slot_idx--;
1614                 }
1615                 vq->async_desc_idx -= num_descs;
1616                 /* recover shadow used ring and available ring */
1617                 vq->shadow_used_idx -= (vq->last_avail_idx -
1618                                 async_pkts_log[num_async_pkts].last_avail_idx -
1619                                 num_descs);
1620                 vq->last_avail_idx =
1621                         async_pkts_log[num_async_pkts].last_avail_idx;
1622                 pkt_idx = async_pkts_log[num_async_pkts].pkt_idx;
1623                 num_done_pkts = pkt_idx - num_async_pkts;
1624         }
1625
1626         vq->async_pkts_idx += num_async_pkts;
1627         *comp_count = num_done_pkts;
1628
1629         if (likely(vq->shadow_used_idx)) {
1630                 flush_shadow_used_ring_split(dev, vq);
1631                 vhost_vring_call_split(dev, vq);
1632         }
1633
1634         return pkt_idx;
1635 }
1636
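     /*
      * Poll the async channel for completed copies, write the corresponding
      * descriptors back to the used ring and hand the completed mbufs back
      * to the caller.
      */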
1637 uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
1638                 struct rte_mbuf **pkts, uint16_t count)
1639 {
1640         struct virtio_net *dev = get_device(vid);
1641         struct vhost_virtqueue *vq;
1642         uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0;
1643         uint16_t start_idx, pkts_idx, vq_size;
1644         struct async_inflight_info *pkts_info;
1645         uint16_t from, i;
1646
1647         if (!dev)
1648                 return 0;
1649
1650         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
1651         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
1652                 VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
1653                         dev->vid, __func__, queue_id);
1654                 return 0;
1655         }
1656
1657         vq = dev->virtqueue[queue_id];
1658
1659         if (unlikely(!vq->async_registered)) {
1660                 VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
1661                         dev->vid, __func__, queue_id);
1662                 return 0;
1663         }
1664
1665         rte_spinlock_lock(&vq->access_lock);
1666
1667         pkts_idx = vq->async_pkts_idx & (vq->size - 1);
1668         pkts_info = vq->async_pkts_info;
1669         vq_size = vq->size;
1670         start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
1671                 vq_size, vq->async_pkts_inflight_n);
1672
1673         if (count > vq->async_last_pkts_n)
1674                 n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
1675                         queue_id, 0, count - vq->async_last_pkts_n);
1676         n_pkts_cpl += vq->async_last_pkts_n;
1677
1678         n_pkts_put = RTE_MIN(count, n_pkts_cpl);
1679         if (unlikely(n_pkts_put == 0)) {
1680                 vq->async_last_pkts_n = n_pkts_cpl;
1681                 goto done;
1682         }
1683
1684         for (i = 0; i < n_pkts_put; i++) {
1685                 from = (start_idx + i) & (vq_size - 1);
1686                 n_descs += pkts_info[from].descs;
1687                 pkts[i] = pkts_info[from].mbuf;
1688         }
1689         vq->async_last_pkts_n = n_pkts_cpl - n_pkts_put;
1690         vq->async_pkts_inflight_n -= n_pkts_put;
1691
1692         if (likely(vq->enabled && vq->access_ok)) {
1693                 uint16_t nr_left = n_descs;
1694                 uint16_t nr_copy;
1695                 uint16_t to;
1696
1697                 /* write back completed descriptors to used ring */
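                     /* Both the async descriptor cache and the used ring may
                      * wrap, so the copy is split into at most two chunks per
                      * iteration.
                      */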
1698                 do {
1699                         from = vq->last_async_desc_idx & (vq->size - 1);
1700                         nr_copy = nr_left + from <= vq->size ? nr_left :
1701                                 vq->size - from;
1702                         to = vq->last_used_idx & (vq->size - 1);
1703
1704                         if (to + nr_copy <= vq->size) {
1705                                 rte_memcpy(&vq->used->ring[to],
1706                                                 &vq->async_descs_split[from],
1707                                                 nr_copy *
1708                                                 sizeof(struct vring_used_elem));
1709                         } else {
1710                                 uint16_t size = vq->size - to;
1711
1712                                 rte_memcpy(&vq->used->ring[to],
1713                                                 &vq->async_descs_split[from],
1714                                                 size *
1715                                                 sizeof(struct vring_used_elem));
1716                                 rte_memcpy(vq->used->ring,
1717                                                 &vq->async_descs_split[from +
1718                                                 size], (nr_copy - size) *
1719                                                 sizeof(struct vring_used_elem));
1720                         }
1721
1722                         vq->last_async_desc_idx += nr_copy;
1723                         vq->last_used_idx += nr_copy;
1724                         nr_left -= nr_copy;
1725                 } while (nr_left > 0);
1726
1727                 __atomic_add_fetch(&vq->used->idx, n_descs, __ATOMIC_RELEASE);
1728                 vhost_vring_call_split(dev, vq);
1729         } else
1730                 vq->last_async_desc_idx += n_descs;
1731
1732 done:
1733         rte_spinlock_unlock(&vq->access_lock);
1734
1735         return n_pkts_put;
1736 }
1737
1738 static __rte_always_inline uint32_t
1739 virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
1740         struct rte_mbuf **pkts, uint32_t count,
1741         struct rte_mbuf **comp_pkts, uint32_t *comp_count)
1742 {
1743         struct vhost_virtqueue *vq;
1744         uint32_t nb_tx = 0;
1745
1746         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
1747         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
1748                 VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
1749                         dev->vid, __func__, queue_id);
1750                 return 0;
1751         }
1752
1753         vq = dev->virtqueue[queue_id];
1754
1755         rte_spinlock_lock(&vq->access_lock);
1756
1757         if (unlikely(!vq->enabled || !vq->async_registered))
1758                 goto out_access_unlock;
1759
1760         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1761                 vhost_user_iotlb_rd_lock(vq);
1762
1763         if (unlikely(!vq->access_ok))
1764                 if (unlikely(vring_translate(dev, vq) < 0))
1765                         goto out;
1766
1767         count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
1768         if (count == 0)
1769                 goto out;
1770
1771         /* TODO: packed queue not implemented */
1772         if (vq_is_packed(dev))
1773                 nb_tx = 0;
1774         else
1775                 nb_tx = virtio_dev_rx_async_submit_split(dev,
1776                                 vq, queue_id, pkts, count, comp_pkts,
1777                                 comp_count);
1778
1779 out:
1780         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1781                 vhost_user_iotlb_rd_unlock(vq);
1782
1783 out_access_unlock:
1784         rte_spinlock_unlock(&vq->access_lock);
1785
1786         return nb_tx;
1787 }
1788
1789 uint16_t
1790 rte_vhost_submit_enqueue_burst(int vid, uint16_t queue_id,
1791                 struct rte_mbuf **pkts, uint16_t count,
1792                 struct rte_mbuf **comp_pkts, uint32_t *comp_count)
1793 {
1794         struct virtio_net *dev = get_device(vid);
1795
1796         *comp_count = 0;
1797         if (!dev)
1798                 return 0;
1799
1800         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
1801                 VHOST_LOG_DATA(ERR,
1802                         "(%d) %s: built-in vhost net backend is disabled.\n",
1803                         dev->vid, __func__);
1804                 return 0;
1805         }
1806
1807         return virtio_dev_rx_async_submit(dev, queue_id, pkts, count, comp_pkts,
1808                         comp_count);
1809 }
1810
1811 static inline bool
1812 virtio_net_with_host_offload(struct virtio_net *dev)
1813 {
1814         if (dev->features &
1815                         ((1ULL << VIRTIO_NET_F_CSUM) |
1816                          (1ULL << VIRTIO_NET_F_HOST_ECN) |
1817                          (1ULL << VIRTIO_NET_F_HOST_TSO4) |
1818                          (1ULL << VIRTIO_NET_F_HOST_TSO6) |
1819                          (1ULL << VIRTIO_NET_F_HOST_UFO)))
1820                 return true;
1821
1822         return false;
1823 }
1824
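     /*
      * Walk the Ethernet header (and a single VLAN tag, if present) to find
      * the L3 protocol, then fill in l2_len/l3_len, the PKT_TX_IPV4/IPV6
      * flags and the location of the L4 header.
      */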
1825 static void
1826 parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
1827 {
1828         struct rte_ipv4_hdr *ipv4_hdr;
1829         struct rte_ipv6_hdr *ipv6_hdr;
1830         void *l3_hdr = NULL;
1831         struct rte_ether_hdr *eth_hdr;
1832         uint16_t ethertype;
1833
1834         eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1835
1836         m->l2_len = sizeof(struct rte_ether_hdr);
1837         ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
1838
1839         if (ethertype == RTE_ETHER_TYPE_VLAN) {
1840                 struct rte_vlan_hdr *vlan_hdr =
1841                         (struct rte_vlan_hdr *)(eth_hdr + 1);
1842
1843                 m->l2_len += sizeof(struct rte_vlan_hdr);
1844                 ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
1845         }
1846
1847         l3_hdr = (char *)eth_hdr + m->l2_len;
1848
1849         switch (ethertype) {
1850         case RTE_ETHER_TYPE_IPV4:
1851                 ipv4_hdr = l3_hdr;
1852                 *l4_proto = ipv4_hdr->next_proto_id;
1853                 m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
1854                 *l4_hdr = (char *)l3_hdr + m->l3_len;
1855                 m->ol_flags |= PKT_TX_IPV4;
1856                 break;
1857         case RTE_ETHER_TYPE_IPV6:
1858                 ipv6_hdr = l3_hdr;
1859                 *l4_proto = ipv6_hdr->proto;
1860                 m->l3_len = sizeof(struct rte_ipv6_hdr);
1861                 *l4_hdr = (char *)l3_hdr + m->l3_len;
1862                 m->ol_flags |= PKT_TX_IPV6;
1863                 break;
1864         default:
1865                 m->l3_len = 0;
1866                 *l4_proto = 0;
1867                 *l4_hdr = NULL;
1868                 break;
1869         }
1870 }
1871
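     /*
      * Translate the virtio-net header of a dequeued packet into mbuf offload
      * flags: checksum requests become PKT_TX_*_CKSUM and GSO hints become
      * PKT_TX_TCP_SEG/PKT_TX_UDP_SEG with the advertised segment size.
      */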
1872 static __rte_always_inline void
1873 vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
1874 {
1875         uint16_t l4_proto = 0;
1876         void *l4_hdr = NULL;
1877         struct rte_tcp_hdr *tcp_hdr = NULL;
1878
1879         if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
1880                 return;
1881
1882         parse_ethernet(m, &l4_proto, &l4_hdr);
1883         if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1884                 if (hdr->csum_start == (m->l2_len + m->l3_len)) {
1885                         switch (hdr->csum_offset) {
1886                         case (offsetof(struct rte_tcp_hdr, cksum)):
1887                                 if (l4_proto == IPPROTO_TCP)
1888                                         m->ol_flags |= PKT_TX_TCP_CKSUM;
1889                                 break;
1890                         case (offsetof(struct rte_udp_hdr, dgram_cksum)):
1891                                 if (l4_proto == IPPROTO_UDP)
1892                                         m->ol_flags |= PKT_TX_UDP_CKSUM;
1893                                 break;
1894                         case (offsetof(struct rte_sctp_hdr, cksum)):
1895                                 if (l4_proto == IPPROTO_SCTP)
1896                                         m->ol_flags |= PKT_TX_SCTP_CKSUM;
1897                                 break;
1898                         default:
1899                                 break;
1900                         }
1901                 }
1902         }
1903
1904         if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1905                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1906                 case VIRTIO_NET_HDR_GSO_TCPV4:
1907                 case VIRTIO_NET_HDR_GSO_TCPV6:
1908                         tcp_hdr = l4_hdr;
1909                         m->ol_flags |= PKT_TX_TCP_SEG;
1910                         m->tso_segsz = hdr->gso_size;
1911                         m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
1912                         break;
1913                 case VIRTIO_NET_HDR_GSO_UDP:
1914                         m->ol_flags |= PKT_TX_UDP_SEG;
1915                         m->tso_segsz = hdr->gso_size;
1916                         m->l4_len = sizeof(struct rte_udp_hdr);
1917                         break;
1918                 default:
1919                         VHOST_LOG_DATA(WARNING,
1920                                 "unsupported gso type %u.\n", hdr->gso_type);
1921                         break;
1922                 }
1923         }
1924 }
1925
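     /*
      * Gather a virtio-net header that is scattered across several
      * descriptors into a contiguous local copy.
      */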
1926 static __rte_noinline void
1927 copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
1928                 struct buf_vector *buf_vec)
1929 {
1930         uint64_t len;
1931         uint64_t remain = sizeof(struct virtio_net_hdr);
1932         uint64_t src;
1933         uint64_t dst = (uint64_t)(uintptr_t)hdr;
1934
1935         while (remain) {
1936                 len = RTE_MIN(remain, buf_vec->buf_len);
1937                 src = buf_vec->buf_addr;
1938                 rte_memcpy((void *)(uintptr_t)dst,
1939                                 (void *)(uintptr_t)src, len);
1940
1941                 remain -= len;
1942                 dst += len;
1943                 buf_vec++;
1944         }
1945 }
1946
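     /*
      * Copy one descriptor chain into an mbuf, allocating and chaining extra
      * mbufs from mbuf_pool when the data does not fit, and extracting the
      * virtio-net header for offload processing when the host negotiated it.
      */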
1947 static __rte_always_inline int
1948 copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
1949                   struct buf_vector *buf_vec, uint16_t nr_vec,
1950                   struct rte_mbuf *m, struct rte_mempool *mbuf_pool)
1951 {
1952         uint32_t buf_avail, buf_offset;
1953         uint64_t buf_addr, buf_len;
1954         uint32_t mbuf_avail, mbuf_offset;
1955         uint32_t cpy_len;
1956         struct rte_mbuf *cur = m, *prev = m;
1957         struct virtio_net_hdr tmp_hdr;
1958         struct virtio_net_hdr *hdr = NULL;
1959         /* A counter to avoid a dead loop on a malformed desc chain */
1960         uint16_t vec_idx = 0;
1961         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
1962         int error = 0;
1963
1964         buf_addr = buf_vec[vec_idx].buf_addr;
1965         buf_len = buf_vec[vec_idx].buf_len;
1966
1967         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
1968                 error = -1;
1969                 goto out;
1970         }
1971
1972         if (virtio_net_with_host_offload(dev)) {
1973                 if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
1974                         /*
1975                          * No luck, the virtio-net header doesn't fit
1976                          * in a contiguous virtual area.
1977                          */
1978                         copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec);
1979                         hdr = &tmp_hdr;
1980                 } else {
1981                         hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
1982                 }
1983         }
1984
1985         /*
1986          * A virtio driver normally uses at least 2 desc buffers
1987          * for Tx: the first for storing the header, and the others
1988          * for storing the data.
1989          */
1990         if (unlikely(buf_len < dev->vhost_hlen)) {
1991                 buf_offset = dev->vhost_hlen - buf_len;
1992                 vec_idx++;
1993                 buf_addr = buf_vec[vec_idx].buf_addr;
1994                 buf_len = buf_vec[vec_idx].buf_len;
1995                 buf_avail  = buf_len - buf_offset;
1996         } else if (buf_len == dev->vhost_hlen) {
1997                 if (unlikely(++vec_idx >= nr_vec))
1998                         goto out;
1999                 buf_addr = buf_vec[vec_idx].buf_addr;
2000                 buf_len = buf_vec[vec_idx].buf_len;
2001
2002                 buf_offset = 0;
2003                 buf_avail = buf_len;
2004         } else {
2005                 buf_offset = dev->vhost_hlen;
2006                 buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
2007         }
2008
2009         PRINT_PACKET(dev,
2010                         (uintptr_t)(buf_addr + buf_offset),
2011                         (uint32_t)buf_avail, 0);
2012
2013         mbuf_offset = 0;
2014         mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
2015         while (1) {
2016                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
2017
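                     /* Large copies, a full batch array, or the header-bearing
                      * first mbuf are copied immediately; smaller copies are
                      * deferred to the batch array and flushed later by
                      * do_data_copy_dequeue().
                      */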
2018                 if (likely(cpy_len > MAX_BATCH_LEN ||
2019                                         vq->batch_copy_nb_elems >= vq->size ||
2020                                         (hdr && cur == m))) {
2021                         rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
2022                                                 mbuf_offset),
2023                                         (void *)((uintptr_t)(buf_addr +
2024                                                         buf_offset)), cpy_len);
2025                 } else {
2026                         batch_copy[vq->batch_copy_nb_elems].dst =
2027                                 rte_pktmbuf_mtod_offset(cur, void *,
2028                                                 mbuf_offset);
2029                         batch_copy[vq->batch_copy_nb_elems].src =
2030                                 (void *)((uintptr_t)(buf_addr + buf_offset));
2031                         batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
2032                         vq->batch_copy_nb_elems++;
2033                 }
2034
2035                 mbuf_avail  -= cpy_len;
2036                 mbuf_offset += cpy_len;
2037                 buf_avail -= cpy_len;
2038                 buf_offset += cpy_len;
2039
2040                 /* This buf has reached its end, get the next one */
2041                 if (buf_avail == 0) {
2042                         if (++vec_idx >= nr_vec)
2043                                 break;
2044
2045                         buf_addr = buf_vec[vec_idx].buf_addr;
2046                         buf_len = buf_vec[vec_idx].buf_len;
2047
2048                         buf_offset = 0;
2049                         buf_avail  = buf_len;
2050
2051                         PRINT_PACKET(dev, (uintptr_t)buf_addr,
2052                                         (uint32_t)buf_avail, 0);
2053                 }
2054
2055                 /*
2056                  * This mbuf has reached its end, get a new one
2057                  * to hold more data.
2058                  */
2059                 if (mbuf_avail == 0) {
2060                         cur = rte_pktmbuf_alloc(mbuf_pool);
2061                         if (unlikely(cur == NULL)) {
2062                                 VHOST_LOG_DATA(ERR, "Failed to "
2063                                         "allocate memory for mbuf.\n");
2064                                 error = -1;
2065                                 goto out;
2066                         }
2067
2068                         prev->next = cur;
2069                         prev->data_len = mbuf_offset;
2070                         m->nb_segs += 1;
2071                         m->pkt_len += mbuf_offset;
2072                         prev = cur;
2073
2074                         mbuf_offset = 0;
2075                         mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
2076                 }
2077         }
2078
2079         prev->data_len = mbuf_offset;
2080         m->pkt_len    += mbuf_offset;
2081
2082         if (hdr)
2083                 vhost_dequeue_offload(hdr, m);
2084
2085 out:
2086
2087         return error;
2088 }
2089
2090 static void
2091 virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)
2092 {
2093         rte_free(opaque);
2094 }
2095
2096 static int
2097 virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
2098 {
2099         struct rte_mbuf_ext_shared_info *shinfo = NULL;
2100         uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
2101         uint16_t buf_len;
2102         rte_iova_t iova;
2103         void *buf;
2104
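             /* Reserve room for the shared info structure, keep the total
              * pointer-aligned and make sure it still fits in a 16-bit
              * buf_len.
              */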
2105         total_len += sizeof(*shinfo) + sizeof(uintptr_t);
2106         total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
2107
2108         if (unlikely(total_len > UINT16_MAX))
2109                 return -ENOSPC;
2110
2111         buf_len = total_len;
2112         buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
2113         if (unlikely(buf == NULL))
2114                 return -ENOMEM;
2115
2116         /* Initialize shinfo */
2117         shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
2118                                                 virtio_dev_extbuf_free, buf);
2119         if (unlikely(shinfo == NULL)) {
2120                 rte_free(buf);
2121                 VHOST_LOG_DATA(ERR, "Failed to init shinfo\n");
2122                 return -1;
2123         }
2124
2125         iova = rte_malloc_virt2iova(buf);
2126         rte_pktmbuf_attach_extbuf(pkt, buf, iova, buf_len, shinfo);
2127         rte_pktmbuf_reset_headroom(pkt);
2128
2129         return 0;
2130 }
2131
2132 /*
2133  * Allocate a pktmbuf suited to the host's buffer settings (extbuf/linearbuf).
2134  */
2135 static __rte_always_inline struct rte_mbuf *
2136 virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
2137                          uint32_t data_len)
2138 {
2139         struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp);
2140
2141         if (unlikely(pkt == NULL)) {
2142                 VHOST_LOG_DATA(ERR,
2143                         "Failed to allocate memory for mbuf.\n");
2144                 return NULL;
2145         }
2146
2147         if (rte_pktmbuf_tailroom(pkt) >= data_len)
2148                 return pkt;
2149
2150         /* attach an external buffer if supported */
2151         if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
2152                 return pkt;
2153
2154         /* check if chained buffers are allowed */
2155         if (!dev->linearbuf)
2156                 return pkt;
2157
2158         /* Data doesn't fit into the buffer and the host supports
2159          * only linear buffers
2160          */
2161         rte_pktmbuf_free(pkt);
2162
2163         return NULL;
2164 }
2165
2166 static __rte_noinline uint16_t
2167 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
2168         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
2169 {
2170         uint16_t i;
2171         uint16_t free_entries;
2172         uint16_t dropped = 0;
2173         static bool allocerr_warned;
2174
2175         /*
2176          * The ordering between avail index and
2177          * desc reads needs to be enforced.
2178          */
2179         free_entries = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE) -
2180                         vq->last_avail_idx;
2181         if (free_entries == 0)
2182                 return 0;
2183
2184         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
2185
2186         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
2187
2188         count = RTE_MIN(count, MAX_PKT_BURST);
2189         count = RTE_MIN(count, free_entries);
2190         VHOST_LOG_DATA(DEBUG, "(%d) about to dequeue %u buffers\n",
2191                         dev->vid, count);
2192
2193         for (i = 0; i < count; i++) {
2194                 struct buf_vector buf_vec[BUF_VECTOR_MAX];
2195                 uint16_t head_idx;
2196                 uint32_t buf_len;
2197                 uint16_t nr_vec = 0;
2198                 int err;
2199
2200                 if (unlikely(fill_vec_buf_split(dev, vq,
2201                                                 vq->last_avail_idx + i,
2202                                                 &nr_vec, buf_vec,
2203                                                 &head_idx, &buf_len,
2204                                                 VHOST_ACCESS_RO) < 0))
2205                         break;
2206
2207                 update_shadow_used_ring_split(vq, head_idx, 0);
2208
2209                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
2210                 if (unlikely(pkts[i] == NULL)) {
2211                         /*
2212                          * mbuf allocation fails for jumbo packets when external
2213                          * buffer allocation is not allowed and linear buffer
2214                          * is required. Drop this packet.
2215                          */
2216                         if (!allocerr_warned) {
2217                                 VHOST_LOG_DATA(ERR,
2218                                         "Failed mbuf alloc of size %d from %s on %s.\n",
2219                                         buf_len, mbuf_pool->name, dev->ifname);
2220                                 allocerr_warned = true;
2221                         }
2222                         dropped += 1;
2223                         i++;
2224                         break;
2225                 }
2226
2227                 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
2228                                 mbuf_pool);
2229                 if (unlikely(err)) {
2230                         rte_pktmbuf_free(pkts[i]);
2231                         if (!allocerr_warned) {
2232                                 VHOST_LOG_DATA(ERR,
2233                                         "Failed to copy desc to mbuf on %s.\n",
2234                                         dev->ifname);
2235                                 allocerr_warned = true;
2236                         }
2237                         dropped += 1;
2238                         i++;
2239                         break;
2240                 }
2241         }
2242
2243         vq->last_avail_idx += i;
2244
2245         do_data_copy_dequeue(vq);
2246         if (unlikely(i < count))
2247                 vq->shadow_used_idx = i;
2248         if (likely(vq->shadow_used_idx)) {
2249                 flush_shadow_used_ring_split(dev, vq);
2250                 vhost_vring_call_split(dev, vq);
2251         }
2252
2253         return (i - dropped);
2254 }
2255
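     /*
      * Check that the next PACKED_BATCH_SIZE descriptors are all available,
      * single-buffer descriptors, translate their addresses and allocate one
      * mbuf per descriptor; returns -1 when the batch path cannot be used.
      */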
2256 static __rte_always_inline int
2257 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
2258                                  struct vhost_virtqueue *vq,
2259                                  struct rte_mempool *mbuf_pool,
2260                                  struct rte_mbuf **pkts,
2261                                  uint16_t avail_idx,
2262                                  uintptr_t *desc_addrs,
2263                                  uint16_t *ids)
2264 {
2265         bool wrap = vq->avail_wrap_counter;
2266         struct vring_packed_desc *descs = vq->desc_packed;
2267         uint64_t lens[PACKED_BATCH_SIZE];
2268         uint64_t buf_lens[PACKED_BATCH_SIZE];
2269         uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2270         uint16_t flags, i;
2271
2272         if (unlikely(avail_idx & PACKED_BATCH_MASK))
2273                 return -1;
2274         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
2275                 return -1;
2276
2277         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2278                 flags = descs[avail_idx + i].flags;
2279                 if (unlikely((wrap != !!(flags & VRING_DESC_F_AVAIL)) ||
2280                              (wrap == !!(flags & VRING_DESC_F_USED))  ||
2281                              (flags & PACKED_DESC_SINGLE_DEQUEUE_FLAG)))
2282                         return -1;
2283         }
2284
2285         rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
2286
2287         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2288                 lens[i] = descs[avail_idx + i].len;
2289
2290         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2291                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
2292                                                   descs[avail_idx + i].addr,
2293                                                   &lens[i], VHOST_ACCESS_RW);
2294         }
2295
2296         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2297                 if (unlikely(!desc_addrs[i]))
2298                         return -1;
2299                 if (unlikely((lens[i] != descs[avail_idx + i].len)))
2300                         return -1;
2301         }
2302
2303         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2304                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]);
2305                 if (!pkts[i])
2306                         goto free_buf;
2307         }
2308
2309         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2310                 buf_lens[i] = pkts[i]->buf_len - pkts[i]->data_off;
2311
2312         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2313                 if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
2314                         goto free_buf;
2315         }
2316
2317         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2318                 pkts[i]->pkt_len = descs[avail_idx + i].len - buf_offset;
2319                 pkts[i]->data_len = pkts[i]->pkt_len;
2320                 ids[i] = descs[avail_idx + i].id;
2321         }
2322
2323         return 0;
2324
2325 free_buf:
2326         for (i = 0; i < PACKED_BATCH_SIZE; i++)
2327                 rte_pktmbuf_free(pkts[i]);
2328
2329         return -1;
2330 }
2331
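     /*
      * Batch dequeue fast path: with the descriptors reserved above, copy
      * each one into its mbuf, apply offload conversion if negotiated,
      * record the used entries and advance last_avail_idx by
      * PACKED_BATCH_SIZE.
      */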
2332 static __rte_always_inline int
2333 virtio_dev_tx_batch_packed(struct virtio_net *dev,
2334                            struct vhost_virtqueue *vq,
2335                            struct rte_mempool *mbuf_pool,
2336                            struct rte_mbuf **pkts)
2337 {
2338         uint16_t avail_idx = vq->last_avail_idx;
2339         uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2340         struct virtio_net_hdr *hdr;
2341         uintptr_t desc_addrs[PACKED_BATCH_SIZE];
2342         uint16_t ids[PACKED_BATCH_SIZE];
2343         uint16_t i;
2344
2345         if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
2346                                              avail_idx, desc_addrs, ids))
2347                 return -1;
2348
2349         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2350                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
2351
2352         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2353                 rte_memcpy(rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
2354                            (void *)(uintptr_t)(desc_addrs[i] + buf_offset),
2355                            pkts[i]->pkt_len);
2356
2357         if (virtio_net_with_host_offload(dev)) {
2358                 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2359                         hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
2360                         vhost_dequeue_offload(hdr, pkts[i]);
2361                 }
2362         }
2363
2364         if (virtio_net_is_inorder(dev))
2365                 vhost_shadow_dequeue_batch_packed_inorder(vq,
2366                         ids[PACKED_BATCH_SIZE - 1]);
2367         else
2368                 vhost_shadow_dequeue_batch_packed(dev, vq, ids);
2369
2370         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
2371
2372         return 0;
2373 }
2374
2375 static __rte_always_inline int
2376 vhost_dequeue_single_packed(struct virtio_net *dev,
2377                             struct vhost_virtqueue *vq,
2378                             struct rte_mempool *mbuf_pool,
2379                             struct rte_mbuf **pkts,
2380                             uint16_t *buf_id,
2381                             uint16_t *desc_count)
2382 {
2383         struct buf_vector buf_vec[BUF_VECTOR_MAX];
2384         uint32_t buf_len;
2385         uint16_t nr_vec = 0;
2386         int err;
2387         static bool allocerr_warned;
2388
2389         if (unlikely(fill_vec_buf_packed(dev, vq,
2390                                          vq->last_avail_idx, desc_count,
2391                                          buf_vec, &nr_vec,
2392                                          buf_id, &buf_len,
2393                                          VHOST_ACCESS_RO) < 0))
2394                 return -1;
2395
2396         *pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
2397         if (unlikely(*pkts == NULL)) {
2398                 if (!allocerr_warned) {
2399                         VHOST_LOG_DATA(ERR,
2400                                 "Failed mbuf alloc of size %d from %s on %s.\n",
2401                                 buf_len, mbuf_pool->name, dev->ifname);
2402                         allocerr_warned = true;
2403                 }
2404                 return -1;
2405         }
2406
2407         err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
2408                                 mbuf_pool);
2409         if (unlikely(err)) {
2410                 if (!allocerr_warned) {
2411                         VHOST_LOG_DATA(ERR,
2412                                 "Failed to copy desc to mbuf on %s.\n",
2413                                 dev->ifname);
2414                         allocerr_warned = true;
2415                 }
2416                 rte_pktmbuf_free(*pkts);
2417                 return -1;
2418         }
2419
2420         return 0;
2421 }
2422
2423 static __rte_always_inline int
2424 virtio_dev_tx_single_packed(struct virtio_net *dev,
2425                             struct vhost_virtqueue *vq,
2426                             struct rte_mempool *mbuf_pool,
2427                             struct rte_mbuf **pkts)
2428 {
2429
2430         uint16_t buf_id, desc_count = 0;
2431         int ret;
2432
2433         ret = vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
2434                                         &desc_count);
2435
2436         if (likely(desc_count > 0)) {
2437                 if (virtio_net_is_inorder(dev))
2438                         vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
2439                                                                    desc_count);
2440                 else
2441                         vhost_shadow_dequeue_single_packed(vq, buf_id,
2442                                         desc_count);
2443
2444                 vq_inc_last_avail_packed(vq, desc_count);
2445         }
2446
2447         return ret;
2448 }
2449
2450 static __rte_noinline uint16_t
2451 virtio_dev_tx_packed(struct virtio_net *dev,
2452                      struct vhost_virtqueue *__rte_restrict vq,
2453                      struct rte_mempool *mbuf_pool,
2454                      struct rte_mbuf **__rte_restrict pkts,
2455                      uint32_t count)
2456 {
2457         uint32_t pkt_idx = 0;
2458         uint32_t remained = count;
2459
2460         do {
2461                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
2462
2463                 if (remained >= PACKED_BATCH_SIZE) {
2464                         if (!virtio_dev_tx_batch_packed(dev, vq, mbuf_pool,
2465                                                         &pkts[pkt_idx])) {
2466                                 pkt_idx += PACKED_BATCH_SIZE;
2467                                 remained -= PACKED_BATCH_SIZE;
2468                                 continue;
2469                         }
2470                 }
2471
2472                 if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
2473                                                 &pkts[pkt_idx]))
2474                         break;
2475                 pkt_idx++;
2476                 remained--;
2477
2478         } while (remained);
2479
2480         if (vq->shadow_used_idx) {
2481                 do_data_copy_dequeue(vq);
2482
2483                 vhost_flush_dequeue_shadow_packed(dev, vq);
2484                 vhost_vring_call_packed(dev, vq);
2485         }
2486
2487         return pkt_idx;
2488 }
2489
2490 uint16_t
2491 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
2492         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
2493 {
2494         struct virtio_net *dev;
2495         struct rte_mbuf *rarp_mbuf = NULL;
2496         struct vhost_virtqueue *vq;
2497         int16_t success = 1;
2498
2499         dev = get_device(vid);
2500         if (!dev)
2501                 return 0;
2502
2503         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
2504                 VHOST_LOG_DATA(ERR,
2505                         "(%d) %s: built-in vhost net backend is disabled.\n",
2506                         dev->vid, __func__);
2507                 return 0;
2508         }
2509
2510         if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
2511                 VHOST_LOG_DATA(ERR,
2512                         "(%d) %s: invalid virtqueue idx %d.\n",
2513                         dev->vid, __func__, queue_id);
2514                 return 0;
2515         }
2516
2517         vq = dev->virtqueue[queue_id];
2518
2519         if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
2520                 return 0;
2521
2522         if (unlikely(!vq->enabled)) {
2523                 count = 0;
2524                 goto out_access_unlock;
2525         }
2526
2527         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2528                 vhost_user_iotlb_rd_lock(vq);
2529
2530         if (unlikely(!vq->access_ok))
2531                 if (unlikely(vring_translate(dev, vq) < 0)) {
2532                         count = 0;
2533                         goto out;
2534                 }
2535
2536         /*
2537          * Construct a RARP broadcast packet and inject it into the "pkts"
2538          * array, so it looks like the guest actually sent such a packet.
2539          *
2540          * Check user_send_rarp() for more information.
2541          *
2542          * broadcast_rarp shares a cacheline in the virtio_net structure
2543          * with some fields that are accessed during enqueue and
2544          * __atomic_compare_exchange_n causes a write when it performs the
2545          * compare and exchange. This could result in false sharing between enqueue
2546          * and dequeue.
2547          *
2548          * Prevent unnecessary false sharing by reading broadcast_rarp first
2549          * and only performing compare and exchange if the read indicates it
2550          * is likely to be set.
2551          */
2552         if (unlikely(__atomic_load_n(&dev->broadcast_rarp, __ATOMIC_ACQUIRE) &&
2553                         __atomic_compare_exchange_n(&dev->broadcast_rarp,
2554                         &success, 0, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED))) {
2555
2556                 rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
2557                 if (rarp_mbuf == NULL) {
2558                         VHOST_LOG_DATA(ERR, "Failed to make RARP packet.\n");
2559                         count = 0;
2560                         goto out;
2561                 }
2562                 count -= 1;
2563         }
2564
2565         if (vq_is_packed(dev))
2566                 count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count);
2567         else
2568                 count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count);
2569
2570 out:
2571         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2572                 vhost_user_iotlb_rd_unlock(vq);
2573
2574 out_access_unlock:
2575         rte_spinlock_unlock(&vq->access_lock);
2576
2577         if (unlikely(rarp_mbuf != NULL)) {
2578                 /*
2579                  * Inject it at the head of the "pkts" array, so that the
2580                  * switch's MAC learning table gets updated first.
2581                  */
2582                 memmove(&pkts[1], pkts, count * sizeof(struct rte_mbuf *));
2583                 pkts[0] = rarp_mbuf;
2584                 count += 1;
2585         }
2586
2587         return count;
2588 }