lib/librte_vhost/virtio_net.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdbool.h>
#include <linux/virtio_net.h>

#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_vhost.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>
#include <rte_arp.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>
#include <rte_vhost_async.h>

#include "iotlb.h"
#include "vhost.h"

#define MAX_BATCH_LEN 256

#define VHOST_ASYNC_BATCH_THRESHOLD 32

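/*
 * Small helpers testing device feature bits negotiated with the
 * front-end: mergeable Rx buffers and in-order descriptor use.
 */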
static __rte_always_inline bool
rxvq_is_mergeable(struct virtio_net *dev)
{
        return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
}

static __rte_always_inline bool
virtio_net_is_inorder(struct virtio_net *dev)
{
        return dev->features & (1ULL << VIRTIO_F_IN_ORDER);
}

static bool
is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
{
        return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
}

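/*
 * Flush the small copies accumulated in vq->batch_copy_elems. The
 * enqueue variant also logs the written guest memory for live
 * migration; the dequeue variant only needs the plain copies.
 */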
static inline void
do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        struct batch_copy_elem *elem = vq->batch_copy_elems;
        uint16_t count = vq->batch_copy_nb_elems;
        int i;

        for (i = 0; i < count; i++) {
                rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
                vhost_log_cache_write_iova(dev, vq, elem[i].log_addr,
                                           elem[i].len);
                PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
        }

        vq->batch_copy_nb_elems = 0;
}

static inline void
do_data_copy_dequeue(struct vhost_virtqueue *vq)
{
        struct batch_copy_elem *elem = vq->batch_copy_elems;
        uint16_t count = vq->batch_copy_nb_elems;
        int i;

        for (i = 0; i < count; i++)
                rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);

        vq->batch_copy_nb_elems = 0;
}

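/*
 * Write the shadow used entries back to the split ring's used ring,
 * wrapping at the ring boundary when needed, and publish the new used
 * index with release semantics so the guest sees consistent entries
 * before the index update.
 */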
static __rte_always_inline void
do_flush_shadow_used_ring_split(struct virtio_net *dev,
                        struct vhost_virtqueue *vq,
                        uint16_t to, uint16_t from, uint16_t size)
{
        rte_memcpy(&vq->used->ring[to],
                        &vq->shadow_used_split[from],
                        size * sizeof(struct vring_used_elem));
        vhost_log_cache_used_vring(dev, vq,
                        offsetof(struct vring_used, ring[to]),
                        size * sizeof(struct vring_used_elem));
}

static __rte_always_inline void
flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        uint16_t used_idx = vq->last_used_idx & (vq->size - 1);

        if (used_idx + vq->shadow_used_idx <= vq->size) {
                do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
                                          vq->shadow_used_idx);
        } else {
                uint16_t size;

                /* update the used ring interval [used_idx, vq->size) */
                size = vq->size - used_idx;
                do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);

                /* update the remaining used ring interval [0, left_size) */
                do_flush_shadow_used_ring_split(dev, vq, 0, size,
                                          vq->shadow_used_idx - size);
        }
        vq->last_used_idx += vq->shadow_used_idx;

        vhost_log_cache_sync(dev, vq);

        __atomic_add_fetch(&vq->used->idx, vq->shadow_used_idx,
                           __ATOMIC_RELEASE);
        vq->shadow_used_idx = 0;
        vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
                sizeof(vq->used->idx));
}

static __rte_always_inline void
async_flush_shadow_used_ring_split(struct virtio_net *dev,
        struct vhost_virtqueue *vq)
{
        uint16_t used_idx = vq->last_used_idx & (vq->size - 1);

        if (used_idx + vq->shadow_used_idx <= vq->size) {
                do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
                                          vq->shadow_used_idx);
        } else {
                uint16_t size;

                /* update the used ring interval [used_idx, vq->size) */
                size = vq->size - used_idx;
                do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);

                /* update the remaining used ring interval [0, left_size) */
                do_flush_shadow_used_ring_split(dev, vq, 0, size,
                                          vq->shadow_used_idx - size);
        }

        vq->last_used_idx += vq->shadow_used_idx;
        vq->shadow_used_idx = 0;
}

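/*
 * Stash one used-ring entry in the shadow array; it is written to the
 * real used ring later by flush_shadow_used_ring_split().
 */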
static __rte_always_inline void
update_shadow_used_ring_split(struct vhost_virtqueue *vq,
                         uint16_t desc_idx, uint32_t len)
{
        uint16_t i = vq->shadow_used_idx++;

        vq->shadow_used_split[i].id  = desc_idx;
        vq->shadow_used_split[i].len = len;
}

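/*
 * Flush shadow used entries to a packed ring. The ids/lens are
 * written first, then the flags after a write barrier; the head
 * descriptor's flags are written last so the guest never observes a
 * partially updated chain.
 */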
static __rte_always_inline void
vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
                                  struct vhost_virtqueue *vq)
{
        int i;
        uint16_t used_idx = vq->last_used_idx;
        uint16_t head_idx = vq->last_used_idx;
        uint16_t head_flags = 0;

        /* Split loop in two to save memory barriers */
        for (i = 0; i < vq->shadow_used_idx; i++) {
                vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
                vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;

                used_idx += vq->shadow_used_packed[i].count;
                if (used_idx >= vq->size)
                        used_idx -= vq->size;
        }

        rte_smp_wmb();

        for (i = 0; i < vq->shadow_used_idx; i++) {
                uint16_t flags;

                if (vq->shadow_used_packed[i].len)
                        flags = VRING_DESC_F_WRITE;
                else
                        flags = 0;

                if (vq->used_wrap_counter) {
                        flags |= VRING_DESC_F_USED;
                        flags |= VRING_DESC_F_AVAIL;
                } else {
                        flags &= ~VRING_DESC_F_USED;
                        flags &= ~VRING_DESC_F_AVAIL;
                }

                if (i > 0) {
                        vq->desc_packed[vq->last_used_idx].flags = flags;

                        vhost_log_cache_used_vring(dev, vq,
                                        vq->last_used_idx *
                                        sizeof(struct vring_packed_desc),
                                        sizeof(struct vring_packed_desc));
                } else {
                        head_idx = vq->last_used_idx;
                        head_flags = flags;
                }

                vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count);
        }

        vq->desc_packed[head_idx].flags = head_flags;

        vhost_log_cache_used_vring(dev, vq,
                                head_idx *
                                sizeof(struct vring_packed_desc),
                                sizeof(struct vring_packed_desc));

        vq->shadow_used_idx = 0;
        vhost_log_cache_sync(dev, vq);
}

static __rte_always_inline void
vhost_flush_dequeue_shadow_packed(struct virtio_net *dev,
                                  struct vhost_virtqueue *vq)
{
        struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];

        vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
        rte_smp_wmb();
        vq->desc_packed[vq->shadow_last_used_idx].flags = used_elem->flags;

        vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
                                   sizeof(struct vring_packed_desc),
                                   sizeof(struct vring_packed_desc));
        vq->shadow_used_idx = 0;
        vhost_log_cache_sync(dev, vq);
}

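/*
 * Mark a full batch of PACKED_BATCH_SIZE descriptors as used in one
 * shot: ids and lens first, a write barrier, then the flags, followed
 * by dirty-page logging for live migration.
 */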
static __rte_always_inline void
vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
                                 struct vhost_virtqueue *vq,
                                 uint64_t *lens,
                                 uint16_t *ids)
{
        uint16_t i;
        uint16_t flags;

        if (vq->shadow_used_idx) {
                do_data_copy_enqueue(dev, vq);
                vhost_flush_enqueue_shadow_packed(dev, vq);
        }

        flags = PACKED_DESC_ENQUEUE_USED_FLAG(vq->used_wrap_counter);

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                vq->desc_packed[vq->last_used_idx + i].id = ids[i];
                vq->desc_packed[vq->last_used_idx + i].len = lens[i];
        }

        rte_smp_wmb();

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                vq->desc_packed[vq->last_used_idx + i].flags = flags;

        vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
                                   sizeof(struct vring_packed_desc),
                                   sizeof(struct vring_packed_desc) *
                                   PACKED_BATCH_SIZE);
        vhost_log_cache_sync(dev, vq);

        vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
}

static __rte_always_inline void
vhost_shadow_dequeue_batch_packed_inorder(struct vhost_virtqueue *vq,
                                          uint16_t id)
{
        vq->shadow_used_packed[0].id = id;

        if (!vq->shadow_used_idx) {
                vq->shadow_last_used_idx = vq->last_used_idx;
                vq->shadow_used_packed[0].flags =
                        PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
                vq->shadow_used_packed[0].len = 0;
                vq->shadow_used_packed[0].count = 1;
                vq->shadow_used_idx++;
        }

        vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
}

static __rte_always_inline void
vhost_shadow_dequeue_batch_packed(struct virtio_net *dev,
                                  struct vhost_virtqueue *vq,
                                  uint16_t *ids)
{
        uint16_t flags;
        uint16_t i;
        uint16_t begin;

        flags = PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);

        if (!vq->shadow_used_idx) {
                vq->shadow_last_used_idx = vq->last_used_idx;
                vq->shadow_used_packed[0].id  = ids[0];
                vq->shadow_used_packed[0].len = 0;
                vq->shadow_used_packed[0].count = 1;
                vq->shadow_used_packed[0].flags = flags;
                vq->shadow_used_idx++;
                begin = 1;
        } else
                begin = 0;

        vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE) {
                vq->desc_packed[vq->last_used_idx + i].id = ids[i];
                vq->desc_packed[vq->last_used_idx + i].len = 0;
        }

        rte_smp_wmb();
        vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE)
                vq->desc_packed[vq->last_used_idx + i].flags = flags;

        vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
                                   sizeof(struct vring_packed_desc),
                                   sizeof(struct vring_packed_desc) *
                                   PACKED_BATCH_SIZE);
        vhost_log_cache_sync(dev, vq);

        vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
}

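/*
 * Record a single dequeued descriptor chain as used. The head entry
 * is kept in the shadow array so its flags can be flipped last;
 * follow-up chains are written to the ring directly.
 */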
static __rte_always_inline void
vhost_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
                                   uint16_t buf_id,
                                   uint16_t count)
{
        uint16_t flags;

        flags = vq->desc_packed[vq->last_used_idx].flags;
        if (vq->used_wrap_counter) {
                flags |= VRING_DESC_F_USED;
                flags |= VRING_DESC_F_AVAIL;
        } else {
                flags &= ~VRING_DESC_F_USED;
                flags &= ~VRING_DESC_F_AVAIL;
        }

        if (!vq->shadow_used_idx) {
                vq->shadow_last_used_idx = vq->last_used_idx;

                vq->shadow_used_packed[0].id  = buf_id;
                vq->shadow_used_packed[0].len = 0;
                vq->shadow_used_packed[0].flags = flags;
                vq->shadow_used_idx++;
        } else {
                vq->desc_packed[vq->last_used_idx].id = buf_id;
                vq->desc_packed[vq->last_used_idx].len = 0;
                vq->desc_packed[vq->last_used_idx].flags = flags;
        }

        vq_inc_last_used_packed(vq, count);
}

static __rte_always_inline void
vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
                                           uint16_t buf_id,
                                           uint16_t count)
{
        uint16_t flags;

        vq->shadow_used_packed[0].id = buf_id;

        flags = vq->desc_packed[vq->last_used_idx].flags;
        if (vq->used_wrap_counter) {
                flags |= VRING_DESC_F_USED;
                flags |= VRING_DESC_F_AVAIL;
        } else {
                flags &= ~VRING_DESC_F_USED;
                flags &= ~VRING_DESC_F_AVAIL;
        }

        if (!vq->shadow_used_idx) {
                vq->shadow_last_used_idx = vq->last_used_idx;
                vq->shadow_used_packed[0].len = 0;
                vq->shadow_used_packed[0].flags = flags;
                vq->shadow_used_idx++;
        }

        vq_inc_last_used_packed(vq, count);
}

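/*
 * Queue the used entries of one enqueued packet (possibly spanning
 * several buffers) in the shadow array, and flush once a whole
 * PACKED_BATCH_SIZE-aligned block has been accumulated.
 */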
static __rte_always_inline void
vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
                                   struct vhost_virtqueue *vq,
                                   uint32_t len[],
                                   uint16_t id[],
                                   uint16_t count[],
                                   uint16_t num_buffers)
{
        uint16_t i;

        for (i = 0; i < num_buffers; i++) {
                /* align the shadow flush with the batch size */
                if (!vq->shadow_used_idx)
                        vq->shadow_aligned_idx = vq->last_used_idx &
                                PACKED_BATCH_MASK;
                vq->shadow_used_packed[vq->shadow_used_idx].id  = id[i];
                vq->shadow_used_packed[vq->shadow_used_idx].len = len[i];
                vq->shadow_used_packed[vq->shadow_used_idx].count = count[i];
                vq->shadow_aligned_idx += count[i];
                vq->shadow_used_idx++;
        }

        if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
                do_data_copy_enqueue(dev, vq);
                vhost_flush_enqueue_shadow_packed(dev, vq);
        }
}

/* avoid write operations when the value is unchanged, to lessen cache pressure */
#define ASSIGN_UNLESS_EQUAL(var, val) do {      \
        if ((var) != (val))                     \
                (var) = (val);                  \
} while (0)

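/*
 * Translate mbuf offload flags into the virtio_net_hdr the guest will
 * read: checksum offload requests, an optional software IPv4 header
 * checksum, and the GSO type/size for TCP/UDP segmentation offloads.
 */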
static __rte_always_inline void
virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
{
        uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;

        if (m_buf->ol_flags & PKT_TX_TCP_SEG)
                csum_l4 |= PKT_TX_TCP_CKSUM;

        if (csum_l4) {
                net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
                net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;

                switch (csum_l4) {
                case PKT_TX_TCP_CKSUM:
                        net_hdr->csum_offset = (offsetof(struct rte_tcp_hdr,
                                                cksum));
                        break;
                case PKT_TX_UDP_CKSUM:
                        net_hdr->csum_offset = (offsetof(struct rte_udp_hdr,
                                                dgram_cksum));
                        break;
                case PKT_TX_SCTP_CKSUM:
                        net_hdr->csum_offset = (offsetof(struct rte_sctp_hdr,
                                                cksum));
                        break;
                }
        } else {
                ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
                ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
                ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
        }

        /* IP cksum verification cannot be bypassed, so calculate it here */
        if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
                struct rte_ipv4_hdr *ipv4_hdr;

                ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
                                                   m_buf->l2_len);
                ipv4_hdr->hdr_checksum = 0;
                ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
        }

        if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
                if (m_buf->ol_flags & PKT_TX_IPV4)
                        net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
                else
                        net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
                net_hdr->gso_size = m_buf->tso_segsz;
                net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
                                        + m_buf->l4_len;
        } else if (m_buf->ol_flags & PKT_TX_UDP_SEG) {
                net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
                net_hdr->gso_size = m_buf->tso_segsz;
                net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
                        m_buf->l4_len;
        } else {
                ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
                ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
                ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
        }
}

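/*
 * Map one guest-physical descriptor range into host virtual
 * addresses. With an IOMMU the range may be split across several
 * contiguous chunks, so one descriptor can consume multiple buf_vec
 * entries.
 */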
static __rte_always_inline int
map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
                struct buf_vector *buf_vec, uint16_t *vec_idx,
                uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
{
        uint16_t vec_id = *vec_idx;

        while (desc_len) {
                uint64_t desc_addr;
                uint64_t desc_chunck_len = desc_len;

                if (unlikely(vec_id >= BUF_VECTOR_MAX))
                        return -1;

                desc_addr = vhost_iova_to_vva(dev, vq,
                                desc_iova,
                                &desc_chunck_len,
                                perm);
                if (unlikely(!desc_addr))
                        return -1;

                rte_prefetch0((void *)(uintptr_t)desc_addr);

                buf_vec[vec_id].buf_iova = desc_iova;
                buf_vec[vec_id].buf_addr = desc_addr;
                buf_vec[vec_id].buf_len  = desc_chunck_len;

                desc_len -= desc_chunck_len;
                desc_iova += desc_chunck_len;
                vec_id++;
        }
        *vec_idx = vec_id;

        return 0;
}

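/*
 * Walk one descriptor chain of a split ring (following indirect
 * tables when present) and fill buf_vec with the mapped buffers,
 * returning the chain head index and total length.
 */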
static __rte_always_inline int
fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
                         uint32_t avail_idx, uint16_t *vec_idx,
                         struct buf_vector *buf_vec, uint16_t *desc_chain_head,
                         uint32_t *desc_chain_len, uint8_t perm)
{
        uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
        uint16_t vec_id = *vec_idx;
        uint32_t len    = 0;
        uint64_t dlen;
        uint32_t nr_descs = vq->size;
        uint32_t cnt    = 0;
        struct vring_desc *descs = vq->desc;
        struct vring_desc *idesc = NULL;

        if (unlikely(idx >= vq->size))
                return -1;

        *desc_chain_head = idx;

        if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
                dlen = vq->desc[idx].len;
                nr_descs = dlen / sizeof(struct vring_desc);
                if (unlikely(nr_descs > vq->size))
                        return -1;

                descs = (struct vring_desc *)(uintptr_t)
                        vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
                                                &dlen,
                                                VHOST_ACCESS_RO);
                if (unlikely(!descs))
                        return -1;

                if (unlikely(dlen < vq->desc[idx].len)) {
                        /*
                         * The indirect desc table is not contiguous
                         * in process VA space, so we have to copy it.
                         */
                        idesc = vhost_alloc_copy_ind_table(dev, vq,
                                        vq->desc[idx].addr, vq->desc[idx].len);
                        if (unlikely(!idesc))
                                return -1;

                        descs = idesc;
                }

                idx = 0;
        }

        while (1) {
                if (unlikely(idx >= nr_descs || cnt++ >= nr_descs)) {
                        free_ind_table(idesc);
                        return -1;
                }

                len += descs[idx].len;

                if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
                                                descs[idx].addr, descs[idx].len,
                                                perm))) {
                        free_ind_table(idesc);
                        return -1;
                }

                if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
                        break;

                idx = descs[idx].next;
        }

        *desc_chain_len = len;
        *vec_idx = vec_id;

        if (unlikely(!!idesc))
                free_ind_table(idesc);

        return 0;
}

/*
 * Returns -1 on fail, 0 on success
 */
static inline int
reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
                                uint32_t size, struct buf_vector *buf_vec,
                                uint16_t *num_buffers, uint16_t avail_head,
                                uint16_t *nr_vec)
{
        uint16_t cur_idx;
        uint16_t vec_idx = 0;
        uint16_t max_tries, tries = 0;

        uint16_t head_idx = 0;
        uint32_t len = 0;

        *num_buffers = 0;
        cur_idx  = vq->last_avail_idx;

        if (rxvq_is_mergeable(dev))
                max_tries = vq->size - 1;
        else
                max_tries = 1;

        while (size > 0) {
                if (unlikely(cur_idx == avail_head))
                        return -1;
                /*
                 * If we have tried all available ring items and still
                 * can't get enough buffers, something abnormal
                 * happened.
                 */
                if (unlikely(++tries > max_tries))
                        return -1;

                if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
                                                &vec_idx, buf_vec,
                                                &head_idx, &len,
                                                VHOST_ACCESS_RW) < 0))
                        return -1;
                len = RTE_MIN(len, size);
                update_shadow_used_ring_split(vq, head_idx, len);
                size -= len;

                cur_idx++;
                *num_buffers += 1;
        }

        *nr_vec = vec_idx;

        return 0;
}

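/*
 * Map the contents of an indirect descriptor table of a packed ring
 * into buf_vec, copying the table first if it is not contiguous in
 * host virtual memory.
 */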
static __rte_always_inline int
fill_vec_buf_packed_indirect(struct virtio_net *dev,
                        struct vhost_virtqueue *vq,
                        struct vring_packed_desc *desc, uint16_t *vec_idx,
                        struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
{
        uint16_t i;
        uint32_t nr_descs;
        uint16_t vec_id = *vec_idx;
        uint64_t dlen;
        struct vring_packed_desc *descs, *idescs = NULL;

        dlen = desc->len;
        descs = (struct vring_packed_desc *)(uintptr_t)
                vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
        if (unlikely(!descs))
                return -1;

        if (unlikely(dlen < desc->len)) {
                /*
                 * The indirect desc table is not contiguous
                 * in process VA space, so we have to copy it.
                 */
                idescs = vhost_alloc_copy_ind_table(dev,
                                vq, desc->addr, desc->len);
                if (unlikely(!idescs))
                        return -1;

                descs = idescs;
        }

        nr_descs = desc->len / sizeof(struct vring_packed_desc);
        if (unlikely(nr_descs >= vq->size)) {
                free_ind_table(idescs);
                return -1;
        }

        for (i = 0; i < nr_descs; i++) {
                if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
                        free_ind_table(idescs);
                        return -1;
                }

                *len += descs[i].len;
                if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
                                                descs[i].addr, descs[i].len,
                                                perm)))
                        return -1;
        }
        *vec_idx = vec_id;

        if (unlikely(!!idescs))
                free_ind_table(idescs);

        return 0;
}

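/*
 * Walk one descriptor chain of a packed ring starting at avail_idx,
 * mapping every buffer into buf_vec and reporting the buffer id,
 * total length and number of descriptors consumed.
 */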
static __rte_always_inline int
fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
                                uint16_t avail_idx, uint16_t *desc_count,
                                struct buf_vector *buf_vec, uint16_t *vec_idx,
                                uint16_t *buf_id, uint32_t *len, uint8_t perm)
{
        bool wrap_counter = vq->avail_wrap_counter;
        struct vring_packed_desc *descs = vq->desc_packed;
        uint16_t vec_id = *vec_idx;

        if (avail_idx < vq->last_avail_idx)
                wrap_counter ^= 1;

        /*
         * Perform a load-acquire barrier in desc_is_avail to
         * enforce the ordering between desc flags and desc
         * content.
         */
        if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
                return -1;

        *desc_count = 0;
        *len = 0;

        while (1) {
                if (unlikely(vec_id >= BUF_VECTOR_MAX))
                        return -1;

                if (unlikely(*desc_count >= vq->size))
                        return -1;

                *desc_count += 1;
                *buf_id = descs[avail_idx].id;

                if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
                        if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
                                                        &descs[avail_idx],
                                                        &vec_id, buf_vec,
                                                        len, perm) < 0))
                                return -1;
                } else {
                        *len += descs[avail_idx].len;

                        if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
                                                        descs[avail_idx].addr,
                                                        descs[avail_idx].len,
                                                        perm)))
                                return -1;
                }

                if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
                        break;

                if (++avail_idx >= vq->size) {
                        avail_idx -= vq->size;
                        wrap_counter ^= 1;
                }
        }

        *vec_idx = vec_id;

        return 0;
}

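/*
 * Copy the virtio-net header into guest memory when the first buffer
 * is too small to hold it in one piece, spreading the header across
 * consecutive buf_vec entries.
 */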
static __rte_noinline void
copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
                struct buf_vector *buf_vec,
                struct virtio_net_hdr_mrg_rxbuf *hdr)
{
        uint64_t len;
        uint64_t remain = dev->vhost_hlen;
        uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
        uint64_t iova = buf_vec->buf_iova;

        while (remain) {
                len = RTE_MIN(remain,
                                buf_vec->buf_len);
                dst = buf_vec->buf_addr;
                rte_memcpy((void *)(uintptr_t)dst,
                                (void *)(uintptr_t)src,
                                len);

                PRINT_PACKET(dev, (uintptr_t)dst,
                                (uint32_t)len, 0);
                vhost_log_cache_write_iova(dev, vq,
                                iova, len);

                remain -= len;
                iova += len;
                src += len;
                buf_vec++;
        }
}

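/*
 * Copy one mbuf chain into the guest buffers described by buf_vec:
 * write the virtio-net header first, then the payload, batching small
 * copies through the batch_copy array and logging written pages.
 */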
static __rte_always_inline int
copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
                            struct rte_mbuf *m, struct buf_vector *buf_vec,
                            uint16_t nr_vec, uint16_t num_buffers)
{
        uint32_t vec_idx = 0;
        uint32_t mbuf_offset, mbuf_avail;
        uint32_t buf_offset, buf_avail;
        uint64_t buf_addr, buf_iova, buf_len;
        uint32_t cpy_len;
        uint64_t hdr_addr;
        struct rte_mbuf *hdr_mbuf;
        struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
        struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
        int error = 0;

        if (unlikely(m == NULL)) {
                error = -1;
                goto out;
        }

        buf_addr = buf_vec[vec_idx].buf_addr;
        buf_iova = buf_vec[vec_idx].buf_iova;
        buf_len = buf_vec[vec_idx].buf_len;

        if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
                error = -1;
                goto out;
        }

        hdr_mbuf = m;
        hdr_addr = buf_addr;
        if (unlikely(buf_len < dev->vhost_hlen))
                hdr = &tmp_hdr;
        else
                hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;

        VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
                dev->vid, num_buffers);

        if (unlikely(buf_len < dev->vhost_hlen)) {
                buf_offset = dev->vhost_hlen - buf_len;
                vec_idx++;
                buf_addr = buf_vec[vec_idx].buf_addr;
                buf_iova = buf_vec[vec_idx].buf_iova;
                buf_len = buf_vec[vec_idx].buf_len;
                buf_avail = buf_len - buf_offset;
        } else {
                buf_offset = dev->vhost_hlen;
                buf_avail = buf_len - dev->vhost_hlen;
        }

        mbuf_avail  = rte_pktmbuf_data_len(m);
        mbuf_offset = 0;
        while (mbuf_avail != 0 || m->next != NULL) {
                /* done with current buf, get the next one */
                if (buf_avail == 0) {
                        vec_idx++;
                        if (unlikely(vec_idx >= nr_vec)) {
                                error = -1;
                                goto out;
                        }

                        buf_addr = buf_vec[vec_idx].buf_addr;
                        buf_iova = buf_vec[vec_idx].buf_iova;
                        buf_len = buf_vec[vec_idx].buf_len;

                        buf_offset = 0;
                        buf_avail  = buf_len;
                }

                /* done with current mbuf, get the next one */
                if (mbuf_avail == 0) {
                        m = m->next;

                        mbuf_offset = 0;
                        mbuf_avail  = rte_pktmbuf_data_len(m);
                }

                if (hdr_addr) {
                        virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
                        if (rxvq_is_mergeable(dev))
                                ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
                                                num_buffers);

                        if (unlikely(hdr == &tmp_hdr)) {
                                copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
                        } else {
                                PRINT_PACKET(dev, (uintptr_t)hdr_addr,
                                                dev->vhost_hlen, 0);
                                vhost_log_cache_write_iova(dev, vq,
                                                buf_vec[0].buf_iova,
                                                dev->vhost_hlen);
                        }

                        hdr_addr = 0;
                }

                cpy_len = RTE_MIN(buf_avail, mbuf_avail);

                if (likely(cpy_len > MAX_BATCH_LEN ||
                                        vq->batch_copy_nb_elems >= vq->size)) {
                        rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
                                rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
                                cpy_len);
                        vhost_log_cache_write_iova(dev, vq,
                                                   buf_iova + buf_offset,
                                                   cpy_len);
                        PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
                                cpy_len, 0);
                } else {
                        batch_copy[vq->batch_copy_nb_elems].dst =
                                (void *)((uintptr_t)(buf_addr + buf_offset));
                        batch_copy[vq->batch_copy_nb_elems].src =
                                rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
                        batch_copy[vq->batch_copy_nb_elems].log_addr =
                                buf_iova + buf_offset;
                        batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
                        vq->batch_copy_nb_elems++;
                }

                mbuf_avail  -= cpy_len;
                mbuf_offset += cpy_len;
                buf_avail  -= cpy_len;
                buf_offset += cpy_len;
        }

out:

        return error;
}

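/*
 * Helpers that build the iovec arrays and the rte_vhost_async_desc
 * handed to the async (DMA) copy engine.
 */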
static __rte_always_inline void
async_fill_vec(struct iovec *v, void *base, size_t len)
{
        v->iov_base = base;
        v->iov_len = len;
}

static __rte_always_inline void
async_fill_iter(struct rte_vhost_iov_iter *it, size_t count,
        struct iovec *vec, unsigned long nr_seg)
{
        it->offset = 0;
        it->count = count;

        if (count) {
                it->iov = vec;
                it->nr_segs = nr_seg;
        } else {
                it->iov = 0;
                it->nr_segs = 0;
        }
}

static __rte_always_inline void
async_fill_desc(struct rte_vhost_async_desc *desc,
        struct rte_vhost_iov_iter *src, struct rte_vhost_iov_iter *dst)
{
        desc->src = src;
        desc->dst = dst;
}

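/*
 * Async variant of copy_mbuf_to_desc(): copies at or above
 * vq->async_threshold are recorded as src/dst iovecs for the DMA
 * engine (using host physical addresses from gpa_to_hpa()), while
 * smaller copies are still done by the CPU.
 */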
static __rte_always_inline int
async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        struct rte_mbuf *m, struct buf_vector *buf_vec,
                        uint16_t nr_vec, uint16_t num_buffers,
                        struct iovec *src_iovec, struct iovec *dst_iovec,
                        struct rte_vhost_iov_iter *src_it,
                        struct rte_vhost_iov_iter *dst_it)
{
        uint32_t vec_idx = 0;
        uint32_t mbuf_offset, mbuf_avail;
        uint32_t buf_offset, buf_avail;
        uint64_t buf_addr, buf_iova, buf_len;
        uint32_t cpy_len, cpy_threshold;
        uint64_t hdr_addr;
        struct rte_mbuf *hdr_mbuf;
        struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
        struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
        int error = 0;

        uint32_t tlen = 0;
        int tvec_idx = 0;
        void *hpa;

        if (unlikely(m == NULL)) {
                error = -1;
                goto out;
        }

        cpy_threshold = vq->async_threshold;

        buf_addr = buf_vec[vec_idx].buf_addr;
        buf_iova = buf_vec[vec_idx].buf_iova;
        buf_len = buf_vec[vec_idx].buf_len;

        if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
                error = -1;
                goto out;
        }

        hdr_mbuf = m;
        hdr_addr = buf_addr;
        if (unlikely(buf_len < dev->vhost_hlen))
                hdr = &tmp_hdr;
        else
                hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;

        VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
                dev->vid, num_buffers);

        if (unlikely(buf_len < dev->vhost_hlen)) {
                buf_offset = dev->vhost_hlen - buf_len;
                vec_idx++;
                buf_addr = buf_vec[vec_idx].buf_addr;
                buf_iova = buf_vec[vec_idx].buf_iova;
                buf_len = buf_vec[vec_idx].buf_len;
                buf_avail = buf_len - buf_offset;
        } else {
                buf_offset = dev->vhost_hlen;
                buf_avail = buf_len - dev->vhost_hlen;
        }

        mbuf_avail  = rte_pktmbuf_data_len(m);
        mbuf_offset = 0;

        while (mbuf_avail != 0 || m->next != NULL) {
                /* done with current buf, get the next one */
                if (buf_avail == 0) {
                        vec_idx++;
                        if (unlikely(vec_idx >= nr_vec)) {
                                error = -1;
                                goto out;
                        }

                        buf_addr = buf_vec[vec_idx].buf_addr;
                        buf_iova = buf_vec[vec_idx].buf_iova;
                        buf_len = buf_vec[vec_idx].buf_len;

                        buf_offset = 0;
                        buf_avail  = buf_len;
                }

                /* done with current mbuf, get the next one */
                if (mbuf_avail == 0) {
                        m = m->next;

                        mbuf_offset = 0;
                        mbuf_avail  = rte_pktmbuf_data_len(m);
                }

                if (hdr_addr) {
                        virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
                        if (rxvq_is_mergeable(dev))
                                ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
                                                num_buffers);

                        if (unlikely(hdr == &tmp_hdr)) {
                                copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
                        } else {
                                PRINT_PACKET(dev, (uintptr_t)hdr_addr,
                                                dev->vhost_hlen, 0);
                                vhost_log_cache_write_iova(dev, vq,
                                                buf_vec[0].buf_iova,
                                                dev->vhost_hlen);
                        }

                        hdr_addr = 0;
                }

                cpy_len = RTE_MIN(buf_avail, mbuf_avail);

                if (unlikely(cpy_len >= cpy_threshold)) {
                        hpa = (void *)(uintptr_t)gpa_to_hpa(dev,
                                        buf_iova + buf_offset, cpy_len);

                        if (unlikely(!hpa)) {
                                error = -1;
                                goto out;
                        }

                        async_fill_vec(src_iovec + tvec_idx,
                                (void *)(uintptr_t)rte_pktmbuf_iova_offset(m,
                                                mbuf_offset), cpy_len);

                        async_fill_vec(dst_iovec + tvec_idx, hpa, cpy_len);

                        tlen += cpy_len;
                        tvec_idx++;
                } else {
                        if (unlikely(vq->batch_copy_nb_elems >= vq->size)) {
                                rte_memcpy(
                                (void *)((uintptr_t)(buf_addr + buf_offset)),
                                rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
                                cpy_len);

                                PRINT_PACKET(dev,
                                        (uintptr_t)(buf_addr + buf_offset),
                                        cpy_len, 0);
                        } else {
                                batch_copy[vq->batch_copy_nb_elems].dst =
                                (void *)((uintptr_t)(buf_addr + buf_offset));
                                batch_copy[vq->batch_copy_nb_elems].src =
                                rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
                                batch_copy[vq->batch_copy_nb_elems].log_addr =
                                        buf_iova + buf_offset;
                                batch_copy[vq->batch_copy_nb_elems].len =
                                        cpy_len;
                                vq->batch_copy_nb_elems++;
                        }
                }

                mbuf_avail  -= cpy_len;
                mbuf_offset += cpy_len;
                buf_avail  -= cpy_len;
                buf_offset += cpy_len;
        }

out:
        async_fill_iter(src_it, tlen, src_iovec, tvec_idx);
        async_fill_iter(dst_it, tlen, dst_iovec, tvec_idx);

        return error;
}

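/*
 * Reserve enough packed-ring descriptors for one packet, copy it into
 * the guest buffers, and queue the used entries in the shadow array.
 */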
static __rte_always_inline int
vhost_enqueue_single_packed(struct virtio_net *dev,
                            struct vhost_virtqueue *vq,
                            struct rte_mbuf *pkt,
                            struct buf_vector *buf_vec,
                            uint16_t *nr_descs)
{
        uint16_t nr_vec = 0;
        uint16_t avail_idx = vq->last_avail_idx;
        uint16_t max_tries, tries = 0;
        uint16_t buf_id = 0;
        uint32_t len = 0;
        uint16_t desc_count;
        uint32_t size = pkt->pkt_len + dev->vhost_hlen;
        uint16_t num_buffers = 0;
        uint32_t buffer_len[vq->size];
        uint16_t buffer_buf_id[vq->size];
        uint16_t buffer_desc_count[vq->size];

        if (rxvq_is_mergeable(dev))
                max_tries = vq->size - 1;
        else
                max_tries = 1;

        while (size > 0) {
                /*
                 * If we have tried all available ring items and still
                 * can't get enough buffers, something abnormal
                 * happened.
                 */
                if (unlikely(++tries > max_tries))
                        return -1;

                if (unlikely(fill_vec_buf_packed(dev, vq,
                                                avail_idx, &desc_count,
                                                buf_vec, &nr_vec,
                                                &buf_id, &len,
                                                VHOST_ACCESS_RW) < 0))
                        return -1;

                len = RTE_MIN(len, size);
                size -= len;

                buffer_len[num_buffers] = len;
                buffer_buf_id[num_buffers] = buf_id;
                buffer_desc_count[num_buffers] = desc_count;
                num_buffers += 1;

                *nr_descs += desc_count;
                avail_idx += desc_count;
                if (avail_idx >= vq->size)
                        avail_idx -= vq->size;
        }

        if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0)
                return -1;

        vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
                                           buffer_desc_count, num_buffers);

        return 0;
}

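/*
 * Enqueue a burst of packets to a split ring: reserve buffers, copy
 * the packets in, flush the shadow used ring and notify the guest.
 */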
static __rte_noinline uint32_t
virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
        struct rte_mbuf **pkts, uint32_t count)
{
        uint32_t pkt_idx = 0;
        uint16_t num_buffers;
        struct buf_vector buf_vec[BUF_VECTOR_MAX];
        uint16_t avail_head;

        /*
         * The ordering between avail index and
         * desc reads needs to be enforced.
         */
        avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);

        rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);

        for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
                uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
                uint16_t nr_vec = 0;

                if (unlikely(reserve_avail_buf_split(dev, vq,
                                                pkt_len, buf_vec, &num_buffers,
                                                avail_head, &nr_vec) < 0)) {
                        VHOST_LOG_DATA(DEBUG,
                                "(%d) failed to get enough desc from vring\n",
                                dev->vid);
                        vq->shadow_used_idx -= num_buffers;
                        break;
                }

                VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
                        dev->vid, vq->last_avail_idx,
                        vq->last_avail_idx + num_buffers);

                if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
                                                buf_vec, nr_vec,
                                                num_buffers) < 0) {
                        vq->shadow_used_idx -= num_buffers;
                        break;
                }

                vq->last_avail_idx += num_buffers;
        }

        do_data_copy_enqueue(dev, vq);

        if (likely(vq->shadow_used_idx)) {
                flush_shadow_used_ring_split(dev, vq);
                vhost_vring_call_split(dev, vq);
        }

        return pkt_idx;
}

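/*
 * Fast path: enqueue PACKED_BATCH_SIZE single-mbuf packets at once
 * when a full, aligned batch of descriptors is available and every
 * packet fits in its descriptor. Returns -1 so the caller can fall
 * back to the single-packet path.
 */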
static __rte_always_inline int
virtio_dev_rx_batch_packed(struct virtio_net *dev,
                           struct vhost_virtqueue *vq,
                           struct rte_mbuf **pkts)
{
        bool wrap_counter = vq->avail_wrap_counter;
        struct vring_packed_desc *descs = vq->desc_packed;
        uint16_t avail_idx = vq->last_avail_idx;
        uint64_t desc_addrs[PACKED_BATCH_SIZE];
        struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
        uint32_t buf_offset = dev->vhost_hlen;
        uint64_t lens[PACKED_BATCH_SIZE];
        uint16_t ids[PACKED_BATCH_SIZE];
        uint16_t i;

        if (unlikely(avail_idx & PACKED_BATCH_MASK))
                return -1;

        if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
                return -1;

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                if (unlikely(pkts[i]->next != NULL))
                        return -1;
                if (unlikely(!desc_is_avail(&descs[avail_idx + i],
                                            wrap_counter)))
                        return -1;
        }

        rte_smp_rmb();

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                lens[i] = descs[avail_idx + i].len;

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                if (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset)))
                        return -1;
        }

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                desc_addrs[i] = vhost_iova_to_vva(dev, vq,
                                                  descs[avail_idx + i].addr,
                                                  &lens[i],
                                                  VHOST_ACCESS_RW);

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                if (unlikely(!desc_addrs[i]))
                        return -1;
                if (unlikely(lens[i] != descs[avail_idx + i].len))
                        return -1;
        }

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
                hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
                                        (uintptr_t)desc_addrs[i];
                lens[i] = pkts[i]->pkt_len + dev->vhost_hlen;
        }

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr);

        vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                rte_memcpy((void *)(uintptr_t)(desc_addrs[i] + buf_offset),
                           rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
                           pkts[i]->pkt_len);
        }

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                vhost_log_cache_write_iova(dev, vq, descs[avail_idx + i].addr,
                                           lens[i]);

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                ids[i] = descs[avail_idx + i].id;

        vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);

        return 0;
}

static __rte_always_inline int16_t
virtio_dev_rx_single_packed(struct virtio_net *dev,
                            struct vhost_virtqueue *vq,
                            struct rte_mbuf *pkt)
{
        struct buf_vector buf_vec[BUF_VECTOR_MAX];
        uint16_t nr_descs = 0;

        rte_smp_rmb();
        if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
                                                 &nr_descs) < 0)) {
                VHOST_LOG_DATA(DEBUG,
                                "(%d) failed to get enough desc from vring\n",
                                dev->vid);
                return -1;
        }

        VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
                        dev->vid, vq->last_avail_idx,
                        vq->last_avail_idx + nr_descs);

        vq_inc_last_avail_packed(vq, nr_descs);

        return 0;
}

static __rte_noinline uint32_t
virtio_dev_rx_packed(struct virtio_net *dev,
                     struct vhost_virtqueue *__rte_restrict vq,
                     struct rte_mbuf **__rte_restrict pkts,
                     uint32_t count)
{
        uint32_t pkt_idx = 0;
        uint32_t remained = count;

        do {
                rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);

                if (remained >= PACKED_BATCH_SIZE) {
                        if (!virtio_dev_rx_batch_packed(dev, vq,
                                                        &pkts[pkt_idx])) {
                                pkt_idx += PACKED_BATCH_SIZE;
                                remained -= PACKED_BATCH_SIZE;
                                continue;
                        }
                }

                if (virtio_dev_rx_single_packed(dev, vq, pkts[pkt_idx]))
                        break;
                pkt_idx++;
                remained--;

        } while (pkt_idx < count);

        if (vq->shadow_used_idx) {
                do_data_copy_enqueue(dev, vq);
                vhost_flush_enqueue_shadow_packed(dev, vq);
        }

        if (pkt_idx)
                vhost_vring_call_packed(dev, vq);

        return pkt_idx;
}

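/*
 * Common enqueue entry point: validate the queue index, take the
 * access lock (and the IOTLB read lock when an IOMMU is used), then
 * dispatch to the packed or split ring implementation.
 */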
static __rte_always_inline uint32_t
virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
        struct rte_mbuf **pkts, uint32_t count)
{
        struct vhost_virtqueue *vq;
        uint32_t nb_tx = 0;

        VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
        if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
                VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
                        dev->vid, __func__, queue_id);
                return 0;
        }

        vq = dev->virtqueue[queue_id];

        rte_spinlock_lock(&vq->access_lock);

        if (unlikely(vq->enabled == 0))
                goto out_access_unlock;

        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
                vhost_user_iotlb_rd_lock(vq);

        if (unlikely(vq->access_ok == 0))
                if (unlikely(vring_translate(dev, vq) < 0))
                        goto out;

        count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
        if (count == 0)
                goto out;

        if (vq_is_packed(dev))
                nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
        else
                nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);

out:
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
                vhost_user_iotlb_rd_unlock(vq);

out_access_unlock:
        rte_spinlock_unlock(&vq->access_lock);

        return nb_tx;
}

1440 uint16_t
1441 rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
1442         struct rte_mbuf **__rte_restrict pkts, uint16_t count)
1443 {
1444         struct virtio_net *dev = get_device(vid);
1445
1446         if (!dev)
1447                 return 0;
1448
1449         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
1450                 VHOST_LOG_DATA(ERR,
1451                         "(%d) %s: built-in vhost net backend is disabled.\n",
1452                         dev->vid, __func__);
1453                 return 0;
1454         }
1455
1456         return virtio_dev_rx(dev, queue_id, pkts, count);
1457 }
1458
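     /*
      * Illustration of the wrap-around math below, assuming a power-of-two
      * ring with vq_size = 256: with pkts_idx = 5 and n_inflight = 10 the
      * start index wraps to (256 - 10 + 5) & 255 = 251; with pkts_idx = 20
      * and n_inflight = 10 it is simply 20 - 10 = 10.
      */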
1459 static __rte_always_inline uint16_t
1460 virtio_dev_rx_async_get_info_idx(uint16_t pkts_idx,
1461         uint16_t vq_size, uint16_t n_inflight)
1462 {
1463         return pkts_idx > n_inflight ? (pkts_idx - n_inflight) :
1464                 (vq_size - n_inflight + pkts_idx) & (vq_size - 1);
1465 }
1466
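     /*
      * Error path for async submission: drain every in-flight copy by
      * polling the application's completion callback, then roll the vring
      * state back to the snapshot (last_idx/shadow_idx) taken before the
      * submission started.
      */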
1467 static __rte_always_inline void
1468 virtio_dev_rx_async_submit_split_err(struct virtio_net *dev,
1469         struct vhost_virtqueue *vq, uint16_t queue_id,
1470         uint16_t last_idx, uint16_t shadow_idx)
1471 {
1472         uint16_t start_idx, pkts_idx, vq_size;
1473         uint64_t *async_pending_info;
1474
1475         pkts_idx = vq->async_pkts_idx;
1476         async_pending_info = vq->async_pending_info;
1477         vq_size = vq->size;
1478         start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
1479                 vq_size, vq->async_pkts_inflight_n);
1480
1481         while (likely((start_idx & (vq_size - 1)) != pkts_idx)) {
1482                 uint64_t n_seg =
1483                         async_pending_info[(start_idx) & (vq_size - 1)] >>
1484                         ASYNC_PENDING_INFO_N_SFT;
1485
1486                 while (n_seg)
1487                         n_seg -= vq->async_ops.check_completed_copies(dev->vid,
1488                                 queue_id, 0, 1);
                     /* mark this slot drained and advance; otherwise the loop never ends */
                     async_pending_info[start_idx & (vq_size - 1)] = 0;
                     start_idx++;
1489         }
1490
1491         vq->async_pkts_inflight_n = 0;
1492         vq->batch_copy_nb_elems = 0;
1493
1494         vq->shadow_used_idx = shadow_idx;
1495         vq->last_avail_idx = last_idx;
1496 }
1497
1498 static __rte_noinline uint32_t
1499 virtio_dev_rx_async_submit_split(struct virtio_net *dev,
1500         struct vhost_virtqueue *vq, uint16_t queue_id,
1501         struct rte_mbuf **pkts, uint32_t count)
1502 {
1503         uint32_t pkt_idx = 0, pkt_burst_idx = 0;
1504         uint16_t num_buffers;
1505         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1506         uint16_t avail_head, last_idx, shadow_idx;
1507
1508         struct rte_vhost_iov_iter *it_pool = vq->it_pool;
1509         struct iovec *vec_pool = vq->vec_pool;
1510         struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
1511         struct iovec *src_iovec = vec_pool;
1512         struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
1513         struct rte_vhost_iov_iter *src_it = it_pool;
1514         struct rte_vhost_iov_iter *dst_it = it_pool + 1;
1515         uint16_t n_free_slot, slot_idx;
1516         int n_pkts = 0;
1517
1518         avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
1519         last_idx = vq->last_avail_idx;
1520         shadow_idx = vq->shadow_used_idx;
1521
1522         /*
1523          * The ordering between avail index and
1524          * desc reads needs to be enforced.
1525          */
1526         rte_smp_rmb();
1527
1528         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1529
1530         for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
1531                 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
1532                 uint16_t nr_vec = 0;
1533
1534                 if (unlikely(reserve_avail_buf_split(dev, vq,
1535                                                 pkt_len, buf_vec, &num_buffers,
1536                                                 avail_head, &nr_vec) < 0)) {
1537                         VHOST_LOG_DATA(DEBUG,
1538                                 "(%d) failed to get enough desc from vring\n",
1539                                 dev->vid);
1540                         vq->shadow_used_idx -= num_buffers;
1541                         break;
1542                 }
1543
1544                 VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
1545                         dev->vid, vq->last_avail_idx,
1546                         vq->last_avail_idx + num_buffers);
1547
1548                 if (async_mbuf_to_desc(dev, vq, pkts[pkt_idx],
1549                                 buf_vec, nr_vec, num_buffers,
1550                                 src_iovec, dst_iovec, src_it, dst_it) < 0) {
1551                         vq->shadow_used_idx -= num_buffers;
1552                         break;
1553                 }
1554
1555                 slot_idx = (vq->async_pkts_idx + pkt_idx) & (vq->size - 1);
1556                 if (src_it->count) {
1557                         async_fill_desc(&tdes[pkt_burst_idx], src_it, dst_it);
1558                         pkt_burst_idx++;
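                             /*
                              * Low 16 bits: descriptor count for this packet;
                              * bits above ASYNC_PENDING_INFO_N_SFT: number of
                              * DMA segments still in flight.
                              */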
1559                         vq->async_pending_info[slot_idx] =
1560                                 num_buffers | (src_it->nr_segs << 16);
1561                         src_iovec += src_it->nr_segs;
1562                         dst_iovec += dst_it->nr_segs;
1563                         src_it += 2;
1564                         dst_it += 2;
1565                 } else {
1566                         vq->async_pending_info[slot_idx] = num_buffers;
1567                         vq->async_pkts_inflight_n++;
1568                 }
1569
1570                 vq->last_avail_idx += num_buffers;
1571
1572                 if (pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
1573                                 (pkt_idx == count - 1 && pkt_burst_idx)) {
1574                         n_pkts = vq->async_ops.transfer_data(dev->vid,
1575                                         queue_id, tdes, 0, pkt_burst_idx);
1576                         src_iovec = vec_pool;
1577                         dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
1578                         src_it = it_pool;
1579                         dst_it = it_pool + 1;
1580
1581                         if (unlikely(n_pkts < (int)pkt_burst_idx)) {
1582                                 vq->async_pkts_inflight_n +=
1583                                         n_pkts > 0 ? n_pkts : 0;
1584                                 virtio_dev_rx_async_submit_split_err(dev,
1585                                         vq, queue_id, last_idx, shadow_idx);
1586                                 return 0;
1587                         }
1588
1589                         pkt_burst_idx = 0;
1590                         vq->async_pkts_inflight_n += n_pkts;
1591                 }
1592         }
1593
1594         if (pkt_burst_idx) {
1595                 n_pkts = vq->async_ops.transfer_data(dev->vid,
1596                                 queue_id, tdes, 0, pkt_burst_idx);
1597                 if (unlikely(n_pkts < (int)pkt_burst_idx)) {
1598                         vq->async_pkts_inflight_n += n_pkts > 0 ? n_pkts : 0;
1599                         virtio_dev_rx_async_submit_split_err(dev, vq, queue_id,
1600                                 last_idx, shadow_idx);
1601                         return 0;
1602                 }
1603
1604                 vq->async_pkts_inflight_n += n_pkts;
1605         }
1606
1607         do_data_copy_enqueue(dev, vq);
1608
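             /*
              * Stash the submitted mbuf pointers in the pending array; the
              * copy is done in two pieces when the write would wrap past
              * the end of the ring.
              */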
1609         n_free_slot = vq->size - vq->async_pkts_idx;
1610         if (n_free_slot > pkt_idx) {
1611                 rte_memcpy(&vq->async_pkts_pending[vq->async_pkts_idx],
1612                         pkts, pkt_idx * sizeof(uintptr_t));
1613                 vq->async_pkts_idx += pkt_idx;
1614         } else {
1615                 rte_memcpy(&vq->async_pkts_pending[vq->async_pkts_idx],
1616                         pkts, n_free_slot * sizeof(uintptr_t));
1617                 rte_memcpy(&vq->async_pkts_pending[0],
1618                         &pkts[n_free_slot],
1619                         (pkt_idx - n_free_slot) * sizeof(uintptr_t));
1620                 vq->async_pkts_idx = pkt_idx - n_free_slot;
1621         }
1622
1623         if (likely(vq->shadow_used_idx))
1624                 async_flush_shadow_used_ring_split(dev, vq);
1625
1626         return pkt_idx;
1627 }
1628
1629 uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
1630                 struct rte_mbuf **pkts, uint16_t count)
1631 {
1632         struct virtio_net *dev = get_device(vid);
1633         struct vhost_virtqueue *vq;
1634         uint16_t n_segs_cpl, n_pkts_put = 0, n_descs = 0;
1635         uint16_t start_idx, pkts_idx, vq_size;
1636         uint16_t n_inflight;
1637         uint64_t *async_pending_info;
1638
1639         if (!dev)
1640                 return 0;
1641
1642         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
1643         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
1644                 VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
1645                         dev->vid, __func__, queue_id);
1646                 return 0;
1647         }
1648
1649         vq = dev->virtqueue[queue_id];
1650
1651         rte_spinlock_lock(&vq->access_lock);
1652
1653         n_inflight = vq->async_pkts_inflight_n;
1654         pkts_idx = vq->async_pkts_idx;
1655         async_pending_info = vq->async_pending_info;
1656         vq_size = vq->size;
1657         start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
1658                 vq_size, vq->async_pkts_inflight_n);
1659
1660         n_segs_cpl = vq->async_ops.check_completed_copies(vid, queue_id,
1661                 0, ASYNC_MAX_POLL_SEG - vq->async_last_seg_n) +
1662                 vq->async_last_seg_n;
1663
1664         rte_smp_wmb();
1665
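             /*
              * Walk the in-flight packets in submission order; a packet is
              * complete only once all of its DMA segments have completed.
              * On a partially completed packet, write the remaining segment
              * count back into the pending info and stop.
              */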
1666         while (likely((n_pkts_put < count) && n_inflight)) {
1667                 uint64_t info = async_pending_info[
1668                         (start_idx + n_pkts_put) & (vq_size - 1)];
1669                 uint64_t n_segs;
1670                 n_pkts_put++;
1671                 n_inflight--;
1672                 n_descs += info & ASYNC_PENDING_INFO_N_MSK;
1673                 n_segs = info >> ASYNC_PENDING_INFO_N_SFT;
1674
1675                 if (n_segs) {
1676                         if (unlikely(n_segs_cpl < n_segs)) {
1677                                 n_pkts_put--;
1678                                 n_inflight++;
1679                                 n_descs -= info & ASYNC_PENDING_INFO_N_MSK;
1680                                 if (n_segs_cpl) {
1681                                         async_pending_info[
1682                                                 (start_idx + n_pkts_put) &
1683                                                 (vq_size - 1)] =
1684                                         ((n_segs - n_segs_cpl) <<
1685                                          ASYNC_PENDING_INFO_N_SFT) |
1686                                         (info & ASYNC_PENDING_INFO_N_MSK);
1687                                         n_segs_cpl = 0;
1688                                 }
1689                                 break;
1690                         }
1691                         n_segs_cpl -= n_segs;
1692                 }
1693         }
1694
1695         vq->async_last_seg_n = n_segs_cpl;
1696
1697         if (n_pkts_put) {
1698                 vq->async_pkts_inflight_n = n_inflight;
1699                 if (likely(vq->enabled && vq->access_ok)) {
1700                         __atomic_add_fetch(&vq->used->idx,
1701                                         n_descs, __ATOMIC_RELEASE);
1702                         vhost_vring_call_split(dev, vq);
1703                 }
1704         }
1705
1706         if (start_idx + n_pkts_put <= vq_size) {
1707                 rte_memcpy(pkts, &vq->async_pkts_pending[start_idx],
1708                         n_pkts_put * sizeof(uintptr_t));
1709         } else {
1710                 rte_memcpy(pkts, &vq->async_pkts_pending[start_idx],
1711                         (vq_size - start_idx) * sizeof(uintptr_t));
1712                 rte_memcpy(&pkts[vq_size - start_idx], vq->async_pkts_pending,
1713                         (n_pkts_put - vq_size + start_idx) * sizeof(uintptr_t));
1714         }
1715
1716         rte_spinlock_unlock(&vq->access_lock);
1717
1718         return n_pkts_put;
1719 }
1720
1721 static __rte_always_inline uint32_t
1722 virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
1723         struct rte_mbuf **pkts, uint32_t count)
1724 {
1725         struct vhost_virtqueue *vq;
1726         uint32_t nb_tx = 0;
1727         bool fallback = false;
1728
1729         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
1730         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
1731                 VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
1732                         dev->vid, __func__, queue_id);
1733                 return 0;
1734         }
1735
1736         vq = dev->virtqueue[queue_id];
1737
1738         rte_spinlock_lock(&vq->access_lock);
1739
1740         if (unlikely(vq->enabled == 0))
1741                 goto out_access_unlock;
1742
1743         if (unlikely(!vq->async_registered)) {
1744                 fallback = true;
1745                 goto out_access_unlock;
1746         }
1747
1748         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1749                 vhost_user_iotlb_rd_lock(vq);
1750
1751         if (unlikely(vq->access_ok == 0))
1752                 if (unlikely(vring_translate(dev, vq) < 0))
1753                         goto out;
1754
1755         count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
1756         if (count == 0)
1757                 goto out;
1758
1759         /* TODO: async enqueue is not implemented for packed queues yet */
1760         if (vq_is_packed(dev))
1761                 nb_tx = 0;
1762         else
1763                 nb_tx = virtio_dev_rx_async_submit_split(dev,
1764                                 vq, queue_id, pkts, count);
1765
1766 out:
1767         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1768                 vhost_user_iotlb_rd_unlock(vq);
1769
1770 out_access_unlock:
1771         rte_spinlock_unlock(&vq->access_lock);
1772
1773         if (fallback)
1774                 return rte_vhost_enqueue_burst(dev->vid, queue_id, pkts, count);
1775
1776         return nb_tx;
1777 }
1778
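     /*
      * A minimal usage sketch (illustrative only; vid, q, burst, comp, nb,
      * done and count are placeholders): the async submit call is normally
      * paired with completion polling, e.g.
      *
      *     nb = rte_vhost_submit_enqueue_burst(vid, q, burst, count);
      *     ...
      *     done = rte_vhost_poll_enqueue_completed(vid, q, comp, count);
      *
      * mbufs handed to the submit call must not be freed by the caller
      * until they are returned by the completion call.
      */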
1779 uint16_t
1780 rte_vhost_submit_enqueue_burst(int vid, uint16_t queue_id,
1781                 struct rte_mbuf **pkts, uint16_t count)
1782 {
1783         struct virtio_net *dev = get_device(vid);
1784
1785         if (!dev)
1786                 return 0;
1787
1788         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
1789                 VHOST_LOG_DATA(ERR,
1790                         "(%d) %s: built-in vhost net backend is disabled.\n",
1791                         dev->vid, __func__);
1792                 return 0;
1793         }
1794
1795         return virtio_dev_rx_async_submit(dev, queue_id, pkts, count);
1796 }
1797
1798 static inline bool
1799 virtio_net_with_host_offload(struct virtio_net *dev)
1800 {
1801         if (dev->features &
1802                         ((1ULL << VIRTIO_NET_F_CSUM) |
1803                          (1ULL << VIRTIO_NET_F_HOST_ECN) |
1804                          (1ULL << VIRTIO_NET_F_HOST_TSO4) |
1805                          (1ULL << VIRTIO_NET_F_HOST_TSO6) |
1806                          (1ULL << VIRTIO_NET_F_HOST_UFO)))
1807                 return true;
1808
1809         return false;
1810 }
1811
1812 static void
1813 parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
1814 {
1815         struct rte_ipv4_hdr *ipv4_hdr;
1816         struct rte_ipv6_hdr *ipv6_hdr;
1817         void *l3_hdr = NULL;
1818         struct rte_ether_hdr *eth_hdr;
1819         uint16_t ethertype;
1820
1821         eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1822
1823         m->l2_len = sizeof(struct rte_ether_hdr);
1824         ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
1825
1826         if (ethertype == RTE_ETHER_TYPE_VLAN) {
1827                 struct rte_vlan_hdr *vlan_hdr =
1828                         (struct rte_vlan_hdr *)(eth_hdr + 1);
1829
1830                 m->l2_len += sizeof(struct rte_vlan_hdr);
1831                 ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
1832         }
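             /* e.g. l2_len is 14 bytes for plain Ethernet, 18 with one VLAN tag */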
1833
1834         l3_hdr = (char *)eth_hdr + m->l2_len;
1835
1836         switch (ethertype) {
1837         case RTE_ETHER_TYPE_IPV4:
1838                 ipv4_hdr = l3_hdr;
1839                 *l4_proto = ipv4_hdr->next_proto_id;
1840                 m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
1841                 *l4_hdr = (char *)l3_hdr + m->l3_len;
1842                 m->ol_flags |= PKT_TX_IPV4;
1843                 break;
1844         case RTE_ETHER_TYPE_IPV6:
1845                 ipv6_hdr = l3_hdr;
1846                 *l4_proto = ipv6_hdr->proto;
1847                 m->l3_len = sizeof(struct rte_ipv6_hdr);
1848                 *l4_hdr = (char *)l3_hdr + m->l3_len;
1849                 m->ol_flags |= PKT_TX_IPV6;
1850                 break;
1851         default:
1852                 m->l3_len = 0;
1853                 *l4_proto = 0;
1854                 *l4_hdr = NULL;
1855                 break;
1856         }
1857 }
1858
1859 static __rte_always_inline void
1860 vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
1861 {
1862         uint16_t l4_proto = 0;
1863         void *l4_hdr = NULL;
1864         struct rte_tcp_hdr *tcp_hdr = NULL;
1865
1866         if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
1867                 return;
1868
1869         parse_ethernet(m, &l4_proto, &l4_hdr);
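             /*
              * VIRTIO_NET_HDR_F_NEEDS_CSUM means the guest left the L4
              * checksum partially computed and csum_start/csum_offset
              * locate it; the check below only accepts a checksum that
              * starts right at the L4 header, and maps csum_offset onto
              * the matching PKT_TX_*_CKSUM offload flag.
              */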
1870         if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1871                 if (hdr->csum_start == (m->l2_len + m->l3_len)) {
1872                         switch (hdr->csum_offset) {
1873                         case (offsetof(struct rte_tcp_hdr, cksum)):
1874                                 if (l4_proto == IPPROTO_TCP)
1875                                         m->ol_flags |= PKT_TX_TCP_CKSUM;
1876                                 break;
1877                         case (offsetof(struct rte_udp_hdr, dgram_cksum)):
1878                                 if (l4_proto == IPPROTO_UDP)
1879                                         m->ol_flags |= PKT_TX_UDP_CKSUM;
1880                                 break;
1881                         case (offsetof(struct rte_sctp_hdr, cksum)):
1882                                 if (l4_proto == IPPROTO_SCTP)
1883                                         m->ol_flags |= PKT_TX_SCTP_CKSUM;
1884                                 break;
1885                         default:
1886                                 break;
1887                         }
1888                 }
1889         }
1890
1891         if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1892                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1893                 case VIRTIO_NET_HDR_GSO_TCPV4:
1894                 case VIRTIO_NET_HDR_GSO_TCPV6:
1895                         tcp_hdr = l4_hdr;
1896                         m->ol_flags |= PKT_TX_TCP_SEG;
1897                         m->tso_segsz = hdr->gso_size;
1898                         m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
1899                         break;
1900                 case VIRTIO_NET_HDR_GSO_UDP:
1901                         m->ol_flags |= PKT_TX_UDP_SEG;
1902                         m->tso_segsz = hdr->gso_size;
1903                         m->l4_len = sizeof(struct rte_udp_hdr);
1904                         break;
1905                 default:
1906                         VHOST_LOG_DATA(WARNING,
1907                                 "unsupported gso type %u.\n", hdr->gso_type);
1908                         break;
1909                 }
1910         }
1911 }
1912
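     /*
      * Gather a virtio-net header that is scattered across several
      * descriptor buffers into one contiguous copy; assumes the chain
      * holds at least sizeof(struct virtio_net_hdr) bytes.
      */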
1913 static __rte_noinline void
1914 copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
1915                 struct buf_vector *buf_vec)
1916 {
1917         uint64_t len;
1918         uint64_t remain = sizeof(struct virtio_net_hdr);
1919         uint64_t src;
1920         uint64_t dst = (uint64_t)(uintptr_t)hdr;
1921
1922         while (remain) {
1923                 len = RTE_MIN(remain, buf_vec->buf_len);
1924                 src = buf_vec->buf_addr;
1925                 rte_memcpy((void *)(uintptr_t)dst,
1926                                 (void *)(uintptr_t)src, len);
1927
1928                 remain -= len;
1929                 dst += len;
1930                 buf_vec++;
1931         }
1932 }
1933
1934 static __rte_always_inline int
1935 copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
1936                   struct buf_vector *buf_vec, uint16_t nr_vec,
1937                   struct rte_mbuf *m, struct rte_mempool *mbuf_pool)
1938 {
1939         uint32_t buf_avail, buf_offset;
1940         uint64_t buf_addr, buf_iova, buf_len;
1941         uint32_t mbuf_avail, mbuf_offset;
1942         uint32_t cpy_len;
1943         struct rte_mbuf *cur = m, *prev = m;
1944         struct virtio_net_hdr tmp_hdr;
1945         struct virtio_net_hdr *hdr = NULL;
1946         /* A counter to guard against an endless desc chain loop */
1947         uint16_t vec_idx = 0;
1948         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
1949         int error = 0;
1950
1951         buf_addr = buf_vec[vec_idx].buf_addr;
1952         buf_iova = buf_vec[vec_idx].buf_iova;
1953         buf_len = buf_vec[vec_idx].buf_len;
1954
1955         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
1956                 error = -1;
1957                 goto out;
1958         }
1959
1960         if (virtio_net_with_host_offload(dev)) {
1961                 if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
1962                         /*
1963                          * No luck, the virtio-net header doesn't fit
1964                          * in a contiguous virtual area.
1965                          */
1966                         copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec);
1967                         hdr = &tmp_hdr;
1968                 } else {
1969                         hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
1970                 }
1971         }
1972
1973         /*
1974          * A virtio driver normally uses at least 2 desc buffers
1975          * for Tx: the first to store the header, and the others
1976          * to store the data.
1977          */
1978         if (unlikely(buf_len < dev->vhost_hlen)) {
1979                 buf_offset = dev->vhost_hlen - buf_len;
1980                 vec_idx++;
1981                 buf_addr = buf_vec[vec_idx].buf_addr;
1982                 buf_iova = buf_vec[vec_idx].buf_iova;
1983                 buf_len = buf_vec[vec_idx].buf_len;
1984                 buf_avail  = buf_len - buf_offset;
1985         } else if (buf_len == dev->vhost_hlen) {
1986                 if (unlikely(++vec_idx >= nr_vec))
1987                         goto out;
1988                 buf_addr = buf_vec[vec_idx].buf_addr;
1989                 buf_iova = buf_vec[vec_idx].buf_iova;
1990                 buf_len = buf_vec[vec_idx].buf_len;
1991
1992                 buf_offset = 0;
1993                 buf_avail = buf_len;
1994         } else {
1995                 buf_offset = dev->vhost_hlen;
1996                 buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
1997         }
1998
1999         PRINT_PACKET(dev,
2000                         (uintptr_t)(buf_addr + buf_offset),
2001                         (uint32_t)buf_avail, 0);
2002
2003         mbuf_offset = 0;
2004         mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
2005         while (1) {
2006                 uint64_t hpa;
2007
2008                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
2009
2010                 /*
2011                  * A desc buf might span two host physical pages that are
2012                  * not contiguous. In such a case (gpa_to_hpa returns 0), the
2013                  * data will be copied even though zero copy is enabled.
2014                  */
2015                 if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
2016                                         buf_iova + buf_offset, cpy_len)))) {
2017                         cur->data_len = cpy_len;
2018                         cur->data_off = 0;
2019                         cur->buf_addr =
2020                                 (void *)(uintptr_t)(buf_addr + buf_offset);
2021                         cur->buf_iova = hpa;
2022
2023                         /*
2024                          * In zero copy mode, one mbuf can only reference data
2025                          * for one desc buf, or part of one.
2026                          */
2027                         mbuf_avail = cpy_len;
2028                 } else {
2029                         if (likely(cpy_len > MAX_BATCH_LEN ||
2030                                    vq->batch_copy_nb_elems >= vq->size ||
2031                                    (hdr && cur == m))) {
2032                                 rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
2033                                                                    mbuf_offset),
2034                                            (void *)((uintptr_t)(buf_addr +
2035                                                            buf_offset)),
2036                                            cpy_len);
2037                         } else {
2038                                 batch_copy[vq->batch_copy_nb_elems].dst =
2039                                         rte_pktmbuf_mtod_offset(cur, void *,
2040                                                                 mbuf_offset);
2041                                 batch_copy[vq->batch_copy_nb_elems].src =
2042                                         (void *)((uintptr_t)(buf_addr +
2043                                                                 buf_offset));
2044                                 batch_copy[vq->batch_copy_nb_elems].len =
2045                                         cpy_len;
2046                                 vq->batch_copy_nb_elems++;
2047                         }
2048                 }
2049
2050                 mbuf_avail  -= cpy_len;
2051                 mbuf_offset += cpy_len;
2052                 buf_avail -= cpy_len;
2053                 buf_offset += cpy_len;
2054
2055                 /* This buf has reached its end; get the next one */
2056                 if (buf_avail == 0) {
2057                         if (++vec_idx >= nr_vec)
2058                                 break;
2059
2060                         buf_addr = buf_vec[vec_idx].buf_addr;
2061                         buf_iova = buf_vec[vec_idx].buf_iova;
2062                         buf_len = buf_vec[vec_idx].buf_len;
2063
2064                         buf_offset = 0;
2065                         buf_avail  = buf_len;
2066
2067                         PRINT_PACKET(dev, (uintptr_t)buf_addr,
2068                                         (uint32_t)buf_avail, 0);
2069                 }
2070
2071                 /*
2072                  * This mbuf has reached its end; get a new one
2073                  * to hold more data.
2074                  */
2075                 if (mbuf_avail == 0) {
2076                         cur = rte_pktmbuf_alloc(mbuf_pool);
2077                         if (unlikely(cur == NULL)) {
2078                                 VHOST_LOG_DATA(ERR, "Failed to "
2079                                         "allocate memory for mbuf.\n");
2080                                 error = -1;
2081                                 goto out;
2082                         }
2083                         if (unlikely(dev->dequeue_zero_copy))
2084                                 rte_mbuf_refcnt_update(cur, 1);
2085
2086                         prev->next = cur;
2087                         prev->data_len = mbuf_offset;
2088                         m->nb_segs += 1;
2089                         m->pkt_len += mbuf_offset;
2090                         prev = cur;
2091
2092                         mbuf_offset = 0;
2093                         mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
2094                 }
2095         }
2096
2097         prev->data_len = mbuf_offset;
2098         m->pkt_len    += mbuf_offset;
2099
2100         if (hdr)
2101                 vhost_dequeue_offload(hdr, m);
2102
2103 out:
2104
2105         return error;
2106 }
2107
2108 static __rte_always_inline struct zcopy_mbuf *
2109 get_zmbuf(struct vhost_virtqueue *vq)
2110 {
2111         uint16_t i;
2112         uint16_t last;
2113         int tries = 0;
2114
2115         /* search [last_zmbuf_idx, zmbuf_size) */
2116         i = vq->last_zmbuf_idx;
2117         last = vq->zmbuf_size;
2118
2119 again:
2120         for (; i < last; i++) {
2121                 if (vq->zmbufs[i].in_use == 0) {
2122                         vq->last_zmbuf_idx = i + 1;
2123                         vq->zmbufs[i].in_use = 1;
2124                         return &vq->zmbufs[i];
2125                 }
2126         }
2127
2128         tries++;
2129         if (tries == 1) {
2130                 /* search [0, last_zmbuf_idx) */
2131                 i = 0;
2132                 last = vq->last_zmbuf_idx;
2133                 goto again;
2134         }
2135
2136         return NULL;
2137 }
2138
2139 static void
2140 virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)
2141 {
2142         rte_free(opaque);
2143 }
2144
2145 static int
2146 virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
2147 {
2148         struct rte_mbuf_ext_shared_info *shinfo = NULL;
2149         uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
2150         uint16_t buf_len;
2151         rte_iova_t iova;
2152         void *buf;
2153
2154         /* Try to store shinfo in the pkt buffer itself to reduce the amount
2155          * of memory required; otherwise store shinfo in the newly allocated buffer.
2156          */
2157         if (rte_pktmbuf_tailroom(pkt) >= sizeof(*shinfo))
2158                 shinfo = rte_pktmbuf_mtod(pkt,
2159                                           struct rte_mbuf_ext_shared_info *);
2160         else {
2161                 total_len += sizeof(*shinfo) + sizeof(uintptr_t);
2162                 total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
2163         }
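             /*
              * For illustration, with the default RTE_PKTMBUF_HEADROOM of
              * 128 bytes and size = 1500, total_len starts at 1628; when
              * shinfo cannot live in the pkt's tailroom it grows further
              * by sizeof(*shinfo) plus alignment padding.
              */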
2164
2165         if (unlikely(total_len > UINT16_MAX))
2166                 return -ENOSPC;
2167
2168         buf_len = total_len;
2169         buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
2170         if (unlikely(buf == NULL))
2171                 return -ENOMEM;
2172
2173         /* Initialize shinfo */
2174         if (shinfo) {
2175                 shinfo->free_cb = virtio_dev_extbuf_free;
2176                 shinfo->fcb_opaque = buf;
2177                 rte_mbuf_ext_refcnt_set(shinfo, 1);
2178         } else {
2179                 shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
2180                                               virtio_dev_extbuf_free, buf);
2181                 if (unlikely(shinfo == NULL)) {
2182                         rte_free(buf);
2183                         VHOST_LOG_DATA(ERR, "Failed to init shinfo\n");
2184                         return -1;
2185                 }
2186         }
2187
2188         iova = rte_malloc_virt2iova(buf);
2189         rte_pktmbuf_attach_extbuf(pkt, buf, iova, buf_len, shinfo);
2190         rte_pktmbuf_reset_headroom(pkt);
2191
2192         return 0;
2193 }
2194
2195 /*
2196  * Allocate a host-supported pktmbuf for the given data length.
2197  */
2198 static __rte_always_inline struct rte_mbuf *
2199 virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
2200                          uint32_t data_len)
2201 {
2202         struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp);
2203
2204         if (unlikely(pkt == NULL)) {
2205                 VHOST_LOG_DATA(ERR,
2206                         "Failed to allocate memory for mbuf.\n");
2207                 return NULL;
2208         }
2209
2210         if (rte_pktmbuf_tailroom(pkt) >= data_len)
2211                 return pkt;
2212
2213         /* attach an external buffer if supported */
2214         if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
2215                 return pkt;
2216
2217         /* check if chained buffers are allowed */
2218         if (!dev->linearbuf)
2219                 return pkt;
2220
2221         /* Data doesn't fit into the buffer and the host supports
2222          * only linear buffers, so fail the allocation.
2223          */
2224         rte_pktmbuf_free(pkt);
2225
2226         return NULL;
2227 }
2228
2229 static __rte_noinline uint16_t
2230 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
2231         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
2232 {
2233         uint16_t i;
2234         uint16_t free_entries;
2235         uint16_t dropped = 0;
2236         static bool allocerr_warned;
2237
2238         if (unlikely(dev->dequeue_zero_copy)) {
2239                 struct zcopy_mbuf *zmbuf, *next;
2240
2241                 for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
2242                      zmbuf != NULL; zmbuf = next) {
2243                         next = TAILQ_NEXT(zmbuf, next);
2244
2245                         if (mbuf_is_consumed(zmbuf->mbuf)) {
2246                                 update_shadow_used_ring_split(vq,
2247                                                 zmbuf->desc_idx, 0);
2248                                 TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
2249                                 restore_mbuf(zmbuf->mbuf);
2250                                 rte_pktmbuf_free(zmbuf->mbuf);
2251                                 put_zmbuf(zmbuf);
2252                                 vq->nr_zmbuf -= 1;
2253                         }
2254                 }
2255
2256                 if (likely(vq->shadow_used_idx)) {
2257                         flush_shadow_used_ring_split(dev, vq);
2258                         vhost_vring_call_split(dev, vq);
2259                 }
2260         }
2261
2262         /*
2263          * The ordering between avail index and
2264          * desc reads needs to be enforced.
2265          */
2266         free_entries = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE) -
2267                         vq->last_avail_idx;
2268         if (free_entries == 0)
2269                 return 0;
2270
2271         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
2272
2273         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
2274
2275         count = RTE_MIN(count, MAX_PKT_BURST);
2276         count = RTE_MIN(count, free_entries);
2277         VHOST_LOG_DATA(DEBUG, "(%d) about to dequeue %u buffers\n",
2278                         dev->vid, count);
2279
2280         for (i = 0; i < count; i++) {
2281                 struct buf_vector buf_vec[BUF_VECTOR_MAX];
2282                 uint16_t head_idx;
2283                 uint32_t buf_len;
2284                 uint16_t nr_vec = 0;
2285                 int err;
2286
2287                 if (unlikely(fill_vec_buf_split(dev, vq,
2288                                                 vq->last_avail_idx + i,
2289                                                 &nr_vec, buf_vec,
2290                                                 &head_idx, &buf_len,
2291                                                 VHOST_ACCESS_RO) < 0))
2292                         break;
2293
2294                 if (likely(dev->dequeue_zero_copy == 0))
2295                         update_shadow_used_ring_split(vq, head_idx, 0);
2296
2297                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
2298                 if (unlikely(pkts[i] == NULL)) {
2299                         /*
2300                          * mbuf allocation fails for jumbo packets when external
2301                          * buffer allocation is not allowed and a linear
2302                          * buffer is required. Drop this packet.
2303                          */
2304                         if (!allocerr_warned) {
2305                                 VHOST_LOG_DATA(ERR,
2306                                         "Failed mbuf alloc of size %d from %s on %s.\n",
2307                                         buf_len, mbuf_pool->name, dev->ifname);
2308                                 allocerr_warned = true;
2309                         }
2310                         dropped += 1;
2311                         i++;
2312                         break;
2313                 }
2314
2315                 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
2316                                 mbuf_pool);
2317                 if (unlikely(err)) {
2318                         rte_pktmbuf_free(pkts[i]);
2319                         if (!allocerr_warned) {
2320                                 VHOST_LOG_DATA(ERR,
2321                                         "Failed to copy desc to mbuf on %s.\n",
2322                                         dev->ifname);
2323                                 allocerr_warned = true;
2324                         }
2325                         dropped += 1;
2326                         i++;
2327                         break;
2328                 }
2329
2330                 if (unlikely(dev->dequeue_zero_copy)) {
2331                         struct zcopy_mbuf *zmbuf;
2332
2333                         zmbuf = get_zmbuf(vq);
2334                         if (!zmbuf) {
2335                                 rte_pktmbuf_free(pkts[i]);
2336                                 dropped += 1;
2337                                 i++;
2338                                 break;
2339                         }
2340                         zmbuf->mbuf = pkts[i];
2341                         zmbuf->desc_idx = head_idx;
2342
2343                         /*
2344                          * Pin the mbuf with an extra reference; we check
2345                          * later whether the mbuf has been freed (i.e. we
2346                          * were its last user), and only then can the used
2347                          * ring be updated safely.
2348                          */
2349                         rte_mbuf_refcnt_update(pkts[i], 1);
2350
2351                         vq->nr_zmbuf += 1;
2352                         TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
2353                 }
2354         }
2355         vq->last_avail_idx += i;
2356
2357         if (likely(dev->dequeue_zero_copy == 0)) {
2358                 do_data_copy_dequeue(vq);
2359                 if (unlikely(i < count))
2360                         vq->shadow_used_idx = i;
2361                 if (likely(vq->shadow_used_idx)) {
2362                         flush_shadow_used_ring_split(dev, vq);
2363                         vhost_vring_call_split(dev, vq);
2364                 }
2365         }
2366
2367         return (i - dropped);
2368 }
2369
2370 static __rte_always_inline int
2371 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
2372                                  struct vhost_virtqueue *vq,
2373                                  struct rte_mempool *mbuf_pool,
2374                                  struct rte_mbuf **pkts,
2375                                  uint16_t avail_idx,
2376                                  uintptr_t *desc_addrs,
2377                                  uint16_t *ids)
2378 {
2379         bool wrap = vq->avail_wrap_counter;
2380         struct vring_packed_desc *descs = vq->desc_packed;
2381         struct virtio_net_hdr *hdr;
2382         uint64_t lens[PACKED_BATCH_SIZE];
2383         uint64_t buf_lens[PACKED_BATCH_SIZE];
2384         uint32_t buf_offset = dev->vhost_hlen;
2385         uint16_t flags, i;
2386
2387         if (unlikely(avail_idx & PACKED_BATCH_MASK))
2388                 return -1;
2389         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
2390                 return -1;
2391
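             /*
              * A packed-ring descriptor is available when its AVAIL flag
              * matches the driver's wrap counter while its USED flag does
              * not; every descriptor in the batch must also be free of
              * the flags that force the single-packet path.
              */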
2392         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2393                 flags = descs[avail_idx + i].flags;
2394                 if (unlikely((wrap != !!(flags & VRING_DESC_F_AVAIL)) ||
2395                              (wrap == !!(flags & VRING_DESC_F_USED))  ||
2396                              (flags & PACKED_DESC_SINGLE_DEQUEUE_FLAG)))
2397                         return -1;
2398         }
2399
2400         rte_smp_rmb();
2401
2402         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2403                 lens[i] = descs[avail_idx + i].len;
2404
2405         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2406                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
2407                                                   descs[avail_idx + i].addr,
2408                                                   &lens[i], VHOST_ACCESS_RW);
2409         }
2410
2411         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2412                 if (unlikely(!desc_addrs[i]))
2413                         return -1;
2414                 if (unlikely((lens[i] != descs[avail_idx + i].len)))
2415                         return -1;
2416         }
2417
2418         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2419                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]);
2420                 if (!pkts[i])
2421                         goto free_buf;
2422         }
2423
2424         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2425                 buf_lens[i] = pkts[i]->buf_len - pkts[i]->data_off;
2426
2427         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2428                 if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
2429                         goto free_buf;
2430         }
2431
2432         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2433                 pkts[i]->pkt_len = descs[avail_idx + i].len - buf_offset;
2434                 pkts[i]->data_len = pkts[i]->pkt_len;
2435                 ids[i] = descs[avail_idx + i].id;
2436         }
2437
2438         if (virtio_net_with_host_offload(dev)) {
2439                 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2440                         hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
2441                         vhost_dequeue_offload(hdr, pkts[i]);
2442                 }
2443         }
2444
2445         return 0;
2446
2447 free_buf:
2448         for (i = 0; i < PACKED_BATCH_SIZE; i++)
2449                 rte_pktmbuf_free(pkts[i]);
2450
2451         return -1;
2452 }
2453
2454 static __rte_always_inline int
2455 virtio_dev_tx_batch_packed(struct virtio_net *dev,
2456                            struct vhost_virtqueue *vq,
2457                            struct rte_mempool *mbuf_pool,
2458                            struct rte_mbuf **pkts)
2459 {
2460         uint16_t avail_idx = vq->last_avail_idx;
2461         uint32_t buf_offset = dev->vhost_hlen;
2462         uintptr_t desc_addrs[PACKED_BATCH_SIZE];
2463         uint16_t ids[PACKED_BATCH_SIZE];
2464         uint16_t i;
2465
2466         if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
2467                                              avail_idx, desc_addrs, ids))
2468                 return -1;
2469
2470         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2471                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
2472
2473         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2474                 rte_memcpy(rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
2475                            (void *)(uintptr_t)(desc_addrs[i] + buf_offset),
2476                            pkts[i]->pkt_len);
2477
2478         if (virtio_net_is_inorder(dev))
2479                 vhost_shadow_dequeue_batch_packed_inorder(vq,
2480                         ids[PACKED_BATCH_SIZE - 1]);
2481         else
2482                 vhost_shadow_dequeue_batch_packed(dev, vq, ids);
2483
2484         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
2485
2486         return 0;
2487 }
2488
2489 static __rte_always_inline int
2490 vhost_dequeue_single_packed(struct virtio_net *dev,
2491                             struct vhost_virtqueue *vq,
2492                             struct rte_mempool *mbuf_pool,
2493                             struct rte_mbuf **pkts,
2494                             uint16_t *buf_id,
2495                             uint16_t *desc_count)
2496 {
2497         struct buf_vector buf_vec[BUF_VECTOR_MAX];
2498         uint32_t buf_len;
2499         uint16_t nr_vec = 0;
2500         int err;
2501         static bool allocerr_warned;
2502
2503         if (unlikely(fill_vec_buf_packed(dev, vq,
2504                                          vq->last_avail_idx, desc_count,
2505                                          buf_vec, &nr_vec,
2506                                          buf_id, &buf_len,
2507                                          VHOST_ACCESS_RO) < 0))
2508                 return -1;
2509
2510         *pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
2511         if (unlikely(*pkts == NULL)) {
2512                 if (!allocerr_warned) {
2513                         VHOST_LOG_DATA(ERR,
2514                                 "Failed mbuf alloc of size %d from %s on %s.\n",
2515                                 buf_len, mbuf_pool->name, dev->ifname);
2516                         allocerr_warned = true;
2517                 }
2518                 return -1;
2519         }
2520
2521         err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
2522                                 mbuf_pool);
2523         if (unlikely(err)) {
2524                 if (!allocerr_warned) {
2525                         VHOST_LOG_DATA(ERR,
2526                                 "Failed to copy desc to mbuf on %s.\n",
2527                                 dev->ifname);
2528                         allocerr_warned = true;
2529                 }
2530                 rte_pktmbuf_free(*pkts);
2531                 return -1;
2532         }
2533
2534         return 0;
2535 }
2536
2537 static __rte_always_inline int
2538 virtio_dev_tx_single_packed(struct virtio_net *dev,
2539                             struct vhost_virtqueue *vq,
2540                             struct rte_mempool *mbuf_pool,
2541                             struct rte_mbuf **pkts)
2542 {
2544         uint16_t buf_id, desc_count = 0;
2545         int ret;
2546
2547         ret = vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
2548                                         &desc_count);
2549
2550         if (likely(desc_count > 0)) {
2551                 if (virtio_net_is_inorder(dev))
2552                         vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
2553                                                                    desc_count);
2554                 else
2555                         vhost_shadow_dequeue_single_packed(vq, buf_id,
2556                                         desc_count);
2557
2558                 vq_inc_last_avail_packed(vq, desc_count);
2559         }
2560
2561         return ret;
2562 }
2563
2564 static __rte_always_inline int
2565 virtio_dev_tx_batch_packed_zmbuf(struct virtio_net *dev,
2566                                  struct vhost_virtqueue *vq,
2567                                  struct rte_mempool *mbuf_pool,
2568                                  struct rte_mbuf **pkts)
2569 {
2570         struct zcopy_mbuf *zmbufs[PACKED_BATCH_SIZE];
2571         uintptr_t desc_addrs[PACKED_BATCH_SIZE];
2572         uint16_t ids[PACKED_BATCH_SIZE];
2573         uint16_t i;
2574
2575         uint16_t avail_idx = vq->last_avail_idx;
2576
2577         if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
2578                                              avail_idx, desc_addrs, ids))
2579                 return -1;
2580
2581         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2582                 zmbufs[i] = get_zmbuf(vq);
2583
2584         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2585                 if (!zmbufs[i])
2586                         goto free_pkt;
2587         }
2588
2589         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2590                 zmbufs[i]->mbuf = pkts[i];
2591                 zmbufs[i]->desc_idx = ids[i];
2592                 zmbufs[i]->desc_count = 1;
2593         }
2594
2595         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2596                 rte_mbuf_refcnt_update(pkts[i], 1);
2597
2598         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2599                 TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbufs[i], next);
2600
2601         vq->nr_zmbuf += PACKED_BATCH_SIZE;
2602         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
2603
2604         return 0;
2605
2606 free_pkt:
2607         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2608                 rte_pktmbuf_free(pkts[i]);
2609
2610         return -1;
2611 }
2612
2613 static __rte_always_inline int
2614 virtio_dev_tx_single_packed_zmbuf(struct virtio_net *dev,
2615                                   struct vhost_virtqueue *vq,
2616                                   struct rte_mempool *mbuf_pool,
2617                                   struct rte_mbuf **pkts)
2618 {
2619         uint16_t buf_id, desc_count;
2620         struct zcopy_mbuf *zmbuf;
2621
2622         if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
2623                                         &desc_count))
2624                 return -1;
2625
2626         zmbuf = get_zmbuf(vq);
2627         if (!zmbuf) {
2628                 rte_pktmbuf_free(*pkts);
2629                 return -1;
2630         }
2631         zmbuf->mbuf = *pkts;
2632         zmbuf->desc_idx = buf_id;
2633         zmbuf->desc_count = desc_count;
2634
2635         rte_mbuf_refcnt_update(*pkts, 1);
2636
2637         vq->nr_zmbuf += 1;
2638         TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
2639
2640         vq_inc_last_avail_packed(vq, desc_count);
2641         return 0;
2642 }
2643
2644 static __rte_always_inline void
2645 free_zmbuf(struct vhost_virtqueue *vq)
2646 {
2647         struct zcopy_mbuf *next = NULL;
2648         struct zcopy_mbuf *zmbuf;
2649
2650         for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
2651              zmbuf != NULL; zmbuf = next) {
2652                 next = TAILQ_NEXT(zmbuf, next);
2653
2654                 uint16_t last_used_idx = vq->last_used_idx;
2655
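                     /*
                      * Returning a packed descriptor means setting both the
                      * AVAIL and USED flag bits to the current used wrap
                      * counter; the write barrier below orders the id/len
                      * stores before the flags store that publishes the
                      * entry to the guest.
                      */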
2656                 if (mbuf_is_consumed(zmbuf->mbuf)) {
2657                         uint16_t flags;
2658                         flags = vq->desc_packed[last_used_idx].flags;
2659                         if (vq->used_wrap_counter) {
2660                                 flags |= VRING_DESC_F_USED;
2661                                 flags |= VRING_DESC_F_AVAIL;
2662                         } else {
2663                                 flags &= ~VRING_DESC_F_USED;
2664                                 flags &= ~VRING_DESC_F_AVAIL;
2665                         }
2666
2667                         vq->desc_packed[last_used_idx].id = zmbuf->desc_idx;
2668                         vq->desc_packed[last_used_idx].len = 0;
2669
2670                         rte_smp_wmb();
2671                         vq->desc_packed[last_used_idx].flags = flags;
2672
2673                         vq_inc_last_used_packed(vq, zmbuf->desc_count);
2674
2675                         TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
2676                         restore_mbuf(zmbuf->mbuf);
2677                         rte_pktmbuf_free(zmbuf->mbuf);
2678                         put_zmbuf(zmbuf);
2679                         vq->nr_zmbuf -= 1;
2680                 }
2681         }
2682 }
2683
2684 static __rte_noinline uint16_t
2685 virtio_dev_tx_packed_zmbuf(struct virtio_net *dev,
2686                            struct vhost_virtqueue *__rte_restrict vq,
2687                            struct rte_mempool *mbuf_pool,
2688                            struct rte_mbuf **__rte_restrict pkts,
2689                            uint32_t count)
2690 {
2691         uint32_t pkt_idx = 0;
2692         uint32_t remained = count;
2693
2694         free_zmbuf(vq);
2695
2696         do {
2697                 if (remained >= PACKED_BATCH_SIZE) {
2698                         if (!virtio_dev_tx_batch_packed_zmbuf(dev, vq,
2699                                 mbuf_pool, &pkts[pkt_idx])) {
2700                                 pkt_idx += PACKED_BATCH_SIZE;
2701                                 remained -= PACKED_BATCH_SIZE;
2702                                 continue;
2703                         }
2704                 }
2705
2706                 if (virtio_dev_tx_single_packed_zmbuf(dev, vq, mbuf_pool,
2707                                                       &pkts[pkt_idx]))
2708                         break;
2709                 pkt_idx++;
2710                 remained--;
2711
2712         } while (remained);
2713
2714         if (pkt_idx)
2715                 vhost_vring_call_packed(dev, vq);
2716
2717         return pkt_idx;
2718 }
2719
2720 static __rte_noinline uint16_t
2721 virtio_dev_tx_packed(struct virtio_net *dev,
2722                      struct vhost_virtqueue *__rte_restrict vq,
2723                      struct rte_mempool *mbuf_pool,
2724                      struct rte_mbuf **__rte_restrict pkts,
2725                      uint32_t count)
2726 {
2727         uint32_t pkt_idx = 0;
2728         uint32_t remained = count;
2729
2730         do {
2731                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
2732
2733                 if (remained >= PACKED_BATCH_SIZE) {
2734                         if (!virtio_dev_tx_batch_packed(dev, vq, mbuf_pool,
2735                                                         &pkts[pkt_idx])) {
2736                                 pkt_idx += PACKED_BATCH_SIZE;
2737                                 remained -= PACKED_BATCH_SIZE;
2738                                 continue;
2739                         }
2740                 }
2741
2742                 if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
2743                                                 &pkts[pkt_idx]))
2744                         break;
2745                 pkt_idx++;
2746                 remained--;
2747
2748         } while (remained);
2749
2750         if (vq->shadow_used_idx) {
2751                 do_data_copy_dequeue(vq);
2752
2753                 vhost_flush_dequeue_shadow_packed(dev, vq);
2754                 vhost_vring_call_packed(dev, vq);
2755         }
2756
2757         return pkt_idx;
2758 }
2759
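     /*
      * A minimal usage sketch (illustrative only; vid, q, pool, burst, nb
      * and i are placeholders): dequeue is typically polled from the
      * datapath loop, e.g.
      *
      *     nb = rte_vhost_dequeue_burst(vid, q, pool, burst, MAX_PKT_BURST);
      *     for (i = 0; i < nb; i++)
      *             rte_pktmbuf_free(burst[i]);  -- or forward the mbuf
      *
      * Note the queue index: dequeue drains the guest Tx ring, so only odd
      * virtqueue indices (2 * N + 1 for queue pair N) are valid here, as
      * enforced by is_valid_virt_queue_idx() below.
      */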
2760 uint16_t
2761 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
2762         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
2763 {
2764         struct virtio_net *dev;
2765         struct rte_mbuf *rarp_mbuf = NULL;
2766         struct vhost_virtqueue *vq;
2767         int16_t success = 1;
2768
2769         dev = get_device(vid);
2770         if (!dev)
2771                 return 0;
2772
2773         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
2774                 VHOST_LOG_DATA(ERR,
2775                         "(%d) %s: built-in vhost net backend is disabled.\n",
2776                         dev->vid, __func__);
2777                 return 0;
2778         }
2779
2780         if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
2781                 VHOST_LOG_DATA(ERR,
2782                         "(%d) %s: invalid virtqueue idx %d.\n",
2783                         dev->vid, __func__, queue_id);
2784                 return 0;
2785         }
2786
2787         vq = dev->virtqueue[queue_id];
2788
2789         if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
2790                 return 0;
2791
2792         if (unlikely(vq->enabled == 0)) {
2793                 count = 0;
2794                 goto out_access_unlock;
2795         }
2796
2797         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2798                 vhost_user_iotlb_rd_lock(vq);
2799
2800         if (unlikely(vq->access_ok == 0))
2801                 if (unlikely(vring_translate(dev, vq) < 0)) {
2802                         count = 0;
2803                         goto out;
2804                 }
2805
2806         /*
2807          * Construct a RARP broadcast packet and inject it into the "pkts"
2808          * array, so that it looks like the guest actually sent such a packet.
2809          *
2810          * Check user_send_rarp() for more information.
2811          *
2812          * broadcast_rarp shares a cacheline in the virtio_net structure
2813          * with some fields that are accessed during enqueue, and
2814          * __atomic_compare_exchange_n performs a write whenever it does
2815          * the compare and exchange. This could result in false sharing
2816          * between enqueue and dequeue.
2817          *
2818          * Prevent unnecessary false sharing by reading broadcast_rarp first
2819          * and only performing compare and exchange if the read indicates it
2820          * is likely to be set.
2821          */
2822         if (unlikely(__atomic_load_n(&dev->broadcast_rarp, __ATOMIC_ACQUIRE) &&
2823                         __atomic_compare_exchange_n(&dev->broadcast_rarp,
2824                         &success, 0, 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED))) {
2825
2826                 rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
2827                 if (rarp_mbuf == NULL) {
2828                         VHOST_LOG_DATA(ERR, "Failed to make RARP packet.\n");
2829                         count = 0;
2830                         goto out;
2831                 }
2832                 count -= 1;
2833         }
2834
2835         if (vq_is_packed(dev)) {
2836                 if (unlikely(dev->dequeue_zero_copy))
2837                         count = virtio_dev_tx_packed_zmbuf(dev, vq, mbuf_pool,
2838                                                            pkts, count);
2839                 else
2840                         count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts,
2841                                                      count);
2842         } else
2843                 count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count);
2844
2845 out:
2846         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2847                 vhost_user_iotlb_rd_unlock(vq);
2848
2849 out_access_unlock:
2850         rte_spinlock_unlock(&vq->access_lock);
2851
2852         if (unlikely(rarp_mbuf != NULL)) {
2853                 /*
2854                  * Inject it at the head of the "pkts" array, so that the
2855                  * switch's MAC learning table gets updated first.
2856                  */
2857                 memmove(&pkts[1], pkts, count * sizeof(struct rte_mbuf *));
2858                 pkts[0] = rarp_mbuf;
2859                 count += 1;
2860         }
2861
2862         return count;
2863 }