lib/librte_vhost/virtio_net.c (dpdk.git)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <stdbool.h>
7 #include <linux/virtio_net.h>
8
9 #include <rte_mbuf.h>
10 #include <rte_memcpy.h>
11 #include <rte_ether.h>
12 #include <rte_ip.h>
13 #include <rte_vhost.h>
14 #include <rte_tcp.h>
15 #include <rte_udp.h>
16 #include <rte_sctp.h>
17 #include <rte_arp.h>
18 #include <rte_spinlock.h>
19 #include <rte_malloc.h>
20
21 #include "iotlb.h"
22 #include "vhost.h"
23
24 #define MAX_PKT_BURST 32
25
26 #define MAX_BATCH_LEN 256
27
28 static  __rte_always_inline bool
29 rxvq_is_mergeable(struct virtio_net *dev)
30 {
31         return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
32 }
33
34 static  __rte_always_inline bool
35 virtio_net_is_inorder(struct virtio_net *dev)
36 {
37         return dev->features & (1ULL << VIRTIO_F_IN_ORDER);
38 }
39
40 static bool
41 is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
42 {
43         return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
44 }
45
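/*
 * Flush the copies batched on the enqueue path: perform the actual
 * memcpy for each element, log the written guest memory for live
 * migration, then reset the batch counter.
 */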
46 static inline void
47 do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
48 {
49         struct batch_copy_elem *elem = vq->batch_copy_elems;
50         uint16_t count = vq->batch_copy_nb_elems;
51         int i;
52
53         for (i = 0; i < count; i++) {
54                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
55                 vhost_log_cache_write_iova(dev, vq, elem[i].log_addr,
56                                            elem[i].len);
57                 PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
58         }
59
60         vq->batch_copy_nb_elems = 0;
61 }
62
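/* Flush the copies batched on the dequeue path and reset the counter. */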
63 static inline void
64 do_data_copy_dequeue(struct vhost_virtqueue *vq)
65 {
66         struct batch_copy_elem *elem = vq->batch_copy_elems;
67         uint16_t count = vq->batch_copy_nb_elems;
68         int i;
69
70         for (i = 0; i < count; i++)
71                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
72
73         vq->batch_copy_nb_elems = 0;
74 }
75
76 static __rte_always_inline void
77 do_flush_shadow_used_ring_split(struct virtio_net *dev,
78                         struct vhost_virtqueue *vq,
79                         uint16_t to, uint16_t from, uint16_t size)
80 {
81         rte_memcpy(&vq->used->ring[to],
82                         &vq->shadow_used_split[from],
83                         size * sizeof(struct vring_used_elem));
84         vhost_log_cache_used_vring(dev, vq,
85                         offsetof(struct vring_used, ring[to]),
86                         size * sizeof(struct vring_used_elem));
87 }
88
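/*
 * Copy the shadow used entries into the split used ring (in two chunks
 * when the ring wraps), then expose them to the guest by updating
 * used->idx after a write barrier, logging the updates for live
 * migration.
 */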
89 static __rte_always_inline void
90 flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
91 {
92         uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
93
94         if (used_idx + vq->shadow_used_idx <= vq->size) {
95                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
96                                           vq->shadow_used_idx);
97         } else {
98                 uint16_t size;
99
100                 /* update used ring interval [used_idx, vq->size] */
101                 size = vq->size - used_idx;
102                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);
103
104                 /* update the remaining used ring interval [0, left_size] */
105                 do_flush_shadow_used_ring_split(dev, vq, 0, size,
106                                           vq->shadow_used_idx - size);
107         }
108         vq->last_used_idx += vq->shadow_used_idx;
109
110         rte_smp_wmb();
111
112         vhost_log_cache_sync(dev, vq);
113
114         *(volatile uint16_t *)&vq->used->idx += vq->shadow_used_idx;
115         vq->shadow_used_idx = 0;
116         vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
117                 sizeof(vq->used->idx));
118 }
119
120 static __rte_always_inline void
121 update_shadow_used_ring_split(struct vhost_virtqueue *vq,
122                          uint16_t desc_idx, uint32_t len)
123 {
124         uint16_t i = vq->shadow_used_idx++;
125
126         vq->shadow_used_split[i].id  = desc_idx;
127         vq->shadow_used_split[i].len = len;
128 }
129
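/*
 * Write the shadow used entries back to the packed descriptor ring.
 * Descriptor ids and lengths are written first, the flags after a
 * write barrier, and the head descriptor's flags last, so the guest
 * only sees completely updated chains.
 */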
130 static __rte_always_inline void
131 vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
132                                   struct vhost_virtqueue *vq)
133 {
134         int i;
135         uint16_t used_idx = vq->last_used_idx;
136         uint16_t head_idx = vq->last_used_idx;
137         uint16_t head_flags = 0;
138
139         /* Split loop in two to save memory barriers */
140         for (i = 0; i < vq->shadow_used_idx; i++) {
141                 vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
142                 vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;
143
144                 used_idx += vq->shadow_used_packed[i].count;
145                 if (used_idx >= vq->size)
146                         used_idx -= vq->size;
147         }
148
149         rte_smp_wmb();
150
151         for (i = 0; i < vq->shadow_used_idx; i++) {
152                 uint16_t flags;
153
154                 if (vq->shadow_used_packed[i].len)
155                         flags = VRING_DESC_F_WRITE;
156                 else
157                         flags = 0;
158
159                 if (vq->used_wrap_counter) {
160                         flags |= VRING_DESC_F_USED;
161                         flags |= VRING_DESC_F_AVAIL;
162                 } else {
163                         flags &= ~VRING_DESC_F_USED;
164                         flags &= ~VRING_DESC_F_AVAIL;
165                 }
166
167                 if (i > 0) {
168                         vq->desc_packed[vq->last_used_idx].flags = flags;
169
170                         vhost_log_cache_used_vring(dev, vq,
171                                         vq->last_used_idx *
172                                         sizeof(struct vring_packed_desc),
173                                         sizeof(struct vring_packed_desc));
174                 } else {
175                         head_idx = vq->last_used_idx;
176                         head_flags = flags;
177                 }
178
179                 vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count);
180         }
181
182         vq->desc_packed[head_idx].flags = head_flags;
183
184         vhost_log_cache_used_vring(dev, vq,
185                                 head_idx *
186                                 sizeof(struct vring_packed_desc),
187                                 sizeof(struct vring_packed_desc));
188
189         vq->shadow_used_idx = 0;
190         vhost_log_cache_sync(dev, vq);
191 }
192
193 static __rte_always_inline void
194 vhost_flush_dequeue_shadow_packed(struct virtio_net *dev,
195                                   struct vhost_virtqueue *vq)
196 {
197         struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];
198
199         vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
200         rte_smp_wmb();
201         vq->desc_packed[vq->shadow_last_used_idx].flags = used_elem->flags;
202
203         vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
204                                    sizeof(struct vring_packed_desc),
205                                    sizeof(struct vring_packed_desc));
206         vq->shadow_used_idx = 0;
207         vhost_log_cache_sync(dev, vq);
208 }
209
210 static __rte_always_inline void
211 vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
212                                  struct vhost_virtqueue *vq,
213                                  uint64_t *lens,
214                                  uint16_t *ids)
215 {
216         uint16_t i;
217         uint16_t flags;
218
219         if (vq->shadow_used_idx) {
220                 do_data_copy_enqueue(dev, vq);
221                 vhost_flush_enqueue_shadow_packed(dev, vq);
222         }
223
224         flags = PACKED_DESC_ENQUEUE_USED_FLAG(vq->used_wrap_counter);
225
226         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
227                 vq->desc_packed[vq->last_used_idx + i].id = ids[i];
228                 vq->desc_packed[vq->last_used_idx + i].len = lens[i];
229         }
230
231         rte_smp_wmb();
232
233         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
234                 vq->desc_packed[vq->last_used_idx + i].flags = flags;
235
236         vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
237                                    sizeof(struct vring_packed_desc),
238                                    sizeof(struct vring_packed_desc) *
239                                    PACKED_BATCH_SIZE);
240         vhost_log_cache_sync(dev, vq);
241
242         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
243 }
244
245 static __rte_always_inline void
246 vhost_shadow_dequeue_batch_packed_inorder(struct vhost_virtqueue *vq,
247                                           uint16_t id)
248 {
249         vq->shadow_used_packed[0].id = id;
250
251         if (!vq->shadow_used_idx) {
252                 vq->shadow_last_used_idx = vq->last_used_idx;
253                 vq->shadow_used_packed[0].flags =
254                         PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
255                 vq->shadow_used_packed[0].len = 0;
256                 vq->shadow_used_packed[0].count = 1;
257                 vq->shadow_used_idx++;
258         }
259
260         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
261 }
262
263 static __rte_always_inline void
264 vhost_shadow_dequeue_batch_packed(struct virtio_net *dev,
265                                   struct vhost_virtqueue *vq,
266                                   uint16_t *ids)
267 {
268         uint16_t flags;
269         uint16_t i;
270         uint16_t begin;
271
272         flags = PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
273
274         if (!vq->shadow_used_idx) {
275                 vq->shadow_last_used_idx = vq->last_used_idx;
276                 vq->shadow_used_packed[0].id  = ids[0];
277                 vq->shadow_used_packed[0].len = 0;
278                 vq->shadow_used_packed[0].count = 1;
279                 vq->shadow_used_packed[0].flags = flags;
280                 vq->shadow_used_idx++;
281                 begin = 1;
282         } else
283                 begin = 0;
284
285         vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE) {
286                 vq->desc_packed[vq->last_used_idx + i].id = ids[i];
287                 vq->desc_packed[vq->last_used_idx + i].len = 0;
288         }
289
290         rte_smp_wmb();
291         vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE)
292                 vq->desc_packed[vq->last_used_idx + i].flags = flags;
293
294         vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
295                                    sizeof(struct vring_packed_desc),
296                                    sizeof(struct vring_packed_desc) *
297                                    PACKED_BATCH_SIZE);
298         vhost_log_cache_sync(dev, vq);
299
300         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
301 }
302
303 static __rte_always_inline void
304 vhost_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
305                                    uint16_t buf_id,
306                                    uint16_t count)
307 {
308         uint16_t flags;
309
310         flags = vq->desc_packed[vq->last_used_idx].flags;
311         if (vq->used_wrap_counter) {
312                 flags |= VRING_DESC_F_USED;
313                 flags |= VRING_DESC_F_AVAIL;
314         } else {
315                 flags &= ~VRING_DESC_F_USED;
316                 flags &= ~VRING_DESC_F_AVAIL;
317         }
318
319         if (!vq->shadow_used_idx) {
320                 vq->shadow_last_used_idx = vq->last_used_idx;
321
322                 vq->shadow_used_packed[0].id  = buf_id;
323                 vq->shadow_used_packed[0].len = 0;
324                 vq->shadow_used_packed[0].flags = flags;
325                 vq->shadow_used_idx++;
326         } else {
327                 vq->desc_packed[vq->last_used_idx].id = buf_id;
328                 vq->desc_packed[vq->last_used_idx].len = 0;
329                 vq->desc_packed[vq->last_used_idx].flags = flags;
330         }
331
332         vq_inc_last_used_packed(vq, count);
333 }
334
335 static __rte_always_inline void
336 vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
337                                            uint16_t buf_id,
338                                            uint16_t count)
339 {
340         uint16_t flags;
341
342         vq->shadow_used_packed[0].id = buf_id;
343
344         flags = vq->desc_packed[vq->last_used_idx].flags;
345         if (vq->used_wrap_counter) {
346                 flags |= VRING_DESC_F_USED;
347                 flags |= VRING_DESC_F_AVAIL;
348         } else {
349                 flags &= ~VRING_DESC_F_USED;
350                 flags &= ~VRING_DESC_F_AVAIL;
351         }
352
353         if (!vq->shadow_used_idx) {
354                 vq->shadow_last_used_idx = vq->last_used_idx;
355                 vq->shadow_used_packed[0].len = 0;
356                 vq->shadow_used_packed[0].flags = flags;
357                 vq->shadow_used_idx++;
358         }
359
360         vq_inc_last_used_packed(vq, count);
361 }
362
363 static __rte_always_inline void
364 vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
365                                    struct vhost_virtqueue *vq,
366                                    uint32_t len[],
367                                    uint16_t id[],
368                                    uint16_t count[],
369                                    uint16_t num_buffers)
370 {
371         uint16_t i;
372         for (i = 0; i < num_buffers; i++) {
373                 /* flush the enqueue shadow ring only at packed batch boundaries */
374                 if (!vq->shadow_used_idx)
375                         vq->shadow_aligned_idx = vq->last_used_idx &
376                                 PACKED_BATCH_MASK;
377                 vq->shadow_used_packed[vq->shadow_used_idx].id  = id[i];
378                 vq->shadow_used_packed[vq->shadow_used_idx].len = len[i];
379                 vq->shadow_used_packed[vq->shadow_used_idx].count = count[i];
380                 vq->shadow_aligned_idx += count[i];
381                 vq->shadow_used_idx++;
382         }
383
384         if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
385                 do_data_copy_enqueue(dev, vq);
386                 vhost_flush_enqueue_shadow_packed(dev, vq);
387         }
388 }
389
390 /* avoid the write when the value is unchanged, to lessen cache issues */
391 #define ASSIGN_UNLESS_EQUAL(var, val) do {      \
392         if ((var) != (val))                     \
393                 (var) = (val);                  \
394 } while (0)
395
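/*
 * Translate the offload requests of an mbuf being enqueued to the
 * guest into its virtio-net header: L4 checksum, IPv4 header checksum
 * and TSO/UFO (GSO) fields.
 */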
396 static __rte_always_inline void
397 virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
398 {
399         uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;
400
401         if (m_buf->ol_flags & PKT_TX_TCP_SEG)
402                 csum_l4 |= PKT_TX_TCP_CKSUM;
403
404         if (csum_l4) {
405                 net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
406                 net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
407
408                 switch (csum_l4) {
409                 case PKT_TX_TCP_CKSUM:
410                         net_hdr->csum_offset = (offsetof(struct rte_tcp_hdr,
411                                                 cksum));
412                         break;
413                 case PKT_TX_UDP_CKSUM:
414                         net_hdr->csum_offset = (offsetof(struct rte_udp_hdr,
415                                                 dgram_cksum));
416                         break;
417                 case PKT_TX_SCTP_CKSUM:
418                         net_hdr->csum_offset = (offsetof(struct rte_sctp_hdr,
419                                                 cksum));
420                         break;
421                 }
422         } else {
423                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
424                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
425                 ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
426         }
427
428         /* The virtio-net header cannot convey IP cksum offload, so compute it here */
429         if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
430                 struct rte_ipv4_hdr *ipv4_hdr;
431
432                 ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
433                                                    m_buf->l2_len);
434                 ipv4_hdr->hdr_checksum = 0;
435                 ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
436         }
437
438         if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
439                 if (m_buf->ol_flags & PKT_TX_IPV4)
440                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
441                 else
442                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
443                 net_hdr->gso_size = m_buf->tso_segsz;
444                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
445                                         + m_buf->l4_len;
446         } else if (m_buf->ol_flags & PKT_TX_UDP_SEG) {
447                 net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
448                 net_hdr->gso_size = m_buf->tso_segsz;
449                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
450                         m_buf->l4_len;
451         } else {
452                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
453                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
454                 ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
455         }
456 }
457
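/*
 * Map one guest descriptor (IOVA and length) into host virtual address
 * chunks and append them to buf_vec; a single descriptor may need
 * several chunks when it is not contiguous in host VA space.
 */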
458 static __rte_always_inline int
459 map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
460                 struct buf_vector *buf_vec, uint16_t *vec_idx,
461                 uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
462 {
463         uint16_t vec_id = *vec_idx;
464
465         while (desc_len) {
466                 uint64_t desc_addr;
467                 uint64_t desc_chunck_len = desc_len;
468
469                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
470                         return -1;
471
472                 desc_addr = vhost_iova_to_vva(dev, vq,
473                                 desc_iova,
474                                 &desc_chunck_len,
475                                 perm);
476                 if (unlikely(!desc_addr))
477                         return -1;
478
479                 rte_prefetch0((void *)(uintptr_t)desc_addr);
480
481                 buf_vec[vec_id].buf_iova = desc_iova;
482                 buf_vec[vec_id].buf_addr = desc_addr;
483                 buf_vec[vec_id].buf_len  = desc_chunck_len;
484
485                 desc_len -= desc_chunck_len;
486                 desc_iova += desc_chunck_len;
487                 vec_id++;
488         }
489         *vec_idx = vec_id;
490
491         return 0;
492 }
493
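/*
 * Walk one descriptor chain of a split ring, following an indirect
 * table if present, map every descriptor into buf_vec and return the
 * chain head index and total length. The walk is bounded to guard
 * against looped chains.
 */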
494 static __rte_always_inline int
495 fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
496                          uint32_t avail_idx, uint16_t *vec_idx,
497                          struct buf_vector *buf_vec, uint16_t *desc_chain_head,
498                          uint32_t *desc_chain_len, uint8_t perm)
499 {
500         uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
501         uint16_t vec_id = *vec_idx;
502         uint32_t len    = 0;
503         uint64_t dlen;
504         uint32_t nr_descs = vq->size;
505         uint32_t cnt    = 0;
506         struct vring_desc *descs = vq->desc;
507         struct vring_desc *idesc = NULL;
508
509         if (unlikely(idx >= vq->size))
510                 return -1;
511
512         *desc_chain_head = idx;
513
514         if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
515                 dlen = vq->desc[idx].len;
516                 nr_descs = dlen / sizeof(struct vring_desc);
517                 if (unlikely(nr_descs > vq->size))
518                         return -1;
519
520                 descs = (struct vring_desc *)(uintptr_t)
521                         vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
522                                                 &dlen,
523                                                 VHOST_ACCESS_RO);
524                 if (unlikely(!descs))
525                         return -1;
526
527                 if (unlikely(dlen < vq->desc[idx].len)) {
528                         /*
529                          * The indirect desc table is not contiguous
530                          * in process VA space, so we have to copy it.
531                          */
532                         idesc = vhost_alloc_copy_ind_table(dev, vq,
533                                         vq->desc[idx].addr, vq->desc[idx].len);
534                         if (unlikely(!idesc))
535                                 return -1;
536
537                         descs = idesc;
538                 }
539
540                 idx = 0;
541         }
542
543         while (1) {
544                 if (unlikely(idx >= nr_descs || cnt++ >= nr_descs)) {
545                         free_ind_table(idesc);
546                         return -1;
547                 }
548
549                 len += descs[idx].len;
550
551                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
552                                                 descs[idx].addr, descs[idx].len,
553                                                 perm))) {
554                         free_ind_table(idesc);
555                         return -1;
556                 }
557
558                 if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
559                         break;
560
561                 idx = descs[idx].next;
562         }
563
564         *desc_chain_len = len;
565         *vec_idx = vec_id;
566
567         if (unlikely(!!idesc))
568                 free_ind_table(idesc);
569
570         return 0;
571 }
572
573 /*
574  * Returns -1 on failure, 0 on success
575  */
576 static inline int
577 reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
578                                 uint32_t size, struct buf_vector *buf_vec,
579                                 uint16_t *num_buffers, uint16_t avail_head,
580                                 uint16_t *nr_vec)
581 {
582         uint16_t cur_idx;
583         uint16_t vec_idx = 0;
584         uint16_t max_tries, tries = 0;
585
586         uint16_t head_idx = 0;
587         uint32_t len = 0;
588
589         *num_buffers = 0;
590         cur_idx  = vq->last_avail_idx;
591
592         if (rxvq_is_mergeable(dev))
593                 max_tries = vq->size - 1;
594         else
595                 max_tries = 1;
596
597         while (size > 0) {
598                 if (unlikely(cur_idx == avail_head))
599                         return -1;
600                 /*
601                  * If we have tried all the available ring items and
602                  * still cannot get enough buffers, something abnormal
603                  * happened.
604                  */
605                 if (unlikely(++tries > max_tries))
606                         return -1;
607
608                 if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
609                                                 &vec_idx, buf_vec,
610                                                 &head_idx, &len,
611                                                 VHOST_ACCESS_RW) < 0))
612                         return -1;
613                 len = RTE_MIN(len, size);
614                 update_shadow_used_ring_split(vq, head_idx, len);
615                 size -= len;
616
617                 cur_idx++;
618                 *num_buffers += 1;
619         }
620
621         *nr_vec = vec_idx;
622
623         return 0;
624 }
625
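/*
 * Map an indirect descriptor table of a packed ring into buf_vec,
 * copying the table first when it is not contiguous in host VA space.
 */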
626 static __rte_always_inline int
627 fill_vec_buf_packed_indirect(struct virtio_net *dev,
628                         struct vhost_virtqueue *vq,
629                         struct vring_packed_desc *desc, uint16_t *vec_idx,
630                         struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
631 {
632         uint16_t i;
633         uint32_t nr_descs;
634         uint16_t vec_id = *vec_idx;
635         uint64_t dlen;
636         struct vring_packed_desc *descs, *idescs = NULL;
637
638         dlen = desc->len;
639         descs = (struct vring_packed_desc *)(uintptr_t)
640                 vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
641         if (unlikely(!descs))
642                 return -1;
643
644         if (unlikely(dlen < desc->len)) {
645                 /*
646                  * The indirect desc table is not contiguous
647                  * in process VA space, so we have to copy it.
648                  */
649                 idescs = vhost_alloc_copy_ind_table(dev,
650                                 vq, desc->addr, desc->len);
651                 if (unlikely(!idescs))
652                         return -1;
653
654                 descs = idescs;
655         }
656
657         nr_descs =  desc->len / sizeof(struct vring_packed_desc);
658         if (unlikely(nr_descs >= vq->size)) {
659                 free_ind_table(idescs);
660                 return -1;
661         }
662
663         for (i = 0; i < nr_descs; i++) {
664                 if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
665                         free_ind_table(idescs);
666                         return -1;
667                 }
668
669                 *len += descs[i].len;
670                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
671                                                 descs[i].addr, descs[i].len,
672                                                 perm)))
673                         return -1;
674         }
675         *vec_idx = vec_id;
676
677         if (unlikely(!!idescs))
678                 free_ind_table(idescs);
679
680         return 0;
681 }
682
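/*
 * Walk one available descriptor chain of a packed ring starting at
 * avail_idx, map it into buf_vec and return the buffer id, the number
 * of descriptors consumed and the total buffer length.
 */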
683 static __rte_always_inline int
684 fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
685                                 uint16_t avail_idx, uint16_t *desc_count,
686                                 struct buf_vector *buf_vec, uint16_t *vec_idx,
687                                 uint16_t *buf_id, uint32_t *len, uint8_t perm)
688 {
689         bool wrap_counter = vq->avail_wrap_counter;
690         struct vring_packed_desc *descs = vq->desc_packed;
691         uint16_t vec_id = *vec_idx;
692
693         if (avail_idx < vq->last_avail_idx)
694                 wrap_counter ^= 1;
695
696         /*
697          * Perform a load-acquire barrier in desc_is_avail to
698          * enforce the ordering between desc flags and desc
699          * content.
700          */
701         if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
702                 return -1;
703
704         *desc_count = 0;
705         *len = 0;
706
707         while (1) {
708                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
709                         return -1;
710
711                 if (unlikely(*desc_count >= vq->size))
712                         return -1;
713
714                 *desc_count += 1;
715                 *buf_id = descs[avail_idx].id;
716
717                 if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
718                         if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
719                                                         &descs[avail_idx],
720                                                         &vec_id, buf_vec,
721                                                         len, perm) < 0))
722                                 return -1;
723                 } else {
724                         *len += descs[avail_idx].len;
725
726                         if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
727                                                         descs[avail_idx].addr,
728                                                         descs[avail_idx].len,
729                                                         perm)))
730                                 return -1;
731                 }
732
733                 if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
734                         break;
735
736                 if (++avail_idx >= vq->size) {
737                         avail_idx -= vq->size;
738                         wrap_counter ^= 1;
739                 }
740         }
741
742         *vec_idx = vec_id;
743
744         return 0;
745 }
746
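/*
 * Copy the virtio-net header into a buffer that is split across
 * several non-contiguous chunks, logging each written chunk.
 */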
747 static __rte_noinline void
748 copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
749                 struct buf_vector *buf_vec,
750                 struct virtio_net_hdr_mrg_rxbuf *hdr)
751 {
752         uint64_t len;
753         uint64_t remain = dev->vhost_hlen;
754         uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
755         uint64_t iova = buf_vec->buf_iova;
756
757         while (remain) {
758                 len = RTE_MIN(remain,
759                                 buf_vec->buf_len);
760                 dst = buf_vec->buf_addr;
761                 rte_memcpy((void *)(uintptr_t)dst,
762                                 (void *)(uintptr_t)src,
763                                 len);
764
765                 PRINT_PACKET(dev, (uintptr_t)dst,
766                                 (uint32_t)len, 0);
767                 vhost_log_cache_write_iova(dev, vq,
768                                 iova, len);
769
770                 remain -= len;
771                 iova += len;
772                 src += len;
773                 buf_vec++;
774         }
775 }
776
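/*
 * Copy an mbuf chain into the guest buffers described by buf_vec,
 * prepending the virtio-net header. Copies larger than MAX_BATCH_LEN
 * are done immediately; smaller ones are deferred to the batch copy
 * array.
 */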
777 static __rte_always_inline int
778 copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
779                             struct rte_mbuf *m, struct buf_vector *buf_vec,
780                             uint16_t nr_vec, uint16_t num_buffers)
781 {
782         uint32_t vec_idx = 0;
783         uint32_t mbuf_offset, mbuf_avail;
784         uint32_t buf_offset, buf_avail;
785         uint64_t buf_addr, buf_iova, buf_len;
786         uint32_t cpy_len;
787         uint64_t hdr_addr;
788         struct rte_mbuf *hdr_mbuf;
789         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
790         struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
791         int error = 0;
792
793         if (unlikely(m == NULL)) {
794                 error = -1;
795                 goto out;
796         }
797
798         buf_addr = buf_vec[vec_idx].buf_addr;
799         buf_iova = buf_vec[vec_idx].buf_iova;
800         buf_len = buf_vec[vec_idx].buf_len;
801
802         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
803                 error = -1;
804                 goto out;
805         }
806
807         hdr_mbuf = m;
808         hdr_addr = buf_addr;
809         if (unlikely(buf_len < dev->vhost_hlen))
810                 hdr = &tmp_hdr;
811         else
812                 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
813
814         VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
815                 dev->vid, num_buffers);
816
817         if (unlikely(buf_len < dev->vhost_hlen)) {
818                 buf_offset = dev->vhost_hlen - buf_len;
819                 vec_idx++;
820                 buf_addr = buf_vec[vec_idx].buf_addr;
821                 buf_iova = buf_vec[vec_idx].buf_iova;
822                 buf_len = buf_vec[vec_idx].buf_len;
823                 buf_avail = buf_len - buf_offset;
824         } else {
825                 buf_offset = dev->vhost_hlen;
826                 buf_avail = buf_len - dev->vhost_hlen;
827         }
828
829         mbuf_avail  = rte_pktmbuf_data_len(m);
830         mbuf_offset = 0;
831         while (mbuf_avail != 0 || m->next != NULL) {
832                 /* done with current buf, get the next one */
833                 if (buf_avail == 0) {
834                         vec_idx++;
835                         if (unlikely(vec_idx >= nr_vec)) {
836                                 error = -1;
837                                 goto out;
838                         }
839
840                         buf_addr = buf_vec[vec_idx].buf_addr;
841                         buf_iova = buf_vec[vec_idx].buf_iova;
842                         buf_len = buf_vec[vec_idx].buf_len;
843
844                         buf_offset = 0;
845                         buf_avail  = buf_len;
846                 }
847
848                 /* done with current mbuf, get the next one */
849                 if (mbuf_avail == 0) {
850                         m = m->next;
851
852                         mbuf_offset = 0;
853                         mbuf_avail  = rte_pktmbuf_data_len(m);
854                 }
855
856                 if (hdr_addr) {
857                         virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
858                         if (rxvq_is_mergeable(dev))
859                                 ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
860                                                 num_buffers);
861
862                         if (unlikely(hdr == &tmp_hdr)) {
863                                 copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
864                         } else {
865                                 PRINT_PACKET(dev, (uintptr_t)hdr_addr,
866                                                 dev->vhost_hlen, 0);
867                                 vhost_log_cache_write_iova(dev, vq,
868                                                 buf_vec[0].buf_iova,
869                                                 dev->vhost_hlen);
870                         }
871
872                         hdr_addr = 0;
873                 }
874
875                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
876
877                 if (likely(cpy_len > MAX_BATCH_LEN ||
878                                         vq->batch_copy_nb_elems >= vq->size)) {
879                         rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
880                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
881                                 cpy_len);
882                         vhost_log_cache_write_iova(dev, vq,
883                                                    buf_iova + buf_offset,
884                                                    cpy_len);
885                         PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
886                                 cpy_len, 0);
887                 } else {
888                         batch_copy[vq->batch_copy_nb_elems].dst =
889                                 (void *)((uintptr_t)(buf_addr + buf_offset));
890                         batch_copy[vq->batch_copy_nb_elems].src =
891                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
892                         batch_copy[vq->batch_copy_nb_elems].log_addr =
893                                 buf_iova + buf_offset;
894                         batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
895                         vq->batch_copy_nb_elems++;
896                 }
897
898                 mbuf_avail  -= cpy_len;
899                 mbuf_offset += cpy_len;
900                 buf_avail  -= cpy_len;
901                 buf_offset += cpy_len;
902         }
903
904 out:
905
906         return error;
907 }
908
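/*
 * Enqueue a single mbuf into a packed ring: reserve as many descriptor
 * chains as needed to hold the packet, copy the data and record the
 * used entries in the shadow ring.
 */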
909 static __rte_always_inline int
910 vhost_enqueue_single_packed(struct virtio_net *dev,
911                             struct vhost_virtqueue *vq,
912                             struct rte_mbuf *pkt,
913                             struct buf_vector *buf_vec,
914                             uint16_t *nr_descs)
915 {
916         uint16_t nr_vec = 0;
917         uint16_t avail_idx = vq->last_avail_idx;
918         uint16_t max_tries, tries = 0;
919         uint16_t buf_id = 0;
920         uint32_t len = 0;
921         uint16_t desc_count;
922         uint32_t size = pkt->pkt_len + dev->vhost_hlen;
923         uint16_t num_buffers = 0;
924         uint32_t buffer_len[vq->size];
925         uint16_t buffer_buf_id[vq->size];
926         uint16_t buffer_desc_count[vq->size];
927
928         if (rxvq_is_mergeable(dev))
929                 max_tries = vq->size - 1;
930         else
931                 max_tries = 1;
932
933         while (size > 0) {
934                 /*
935                  * If we have tried all the available ring items and
936                  * still cannot get enough buffers, something abnormal
937                  * happened.
938                  */
939                 if (unlikely(++tries > max_tries))
940                         return -1;
941
942                 if (unlikely(fill_vec_buf_packed(dev, vq,
943                                                 avail_idx, &desc_count,
944                                                 buf_vec, &nr_vec,
945                                                 &buf_id, &len,
946                                                 VHOST_ACCESS_RW) < 0))
947                         return -1;
948
949                 len = RTE_MIN(len, size);
950                 size -= len;
951
952                 buffer_len[num_buffers] = len;
953                 buffer_buf_id[num_buffers] = buf_id;
954                 buffer_desc_count[num_buffers] = desc_count;
955                 num_buffers += 1;
956
957                 *nr_descs += desc_count;
958                 avail_idx += desc_count;
959                 if (avail_idx >= vq->size)
960                         avail_idx -= vq->size;
961         }
962
963         if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0)
964                 return -1;
965
966         vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
967                                            buffer_desc_count, num_buffers);
968
969         return 0;
970 }
971
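/*
 * Enqueue a burst of mbufs into a split ring: reserve enough available
 * buffers for each packet, copy the data, then flush the shadow used
 * ring and notify the guest if anything was enqueued.
 */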
972 static __rte_noinline uint32_t
973 virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
974         struct rte_mbuf **pkts, uint32_t count)
975 {
976         uint32_t pkt_idx = 0;
977         uint16_t num_buffers;
978         struct buf_vector buf_vec[BUF_VECTOR_MAX];
979         uint16_t avail_head;
980
981         avail_head = *((volatile uint16_t *)&vq->avail->idx);
982
983         /*
984          * The ordering between avail index and
985          * desc reads needs to be enforced.
986          */
987         rte_smp_rmb();
988
989         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
990
991         for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
992                 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
993                 uint16_t nr_vec = 0;
994
995                 if (unlikely(reserve_avail_buf_split(dev, vq,
996                                                 pkt_len, buf_vec, &num_buffers,
997                                                 avail_head, &nr_vec) < 0)) {
998                         VHOST_LOG_DATA(DEBUG,
999                                 "(%d) failed to get enough desc from vring\n",
1000                                 dev->vid);
1001                         vq->shadow_used_idx -= num_buffers;
1002                         break;
1003                 }
1004
1005                 VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
1006                         dev->vid, vq->last_avail_idx,
1007                         vq->last_avail_idx + num_buffers);
1008
1009                 if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
1010                                                 buf_vec, nr_vec,
1011                                                 num_buffers) < 0) {
1012                         vq->shadow_used_idx -= num_buffers;
1013                         break;
1014                 }
1015
1016                 vq->last_avail_idx += num_buffers;
1017         }
1018
1019         do_data_copy_enqueue(dev, vq);
1020
1021         if (likely(vq->shadow_used_idx)) {
1022                 flush_shadow_used_ring_split(dev, vq);
1023                 vhost_vring_call_split(dev, vq);
1024         }
1025
1026         return pkt_idx;
1027 }
1028
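/*
 * Fast path: enqueue PACKED_BATCH_SIZE single-segment packets at once.
 * Returns -1 so the caller falls back to the single-packet path when
 * the ring position is not batch aligned, a packet is segmented, a
 * descriptor is unavailable or too small, or a buffer is not
 * contiguous in host VA space.
 */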
1029 static __rte_always_inline int
1030 virtio_dev_rx_batch_packed(struct virtio_net *dev,
1031                            struct vhost_virtqueue *vq,
1032                            struct rte_mbuf **pkts)
1033 {
1034         bool wrap_counter = vq->avail_wrap_counter;
1035         struct vring_packed_desc *descs = vq->desc_packed;
1036         uint16_t avail_idx = vq->last_avail_idx;
1037         uint64_t desc_addrs[PACKED_BATCH_SIZE];
1038         struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
1039         uint32_t buf_offset = dev->vhost_hlen;
1040         uint64_t lens[PACKED_BATCH_SIZE];
1041         uint16_t ids[PACKED_BATCH_SIZE];
1042         uint16_t i;
1043
1044         if (unlikely(avail_idx & PACKED_BATCH_MASK))
1045                 return -1;
1046
1047         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
1048                 return -1;
1049
1050         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1051                 if (unlikely(pkts[i]->next != NULL))
1052                         return -1;
1053                 if (unlikely(!desc_is_avail(&descs[avail_idx + i],
1054                                             wrap_counter)))
1055                         return -1;
1056         }
1057
1058         rte_smp_rmb();
1059
1060         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1061                 lens[i] = descs[avail_idx + i].len;
1062
1063         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1064                 if (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset)))
1065                         return -1;
1066         }
1067
1068         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1069                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
1070                                                   descs[avail_idx + i].addr,
1071                                                   &lens[i],
1072                                                   VHOST_ACCESS_RW);
1073
1074         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1075                 if (unlikely(lens[i] != descs[avail_idx + i].len))
1076                         return -1;
1077         }
1078
1079         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1080                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
1081                 hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
1082                                         (uintptr_t)desc_addrs[i];
1083                 lens[i] = pkts[i]->pkt_len + dev->vhost_hlen;
1084         }
1085
1086         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1087                 virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr);
1088
1089         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
1090
1091         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1092                 rte_memcpy((void *)(uintptr_t)(desc_addrs[i] + buf_offset),
1093                            rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
1094                            pkts[i]->pkt_len);
1095         }
1096
1097         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1098                 vhost_log_cache_write_iova(dev, vq, descs[avail_idx + i].addr,
1099                                            lens[i]);
1100
1101         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1102                 ids[i] = descs[avail_idx + i].id;
1103
1104         vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);
1105
1106         return 0;
1107 }
1108
1109 static __rte_always_inline int16_t
1110 virtio_dev_rx_single_packed(struct virtio_net *dev,
1111                             struct vhost_virtqueue *vq,
1112                             struct rte_mbuf *pkt)
1113 {
1114         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1115         uint16_t nr_descs = 0;
1116
1117         rte_smp_rmb();
1118         if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
1119                                                  &nr_descs) < 0)) {
1120                 VHOST_LOG_DATA(DEBUG,
1121                                 "(%d) failed to get enough desc from vring\n",
1122                                 dev->vid);
1123                 return -1;
1124         }
1125
1126         VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
1127                         dev->vid, vq->last_avail_idx,
1128                         vq->last_avail_idx + nr_descs);
1129
1130         vq_inc_last_avail_packed(vq, nr_descs);
1131
1132         return 0;
1133 }
1134
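/*
 * Packed ring enqueue loop: try the batch path first, fall back to the
 * single-packet path, then flush the shadow used ring and notify the
 * guest.
 */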
1135 static __rte_noinline uint32_t
1136 virtio_dev_rx_packed(struct virtio_net *dev,
1137                      struct vhost_virtqueue *vq,
1138                      struct rte_mbuf **pkts,
1139                      uint32_t count)
1140 {
1141         uint32_t pkt_idx = 0;
1142         uint32_t remained = count;
1143
1144         do {
1145                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
1146
1147                 if (remained >= PACKED_BATCH_SIZE) {
1148                         if (!virtio_dev_rx_batch_packed(dev, vq,
1149                                                         &pkts[pkt_idx])) {
1150                                 pkt_idx += PACKED_BATCH_SIZE;
1151                                 remained -= PACKED_BATCH_SIZE;
1152                                 continue;
1153                         }
1154                 }
1155
1156                 if (virtio_dev_rx_single_packed(dev, vq, pkts[pkt_idx]))
1157                         break;
1158                 pkt_idx++;
1159                 remained--;
1160
1161         } while (pkt_idx < count);
1162
1163         if (vq->shadow_used_idx) {
1164                 do_data_copy_enqueue(dev, vq);
1165                 vhost_flush_enqueue_shadow_packed(dev, vq);
1166         }
1167
1168         if (pkt_idx)
1169                 vhost_vring_call_packed(dev, vq);
1170
1171         return pkt_idx;
1172 }
1173
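/*
 * Common enqueue entry point: validate the queue index, take the
 * access lock (and the IOTLB read lock when an IOMMU is used),
 * translate the rings if needed, then dispatch to the packed or split
 * implementation.
 */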
1174 static __rte_always_inline uint32_t
1175 virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
1176         struct rte_mbuf **pkts, uint32_t count)
1177 {
1178         struct vhost_virtqueue *vq;
1179         uint32_t nb_tx = 0;
1180
1181         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
1182         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
1183                 VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
1184                         dev->vid, __func__, queue_id);
1185                 return 0;
1186         }
1187
1188         vq = dev->virtqueue[queue_id];
1189
1190         rte_spinlock_lock(&vq->access_lock);
1191
1192         if (unlikely(vq->enabled == 0))
1193                 goto out_access_unlock;
1194
1195         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1196                 vhost_user_iotlb_rd_lock(vq);
1197
1198         if (unlikely(vq->access_ok == 0))
1199                 if (unlikely(vring_translate(dev, vq) < 0))
1200                         goto out;
1201
1202         count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
1203         if (count == 0)
1204                 goto out;
1205
1206         if (vq_is_packed(dev))
1207                 nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
1208         else
1209                 nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);
1210
1211 out:
1212         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1213                 vhost_user_iotlb_rd_unlock(vq);
1214
1215 out_access_unlock:
1216         rte_spinlock_unlock(&vq->access_lock);
1217
1218         return nb_tx;
1219 }
1220
1221 uint16_t
1222 rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
1223         struct rte_mbuf **pkts, uint16_t count)
1224 {
1225         struct virtio_net *dev = get_device(vid);
1226
1227         if (!dev)
1228                 return 0;
1229
1230         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
1231                 VHOST_LOG_DATA(ERR,
1232                         "(%d) %s: built-in vhost net backend is disabled.\n",
1233                         dev->vid, __func__);
1234                 return 0;
1235         }
1236
1237         return virtio_dev_rx(dev, queue_id, pkts, count);
1238 }
1239
1240 static inline bool
1241 virtio_net_with_host_offload(struct virtio_net *dev)
1242 {
1243         if (dev->features &
1244                         ((1ULL << VIRTIO_NET_F_CSUM) |
1245                          (1ULL << VIRTIO_NET_F_HOST_ECN) |
1246                          (1ULL << VIRTIO_NET_F_HOST_TSO4) |
1247                          (1ULL << VIRTIO_NET_F_HOST_TSO6) |
1248                          (1ULL << VIRTIO_NET_F_HOST_UFO)))
1249                 return true;
1250
1251         return false;
1252 }
1253
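/*
 * Parse the Ethernet/VLAN and IPv4/IPv6 headers of a packet to fill in
 * l2_len/l3_len and report the L4 protocol and L4 header location.
 */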
1254 static void
1255 parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
1256 {
1257         struct rte_ipv4_hdr *ipv4_hdr;
1258         struct rte_ipv6_hdr *ipv6_hdr;
1259         void *l3_hdr = NULL;
1260         struct rte_ether_hdr *eth_hdr;
1261         uint16_t ethertype;
1262
1263         eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1264
1265         m->l2_len = sizeof(struct rte_ether_hdr);
1266         ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
1267
1268         if (ethertype == RTE_ETHER_TYPE_VLAN) {
1269                 struct rte_vlan_hdr *vlan_hdr =
1270                         (struct rte_vlan_hdr *)(eth_hdr + 1);
1271
1272                 m->l2_len += sizeof(struct rte_vlan_hdr);
1273                 ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
1274         }
1275
1276         l3_hdr = (char *)eth_hdr + m->l2_len;
1277
1278         switch (ethertype) {
1279         case RTE_ETHER_TYPE_IPV4:
1280                 ipv4_hdr = l3_hdr;
1281                 *l4_proto = ipv4_hdr->next_proto_id;
1282                 m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
1283                 *l4_hdr = (char *)l3_hdr + m->l3_len;
1284                 m->ol_flags |= PKT_TX_IPV4;
1285                 break;
1286         case RTE_ETHER_TYPE_IPV6:
1287                 ipv6_hdr = l3_hdr;
1288                 *l4_proto = ipv6_hdr->proto;
1289                 m->l3_len = sizeof(struct rte_ipv6_hdr);
1290                 *l4_hdr = (char *)l3_hdr + m->l3_len;
1291                 m->ol_flags |= PKT_TX_IPV6;
1292                 break;
1293         default:
1294                 m->l3_len = 0;
1295                 *l4_proto = 0;
1296                 *l4_hdr = NULL;
1297                 break;
1298         }
1299 }
1300
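/*
 * Translate the virtio-net header of a packet coming from the guest
 * into mbuf offload flags: L4 checksum requests and TSO/UFO (GSO)
 * metadata.
 */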
1301 static __rte_always_inline void
1302 vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
1303 {
1304         uint16_t l4_proto = 0;
1305         void *l4_hdr = NULL;
1306         struct rte_tcp_hdr *tcp_hdr = NULL;
1307
1308         if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
1309                 return;
1310
1311         parse_ethernet(m, &l4_proto, &l4_hdr);
1312         if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1313                 if (hdr->csum_start == (m->l2_len + m->l3_len)) {
1314                         switch (hdr->csum_offset) {
1315                         case (offsetof(struct rte_tcp_hdr, cksum)):
1316                                 if (l4_proto == IPPROTO_TCP)
1317                                         m->ol_flags |= PKT_TX_TCP_CKSUM;
1318                                 break;
1319                         case (offsetof(struct rte_udp_hdr, dgram_cksum)):
1320                                 if (l4_proto == IPPROTO_UDP)
1321                                         m->ol_flags |= PKT_TX_UDP_CKSUM;
1322                                 break;
1323                         case (offsetof(struct rte_sctp_hdr, cksum)):
1324                                 if (l4_proto == IPPROTO_SCTP)
1325                                         m->ol_flags |= PKT_TX_SCTP_CKSUM;
1326                                 break;
1327                         default:
1328                                 break;
1329                         }
1330                 }
1331         }
1332
1333         if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1334                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1335                 case VIRTIO_NET_HDR_GSO_TCPV4:
1336                 case VIRTIO_NET_HDR_GSO_TCPV6:
1337                         tcp_hdr = l4_hdr;
1338                         m->ol_flags |= PKT_TX_TCP_SEG;
1339                         m->tso_segsz = hdr->gso_size;
1340                         m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
1341                         break;
1342                 case VIRTIO_NET_HDR_GSO_UDP:
1343                         m->ol_flags |= PKT_TX_UDP_SEG;
1344                         m->tso_segsz = hdr->gso_size;
1345                         m->l4_len = sizeof(struct rte_udp_hdr);
1346                         break;
1347                 default:
1348                         VHOST_LOG_DATA(WARNING,
1349                                 "unsupported gso type %u.\n", hdr->gso_type);
1350                         break;
1351                 }
1352         }
1353 }
1354
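/*
 * Gather a virtio-net header that is scattered across several
 * non-contiguous buffer chunks into a local copy.
 */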
1355 static __rte_noinline void
1356 copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
1357                 struct buf_vector *buf_vec)
1358 {
1359         uint64_t len;
1360         uint64_t remain = sizeof(struct virtio_net_hdr);
1361         uint64_t src;
1362         uint64_t dst = (uint64_t)(uintptr_t)hdr;
1363
1364         while (remain) {
1365                 len = RTE_MIN(remain, buf_vec->buf_len);
1366                 src = buf_vec->buf_addr;
1367                 rte_memcpy((void *)(uintptr_t)dst,
1368                                 (void *)(uintptr_t)src, len);
1369
1370                 remain -= len;
1371                 dst += len;
1372                 buf_vec++;
1373         }
1374 }
1375
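/*
 * Copy (or, in dequeue zero-copy mode, attach) the guest buffers
 * described by buf_vec into the mbuf, skipping the virtio-net header;
 * small copies are deferred to the batch copy array.
 */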
1376 static __rte_always_inline int
1377 copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
1378                   struct buf_vector *buf_vec, uint16_t nr_vec,
1379                   struct rte_mbuf *m, struct rte_mempool *mbuf_pool)
1380 {
1381         uint32_t buf_avail, buf_offset;
1382         uint64_t buf_addr, buf_iova, buf_len;
1383         uint32_t mbuf_avail, mbuf_offset;
1384         uint32_t cpy_len;
1385         struct rte_mbuf *cur = m, *prev = m;
1386         struct virtio_net_hdr tmp_hdr;
1387         struct virtio_net_hdr *hdr = NULL;
1388         /* A counter to avoid a dead loop in the desc chain */
1389         uint16_t vec_idx = 0;
1390         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
1391         int error = 0;
1392
1393         buf_addr = buf_vec[vec_idx].buf_addr;
1394         buf_iova = buf_vec[vec_idx].buf_iova;
1395         buf_len = buf_vec[vec_idx].buf_len;
1396
1397         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
1398                 error = -1;
1399                 goto out;
1400         }
1401
1402         if (virtio_net_with_host_offload(dev)) {
1403                 if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
1404                         /*
1405                          * No luck, the virtio-net header doesn't fit
1406                          * in a contiguous virtual area.
1407                          */
1408                         copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec);
1409                         hdr = &tmp_hdr;
1410                 } else {
1411                         hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
1412                 }
1413         }
1414
1415         /*
1416          * A virtio driver normally uses at least 2 desc buffers
1417          * for Tx: the first for storing the header, and the others
1418          * for storing the data.
1419          */
1420         if (unlikely(buf_len < dev->vhost_hlen)) {
1421                 buf_offset = dev->vhost_hlen - buf_len;
1422                 vec_idx++;
1423                 buf_addr = buf_vec[vec_idx].buf_addr;
1424                 buf_iova = buf_vec[vec_idx].buf_iova;
1425                 buf_len = buf_vec[vec_idx].buf_len;
1426                 buf_avail  = buf_len - buf_offset;
1427         } else if (buf_len == dev->vhost_hlen) {
1428                 if (unlikely(++vec_idx >= nr_vec))
1429                         goto out;
1430                 buf_addr = buf_vec[vec_idx].buf_addr;
1431                 buf_iova = buf_vec[vec_idx].buf_iova;
1432                 buf_len = buf_vec[vec_idx].buf_len;
1433
1434                 buf_offset = 0;
1435                 buf_avail = buf_len;
1436         } else {
1437                 buf_offset = dev->vhost_hlen;
1438                 buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
1439         }
1440
1441         PRINT_PACKET(dev,
1442                         (uintptr_t)(buf_addr + buf_offset),
1443                         (uint32_t)buf_avail, 0);
1444
1445         mbuf_offset = 0;
1446         mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
1447         while (1) {
1448                 uint64_t hpa;
1449
1450                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
1451
1452                 /*
1453                  * A desc buf might span two host physical pages that are
1454                  * not contiguous. In such a case (gpa_to_hpa returns 0), the
1455                  * data will be copied even though zero copy is enabled.
1456                  */
1457                 if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
1458                                         buf_iova + buf_offset, cpy_len)))) {
1459                         cur->data_len = cpy_len;
1460                         cur->data_off = 0;
1461                         cur->buf_addr =
1462                                 (void *)(uintptr_t)(buf_addr + buf_offset);
1463                         cur->buf_iova = hpa;
1464
1465                         /*
1466                          * In zero copy mode, one mbuf can only reference data
1467                          * for one desc buf, or only part of one.
1468                          */
1469                         mbuf_avail = cpy_len;
1470                 } else {
1471                         if (likely(cpy_len > MAX_BATCH_LEN ||
1472                                    vq->batch_copy_nb_elems >= vq->size ||
1473                                    (hdr && cur == m))) {
1474                                 rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
1475                                                                    mbuf_offset),
1476                                            (void *)((uintptr_t)(buf_addr +
1477                                                            buf_offset)),
1478                                            cpy_len);
1479                         } else {
1480                                 batch_copy[vq->batch_copy_nb_elems].dst =
1481                                         rte_pktmbuf_mtod_offset(cur, void *,
1482                                                                 mbuf_offset);
1483                                 batch_copy[vq->batch_copy_nb_elems].src =
1484                                         (void *)((uintptr_t)(buf_addr +
1485                                                                 buf_offset));
1486                                 batch_copy[vq->batch_copy_nb_elems].len =
1487                                         cpy_len;
1488                                 vq->batch_copy_nb_elems++;
1489                         }
1490                 }
1491
1492                 mbuf_avail  -= cpy_len;
1493                 mbuf_offset += cpy_len;
1494                 buf_avail -= cpy_len;
1495                 buf_offset += cpy_len;
1496
1497                 /* This buf has reached its end, get the next one */
1498                 if (buf_avail == 0) {
1499                         if (++vec_idx >= nr_vec)
1500                                 break;
1501
1502                         buf_addr = buf_vec[vec_idx].buf_addr;
1503                         buf_iova = buf_vec[vec_idx].buf_iova;
1504                         buf_len = buf_vec[vec_idx].buf_len;
1505
1506                         buf_offset = 0;
1507                         buf_avail  = buf_len;
1508
1509                         PRINT_PACKET(dev, (uintptr_t)buf_addr,
1510                                         (uint32_t)buf_avail, 0);
1511                 }
1512
1513                 /*
1514                  * This mbuf has reached its end, allocate a new one
1515                  * to hold more data.
1516                  */
1517                 if (mbuf_avail == 0) {
1518                         cur = rte_pktmbuf_alloc(mbuf_pool);
1519                         if (unlikely(cur == NULL)) {
1520                                 VHOST_LOG_DATA(ERR, "Failed to "
1521                                         "allocate memory for mbuf.\n");
1522                                 error = -1;
1523                                 goto out;
1524                         }
1525                         if (unlikely(dev->dequeue_zero_copy))
1526                                 rte_mbuf_refcnt_update(cur, 1);
1527
1528                         prev->next = cur;
1529                         prev->data_len = mbuf_offset;
1530                         m->nb_segs += 1;
1531                         m->pkt_len += mbuf_offset;
1532                         prev = cur;
1533
1534                         mbuf_offset = 0;
1535                         mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
1536                 }
1537         }
1538
1539         prev->data_len = mbuf_offset;
1540         m->pkt_len    += mbuf_offset;
1541
1542         if (hdr)
1543                 vhost_dequeue_offload(hdr, m);
1544
1545 out:
1546
1547         return error;
1548 }
1549
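/*
 * Find a free zero-copy mbuf slot, scanning circularly starting at
 * last_zmbuf_idx. Returns NULL when all slots are in use.
 */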
1550 static __rte_always_inline struct zcopy_mbuf *
1551 get_zmbuf(struct vhost_virtqueue *vq)
1552 {
1553         uint16_t i;
1554         uint16_t last;
1555         int tries = 0;
1556
1557         /* search [last_zmbuf_idx, zmbuf_size) */
1558         i = vq->last_zmbuf_idx;
1559         last = vq->zmbuf_size;
1560
1561 again:
1562         for (; i < last; i++) {
1563                 if (vq->zmbufs[i].in_use == 0) {
1564                         vq->last_zmbuf_idx = i + 1;
1565                         vq->zmbufs[i].in_use = 1;
1566                         return &vq->zmbufs[i];
1567                 }
1568         }
1569
1570         tries++;
1571         if (tries == 1) {
1572                 /* search [0, last_zmbuf_idx) */
1573                 i = 0;
1574                 last = vq->last_zmbuf_idx;
1575                 goto again;
1576         }
1577
1578         return NULL;
1579 }
1580
1581 static void
1582 virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)
1583 {
1584         rte_free(opaque);
1585 }
1586
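/*
 * Attach an externally allocated data buffer able to hold 'size' bytes
 * plus headroom to 'pkt'. The shared info is stored in the mbuf's own
 * data room when there is enough tailroom, otherwise at the tail of the
 * external buffer. Returns 0 on success, a negative value on failure.
 */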
1587 static int
1588 virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
1589 {
1590         struct rte_mbuf_ext_shared_info *shinfo = NULL;
1591         uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
1592         uint16_t buf_len;
1593         rte_iova_t iova;
1594         void *buf;
1595
1596         /* Try to use the pkt buffer to store shinfo, to reduce the amount of
1597          * memory required; otherwise store shinfo in the newly allocated buffer.
1598          */
1599         if (rte_pktmbuf_tailroom(pkt) >= sizeof(*shinfo))
1600                 shinfo = rte_pktmbuf_mtod(pkt,
1601                                           struct rte_mbuf_ext_shared_info *);
1602         else {
1603                 total_len += sizeof(*shinfo) + sizeof(uintptr_t);
1604                 total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
1605         }
1606
1607         if (unlikely(total_len > UINT16_MAX))
1608                 return -ENOSPC;
1609
1610         buf_len = total_len;
1611         buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
1612         if (unlikely(buf == NULL))
1613                 return -ENOMEM;
1614
1615         /* Initialize shinfo */
1616         if (shinfo) {
1617                 shinfo->free_cb = virtio_dev_extbuf_free;
1618                 shinfo->fcb_opaque = buf;
1619                 rte_mbuf_ext_refcnt_set(shinfo, 1);
1620         } else {
1621                 shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
1622                                               virtio_dev_extbuf_free, buf);
1623                 if (unlikely(shinfo == NULL)) {
1624                         rte_free(buf);
1625                         VHOST_LOG_DATA(ERR, "Failed to init shinfo\n");
1626                         return -1;
1627                 }
1628         }
1629
1630         iova = rte_malloc_virt2iova(buf);
1631         rte_pktmbuf_attach_extbuf(pkt, buf, iova, buf_len, shinfo);
1632         rte_pktmbuf_reset_headroom(pkt);
1633
1634         return 0;
1635 }
1636
1637 /*
1638  * Allocate a host-supported pktmbuf for 'data_len' bytes of packet data.
1639  */
1640 static __rte_always_inline struct rte_mbuf *
1641 virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
1642                          uint32_t data_len)
1643 {
1644         struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp);
1645
1646         if (unlikely(pkt == NULL)) {
1647                 VHOST_LOG_DATA(ERR,
1648                         "Failed to allocate memory for mbuf.\n");
1649                 return NULL;
1650         }
1651
1652         if (rte_pktmbuf_tailroom(pkt) >= data_len)
1653                 return pkt;
1654
1655         /* attach an external buffer if supported */
1656         if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
1657                 return pkt;
1658
1659         /* check if chained buffers are allowed */
1660         if (!dev->linearbuf)
1661                 return pkt;
1662
1663         /* Data doesn't fit into the buffer and the host supports
1664          * only linear buffers.
1665          */
1666         rte_pktmbuf_free(pkt);
1667
1668         return NULL;
1669 }
1670
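/*
 * Dequeue up to 'count' packets from a split virtqueue into 'pkts'.
 * For each available descriptor chain an mbuf is allocated and the
 * guest data is copied (or, in dequeue zero-copy mode, referenced)
 * into it; the used ring is then flushed and the guest notified.
 * In zero-copy mode the used-ring update for a buffer is deferred
 * until its mbuf has been consumed by the application.
 */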
1671 static __rte_noinline uint16_t
1672 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
1673         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
1674 {
1675         uint16_t i;
1676         uint16_t free_entries;
1677
1678         if (unlikely(dev->dequeue_zero_copy)) {
1679                 struct zcopy_mbuf *zmbuf, *next;
1680
1681                 for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
1682                      zmbuf != NULL; zmbuf = next) {
1683                         next = TAILQ_NEXT(zmbuf, next);
1684
1685                         if (mbuf_is_consumed(zmbuf->mbuf)) {
1686                                 update_shadow_used_ring_split(vq,
1687                                                 zmbuf->desc_idx, 0);
1688                                 TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
1689                                 restore_mbuf(zmbuf->mbuf);
1690                                 rte_pktmbuf_free(zmbuf->mbuf);
1691                                 put_zmbuf(zmbuf);
1692                                 vq->nr_zmbuf -= 1;
1693                         }
1694                 }
1695
1696                 if (likely(vq->shadow_used_idx)) {
1697                         flush_shadow_used_ring_split(dev, vq);
1698                         vhost_vring_call_split(dev, vq);
1699                 }
1700         }
1701
1702         free_entries = *((volatile uint16_t *)&vq->avail->idx) -
1703                         vq->last_avail_idx;
1704         if (free_entries == 0)
1705                 return 0;
1706
1707         /*
1708          * The ordering between the avail index read and
1709          * the descriptor reads needs to be enforced.
1710          */
1711         rte_smp_rmb();
1712
1713         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1714
1715         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
1716
1717         count = RTE_MIN(count, MAX_PKT_BURST);
1718         count = RTE_MIN(count, free_entries);
1719         VHOST_LOG_DATA(DEBUG, "(%d) about to dequeue %u buffers\n",
1720                         dev->vid, count);
1721
1722         for (i = 0; i < count; i++) {
1723                 struct buf_vector buf_vec[BUF_VECTOR_MAX];
1724                 uint16_t head_idx;
1725                 uint32_t buf_len;
1726                 uint16_t nr_vec = 0;
1727                 int err;
1728
1729                 if (unlikely(fill_vec_buf_split(dev, vq,
1730                                                 vq->last_avail_idx + i,
1731                                                 &nr_vec, buf_vec,
1732                                                 &head_idx, &buf_len,
1733                                                 VHOST_ACCESS_RO) < 0))
1734                         break;
1735
1736                 if (likely(dev->dequeue_zero_copy == 0))
1737                         update_shadow_used_ring_split(vq, head_idx, 0);
1738
1739                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
1740                 if (unlikely(pkts[i] == NULL))
1741                         break;
1742
1743                 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
1744                                 mbuf_pool);
1745                 if (unlikely(err)) {
1746                         rte_pktmbuf_free(pkts[i]);
1747                         break;
1748                 }
1749
1750                 if (unlikely(dev->dequeue_zero_copy)) {
1751                         struct zcopy_mbuf *zmbuf;
1752
1753                         zmbuf = get_zmbuf(vq);
1754                         if (!zmbuf) {
1755                                 rte_pktmbuf_free(pkts[i]);
1756                                 break;
1757                         }
1758                         zmbuf->mbuf = pkts[i];
1759                         zmbuf->desc_idx = head_idx;
1760
1761                         /*
1762                          * Pin the mbuf with an extra reference; we will
1763                          * check later whether it has been freed (i.e. we
1764                          * were the last user). If so, we can then safely
1765                          * update the used ring.
1766                          */
1767                         rte_mbuf_refcnt_update(pkts[i], 1);
1768
1769                         vq->nr_zmbuf += 1;
1770                         TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
1771                 }
1772         }
1773         vq->last_avail_idx += i;
1774
1775         if (likely(dev->dequeue_zero_copy == 0)) {
1776                 do_data_copy_dequeue(vq);
1777                 if (unlikely(i < count))
1778                         vq->shadow_used_idx = i;
1779                 if (likely(vq->shadow_used_idx)) {
1780                         flush_shadow_used_ring_split(dev, vq);
1781                         vhost_vring_call_split(dev, vq);
1782                 }
1783         }
1784
1785         return i;
1786 }
1787
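/*
 * Check whether a full batch of PACKED_BATCH_SIZE descriptors is
 * available at 'avail_idx' and, if so, translate their addresses and
 * allocate one mbuf per descriptor. Returns -1 when the batch cannot
 * be dequeued as a whole (wrap-counter mismatch, chained or indirect
 * descriptors, length mismatch, or mbuf allocation failure).
 */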
1788 static __rte_always_inline int
1789 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
1790                                  struct vhost_virtqueue *vq,
1791                                  struct rte_mempool *mbuf_pool,
1792                                  struct rte_mbuf **pkts,
1793                                  uint16_t avail_idx,
1794                                  uintptr_t *desc_addrs,
1795                                  uint16_t *ids)
1796 {
1797         bool wrap = vq->avail_wrap_counter;
1798         struct vring_packed_desc *descs = vq->desc_packed;
1799         struct virtio_net_hdr *hdr;
1800         uint64_t lens[PACKED_BATCH_SIZE];
1801         uint64_t buf_lens[PACKED_BATCH_SIZE];
1802         uint32_t buf_offset = dev->vhost_hlen;
1803         uint16_t flags, i;
1804
1805         if (unlikely(avail_idx & PACKED_BATCH_MASK))
1806                 return -1;
1807         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
1808                 return -1;
1809
1810         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1811                 flags = descs[avail_idx + i].flags;
1812                 if (unlikely((wrap != !!(flags & VRING_DESC_F_AVAIL)) ||
1813                              (wrap == !!(flags & VRING_DESC_F_USED))  ||
1814                              (flags & PACKED_DESC_SINGLE_DEQUEUE_FLAG)))
1815                         return -1;
1816         }
1817
1818         rte_smp_rmb();
1819
1820         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1821                 lens[i] = descs[avail_idx + i].len;
1822
1823         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1824                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
1825                                                   descs[avail_idx + i].addr,
1826                                                   &lens[i], VHOST_ACCESS_RW);
1827         }
1828
1829         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1830                 if (unlikely((lens[i] != descs[avail_idx + i].len)))
1831                         return -1;
1832         }
1833
1834         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1835                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]);
1836                 if (!pkts[i])
1837                         goto free_buf;
1838         }
1839
1840         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1841                 buf_lens[i] = pkts[i]->buf_len - pkts[i]->data_off;
1842
1843         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1844                 if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
1845                         goto free_buf;
1846         }
1847
1848         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1849                 pkts[i]->pkt_len = descs[avail_idx + i].len - buf_offset;
1850                 pkts[i]->data_len = pkts[i]->pkt_len;
1851                 ids[i] = descs[avail_idx + i].id;
1852         }
1853
1854         if (virtio_net_with_host_offload(dev)) {
1855                 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1856                         hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
1857                         vhost_dequeue_offload(hdr, pkts[i]);
1858                 }
1859         }
1860
1861         return 0;
1862
1863 free_buf:
1864         for (i = 0; i < PACKED_BATCH_SIZE; i++)
1865                 rte_pktmbuf_free(pkts[i]);
1866
1867         return -1;
1868 }
1869
1870 static __rte_always_inline int
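/*
 * Dequeue a full batch of PACKED_BATCH_SIZE descriptors: copy each
 * payload into its pre-allocated mbuf, record the used entries in the
 * shadow ring and advance the avail index.
 */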
1871 virtio_dev_tx_batch_packed(struct virtio_net *dev,
1872                            struct vhost_virtqueue *vq,
1873                            struct rte_mempool *mbuf_pool,
1874                            struct rte_mbuf **pkts)
1875 {
1876         uint16_t avail_idx = vq->last_avail_idx;
1877         uint32_t buf_offset = dev->vhost_hlen;
1878         uintptr_t desc_addrs[PACKED_BATCH_SIZE];
1879         uint16_t ids[PACKED_BATCH_SIZE];
1880         uint16_t i;
1881
1882         if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
1883                                              avail_idx, desc_addrs, ids))
1884                 return -1;
1885
1886         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1887                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
1888
1889         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1890                 rte_memcpy(rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
1891                            (void *)(uintptr_t)(desc_addrs[i] + buf_offset),
1892                            pkts[i]->pkt_len);
1893
1894         if (virtio_net_is_inorder(dev))
1895                 vhost_shadow_dequeue_batch_packed_inorder(vq,
1896                         ids[PACKED_BATCH_SIZE - 1]);
1897         else
1898                 vhost_shadow_dequeue_batch_packed(dev, vq, ids);
1899
1900         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
1901
1902         return 0;
1903 }
1904
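/*
 * Dequeue a single descriptor chain from a packed virtqueue into a
 * freshly allocated mbuf. Returns 0 on success, -1 on failure.
 */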
1905 static __rte_always_inline int
1906 vhost_dequeue_single_packed(struct virtio_net *dev,
1907                             struct vhost_virtqueue *vq,
1908                             struct rte_mempool *mbuf_pool,
1909                             struct rte_mbuf **pkts,
1910                             uint16_t *buf_id,
1911                             uint16_t *desc_count)
1912 {
1913         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1914         uint32_t buf_len;
1915         uint16_t nr_vec = 0;
1916         int err;
1917
1918         if (unlikely(fill_vec_buf_packed(dev, vq,
1919                                          vq->last_avail_idx, desc_count,
1920                                          buf_vec, &nr_vec,
1921                                          buf_id, &buf_len,
1922                                          VHOST_ACCESS_RO) < 0))
1923                 return -1;
1924
1925         *pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
1926         if (unlikely(*pkts == NULL)) {
1927                 VHOST_LOG_DATA(ERR,
1928                         "Failed to allocate memory for mbuf.\n");
1929                 return -1;
1930         }
1931
1932         err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
1933                                 mbuf_pool);
1934         if (unlikely(err)) {
1935                 rte_pktmbuf_free(*pkts);
1936                 return -1;
1937         }
1938
1939         return 0;
1940 }
1941
1942 static __rte_always_inline int
1943 virtio_dev_tx_single_packed(struct virtio_net *dev,
1944                             struct vhost_virtqueue *vq,
1945                             struct rte_mempool *mbuf_pool,
1946                             struct rte_mbuf **pkts)
1947 {
1948
1949         uint16_t buf_id, desc_count;
1950
1951         if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
1952                                         &desc_count))
1953                 return -1;
1954
1955         if (virtio_net_is_inorder(dev))
1956                 vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
1957                                                            desc_count);
1958         else
1959                 vhost_shadow_dequeue_single_packed(vq, buf_id, desc_count);
1960
1961         vq_inc_last_avail_packed(vq, desc_count);
1962
1963         return 0;
1964 }
1965
1966 static __rte_always_inline int
1967 virtio_dev_tx_batch_packed_zmbuf(struct virtio_net *dev,
1968                                  struct vhost_virtqueue *vq,
1969                                  struct rte_mempool *mbuf_pool,
1970                                  struct rte_mbuf **pkts)
1971 {
1972         struct zcopy_mbuf *zmbufs[PACKED_BATCH_SIZE];
1973         uintptr_t desc_addrs[PACKED_BATCH_SIZE];
1974         uint16_t ids[PACKED_BATCH_SIZE];
1975         uint16_t i;
1976
1977         uint16_t avail_idx = vq->last_avail_idx;
1978
1979         if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
1980                                              avail_idx, desc_addrs, ids))
1981                 return -1;
1982
1983         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1984                 zmbufs[i] = get_zmbuf(vq);
1985
1986         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1987                 if (!zmbufs[i])
1988                         goto free_pkt;
1989         }
1990
1991         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1992                 zmbufs[i]->mbuf = pkts[i];
1993                 zmbufs[i]->desc_idx = ids[i];
1994                 zmbufs[i]->desc_count = 1;
1995         }
1996
1997         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1998                 rte_mbuf_refcnt_update(pkts[i], 1);
1999
2000         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2001                 TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbufs[i], next);
2002
2003         vq->nr_zmbuf += PACKED_BATCH_SIZE;
2004         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
2005
2006         return 0;
2007
2008 free_pkt:
2009         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2010                 rte_pktmbuf_free(pkts[i]);
2011
2012         return -1;
2013 }
2014
2015 static __rte_always_inline int
2016 virtio_dev_tx_single_packed_zmbuf(struct virtio_net *dev,
2017                                   struct vhost_virtqueue *vq,
2018                                   struct rte_mempool *mbuf_pool,
2019                                   struct rte_mbuf **pkts)
2020 {
2021         uint16_t buf_id, desc_count;
2022         struct zcopy_mbuf *zmbuf;
2023
2024         if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
2025                                         &desc_count))
2026                 return -1;
2027
2028         zmbuf = get_zmbuf(vq);
2029         if (!zmbuf) {
2030                 rte_pktmbuf_free(*pkts);
2031                 return -1;
2032         }
2033         zmbuf->mbuf = *pkts;
2034         zmbuf->desc_idx = buf_id;
2035         zmbuf->desc_count = desc_count;
2036
2037         rte_mbuf_refcnt_update(*pkts, 1);
2038
2039         vq->nr_zmbuf += 1;
2040         TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
2041
2042         vq_inc_last_avail_packed(vq, desc_count);
2043         return 0;
2044 }
2045
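/*
 * Reclaim zero-copy mbufs that the application has released: for each
 * consumed mbuf, write the corresponding used descriptor back to the
 * packed ring, restore and free the mbuf, and return the zmbuf slot.
 */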
2046 static __rte_always_inline void
2047 free_zmbuf(struct vhost_virtqueue *vq)
2048 {
2049         struct zcopy_mbuf *next = NULL;
2050         struct zcopy_mbuf *zmbuf;
2051
2052         for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
2053              zmbuf != NULL; zmbuf = next) {
2054                 next = TAILQ_NEXT(zmbuf, next);
2055
2056                 uint16_t last_used_idx = vq->last_used_idx;
2057
2058                 if (mbuf_is_consumed(zmbuf->mbuf)) {
2059                         uint16_t flags;
2060                         flags = vq->desc_packed[last_used_idx].flags;
2061                         if (vq->used_wrap_counter) {
2062                                 flags |= VRING_DESC_F_USED;
2063                                 flags |= VRING_DESC_F_AVAIL;
2064                         } else {
2065                                 flags &= ~VRING_DESC_F_USED;
2066                                 flags &= ~VRING_DESC_F_AVAIL;
2067                         }
2068
2069                         vq->desc_packed[last_used_idx].id = zmbuf->desc_idx;
2070                         vq->desc_packed[last_used_idx].len = 0;
2071
2072                         rte_smp_wmb();
2073                         vq->desc_packed[last_used_idx].flags = flags;
2074
2075                         vq_inc_last_used_packed(vq, zmbuf->desc_count);
2076
2077                         TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
2078                         restore_mbuf(zmbuf->mbuf);
2079                         rte_pktmbuf_free(zmbuf->mbuf);
2080                         put_zmbuf(zmbuf);
2081                         vq->nr_zmbuf -= 1;
2082                 }
2083         }
2084 }
2085
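/*
 * Dequeue zero-copy path for packed virtqueues: reclaim already
 * consumed zero-copy mbufs, then hand out guest buffers in batches
 * when possible and one descriptor at a time otherwise.
 */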
2086 static __rte_noinline uint16_t
2087 virtio_dev_tx_packed_zmbuf(struct virtio_net *dev,
2088                            struct vhost_virtqueue *vq,
2089                            struct rte_mempool *mbuf_pool,
2090                            struct rte_mbuf **pkts,
2091                            uint32_t count)
2092 {
2093         uint32_t pkt_idx = 0;
2094         uint32_t remained = count;
2095
2096         free_zmbuf(vq);
2097
2098         do {
2099                 if (remained >= PACKED_BATCH_SIZE) {
2100                         if (!virtio_dev_tx_batch_packed_zmbuf(dev, vq,
2101                                 mbuf_pool, &pkts[pkt_idx])) {
2102                                 pkt_idx += PACKED_BATCH_SIZE;
2103                                 remained -= PACKED_BATCH_SIZE;
2104                                 continue;
2105                         }
2106                 }
2107
2108                 if (virtio_dev_tx_single_packed_zmbuf(dev, vq, mbuf_pool,
2109                                                       &pkts[pkt_idx]))
2110                         break;
2111                 pkt_idx++;
2112                 remained--;
2113
2114         } while (remained);
2115
2116         if (pkt_idx)
2117                 vhost_vring_call_packed(dev, vq);
2118
2119         return pkt_idx;
2120 }
2121
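/*
 * Dequeue from a packed virtqueue: process descriptors in batches of
 * PACKED_BATCH_SIZE where possible, fall back to single-descriptor
 * processing otherwise, then flush the shadow used ring and notify
 * the guest.
 */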
2122 static __rte_noinline uint16_t
2123 virtio_dev_tx_packed(struct virtio_net *dev,
2124                      struct vhost_virtqueue *vq,
2125                      struct rte_mempool *mbuf_pool,
2126                      struct rte_mbuf **pkts,
2127                      uint32_t count)
2128 {
2129         uint32_t pkt_idx = 0;
2130         uint32_t remained = count;
2131
2132         do {
2133                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
2134
2135                 if (remained >= PACKED_BATCH_SIZE) {
2136                         if (!virtio_dev_tx_batch_packed(dev, vq, mbuf_pool,
2137                                                         &pkts[pkt_idx])) {
2138                                 pkt_idx += PACKED_BATCH_SIZE;
2139                                 remained -= PACKED_BATCH_SIZE;
2140                                 continue;
2141                         }
2142                 }
2143
2144                 if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
2145                                                 &pkts[pkt_idx]))
2146                         break;
2147                 pkt_idx++;
2148                 remained--;
2149
2150         } while (remained);
2151
2152         if (vq->shadow_used_idx) {
2153                 do_data_copy_dequeue(vq);
2154
2155                 vhost_flush_dequeue_shadow_packed(dev, vq);
2156                 vhost_vring_call_packed(dev, vq);
2157         }
2158
2159         return pkt_idx;
2160 }
2161
2162 uint16_t
2163 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
2164         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
2165 {
2166         struct virtio_net *dev;
2167         struct rte_mbuf *rarp_mbuf = NULL;
2168         struct vhost_virtqueue *vq;
2169
2170         dev = get_device(vid);
2171         if (!dev)
2172                 return 0;
2173
2174         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
2175                 VHOST_LOG_DATA(ERR,
2176                         "(%d) %s: built-in vhost net backend is disabled.\n",
2177                         dev->vid, __func__);
2178                 return 0;
2179         }
2180
2181         if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
2182                 VHOST_LOG_DATA(ERR,
2183                         "(%d) %s: invalid virtqueue idx %d.\n",
2184                         dev->vid, __func__, queue_id);
2185                 return 0;
2186         }
2187
2188         vq = dev->virtqueue[queue_id];
2189
2190         if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
2191                 return 0;
2192
2193         if (unlikely(vq->enabled == 0)) {
2194                 count = 0;
2195                 goto out_access_unlock;
2196         }
2197
2198         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2199                 vhost_user_iotlb_rd_lock(vq);
2200
2201         if (unlikely(vq->access_ok == 0))
2202                 if (unlikely(vring_translate(dev, vq) < 0)) {
2203                         count = 0;
2204                         goto out;
2205                 }
2206
2207         /*
2208          * Construct a RARP broadcast packet and inject it into the "pkts"
2209          * array, to make it look like the guest actually sent such a packet.
2210          *
2211          * Check user_send_rarp() for more information.
2212          *
2213          * broadcast_rarp shares a cacheline in the virtio_net structure
2214          * with some fields that are accessed during enqueue and
2215          * rte_atomic16_cmpset() causes a write if using cmpxchg. This could
2216          * result in false sharing between enqueue and dequeue.
2217          *
2218          * Prevent unnecessary false sharing by reading broadcast_rarp first
2219          * and only performing cmpset if the read indicates it is likely to
2220          * be set.
2221          */
2222         if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) &&
2223                         rte_atomic16_cmpset((volatile uint16_t *)
2224                                 &dev->broadcast_rarp.cnt, 1, 0))) {
2225
2226                 rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
2227                 if (rarp_mbuf == NULL) {
2228                         VHOST_LOG_DATA(ERR, "Failed to make RARP packet.\n");
2229                         count = 0;
2230                         goto out;
2231                 }
2232                 count -= 1;
2233         }
2234
2235         if (vq_is_packed(dev)) {
2236                 if (unlikely(dev->dequeue_zero_copy))
2237                         count = virtio_dev_tx_packed_zmbuf(dev, vq, mbuf_pool,
2238                                                            pkts, count);
2239                 else
2240                         count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts,
2241                                                      count);
2242         } else
2243                 count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count);
2244
2245 out:
2246         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2247                 vhost_user_iotlb_rd_unlock(vq);
2248
2249 out_access_unlock:
2250         rte_spinlock_unlock(&vq->access_lock);
2251
2252         if (unlikely(rarp_mbuf != NULL)) {
2253                 /*
2254                  * Inject it at the head of the "pkts" array, so that the
2255                  * switch's MAC learning table gets updated first.
2256                  */
2257                 memmove(&pkts[1], pkts, count * sizeof(struct rte_mbuf *));
2258                 pkts[0] = rarp_mbuf;
2259                 count += 1;
2260         }
2261
2262         return count;
2263 }
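
/*
 * Illustrative sketch (not part of the library): one way an application
 * might drain a vhost queue with rte_vhost_dequeue_burst() and release
 * the mbufs afterwards. The helper name is hypothetical, and 'vid',
 * 'queue_id' and 'mp' are assumed to be set up elsewhere; dequeue
 * operates on the guest TX queues, i.e. odd queue indexes such as
 * VIRTIO_TXQ.
 */
#if 0
static void
example_drain_vhost_queue(int vid, uint16_t queue_id, struct rte_mempool *mp)
{
        struct rte_mbuf *pkts[MAX_PKT_BURST];
        uint16_t nr, i;

        /* Dequeue up to MAX_PKT_BURST packets sent by the guest. */
        nr = rte_vhost_dequeue_burst(vid, queue_id, mp, pkts, MAX_PKT_BURST);
        for (i = 0; i < nr; i++) {
                /* A real application would forward the packet here. */
                rte_pktmbuf_free(pkts[i]);
        }
}
#endif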