vhost: fix shadow update
[dpdk.git] / lib/librte_vhost/virtio_net.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <stdbool.h>
7 #include <linux/virtio_net.h>
8
9 #include <rte_mbuf.h>
10 #include <rte_memcpy.h>
11 #include <rte_ether.h>
12 #include <rte_ip.h>
13 #include <rte_vhost.h>
14 #include <rte_tcp.h>
15 #include <rte_udp.h>
16 #include <rte_sctp.h>
17 #include <rte_arp.h>
18 #include <rte_spinlock.h>
19 #include <rte_malloc.h>
20
21 #include "iotlb.h"
22 #include "vhost.h"
23
24 #define MAX_PKT_BURST 32
25
26 #define MAX_BATCH_LEN 256
27
28 static __rte_always_inline bool
29 rxvq_is_mergeable(struct virtio_net *dev)
30 {
31         return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
32 }
33
34 static __rte_always_inline bool
35 virtio_net_is_inorder(struct virtio_net *dev)
36 {
37         return dev->features & (1ULL << VIRTIO_F_IN_ORDER);
38 }
39
40 static bool
41 is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
42 {
43         return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
44 }
45
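/*
 * Split-ring shadow used ring: used-ring updates are first accumulated in
 * vq->shadow_used_split and then copied to the guest-visible used ring in
 * one or two memcpy calls, so used->idx is only bumped once per burst.
 */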
46 static __rte_always_inline void
47 do_flush_shadow_used_ring_split(struct virtio_net *dev,
48                         struct vhost_virtqueue *vq,
49                         uint16_t to, uint16_t from, uint16_t size)
50 {
51         rte_memcpy(&vq->used->ring[to],
52                         &vq->shadow_used_split[from],
53                         size * sizeof(struct vring_used_elem));
54         vhost_log_cache_used_vring(dev, vq,
55                         offsetof(struct vring_used, ring[to]),
56                         size * sizeof(struct vring_used_elem));
57 }
58
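/*
 * Flush the shadow entries to the used ring. When the batch wraps past the
 * end of the ring, it is copied in two chunks. A write barrier ensures the
 * ring entries are visible to the guest before used->idx is updated.
 */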
59 static __rte_always_inline void
60 flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
61 {
62         uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
63
64         if (used_idx + vq->shadow_used_idx <= vq->size) {
65                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
66                                           vq->shadow_used_idx);
67         } else {
68                 uint16_t size;
69
70                 /* flush the first chunk: used ring interval [used_idx, vq->size) */
71                 size = vq->size - used_idx;
72                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);
73
74                 /* wrap around: flush the remaining interval [0, shadow_used_idx - size) */
75                 do_flush_shadow_used_ring_split(dev, vq, 0, size,
76                                           vq->shadow_used_idx - size);
77         }
78         vq->last_used_idx += vq->shadow_used_idx;
79
80         rte_smp_wmb();
81
82         vhost_log_cache_sync(dev, vq);
83
84         *(volatile uint16_t *)&vq->used->idx += vq->shadow_used_idx;
85         vq->shadow_used_idx = 0;
86         vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
87                 sizeof(vq->used->idx));
88 }
89
90 static __rte_always_inline void
91 update_shadow_used_ring_split(struct vhost_virtqueue *vq,
92                          uint16_t desc_idx, uint32_t len)
93 {
94         uint16_t i = vq->shadow_used_idx++;
95
96         vq->shadow_used_split[i].id  = desc_idx;
97         vq->shadow_used_split[i].len = len;
98 }
99
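/*
 * Flush shadow entries to the packed descriptor ring. Descriptor ids and
 * lengths are written in a first pass, then, after a write barrier, the
 * flags. The flags of the first (head) descriptor are written last, so the
 * guest cannot observe the batch before all other descriptors are complete.
 */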
100 static __rte_always_inline void
101 vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
102                                   struct vhost_virtqueue *vq)
103 {
104         int i;
105         uint16_t used_idx = vq->last_used_idx;
106         uint16_t head_idx = vq->last_used_idx;
107         uint16_t head_flags = 0;
108
109         /* Split the loop in two so one write barrier orders all id/len writes before the flags writes */
110         for (i = 0; i < vq->shadow_used_idx; i++) {
111                 vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
112                 vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;
113
114                 used_idx += vq->shadow_used_packed[i].count;
115                 if (used_idx >= vq->size)
116                         used_idx -= vq->size;
117         }
118
119         rte_smp_wmb();
120
121         for (i = 0; i < vq->shadow_used_idx; i++) {
122                 uint16_t flags;
123
124                 if (vq->shadow_used_packed[i].len)
125                         flags = VRING_DESC_F_WRITE;
126                 else
127                         flags = 0;
128
129                 if (vq->used_wrap_counter) {
130                         flags |= VRING_DESC_F_USED;
131                         flags |= VRING_DESC_F_AVAIL;
132                 } else {
133                         flags &= ~VRING_DESC_F_USED;
134                         flags &= ~VRING_DESC_F_AVAIL;
135                 }
136
137                 if (i > 0) {
138                         vq->desc_packed[vq->last_used_idx].flags = flags;
139
140                         vhost_log_cache_used_vring(dev, vq,
141                                         vq->last_used_idx *
142                                         sizeof(struct vring_packed_desc),
143                                         sizeof(struct vring_packed_desc));
144                 } else {
145                         head_idx = vq->last_used_idx;
146                         head_flags = flags;
147                 }
148
149                 vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count);
150         }
151
152         vq->desc_packed[head_idx].flags = head_flags;
153
154         vhost_log_cache_used_vring(dev, vq,
155                                 head_idx *
156                                 sizeof(struct vring_packed_desc),
157                                 sizeof(struct vring_packed_desc));
158
159         vq->shadow_used_idx = 0;
160         vhost_log_cache_sync(dev, vq);
161 }
162
163 static __rte_always_inline void
164 vhost_flush_dequeue_shadow_packed(struct virtio_net *dev,
165                                   struct vhost_virtqueue *vq)
166 {
167         struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];
168
169         vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
170         rte_smp_wmb();
171         vq->desc_packed[vq->shadow_last_used_idx].flags = used_elem->flags;
172
173         vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
174                                    sizeof(struct vring_packed_desc),
175                                    sizeof(struct vring_packed_desc));
176         vq->shadow_used_idx = 0;
177         vhost_log_cache_sync(dev, vq);
178 }
179
180 static __rte_always_inline void
181 vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
182                                  struct vhost_virtqueue *vq,
183                                  uint64_t *lens,
184                                  uint16_t *ids)
185 {
186         uint16_t i;
187         uint16_t flags;
188
189         flags = PACKED_DESC_ENQUEUE_USED_FLAG(vq->used_wrap_counter);
190
191         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
192                 vq->desc_packed[vq->last_used_idx + i].id = ids[i];
193                 vq->desc_packed[vq->last_used_idx + i].len = lens[i];
194         }
195
196         rte_smp_wmb();
197
198         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
199                 vq->desc_packed[vq->last_used_idx + i].flags = flags;
200
201         vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
202                                    sizeof(struct vring_packed_desc),
203                                    sizeof(struct vring_packed_desc) *
204                                    PACKED_BATCH_SIZE);
205         vhost_log_cache_sync(dev, vq);
206
207         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
208 }
209
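/*
 * In-order dequeue (VIRTIO_F_IN_ORDER): buffers are used in the order they
 * were made available, so a single shadow entry carrying the batch id is
 * enough; it is written back later by vhost_flush_dequeue_shadow_packed().
 */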
210 static __rte_always_inline void
211 vhost_shadow_dequeue_batch_packed_inorder(struct vhost_virtqueue *vq,
212                                           uint16_t id)
213 {
214         vq->shadow_used_packed[0].id = id;
215
216         if (!vq->shadow_used_idx) {
217                 vq->shadow_last_used_idx = vq->last_used_idx;
218                 vq->shadow_used_packed[0].flags =
219                         PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
220                 vq->shadow_used_packed[0].len = 0;
221                 vq->shadow_used_packed[0].count = 1;
222                 vq->shadow_used_idx++;
223         }
224
225         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
226 }
227
228 static __rte_always_inline void
229 vhost_shadow_dequeue_batch_packed(struct virtio_net *dev,
230                                   struct vhost_virtqueue *vq,
231                                   uint16_t *ids)
232 {
233         uint16_t flags;
234         uint16_t i;
235         uint16_t begin;
236
237         flags = PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
238
239         if (!vq->shadow_used_idx) {
240                 vq->shadow_last_used_idx = vq->last_used_idx;
241                 vq->shadow_used_packed[0].id  = ids[0];
242                 vq->shadow_used_packed[0].len = 0;
243                 vq->shadow_used_packed[0].count = 1;
244                 vq->shadow_used_packed[0].flags = flags;
245                 vq->shadow_used_idx++;
246                 begin = 1;
247         } else
248                 begin = 0;
249
250         vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE) {
251                 vq->desc_packed[vq->last_used_idx + i].id = ids[i];
252                 vq->desc_packed[vq->last_used_idx + i].len = 0;
253         }
254
255         rte_smp_wmb();
256         vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE)
257                 vq->desc_packed[vq->last_used_idx + i].flags = flags;
258
259         vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
260                                    sizeof(struct vring_packed_desc),
261                                    sizeof(struct vring_packed_desc) *
262                                    PACKED_BATCH_SIZE);
263         vhost_log_cache_sync(dev, vq);
264
265         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
266 }
267
268 static __rte_always_inline void
269 vhost_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
270                                    uint16_t buf_id,
271                                    uint16_t count)
272 {
273         uint16_t flags;
274
275         flags = vq->desc_packed[vq->last_used_idx].flags;
276         if (vq->used_wrap_counter) {
277                 flags |= VRING_DESC_F_USED;
278                 flags |= VRING_DESC_F_AVAIL;
279         } else {
280                 flags &= ~VRING_DESC_F_USED;
281                 flags &= ~VRING_DESC_F_AVAIL;
282         }
283
284         if (!vq->shadow_used_idx) {
285                 vq->shadow_last_used_idx = vq->last_used_idx;
286
287                 vq->shadow_used_packed[0].id  = buf_id;
288                 vq->shadow_used_packed[0].len = 0;
289                 vq->shadow_used_packed[0].flags = flags;
290                 vq->shadow_used_idx++;
291         } else {
292                 vq->desc_packed[vq->last_used_idx].id = buf_id;
293                 vq->desc_packed[vq->last_used_idx].len = 0;
294                 vq->desc_packed[vq->last_used_idx].flags = flags;
295         }
296
297         vq_inc_last_used_packed(vq, count);
298 }
299
300 static __rte_always_inline void
301 vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
302                                            uint16_t buf_id,
303                                            uint16_t count)
304 {
305         uint16_t flags;
306
307         vq->shadow_used_packed[0].id = buf_id;
308
309         flags = vq->desc_packed[vq->last_used_idx].flags;
310         if (vq->used_wrap_counter) {
311                 flags |= VRING_DESC_F_USED;
312                 flags |= VRING_DESC_F_AVAIL;
313         } else {
314                 flags &= ~VRING_DESC_F_USED;
315                 flags &= ~VRING_DESC_F_AVAIL;
316         }
317
318         if (!vq->shadow_used_idx) {
319                 vq->shadow_last_used_idx = vq->last_used_idx;
320                 vq->shadow_used_packed[0].len = 0;
321                 vq->shadow_used_packed[0].flags = flags;
322                 vq->shadow_used_idx++;
323         }
324
325         vq_inc_last_used_packed(vq, count);
326 }
327
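/*
 * Small copies (up to MAX_BATCH_LEN bytes) are queued in
 * vq->batch_copy_elems and executed here in one pass, together with the
 * dirty-page logging needed on the enqueue side; larger copies are done
 * immediately at the call site.
 */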
328 static inline void
329 do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
330 {
331         struct batch_copy_elem *elem = vq->batch_copy_elems;
332         uint16_t count = vq->batch_copy_nb_elems;
333         int i;
334
335         for (i = 0; i < count; i++) {
336                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
337                 vhost_log_cache_write_iova(dev, vq, elem[i].log_addr,
338                                            elem[i].len);
339                 PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
340         }
341
342         vq->batch_copy_nb_elems = 0;
343 }
344
345 static inline void
346 do_data_copy_dequeue(struct vhost_virtqueue *vq)
347 {
348         struct batch_copy_elem *elem = vq->batch_copy_elems;
349         uint16_t count = vq->batch_copy_nb_elems;
350         int i;
351
352         for (i = 0; i < count; i++)
353                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
354
355         vq->batch_copy_nb_elems = 0;
356 }
357
358 static __rte_always_inline void
359 vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
360                                    struct vhost_virtqueue *vq,
361                                    uint32_t len[],
362                                    uint16_t id[],
363                                    uint16_t count[],
364                                    uint16_t num_buffers)
365 {
366         uint16_t i;
367         for (i = 0; i < num_buffers; i++) {
368                 /* flush the shadow ring once it crosses a batch-size boundary */
369                 if (!vq->shadow_used_idx)
370                         vq->shadow_aligned_idx = vq->last_used_idx &
371                                 PACKED_BATCH_MASK;
372                 vq->shadow_used_packed[vq->shadow_used_idx].id  = id[i];
373                 vq->shadow_used_packed[vq->shadow_used_idx].len = len[i];
374                 vq->shadow_used_packed[vq->shadow_used_idx].count = count[i];
375                 vq->shadow_aligned_idx += count[i];
376                 vq->shadow_used_idx++;
377         }
378
379         if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
380                 do_data_copy_enqueue(dev, vq);
381                 vhost_flush_enqueue_shadow_packed(dev, vq);
382         }
383 }
384
385 /* skip the write when the value is unchanged, to lessen cache pressure */
386 #define ASSIGN_UNLESS_EQUAL(var, val) do {      \
387         if ((var) != (val))                     \
388                 (var) = (val);                  \
389 } while (0)
390
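/*
 * Translate the mbuf Tx offload requests (PKT_TX_* flags) into the
 * virtio_net_hdr expected by the guest: csum_start/csum_offset for L4
 * checksum offload and gso_type/gso_size/hdr_len for TSO/UFO.
 */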
391 static __rte_always_inline void
392 virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
393 {
394         uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;
395
396         if (m_buf->ol_flags & PKT_TX_TCP_SEG)
397                 csum_l4 |= PKT_TX_TCP_CKSUM;
398
399         if (csum_l4) {
400                 net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
401                 net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
402
403                 switch (csum_l4) {
404                 case PKT_TX_TCP_CKSUM:
405                         net_hdr->csum_offset = (offsetof(struct rte_tcp_hdr,
406                                                 cksum));
407                         break;
408                 case PKT_TX_UDP_CKSUM:
409                         net_hdr->csum_offset = (offsetof(struct rte_udp_hdr,
410                                                 dgram_cksum));
411                         break;
412                 case PKT_TX_SCTP_CKSUM:
413                         net_hdr->csum_offset = (offsetof(struct rte_sctp_hdr,
414                                                 cksum));
415                         break;
416                 }
417         } else {
418                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
419                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
420                 ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
421         }
422
423         /* IP checksum offload cannot be passed through the virtio-net header, so compute it here */
424         if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
425                 struct rte_ipv4_hdr *ipv4_hdr;
426
427                 ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
428                                                    m_buf->l2_len);
429                 ipv4_hdr->hdr_checksum = 0;
430                 ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
431         }
432
433         if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
434                 if (m_buf->ol_flags & PKT_TX_IPV4)
435                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
436                 else
437                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
438                 net_hdr->gso_size = m_buf->tso_segsz;
439                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
440                                         + m_buf->l4_len;
441         } else if (m_buf->ol_flags & PKT_TX_UDP_SEG) {
442                 net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
443                 net_hdr->gso_size = m_buf->tso_segsz;
444                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
445                         m_buf->l4_len;
446         } else {
447                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
448                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
449                 ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
450         }
451 }
452
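/*
 * Translate one descriptor's guest IOVA range into host virtual addresses.
 * A guest-contiguous range may map to several host-contiguous chunks, so
 * one buf_vec entry is appended per chunk.
 */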
453 static __rte_always_inline int
454 map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
455                 struct buf_vector *buf_vec, uint16_t *vec_idx,
456                 uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
457 {
458         uint16_t vec_id = *vec_idx;
459
460         while (desc_len) {
461                 uint64_t desc_addr;
462                 uint64_t desc_chunck_len = desc_len;
463
464                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
465                         return -1;
466
467                 desc_addr = vhost_iova_to_vva(dev, vq,
468                                 desc_iova,
469                                 &desc_chunck_len,
470                                 perm);
471                 if (unlikely(!desc_addr))
472                         return -1;
473
474                 rte_prefetch0((void *)(uintptr_t)desc_addr);
475
476                 buf_vec[vec_id].buf_iova = desc_iova;
477                 buf_vec[vec_id].buf_addr = desc_addr;
478                 buf_vec[vec_id].buf_len  = desc_chunck_len;
479
480                 desc_len -= desc_chunck_len;
481                 desc_iova += desc_chunck_len;
482                 vec_id++;
483         }
484         *vec_idx = vec_id;
485
486         return 0;
487 }
488
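/*
 * Walk one split-ring descriptor chain starting at the given avail index,
 * resolving indirect descriptor tables when present, and collect the mapped
 * buffers into buf_vec. The nr_descs/cnt bound protects against malformed
 * (looping) chains.
 */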
489 static __rte_always_inline int
490 fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
491                          uint32_t avail_idx, uint16_t *vec_idx,
492                          struct buf_vector *buf_vec, uint16_t *desc_chain_head,
493                          uint32_t *desc_chain_len, uint8_t perm)
494 {
495         uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
496         uint16_t vec_id = *vec_idx;
497         uint32_t len    = 0;
498         uint64_t dlen;
499         uint32_t nr_descs = vq->size;
500         uint32_t cnt    = 0;
501         struct vring_desc *descs = vq->desc;
502         struct vring_desc *idesc = NULL;
503
504         if (unlikely(idx >= vq->size))
505                 return -1;
506
507         *desc_chain_head = idx;
508
509         if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
510                 dlen = vq->desc[idx].len;
511                 nr_descs = dlen / sizeof(struct vring_desc);
512                 if (unlikely(nr_descs > vq->size))
513                         return -1;
514
515                 descs = (struct vring_desc *)(uintptr_t)
516                         vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
517                                                 &dlen,
518                                                 VHOST_ACCESS_RO);
519                 if (unlikely(!descs))
520                         return -1;
521
522                 if (unlikely(dlen < vq->desc[idx].len)) {
523                         /*
524                          * The indirect desc table is not contiguous
525                          * in the process VA space, so we have to copy it.
526                          */
527                         idesc = vhost_alloc_copy_ind_table(dev, vq,
528                                         vq->desc[idx].addr, vq->desc[idx].len);
529                         if (unlikely(!idesc))
530                                 return -1;
531
532                         descs = idesc;
533                 }
534
535                 idx = 0;
536         }
537
538         while (1) {
539                 if (unlikely(idx >= nr_descs || cnt++ >= nr_descs)) {
540                         free_ind_table(idesc);
541                         return -1;
542                 }
543
544                 len += descs[idx].len;
545
546                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
547                                                 descs[idx].addr, descs[idx].len,
548                                                 perm))) {
549                         free_ind_table(idesc);
550                         return -1;
551                 }
552
553                 if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
554                         break;
555
556                 idx = descs[idx].next;
557         }
558
559         *desc_chain_len = len;
560         *vec_idx = vec_id;
561
562         if (unlikely(!!idesc))
563                 free_ind_table(idesc);
564
565         return 0;
566 }
567
568 /*
569  * Returns -1 on failure, 0 on success.
570  */
571 static inline int
572 reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
573                                 uint32_t size, struct buf_vector *buf_vec,
574                                 uint16_t *num_buffers, uint16_t avail_head,
575                                 uint16_t *nr_vec)
576 {
577         uint16_t cur_idx;
578         uint16_t vec_idx = 0;
579         uint16_t max_tries, tries = 0;
580
581         uint16_t head_idx = 0;
582         uint32_t len = 0;
583
584         *num_buffers = 0;
585         cur_idx  = vq->last_avail_idx;
586
587         if (rxvq_is_mergeable(dev))
588                 max_tries = vq->size - 1;
589         else
590                 max_tries = 1;
591
592         while (size > 0) {
593                 if (unlikely(cur_idx == avail_head))
594                         return -1;
595                 /*
596                  * If we have tried all available ring entries and still
597                  * cannot get enough buffers, something abnormal has
598                  * happened.
599                  */
600                 if (unlikely(++tries > max_tries))
601                         return -1;
602
603                 if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
604                                                 &vec_idx, buf_vec,
605                                                 &head_idx, &len,
606                                                 VHOST_ACCESS_RW) < 0))
607                         return -1;
608                 len = RTE_MIN(len, size);
609                 update_shadow_used_ring_split(vq, head_idx, len);
610                 size -= len;
611
612                 cur_idx++;
613                 *num_buffers += 1;
614         }
615
616         *nr_vec = vec_idx;
617
618         return 0;
619 }
620
621 static __rte_always_inline int
622 fill_vec_buf_packed_indirect(struct virtio_net *dev,
623                         struct vhost_virtqueue *vq,
624                         struct vring_packed_desc *desc, uint16_t *vec_idx,
625                         struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
626 {
627         uint16_t i;
628         uint32_t nr_descs;
629         uint16_t vec_id = *vec_idx;
630         uint64_t dlen;
631         struct vring_packed_desc *descs, *idescs = NULL;
632
633         dlen = desc->len;
634         descs = (struct vring_packed_desc *)(uintptr_t)
635                 vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
636         if (unlikely(!descs))
637                 return -1;
638
639         if (unlikely(dlen < desc->len)) {
640                 /*
641                  * The indirect desc table is not contiguous
642                          * in the process VA space, so we have to copy it.
643                  */
644                 idescs = vhost_alloc_copy_ind_table(dev,
645                                 vq, desc->addr, desc->len);
646                 if (unlikely(!idescs))
647                         return -1;
648
649                 descs = idescs;
650         }
651
652         nr_descs = desc->len / sizeof(struct vring_packed_desc);
653         if (unlikely(nr_descs >= vq->size)) {
654                 free_ind_table(idescs);
655                 return -1;
656         }
657
658         for (i = 0; i < nr_descs; i++) {
659                 if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
660                         free_ind_table(idescs);
661                         return -1;
662                 }
663
664                 *len += descs[i].len;
665                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
666                                                 descs[i].addr, descs[i].len,
667                                                 perm)))
668                         return -1;
669         }
670         *vec_idx = vec_id;
671
672         if (unlikely(!!idescs))
673                 free_ind_table(idescs);
674
675         return 0;
676 }
677
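/*
 * Collect the buffers of one packed-ring descriptor chain. Availability is
 * detected by comparing the AVAIL/USED flag bits against the expected wrap
 * counter, which toggles each time the ring index wraps.
 */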
678 static __rte_always_inline int
679 fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
680                                 uint16_t avail_idx, uint16_t *desc_count,
681                                 struct buf_vector *buf_vec, uint16_t *vec_idx,
682                                 uint16_t *buf_id, uint32_t *len, uint8_t perm)
683 {
684         bool wrap_counter = vq->avail_wrap_counter;
685         struct vring_packed_desc *descs = vq->desc_packed;
686         uint16_t vec_id = *vec_idx;
687
688         if (avail_idx < vq->last_avail_idx)
689                 wrap_counter ^= 1;
690
691         /*
692          * Perform a load-acquire barrier in desc_is_avail to
693          * enforce the ordering between desc flags and desc
694          * content.
695          */
696         if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
697                 return -1;
698
699         *desc_count = 0;
700         *len = 0;
701
702         while (1) {
703                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
704                         return -1;
705
706                 if (unlikely(*desc_count >= vq->size))
707                         return -1;
708
709                 *desc_count += 1;
710                 *buf_id = descs[avail_idx].id;
711
712                 if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
713                         if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
714                                                         &descs[avail_idx],
715                                                         &vec_id, buf_vec,
716                                                         len, perm) < 0))
717                                 return -1;
718                 } else {
719                         *len += descs[avail_idx].len;
720
721                         if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
722                                                         descs[avail_idx].addr,
723                                                         descs[avail_idx].len,
724                                                         perm)))
725                                 return -1;
726                 }
727
728                 if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
729                         break;
730
731                 if (++avail_idx >= vq->size) {
732                         avail_idx -= vq->size;
733                         wrap_counter ^= 1;
734                 }
735         }
736
737         *vec_idx = vec_id;
738
739         return 0;
740 }
741
742 static __rte_noinline void
743 copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
744                 struct buf_vector *buf_vec,
745                 struct virtio_net_hdr_mrg_rxbuf *hdr)
746 {
747         uint64_t len;
748         uint64_t remain = dev->vhost_hlen;
749         uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
750         uint64_t iova = buf_vec->buf_iova;
751
752         while (remain) {
753                 len = RTE_MIN(remain,
754                                 buf_vec->buf_len);
755                 dst = buf_vec->buf_addr;
756                 rte_memcpy((void *)(uintptr_t)dst,
757                                 (void *)(uintptr_t)src,
758                                 len);
759
760                 PRINT_PACKET(dev, (uintptr_t)dst,
761                                 (uint32_t)len, 0);
762                 vhost_log_cache_write_iova(dev, vq,
763                                 iova, len);
764
765                 remain -= len;
766                 iova += len;
767                 src += len;
768                 buf_vec++;
769         }
770 }
771
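/*
 * Enqueue copy path: write the virtio-net header into the first buffer(s),
 * then copy the mbuf chain into the guest buffers described by buf_vec,
 * batching small copies and logging every written range for live migration.
 */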
772 static __rte_always_inline int
773 copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
774                             struct rte_mbuf *m, struct buf_vector *buf_vec,
775                             uint16_t nr_vec, uint16_t num_buffers)
776 {
777         uint32_t vec_idx = 0;
778         uint32_t mbuf_offset, mbuf_avail;
779         uint32_t buf_offset, buf_avail;
780         uint64_t buf_addr, buf_iova, buf_len;
781         uint32_t cpy_len;
782         uint64_t hdr_addr;
783         struct rte_mbuf *hdr_mbuf;
784         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
785         struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
786         int error = 0;
787
788         if (unlikely(m == NULL)) {
789                 error = -1;
790                 goto out;
791         }
792
793         buf_addr = buf_vec[vec_idx].buf_addr;
794         buf_iova = buf_vec[vec_idx].buf_iova;
795         buf_len = buf_vec[vec_idx].buf_len;
796
797         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
798                 error = -1;
799                 goto out;
800         }
801
802         hdr_mbuf = m;
803         hdr_addr = buf_addr;
804         if (unlikely(buf_len < dev->vhost_hlen))
805                 hdr = &tmp_hdr;
806         else
807                 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
808
809         VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
810                 dev->vid, num_buffers);
811
812         if (unlikely(buf_len < dev->vhost_hlen)) {
813                 buf_offset = dev->vhost_hlen - buf_len;
814                 vec_idx++;
815                 buf_addr = buf_vec[vec_idx].buf_addr;
816                 buf_iova = buf_vec[vec_idx].buf_iova;
817                 buf_len = buf_vec[vec_idx].buf_len;
818                 buf_avail = buf_len - buf_offset;
819         } else {
820                 buf_offset = dev->vhost_hlen;
821                 buf_avail = buf_len - dev->vhost_hlen;
822         }
823
824         mbuf_avail  = rte_pktmbuf_data_len(m);
825         mbuf_offset = 0;
826         while (mbuf_avail != 0 || m->next != NULL) {
827                 /* done with current buf, get the next one */
828                 if (buf_avail == 0) {
829                         vec_idx++;
830                         if (unlikely(vec_idx >= nr_vec)) {
831                                 error = -1;
832                                 goto out;
833                         }
834
835                         buf_addr = buf_vec[vec_idx].buf_addr;
836                         buf_iova = buf_vec[vec_idx].buf_iova;
837                         buf_len = buf_vec[vec_idx].buf_len;
838
839                         buf_offset = 0;
840                         buf_avail  = buf_len;
841                 }
842
843                 /* done with current mbuf, get the next one */
844                 if (mbuf_avail == 0) {
845                         m = m->next;
846
847                         mbuf_offset = 0;
848                         mbuf_avail  = rte_pktmbuf_data_len(m);
849                 }
850
851                 if (hdr_addr) {
852                         virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
853                         if (rxvq_is_mergeable(dev))
854                                 ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
855                                                 num_buffers);
856
857                         if (unlikely(hdr == &tmp_hdr)) {
858                                 copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
859                         } else {
860                                 PRINT_PACKET(dev, (uintptr_t)hdr_addr,
861                                                 dev->vhost_hlen, 0);
862                                 vhost_log_cache_write_iova(dev, vq,
863                                                 buf_vec[0].buf_iova,
864                                                 dev->vhost_hlen);
865                         }
866
867                         hdr_addr = 0;
868                 }
869
870                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
871
872                 if (likely(cpy_len > MAX_BATCH_LEN ||
873                                         vq->batch_copy_nb_elems >= vq->size)) {
874                         rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
875                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
876                                 cpy_len);
877                         vhost_log_cache_write_iova(dev, vq,
878                                                    buf_iova + buf_offset,
879                                                    cpy_len);
880                         PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
881                                 cpy_len, 0);
882                 } else {
883                         batch_copy[vq->batch_copy_nb_elems].dst =
884                                 (void *)((uintptr_t)(buf_addr + buf_offset));
885                         batch_copy[vq->batch_copy_nb_elems].src =
886                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
887                         batch_copy[vq->batch_copy_nb_elems].log_addr =
888                                 buf_iova + buf_offset;
889                         batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
890                         vq->batch_copy_nb_elems++;
891                 }
892
893                 mbuf_avail  -= cpy_len;
894                 mbuf_offset += cpy_len;
895                 buf_avail  -= cpy_len;
896                 buf_offset += cpy_len;
897         }
898
899 out:
900
901         return error;
902 }
903
904 static __rte_always_inline int
905 vhost_enqueue_single_packed(struct virtio_net *dev,
906                             struct vhost_virtqueue *vq,
907                             struct rte_mbuf *pkt,
908                             struct buf_vector *buf_vec,
909                             uint16_t *nr_descs)
910 {
911         uint16_t nr_vec = 0;
912         uint16_t avail_idx = vq->last_avail_idx;
913         uint16_t max_tries, tries = 0;
914         uint16_t buf_id = 0;
915         uint32_t len = 0;
916         uint16_t desc_count;
917         uint32_t size = pkt->pkt_len + dev->vhost_hlen;
918         uint16_t num_buffers = 0;
919         uint32_t buffer_len[vq->size];
920         uint16_t buffer_buf_id[vq->size];
921         uint16_t buffer_desc_count[vq->size];
922
923         if (rxvq_is_mergeable(dev))
924                 max_tries = vq->size - 1;
925         else
926                 max_tries = 1;
927
928         while (size > 0) {
929                 /*
930                  * If we have tried all available ring entries and still
931                  * cannot get enough buffers, something abnormal has
932                  * happened.
933                  */
934                 if (unlikely(++tries > max_tries))
935                         return -1;
936
937                 if (unlikely(fill_vec_buf_packed(dev, vq,
938                                                 avail_idx, &desc_count,
939                                                 buf_vec, &nr_vec,
940                                                 &buf_id, &len,
941                                                 VHOST_ACCESS_RW) < 0))
942                         return -1;
943
944                 len = RTE_MIN(len, size);
945                 size -= len;
946
947                 buffer_len[num_buffers] = len;
948                 buffer_buf_id[num_buffers] = buf_id;
949                 buffer_desc_count[num_buffers] = desc_count;
950                 num_buffers += 1;
951
952                 *nr_descs += desc_count;
953                 avail_idx += desc_count;
954                 if (avail_idx >= vq->size)
955                         avail_idx -= vq->size;
956         }
957
958         if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0)
959                 return -1;
960
961         vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
962                                            buffer_desc_count, num_buffers);
963
964         return 0;
965 }
966
967 static __rte_noinline uint32_t
968 virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
969         struct rte_mbuf **pkts, uint32_t count)
970 {
971         uint32_t pkt_idx = 0;
972         uint16_t num_buffers;
973         struct buf_vector buf_vec[BUF_VECTOR_MAX];
974         uint16_t avail_head;
975
976         avail_head = *((volatile uint16_t *)&vq->avail->idx);
977
978         /*
979          * The ordering between avail index and
980          * desc reads needs to be enforced.
981          */
982         rte_smp_rmb();
983
984         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
985
986         for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
987                 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
988                 uint16_t nr_vec = 0;
989
990                 if (unlikely(reserve_avail_buf_split(dev, vq,
991                                                 pkt_len, buf_vec, &num_buffers,
992                                                 avail_head, &nr_vec) < 0)) {
993                         VHOST_LOG_DATA(DEBUG,
994                                 "(%d) failed to get enough desc from vring\n",
995                                 dev->vid);
996                         vq->shadow_used_idx -= num_buffers;
997                         break;
998                 }
999
1000                 VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
1001                         dev->vid, vq->last_avail_idx,
1002                         vq->last_avail_idx + num_buffers);
1003
1004                 if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
1005                                                 buf_vec, nr_vec,
1006                                                 num_buffers) < 0) {
1007                         vq->shadow_used_idx -= num_buffers;
1008                         break;
1009                 }
1010
1011                 vq->last_avail_idx += num_buffers;
1012         }
1013
1014         do_data_copy_enqueue(dev, vq);
1015
1016         if (likely(vq->shadow_used_idx)) {
1017                 flush_shadow_used_ring_split(dev, vq);
1018                 vhost_vring_call_split(dev, vq);
1019         }
1020
1021         return pkt_idx;
1022 }
1023
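/*
 * Fast path: enqueue PACKED_BATCH_SIZE packets at once. It applies only when
 * the avail index is batch-aligned, the batch does not wrap, all descriptors
 * are available, every mbuf is a single segment and each packet plus header
 * fits in its descriptor; otherwise the caller falls back to the
 * single-packet path.
 */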
1024 static __rte_always_inline int
1025 virtio_dev_rx_batch_packed(struct virtio_net *dev,
1026                            struct vhost_virtqueue *vq,
1027                            struct rte_mbuf **pkts)
1028 {
1029         bool wrap_counter = vq->avail_wrap_counter;
1030         struct vring_packed_desc *descs = vq->desc_packed;
1031         uint16_t avail_idx = vq->last_avail_idx;
1032         uint64_t desc_addrs[PACKED_BATCH_SIZE];
1033         struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
1034         uint32_t buf_offset = dev->vhost_hlen;
1035         uint64_t lens[PACKED_BATCH_SIZE];
1036         uint16_t ids[PACKED_BATCH_SIZE];
1037         uint16_t i;
1038
1039         if (unlikely(avail_idx & PACKED_BATCH_MASK))
1040                 return -1;
1041
1042         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
1043                 return -1;
1044
1045         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1046                 if (unlikely(pkts[i]->next != NULL))
1047                         return -1;
1048                 if (unlikely(!desc_is_avail(&descs[avail_idx + i],
1049                                             wrap_counter)))
1050                         return -1;
1051         }
1052
1053         rte_smp_rmb();
1054
1055         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1056                 lens[i] = descs[avail_idx + i].len;
1057
1058         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1059                 if (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset)))
1060                         return -1;
1061         }
1062
1063         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1064                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
1065                                                   descs[avail_idx + i].addr,
1066                                                   &lens[i],
1067                                                   VHOST_ACCESS_RW);
1068
1069         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1070                 if (unlikely(lens[i] != descs[avail_idx + i].len))
1071                         return -1;
1072         }
1073
1074         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1075                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
1076                 hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
1077                                         (uintptr_t)desc_addrs[i];
1078                 lens[i] = pkts[i]->pkt_len + dev->vhost_hlen;
1079         }
1080
1081         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1082                 virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr);
1083
1084         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
1085
1086         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1087                 rte_memcpy((void *)(uintptr_t)(desc_addrs[i] + buf_offset),
1088                            rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
1089                            pkts[i]->pkt_len);
1090         }
1091
1092         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1093                 vhost_log_cache_write_iova(dev, vq, descs[avail_idx + i].addr,
1094                                            lens[i]);
1095
1096         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1097                 ids[i] = descs[avail_idx + i].id;
1098
1099         vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);
1100
1101         return 0;
1102 }
1103
1104 static __rte_always_inline int16_t
1105 virtio_dev_rx_single_packed(struct virtio_net *dev,
1106                             struct vhost_virtqueue *vq,
1107                             struct rte_mbuf *pkt)
1108 {
1109         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1110         uint16_t nr_descs = 0;
1111
1112         rte_smp_rmb();
1113         if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
1114                                                  &nr_descs) < 0)) {
1115                 VHOST_LOG_DATA(DEBUG,
1116                                 "(%d) failed to get enough desc from vring\n",
1117                                 dev->vid);
1118                 return -1;
1119         }
1120
1121         VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
1122                         dev->vid, vq->last_avail_idx,
1123                         vq->last_avail_idx + nr_descs);
1124
1125         vq_inc_last_avail_packed(vq, nr_descs);
1126
1127         return 0;
1128 }
1129
1130 static __rte_noinline uint32_t
1131 virtio_dev_rx_packed(struct virtio_net *dev,
1132                      struct vhost_virtqueue *vq,
1133                      struct rte_mbuf **pkts,
1134                      uint32_t count)
1135 {
1136         uint32_t pkt_idx = 0;
1137         uint32_t remained = count;
1138
1139         do {
1140                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
1141
1142                 if (remained >= PACKED_BATCH_SIZE) {
1143                         if (!virtio_dev_rx_batch_packed(dev, vq,
1144                                                         &pkts[pkt_idx])) {
1145                                 pkt_idx += PACKED_BATCH_SIZE;
1146                                 remained -= PACKED_BATCH_SIZE;
1147                                 continue;
1148                         }
1149                 }
1150
1151                 if (virtio_dev_rx_single_packed(dev, vq, pkts[pkt_idx]))
1152                         break;
1153                 pkt_idx++;
1154                 remained--;
1155
1156         } while (pkt_idx < count);
1157
1158         if (vq->shadow_used_idx) {
1159                 do_data_copy_enqueue(dev, vq);
1160                 vhost_flush_enqueue_shadow_packed(dev, vq);
1161         }
1162
1163         if (pkt_idx)
1164                 vhost_vring_call_packed(dev, vq);
1165
1166         return pkt_idx;
1167 }
1168
1169 static __rte_always_inline uint32_t
1170 virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
1171         struct rte_mbuf **pkts, uint32_t count)
1172 {
1173         struct vhost_virtqueue *vq;
1174         uint32_t nb_tx = 0;
1175
1176         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
1177         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
1178                 VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
1179                         dev->vid, __func__, queue_id);
1180                 return 0;
1181         }
1182
1183         vq = dev->virtqueue[queue_id];
1184
1185         rte_spinlock_lock(&vq->access_lock);
1186
1187         if (unlikely(vq->enabled == 0))
1188                 goto out_access_unlock;
1189
1190         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1191                 vhost_user_iotlb_rd_lock(vq);
1192
1193         if (unlikely(vq->access_ok == 0))
1194                 if (unlikely(vring_translate(dev, vq) < 0))
1195                         goto out;
1196
1197         count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
1198         if (count == 0)
1199                 goto out;
1200
1201         if (vq_is_packed(dev))
1202                 nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
1203         else
1204                 nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);
1205
1206 out:
1207         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1208                 vhost_user_iotlb_rd_unlock(vq);
1209
1210 out_access_unlock:
1211         rte_spinlock_unlock(&vq->access_lock);
1212
1213         return nb_tx;
1214 }
1215
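/*
 * Public enqueue API. The sketch below is illustrative only (not part of
 * this file); vid and port_id are placeholders, and queue 0 is the first
 * RX virtqueue:
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t i, nb_rx, nb_enq;
 *
 *	nb_rx = rte_eth_rx_burst(port_id, 0, pkts, 32);
 *	nb_enq = rte_vhost_enqueue_burst(vid, 0, pkts, nb_rx);
 *	for (i = 0; i < nb_rx; i++)
 *		rte_pktmbuf_free(pkts[i]);
 *
 * Data is copied into the guest buffers, so the caller keeps ownership of
 * all mbufs (nb_enq only reports how many packets the guest accepted).
 */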
1216 uint16_t
1217 rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
1218         struct rte_mbuf **pkts, uint16_t count)
1219 {
1220         struct virtio_net *dev = get_device(vid);
1221
1222         if (!dev)
1223                 return 0;
1224
1225         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
1226                 VHOST_LOG_DATA(ERR,
1227                         "(%d) %s: built-in vhost net backend is disabled.\n",
1228                         dev->vid, __func__);
1229                 return 0;
1230         }
1231
1232         return virtio_dev_rx(dev, queue_id, pkts, count);
1233 }
1234
1235 static inline bool
1236 virtio_net_with_host_offload(struct virtio_net *dev)
1237 {
1238         if (dev->features &
1239                         ((1ULL << VIRTIO_NET_F_CSUM) |
1240                          (1ULL << VIRTIO_NET_F_HOST_ECN) |
1241                          (1ULL << VIRTIO_NET_F_HOST_TSO4) |
1242                          (1ULL << VIRTIO_NET_F_HOST_TSO6) |
1243                          (1ULL << VIRTIO_NET_F_HOST_UFO)))
1244                 return true;
1245
1246         return false;
1247 }
1248
1249 static void
1250 parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
1251 {
1252         struct rte_ipv4_hdr *ipv4_hdr;
1253         struct rte_ipv6_hdr *ipv6_hdr;
1254         void *l3_hdr = NULL;
1255         struct rte_ether_hdr *eth_hdr;
1256         uint16_t ethertype;
1257
1258         eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1259
1260         m->l2_len = sizeof(struct rte_ether_hdr);
1261         ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
1262
1263         if (ethertype == RTE_ETHER_TYPE_VLAN) {
1264                 struct rte_vlan_hdr *vlan_hdr =
1265                         (struct rte_vlan_hdr *)(eth_hdr + 1);
1266
1267                 m->l2_len += sizeof(struct rte_vlan_hdr);
1268                 ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
1269         }
1270
1271         l3_hdr = (char *)eth_hdr + m->l2_len;
1272
1273         switch (ethertype) {
1274         case RTE_ETHER_TYPE_IPV4:
1275                 ipv4_hdr = l3_hdr;
1276                 *l4_proto = ipv4_hdr->next_proto_id;
1277                 m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
1278                 *l4_hdr = (char *)l3_hdr + m->l3_len;
1279                 m->ol_flags |= PKT_TX_IPV4;
1280                 break;
1281         case RTE_ETHER_TYPE_IPV6:
1282                 ipv6_hdr = l3_hdr;
1283                 *l4_proto = ipv6_hdr->proto;
1284                 m->l3_len = sizeof(struct rte_ipv6_hdr);
1285                 *l4_hdr = (char *)l3_hdr + m->l3_len;
1286                 m->ol_flags |= PKT_TX_IPV6;
1287                 break;
1288         default:
1289                 m->l3_len = 0;
1290                 *l4_proto = 0;
1291                 *l4_hdr = NULL;
1292                 break;
1293         }
1294 }
1295
1296 static __rte_always_inline void
1297 vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
1298 {
1299         uint16_t l4_proto = 0;
1300         void *l4_hdr = NULL;
1301         struct rte_tcp_hdr *tcp_hdr = NULL;
1302
1303         if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
1304                 return;
1305
1306         parse_ethernet(m, &l4_proto, &l4_hdr);
1307         if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1308                 if (hdr->csum_start == (m->l2_len + m->l3_len)) {
1309                         switch (hdr->csum_offset) {
1310                         case (offsetof(struct rte_tcp_hdr, cksum)):
1311                                 if (l4_proto == IPPROTO_TCP)
1312                                         m->ol_flags |= PKT_TX_TCP_CKSUM;
1313                                 break;
1314                         case (offsetof(struct rte_udp_hdr, dgram_cksum)):
1315                                 if (l4_proto == IPPROTO_UDP)
1316                                         m->ol_flags |= PKT_TX_UDP_CKSUM;
1317                                 break;
1318                         case (offsetof(struct rte_sctp_hdr, cksum)):
1319                                 if (l4_proto == IPPROTO_SCTP)
1320                                         m->ol_flags |= PKT_TX_SCTP_CKSUM;
1321                                 break;
1322                         default:
1323                                 break;
1324                         }
1325                 }
1326         }
1327
1328         if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1329                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1330                 case VIRTIO_NET_HDR_GSO_TCPV4:
1331                 case VIRTIO_NET_HDR_GSO_TCPV6:
1332                         tcp_hdr = l4_hdr;
1333                         m->ol_flags |= PKT_TX_TCP_SEG;
1334                         m->tso_segsz = hdr->gso_size;
1335                         m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
1336                         break;
1337                 case VIRTIO_NET_HDR_GSO_UDP:
1338                         m->ol_flags |= PKT_TX_UDP_SEG;
1339                         m->tso_segsz = hdr->gso_size;
1340                         m->l4_len = sizeof(struct rte_udp_hdr);
1341                         break;
1342                 default:
1343                         VHOST_LOG_DATA(WARNING,
1344                                 "unsupported gso type %u.\n", hdr->gso_type);
1345                         break;
1346                 }
1347         }
1348 }
1349
1350 static __rte_noinline void
1351 copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
1352                 struct buf_vector *buf_vec)
1353 {
1354         uint64_t len;
1355         uint64_t remain = sizeof(struct virtio_net_hdr);
1356         uint64_t src;
1357         uint64_t dst = (uint64_t)(uintptr_t)hdr;
1358
1359         while (remain) {
1360                 len = RTE_MIN(remain, buf_vec->buf_len);
1361                 src = buf_vec->buf_addr;
1362                 rte_memcpy((void *)(uintptr_t)dst,
1363                                 (void *)(uintptr_t)src, len);
1364
1365                 remain -= len;
1366                 dst += len;
1367                 buf_vec++;
1368         }
1369 }
1370
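/*
 * Dequeue copy path: read the virtio-net header (when host offloads are
 * negotiated), then copy the guest buffers described by buf_vec into the
 * mbuf chain. With dequeue zero-copy, physically contiguous guest buffers
 * are attached to the mbuf directly instead of being copied.
 */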
1371 static __rte_always_inline int
1372 copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
1373                   struct buf_vector *buf_vec, uint16_t nr_vec,
1374                   struct rte_mbuf *m, struct rte_mempool *mbuf_pool)
1375 {
1376         uint32_t buf_avail, buf_offset;
1377         uint64_t buf_addr, buf_iova, buf_len;
1378         uint32_t mbuf_avail, mbuf_offset;
1379         uint32_t cpy_len;
1380         struct rte_mbuf *cur = m, *prev = m;
1381         struct virtio_net_hdr tmp_hdr;
1382         struct virtio_net_hdr *hdr = NULL;
1383         /* Counter bounded by nr_vec to avoid looping on a broken desc chain */
1384         uint16_t vec_idx = 0;
1385         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
1386         int error = 0;
1387
1388         buf_addr = buf_vec[vec_idx].buf_addr;
1389         buf_iova = buf_vec[vec_idx].buf_iova;
1390         buf_len = buf_vec[vec_idx].buf_len;
1391
1392         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
1393                 error = -1;
1394                 goto out;
1395         }
1396
1397         if (virtio_net_with_host_offload(dev)) {
1398                 if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
1399                         /*
1400                          * No luck, the virtio-net header doesn't fit
1401                          * in a contiguous virtual area.
1402                          */
1403                         copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec);
1404                         hdr = &tmp_hdr;
1405                 } else {
1406                         hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
1407                 }
1408         }
1409
1410         /*
1411          * A virtio driver normally uses at least 2 desc buffers
1412          * for Tx: the first for storing the header, and the others
1413          * for storing the data.
1414          */
1415         if (unlikely(buf_len < dev->vhost_hlen)) {
1416                 buf_offset = dev->vhost_hlen - buf_len;
1417                 vec_idx++;
1418                 buf_addr = buf_vec[vec_idx].buf_addr;
1419                 buf_iova = buf_vec[vec_idx].buf_iova;
1420                 buf_len = buf_vec[vec_idx].buf_len;
1421                 buf_avail  = buf_len - buf_offset;
1422         } else if (buf_len == dev->vhost_hlen) {
1423                 if (unlikely(++vec_idx >= nr_vec))
1424                         goto out;
1425                 buf_addr = buf_vec[vec_idx].buf_addr;
1426                 buf_iova = buf_vec[vec_idx].buf_iova;
1427                 buf_len = buf_vec[vec_idx].buf_len;
1428
1429                 buf_offset = 0;
1430                 buf_avail = buf_len;
1431         } else {
1432                 buf_offset = dev->vhost_hlen;
1433                 buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
1434         }
1435
1436         PRINT_PACKET(dev,
1437                         (uintptr_t)(buf_addr + buf_offset),
1438                         (uint32_t)buf_avail, 0);
1439
1440         mbuf_offset = 0;
1441         mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
1442         while (1) {
1443                 uint64_t hpa;
1444
1445                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
1446
1447                 /*
1448                  * A desc buf might span two host physical pages that are
1449                  * not contiguous. In that case (gpa_to_hpa() returns 0), data
1450                  * will be copied even though zero copy is enabled.
1451                  */
1452                 if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
1453                                         buf_iova + buf_offset, cpy_len)))) {
1454                         cur->data_len = cpy_len;
1455                         cur->data_off = 0;
1456                         cur->buf_addr =
1457                                 (void *)(uintptr_t)(buf_addr + buf_offset);
1458                         cur->buf_iova = hpa;
1459
1460                         /*
1461                          * In zero copy mode, one mbuf can only reference data
1462                          * from one desc buf, or part of one.
1463                          */
1464                         mbuf_avail = cpy_len;
1465                 } else {
1466                         if (likely(cpy_len > MAX_BATCH_LEN ||
1467                                    vq->batch_copy_nb_elems >= vq->size ||
1468                                    (hdr && cur == m))) {
1469                                 rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
1470                                                                    mbuf_offset),
1471                                            (void *)((uintptr_t)(buf_addr +
1472                                                            buf_offset)),
1473                                            cpy_len);
1474                         } else {
1475                                 batch_copy[vq->batch_copy_nb_elems].dst =
1476                                         rte_pktmbuf_mtod_offset(cur, void *,
1477                                                                 mbuf_offset);
1478                                 batch_copy[vq->batch_copy_nb_elems].src =
1479                                         (void *)((uintptr_t)(buf_addr +
1480                                                                 buf_offset));
1481                                 batch_copy[vq->batch_copy_nb_elems].len =
1482                                         cpy_len;
1483                                 vq->batch_copy_nb_elems++;
1484                         }
1485                 }
1486
1487                 mbuf_avail  -= cpy_len;
1488                 mbuf_offset += cpy_len;
1489                 buf_avail -= cpy_len;
1490                 buf_offset += cpy_len;
1491
1492                 /* This buf has been fully consumed, get the next one */
1493                 if (buf_avail == 0) {
1494                         if (++vec_idx >= nr_vec)
1495                                 break;
1496
1497                         buf_addr = buf_vec[vec_idx].buf_addr;
1498                         buf_iova = buf_vec[vec_idx].buf_iova;
1499                         buf_len = buf_vec[vec_idx].buf_len;
1500
1501                         buf_offset = 0;
1502                         buf_avail  = buf_len;
1503
1504                         PRINT_PACKET(dev, (uintptr_t)buf_addr,
1505                                         (uint32_t)buf_avail, 0);
1506                 }
1507
1508                 /*
1509                  * This mbuf is full, get a new one
1510                  * to hold more data.
1511                  */
1512                 if (mbuf_avail == 0) {
1513                         cur = rte_pktmbuf_alloc(mbuf_pool);
1514                         if (unlikely(cur == NULL)) {
1515                                 VHOST_LOG_DATA(ERR, "Failed to "
1516                                         "allocate memory for mbuf.\n");
1517                                 error = -1;
1518                                 goto out;
1519                         }
1520                         if (unlikely(dev->dequeue_zero_copy))
1521                                 rte_mbuf_refcnt_update(cur, 1);
1522
1523                         prev->next = cur;
1524                         prev->data_len = mbuf_offset;
1525                         m->nb_segs += 1;
1526                         m->pkt_len += mbuf_offset;
1527                         prev = cur;
1528
1529                         mbuf_offset = 0;
1530                         mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
1531                 }
1532         }
1533
1534         prev->data_len = mbuf_offset;
1535         m->pkt_len    += mbuf_offset;
1536
1537         if (hdr)
1538                 vhost_dequeue_offload(hdr, m);
1539
1540 out:
1541
1542         return error;
1543 }
1544
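/*
 * Find a free zero-copy mbuf slot.  The slot array is scanned as a
 * circular buffer: first [last_zmbuf_idx, zmbuf_size), then
 * [0, last_zmbuf_idx).  Returns NULL if every slot is still in use.
 */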
1545 static __rte_always_inline struct zcopy_mbuf *
1546 get_zmbuf(struct vhost_virtqueue *vq)
1547 {
1548         uint16_t i;
1549         uint16_t last;
1550         int tries = 0;
1551
1552         /* search [last_zmbuf_idx, zmbuf_size) */
1553         i = vq->last_zmbuf_idx;
1554         last = vq->zmbuf_size;
1555
1556 again:
1557         for (; i < last; i++) {
1558                 if (vq->zmbufs[i].in_use == 0) {
1559                         vq->last_zmbuf_idx = i + 1;
1560                         vq->zmbufs[i].in_use = 1;
1561                         return &vq->zmbufs[i];
1562                 }
1563         }
1564
1565         tries++;
1566         if (tries == 1) {
1567                 /* search [0, last_zmbuf_idx) */
1568                 i = 0;
1569                 last = vq->last_zmbuf_idx;
1570                 goto again;
1571         }
1572
1573         return NULL;
1574 }
1575
1576 static void
1577 virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)
1578 {
1579         rte_free(opaque);
1580 }
1581
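/*
 * Attach an external data buffer of at least 'size' bytes to 'pkt'.
 * The shared info is stored in the pkt mbuf's own data room when it
 * fits there, otherwise it is placed at the tail of the newly
 * allocated buffer.  Returns 0 on success, a negative value otherwise.
 */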
1582 static int
1583 virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
1584 {
1585         struct rte_mbuf_ext_shared_info *shinfo = NULL;
1586         uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
1587         uint16_t buf_len;
1588         rte_iova_t iova;
1589         void *buf;
1590
1591         /* Try to store shinfo in the pkt mbuf itself to reduce the amount of
1592          * memory required; otherwise store it in the new external buffer.
1593          */
1594         if (rte_pktmbuf_tailroom(pkt) >= sizeof(*shinfo))
1595                 shinfo = rte_pktmbuf_mtod(pkt,
1596                                           struct rte_mbuf_ext_shared_info *);
1597         else {
1598                 total_len += sizeof(*shinfo) + sizeof(uintptr_t);
1599                 total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
1600         }
1601
1602         if (unlikely(total_len > UINT16_MAX))
1603                 return -ENOSPC;
1604
1605         buf_len = total_len;
1606         buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
1607         if (unlikely(buf == NULL))
1608                 return -ENOMEM;
1609
1610         /* Initialize shinfo */
1611         if (shinfo) {
1612                 shinfo->free_cb = virtio_dev_extbuf_free;
1613                 shinfo->fcb_opaque = buf;
1614                 rte_mbuf_ext_refcnt_set(shinfo, 1);
1615         } else {
1616                 shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
1617                                               virtio_dev_extbuf_free, buf);
1618                 if (unlikely(shinfo == NULL)) {
1619                         rte_free(buf);
1620                         VHOST_LOG_DATA(ERR, "Failed to init shinfo\n");
1621                         return -1;
1622                 }
1623         }
1624
1625         iova = rte_malloc_virt2iova(buf);
1626         rte_pktmbuf_attach_extbuf(pkt, buf, iova, buf_len, shinfo);
1627         rte_pktmbuf_reset_headroom(pkt);
1628
1629         return 0;
1630 }
1631
1632 /*
1633  * Allocate a pktmbuf suitable to hold 'data_len' bytes of packet data.
1634  */
1635 static __rte_always_inline struct rte_mbuf *
1636 virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
1637                          uint32_t data_len)
1638 {
1639         struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp);
1640
1641         if (unlikely(pkt == NULL)) {
1642                 VHOST_LOG_DATA(ERR,
1643                         "Failed to allocate memory for mbuf.\n");
1644                 return NULL;
1645         }
1646
1647         if (rte_pktmbuf_tailroom(pkt) >= data_len)
1648                 return pkt;
1649
1650         /* attach an external buffer if supported */
1651         if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
1652                 return pkt;
1653
1654         /* check if chained buffers are allowed */
1655         if (!dev->linearbuf)
1656                 return pkt;
1657
1658         /* Data doesn't fit into the buffer and the host supports
1659          * only linear buffers
1660          */
1661         rte_pktmbuf_free(pkt);
1662
1663         return NULL;
1664 }
1665
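/*
 * Dequeue path for split virtqueues: reclaim completed zero-copy mbufs
 * if that mode is enabled, then turn up to 'count' available descriptor
 * chains into mbufs and flush the shadow used ring.
 */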
1666 static __rte_noinline uint16_t
1667 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
1668         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
1669 {
1670         uint16_t i;
1671         uint16_t free_entries;
1672
1673         if (unlikely(dev->dequeue_zero_copy)) {
1674                 struct zcopy_mbuf *zmbuf, *next;
1675
1676                 for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
1677                      zmbuf != NULL; zmbuf = next) {
1678                         next = TAILQ_NEXT(zmbuf, next);
1679
1680                         if (mbuf_is_consumed(zmbuf->mbuf)) {
1681                                 update_shadow_used_ring_split(vq,
1682                                                 zmbuf->desc_idx, 0);
1683                                 TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
1684                                 restore_mbuf(zmbuf->mbuf);
1685                                 rte_pktmbuf_free(zmbuf->mbuf);
1686                                 put_zmbuf(zmbuf);
1687                                 vq->nr_zmbuf -= 1;
1688                         }
1689                 }
1690
1691                 if (likely(vq->shadow_used_idx)) {
1692                         flush_shadow_used_ring_split(dev, vq);
1693                         vhost_vring_call_split(dev, vq);
1694                 }
1695         }
1696
1697         free_entries = *((volatile uint16_t *)&vq->avail->idx) -
1698                         vq->last_avail_idx;
1699         if (free_entries == 0)
1700                 return 0;
1701
1702         /*
1703          * The ordering between avail index and
1704          * desc reads needs to be enforced.
1705          */
1706         rte_smp_rmb();
1707
1708         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1709
1710         VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
1711
1712         count = RTE_MIN(count, MAX_PKT_BURST);
1713         count = RTE_MIN(count, free_entries);
1714         VHOST_LOG_DATA(DEBUG, "(%d) about to dequeue %u buffers\n",
1715                         dev->vid, count);
1716
1717         for (i = 0; i < count; i++) {
1718                 struct buf_vector buf_vec[BUF_VECTOR_MAX];
1719                 uint16_t head_idx;
1720                 uint32_t buf_len;
1721                 uint16_t nr_vec = 0;
1722                 int err;
1723
1724                 if (unlikely(fill_vec_buf_split(dev, vq,
1725                                                 vq->last_avail_idx + i,
1726                                                 &nr_vec, buf_vec,
1727                                                 &head_idx, &buf_len,
1728                                                 VHOST_ACCESS_RO) < 0))
1729                         break;
1730
1731                 if (likely(dev->dequeue_zero_copy == 0))
1732                         update_shadow_used_ring_split(vq, head_idx, 0);
1733
1734                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
1735                 if (unlikely(pkts[i] == NULL))
1736                         break;
1737
1738                 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
1739                                 mbuf_pool);
1740                 if (unlikely(err)) {
1741                         rte_pktmbuf_free(pkts[i]);
1742                         break;
1743                 }
1744
1745                 if (unlikely(dev->dequeue_zero_copy)) {
1746                         struct zcopy_mbuf *zmbuf;
1747
1748                         zmbuf = get_zmbuf(vq);
1749                         if (!zmbuf) {
1750                                 rte_pktmbuf_free(pkts[i]);
1751                                 break;
1752                         }
1753                         zmbuf->mbuf = pkts[i];
1754                         zmbuf->desc_idx = head_idx;
1755
1756                         /*
1757                          * Pin the mbuf with an extra refcnt; we check later
1758                          * whether it has been freed by the application (i.e.
1759                          * we are the last user), and only then update the
1760                          * used ring safely.
1761                          */
1762                         rte_mbuf_refcnt_update(pkts[i], 1);
1763
1764                         vq->nr_zmbuf += 1;
1765                         TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
1766                 }
1767         }
1768         vq->last_avail_idx += i;
1769
1770         if (likely(dev->dequeue_zero_copy == 0)) {
1771                 do_data_copy_dequeue(vq);
1772                 if (unlikely(i < count))
1773                         vq->shadow_used_idx = i;
1774                 if (likely(vq->shadow_used_idx)) {
1775                         flush_shadow_used_ring_split(dev, vq);
1776                         vhost_vring_call_split(dev, vq);
1777                 }
1778         }
1779
1780         return i;
1781 }
1782
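/*
 * Check whether the next PACKED_BATCH_SIZE descriptors can be dequeued
 * as one batch: aligned index, all descriptors available and
 * batch-compatible, translatable addresses, and payloads that fit the
 * allocated mbufs.  On success the descriptor addresses and ids are
 * returned and the mbuf length/offload fields are set; the payload
 * itself is not copied here.
 */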
1783 static __rte_always_inline int
1784 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
1785                                  struct vhost_virtqueue *vq,
1786                                  struct rte_mempool *mbuf_pool,
1787                                  struct rte_mbuf **pkts,
1788                                  uint16_t avail_idx,
1789                                  uintptr_t *desc_addrs,
1790                                  uint16_t *ids)
1791 {
1792         bool wrap = vq->avail_wrap_counter;
1793         struct vring_packed_desc *descs = vq->desc_packed;
1794         struct virtio_net_hdr *hdr;
1795         uint64_t lens[PACKED_BATCH_SIZE];
1796         uint64_t buf_lens[PACKED_BATCH_SIZE];
1797         uint32_t buf_offset = dev->vhost_hlen;
1798         uint16_t flags, i;
1799
1800         if (unlikely(avail_idx & PACKED_BATCH_MASK))
1801                 return -1;
1802         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
1803                 return -1;
1804
1805         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1806                 flags = descs[avail_idx + i].flags;
1807                 if (unlikely((wrap != !!(flags & VRING_DESC_F_AVAIL)) ||
1808                              (wrap == !!(flags & VRING_DESC_F_USED))  ||
1809                              (flags & PACKED_DESC_SINGLE_DEQUEUE_FLAG)))
1810                         return -1;
1811         }
1812
1813         rte_smp_rmb();
1814
1815         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1816                 lens[i] = descs[avail_idx + i].len;
1817
1818         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1819                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
1820                                                   descs[avail_idx + i].addr,
1821                                                   &lens[i], VHOST_ACCESS_RW);
1822         }
1823
1824         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1825                 if (unlikely((lens[i] != descs[avail_idx + i].len)))
1826                         return -1;
1827         }
1828
1829         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1830                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]);
1831                 if (!pkts[i])
1832                         goto free_buf;
1833         }
1834
1835         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1836                 buf_lens[i] = pkts[i]->buf_len - pkts[i]->data_off;
1837
1838         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1839                 if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
1840                         goto free_buf;
1841         }
1842
1843         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1844                 pkts[i]->pkt_len = descs[avail_idx + i].len - buf_offset;
1845                 pkts[i]->data_len = pkts[i]->pkt_len;
1846                 ids[i] = descs[avail_idx + i].id;
1847         }
1848
1849         if (virtio_net_with_host_offload(dev)) {
1850                 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1851                         hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
1852                         vhost_dequeue_offload(hdr, pkts[i]);
1853                 }
1854         }
1855
1856         return 0;
1857
1858 free_buf:
1859         for (i = 0; i < PACKED_BATCH_SIZE; i++)
1860                 rte_pktmbuf_free(pkts[i]);
1861
1862         return -1;
1863 }
1864
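/*
 * Dequeue one batch of PACKED_BATCH_SIZE descriptors in copy mode:
 * copy the payloads into the reserved mbufs, record the ids in the
 * shadow used ring and advance the avail index.  Returns -1 when the
 * batch cannot be taken as a whole.
 */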
1865 static __rte_always_inline int
1866 virtio_dev_tx_batch_packed(struct virtio_net *dev,
1867                            struct vhost_virtqueue *vq,
1868                            struct rte_mempool *mbuf_pool,
1869                            struct rte_mbuf **pkts)
1870 {
1871         uint16_t avail_idx = vq->last_avail_idx;
1872         uint32_t buf_offset = dev->vhost_hlen;
1873         uintptr_t desc_addrs[PACKED_BATCH_SIZE];
1874         uint16_t ids[PACKED_BATCH_SIZE];
1875         uint16_t i;
1876
1877         if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
1878                                              avail_idx, desc_addrs, ids))
1879                 return -1;
1880
1881         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1882                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
1883
1884         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1885                 rte_memcpy(rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
1886                            (void *)(uintptr_t)(desc_addrs[i] + buf_offset),
1887                            pkts[i]->pkt_len);
1888
1889         if (virtio_net_is_inorder(dev))
1890                 vhost_shadow_dequeue_batch_packed_inorder(vq,
1891                         ids[PACKED_BATCH_SIZE - 1]);
1892         else
1893                 vhost_shadow_dequeue_batch_packed(dev, vq, ids);
1894
1895         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
1896
1897         return 0;
1898 }
1899
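/*
 * Dequeue a single descriptor chain from a packed virtqueue into a
 * freshly allocated mbuf.  'buf_id' and 'desc_count' report which
 * descriptors were consumed so the caller can mark them as used.
 */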
1900 static __rte_always_inline int
1901 vhost_dequeue_single_packed(struct virtio_net *dev,
1902                             struct vhost_virtqueue *vq,
1903                             struct rte_mempool *mbuf_pool,
1904                             struct rte_mbuf **pkts,
1905                             uint16_t *buf_id,
1906                             uint16_t *desc_count)
1907 {
1908         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1909         uint32_t buf_len;
1910         uint16_t nr_vec = 0;
1911         int err;
1912
1913         if (unlikely(fill_vec_buf_packed(dev, vq,
1914                                          vq->last_avail_idx, desc_count,
1915                                          buf_vec, &nr_vec,
1916                                          buf_id, &buf_len,
1917                                          VHOST_ACCESS_RO) < 0))
1918                 return -1;
1919
1920         *pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
1921         if (unlikely(*pkts == NULL)) {
1922                 VHOST_LOG_DATA(ERR,
1923                         "Failed to allocate memory for mbuf.\n");
1924                 return -1;
1925         }
1926
1927         err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
1928                                 mbuf_pool);
1929         if (unlikely(err)) {
1930                 rte_pktmbuf_free(*pkts);
1931                 return -1;
1932         }
1933
1934         return 0;
1935 }
1936
1937 static __rte_always_inline int
1938 virtio_dev_tx_single_packed(struct virtio_net *dev,
1939                             struct vhost_virtqueue *vq,
1940                             struct rte_mempool *mbuf_pool,
1941                             struct rte_mbuf **pkts)
1942 {
1943
1944         uint16_t buf_id, desc_count;
1945
1946         if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
1947                                         &desc_count))
1948                 return -1;
1949
1950         if (virtio_net_is_inorder(dev))
1951                 vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
1952                                                            desc_count);
1953         else
1954                 vhost_shadow_dequeue_single_packed(vq, buf_id, desc_count);
1955
1956         vq_inc_last_avail_packed(vq, desc_count);
1957
1958         return 0;
1959 }
1960
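/*
 * Zero-copy variant of the batched packed dequeue: besides reserving
 * the batch, each mbuf gets an extra refcnt and is tracked in a zmbuf,
 * so its descriptors are only returned to the guest once the
 * application releases the mbuf (see free_zmbuf()).
 */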
1961 static __rte_always_inline int
1962 virtio_dev_tx_batch_packed_zmbuf(struct virtio_net *dev,
1963                                  struct vhost_virtqueue *vq,
1964                                  struct rte_mempool *mbuf_pool,
1965                                  struct rte_mbuf **pkts)
1966 {
1967         struct zcopy_mbuf *zmbufs[PACKED_BATCH_SIZE];
1968         uintptr_t desc_addrs[PACKED_BATCH_SIZE];
1969         uint16_t ids[PACKED_BATCH_SIZE];
1970         uint16_t i;
1971
1972         uint16_t avail_idx = vq->last_avail_idx;
1973
1974         if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
1975                                              avail_idx, desc_addrs, ids))
1976                 return -1;
1977
1978         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1979                 zmbufs[i] = get_zmbuf(vq);
1980
1981         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1982                 if (!zmbufs[i])
1983                         goto free_pkt;
1984         }
1985
1986         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1987                 zmbufs[i]->mbuf = pkts[i];
1988                 zmbufs[i]->desc_idx = ids[i];
1989                 zmbufs[i]->desc_count = 1;
1990         }
1991
1992         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1993                 rte_mbuf_refcnt_update(pkts[i], 1);
1994
1995         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1996                 TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbufs[i], next);
1997
1998         vq->nr_zmbuf += PACKED_BATCH_SIZE;
1999         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
2000
2001         return 0;
2002
2003 free_pkt:
2004         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2005                 rte_pktmbuf_free(pkts[i]);
2006
2007         return -1;
2008 }
2009
2010 static __rte_always_inline int
2011 virtio_dev_tx_single_packed_zmbuf(struct virtio_net *dev,
2012                                   struct vhost_virtqueue *vq,
2013                                   struct rte_mempool *mbuf_pool,
2014                                   struct rte_mbuf **pkts)
2015 {
2016         uint16_t buf_id, desc_count;
2017         struct zcopy_mbuf *zmbuf;
2018
2019         if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
2020                                         &desc_count))
2021                 return -1;
2022
2023         zmbuf = get_zmbuf(vq);
2024         if (!zmbuf) {
2025                 rte_pktmbuf_free(*pkts);
2026                 return -1;
2027         }
2028         zmbuf->mbuf = *pkts;
2029         zmbuf->desc_idx = buf_id;
2030         zmbuf->desc_count = desc_count;
2031
2032         rte_mbuf_refcnt_update(*pkts, 1);
2033
2034         vq->nr_zmbuf += 1;
2035         TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
2036
2037         vq_inc_last_avail_packed(vq, desc_count);
2038         return 0;
2039 }
2040
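/*
 * Walk the zero-copy mbuf list and, for every mbuf the application has
 * released, write its descriptor back to the packed ring with the
 * current wrap flags and recycle the zmbuf slot.
 */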
2041 static __rte_always_inline void
2042 free_zmbuf(struct vhost_virtqueue *vq)
2043 {
2044         struct zcopy_mbuf *next = NULL;
2045         struct zcopy_mbuf *zmbuf;
2046
2047         for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
2048              zmbuf != NULL; zmbuf = next) {
2049                 next = TAILQ_NEXT(zmbuf, next);
2050
2051                 uint16_t last_used_idx = vq->last_used_idx;
2052
2053                 if (mbuf_is_consumed(zmbuf->mbuf)) {
2054                         uint16_t flags;
2055                         flags = vq->desc_packed[last_used_idx].flags;
2056                         if (vq->used_wrap_counter) {
2057                                 flags |= VRING_DESC_F_USED;
2058                                 flags |= VRING_DESC_F_AVAIL;
2059                         } else {
2060                                 flags &= ~VRING_DESC_F_USED;
2061                                 flags &= ~VRING_DESC_F_AVAIL;
2062                         }
2063
2064                         vq->desc_packed[last_used_idx].id = zmbuf->desc_idx;
2065                         vq->desc_packed[last_used_idx].len = 0;
2066
2067                         rte_smp_wmb();
2068                         vq->desc_packed[last_used_idx].flags = flags;
2069
2070                         vq_inc_last_used_packed(vq, zmbuf->desc_count);
2071
2072                         TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
2073                         restore_mbuf(zmbuf->mbuf);
2074                         rte_pktmbuf_free(zmbuf->mbuf);
2075                         put_zmbuf(zmbuf);
2076                         vq->nr_zmbuf -= 1;
2077                 }
2078         }
2079 }
2080
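/*
 * Dequeue path for packed virtqueues in zero-copy mode: reclaim
 * consumed zero-copy mbufs first, then hand out new ones in batches
 * when possible, falling back to single descriptors.
 */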
2081 static __rte_noinline uint16_t
2082 virtio_dev_tx_packed_zmbuf(struct virtio_net *dev,
2083                            struct vhost_virtqueue *vq,
2084                            struct rte_mempool *mbuf_pool,
2085                            struct rte_mbuf **pkts,
2086                            uint32_t count)
2087 {
2088         uint32_t pkt_idx = 0;
2089         uint32_t remained = count;
2090
2091         free_zmbuf(vq);
2092
2093         do {
2094                 if (remained >= PACKED_BATCH_SIZE) {
2095                         if (!virtio_dev_tx_batch_packed_zmbuf(dev, vq,
2096                                 mbuf_pool, &pkts[pkt_idx])) {
2097                                 pkt_idx += PACKED_BATCH_SIZE;
2098                                 remained -= PACKED_BATCH_SIZE;
2099                                 continue;
2100                         }
2101                 }
2102
2103                 if (virtio_dev_tx_single_packed_zmbuf(dev, vq, mbuf_pool,
2104                                                       &pkts[pkt_idx]))
2105                         break;
2106                 pkt_idx++;
2107                 remained--;
2108
2109         } while (remained);
2110
2111         if (pkt_idx)
2112                 vhost_vring_call_packed(dev, vq);
2113
2114         return pkt_idx;
2115 }
2116
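/*
 * Dequeue path for packed virtqueues in copy mode: try batches of
 * PACKED_BATCH_SIZE descriptors first, fall back to single descriptors,
 * then flush the shadow used ring and kick the guest if needed.
 */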
2117 static __rte_noinline uint16_t
2118 virtio_dev_tx_packed(struct virtio_net *dev,
2119                      struct vhost_virtqueue *vq,
2120                      struct rte_mempool *mbuf_pool,
2121                      struct rte_mbuf **pkts,
2122                      uint32_t count)
2123 {
2124         uint32_t pkt_idx = 0;
2125         uint32_t remained = count;
2126
2127         do {
2128                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
2129
2130                 if (remained >= PACKED_BATCH_SIZE) {
2131                         if (!virtio_dev_tx_batch_packed(dev, vq, mbuf_pool,
2132                                                         &pkts[pkt_idx])) {
2133                                 pkt_idx += PACKED_BATCH_SIZE;
2134                                 remained -= PACKED_BATCH_SIZE;
2135                                 continue;
2136                         }
2137                 }
2138
2139                 if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
2140                                                 &pkts[pkt_idx]))
2141                         break;
2142                 pkt_idx++;
2143                 remained--;
2144
2145         } while (remained);
2146
2147         if (vq->shadow_used_idx) {
2148                 do_data_copy_dequeue(vq);
2149
2150                 vhost_flush_dequeue_shadow_packed(dev, vq);
2151                 vhost_vring_call_packed(dev, vq);
2152         }
2153
2154         return pkt_idx;
2155 }
2156
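/*
 * Public API: dequeue up to 'count' packets sent by the guest on
 * 'queue_id' into 'pkts', allocating mbufs from 'mbuf_pool'.  A caller
 * loop could look roughly like the sketch below ('vid', 'queue' and
 * 'pool' stand for application-provided values and are only
 * illustrative):
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb = rte_vhost_dequeue_burst(vid, queue, pool, pkts, 32);
 *	uint16_t k;
 *
 *	for (k = 0; k < nb; k++)
 *		rte_pktmbuf_free(pkts[k]);
 *
 * where a real application would typically forward the packets instead
 * of freeing them.
 */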
2157 uint16_t
2158 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
2159         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
2160 {
2161         struct virtio_net *dev;
2162         struct rte_mbuf *rarp_mbuf = NULL;
2163         struct vhost_virtqueue *vq;
2164
2165         dev = get_device(vid);
2166         if (!dev)
2167                 return 0;
2168
2169         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
2170                 VHOST_LOG_DATA(ERR,
2171                         "(%d) %s: built-in vhost net backend is disabled.\n",
2172                         dev->vid, __func__);
2173                 return 0;
2174         }
2175
2176         if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
2177                 VHOST_LOG_DATA(ERR,
2178                         "(%d) %s: invalid virtqueue idx %d.\n",
2179                         dev->vid, __func__, queue_id);
2180                 return 0;
2181         }
2182
2183         vq = dev->virtqueue[queue_id];
2184
2185         if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
2186                 return 0;
2187
2188         if (unlikely(vq->enabled == 0)) {
2189                 count = 0;
2190                 goto out_access_unlock;
2191         }
2192
2193         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2194                 vhost_user_iotlb_rd_lock(vq);
2195
2196         if (unlikely(vq->access_ok == 0))
2197                 if (unlikely(vring_translate(dev, vq) < 0)) {
2198                         count = 0;
2199                         goto out;
2200                 }
2201
2202         /*
2203          * Construct a RARP broadcast packet, and inject it into the "pkts"
2204          * array, so that it looks like the guest actually sent such a packet.
2205          *
2206          * Check user_send_rarp() for more information.
2207          *
2208          * broadcast_rarp shares a cacheline in the virtio_net structure
2209          * with some fields that are accessed during enqueue, and
2210          * rte_atomic16_cmpset() causes a write if using cmpxchg. This could
2211          * result in false sharing between enqueue and dequeue.
2212          *
2213          * Prevent unnecessary false sharing by reading broadcast_rarp first
2214          * and only performing cmpset if the read indicates it is likely to
2215          * be set.
2216          */
2217         if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) &&
2218                         rte_atomic16_cmpset((volatile uint16_t *)
2219                                 &dev->broadcast_rarp.cnt, 1, 0))) {
2220
2221                 rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
2222                 if (rarp_mbuf == NULL) {
2223                         VHOST_LOG_DATA(ERR, "Failed to make RARP packet.\n");
2224                         count = 0;
2225                         goto out;
2226                 }
2227                 count -= 1;
2228         }
2229
2230         if (vq_is_packed(dev)) {
2231                 if (unlikely(dev->dequeue_zero_copy))
2232                         count = virtio_dev_tx_packed_zmbuf(dev, vq, mbuf_pool,
2233                                                            pkts, count);
2234                 else
2235                         count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts,
2236                                                      count);
2237         } else
2238                 count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count);
2239
2240 out:
2241         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2242                 vhost_user_iotlb_rd_unlock(vq);
2243
2244 out_access_unlock:
2245         rte_spinlock_unlock(&vq->access_lock);
2246
2247         if (unlikely(rarp_mbuf != NULL)) {
2248                 /*
2249                  * Inject it at the head of the "pkts" array, so that the switch's
2250                  * MAC learning table gets updated first.
2251                  */
2252                 memmove(&pkts[1], pkts, count * sizeof(struct rte_mbuf *));
2253                 pkts[0] = rarp_mbuf;
2254                 count += 1;
2255         }
2256
2257         return count;
2258 }