vhost: fix batch enqueue only handle few packets
lib/librte_vhost/virtio_net.c (dpdk.git)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <stdbool.h>
7 #include <linux/virtio_net.h>
8
9 #include <rte_mbuf.h>
10 #include <rte_memcpy.h>
11 #include <rte_ether.h>
12 #include <rte_ip.h>
13 #include <rte_vhost.h>
14 #include <rte_tcp.h>
15 #include <rte_udp.h>
16 #include <rte_sctp.h>
17 #include <rte_arp.h>
18 #include <rte_spinlock.h>
19 #include <rte_malloc.h>
20
21 #include "iotlb.h"
22 #include "vhost.h"
23
24 #define MAX_PKT_BURST 32
25
26 #define MAX_BATCH_LEN 256
27
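/*
 * MAX_PKT_BURST caps how many mbufs a single enqueue/dequeue call will
 * process, while copies no longer than MAX_BATCH_LEN bytes are normally
 * deferred into vq->batch_copy_elems and flushed in one pass by
 * do_data_copy_enqueue() and do_data_copy_dequeue() below.
 */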
28 static __rte_always_inline bool
29 rxvq_is_mergeable(struct virtio_net *dev)
30 {
31         return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
32 }
33
34 static __rte_always_inline bool
35 virtio_net_is_inorder(struct virtio_net *dev)
36 {
37         return dev->features & (1ULL << VIRTIO_F_IN_ORDER);
38 }
39
40 static bool
41 is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
42 {
43         return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
44 }
45
46 static __rte_always_inline void
47 do_flush_shadow_used_ring_split(struct virtio_net *dev,
48                         struct vhost_virtqueue *vq,
49                         uint16_t to, uint16_t from, uint16_t size)
50 {
51         rte_memcpy(&vq->used->ring[to],
52                         &vq->shadow_used_split[from],
53                         size * sizeof(struct vring_used_elem));
54         vhost_log_cache_used_vring(dev, vq,
55                         offsetof(struct vring_used, ring[to]),
56                         size * sizeof(struct vring_used_elem));
57 }
58
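/*
 * The shadow used ring batches used-element updates in host-private
 * memory; flush_shadow_used_ring_split() then copies them into the
 * guest-visible used ring in at most two chunks to handle the wrap at
 * vq->size, and only bumps used->idx after a write barrier so the guest
 * never observes an index that points at stale entries.
 */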
59 static __rte_always_inline void
60 flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
61 {
62         uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
63
64         if (used_idx + vq->shadow_used_idx <= vq->size) {
65                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
66                                           vq->shadow_used_idx);
67         } else {
68                 uint16_t size;
69
70                 /* update used ring interval [used_idx, vq->size] */
71                 size = vq->size - used_idx;
72                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);
73
74                 /* update the left half used ring interval [0, left_size] */
75                 do_flush_shadow_used_ring_split(dev, vq, 0, size,
76                                           vq->shadow_used_idx - size);
77         }
78         vq->last_used_idx += vq->shadow_used_idx;
79
80         rte_smp_wmb();
81
82         vhost_log_cache_sync(dev, vq);
83
84         *(volatile uint16_t *)&vq->used->idx += vq->shadow_used_idx;
85         vq->shadow_used_idx = 0;
86         vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
87                 sizeof(vq->used->idx));
88 }
89
90 static __rte_always_inline void
91 update_shadow_used_ring_split(struct vhost_virtqueue *vq,
92                          uint16_t desc_idx, uint32_t len)
93 {
94         uint16_t i = vq->shadow_used_idx++;
95
96         vq->shadow_used_split[i].id  = desc_idx;
97         vq->shadow_used_split[i].len = len;
98 }
99
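/*
 * For packed rings, used descriptors are written back in place: ids and
 * lens first, then a write barrier, then the flags. The head
 * descriptor's flags are stored last because their AVAIL/USED bits are
 * what hand the whole chain back to the guest driver.
 */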
100 static __rte_always_inline void
101 vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
102                                   struct vhost_virtqueue *vq)
103 {
104         int i;
105         uint16_t used_idx = vq->last_used_idx;
106         uint16_t head_idx = vq->last_used_idx;
107         uint16_t head_flags = 0;
108
109         /* Split loop in two to save memory barriers */
110         for (i = 0; i < vq->shadow_used_idx; i++) {
111                 vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
112                 vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;
113
114                 used_idx += vq->shadow_used_packed[i].count;
115                 if (used_idx >= vq->size)
116                         used_idx -= vq->size;
117         }
118
119         rte_smp_wmb();
120
121         for (i = 0; i < vq->shadow_used_idx; i++) {
122                 uint16_t flags;
123
124                 if (vq->shadow_used_packed[i].len)
125                         flags = VRING_DESC_F_WRITE;
126                 else
127                         flags = 0;
128
129                 if (vq->used_wrap_counter) {
130                         flags |= VRING_DESC_F_USED;
131                         flags |= VRING_DESC_F_AVAIL;
132                 } else {
133                         flags &= ~VRING_DESC_F_USED;
134                         flags &= ~VRING_DESC_F_AVAIL;
135                 }
136
137                 if (i > 0) {
138                         vq->desc_packed[vq->last_used_idx].flags = flags;
139
140                         vhost_log_cache_used_vring(dev, vq,
141                                         vq->last_used_idx *
142                                         sizeof(struct vring_packed_desc),
143                                         sizeof(struct vring_packed_desc));
144                 } else {
145                         head_idx = vq->last_used_idx;
146                         head_flags = flags;
147                 }
148
149                 vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count);
150         }
151
152         vq->desc_packed[head_idx].flags = head_flags;
153
154         vhost_log_cache_used_vring(dev, vq,
155                                 head_idx *
156                                 sizeof(struct vring_packed_desc),
157                                 sizeof(struct vring_packed_desc));
158
159         vq->shadow_used_idx = 0;
160         vhost_log_cache_sync(dev, vq);
161 }
162
163 static __rte_always_inline void
164 vhost_flush_dequeue_shadow_packed(struct virtio_net *dev,
165                                   struct vhost_virtqueue *vq)
166 {
167         struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];
168
169         vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
170         rte_smp_wmb();
171         vq->desc_packed[vq->shadow_last_used_idx].flags = used_elem->flags;
172
173         vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
174                                    sizeof(struct vring_packed_desc),
175                                    sizeof(struct vring_packed_desc));
176         vq->shadow_used_idx = 0;
177         vhost_log_cache_sync(dev, vq);
178 }
179
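/*
 * Batch variant of the enqueue flush: all PACKED_BATCH_SIZE descriptors
 * share the same wrap-dependent flags, so ids and lens are filled in an
 * unrolled loop, followed by a single rte_smp_wmb() and one pass over
 * the flags fields.
 */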
180 static __rte_always_inline void
181 vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
182                                  struct vhost_virtqueue *vq,
183                                  uint64_t *lens,
184                                  uint16_t *ids)
185 {
186         uint16_t i;
187         uint16_t flags;
188
189         flags = PACKED_DESC_ENQUEUE_USED_FLAG(vq->used_wrap_counter);
190
191         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
192                 vq->desc_packed[vq->last_used_idx + i].id = ids[i];
193                 vq->desc_packed[vq->last_used_idx + i].len = lens[i];
194         }
195
196         rte_smp_wmb();
197
198         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
199                 vq->desc_packed[vq->last_used_idx + i].flags = flags;
200
201         vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
202                                    sizeof(struct vring_packed_desc),
203                                    sizeof(struct vring_packed_desc) *
204                                    PACKED_BATCH_SIZE);
205         vhost_log_cache_sync(dev, vq);
206
207         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
208 }
209
210 static __rte_always_inline void
211 vhost_shadow_dequeue_batch_packed_inorder(struct vhost_virtqueue *vq,
212                                           uint16_t id)
213 {
214         vq->shadow_used_packed[0].id = id;
215
216         if (!vq->shadow_used_idx) {
217                 vq->shadow_last_used_idx = vq->last_used_idx;
218                 vq->shadow_used_packed[0].flags =
219                         PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
220                 vq->shadow_used_packed[0].len = 0;
221                 vq->shadow_used_packed[0].count = 1;
222                 vq->shadow_used_idx++;
223         }
224
225         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
226 }
227
228 static __rte_always_inline void
229 vhost_shadow_dequeue_batch_packed(struct virtio_net *dev,
230                                   struct vhost_virtqueue *vq,
231                                   uint16_t *ids)
232 {
233         uint16_t flags;
234         uint16_t i;
235         uint16_t begin;
236
237         flags = PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
238
239         if (!vq->shadow_used_idx) {
240                 vq->shadow_last_used_idx = vq->last_used_idx;
241                 vq->shadow_used_packed[0].id  = ids[0];
242                 vq->shadow_used_packed[0].len = 0;
243                 vq->shadow_used_packed[0].count = 1;
244                 vq->shadow_used_packed[0].flags = flags;
245                 vq->shadow_used_idx++;
246                 begin = 1;
247         } else
248                 begin = 0;
249
250         vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE) {
251                 vq->desc_packed[vq->last_used_idx + i].id = ids[i];
252                 vq->desc_packed[vq->last_used_idx + i].len = 0;
253         }
254
255         rte_smp_wmb();
256         vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE)
257                 vq->desc_packed[vq->last_used_idx + i].flags = flags;
258
259         vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
260                                    sizeof(struct vring_packed_desc),
261                                    sizeof(struct vring_packed_desc) *
262                                    PACKED_BATCH_SIZE);
263         vhost_log_cache_sync(dev, vq);
264
265         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
266 }
267
268 static __rte_always_inline void
269 vhost_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
270                                    uint16_t buf_id,
271                                    uint16_t count)
272 {
273         uint16_t flags;
274
275         flags = vq->desc_packed[vq->last_used_idx].flags;
276         if (vq->used_wrap_counter) {
277                 flags |= VRING_DESC_F_USED;
278                 flags |= VRING_DESC_F_AVAIL;
279         } else {
280                 flags &= ~VRING_DESC_F_USED;
281                 flags &= ~VRING_DESC_F_AVAIL;
282         }
283
284         if (!vq->shadow_used_idx) {
285                 vq->shadow_last_used_idx = vq->last_used_idx;
286
287                 vq->shadow_used_packed[0].id  = buf_id;
288                 vq->shadow_used_packed[0].len = 0;
289                 vq->shadow_used_packed[0].flags = flags;
290                 vq->shadow_used_idx++;
291         } else {
292                 vq->desc_packed[vq->last_used_idx].id = buf_id;
293                 vq->desc_packed[vq->last_used_idx].len = 0;
294                 vq->desc_packed[vq->last_used_idx].flags = flags;
295         }
296
297         vq_inc_last_used_packed(vq, count);
298 }
299
300 static __rte_always_inline void
301 vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
302                                            uint16_t buf_id,
303                                            uint16_t count)
304 {
305         uint16_t flags;
306
307         vq->shadow_used_packed[0].id = buf_id;
308
309         flags = vq->desc_packed[vq->last_used_idx].flags;
310         if (vq->used_wrap_counter) {
311                 flags |= VRING_DESC_F_USED;
312                 flags |= VRING_DESC_F_AVAIL;
313         } else {
314                 flags &= ~VRING_DESC_F_USED;
315                 flags &= ~VRING_DESC_F_AVAIL;
316         }
317
318         if (!vq->shadow_used_idx) {
319                 vq->shadow_last_used_idx = vq->last_used_idx;
320                 vq->shadow_used_packed[0].len = 0;
321                 vq->shadow_used_packed[0].flags = flags;
322                 vq->shadow_used_idx++;
323         }
324
325         vq_inc_last_used_packed(vq, count);
326 }
327
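/*
 * Drain the batch-copy array filled by copy_mbuf_to_desc() and
 * copy_desc_to_mbuf(). Grouping the small memcpys here keeps them off
 * the descriptor-walking path; the enqueue variant additionally logs
 * the written ranges for live migration.
 */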
328 static inline void
329 do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
330 {
331         struct batch_copy_elem *elem = vq->batch_copy_elems;
332         uint16_t count = vq->batch_copy_nb_elems;
333         int i;
334
335         for (i = 0; i < count; i++) {
336                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
337                 vhost_log_cache_write_iova(dev, vq, elem[i].log_addr,
338                                            elem[i].len);
339                 PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
340         }
341
342         vq->batch_copy_nb_elems = 0;
343 }
344
345 static inline void
346 do_data_copy_dequeue(struct vhost_virtqueue *vq)
347 {
348         struct batch_copy_elem *elem = vq->batch_copy_elems;
349         uint16_t count = vq->batch_copy_nb_elems;
350         int i;
351
352         for (i = 0; i < count; i++)
353                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
354
355         vq->batch_copy_nb_elems = 0;
356 }
357
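/*
 * Single-packet enqueues also go through the shadow ring, but flushing
 * is deferred until shadow_aligned_idx reaches PACKED_BATCH_SIZE so
 * that used-descriptor writes stay grouped on batch boundaries.
 */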
358 static __rte_always_inline void
359 vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
360                                    struct vhost_virtqueue *vq,
361                                    uint32_t len[],
362                                    uint16_t id[],
363                                    uint16_t count[],
364                                    uint16_t num_buffers)
365 {
366         uint16_t i;
367         for (i = 0; i < num_buffers; i++) {
368                 /* keep the shadow flush point aligned to the batch size */
369                 if (!vq->shadow_used_idx)
370                         vq->shadow_aligned_idx = vq->last_used_idx &
371                                 PACKED_BATCH_MASK;
372                 vq->shadow_used_packed[vq->shadow_used_idx].id  = id[i];
373                 vq->shadow_used_packed[vq->shadow_used_idx].len = len[i];
374                 vq->shadow_used_packed[vq->shadow_used_idx].count = count[i];
375                 vq->shadow_aligned_idx += count[i];
376                 vq->shadow_used_idx++;
377         }
378
379         if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
380                 do_data_copy_enqueue(dev, vq);
381                 vhost_flush_enqueue_shadow_packed(dev, vq);
382         }
383 }
384
385 static __rte_always_inline void
386 vhost_flush_dequeue_packed(struct virtio_net *dev,
387                            struct vhost_virtqueue *vq)
388 {
389         int shadow_count;
390         if (!vq->shadow_used_idx)
391                 return;
392
393         shadow_count = vq->last_used_idx - vq->shadow_last_used_idx;
394         if (shadow_count <= 0)
395                 shadow_count += vq->size;
396
397         if ((uint32_t)shadow_count >= (vq->size - MAX_PKT_BURST)) {
398                 do_data_copy_dequeue(vq);
399                 vhost_flush_dequeue_shadow_packed(dev, vq);
400                 vhost_vring_call_packed(dev, vq);
401         }
402 }
403
404 /* avoid the write when the value is already equal, to lessen cache issues */
405 #define ASSIGN_UNLESS_EQUAL(var, val) do {      \
406         if ((var) != (val))                     \
407                 (var) = (val);                  \
408 } while (0)
409
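/*
 * Translate the mbuf Tx offload requests into the virtio_net_hdr seen
 * by the guest: L4 checksum offload becomes NEEDS_CSUM with the proper
 * csum_start/csum_offset, and TSO/UFO become the matching GSO type with
 * gso_size and hdr_len taken from the mbuf's l2/l3/l4 lengths. As an
 * illustration, a TCPv4 TSO packet handed to the enqueue path would
 * typically be prepared by the application roughly like this (values
 * are examples only):
 *
 *     m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG;
 *     m->l2_len = sizeof(struct rte_ether_hdr);
 *     m->l3_len = sizeof(struct rte_ipv4_hdr);
 *     m->l4_len = sizeof(struct rte_tcp_hdr);
 *     m->tso_segsz = 1448;
 */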
410 static __rte_always_inline void
411 virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
412 {
413         uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;
414
415         if (m_buf->ol_flags & PKT_TX_TCP_SEG)
416                 csum_l4 |= PKT_TX_TCP_CKSUM;
417
418         if (csum_l4) {
419                 net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
420                 net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
421
422                 switch (csum_l4) {
423                 case PKT_TX_TCP_CKSUM:
424                         net_hdr->csum_offset = (offsetof(struct rte_tcp_hdr,
425                                                 cksum));
426                         break;
427                 case PKT_TX_UDP_CKSUM:
428                         net_hdr->csum_offset = (offsetof(struct rte_udp_hdr,
429                                                 dgram_cksum));
430                         break;
431                 case PKT_TX_SCTP_CKSUM:
432                         net_hdr->csum_offset = (offsetof(struct rte_sctp_hdr,
433                                                 cksum));
434                         break;
435                 }
436         } else {
437                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
438                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
439                 ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
440         }
441
442         /* IP cksum verification cannot be bypassed, so calculate it here */
443         if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
444                 struct rte_ipv4_hdr *ipv4_hdr;
445
446                 ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
447                                                    m_buf->l2_len);
448                 ipv4_hdr->hdr_checksum = 0;
449                 ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
450         }
451
452         if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
453                 if (m_buf->ol_flags & PKT_TX_IPV4)
454                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
455                 else
456                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
457                 net_hdr->gso_size = m_buf->tso_segsz;
458                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
459                                         + m_buf->l4_len;
460         } else if (m_buf->ol_flags & PKT_TX_UDP_SEG) {
461                 net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
462                 net_hdr->gso_size = m_buf->tso_segsz;
463                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
464                         m_buf->l4_len;
465         } else {
466                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
467                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
468                 ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
469         }
470 }
471
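/*
 * Map one guest descriptor into host virtual addresses. A single
 * descriptor may resolve to several non-contiguous chunks (separate
 * memory regions or IOTLB entries), so it can consume more than one
 * buf_vec slot, bounded by BUF_VECTOR_MAX.
 */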
472 static __rte_always_inline int
473 map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
474                 struct buf_vector *buf_vec, uint16_t *vec_idx,
475                 uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
476 {
477         uint16_t vec_id = *vec_idx;
478
479         while (desc_len) {
480                 uint64_t desc_addr;
481                 uint64_t desc_chunck_len = desc_len;
482
483                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
484                         return -1;
485
486                 desc_addr = vhost_iova_to_vva(dev, vq,
487                                 desc_iova,
488                                 &desc_chunck_len,
489                                 perm);
490                 if (unlikely(!desc_addr))
491                         return -1;
492
493                 rte_prefetch0((void *)(uintptr_t)desc_addr);
494
495                 buf_vec[vec_id].buf_iova = desc_iova;
496                 buf_vec[vec_id].buf_addr = desc_addr;
497                 buf_vec[vec_id].buf_len  = desc_chunck_len;
498
499                 desc_len -= desc_chunck_len;
500                 desc_iova += desc_chunck_len;
501                 vec_id++;
502         }
503         *vec_idx = vec_id;
504
505         return 0;
506 }
507
508 static __rte_always_inline int
509 fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
510                          uint32_t avail_idx, uint16_t *vec_idx,
511                          struct buf_vector *buf_vec, uint16_t *desc_chain_head,
512                          uint32_t *desc_chain_len, uint8_t perm)
513 {
514         uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
515         uint16_t vec_id = *vec_idx;
516         uint32_t len    = 0;
517         uint64_t dlen;
518         uint32_t nr_descs = vq->size;
519         uint32_t cnt    = 0;
520         struct vring_desc *descs = vq->desc;
521         struct vring_desc *idesc = NULL;
522
523         if (unlikely(idx >= vq->size))
524                 return -1;
525
526         *desc_chain_head = idx;
527
528         if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
529                 dlen = vq->desc[idx].len;
530                 nr_descs = dlen / sizeof(struct vring_desc);
531                 if (unlikely(nr_descs > vq->size))
532                         return -1;
533
534                 descs = (struct vring_desc *)(uintptr_t)
535                         vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
536                                                 &dlen,
537                                                 VHOST_ACCESS_RO);
538                 if (unlikely(!descs))
539                         return -1;
540
541                 if (unlikely(dlen < vq->desc[idx].len)) {
542                         /*
543                          * The indirect desc table is not contiguous
544                          * in the process VA space, so we have to copy it.
545                          */
546                         idesc = vhost_alloc_copy_ind_table(dev, vq,
547                                         vq->desc[idx].addr, vq->desc[idx].len);
548                         if (unlikely(!idesc))
549                                 return -1;
550
551                         descs = idesc;
552                 }
553
554                 idx = 0;
555         }
556
557         while (1) {
558                 if (unlikely(idx >= nr_descs || cnt++ >= nr_descs)) {
559                         free_ind_table(idesc);
560                         return -1;
561                 }
562
563                 len += descs[idx].len;
564
565                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
566                                                 descs[idx].addr, descs[idx].len,
567                                                 perm))) {
568                         free_ind_table(idesc);
569                         return -1;
570                 }
571
572                 if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
573                         break;
574
575                 idx = descs[idx].next;
576         }
577
578         *desc_chain_len = len;
579         *vec_idx = vec_id;
580
581         if (unlikely(!!idesc))
582                 free_ind_table(idesc);
583
584         return 0;
585 }
586
587 /*
588  * Returns -1 on failure, 0 on success
589  */
590 static inline int
591 reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
592                                 uint32_t size, struct buf_vector *buf_vec,
593                                 uint16_t *num_buffers, uint16_t avail_head,
594                                 uint16_t *nr_vec)
595 {
596         uint16_t cur_idx;
597         uint16_t vec_idx = 0;
598         uint16_t max_tries, tries = 0;
599
600         uint16_t head_idx = 0;
601         uint32_t len = 0;
602
603         *num_buffers = 0;
604         cur_idx  = vq->last_avail_idx;
605
606         if (rxvq_is_mergeable(dev))
607                 max_tries = vq->size - 1;
608         else
609                 max_tries = 1;
610
611         while (size > 0) {
612                 if (unlikely(cur_idx == avail_head))
613                         return -1;
614                 /*
615                  * if we tried all available ring items, and still
616                  * can't get enough buf, it means something abnormal
617                  * happened.
618                  */
619                 if (unlikely(++tries > max_tries))
620                         return -1;
621
622                 if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
623                                                 &vec_idx, buf_vec,
624                                                 &head_idx, &len,
625                                                 VHOST_ACCESS_RW) < 0))
626                         return -1;
627                 len = RTE_MIN(len, size);
628                 update_shadow_used_ring_split(vq, head_idx, len);
629                 size -= len;
630
631                 cur_idx++;
632                 *num_buffers += 1;
633         }
634
635         *nr_vec = vec_idx;
636
637         return 0;
638 }
639
640 static __rte_always_inline int
641 fill_vec_buf_packed_indirect(struct virtio_net *dev,
642                         struct vhost_virtqueue *vq,
643                         struct vring_packed_desc *desc, uint16_t *vec_idx,
644                         struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
645 {
646         uint16_t i;
647         uint32_t nr_descs;
648         uint16_t vec_id = *vec_idx;
649         uint64_t dlen;
650         struct vring_packed_desc *descs, *idescs = NULL;
651
652         dlen = desc->len;
653         descs = (struct vring_packed_desc *)(uintptr_t)
654                 vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
655         if (unlikely(!descs))
656                 return -1;
657
658         if (unlikely(dlen < desc->len)) {
659                 /*
660                  * The indirect desc table is not contiguous
661                          * in the process VA space, so we have to copy it.
662                  */
663                 idescs = vhost_alloc_copy_ind_table(dev,
664                                 vq, desc->addr, desc->len);
665                 if (unlikely(!idescs))
666                         return -1;
667
668                 descs = idescs;
669         }
670
671         nr_descs =  desc->len / sizeof(struct vring_packed_desc);
672         if (unlikely(nr_descs >= vq->size)) {
673                 free_ind_table(idescs);
674                 return -1;
675         }
676
677         for (i = 0; i < nr_descs; i++) {
678                 if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
679                         free_ind_table(idescs);
680                         return -1;
681                 }
682
683                 *len += descs[i].len;
684                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
685                                                 descs[i].addr, descs[i].len,
686                                                 perm)))
687                         return -1;
688         }
689         *vec_idx = vec_id;
690
691         if (unlikely(!!idescs))
692                 free_ind_table(idescs);
693
694         return 0;
695 }
696
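/*
 * Walk one packed-ring descriptor chain starting at avail_idx and map
 * it into buf_vec, toggling the wrap counter whenever the index wraps
 * past vq->size and expanding indirect tables through the helper above.
 * desc_count reports how many ring slots the chain consumed.
 */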
697 static __rte_always_inline int
698 fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
699                                 uint16_t avail_idx, uint16_t *desc_count,
700                                 struct buf_vector *buf_vec, uint16_t *vec_idx,
701                                 uint16_t *buf_id, uint32_t *len, uint8_t perm)
702 {
703         bool wrap_counter = vq->avail_wrap_counter;
704         struct vring_packed_desc *descs = vq->desc_packed;
705         uint16_t vec_id = *vec_idx;
706
707         if (avail_idx < vq->last_avail_idx)
708                 wrap_counter ^= 1;
709
710         /*
711          * Perform a load-acquire barrier in desc_is_avail to
712          * enforce the ordering between desc flags and desc
713          * content.
714          */
715         if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
716                 return -1;
717
718         *desc_count = 0;
719         *len = 0;
720
721         while (1) {
722                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
723                         return -1;
724
725                 if (unlikely(*desc_count >= vq->size))
726                         return -1;
727
728                 *desc_count += 1;
729                 *buf_id = descs[avail_idx].id;
730
731                 if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
732                         if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
733                                                         &descs[avail_idx],
734                                                         &vec_id, buf_vec,
735                                                         len, perm) < 0))
736                                 return -1;
737                 } else {
738                         *len += descs[avail_idx].len;
739
740                         if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
741                                                         descs[avail_idx].addr,
742                                                         descs[avail_idx].len,
743                                                         perm)))
744                                 return -1;
745                 }
746
747                 if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
748                         break;
749
750                 if (++avail_idx >= vq->size) {
751                         avail_idx -= vq->size;
752                         wrap_counter ^= 1;
753                 }
754         }
755
756         *vec_idx = vec_id;
757
758         return 0;
759 }
760
761 static __rte_noinline void
762 copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
763                 struct buf_vector *buf_vec,
764                 struct virtio_net_hdr_mrg_rxbuf *hdr)
765 {
766         uint64_t len;
767         uint64_t remain = dev->vhost_hlen;
768         uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
769         uint64_t iova = buf_vec->buf_iova;
770
771         while (remain) {
772                 len = RTE_MIN(remain,
773                                 buf_vec->buf_len);
774                 dst = buf_vec->buf_addr;
775                 rte_memcpy((void *)(uintptr_t)dst,
776                                 (void *)(uintptr_t)src,
777                                 len);
778
779                 PRINT_PACKET(dev, (uintptr_t)dst,
780                                 (uint32_t)len, 0);
781                 vhost_log_cache_write_iova(dev, vq,
782                                 iova, len);
783
784                 remain -= len;
785                 iova += len;
786                 src += len;
787                 buf_vec++;
788         }
789 }
790
791 static __rte_always_inline int
792 copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
793                             struct rte_mbuf *m, struct buf_vector *buf_vec,
794                             uint16_t nr_vec, uint16_t num_buffers)
795 {
796         uint32_t vec_idx = 0;
797         uint32_t mbuf_offset, mbuf_avail;
798         uint32_t buf_offset, buf_avail;
799         uint64_t buf_addr, buf_iova, buf_len;
800         uint32_t cpy_len;
801         uint64_t hdr_addr;
802         struct rte_mbuf *hdr_mbuf;
803         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
804         struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
805         int error = 0;
806
807         if (unlikely(m == NULL)) {
808                 error = -1;
809                 goto out;
810         }
811
812         buf_addr = buf_vec[vec_idx].buf_addr;
813         buf_iova = buf_vec[vec_idx].buf_iova;
814         buf_len = buf_vec[vec_idx].buf_len;
815
816         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
817                 error = -1;
818                 goto out;
819         }
820
821         hdr_mbuf = m;
822         hdr_addr = buf_addr;
823         if (unlikely(buf_len < dev->vhost_hlen))
824                 hdr = &tmp_hdr;
825         else
826                 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
827
828         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
829                 dev->vid, num_buffers);
830
831         if (unlikely(buf_len < dev->vhost_hlen)) {
832                 buf_offset = dev->vhost_hlen - buf_len;
833                 vec_idx++;
834                 buf_addr = buf_vec[vec_idx].buf_addr;
835                 buf_iova = buf_vec[vec_idx].buf_iova;
836                 buf_len = buf_vec[vec_idx].buf_len;
837                 buf_avail = buf_len - buf_offset;
838         } else {
839                 buf_offset = dev->vhost_hlen;
840                 buf_avail = buf_len - dev->vhost_hlen;
841         }
842
843         mbuf_avail  = rte_pktmbuf_data_len(m);
844         mbuf_offset = 0;
845         while (mbuf_avail != 0 || m->next != NULL) {
846                 /* done with current buf, get the next one */
847                 if (buf_avail == 0) {
848                         vec_idx++;
849                         if (unlikely(vec_idx >= nr_vec)) {
850                                 error = -1;
851                                 goto out;
852                         }
853
854                         buf_addr = buf_vec[vec_idx].buf_addr;
855                         buf_iova = buf_vec[vec_idx].buf_iova;
856                         buf_len = buf_vec[vec_idx].buf_len;
857
858                         buf_offset = 0;
859                         buf_avail  = buf_len;
860                 }
861
862                 /* done with current mbuf, get the next one */
863                 if (mbuf_avail == 0) {
864                         m = m->next;
865
866                         mbuf_offset = 0;
867                         mbuf_avail  = rte_pktmbuf_data_len(m);
868                 }
869
870                 if (hdr_addr) {
871                         virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
872                         if (rxvq_is_mergeable(dev))
873                                 ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
874                                                 num_buffers);
875
876                         if (unlikely(hdr == &tmp_hdr)) {
877                                 copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
878                         } else {
879                                 PRINT_PACKET(dev, (uintptr_t)hdr_addr,
880                                                 dev->vhost_hlen, 0);
881                                 vhost_log_cache_write_iova(dev, vq,
882                                                 buf_vec[0].buf_iova,
883                                                 dev->vhost_hlen);
884                         }
885
886                         hdr_addr = 0;
887                 }
888
889                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
890
891                 if (likely(cpy_len > MAX_BATCH_LEN ||
892                                         vq->batch_copy_nb_elems >= vq->size)) {
893                         rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
894                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
895                                 cpy_len);
896                         vhost_log_cache_write_iova(dev, vq,
897                                                    buf_iova + buf_offset,
898                                                    cpy_len);
899                         PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
900                                 cpy_len, 0);
901                 } else {
902                         batch_copy[vq->batch_copy_nb_elems].dst =
903                                 (void *)((uintptr_t)(buf_addr + buf_offset));
904                         batch_copy[vq->batch_copy_nb_elems].src =
905                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
906                         batch_copy[vq->batch_copy_nb_elems].log_addr =
907                                 buf_iova + buf_offset;
908                         batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
909                         vq->batch_copy_nb_elems++;
910                 }
911
912                 mbuf_avail  -= cpy_len;
913                 mbuf_offset += cpy_len;
914                 buf_avail  -= cpy_len;
915                 buf_offset += cpy_len;
916         }
917
918 out:
919
920         return error;
921 }
922
923 static __rte_always_inline int
924 vhost_enqueue_single_packed(struct virtio_net *dev,
925                             struct vhost_virtqueue *vq,
926                             struct rte_mbuf *pkt,
927                             struct buf_vector *buf_vec,
928                             uint16_t *nr_descs)
929 {
930         uint16_t nr_vec = 0;
931         uint16_t avail_idx = vq->last_avail_idx;
932         uint16_t max_tries, tries = 0;
933         uint16_t buf_id = 0;
934         uint32_t len = 0;
935         uint16_t desc_count;
936         uint32_t size = pkt->pkt_len + dev->vhost_hlen;
937         uint16_t num_buffers = 0;
938         uint32_t buffer_len[vq->size];
939         uint16_t buffer_buf_id[vq->size];
940         uint16_t buffer_desc_count[vq->size];
941
942         if (rxvq_is_mergeable(dev))
943                 max_tries = vq->size - 1;
944         else
945                 max_tries = 1;
946
947         while (size > 0) {
948                 /*
949                  * if we tried all available ring items, and still
950                  * can't get enough buf, it means something abnormal
951                  * happened.
952                  */
953                 if (unlikely(++tries > max_tries))
954                         return -1;
955
956                 if (unlikely(fill_vec_buf_packed(dev, vq,
957                                                 avail_idx, &desc_count,
958                                                 buf_vec, &nr_vec,
959                                                 &buf_id, &len,
960                                                 VHOST_ACCESS_RW) < 0))
961                         return -1;
962
963                 len = RTE_MIN(len, size);
964                 size -= len;
965
966                 buffer_len[num_buffers] = len;
967                 buffer_buf_id[num_buffers] = buf_id;
968                 buffer_desc_count[num_buffers] = desc_count;
969                 num_buffers += 1;
970
971                 *nr_descs += desc_count;
972                 avail_idx += desc_count;
973                 if (avail_idx >= vq->size)
974                         avail_idx -= vq->size;
975         }
976
977         if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0)
978                 return -1;
979
980         vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
981                                            buffer_desc_count, num_buffers);
982
983         return 0;
984 }
985
986 static __rte_noinline uint32_t
987 virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
988         struct rte_mbuf **pkts, uint32_t count)
989 {
990         uint32_t pkt_idx = 0;
991         uint16_t num_buffers;
992         struct buf_vector buf_vec[BUF_VECTOR_MAX];
993         uint16_t avail_head;
994
995         avail_head = *((volatile uint16_t *)&vq->avail->idx);
996
997         /*
998          * The ordering between avail index and
999          * desc reads needs to be enforced.
1000          */
1001         rte_smp_rmb();
1002
1003         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1004
1005         for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
1006                 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
1007                 uint16_t nr_vec = 0;
1008
1009                 if (unlikely(reserve_avail_buf_split(dev, vq,
1010                                                 pkt_len, buf_vec, &num_buffers,
1011                                                 avail_head, &nr_vec) < 0)) {
1012                         VHOST_LOG_DEBUG(VHOST_DATA,
1013                                 "(%d) failed to get enough desc from vring\n",
1014                                 dev->vid);
1015                         vq->shadow_used_idx -= num_buffers;
1016                         break;
1017                 }
1018
1019                 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
1020                         dev->vid, vq->last_avail_idx,
1021                         vq->last_avail_idx + num_buffers);
1022
1023                 if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
1024                                                 buf_vec, nr_vec,
1025                                                 num_buffers) < 0) {
1026                         vq->shadow_used_idx -= num_buffers;
1027                         break;
1028                 }
1029
1030                 vq->last_avail_idx += num_buffers;
1031         }
1032
1033         do_data_copy_enqueue(dev, vq);
1034
1035         if (likely(vq->shadow_used_idx)) {
1036                 flush_shadow_used_ring_split(dev, vq);
1037                 vhost_vring_call_split(dev, vq);
1038         }
1039
1040         return pkt_idx;
1041 }
1042
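/*
 * Fast path: enqueue PACKED_BATCH_SIZE packets in one go. It only
 * applies when last_avail_idx is batch-aligned, the batch does not wrap
 * past vq->size, every mbuf is a single segment and each packet (plus
 * the virtio-net header) fits into its descriptor; otherwise the caller
 * falls back to the single-packet path below.
 */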
1043 static __rte_always_inline int
1044 virtio_dev_rx_batch_packed(struct virtio_net *dev,
1045                            struct vhost_virtqueue *vq,
1046                            struct rte_mbuf **pkts)
1047 {
1048         bool wrap_counter = vq->avail_wrap_counter;
1049         struct vring_packed_desc *descs = vq->desc_packed;
1050         uint16_t avail_idx = vq->last_avail_idx;
1051         uint64_t desc_addrs[PACKED_BATCH_SIZE];
1052         struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
1053         uint32_t buf_offset = dev->vhost_hlen;
1054         uint64_t lens[PACKED_BATCH_SIZE];
1055         uint16_t ids[PACKED_BATCH_SIZE];
1056         uint16_t i;
1057
1058         if (unlikely(avail_idx & PACKED_BATCH_MASK))
1059                 return -1;
1060
1061         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
1062                 return -1;
1063
1064         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1065                 if (unlikely(pkts[i]->next != NULL))
1066                         return -1;
1067                 if (unlikely(!desc_is_avail(&descs[avail_idx + i],
1068                                             wrap_counter)))
1069                         return -1;
1070         }
1071
1072         rte_smp_rmb();
1073
1074         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1075                 lens[i] = descs[avail_idx + i].len;
1076
1077         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1078                 if (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset)))
1079                         return -1;
1080         }
1081
1082         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1083                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
1084                                                   descs[avail_idx + i].addr,
1085                                                   &lens[i],
1086                                                   VHOST_ACCESS_RW);
1087
1088         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1089                 if (unlikely(lens[i] != descs[avail_idx + i].len))
1090                         return -1;
1091         }
1092
1093         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1094                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
1095                 hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
1096                                         (uintptr_t)desc_addrs[i];
1097                 lens[i] = pkts[i]->pkt_len + dev->vhost_hlen;
1098         }
1099
1100         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1101                 virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr);
1102
1103         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
1104
1105         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1106                 rte_memcpy((void *)(uintptr_t)(desc_addrs[i] + buf_offset),
1107                            rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
1108                            pkts[i]->pkt_len);
1109         }
1110
1111         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1112                 vhost_log_cache_write_iova(dev, vq, descs[avail_idx + i].addr,
1113                                            lens[i]);
1114
1115         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1116                 ids[i] = descs[avail_idx + i].id;
1117
1118         vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);
1119
1120         return 0;
1121 }
1122
1123 static __rte_always_inline int16_t
1124 virtio_dev_rx_single_packed(struct virtio_net *dev,
1125                             struct vhost_virtqueue *vq,
1126                             struct rte_mbuf *pkt)
1127 {
1128         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1129         uint16_t nr_descs = 0;
1130
1131         rte_smp_rmb();
1132         if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
1133                                                  &nr_descs) < 0)) {
1134                 VHOST_LOG_DEBUG(VHOST_DATA,
1135                                 "(%d) failed to get enough desc from vring\n",
1136                                 dev->vid);
1137                 return -1;
1138         }
1139
1140         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
1141                         dev->vid, vq->last_avail_idx,
1142                         vq->last_avail_idx + nr_descs);
1143
1144         vq_inc_last_avail_packed(vq, nr_descs);
1145
1146         return 0;
1147 }
1148
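/*
 * Main packed-ring enqueue loop, which seems to be the path the commit
 * title refers to: take the batch path whenever at least
 * PACKED_BATCH_SIZE packets remain, fall back to single-packet enqueue
 * otherwise, and continue until all "count" packets are handled or the
 * ring runs out of descriptors.
 */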
1149 static __rte_noinline uint32_t
1150 virtio_dev_rx_packed(struct virtio_net *dev,
1151                      struct vhost_virtqueue *vq,
1152                      struct rte_mbuf **pkts,
1153                      uint32_t count)
1154 {
1155         uint32_t pkt_idx = 0;
1156         uint32_t remained = count;
1157
1158         do {
1159                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
1160
1161                 if (remained >= PACKED_BATCH_SIZE) {
1162                         if (!virtio_dev_rx_batch_packed(dev, vq,
1163                                                         &pkts[pkt_idx])) {
1164                                 pkt_idx += PACKED_BATCH_SIZE;
1165                                 remained -= PACKED_BATCH_SIZE;
1166                                 continue;
1167                         }
1168                 }
1169
1170                 if (virtio_dev_rx_single_packed(dev, vq, pkts[pkt_idx]))
1171                         break;
1172                 pkt_idx++;
1173                 remained--;
1174
1175         } while (pkt_idx < count);
1176
1177         if (vq->shadow_used_idx) {
1178                 do_data_copy_enqueue(dev, vq);
1179                 vhost_flush_enqueue_shadow_packed(dev, vq);
1180         }
1181
1182         if (pkt_idx)
1183                 vhost_vring_call_packed(dev, vq);
1184
1185         return pkt_idx;
1186 }
1187
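/*
 * Common enqueue entry point: validate the queue index, take the
 * per-virtqueue access lock (plus the IOTLB read lock when the IOMMU
 * feature is negotiated), clamp the burst to MAX_PKT_BURST, and
 * dispatch to the packed- or split-ring implementation.
 */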
1188 static __rte_always_inline uint32_t
1189 virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
1190         struct rte_mbuf **pkts, uint32_t count)
1191 {
1192         struct vhost_virtqueue *vq;
1193         uint32_t nb_tx = 0;
1194
1195         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
1196         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
1197                 RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
1198                         dev->vid, __func__, queue_id);
1199                 return 0;
1200         }
1201
1202         vq = dev->virtqueue[queue_id];
1203
1204         rte_spinlock_lock(&vq->access_lock);
1205
1206         if (unlikely(vq->enabled == 0))
1207                 goto out_access_unlock;
1208
1209         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1210                 vhost_user_iotlb_rd_lock(vq);
1211
1212         if (unlikely(vq->access_ok == 0))
1213                 if (unlikely(vring_translate(dev, vq) < 0))
1214                         goto out;
1215
1216         count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
1217         if (count == 0)
1218                 goto out;
1219
1220         if (vq_is_packed(dev))
1221                 nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
1222         else
1223                 nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);
1224
1225 out:
1226         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1227                 vhost_user_iotlb_rd_unlock(vq);
1228
1229 out_access_unlock:
1230         rte_spinlock_unlock(&vq->access_lock);
1231
1232         return nb_tx;
1233 }
1234
1235 uint16_t
1236 rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
1237         struct rte_mbuf **pkts, uint16_t count)
1238 {
1239         struct virtio_net *dev = get_device(vid);
1240
1241         if (!dev)
1242                 return 0;
1243
1244         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
1245                 RTE_LOG(ERR, VHOST_DATA,
1246                         "(%d) %s: built-in vhost net backend is disabled.\n",
1247                         dev->vid, __func__);
1248                 return 0;
1249         }
1250
1251         return virtio_dev_rx(dev, queue_id, pkts, count);
1252 }
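/*
 * Usage sketch (illustrative, not part of this file): a forwarding
 * application typically hands a received burst to the vhost device and
 * frees whatever could not be enqueued, e.g.:
 *
 *     uint16_t sent = rte_vhost_enqueue_burst(vid, queue_id, pkts, nb_rx);
 *
 *     while (sent < nb_rx)
 *             rte_pktmbuf_free(pkts[sent++]);
 *
 * where vid, queue_id, pkts and nb_rx come from the application's own
 * datapath; queue_id must be an RX queue index of the vhost device,
 * which is to say an even-numbered ring per the check above.
 */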
1253
1254 static inline bool
1255 virtio_net_with_host_offload(struct virtio_net *dev)
1256 {
1257         if (dev->features &
1258                         ((1ULL << VIRTIO_NET_F_CSUM) |
1259                          (1ULL << VIRTIO_NET_F_HOST_ECN) |
1260                          (1ULL << VIRTIO_NET_F_HOST_TSO4) |
1261                          (1ULL << VIRTIO_NET_F_HOST_TSO6) |
1262                          (1ULL << VIRTIO_NET_F_HOST_UFO)))
1263                 return true;
1264
1265         return false;
1266 }
1267
1268 static void
1269 parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
1270 {
1271         struct rte_ipv4_hdr *ipv4_hdr;
1272         struct rte_ipv6_hdr *ipv6_hdr;
1273         void *l3_hdr = NULL;
1274         struct rte_ether_hdr *eth_hdr;
1275         uint16_t ethertype;
1276
1277         eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1278
1279         m->l2_len = sizeof(struct rte_ether_hdr);
1280         ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
1281
1282         if (ethertype == RTE_ETHER_TYPE_VLAN) {
1283                 struct rte_vlan_hdr *vlan_hdr =
1284                         (struct rte_vlan_hdr *)(eth_hdr + 1);
1285
1286                 m->l2_len += sizeof(struct rte_vlan_hdr);
1287                 ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
1288         }
1289
1290         l3_hdr = (char *)eth_hdr + m->l2_len;
1291
1292         switch (ethertype) {
1293         case RTE_ETHER_TYPE_IPV4:
1294                 ipv4_hdr = l3_hdr;
1295                 *l4_proto = ipv4_hdr->next_proto_id;
1296                 m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
1297                 *l4_hdr = (char *)l3_hdr + m->l3_len;
1298                 m->ol_flags |= PKT_TX_IPV4;
1299                 break;
1300         case RTE_ETHER_TYPE_IPV6:
1301                 ipv6_hdr = l3_hdr;
1302                 *l4_proto = ipv6_hdr->proto;
1303                 m->l3_len = sizeof(struct rte_ipv6_hdr);
1304                 *l4_hdr = (char *)l3_hdr + m->l3_len;
1305                 m->ol_flags |= PKT_TX_IPV6;
1306                 break;
1307         default:
1308                 m->l3_len = 0;
1309                 *l4_proto = 0;
1310                 *l4_hdr = NULL;
1311                 break;
1312         }
1313 }
1314
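/*
 * Reverse of virtio_enqueue_offload(): inspect the virtio_net_hdr
 * written by the guest, parse the mbuf's Ethernet/IP headers, and set
 * ol_flags, tso_segsz and l4_len so that later processing can redo the
 * requested checksum or segmentation work.
 */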
1315 static __rte_always_inline void
1316 vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
1317 {
1318         uint16_t l4_proto = 0;
1319         void *l4_hdr = NULL;
1320         struct rte_tcp_hdr *tcp_hdr = NULL;
1321
1322         if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
1323                 return;
1324
1325         parse_ethernet(m, &l4_proto, &l4_hdr);
1326         if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1327                 if (hdr->csum_start == (m->l2_len + m->l3_len)) {
1328                         switch (hdr->csum_offset) {
1329                         case (offsetof(struct rte_tcp_hdr, cksum)):
1330                                 if (l4_proto == IPPROTO_TCP)
1331                                         m->ol_flags |= PKT_TX_TCP_CKSUM;
1332                                 break;
1333                         case (offsetof(struct rte_udp_hdr, dgram_cksum)):
1334                                 if (l4_proto == IPPROTO_UDP)
1335                                         m->ol_flags |= PKT_TX_UDP_CKSUM;
1336                                 break;
1337                         case (offsetof(struct rte_sctp_hdr, cksum)):
1338                                 if (l4_proto == IPPROTO_SCTP)
1339                                         m->ol_flags |= PKT_TX_SCTP_CKSUM;
1340                                 break;
1341                         default:
1342                                 break;
1343                         }
1344                 }
1345         }
1346
1347         if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1348                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1349                 case VIRTIO_NET_HDR_GSO_TCPV4:
1350                 case VIRTIO_NET_HDR_GSO_TCPV6:
1351                         tcp_hdr = l4_hdr;
1352                         m->ol_flags |= PKT_TX_TCP_SEG;
1353                         m->tso_segsz = hdr->gso_size;
1354                         m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
1355                         break;
1356                 case VIRTIO_NET_HDR_GSO_UDP:
1357                         m->ol_flags |= PKT_TX_UDP_SEG;
1358                         m->tso_segsz = hdr->gso_size;
1359                         m->l4_len = sizeof(struct rte_udp_hdr);
1360                         break;
1361                 default:
1362                         RTE_LOG(WARNING, VHOST_DATA,
1363                                 "unsupported gso type %u.\n", hdr->gso_type);
1364                         break;
1365                 }
1366         }
1367 }
1368
1369 static __rte_noinline void
1370 copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
1371                 struct buf_vector *buf_vec)
1372 {
1373         uint64_t len;
1374         uint64_t remain = sizeof(struct virtio_net_hdr);
1375         uint64_t src;
1376         uint64_t dst = (uint64_t)(uintptr_t)hdr;
1377
1378         while (remain) {
1379                 len = RTE_MIN(remain, buf_vec->buf_len);
1380                 src = buf_vec->buf_addr;
1381                 rte_memcpy((void *)(uintptr_t)dst,
1382                                 (void *)(uintptr_t)src, len);
1383
1384                 remain -= len;
1385                 dst += len;
1386                 buf_vec++;
1387         }
1388 }
1389
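/*
 * Copy one descriptor chain into an mbuf chain. The virtio-net header
 * is read in place (or reassembled via copy_vnet_hdr_from_desc() when
 * it is not contiguous), then data is either batch-copied or, in
 * dequeue zero-copy mode, attached by pointing the mbuf at the guest
 * buffer directly.
 */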
1390 static __rte_always_inline int
1391 copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
1392                   struct buf_vector *buf_vec, uint16_t nr_vec,
1393                   struct rte_mbuf *m, struct rte_mempool *mbuf_pool)
1394 {
1395         uint32_t buf_avail, buf_offset;
1396         uint64_t buf_addr, buf_iova, buf_len;
1397         uint32_t mbuf_avail, mbuf_offset;
1398         uint32_t cpy_len;
1399         struct rte_mbuf *cur = m, *prev = m;
1400         struct virtio_net_hdr tmp_hdr;
1401         struct virtio_net_hdr *hdr = NULL;
1402         /* A counter to avoid an endless descriptor chain loop */
1403         uint16_t vec_idx = 0;
1404         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
1405         int error = 0;
1406
1407         buf_addr = buf_vec[vec_idx].buf_addr;
1408         buf_iova = buf_vec[vec_idx].buf_iova;
1409         buf_len = buf_vec[vec_idx].buf_len;
1410
1411         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
1412                 error = -1;
1413                 goto out;
1414         }
1415
1416         if (virtio_net_with_host_offload(dev)) {
1417                 if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
1418                         /*
1419                          * No luck, the virtio-net header doesn't fit
1420                          * in a contiguous virtual area.
1421                          */
1422                         copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec);
1423                         hdr = &tmp_hdr;
1424                 } else {
1425                         hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
1426                 }
1427         }
1428
1429         /*
1430          * A virtio driver normally uses at least 2 desc buffers
1431          * for Tx: the first for storing the header, and others
1432          * for storing the data.
1433          */
1434         if (unlikely(buf_len < dev->vhost_hlen)) {
1435                 buf_offset = dev->vhost_hlen - buf_len;
1436                 vec_idx++;
1437                 buf_addr = buf_vec[vec_idx].buf_addr;
1438                 buf_iova = buf_vec[vec_idx].buf_iova;
1439                 buf_len = buf_vec[vec_idx].buf_len;
1440                 buf_avail  = buf_len - buf_offset;
1441         } else if (buf_len == dev->vhost_hlen) {
1442                 if (unlikely(++vec_idx >= nr_vec))
1443                         goto out;
1444                 buf_addr = buf_vec[vec_idx].buf_addr;
1445                 buf_iova = buf_vec[vec_idx].buf_iova;
1446                 buf_len = buf_vec[vec_idx].buf_len;
1447
1448                 buf_offset = 0;
1449                 buf_avail = buf_len;
1450         } else {
1451                 buf_offset = dev->vhost_hlen;
1452                 buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
1453         }
1454
1455         PRINT_PACKET(dev,
1456                         (uintptr_t)(buf_addr + buf_offset),
1457                         (uint32_t)buf_avail, 0);
1458
1459         mbuf_offset = 0;
1460         mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
1461         while (1) {
1462                 uint64_t hpa;
1463
1464                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
1465
1466                 /*
1467                  * A desc buf might span two host physical pages that are
1468                  * not contiguous. In that case (gpa_to_hpa returns 0), the
1469                  * data is copied even though zero copy is enabled.
1470                  */
1471                 if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
1472                                         buf_iova + buf_offset, cpy_len)))) {
1473                         cur->data_len = cpy_len;
1474                         cur->data_off = 0;
1475                         cur->buf_addr =
1476                                 (void *)(uintptr_t)(buf_addr + buf_offset);
1477                         cur->buf_iova = hpa;
1478
1479                         /*
1480                          * In zero copy mode, one mbuf can only reference data
1481                          * for a single desc buffer, or part of one.
1482                          */
1483                         mbuf_avail = cpy_len;
1484                 } else {
1485                         if (likely(cpy_len > MAX_BATCH_LEN ||
1486                                    vq->batch_copy_nb_elems >= vq->size ||
1487                                    (hdr && cur == m))) {
1488                                 rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
1489                                                                    mbuf_offset),
1490                                            (void *)((uintptr_t)(buf_addr +
1491                                                            buf_offset)),
1492                                            cpy_len);
1493                         } else {
1494                                 batch_copy[vq->batch_copy_nb_elems].dst =
1495                                         rte_pktmbuf_mtod_offset(cur, void *,
1496                                                                 mbuf_offset);
1497                                 batch_copy[vq->batch_copy_nb_elems].src =
1498                                         (void *)((uintptr_t)(buf_addr +
1499                                                                 buf_offset));
1500                                 batch_copy[vq->batch_copy_nb_elems].len =
1501                                         cpy_len;
1502                                 vq->batch_copy_nb_elems++;
1503                         }
1504                 }
1505
1506                 mbuf_avail  -= cpy_len;
1507                 mbuf_offset += cpy_len;
1508                 buf_avail -= cpy_len;
1509                 buf_offset += cpy_len;
1510
1511                 /* This buf has been fully consumed, get the next one */
1512                 if (buf_avail == 0) {
1513                         if (++vec_idx >= nr_vec)
1514                                 break;
1515
1516                         buf_addr = buf_vec[vec_idx].buf_addr;
1517                         buf_iova = buf_vec[vec_idx].buf_iova;
1518                         buf_len = buf_vec[vec_idx].buf_len;
1519
1520                         buf_offset = 0;
1521                         buf_avail  = buf_len;
1522
1523                         PRINT_PACKET(dev, (uintptr_t)buf_addr,
1524                                         (uint32_t)buf_avail, 0);
1525                 }
1526
1527                 /*
1528                  * This mbuf is full, get a new one
1529                  * to hold more data.
1530                  */
1531                 if (mbuf_avail == 0) {
1532                         cur = rte_pktmbuf_alloc(mbuf_pool);
1533                         if (unlikely(cur == NULL)) {
1534                                 RTE_LOG(ERR, VHOST_DATA, "Failed to "
1535                                         "allocate memory for mbuf.\n");
1536                                 error = -1;
1537                                 goto out;
1538                         }
1539                         if (unlikely(dev->dequeue_zero_copy))
1540                                 rte_mbuf_refcnt_update(cur, 1);
1541
1542                         prev->next = cur;
1543                         prev->data_len = mbuf_offset;
1544                         m->nb_segs += 1;
1545                         m->pkt_len += mbuf_offset;
1546                         prev = cur;
1547
1548                         mbuf_offset = 0;
1549                         mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
1550                 }
1551         }
1552
1553         prev->data_len = mbuf_offset;
1554         m->pkt_len    += mbuf_offset;
1555
1556         if (hdr)
1557                 vhost_dequeue_offload(hdr, m);
1558
1559 out:
1560
1561         return error;
1562 }
1563
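/*
 * Find a free zero-copy mbuf slot, scanning circularly from the point of the
 * last allocation. Returns NULL when all slots are in use.
 */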
1564 static __rte_always_inline struct zcopy_mbuf *
1565 get_zmbuf(struct vhost_virtqueue *vq)
1566 {
1567         uint16_t i;
1568         uint16_t last;
1569         int tries = 0;
1570
1571         /* search [last_zmbuf_idx, zmbuf_size) */
1572         i = vq->last_zmbuf_idx;
1573         last = vq->zmbuf_size;
1574
1575 again:
1576         for (; i < last; i++) {
1577                 if (vq->zmbufs[i].in_use == 0) {
1578                         vq->last_zmbuf_idx = i + 1;
1579                         vq->zmbufs[i].in_use = 1;
1580                         return &vq->zmbufs[i];
1581                 }
1582         }
1583
1584         tries++;
1585         if (tries == 1) {
1586                 /* search [0, last_zmbuf_idx) */
1587                 i = 0;
1588                 last = vq->last_zmbuf_idx;
1589                 goto again;
1590         }
1591
1592         return NULL;
1593 }
1594
1595 static void
1596 virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)
1597 {
1598         rte_free(opaque);
1599 }
1600
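/*
 * Attach an externally allocated buffer, large enough to hold 'size' bytes of
 * packet data plus headroom, to 'pkt'. The shared info is stored in the
 * mbuf's tailroom when it fits, otherwise at the end of the new buffer.
 */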
1601 static int
1602 virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
1603 {
1604         struct rte_mbuf_ext_shared_info *shinfo = NULL;
1605         uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
1606         uint16_t buf_len;
1607         rte_iova_t iova;
1608         void *buf;
1609
1610         /* Try to use the pkt buffer's tailroom to store shinfo, to reduce the
1611          * amount of memory required; otherwise store shinfo in the new buffer.
1612          */
1613         if (rte_pktmbuf_tailroom(pkt) >= sizeof(*shinfo))
1614                 shinfo = rte_pktmbuf_mtod(pkt,
1615                                           struct rte_mbuf_ext_shared_info *);
1616         else {
1617                 total_len += sizeof(*shinfo) + sizeof(uintptr_t);
1618                 total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
1619         }
1620
1621         if (unlikely(total_len > UINT16_MAX))
1622                 return -ENOSPC;
1623
1624         buf_len = total_len;
1625         buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
1626         if (unlikely(buf == NULL))
1627                 return -ENOMEM;
1628
1629         /* Initialize shinfo */
1630         if (shinfo) {
1631                 shinfo->free_cb = virtio_dev_extbuf_free;
1632                 shinfo->fcb_opaque = buf;
1633                 rte_mbuf_ext_refcnt_set(shinfo, 1);
1634         } else {
1635                 shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
1636                                               virtio_dev_extbuf_free, buf);
1637                 if (unlikely(shinfo == NULL)) {
1638                         rte_free(buf);
1639                         RTE_LOG(ERR, VHOST_DATA, "Failed to init shinfo\n");
1640                         return -1;
1641                 }
1642         }
1643
1644         iova = rte_malloc_virt2iova(buf);
1645         rte_pktmbuf_attach_extbuf(pkt, buf, iova, buf_len, shinfo);
1646         rte_pktmbuf_reset_headroom(pkt);
1647
1648         return 0;
1649 }
1650
1651 /*
1652  * Allocate a pktmbuf that fits data_len under the host's buffer settings.
1653  */
1654 static __rte_always_inline struct rte_mbuf *
1655 virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
1656                          uint32_t data_len)
1657 {
1658         struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp);
1659
1660         if (unlikely(pkt == NULL)) {
1661                 RTE_LOG(ERR, VHOST_DATA,
1662                         "Failed to allocate memory for mbuf.\n");
1663                 return NULL;
1664         }
1665
1666         if (rte_pktmbuf_tailroom(pkt) >= data_len)
1667                 return pkt;
1668
1669         /* attach an external buffer if supported */
1670         if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
1671                 return pkt;
1672
1673         /* check if chained buffers are allowed */
1674         if (!dev->linearbuf)
1675                 return pkt;
1676
1677         /* Data doesn't fit into the buffer and the host supports
1678          * only linear buffers
1679          */
1680         rte_pktmbuf_free(pkt);
1681
1682         return NULL;
1683 }
1684
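/*
 * Dequeue up to 'count' packets from a split virtqueue. Completed zero-copy
 * mbufs are reclaimed first; on the regular path, used entries are shadowed
 * and flushed in one go before kicking the guest.
 */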
1685 static __rte_noinline uint16_t
1686 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
1687         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
1688 {
1689         uint16_t i;
1690         uint16_t free_entries;
1691
1692         if (unlikely(dev->dequeue_zero_copy)) {
1693                 struct zcopy_mbuf *zmbuf, *next;
1694
1695                 for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
1696                      zmbuf != NULL; zmbuf = next) {
1697                         next = TAILQ_NEXT(zmbuf, next);
1698
1699                         if (mbuf_is_consumed(zmbuf->mbuf)) {
1700                                 update_shadow_used_ring_split(vq,
1701                                                 zmbuf->desc_idx, 0);
1702                                 TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
1703                                 restore_mbuf(zmbuf->mbuf);
1704                                 rte_pktmbuf_free(zmbuf->mbuf);
1705                                 put_zmbuf(zmbuf);
1706                                 vq->nr_zmbuf -= 1;
1707                         }
1708                 }
1709
1710                 if (likely(vq->shadow_used_idx)) {
1711                         flush_shadow_used_ring_split(dev, vq);
1712                         vhost_vring_call_split(dev, vq);
1713                 }
1714         }
1715
1716         free_entries = *((volatile uint16_t *)&vq->avail->idx) -
1717                         vq->last_avail_idx;
1718         if (free_entries == 0)
1719                 return 0;
1720
1721         /*
1722          * The ordering between avail index and
1723          * desc reads needs to be enforced.
1724          */
1725         rte_smp_rmb();
1726
1727         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1728
1729         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
1730
1731         count = RTE_MIN(count, MAX_PKT_BURST);
1732         count = RTE_MIN(count, free_entries);
1733         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
1734                         dev->vid, count);
1735
1736         for (i = 0; i < count; i++) {
1737                 struct buf_vector buf_vec[BUF_VECTOR_MAX];
1738                 uint16_t head_idx;
1739                 uint32_t buf_len;
1740                 uint16_t nr_vec = 0;
1741                 int err;
1742
1743                 if (unlikely(fill_vec_buf_split(dev, vq,
1744                                                 vq->last_avail_idx + i,
1745                                                 &nr_vec, buf_vec,
1746                                                 &head_idx, &buf_len,
1747                                                 VHOST_ACCESS_RO) < 0))
1748                         break;
1749
1750                 if (likely(dev->dequeue_zero_copy == 0))
1751                         update_shadow_used_ring_split(vq, head_idx, 0);
1752
1753                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
1754                 if (unlikely(pkts[i] == NULL))
1755                         break;
1756
1757                 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
1758                                 mbuf_pool);
1759                 if (unlikely(err)) {
1760                         rte_pktmbuf_free(pkts[i]);
1761                         break;
1762                 }
1763
1764                 if (unlikely(dev->dequeue_zero_copy)) {
1765                         struct zcopy_mbuf *zmbuf;
1766
1767                         zmbuf = get_zmbuf(vq);
1768                         if (!zmbuf) {
1769                                 rte_pktmbuf_free(pkts[i]);
1770                                 break;
1771                         }
1772                         zmbuf->mbuf = pkts[i];
1773                         zmbuf->desc_idx = head_idx;
1774
1775                         /*
1776                          * Pin the mbuf by bumping its refcnt; we check later
1777                          * whether it has been consumed (i.e. we were the last
1778                          * user), and only then update the used ring
1779                          * safely.
1780                          */
1781                         rte_mbuf_refcnt_update(pkts[i], 1);
1782
1783                         vq->nr_zmbuf += 1;
1784                         TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
1785                 }
1786         }
1787         vq->last_avail_idx += i;
1788
1789         if (likely(dev->dequeue_zero_copy == 0)) {
1790                 do_data_copy_dequeue(vq);
1791                 if (unlikely(i < count))
1792                         vq->shadow_used_idx = i;
1793                 if (likely(vq->shadow_used_idx)) {
1794                         flush_shadow_used_ring_split(dev, vq);
1795                         vhost_vring_call_split(dev, vq);
1796                 }
1797         }
1798
1799         return i;
1800 }
1801
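/*
 * Check that a full batch of descriptors is available and translate them,
 * then allocate one mbuf per descriptor and set up packet lengths and ids.
 * Offload info is parsed from the virtio-net headers when host offload is
 * negotiated. Returns -1 (freeing any allocated mbufs) if the batch cannot
 * be used.
 */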
1802 static __rte_always_inline int
1803 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
1804                                  struct vhost_virtqueue *vq,
1805                                  struct rte_mempool *mbuf_pool,
1806                                  struct rte_mbuf **pkts,
1807                                  uint16_t avail_idx,
1808                                  uintptr_t *desc_addrs,
1809                                  uint16_t *ids)
1810 {
1811         bool wrap = vq->avail_wrap_counter;
1812         struct vring_packed_desc *descs = vq->desc_packed;
1813         struct virtio_net_hdr *hdr;
1814         uint64_t lens[PACKED_BATCH_SIZE];
1815         uint64_t buf_lens[PACKED_BATCH_SIZE];
1816         uint32_t buf_offset = dev->vhost_hlen;
1817         uint16_t flags, i;
1818
1819         if (unlikely(avail_idx & PACKED_BATCH_MASK))
1820                 return -1;
1821         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
1822                 return -1;
1823
1824         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1825                 flags = descs[avail_idx + i].flags;
1826                 if (unlikely((wrap != !!(flags & VRING_DESC_F_AVAIL)) ||
1827                              (wrap == !!(flags & VRING_DESC_F_USED))  ||
1828                              (flags & PACKED_DESC_SINGLE_DEQUEUE_FLAG)))
1829                         return -1;
1830         }
1831
1832         rte_smp_rmb();
1833
1834         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1835                 lens[i] = descs[avail_idx + i].len;
1836
1837         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1838                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
1839                                                   descs[avail_idx + i].addr,
1840                                                   &lens[i], VHOST_ACCESS_RW);
1841         }
1842
1843         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1844                 if (unlikely((lens[i] != descs[avail_idx + i].len)))
1845                         return -1;
1846         }
1847
1848         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1849                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]);
1850                 if (!pkts[i])
1851                         goto free_buf;
1852         }
1853
1854         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1855                 buf_lens[i] = pkts[i]->buf_len - pkts[i]->data_off;
1856
1857         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1858                 if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
1859                         goto free_buf;
1860         }
1861
1862         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1863                 pkts[i]->pkt_len = descs[avail_idx + i].len - buf_offset;
1864                 pkts[i]->data_len = pkts[i]->pkt_len;
1865                 ids[i] = descs[avail_idx + i].id;
1866         }
1867
1868         if (virtio_net_with_host_offload(dev)) {
1869                 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1870                         hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
1871                         vhost_dequeue_offload(hdr, pkts[i]);
1872                 }
1873         }
1874
1875         return 0;
1876
1877 free_buf: /* free only the mbufs that were actually allocated */
1878         for (i = 0; i < PACKED_BATCH_SIZE && pkts[i]; i++)
1879                 rte_pktmbuf_free(pkts[i]);
1880
1881         return -1;
1882 }
1883
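/*
 * Dequeue PACKED_BATCH_SIZE packets at once from a packed virtqueue, copying
 * each payload into its pre-allocated mbuf and shadowing the used
 * descriptors.
 */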
1884 static __rte_always_inline int
1885 virtio_dev_tx_batch_packed(struct virtio_net *dev,
1886                            struct vhost_virtqueue *vq,
1887                            struct rte_mempool *mbuf_pool,
1888                            struct rte_mbuf **pkts)
1889 {
1890         uint16_t avail_idx = vq->last_avail_idx;
1891         uint32_t buf_offset = dev->vhost_hlen;
1892         uintptr_t desc_addrs[PACKED_BATCH_SIZE];
1893         uint16_t ids[PACKED_BATCH_SIZE];
1894         uint16_t i;
1895
1896         if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
1897                                              avail_idx, desc_addrs, ids))
1898                 return -1;
1899
1900         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1901                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
1902
1903         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1904                 rte_memcpy(rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
1905                            (void *)(uintptr_t)(desc_addrs[i] + buf_offset),
1906                            pkts[i]->pkt_len);
1907
1908         if (virtio_net_is_inorder(dev))
1909                 vhost_shadow_dequeue_batch_packed_inorder(vq,
1910                         ids[PACKED_BATCH_SIZE - 1]);
1911         else
1912                 vhost_shadow_dequeue_batch_packed(dev, vq, ids);
1913
1914         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
1915
1916         return 0;
1917 }
1918
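/*
 * Dequeue a single descriptor chain from a packed virtqueue into a freshly
 * allocated mbuf. On success, the buffer id and the number of descriptors
 * consumed are returned through 'buf_id' and 'desc_count'.
 */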
1919 static __rte_always_inline int
1920 vhost_dequeue_single_packed(struct virtio_net *dev,
1921                             struct vhost_virtqueue *vq,
1922                             struct rte_mempool *mbuf_pool,
1923                             struct rte_mbuf **pkts,
1924                             uint16_t *buf_id,
1925                             uint16_t *desc_count)
1926 {
1927         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1928         uint32_t buf_len;
1929         uint16_t nr_vec = 0;
1930         int err;
1931
1932         if (unlikely(fill_vec_buf_packed(dev, vq,
1933                                          vq->last_avail_idx, desc_count,
1934                                          buf_vec, &nr_vec,
1935                                          buf_id, &buf_len,
1936                                          VHOST_ACCESS_RO) < 0))
1937                 return -1;
1938
1939         *pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
1940         if (unlikely(*pkts == NULL)) {
1941                 RTE_LOG(ERR, VHOST_DATA,
1942                         "Failed to allocate memory for mbuf.\n");
1943                 return -1;
1944         }
1945
1946         err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
1947                                 mbuf_pool);
1948         if (unlikely(err)) {
1949                 rte_pktmbuf_free(*pkts);
1950                 return -1;
1951         }
1952
1953         return 0;
1954 }
1955
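/*
 * Dequeue one packet from a packed virtqueue and record the used descriptor
 * in the shadow ring (in-order or out-of-order variant).
 */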
1956 static __rte_always_inline int
1957 virtio_dev_tx_single_packed(struct virtio_net *dev,
1958                             struct vhost_virtqueue *vq,
1959                             struct rte_mempool *mbuf_pool,
1960                             struct rte_mbuf **pkts)
1961 {
1962
1963         uint16_t buf_id, desc_count;
1964
1965         if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
1966                                         &desc_count))
1967                 return -1;
1968
1969         if (virtio_net_is_inorder(dev))
1970                 vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
1971                                                            desc_count);
1972         else
1973                 vhost_shadow_dequeue_single_packed(vq, buf_id, desc_count);
1974
1975         vq_inc_last_avail_packed(vq, desc_count);
1976
1977         return 0;
1978 }
1979
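/*
 * Zero-copy batch dequeue for packed virtqueues: each dequeued mbuf is
 * pinned with a zmbuf entry so the used ring is only updated once the
 * application has released it (see free_zmbuf()).
 */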
1980 static __rte_always_inline int
1981 virtio_dev_tx_batch_packed_zmbuf(struct virtio_net *dev,
1982                                  struct vhost_virtqueue *vq,
1983                                  struct rte_mempool *mbuf_pool,
1984                                  struct rte_mbuf **pkts)
1985 {
1986         struct zcopy_mbuf *zmbufs[PACKED_BATCH_SIZE];
1987         uintptr_t desc_addrs[PACKED_BATCH_SIZE];
1988         uint16_t ids[PACKED_BATCH_SIZE];
1989         uint16_t i;
1990
1991         uint16_t avail_idx = vq->last_avail_idx;
1992
1993         if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
1994                                              avail_idx, desc_addrs, ids))
1995                 return -1;
1996
1997         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1998                 zmbufs[i] = get_zmbuf(vq);
1999
2000         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2001                 if (!zmbufs[i])
2002                         goto free_pkt;
2003         }
2004
2005         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
2006                 zmbufs[i]->mbuf = pkts[i];
2007                 zmbufs[i]->desc_idx = avail_idx + i;
2008                 zmbufs[i]->desc_count = 1;
2009         }
2010
2011         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2012                 rte_mbuf_refcnt_update(pkts[i], 1);
2013
2014         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2015                 TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbufs[i], next);
2016
2017         vq->nr_zmbuf += PACKED_BATCH_SIZE;
2018         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
2019
2020         return 0;
2021
2022 free_pkt:
2023         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2024                 rte_pktmbuf_free(pkts[i]);
2025
2026         return -1;
2027 }
2028
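/*
 * Zero-copy single-packet dequeue for packed virtqueues; the used-ring
 * update is deferred until the mbuf is released.
 */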
2029 static __rte_always_inline int
2030 virtio_dev_tx_single_packed_zmbuf(struct virtio_net *dev,
2031                                   struct vhost_virtqueue *vq,
2032                                   struct rte_mempool *mbuf_pool,
2033                                   struct rte_mbuf **pkts)
2034 {
2035         uint16_t buf_id, desc_count;
2036         struct zcopy_mbuf *zmbuf;
2037
2038         if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
2039                                         &desc_count))
2040                 return -1;
2041
2042         zmbuf = get_zmbuf(vq);
2043         if (!zmbuf) {
2044                 rte_pktmbuf_free(*pkts);
2045                 return -1;
2046         }
2047         zmbuf->mbuf = *pkts;
2048         zmbuf->desc_idx = vq->last_avail_idx;
2049         zmbuf->desc_count = desc_count;
2050
2051         rte_mbuf_refcnt_update(*pkts, 1);
2052
2053         vq->nr_zmbuf += 1;
2054         TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
2055
2056         vq_inc_last_avail_packed(vq, desc_count);
2057         return 0;
2058 }
2059
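/*
 * Reclaim zero-copy mbufs that the application has released, writing back
 * their used descriptors with the current wrap counter.
 */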
2060 static __rte_always_inline void
2061 free_zmbuf(struct vhost_virtqueue *vq)
2062 {
2063         struct zcopy_mbuf *next = NULL;
2064         struct zcopy_mbuf *zmbuf;
2065
2066         for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
2067              zmbuf != NULL; zmbuf = next) {
2068                 next = TAILQ_NEXT(zmbuf, next);
2069
2070                 uint16_t last_used_idx = vq->last_used_idx;
2071
2072                 if (mbuf_is_consumed(zmbuf->mbuf)) {
2073                         uint16_t flags;
2074                         flags = vq->desc_packed[last_used_idx].flags;
2075                         if (vq->used_wrap_counter) {
2076                                 flags |= VRING_DESC_F_USED;
2077                                 flags |= VRING_DESC_F_AVAIL;
2078                         } else {
2079                                 flags &= ~VRING_DESC_F_USED;
2080                                 flags &= ~VRING_DESC_F_AVAIL;
2081                         }
2082
2083                         vq->desc_packed[last_used_idx].id = zmbuf->desc_idx;
2084                         vq->desc_packed[last_used_idx].len = 0;
2085
2086                         rte_smp_wmb();
2087                         vq->desc_packed[last_used_idx].flags = flags;
2088
2089                         vq_inc_last_used_packed(vq, zmbuf->desc_count);
2090
2091                         TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
2092                         restore_mbuf(zmbuf->mbuf);
2093                         rte_pktmbuf_free(zmbuf->mbuf);
2094                         put_zmbuf(zmbuf);
2095                         vq->nr_zmbuf -= 1;
2096                 }
2097         }
2098 }
2099
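/*
 * Zero-copy dequeue path for packed virtqueues: reclaim finished zmbufs,
 * then dequeue in batches of PACKED_BATCH_SIZE, falling back to single
 * descriptors, and kick the guest if anything was dequeued.
 */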
2100 static __rte_noinline uint16_t
2101 virtio_dev_tx_packed_zmbuf(struct virtio_net *dev,
2102                            struct vhost_virtqueue *vq,
2103                            struct rte_mempool *mbuf_pool,
2104                            struct rte_mbuf **pkts,
2105                            uint32_t count)
2106 {
2107         uint32_t pkt_idx = 0;
2108         uint32_t remained = count;
2109
2110         free_zmbuf(vq);
2111
2112         do {
2113                 if (remained >= PACKED_BATCH_SIZE) {
2114                         if (!virtio_dev_tx_batch_packed_zmbuf(dev, vq,
2115                                 mbuf_pool, &pkts[pkt_idx])) {
2116                                 pkt_idx += PACKED_BATCH_SIZE;
2117                                 remained -= PACKED_BATCH_SIZE;
2118                                 continue;
2119                         }
2120                 }
2121
2122                 if (virtio_dev_tx_single_packed_zmbuf(dev, vq, mbuf_pool,
2123                                                       &pkts[pkt_idx]))
2124                         break;
2125                 pkt_idx++;
2126                 remained--;
2127
2128         } while (remained);
2129
2130         if (pkt_idx)
2131                 vhost_vring_call_packed(dev, vq);
2132
2133         return pkt_idx;
2134 }
2135
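/*
 * Regular (copy) dequeue path for packed virtqueues: try batched dequeue
 * first, fall back to single-descriptor dequeue, and flush shadowed used
 * entries as the loop progresses.
 */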
2136 static __rte_noinline uint16_t
2137 virtio_dev_tx_packed(struct virtio_net *dev,
2138                      struct vhost_virtqueue *vq,
2139                      struct rte_mempool *mbuf_pool,
2140                      struct rte_mbuf **pkts,
2141                      uint32_t count)
2142 {
2143         uint32_t pkt_idx = 0;
2144         uint32_t remained = count;
2145
2146         do {
2147                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
2148
2149                 if (remained >= PACKED_BATCH_SIZE) {
2150                         if (!virtio_dev_tx_batch_packed(dev, vq, mbuf_pool,
2151                                                         &pkts[pkt_idx])) {
2152                                 vhost_flush_dequeue_packed(dev, vq);
2153                                 pkt_idx += PACKED_BATCH_SIZE;
2154                                 remained -= PACKED_BATCH_SIZE;
2155                                 continue;
2156                         }
2157                 }
2158
2159                 if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
2160                                                 &pkts[pkt_idx]))
2161                         break;
2162                 vhost_flush_dequeue_packed(dev, vq);
2163                 pkt_idx++;
2164                 remained--;
2165
2166         } while (remained);
2167
2168         if (vq->shadow_used_idx)
2169                 do_data_copy_dequeue(vq);
2170
2171         return pkt_idx;
2172 }
2173
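/*
 * API entry point for dequeue: receive up to 'count' packets that the guest
 * has sent on virtqueue 'queue_id', allocating mbufs from 'mbuf_pool'.
 * Returns how many packets were actually placed in 'pkts'.
 *
 * Minimal usage sketch (illustrative only; 'vid' and 'pool' are assumed to
 * come from the application's vhost-user setup, and the odd queue index
 * follows the RX/TX alternation checked by is_valid_virt_queue_idx()):
 *
 *	struct rte_mbuf *burst[32];
 *	uint16_t i, n;
 *
 *	n = rte_vhost_dequeue_burst(vid, 1, pool, burst, 32);
 *	for (i = 0; i < n; i++)
 *		rte_pktmbuf_free(burst[i]);
 *
 * (a real application would typically forward the packets instead of
 * freeing them immediately)
 */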
2174 uint16_t
2175 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
2176         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
2177 {
2178         struct virtio_net *dev;
2179         struct rte_mbuf *rarp_mbuf = NULL;
2180         struct vhost_virtqueue *vq;
2181
2182         dev = get_device(vid);
2183         if (!dev)
2184                 return 0;
2185
2186         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
2187                 RTE_LOG(ERR, VHOST_DATA,
2188                         "(%d) %s: built-in vhost net backend is disabled.\n",
2189                         dev->vid, __func__);
2190                 return 0;
2191         }
2192
2193         if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
2194                 RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
2195                         dev->vid, __func__, queue_id);
2196                 return 0;
2197         }
2198
2199         vq = dev->virtqueue[queue_id];
2200
2201         if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
2202                 return 0;
2203
2204         if (unlikely(vq->enabled == 0)) {
2205                 count = 0;
2206                 goto out_access_unlock;
2207         }
2208
2209         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2210                 vhost_user_iotlb_rd_lock(vq);
2211
2212         if (unlikely(vq->access_ok == 0))
2213                 if (unlikely(vring_translate(dev, vq) < 0)) {
2214                         count = 0;
2215                         goto out;
2216                 }
2217
2218         /*
2219          * Construct a RARP broadcast packet, and inject it into the "pkts"
2220          * array, so it looks like the guest actually sent such a packet.
2221          *
2222          * Check user_send_rarp() for more information.
2223          *
2224          * broadcast_rarp shares a cacheline in the virtio_net structure
2225          * with some fields that are accessed during enqueue, and
2226          * rte_atomic16_cmpset() issues a write when it uses cmpxchg. This could
2227          * result in false sharing between enqueue and dequeue.
2228          *
2229          * Prevent unnecessary false sharing by reading broadcast_rarp first
2230          * and only performing cmpset if the read indicates it is likely to
2231          * be set.
2232          */
2233         if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) &&
2234                         rte_atomic16_cmpset((volatile uint16_t *)
2235                                 &dev->broadcast_rarp.cnt, 1, 0))) {
2236
2237                 rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
2238                 if (rarp_mbuf == NULL) {
2239                         RTE_LOG(ERR, VHOST_DATA,
2240                                 "Failed to make RARP packet.\n");
2241                         count = 0;
2242                         goto out;
2243                 }
2244                 count -= 1;
2245         }
2246
2247         if (vq_is_packed(dev)) {
2248                 if (unlikely(dev->dequeue_zero_copy))
2249                         count = virtio_dev_tx_packed_zmbuf(dev, vq, mbuf_pool,
2250                                                            pkts, count);
2251                 else
2252                         count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts,
2253                                                      count);
2254         } else
2255                 count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count);
2256
2257 out:
2258         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2259                 vhost_user_iotlb_rd_unlock(vq);
2260
2261 out_access_unlock:
2262         rte_spinlock_unlock(&vq->access_lock);
2263
2264         if (unlikely(rarp_mbuf != NULL)) {
2265                 /*
2266                  * Inject it at the head of the "pkts" array, so that the
2267                  * switch's MAC learning table gets updated first.
2268                  */
2269                 memmove(&pkts[1], pkts, count * sizeof(struct rte_mbuf *));
2270                 pkts[0] = rarp_mbuf;
2271                 count += 1;
2272         }
2273
2274         return count;
2275 }