vhost: optimize packed ring dequeue when in-order
lib/librte_vhost/virtio_net.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <stdbool.h>
7 #include <linux/virtio_net.h>
8
9 #include <rte_mbuf.h>
10 #include <rte_memcpy.h>
11 #include <rte_ether.h>
12 #include <rte_ip.h>
13 #include <rte_vhost.h>
14 #include <rte_tcp.h>
15 #include <rte_udp.h>
16 #include <rte_sctp.h>
17 #include <rte_arp.h>
18 #include <rte_spinlock.h>
19 #include <rte_malloc.h>
20
21 #include "iotlb.h"
22 #include "vhost.h"
23
24 #define MAX_PKT_BURST 32
25
26 #define MAX_BATCH_LEN 256
27
28 static  __rte_always_inline bool
29 rxvq_is_mergeable(struct virtio_net *dev)
30 {
31         return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
32 }
33
34 static  __rte_always_inline bool
35 virtio_net_is_inorder(struct virtio_net *dev)
36 {
37         return dev->features & (1ULL << VIRTIO_F_IN_ORDER);
38 }
39
40 static bool
41 is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
42 {
43         return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
44 }
45
46 static __rte_always_inline void
47 do_flush_shadow_used_ring_split(struct virtio_net *dev,
48                         struct vhost_virtqueue *vq,
49                         uint16_t to, uint16_t from, uint16_t size)
50 {
51         rte_memcpy(&vq->used->ring[to],
52                         &vq->shadow_used_split[from],
53                         size * sizeof(struct vring_used_elem));
54         vhost_log_cache_used_vring(dev, vq,
55                         offsetof(struct vring_used, ring[to]),
56                         size * sizeof(struct vring_used_elem));
57 }
58
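/*
 * Flush the enqueue shadow entries into the split used ring. The copy is
 * done in two chunks when it would wrap past the end of the ring; the
 * used index is published only after a write barrier.
 */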
59 static __rte_always_inline void
60 flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
61 {
62         uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
63
64         if (used_idx + vq->shadow_used_idx <= vq->size) {
65                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
66                                           vq->shadow_used_idx);
67         } else {
68                 uint16_t size;
69
70                 /* update the used ring interval [used_idx, vq->size) */
71                 size = vq->size - used_idx;
72                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);
73
74                 /* update the remaining used ring interval [0, shadow_used_idx - size) */
75                 do_flush_shadow_used_ring_split(dev, vq, 0, size,
76                                           vq->shadow_used_idx - size);
77         }
78         vq->last_used_idx += vq->shadow_used_idx;
79
80         rte_smp_wmb();
81
82         vhost_log_cache_sync(dev, vq);
83
84         *(volatile uint16_t *)&vq->used->idx += vq->shadow_used_idx;
85         vq->shadow_used_idx = 0;
86         vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
87                 sizeof(vq->used->idx));
88 }
89
90 static __rte_always_inline void
91 update_shadow_used_ring_split(struct vhost_virtqueue *vq,
92                          uint16_t desc_idx, uint32_t len)
93 {
94         uint16_t i = vq->shadow_used_idx++;
95
96         vq->shadow_used_split[i].id  = desc_idx;
97         vq->shadow_used_split[i].len = len;
98 }
99
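/*
 * Flush the enqueue shadow entries to the packed descriptor ring.
 * Descriptor ids/lens are written first; after a write barrier the flags
 * are set, with the head descriptor's flags written last so the guest
 * never observes a partially updated chain.
 */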
100 static __rte_always_inline void
101 vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
102                                   struct vhost_virtqueue *vq)
103 {
104         int i;
105         uint16_t used_idx = vq->last_used_idx;
106         uint16_t head_idx = vq->last_used_idx;
107         uint16_t head_flags = 0;
108
109         /* Split loop in two to save memory barriers */
110         for (i = 0; i < vq->shadow_used_idx; i++) {
111                 vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
112                 vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;
113
114                 used_idx += vq->shadow_used_packed[i].count;
115                 if (used_idx >= vq->size)
116                         used_idx -= vq->size;
117         }
118
119         rte_smp_wmb();
120
121         for (i = 0; i < vq->shadow_used_idx; i++) {
122                 uint16_t flags;
123
124                 if (vq->shadow_used_packed[i].len)
125                         flags = VRING_DESC_F_WRITE;
126                 else
127                         flags = 0;
128
129                 if (vq->used_wrap_counter) {
130                         flags |= VRING_DESC_F_USED;
131                         flags |= VRING_DESC_F_AVAIL;
132                 } else {
133                         flags &= ~VRING_DESC_F_USED;
134                         flags &= ~VRING_DESC_F_AVAIL;
135                 }
136
137                 if (i > 0) {
138                         vq->desc_packed[vq->last_used_idx].flags = flags;
139
140                         vhost_log_cache_used_vring(dev, vq,
141                                         vq->last_used_idx *
142                                         sizeof(struct vring_packed_desc),
143                                         sizeof(struct vring_packed_desc));
144                 } else {
145                         head_idx = vq->last_used_idx;
146                         head_flags = flags;
147                 }
148
149                 vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count);
150         }
151
152         vq->desc_packed[head_idx].flags = head_flags;
153
154         vhost_log_cache_used_vring(dev, vq,
155                                 head_idx *
156                                 sizeof(struct vring_packed_desc),
157                                 sizeof(struct vring_packed_desc));
158
159         vq->shadow_used_idx = 0;
160         vhost_log_cache_sync(dev, vq);
161 }
162
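/*
 * Flush the single pending dequeue shadow entry: write back its id, then
 * (after a write barrier) its flags, and log the updated descriptor.
 */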
163 static __rte_always_inline void
164 vhost_flush_dequeue_shadow_packed(struct virtio_net *dev,
165                                   struct vhost_virtqueue *vq)
166 {
167         struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];
168
169         vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
170         rte_smp_wmb();
171         vq->desc_packed[vq->shadow_last_used_idx].flags = used_elem->flags;
172
173         vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
174                                    sizeof(struct vring_packed_desc),
175                                    sizeof(struct vring_packed_desc));
176         vq->shadow_used_idx = 0;
177         vhost_log_cache_sync(dev, vq);
178 }
179
180 static __rte_always_inline void
181 vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
182                                  struct vhost_virtqueue *vq,
183                                  uint64_t *lens,
184                                  uint16_t *ids)
185 {
186         uint16_t i;
187         uint16_t flags;
188
189         flags = PACKED_DESC_ENQUEUE_USED_FLAG(vq->used_wrap_counter);
190
191         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
192                 vq->desc_packed[vq->last_used_idx + i].id = ids[i];
193                 vq->desc_packed[vq->last_used_idx + i].len = lens[i];
194         }
195
196         rte_smp_wmb();
197
198         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
199                 vq->desc_packed[vq->last_used_idx + i].flags = flags;
200
201         vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
202                                    sizeof(struct vring_packed_desc),
203                                    sizeof(struct vring_packed_desc) *
204                                    PACKED_BATCH_SIZE);
205         vhost_log_cache_sync(dev, vq);
206
207         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
208 }
209
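/*
 * Record a dequeued batch when VIRTIO_F_IN_ORDER is negotiated: a single
 * shadow entry (holding one buffer id) is enough for the whole batch, and
 * last_used_idx advances by PACKED_BATCH_SIZE.
 */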
210 static __rte_always_inline void
211 vhost_shadow_dequeue_batch_packed_inorder(struct vhost_virtqueue *vq,
212                                           uint16_t id)
213 {
214         vq->shadow_used_packed[0].id = id;
215
216         if (!vq->shadow_used_idx) {
217                 vq->shadow_last_used_idx = vq->last_used_idx;
218                 vq->shadow_used_packed[0].flags =
219                         PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
220                 vq->shadow_used_packed[0].len = 0;
221                 vq->shadow_used_packed[0].count = 1;
222                 vq->shadow_used_idx++;
223         }
224
225         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
226 }
227
228 static __rte_always_inline void
229 vhost_shadow_dequeue_batch_packed(struct virtio_net *dev,
230                                   struct vhost_virtqueue *vq,
231                                   uint16_t *ids)
232 {
233         uint16_t flags;
234         uint16_t i;
235         uint16_t begin;
236
237         flags = PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
238
239         if (!vq->shadow_used_idx) {
240                 vq->shadow_last_used_idx = vq->last_used_idx;
241                 vq->shadow_used_packed[0].id  = ids[0];
242                 vq->shadow_used_packed[0].len = 0;
243                 vq->shadow_used_packed[0].count = 1;
244                 vq->shadow_used_packed[0].flags = flags;
245                 vq->shadow_used_idx++;
246                 begin = 1;
247         } else
248                 begin = 0;
249
250         vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE) {
251                 vq->desc_packed[vq->last_used_idx + i].id = ids[i];
252                 vq->desc_packed[vq->last_used_idx + i].len = 0;
253         }
254
255         rte_smp_wmb();
256         vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE)
257                 vq->desc_packed[vq->last_used_idx + i].flags = flags;
258
259         vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
260                                    sizeof(struct vring_packed_desc),
261                                    sizeof(struct vring_packed_desc) *
262                                    PACKED_BATCH_SIZE);
263         vhost_log_cache_sync(dev, vq);
264
265         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
266 }
267
268 static __rte_always_inline void
269 vhost_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
270                                    uint16_t buf_id,
271                                    uint16_t count)
272 {
273         uint16_t flags;
274
275         flags = vq->desc_packed[vq->last_used_idx].flags;
276         if (vq->used_wrap_counter) {
277                 flags |= VRING_DESC_F_USED;
278                 flags |= VRING_DESC_F_AVAIL;
279         } else {
280                 flags &= ~VRING_DESC_F_USED;
281                 flags &= ~VRING_DESC_F_AVAIL;
282         }
283
284         if (!vq->shadow_used_idx) {
285                 vq->shadow_last_used_idx = vq->last_used_idx;
286
287                 vq->shadow_used_packed[0].id  = buf_id;
288                 vq->shadow_used_packed[0].len = 0;
289                 vq->shadow_used_packed[0].flags = flags;
290                 vq->shadow_used_idx++;
291         } else {
292                 vq->desc_packed[vq->last_used_idx].id = buf_id;
293                 vq->desc_packed[vq->last_used_idx].len = 0;
294                 vq->desc_packed[vq->last_used_idx].flags = flags;
295         }
296
297         vq_inc_last_used_packed(vq, count);
298 }
299
300 static __rte_always_inline void
301 vhost_shadow_dequeue_single_packed_inorder(struct vhost_virtqueue *vq,
302                                            uint16_t buf_id,
303                                            uint16_t count)
304 {
305         uint16_t flags;
306
307         vq->shadow_used_packed[0].id = buf_id;
308
309         flags = vq->desc_packed[vq->last_used_idx].flags;
310         if (vq->used_wrap_counter) {
311                 flags |= VRING_DESC_F_USED;
312                 flags |= VRING_DESC_F_AVAIL;
313         } else {
314                 flags &= ~VRING_DESC_F_USED;
315                 flags &= ~VRING_DESC_F_AVAIL;
316         }
317
318         if (!vq->shadow_used_idx) {
319                 vq->shadow_last_used_idx = vq->last_used_idx;
320                 vq->shadow_used_packed[0].len = 0;
321                 vq->shadow_used_packed[0].flags = flags;
322                 vq->shadow_used_idx++;
323         }
324
325         vq_inc_last_used_packed(vq, count);
326 }
327
328 static inline void
329 do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
330 {
331         struct batch_copy_elem *elem = vq->batch_copy_elems;
332         uint16_t count = vq->batch_copy_nb_elems;
333         int i;
334
335         for (i = 0; i < count; i++) {
336                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
337                 vhost_log_cache_write_iova(dev, vq, elem[i].log_addr,
338                                            elem[i].len);
339                 PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
340         }
341
342         vq->batch_copy_nb_elems = 0;
343 }
344
345 static inline void
346 do_data_copy_dequeue(struct vhost_virtqueue *vq)
347 {
348         struct batch_copy_elem *elem = vq->batch_copy_elems;
349         uint16_t count = vq->batch_copy_nb_elems;
350         int i;
351
352         for (i = 0; i < count; i++)
353                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
354
355         vq->batch_copy_nb_elems = 0;
356 }
357
358 static __rte_always_inline void
359 vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
360                                    struct vhost_virtqueue *vq,
361                                    uint32_t len[],
362                                    uint16_t id[],
363                                    uint16_t count[],
364                                    uint16_t num_buffers)
365 {
366         uint16_t i;
367         for (i = 0; i < num_buffers; i++) {
368                 /* enqueue shadow flush action aligned with batch num */
369                 if (!vq->shadow_used_idx)
370                         vq->shadow_aligned_idx = vq->last_used_idx &
371                                 PACKED_BATCH_MASK;
372                 vq->shadow_used_packed[vq->shadow_used_idx].id  = id[i];
373                 vq->shadow_used_packed[vq->shadow_used_idx].len = len[i];
374                 vq->shadow_used_packed[vq->shadow_used_idx].count = count[i];
375                 vq->shadow_aligned_idx += count[i];
376                 vq->shadow_used_idx++;
377         }
378
379         if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
380                 do_data_copy_enqueue(dev, vq);
381                 vhost_flush_enqueue_shadow_packed(dev, vq);
382         }
383 }
384
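/*
 * Flush dequeue shadow entries lazily: only when the distance from the
 * last flushed position approaches the ring size (vq->size - MAX_PKT_BURST)
 * are the batched copies completed, the used descriptor written back and
 * the guest notified.
 */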
385 static __rte_always_inline void
386 vhost_flush_dequeue_packed(struct virtio_net *dev,
387                            struct vhost_virtqueue *vq)
388 {
389         int shadow_count;
390         if (!vq->shadow_used_idx)
391                 return;
392
393         shadow_count = vq->last_used_idx - vq->shadow_last_used_idx;
394         if (shadow_count <= 0)
395                 shadow_count += vq->size;
396
397         if ((uint32_t)shadow_count >= (vq->size - MAX_PKT_BURST)) {
398                 do_data_copy_dequeue(vq);
399                 vhost_flush_dequeue_shadow_packed(dev, vq);
400                 vhost_vring_call_packed(dev, vq);
401         }
402 }
403
404 /* skip the write when the value is unchanged, to lessen cache pressure */
405 #define ASSIGN_UNLESS_EQUAL(var, val) do {      \
406         if ((var) != (val))                     \
407                 (var) = (val);                  \
408 } while (0)
409
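/*
 * Fill the virtio-net header from the mbuf offload flags: L4 checksum
 * requests and GSO type/size are translated, and the IPv4 header checksum
 * (which has no field in the virtio-net header) is computed in software.
 */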
410 static __rte_always_inline void
411 virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
412 {
413         uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;
414
415         if (m_buf->ol_flags & PKT_TX_TCP_SEG)
416                 csum_l4 |= PKT_TX_TCP_CKSUM;
417
418         if (csum_l4) {
419                 net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
420                 net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
421
422                 switch (csum_l4) {
423                 case PKT_TX_TCP_CKSUM:
424                         net_hdr->csum_offset = (offsetof(struct rte_tcp_hdr,
425                                                 cksum));
426                         break;
427                 case PKT_TX_UDP_CKSUM:
428                         net_hdr->csum_offset = (offsetof(struct rte_udp_hdr,
429                                                 dgram_cksum));
430                         break;
431                 case PKT_TX_SCTP_CKSUM:
432                         net_hdr->csum_offset = (offsetof(struct rte_sctp_hdr,
433                                                 cksum));
434                         break;
435                 }
436         } else {
437                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
438                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
439                 ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
440         }
441
442         /* the virtio-net header has no IP csum field, so compute the checksum here */
443         if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
444                 struct rte_ipv4_hdr *ipv4_hdr;
445
446                 ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
447                                                    m_buf->l2_len);
448                 ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
449         }
450
451         if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
452                 if (m_buf->ol_flags & PKT_TX_IPV4)
453                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
454                 else
455                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
456                 net_hdr->gso_size = m_buf->tso_segsz;
457                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
458                                         + m_buf->l4_len;
459         } else if (m_buf->ol_flags & PKT_TX_UDP_SEG) {
460                 net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
461                 net_hdr->gso_size = m_buf->tso_segsz;
462                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
463                         m_buf->l4_len;
464         } else {
465                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
466                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
467                 ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
468         }
469 }
470
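/*
 * Map one guest descriptor (desc_iova, desc_len) into host virtual
 * addresses. A guest-contiguous range may map to several host-VA chunks,
 * so it can consume more than one buf_vec entry.
 */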
471 static __rte_always_inline int
472 map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
473                 struct buf_vector *buf_vec, uint16_t *vec_idx,
474                 uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
475 {
476         uint16_t vec_id = *vec_idx;
477
478         while (desc_len) {
479                 uint64_t desc_addr;
480                 uint64_t desc_chunck_len = desc_len;
481
482                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
483                         return -1;
484
485                 desc_addr = vhost_iova_to_vva(dev, vq,
486                                 desc_iova,
487                                 &desc_chunck_len,
488                                 perm);
489                 if (unlikely(!desc_addr))
490                         return -1;
491
492                 rte_prefetch0((void *)(uintptr_t)desc_addr);
493
494                 buf_vec[vec_id].buf_iova = desc_iova;
495                 buf_vec[vec_id].buf_addr = desc_addr;
496                 buf_vec[vec_id].buf_len  = desc_chunck_len;
497
498                 desc_len -= desc_chunck_len;
499                 desc_iova += desc_chunck_len;
500                 vec_id++;
501         }
502         *vec_idx = vec_id;
503
504         return 0;
505 }
506
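/*
 * Walk a split-ring descriptor chain starting at avail ring slot
 * avail_idx, following an indirect table if present, and map every
 * descriptor into buf_vec. Returns the chain head id and total length.
 */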
507 static __rte_always_inline int
508 fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
509                          uint32_t avail_idx, uint16_t *vec_idx,
510                          struct buf_vector *buf_vec, uint16_t *desc_chain_head,
511                          uint32_t *desc_chain_len, uint8_t perm)
512 {
513         uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
514         uint16_t vec_id = *vec_idx;
515         uint32_t len    = 0;
516         uint64_t dlen;
517         uint32_t nr_descs = vq->size;
518         uint32_t cnt    = 0;
519         struct vring_desc *descs = vq->desc;
520         struct vring_desc *idesc = NULL;
521
522         if (unlikely(idx >= vq->size))
523                 return -1;
524
525         *desc_chain_head = idx;
526
527         if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
528                 dlen = vq->desc[idx].len;
529                 nr_descs = dlen / sizeof(struct vring_desc);
530                 if (unlikely(nr_descs > vq->size))
531                         return -1;
532
533                 descs = (struct vring_desc *)(uintptr_t)
534                         vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
535                                                 &dlen,
536                                                 VHOST_ACCESS_RO);
537                 if (unlikely(!descs))
538                         return -1;
539
540                 if (unlikely(dlen < vq->desc[idx].len)) {
541                         /*
542                          * The indirect desc table is not contiguous
543                          * in process VA space, so we have to copy it.
544                          */
545                         idesc = vhost_alloc_copy_ind_table(dev, vq,
546                                         vq->desc[idx].addr, vq->desc[idx].len);
547                         if (unlikely(!idesc))
548                                 return -1;
549
550                         descs = idesc;
551                 }
552
553                 idx = 0;
554         }
555
556         while (1) {
557                 if (unlikely(idx >= nr_descs || cnt++ >= nr_descs)) {
558                         free_ind_table(idesc);
559                         return -1;
560                 }
561
562                 len += descs[idx].len;
563
564                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
565                                                 descs[idx].addr, descs[idx].len,
566                                                 perm))) {
567                         free_ind_table(idesc);
568                         return -1;
569                 }
570
571                 if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
572                         break;
573
574                 idx = descs[idx].next;
575         }
576
577         *desc_chain_len = len;
578         *vec_idx = vec_id;
579
580         if (unlikely(!!idesc))
581                 free_ind_table(idesc);
582
583         return 0;
584 }
585
586 /*
587  * Returns -1 on failure, 0 on success.
588  */
589 static inline int
590 reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
591                                 uint32_t size, struct buf_vector *buf_vec,
592                                 uint16_t *num_buffers, uint16_t avail_head,
593                                 uint16_t *nr_vec)
594 {
595         uint16_t cur_idx;
596         uint16_t vec_idx = 0;
597         uint16_t max_tries, tries = 0;
598
599         uint16_t head_idx = 0;
600         uint32_t len = 0;
601
602         *num_buffers = 0;
603         cur_idx  = vq->last_avail_idx;
604
605         if (rxvq_is_mergeable(dev))
606                 max_tries = vq->size - 1;
607         else
608                 max_tries = 1;
609
610         while (size > 0) {
611                 if (unlikely(cur_idx == avail_head))
612                         return -1;
613                 /*
614                  * If we have tried all available ring items and still
615                  * cannot get enough buffers, something abnormal has
616                  * happened.
617                  */
618                 if (unlikely(++tries > max_tries))
619                         return -1;
620
621                 if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
622                                                 &vec_idx, buf_vec,
623                                                 &head_idx, &len,
624                                                 VHOST_ACCESS_RW) < 0))
625                         return -1;
626                 len = RTE_MIN(len, size);
627                 update_shadow_used_ring_split(vq, head_idx, len);
628                 size -= len;
629
630                 cur_idx++;
631                 *num_buffers += 1;
632         }
633
634         *nr_vec = vec_idx;
635
636         return 0;
637 }
638
639 static __rte_always_inline int
640 fill_vec_buf_packed_indirect(struct virtio_net *dev,
641                         struct vhost_virtqueue *vq,
642                         struct vring_packed_desc *desc, uint16_t *vec_idx,
643                         struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
644 {
645         uint16_t i;
646         uint32_t nr_descs;
647         uint16_t vec_id = *vec_idx;
648         uint64_t dlen;
649         struct vring_packed_desc *descs, *idescs = NULL;
650
651         dlen = desc->len;
652         descs = (struct vring_packed_desc *)(uintptr_t)
653                 vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
654         if (unlikely(!descs))
655                 return -1;
656
657         if (unlikely(dlen < desc->len)) {
658                 /*
659                  * The indirect desc table is not contiguous
660                          * in process VA space, so we have to copy it.
661                  */
662                 idescs = vhost_alloc_copy_ind_table(dev,
663                                 vq, desc->addr, desc->len);
664                 if (unlikely(!idescs))
665                         return -1;
666
667                 descs = idescs;
668         }
669
670         nr_descs =  desc->len / sizeof(struct vring_packed_desc);
671         if (unlikely(nr_descs >= vq->size)) {
672                 free_ind_table(idescs);
673                 return -1;
674         }
675
676         for (i = 0; i < nr_descs; i++) {
677                 if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
678                         free_ind_table(idescs);
679                         return -1;
680                 }
681
682                 *len += descs[i].len;
683                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
684                                                 descs[i].addr, descs[i].len,
685                                                 perm)))
686                         return -1;
687         }
688         *vec_idx = vec_id;
689
690         if (unlikely(!!idescs))
691                 free_ind_table(idescs);
692
693         return 0;
694 }
695
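/*
 * Collect one buffer (a descriptor chain, possibly indirect) from the
 * packed ring starting at avail_idx. The wrap counter is flipped when
 * the chain crosses the end of the ring.
 */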
696 static __rte_always_inline int
697 fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
698                                 uint16_t avail_idx, uint16_t *desc_count,
699                                 struct buf_vector *buf_vec, uint16_t *vec_idx,
700                                 uint16_t *buf_id, uint32_t *len, uint8_t perm)
701 {
702         bool wrap_counter = vq->avail_wrap_counter;
703         struct vring_packed_desc *descs = vq->desc_packed;
704         uint16_t vec_id = *vec_idx;
705
706         if (avail_idx < vq->last_avail_idx)
707                 wrap_counter ^= 1;
708
709         /*
710          * Perform a load-acquire barrier in desc_is_avail to
711          * enforce the ordering between desc flags and desc
712          * content.
713          */
714         if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
715                 return -1;
716
717         *desc_count = 0;
718         *len = 0;
719
720         while (1) {
721                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
722                         return -1;
723
724                 if (unlikely(*desc_count >= vq->size))
725                         return -1;
726
727                 *desc_count += 1;
728                 *buf_id = descs[avail_idx].id;
729
730                 if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
731                         if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
732                                                         &descs[avail_idx],
733                                                         &vec_id, buf_vec,
734                                                         len, perm) < 0))
735                                 return -1;
736                 } else {
737                         *len += descs[avail_idx].len;
738
739                         if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
740                                                         descs[avail_idx].addr,
741                                                         descs[avail_idx].len,
742                                                         perm)))
743                                 return -1;
744                 }
745
746                 if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
747                         break;
748
749                 if (++avail_idx >= vq->size) {
750                         avail_idx -= vq->size;
751                         wrap_counter ^= 1;
752                 }
753         }
754
755         *vec_idx = vec_id;
756
757         return 0;
758 }
759
760 static __rte_noinline void
761 copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
762                 struct buf_vector *buf_vec,
763                 struct virtio_net_hdr_mrg_rxbuf *hdr)
764 {
765         uint64_t len;
766         uint64_t remain = dev->vhost_hlen;
767         uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
768         uint64_t iova = buf_vec->buf_iova;
769
770         while (remain) {
771                 len = RTE_MIN(remain,
772                                 buf_vec->buf_len);
773                 dst = buf_vec->buf_addr;
774                 rte_memcpy((void *)(uintptr_t)dst,
775                                 (void *)(uintptr_t)src,
776                                 len);
777
778                 PRINT_PACKET(dev, (uintptr_t)dst,
779                                 (uint32_t)len, 0);
780                 vhost_log_cache_write_iova(dev, vq,
781                                 iova, len);
782
783                 remain -= len;
784                 iova += len;
785                 src += len;
786                 buf_vec++;
787         }
788 }
789
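/*
 * Copy one mbuf chain into the guest buffers described by buf_vec. The
 * virtio-net header is written first (possibly split across buffers);
 * small copies are deferred to the batch_copy array and flushed later by
 * do_data_copy_enqueue().
 */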
790 static __rte_always_inline int
791 copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
792                             struct rte_mbuf *m, struct buf_vector *buf_vec,
793                             uint16_t nr_vec, uint16_t num_buffers)
794 {
795         uint32_t vec_idx = 0;
796         uint32_t mbuf_offset, mbuf_avail;
797         uint32_t buf_offset, buf_avail;
798         uint64_t buf_addr, buf_iova, buf_len;
799         uint32_t cpy_len;
800         uint64_t hdr_addr;
801         struct rte_mbuf *hdr_mbuf;
802         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
803         struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
804         int error = 0;
805
806         if (unlikely(m == NULL)) {
807                 error = -1;
808                 goto out;
809         }
810
811         buf_addr = buf_vec[vec_idx].buf_addr;
812         buf_iova = buf_vec[vec_idx].buf_iova;
813         buf_len = buf_vec[vec_idx].buf_len;
814
815         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
816                 error = -1;
817                 goto out;
818         }
819
820         hdr_mbuf = m;
821         hdr_addr = buf_addr;
822         if (unlikely(buf_len < dev->vhost_hlen))
823                 hdr = &tmp_hdr;
824         else
825                 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
826
827         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
828                 dev->vid, num_buffers);
829
830         if (unlikely(buf_len < dev->vhost_hlen)) {
831                 buf_offset = dev->vhost_hlen - buf_len;
832                 vec_idx++;
833                 buf_addr = buf_vec[vec_idx].buf_addr;
834                 buf_iova = buf_vec[vec_idx].buf_iova;
835                 buf_len = buf_vec[vec_idx].buf_len;
836                 buf_avail = buf_len - buf_offset;
837         } else {
838                 buf_offset = dev->vhost_hlen;
839                 buf_avail = buf_len - dev->vhost_hlen;
840         }
841
842         mbuf_avail  = rte_pktmbuf_data_len(m);
843         mbuf_offset = 0;
844         while (mbuf_avail != 0 || m->next != NULL) {
845                 /* done with current buf, get the next one */
846                 if (buf_avail == 0) {
847                         vec_idx++;
848                         if (unlikely(vec_idx >= nr_vec)) {
849                                 error = -1;
850                                 goto out;
851                         }
852
853                         buf_addr = buf_vec[vec_idx].buf_addr;
854                         buf_iova = buf_vec[vec_idx].buf_iova;
855                         buf_len = buf_vec[vec_idx].buf_len;
856
857                         buf_offset = 0;
858                         buf_avail  = buf_len;
859                 }
860
861                 /* done with current mbuf, get the next one */
862                 if (mbuf_avail == 0) {
863                         m = m->next;
864
865                         mbuf_offset = 0;
866                         mbuf_avail  = rte_pktmbuf_data_len(m);
867                 }
868
869                 if (hdr_addr) {
870                         virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
871                         if (rxvq_is_mergeable(dev))
872                                 ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
873                                                 num_buffers);
874
875                         if (unlikely(hdr == &tmp_hdr)) {
876                                 copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
877                         } else {
878                                 PRINT_PACKET(dev, (uintptr_t)hdr_addr,
879                                                 dev->vhost_hlen, 0);
880                                 vhost_log_cache_write_iova(dev, vq,
881                                                 buf_vec[0].buf_iova,
882                                                 dev->vhost_hlen);
883                         }
884
885                         hdr_addr = 0;
886                 }
887
888                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
889
890                 if (likely(cpy_len > MAX_BATCH_LEN ||
891                                         vq->batch_copy_nb_elems >= vq->size)) {
892                         rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
893                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
894                                 cpy_len);
895                         vhost_log_cache_write_iova(dev, vq,
896                                                    buf_iova + buf_offset,
897                                                    cpy_len);
898                         PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
899                                 cpy_len, 0);
900                 } else {
901                         batch_copy[vq->batch_copy_nb_elems].dst =
902                                 (void *)((uintptr_t)(buf_addr + buf_offset));
903                         batch_copy[vq->batch_copy_nb_elems].src =
904                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
905                         batch_copy[vq->batch_copy_nb_elems].log_addr =
906                                 buf_iova + buf_offset;
907                         batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
908                         vq->batch_copy_nb_elems++;
909                 }
910
911                 mbuf_avail  -= cpy_len;
912                 mbuf_offset += cpy_len;
913                 buf_avail  -= cpy_len;
914                 buf_offset += cpy_len;
915         }
916
917 out:
918
919         return error;
920 }
921
922 static __rte_always_inline int
923 vhost_enqueue_single_packed(struct virtio_net *dev,
924                             struct vhost_virtqueue *vq,
925                             struct rte_mbuf *pkt,
926                             struct buf_vector *buf_vec,
927                             uint16_t *nr_descs)
928 {
929         uint16_t nr_vec = 0;
930         uint16_t avail_idx = vq->last_avail_idx;
931         uint16_t max_tries, tries = 0;
932         uint16_t buf_id = 0;
933         uint32_t len = 0;
934         uint16_t desc_count;
935         uint32_t size = pkt->pkt_len + dev->vhost_hlen;
936         uint16_t num_buffers = 0;
937         uint32_t buffer_len[vq->size];
938         uint16_t buffer_buf_id[vq->size];
939         uint16_t buffer_desc_count[vq->size];
940
941         if (rxvq_is_mergeable(dev))
942                 max_tries = vq->size - 1;
943         else
944                 max_tries = 1;
945
946         while (size > 0) {
947                 /*
948                  * If we have tried all available ring items and still
949                  * cannot get enough buffers, something abnormal has
950                  * happened.
951                  */
952                 if (unlikely(++tries > max_tries))
953                         return -1;
954
955                 if (unlikely(fill_vec_buf_packed(dev, vq,
956                                                 avail_idx, &desc_count,
957                                                 buf_vec, &nr_vec,
958                                                 &buf_id, &len,
959                                                 VHOST_ACCESS_RW) < 0))
960                         return -1;
961
962                 len = RTE_MIN(len, size);
963                 size -= len;
964
965                 buffer_len[num_buffers] = len;
966                 buffer_buf_id[num_buffers] = buf_id;
967                 buffer_desc_count[num_buffers] = desc_count;
968                 num_buffers += 1;
969
970                 *nr_descs += desc_count;
971                 avail_idx += desc_count;
972                 if (avail_idx >= vq->size)
973                         avail_idx -= vq->size;
974         }
975
976         if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0)
977                 return -1;
978
979         vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
980                                            buffer_desc_count, num_buffers);
981
982         return 0;
983 }
984
985 static __rte_noinline uint32_t
986 virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
987         struct rte_mbuf **pkts, uint32_t count)
988 {
989         uint32_t pkt_idx = 0;
990         uint16_t num_buffers;
991         struct buf_vector buf_vec[BUF_VECTOR_MAX];
992         uint16_t avail_head;
993
994         avail_head = *((volatile uint16_t *)&vq->avail->idx);
995
996         /*
997          * The ordering between avail index and
998          * desc reads needs to be enforced.
999          */
1000         rte_smp_rmb();
1001
1002         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1003
1004         for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
1005                 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
1006                 uint16_t nr_vec = 0;
1007
1008                 if (unlikely(reserve_avail_buf_split(dev, vq,
1009                                                 pkt_len, buf_vec, &num_buffers,
1010                                                 avail_head, &nr_vec) < 0)) {
1011                         VHOST_LOG_DEBUG(VHOST_DATA,
1012                                 "(%d) failed to get enough desc from vring\n",
1013                                 dev->vid);
1014                         vq->shadow_used_idx -= num_buffers;
1015                         break;
1016                 }
1017
1018                 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
1019                         dev->vid, vq->last_avail_idx,
1020                         vq->last_avail_idx + num_buffers);
1021
1022                 if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
1023                                                 buf_vec, nr_vec,
1024                                                 num_buffers) < 0) {
1025                         vq->shadow_used_idx -= num_buffers;
1026                         break;
1027                 }
1028
1029                 vq->last_avail_idx += num_buffers;
1030         }
1031
1032         do_data_copy_enqueue(dev, vq);
1033
1034         if (likely(vq->shadow_used_idx)) {
1035                 flush_shadow_used_ring_split(dev, vq);
1036                 vhost_vring_call_split(dev, vq);
1037         }
1038
1039         return pkt_idx;
1040 }
1041
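/*
 * Fast-path enqueue of PACKED_BATCH_SIZE single-segment packets at once.
 * Returns -1 (so the caller falls back to the single-packet path) unless
 * a fully available, batch-aligned run of descriptors large enough for
 * every packet is found.
 */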
1042 static __rte_always_inline int
1043 virtio_dev_rx_batch_packed(struct virtio_net *dev,
1044                            struct vhost_virtqueue *vq,
1045                            struct rte_mbuf **pkts)
1046 {
1047         bool wrap_counter = vq->avail_wrap_counter;
1048         struct vring_packed_desc *descs = vq->desc_packed;
1049         uint16_t avail_idx = vq->last_avail_idx;
1050         uint64_t desc_addrs[PACKED_BATCH_SIZE];
1051         struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
1052         uint32_t buf_offset = dev->vhost_hlen;
1053         uint64_t lens[PACKED_BATCH_SIZE];
1054         uint16_t ids[PACKED_BATCH_SIZE];
1055         uint16_t i;
1056
1057         if (unlikely(avail_idx & PACKED_BATCH_MASK))
1058                 return -1;
1059
1060         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
1061                 return -1;
1062
1063         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1064                 if (unlikely(pkts[i]->next != NULL))
1065                         return -1;
1066                 if (unlikely(!desc_is_avail(&descs[avail_idx + i],
1067                                             wrap_counter)))
1068                         return -1;
1069         }
1070
1071         rte_smp_rmb();
1072
1073         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1074                 lens[i] = descs[avail_idx + i].len;
1075
1076         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1077                 if (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset)))
1078                         return -1;
1079         }
1080
1081         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1082                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
1083                                                   descs[avail_idx + i].addr,
1084                                                   &lens[i],
1085                                                   VHOST_ACCESS_RW);
1086
1087         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1088                 if (unlikely(lens[i] != descs[avail_idx + i].len))
1089                         return -1;
1090         }
1091
1092         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1093                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
1094                 hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
1095                                         (uintptr_t)desc_addrs[i];
1096                 lens[i] = pkts[i]->pkt_len + dev->vhost_hlen;
1097         }
1098
1099         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1100                 virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr);
1101
1102         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
1103
1104         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1105                 rte_memcpy((void *)(uintptr_t)(desc_addrs[i] + buf_offset),
1106                            rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
1107                            pkts[i]->pkt_len);
1108         }
1109
1110         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1111                 ids[i] = descs[avail_idx + i].id;
1112
1113         vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);
1114
1115         return 0;
1116 }
1117
1118 static __rte_always_inline int16_t
1119 virtio_dev_rx_single_packed(struct virtio_net *dev,
1120                             struct vhost_virtqueue *vq,
1121                             struct rte_mbuf *pkt)
1122 {
1123         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1124         uint16_t nr_descs = 0;
1125
1126         rte_smp_rmb();
1127         if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
1128                                                  &nr_descs) < 0)) {
1129                 VHOST_LOG_DEBUG(VHOST_DATA,
1130                                 "(%d) failed to get enough desc from vring\n",
1131                                 dev->vid);
1132                 return -1;
1133         }
1134
1135         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
1136                         dev->vid, vq->last_avail_idx,
1137                         vq->last_avail_idx + nr_descs);
1138
1139         vq_inc_last_avail_packed(vq, nr_descs);
1140
1141         return 0;
1142 }
1143
1144 static __rte_noinline uint32_t
1145 virtio_dev_rx_packed(struct virtio_net *dev,
1146                      struct vhost_virtqueue *vq,
1147                      struct rte_mbuf **pkts,
1148                      uint32_t count)
1149 {
1150         uint32_t pkt_idx = 0;
1151         uint32_t remained = count;
1152
1153         do {
1154                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
1155
1156                 if (remained >= PACKED_BATCH_SIZE) {
1157                         if (!virtio_dev_rx_batch_packed(dev, vq, pkts)) {
1158                                 pkt_idx += PACKED_BATCH_SIZE;
1159                                 remained -= PACKED_BATCH_SIZE;
1160                                 continue;
1161                         }
1162                 }
1163
1164                 if (virtio_dev_rx_single_packed(dev, vq, pkts[pkt_idx]))
1165                         break;
1166                 pkt_idx++;
1167                 remained--;
1168
1169         } while (pkt_idx < count);
1170
1171         if (vq->shadow_used_idx) {
1172                 do_data_copy_enqueue(dev, vq);
1173                 vhost_flush_enqueue_shadow_packed(dev, vq);
1174         }
1175
1176         if (pkt_idx)
1177                 vhost_vring_call_packed(dev, vq);
1178
1179         return pkt_idx;
1180 }
1181
1182 static __rte_always_inline uint32_t
1183 virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
1184         struct rte_mbuf **pkts, uint32_t count)
1185 {
1186         struct vhost_virtqueue *vq;
1187         uint32_t nb_tx = 0;
1188
1189         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
1190         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
1191                 RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
1192                         dev->vid, __func__, queue_id);
1193                 return 0;
1194         }
1195
1196         vq = dev->virtqueue[queue_id];
1197
1198         rte_spinlock_lock(&vq->access_lock);
1199
1200         if (unlikely(vq->enabled == 0))
1201                 goto out_access_unlock;
1202
1203         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1204                 vhost_user_iotlb_rd_lock(vq);
1205
1206         if (unlikely(vq->access_ok == 0))
1207                 if (unlikely(vring_translate(dev, vq) < 0))
1208                         goto out;
1209
1210         count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
1211         if (count == 0)
1212                 goto out;
1213
1214         if (vq_is_packed(dev))
1215                 nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
1216         else
1217                 nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);
1218
1219 out:
1220         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1221                 vhost_user_iotlb_rd_unlock(vq);
1222
1223 out_access_unlock:
1224         rte_spinlock_unlock(&vq->access_lock);
1225
1226         return nb_tx;
1227 }
1228
1229 uint16_t
1230 rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
1231         struct rte_mbuf **pkts, uint16_t count)
1232 {
1233         struct virtio_net *dev = get_device(vid);
1234
1235         if (!dev)
1236                 return 0;
1237
1238         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
1239                 RTE_LOG(ERR, VHOST_DATA,
1240                         "(%d) %s: built-in vhost net backend is disabled.\n",
1241                         dev->vid, __func__);
1242                 return 0;
1243         }
1244
1245         return virtio_dev_rx(dev, queue_id, pkts, count);
1246 }
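/*
 * A minimal usage sketch for rte_vhost_enqueue_burst(), assuming a typical
 * forwarding loop with an already-registered vhost device `vid` and an
 * Ethernet port `port_id`; VIRTIO_RXQ (0, the guest RX virtqueue index) is
 * an illustrative macro, not defined in this file:
 *
 *     struct rte_mbuf *pkts[MAX_PKT_BURST];
 *     uint16_t nb_rx, nb_tx = 0;
 *
 *     nb_rx = rte_eth_rx_burst(port_id, 0, pkts, MAX_PKT_BURST);
 *     if (nb_rx)
 *             nb_tx = rte_vhost_enqueue_burst(vid, VIRTIO_RXQ,
 *                                             pkts, nb_rx);
 *
 *     Packets the guest could not accept remain owned by the caller:
 *     while (nb_tx < nb_rx)
 *             rte_pktmbuf_free(pkts[nb_tx++]);
 */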
1247
1248 static inline bool
1249 virtio_net_with_host_offload(struct virtio_net *dev)
1250 {
1251         if (dev->features &
1252                         ((1ULL << VIRTIO_NET_F_CSUM) |
1253                          (1ULL << VIRTIO_NET_F_HOST_ECN) |
1254                          (1ULL << VIRTIO_NET_F_HOST_TSO4) |
1255                          (1ULL << VIRTIO_NET_F_HOST_TSO6) |
1256                          (1ULL << VIRTIO_NET_F_HOST_UFO)))
1257                 return true;
1258
1259         return false;
1260 }
1261
1262 static void
1263 parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
1264 {
1265         struct rte_ipv4_hdr *ipv4_hdr;
1266         struct rte_ipv6_hdr *ipv6_hdr;
1267         void *l3_hdr = NULL;
1268         struct rte_ether_hdr *eth_hdr;
1269         uint16_t ethertype;
1270
1271         eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1272
1273         m->l2_len = sizeof(struct rte_ether_hdr);
1274         ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
1275
1276         if (ethertype == RTE_ETHER_TYPE_VLAN) {
1277                 struct rte_vlan_hdr *vlan_hdr =
1278                         (struct rte_vlan_hdr *)(eth_hdr + 1);
1279
1280                 m->l2_len += sizeof(struct rte_vlan_hdr);
1281                 ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
1282         }
1283
1284         l3_hdr = (char *)eth_hdr + m->l2_len;
1285
1286         switch (ethertype) {
1287         case RTE_ETHER_TYPE_IPV4:
1288                 ipv4_hdr = l3_hdr;
1289                 *l4_proto = ipv4_hdr->next_proto_id;
1290                 m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
1291                 *l4_hdr = (char *)l3_hdr + m->l3_len;
1292                 m->ol_flags |= PKT_TX_IPV4;
1293                 break;
1294         case RTE_ETHER_TYPE_IPV6:
1295                 ipv6_hdr = l3_hdr;
1296                 *l4_proto = ipv6_hdr->proto;
1297                 m->l3_len = sizeof(struct rte_ipv6_hdr);
1298                 *l4_hdr = (char *)l3_hdr + m->l3_len;
1299                 m->ol_flags |= PKT_TX_IPV6;
1300                 break;
1301         default:
1302                 m->l3_len = 0;
1303                 *l4_proto = 0;
1304                 *l4_hdr = NULL;
1305                 break;
1306         }
1307 }
1308
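/*
 * Translate the virtio-net header received from the guest into mbuf
 * offload flags (checksum and GSO requests), parsing the packet headers
 * to locate the L4 protocol.
 */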
1309 static __rte_always_inline void
1310 vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
1311 {
1312         uint16_t l4_proto = 0;
1313         void *l4_hdr = NULL;
1314         struct rte_tcp_hdr *tcp_hdr = NULL;
1315
1316         if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
1317                 return;
1318
1319         parse_ethernet(m, &l4_proto, &l4_hdr);
1320         if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1321                 if (hdr->csum_start == (m->l2_len + m->l3_len)) {
1322                         switch (hdr->csum_offset) {
1323                         case (offsetof(struct rte_tcp_hdr, cksum)):
1324                                 if (l4_proto == IPPROTO_TCP)
1325                                         m->ol_flags |= PKT_TX_TCP_CKSUM;
1326                                 break;
1327                         case (offsetof(struct rte_udp_hdr, dgram_cksum)):
1328                                 if (l4_proto == IPPROTO_UDP)
1329                                         m->ol_flags |= PKT_TX_UDP_CKSUM;
1330                                 break;
1331                         case (offsetof(struct rte_sctp_hdr, cksum)):
1332                                 if (l4_proto == IPPROTO_SCTP)
1333                                         m->ol_flags |= PKT_TX_SCTP_CKSUM;
1334                                 break;
1335                         default:
1336                                 break;
1337                         }
1338                 }
1339         }
1340
1341         if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1342                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1343                 case VIRTIO_NET_HDR_GSO_TCPV4:
1344                 case VIRTIO_NET_HDR_GSO_TCPV6:
1345                         tcp_hdr = l4_hdr;
1346                         m->ol_flags |= PKT_TX_TCP_SEG;
1347                         m->tso_segsz = hdr->gso_size;
1348                         m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
1349                         break;
1350                 case VIRTIO_NET_HDR_GSO_UDP:
1351                         m->ol_flags |= PKT_TX_UDP_SEG;
1352                         m->tso_segsz = hdr->gso_size;
1353                         m->l4_len = sizeof(struct rte_udp_hdr);
1354                         break;
1355                 default:
1356                         RTE_LOG(WARNING, VHOST_DATA,
1357                                 "unsupported gso type %u.\n", hdr->gso_type);
1358                         break;
1359                 }
1360         }
1361 }
1362
1363 static __rte_noinline void
1364 copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
1365                 struct buf_vector *buf_vec)
1366 {
1367         uint64_t len;
1368         uint64_t remain = sizeof(struct virtio_net_hdr);
1369         uint64_t src;
1370         uint64_t dst = (uint64_t)(uintptr_t)hdr;
1371
1372         while (remain) {
1373                 len = RTE_MIN(remain, buf_vec->buf_len);
1374                 src = buf_vec->buf_addr;
1375                 rte_memcpy((void *)(uintptr_t)dst,
1376                                 (void *)(uintptr_t)src, len);
1377
1378                 remain -= len;
1379                 dst += len;
1380                 buf_vec++;
1381         }
1382 }
1383
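/*
 * Copy the guest buffers described by buf_vec into an mbuf chain, reading
 * the virtio-net header first (copied out via copy_vnet_hdr_from_desc()
 * when it is not contiguous in host VA space). In dequeue zero-copy mode
 * the mbuf is pointed directly at the guest buffer when possible.
 */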
1384 static __rte_always_inline int
1385 copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
1386                   struct buf_vector *buf_vec, uint16_t nr_vec,
1387                   struct rte_mbuf *m, struct rte_mempool *mbuf_pool)
1388 {
1389         uint32_t buf_avail, buf_offset;
1390         uint64_t buf_addr, buf_iova, buf_len;
1391         uint32_t mbuf_avail, mbuf_offset;
1392         uint32_t cpy_len;
1393         struct rte_mbuf *cur = m, *prev = m;
1394         struct virtio_net_hdr tmp_hdr;
1395         struct virtio_net_hdr *hdr = NULL;
1396         /* A counter to avoid an endless loop in the desc chain */
1397         uint16_t vec_idx = 0;
1398         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
1399         int error = 0;
1400
1401         buf_addr = buf_vec[vec_idx].buf_addr;
1402         buf_iova = buf_vec[vec_idx].buf_iova;
1403         buf_len = buf_vec[vec_idx].buf_len;
1404
1405         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
1406                 error = -1;
1407                 goto out;
1408         }
1409
1410         if (virtio_net_with_host_offload(dev)) {
1411                 if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
1412                         /*
1413                          * No luck, the virtio-net header doesn't fit
1414                          * in a contiguous virtual area.
1415                          */
1416                         copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec);
1417                         hdr = &tmp_hdr;
1418                 } else {
1419                         hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
1420                 }
1421         }
1422
1423         /*
1424          * A virtio driver normally uses at least 2 desc buffers
1425          * for Tx: the first for storing the header, and others
1426          * for storing the data.
1427          */
1428         if (unlikely(buf_len < dev->vhost_hlen)) {
1429                 buf_offset = dev->vhost_hlen - buf_len;
1430                 vec_idx++;
1431                 buf_addr = buf_vec[vec_idx].buf_addr;
1432                 buf_iova = buf_vec[vec_idx].buf_iova;
1433                 buf_len = buf_vec[vec_idx].buf_len;
1434                 buf_avail  = buf_len - buf_offset;
1435         } else if (buf_len == dev->vhost_hlen) {
1436                 if (unlikely(++vec_idx >= nr_vec))
1437                         goto out;
1438                 buf_addr = buf_vec[vec_idx].buf_addr;
1439                 buf_iova = buf_vec[vec_idx].buf_iova;
1440                 buf_len = buf_vec[vec_idx].buf_len;
1441
1442                 buf_offset = 0;
1443                 buf_avail = buf_len;
1444         } else {
1445                 buf_offset = dev->vhost_hlen;
1446                 buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
1447         }
1448
1449         PRINT_PACKET(dev,
1450                         (uintptr_t)(buf_addr + buf_offset),
1451                         (uint32_t)buf_avail, 0);
1452
1453         mbuf_offset = 0;
1454         mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
1455         while (1) {
1456                 uint64_t hpa;
1457
1458                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
1459
1460                 /*
1461                  * A desc buf might span two host physical pages that are
1462                  * not contiguous. In that case (gpa_to_hpa returns 0), the
1463                  * data will be copied even though zero copy is enabled.
1464                  */
1465                 if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
1466                                         buf_iova + buf_offset, cpy_len)))) {
1467                         cur->data_len = cpy_len;
1468                         cur->data_off = 0;
1469                         cur->buf_addr =
1470                                 (void *)(uintptr_t)(buf_addr + buf_offset);
1471                         cur->buf_iova = hpa;
1472
1473                         /*
1474                          * In zero-copy mode, one mbuf can only reference the data
1475                          * of one desc buf, or part of it.
1476                          */
1477                         mbuf_avail = cpy_len;
1478                 } else {
1479                         if (likely(cpy_len > MAX_BATCH_LEN ||
1480                                    vq->batch_copy_nb_elems >= vq->size ||
1481                                    (hdr && cur == m))) {
1482                                 rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
1483                                                                    mbuf_offset),
1484                                            (void *)((uintptr_t)(buf_addr +
1485                                                            buf_offset)),
1486                                            cpy_len);
1487                         } else {
1488                                 batch_copy[vq->batch_copy_nb_elems].dst =
1489                                         rte_pktmbuf_mtod_offset(cur, void *,
1490                                                                 mbuf_offset);
1491                                 batch_copy[vq->batch_copy_nb_elems].src =
1492                                         (void *)((uintptr_t)(buf_addr +
1493                                                                 buf_offset));
1494                                 batch_copy[vq->batch_copy_nb_elems].len =
1495                                         cpy_len;
1496                                 vq->batch_copy_nb_elems++;
1497                         }
1498                 }
1499
1500                 mbuf_avail  -= cpy_len;
1501                 mbuf_offset += cpy_len;
1502                 buf_avail -= cpy_len;
1503                 buf_offset += cpy_len;
1504
1505                 /* This buf reaches its end, get the next one */
1506                 if (buf_avail == 0) {
1507                         if (++vec_idx >= nr_vec)
1508                                 break;
1509
1510                         buf_addr = buf_vec[vec_idx].buf_addr;
1511                         buf_iova = buf_vec[vec_idx].buf_iova;
1512                         buf_len = buf_vec[vec_idx].buf_len;
1513
1514                         buf_offset = 0;
1515                         buf_avail  = buf_len;
1516
1517                         PRINT_PACKET(dev, (uintptr_t)buf_addr,
1518                                         (uint32_t)buf_avail, 0);
1519                 }
1520
1521                 /*
1522                  * This mbuf reaches its end, get a new one
1523                  * to hold more data.
1524                  */
1525                 if (mbuf_avail == 0) {
1526                         cur = rte_pktmbuf_alloc(mbuf_pool);
1527                         if (unlikely(cur == NULL)) {
1528                                 RTE_LOG(ERR, VHOST_DATA, "Failed to "
1529                                         "allocate memory for mbuf.\n");
1530                                 error = -1;
1531                                 goto out;
1532                         }
1533                         if (unlikely(dev->dequeue_zero_copy))
1534                                 rte_mbuf_refcnt_update(cur, 1);
1535
1536                         prev->next = cur;
1537                         prev->data_len = mbuf_offset;
1538                         m->nb_segs += 1;
1539                         m->pkt_len += mbuf_offset;
1540                         prev = cur;
1541
1542                         mbuf_offset = 0;
1543                         mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
1544                 }
1545         }
1546
1547         prev->data_len = mbuf_offset;
1548         m->pkt_len    += mbuf_offset;
1549
1550         if (hdr)
1551                 vhost_dequeue_offload(hdr, m);
1552
1553 out:
1554
1555         return error;
1556 }
1557
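/*
 * Pick a free zero-copy mbuf slot, scanning [last_zmbuf_idx, zmbuf_size)
 * first and wrapping around to [0, last_zmbuf_idx) on a second pass.
 * Returns NULL if all slots are in use.
 */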
1558 static __rte_always_inline struct zcopy_mbuf *
1559 get_zmbuf(struct vhost_virtqueue *vq)
1560 {
1561         uint16_t i;
1562         uint16_t last;
1563         int tries = 0;
1564
1565         /* search [last_zmbuf_idx, zmbuf_size) */
1566         i = vq->last_zmbuf_idx;
1567         last = vq->zmbuf_size;
1568
1569 again:
1570         for (; i < last; i++) {
1571                 if (vq->zmbufs[i].in_use == 0) {
1572                         vq->last_zmbuf_idx = i + 1;
1573                         vq->zmbufs[i].in_use = 1;
1574                         return &vq->zmbufs[i];
1575                 }
1576         }
1577
1578         tries++;
1579         if (tries == 1) {
1580                 /* search [0, last_zmbuf_idx) */
1581                 i = 0;
1582                 last = vq->last_zmbuf_idx;
1583                 goto again;
1584         }
1585
1586         return NULL;
1587 }
1588
1589 static void
1590 virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)
1591 {
1592         rte_free(opaque);
1593 }
1594
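/*
 * Allocate an external buffer large enough for 'size' bytes of packet
 * data plus headroom and attach it to 'pkt'. The shared info is stored
 * in the pktmbuf tailroom when it fits there, otherwise at the end of
 * the newly allocated buffer.
 * Returns 0 on success, a negative value on failure.
 */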
1595 static int
1596 virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
1597 {
1598         struct rte_mbuf_ext_shared_info *shinfo = NULL;
1599         uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
1600         uint16_t buf_len;
1601         rte_iova_t iova;
1602         void *buf;
1603
1604         /* Try to use the pkt buffer to store the shinfo to reduce the amount
1605          * of memory required; otherwise store the shinfo in the new buffer.
1606          */
1607         if (rte_pktmbuf_tailroom(pkt) >= sizeof(*shinfo))
1608                 shinfo = rte_pktmbuf_mtod(pkt,
1609                                           struct rte_mbuf_ext_shared_info *);
1610         else {
1611                 total_len += sizeof(*shinfo) + sizeof(uintptr_t);
1612                 total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
1613         }
1614
1615         if (unlikely(total_len > UINT16_MAX))
1616                 return -ENOSPC;
1617
1618         buf_len = total_len;
1619         buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
1620         if (unlikely(buf == NULL))
1621                 return -ENOMEM;
1622
1623         /* Initialize shinfo */
1624         if (shinfo) {
1625                 shinfo->free_cb = virtio_dev_extbuf_free;
1626                 shinfo->fcb_opaque = buf;
1627                 rte_mbuf_ext_refcnt_set(shinfo, 1);
1628         } else {
1629                 shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
1630                                               virtio_dev_extbuf_free, buf);
1631                 if (unlikely(shinfo == NULL)) {
1632                         rte_free(buf);
1633                         RTE_LOG(ERR, VHOST_DATA, "Failed to init shinfo\n");
1634                         return -1;
1635                 }
1636         }
1637
1638         iova = rte_malloc_virt2iova(buf);
1639         rte_pktmbuf_attach_extbuf(pkt, buf, iova, buf_len, shinfo);
1640         rte_pktmbuf_reset_headroom(pkt);
1641
1642         return 0;
1643 }
1644
1645 /*
1646  * Allocate a host-supported pktmbuf able to hold 'data_len' bytes.
1647  */
1648 static __rte_always_inline struct rte_mbuf *
1649 virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
1650                          uint32_t data_len)
1651 {
1652         struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp);
1653
1654         if (unlikely(pkt == NULL))
1655                 return NULL;
1656
1657         if (rte_pktmbuf_tailroom(pkt) >= data_len)
1658                 return pkt;
1659
1660         /* attach an external buffer if supported */
1661         if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
1662                 return pkt;
1663
1664         /* check if chained buffers are allowed */
1665         if (!dev->linearbuf)
1666                 return pkt;
1667
1668         /* The data doesn't fit into the buffer and the host supports
1669          * only linear buffers.
1670          */
1671         rte_pktmbuf_free(pkt);
1672
1673         return NULL;
1674 }
1675
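/*
 * Dequeue path for split virtqueues: reclaim consumed zero-copy mbufs
 * if zero copy is enabled, then translate up to 'count' available
 * descriptor chains into mbufs and flush the shadow used ring.
 * Returns the number of packets dequeued.
 */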
1676 static __rte_noinline uint16_t
1677 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
1678         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
1679 {
1680         uint16_t i;
1681         uint16_t free_entries;
1682
1683         if (unlikely(dev->dequeue_zero_copy)) {
1684                 struct zcopy_mbuf *zmbuf, *next;
1685
1686                 for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
1687                      zmbuf != NULL; zmbuf = next) {
1688                         next = TAILQ_NEXT(zmbuf, next);
1689
1690                         if (mbuf_is_consumed(zmbuf->mbuf)) {
1691                                 update_shadow_used_ring_split(vq,
1692                                                 zmbuf->desc_idx, 0);
1693                                 TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
1694                                 restore_mbuf(zmbuf->mbuf);
1695                                 rte_pktmbuf_free(zmbuf->mbuf);
1696                                 put_zmbuf(zmbuf);
1697                                 vq->nr_zmbuf -= 1;
1698                         }
1699                 }
1700
1701                 if (likely(vq->shadow_used_idx)) {
1702                         flush_shadow_used_ring_split(dev, vq);
1703                         vhost_vring_call_split(dev, vq);
1704                 }
1705         }
1706
1707         free_entries = *((volatile uint16_t *)&vq->avail->idx) -
1708                         vq->last_avail_idx;
1709         if (free_entries == 0)
1710                 return 0;
1711
1712         /*
1713          * The ordering between the avail index read and the
1714          * descriptor reads needs to be enforced.
1715          */
1716         rte_smp_rmb();
1717
1718         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1719
1720         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
1721
1722         count = RTE_MIN(count, MAX_PKT_BURST);
1723         count = RTE_MIN(count, free_entries);
1724         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
1725                         dev->vid, count);
1726
1727         for (i = 0; i < count; i++) {
1728                 struct buf_vector buf_vec[BUF_VECTOR_MAX];
1729                 uint16_t head_idx;
1730                 uint32_t buf_len;
1731                 uint16_t nr_vec = 0;
1732                 int err;
1733
1734                 if (unlikely(fill_vec_buf_split(dev, vq,
1735                                                 vq->last_avail_idx + i,
1736                                                 &nr_vec, buf_vec,
1737                                                 &head_idx, &buf_len,
1738                                                 VHOST_ACCESS_RO) < 0))
1739                         break;
1740
1741                 if (likely(dev->dequeue_zero_copy == 0))
1742                         update_shadow_used_ring_split(vq, head_idx, 0);
1743
1744                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
1745                 if (unlikely(pkts[i] == NULL))
1746                         break;
1747
1748                 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
1749                                 mbuf_pool);
1750                 if (unlikely(err)) {
1751                         rte_pktmbuf_free(pkts[i]);
1752                         break;
1753                 }
1754
1755                 if (unlikely(dev->dequeue_zero_copy)) {
1756                         struct zcopy_mbuf *zmbuf;
1757
1758                         zmbuf = get_zmbuf(vq);
1759                         if (!zmbuf) {
1760                                 rte_pktmbuf_free(pkts[i]);
1761                                 break;
1762                         }
1763                         zmbuf->mbuf = pkts[i];
1764                         zmbuf->desc_idx = head_idx;
1765
1766                         /*
1767                          * Pin the mbuf by taking an extra reference; we will
1768                          * check later whether the mbuf has been freed (i.e. we
1769                          * were the last user). If that is the case, we can then
1770                          * update the used ring safely.
1771                          */
1772                         rte_mbuf_refcnt_update(pkts[i], 1);
1773
1774                         vq->nr_zmbuf += 1;
1775                         TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
1776                 }
1777         }
1778         vq->last_avail_idx += i;
1779
1780         if (likely(dev->dequeue_zero_copy == 0)) {
1781                 do_data_copy_dequeue(vq);
1782                 if (unlikely(i < count))
1783                         vq->shadow_used_idx = i;
1784                 if (likely(vq->shadow_used_idx)) {
1785                         flush_shadow_used_ring_split(dev, vq);
1786                         vhost_vring_call_split(dev, vq);
1787                 }
1788         }
1789
1790         return i;
1791 }
1792
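/*
 * Check whether a full batch of PACKED_BATCH_SIZE single-descriptor
 * buffers is available at 'avail_idx', translate their guest addresses
 * and allocate one mbuf per descriptor. Descriptor addresses and ids
 * are returned through 'desc_addrs' and 'ids'.
 * Returns 0 when the batch can be processed, -1 otherwise.
 */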
1793 static __rte_always_inline int
1794 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
1795                                  struct vhost_virtqueue *vq,
1796                                  struct rte_mempool *mbuf_pool,
1797                                  struct rte_mbuf **pkts,
1798                                  uint16_t avail_idx,
1799                                  uintptr_t *desc_addrs,
1800                                  uint16_t *ids)
1801 {
1802         bool wrap = vq->avail_wrap_counter;
1803         struct vring_packed_desc *descs = vq->desc_packed;
1804         struct virtio_net_hdr *hdr;
1805         uint64_t lens[PACKED_BATCH_SIZE];
1806         uint64_t buf_lens[PACKED_BATCH_SIZE];
1807         uint32_t buf_offset = dev->vhost_hlen;
1808         uint16_t flags, i;
1809
1810         if (unlikely(avail_idx & PACKED_BATCH_MASK))
1811                 return -1;
1812         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
1813                 return -1;
1814
1815         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1816                 flags = descs[avail_idx + i].flags;
1817                 if (unlikely((wrap != !!(flags & VRING_DESC_F_AVAIL)) ||
1818                              (wrap == !!(flags & VRING_DESC_F_USED))  ||
1819                              (flags & PACKED_DESC_SINGLE_DEQUEUE_FLAG)))
1820                         return -1;
1821         }
1822
1823         rte_smp_rmb();
1824
1825         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1826                 lens[i] = descs[avail_idx + i].len;
1827
1828         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1829                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
1830                                                   descs[avail_idx + i].addr,
1831                                                   &lens[i], VHOST_ACCESS_RW);
1832         }
1833
1834         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1835                 if (unlikely((lens[i] != descs[avail_idx + i].len)))
1836                         return -1;
1837         }
1838
1839         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1840                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]);
1841                 if (!pkts[i])
1842                         goto free_buf;
1843         }
1844
1845         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1846                 buf_lens[i] = pkts[i]->buf_len - pkts[i]->data_off;
1847
1848         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1849                 if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
1850                         goto free_buf;
1851         }
1852
1853         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1854                 pkts[i]->pkt_len = descs[avail_idx + i].len - buf_offset;
1855                 pkts[i]->data_len = pkts[i]->pkt_len;
1856                 ids[i] = descs[avail_idx + i].id;
1857         }
1858
1859         if (virtio_net_with_host_offload(dev)) {
1860                 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1861                         hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
1862                         vhost_dequeue_offload(hdr, pkts[i]);
1863                 }
1864         }
1865
1866         return 0;
1867
1868 free_buf:
1869         for (i = 0; i < PACKED_BATCH_SIZE; i++)
1870                 rte_pktmbuf_free(pkts[i]);
1871
1872         return -1;
1873 }
1874
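/*
 * Dequeue a batch of PACKED_BATCH_SIZE packets from a packed virtqueue,
 * copying the payloads into the reserved mbufs. Only the last buffer id
 * needs to be recorded in the shadow ring when VIRTIO_F_IN_ORDER has
 * been negotiated.
 */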
1875 static __rte_always_inline int
1876 virtio_dev_tx_batch_packed(struct virtio_net *dev,
1877                            struct vhost_virtqueue *vq,
1878                            struct rte_mempool *mbuf_pool,
1879                            struct rte_mbuf **pkts)
1880 {
1881         uint16_t avail_idx = vq->last_avail_idx;
1882         uint32_t buf_offset = dev->vhost_hlen;
1883         uintptr_t desc_addrs[PACKED_BATCH_SIZE];
1884         uint16_t ids[PACKED_BATCH_SIZE];
1885         uint16_t i;
1886
1887         if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
1888                                              avail_idx, desc_addrs, ids))
1889                 return -1;
1890
1891         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1892                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
1893
1894         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1895                 rte_memcpy(rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
1896                            (void *)(uintptr_t)(desc_addrs[i] + buf_offset),
1897                            pkts[i]->pkt_len);
1898
1899         if (virtio_net_is_inorder(dev))
1900                 vhost_shadow_dequeue_batch_packed_inorder(vq,
1901                         ids[PACKED_BATCH_SIZE - 1]);
1902         else
1903                 vhost_shadow_dequeue_batch_packed(dev, vq, ids);
1904
1905         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
1906
1907         return 0;
1908 }
1909
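/*
 * Dequeue a single descriptor chain from a packed virtqueue: map the
 * chain into 'buf_vec', allocate an mbuf large enough for it and copy
 * the data over. 'buf_id' and 'desc_count' describe the consumed chain.
 * Returns 0 on success, -1 on failure.
 */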
1910 static __rte_always_inline int
1911 vhost_dequeue_single_packed(struct virtio_net *dev,
1912                             struct vhost_virtqueue *vq,
1913                             struct rte_mempool *mbuf_pool,
1914                             struct rte_mbuf **pkts,
1915                             uint16_t *buf_id,
1916                             uint16_t *desc_count)
1917 {
1918         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1919         uint32_t buf_len;
1920         uint16_t nr_vec = 0;
1921         int err;
1922
1923         if (unlikely(fill_vec_buf_packed(dev, vq,
1924                                          vq->last_avail_idx, desc_count,
1925                                          buf_vec, &nr_vec,
1926                                          buf_id, &buf_len,
1927                                          VHOST_ACCESS_RO) < 0))
1928                 return -1;
1929
1930         *pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
1931         if (unlikely(*pkts == NULL)) {
1932                 RTE_LOG(ERR, VHOST_DATA,
1933                         "Failed to allocate memory for mbuf.\n");
1934                 return -1;
1935         }
1936
1937         err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
1938                                 mbuf_pool);
1939         if (unlikely(err)) {
1940                 rte_pktmbuf_free(*pkts);
1941                 return -1;
1942         }
1943
1944         return 0;
1945 }
1946
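/*
 * Dequeue one packet from a packed virtqueue and record the used
 * descriptors, using the in-order shadow helper when VIRTIO_F_IN_ORDER
 * has been negotiated.
 */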
1947 static __rte_always_inline int
1948 virtio_dev_tx_single_packed(struct virtio_net *dev,
1949                             struct vhost_virtqueue *vq,
1950                             struct rte_mempool *mbuf_pool,
1951                             struct rte_mbuf **pkts)
1952 {
1953
1954         uint16_t buf_id, desc_count;
1955
1956         if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
1957                                         &desc_count))
1958                 return -1;
1959
1960         if (virtio_net_is_inorder(dev))
1961                 vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
1962                                                            desc_count);
1963         else
1964                 vhost_shadow_dequeue_single_packed(vq, buf_id, desc_count);
1965
1966         vq_inc_last_avail_packed(vq, desc_count);
1967
1968         return 0;
1969 }
1970
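/*
 * Zero-copy variant of the batched packed-ring dequeue: each packet is
 * pinned with a zcopy_mbuf entry and an extra mbuf reference, and the
 * used descriptors are written back later from free_zmbuf(), once the
 * mbufs have been consumed.
 */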
1971 static __rte_always_inline int
1972 virtio_dev_tx_batch_packed_zmbuf(struct virtio_net *dev,
1973                                  struct vhost_virtqueue *vq,
1974                                  struct rte_mempool *mbuf_pool,
1975                                  struct rte_mbuf **pkts)
1976 {
1977         struct zcopy_mbuf *zmbufs[PACKED_BATCH_SIZE];
1978         uintptr_t desc_addrs[PACKED_BATCH_SIZE];
1979         uint16_t ids[PACKED_BATCH_SIZE];
1980         uint16_t i;
1981
1982         uint16_t avail_idx = vq->last_avail_idx;
1983
1984         if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
1985                                              avail_idx, desc_addrs, ids))
1986                 return -1;
1987
1988         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1989                 zmbufs[i] = get_zmbuf(vq);
1990
1991         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1992                 if (!zmbufs[i])
1993                         goto free_pkt;
1994         }
1995
1996         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1997                 zmbufs[i]->mbuf = pkts[i];
1998                 zmbufs[i]->desc_idx = avail_idx + i;
1999                 zmbufs[i]->desc_count = 1;
2000         }
2001
2002         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2003                 rte_mbuf_refcnt_update(pkts[i], 1);
2004
2005         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2006                 TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbufs[i], next);
2007
2008         vq->nr_zmbuf += PACKED_BATCH_SIZE;
2009         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
2010
2011         return 0;
2012
2013 free_pkt:
2014         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
2015                 rte_pktmbuf_free(pkts[i]);
2016
2017         return -1;
2018 }
2019
2020 static __rte_always_inline int
2021 virtio_dev_tx_single_packed_zmbuf(struct virtio_net *dev,
2022                                   struct vhost_virtqueue *vq,
2023                                   struct rte_mempool *mbuf_pool,
2024                                   struct rte_mbuf **pkts)
2025 {
2026         uint16_t buf_id, desc_count;
2027         struct zcopy_mbuf *zmbuf;
2028
2029         if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
2030                                         &desc_count))
2031                 return -1;
2032
2033         zmbuf = get_zmbuf(vq);
2034         if (!zmbuf) {
2035                 rte_pktmbuf_free(*pkts);
2036                 return -1;
2037         }
2038         zmbuf->mbuf = *pkts;
2039         zmbuf->desc_idx = vq->last_avail_idx;
2040         zmbuf->desc_count = desc_count;
2041
2042         rte_mbuf_refcnt_update(*pkts, 1);
2043
2044         vq->nr_zmbuf += 1;
2045         TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
2046
2047         vq_inc_last_avail_packed(vq, desc_count);
2048         return 0;
2049 }
2050
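/*
 * Walk the zero-copy mbuf list and, for every mbuf that has been
 * consumed, write the corresponding used descriptor back to the packed
 * ring and release the zcopy_mbuf entry.
 */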
2051 static __rte_always_inline void
2052 free_zmbuf(struct vhost_virtqueue *vq)
2053 {
2054         struct zcopy_mbuf *next = NULL;
2055         struct zcopy_mbuf *zmbuf;
2056
2057         for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
2058              zmbuf != NULL; zmbuf = next) {
2059                 next = TAILQ_NEXT(zmbuf, next);
2060
2061                 uint16_t last_used_idx = vq->last_used_idx;
2062
2063                 if (mbuf_is_consumed(zmbuf->mbuf)) {
2064                         uint16_t flags;
2065                         flags = vq->desc_packed[last_used_idx].flags;
2066                         if (vq->used_wrap_counter) {
2067                                 flags |= VRING_DESC_F_USED;
2068                                 flags |= VRING_DESC_F_AVAIL;
2069                         } else {
2070                                 flags &= ~VRING_DESC_F_USED;
2071                                 flags &= ~VRING_DESC_F_AVAIL;
2072                         }
2073
2074                         vq->desc_packed[last_used_idx].id = zmbuf->desc_idx;
2075                         vq->desc_packed[last_used_idx].len = 0;
2076
2077                         rte_smp_wmb();
2078                         vq->desc_packed[last_used_idx].flags = flags;
2079
2080                         vq_inc_last_used_packed(vq, zmbuf->desc_count);
2081
2082                         TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
2083                         restore_mbuf(zmbuf->mbuf);
2084                         rte_pktmbuf_free(zmbuf->mbuf);
2085                         put_zmbuf(zmbuf);
2086                         vq->nr_zmbuf -= 1;
2087                 }
2088         }
2089 }
2090
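/*
 * Top-level zero-copy dequeue loop for packed virtqueues: reclaim
 * consumed zero-copy mbufs first, then dequeue in batches of
 * PACKED_BATCH_SIZE when possible, falling back to single-packet
 * processing otherwise.
 */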
2091 static __rte_noinline uint16_t
2092 virtio_dev_tx_packed_zmbuf(struct virtio_net *dev,
2093                            struct vhost_virtqueue *vq,
2094                            struct rte_mempool *mbuf_pool,
2095                            struct rte_mbuf **pkts,
2096                            uint32_t count)
2097 {
2098         uint32_t pkt_idx = 0;
2099         uint32_t remained = count;
2100
2101         free_zmbuf(vq);
2102
2103         do {
2104                 if (remained >= PACKED_BATCH_SIZE) {
2105                         if (!virtio_dev_tx_batch_packed_zmbuf(dev, vq,
2106                                 mbuf_pool, &pkts[pkt_idx])) {
2107                                 pkt_idx += PACKED_BATCH_SIZE;
2108                                 remained -= PACKED_BATCH_SIZE;
2109                                 continue;
2110                         }
2111                 }
2112
2113                 if (virtio_dev_tx_single_packed_zmbuf(dev, vq, mbuf_pool,
2114                                                       &pkts[pkt_idx]))
2115                         break;
2116                 pkt_idx++;
2117                 remained--;
2118
2119         } while (remained);
2120
2121         if (pkt_idx)
2122                 vhost_vring_call_packed(dev, vq);
2123
2124         return pkt_idx;
2125 }
2126
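/*
 * Top-level dequeue loop for packed virtqueues: try a batch of
 * PACKED_BATCH_SIZE packets first and fall back to single-packet
 * dequeue, flushing the shadow used ring as descriptors are consumed.
 */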
2127 static __rte_noinline uint16_t
2128 virtio_dev_tx_packed(struct virtio_net *dev,
2129                      struct vhost_virtqueue *vq,
2130                      struct rte_mempool *mbuf_pool,
2131                      struct rte_mbuf **pkts,
2132                      uint32_t count)
2133 {
2134         uint32_t pkt_idx = 0;
2135         uint32_t remained = count;
2136
2137         do {
2138                 rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
2139
2140                 if (remained >= PACKED_BATCH_SIZE) {
2141                         if (!virtio_dev_tx_batch_packed(dev, vq, mbuf_pool,
2142                                                         &pkts[pkt_idx])) {
2143                                 vhost_flush_dequeue_packed(dev, vq);
2144                                 pkt_idx += PACKED_BATCH_SIZE;
2145                                 remained -= PACKED_BATCH_SIZE;
2146                                 continue;
2147                         }
2148                 }
2149
2150                 if (virtio_dev_tx_single_packed(dev, vq, mbuf_pool,
2151                                                 &pkts[pkt_idx]))
2152                         break;
2153                 vhost_flush_dequeue_packed(dev, vq);
2154                 pkt_idx++;
2155                 remained--;
2156
2157         } while (remained);
2158
2159         if (vq->shadow_used_idx)
2160                 do_data_copy_dequeue(vq);
2161
2162         return pkt_idx;
2163 }
2164
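/*
 * Illustrative usage sketch (assumptions: BURST_SIZE, handle_pkt(), 'vid',
 * 'q' and 'mbuf_pool' are defined by the application, not by this file).
 * A datapath thread typically polls the dequeue API roughly like this:
 *
 *     struct rte_mbuf *pkts[BURST_SIZE];
 *     uint16_t nb, i;
 *
 *     nb = rte_vhost_dequeue_burst(vid, q * VIRTIO_QNUM + VIRTIO_TXQ,
 *                                  mbuf_pool, pkts, BURST_SIZE);
 *     for (i = 0; i < nb; i++)
 *             handle_pkt(pkts[i]);
 *
 * The returned mbufs are owned by the caller and must be freed by it.
 */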
2165 uint16_t
2166 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
2167         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
2168 {
2169         struct virtio_net *dev;
2170         struct rte_mbuf *rarp_mbuf = NULL;
2171         struct vhost_virtqueue *vq;
2172
2173         dev = get_device(vid);
2174         if (!dev)
2175                 return 0;
2176
2177         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
2178                 RTE_LOG(ERR, VHOST_DATA,
2179                         "(%d) %s: built-in vhost net backend is disabled.\n",
2180                         dev->vid, __func__);
2181                 return 0;
2182         }
2183
2184         if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
2185                 RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
2186                         dev->vid, __func__, queue_id);
2187                 return 0;
2188         }
2189
2190         vq = dev->virtqueue[queue_id];
2191
2192         if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
2193                 return 0;
2194
2195         if (unlikely(vq->enabled == 0)) {
2196                 count = 0;
2197                 goto out_access_unlock;
2198         }
2199
2200         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2201                 vhost_user_iotlb_rd_lock(vq);
2202
2203         if (unlikely(vq->access_ok == 0))
2204                 if (unlikely(vring_translate(dev, vq) < 0)) {
2205                         count = 0;
2206                         goto out;
2207                 }
2208
2209         /*
2210          * Construct a RARP broadcast packet and inject it into the "pkts"
2211          * array, so it looks like the guest actually sent such a packet.
2212          *
2213          * Check user_send_rarp() for more information.
2214          *
2215          * broadcast_rarp shares a cacheline in the virtio_net structure
2216          * with some fields that are accessed during enqueue, and
2217          * rte_atomic16_cmpset() causes a write if using cmpxchg. This could
2218          * result in false sharing between enqueue and dequeue.
2219          *
2220          * Prevent unnecessary false sharing by reading broadcast_rarp first
2221          * and only performing cmpset if the read indicates it is likely to
2222          * be set.
2223          */
2224         if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) &&
2225                         rte_atomic16_cmpset((volatile uint16_t *)
2226                                 &dev->broadcast_rarp.cnt, 1, 0))) {
2227
2228                 rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
2229                 if (rarp_mbuf == NULL) {
2230                         RTE_LOG(ERR, VHOST_DATA,
2231                                 "Failed to make RARP packet.\n");
2232                         count = 0;
2233                         goto out;
2234                 }
2235                 count -= 1;
2236         }
2237
2238         if (vq_is_packed(dev)) {
2239                 if (unlikely(dev->dequeue_zero_copy))
2240                         count = virtio_dev_tx_packed_zmbuf(dev, vq, mbuf_pool,
2241                                                            pkts, count);
2242                 else
2243                         count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts,
2244                                                      count);
2245         } else
2246                 count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count);
2247
2248 out:
2249         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2250                 vhost_user_iotlb_rd_unlock(vq);
2251
2252 out_access_unlock:
2253         rte_spinlock_unlock(&vq->access_lock);
2254
2255         if (unlikely(rarp_mbuf != NULL)) {
2256                 /*
2257                  * Inject it at the head of the "pkts" array, so that the
2258                  * switch's MAC learning table gets updated first.
2259                  */
2260                 memmove(&pkts[1], pkts, count * sizeof(struct rte_mbuf *));
2261                 pkts[0] = rarp_mbuf;
2262                 count += 1;
2263         }
2264
2265         return count;
2266 }