vhost: update packed ring dequeue
lib/librte_vhost/virtio_net.c (dpdk.git)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <stdbool.h>
7 #include <linux/virtio_net.h>
8
9 #include <rte_mbuf.h>
10 #include <rte_memcpy.h>
11 #include <rte_ether.h>
12 #include <rte_ip.h>
13 #include <rte_vhost.h>
14 #include <rte_tcp.h>
15 #include <rte_udp.h>
16 #include <rte_sctp.h>
17 #include <rte_arp.h>
18 #include <rte_spinlock.h>
19 #include <rte_malloc.h>
20
21 #include "iotlb.h"
22 #include "vhost.h"
23
24 #define MAX_PKT_BURST 32
25
26 #define MAX_BATCH_LEN 256
27
28 static __rte_always_inline bool
29 rxvq_is_mergeable(struct virtio_net *dev)
30 {
31         return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
32 }
33
34 static bool
35 is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
36 {
37         return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
38 }
39
40 static __rte_always_inline void
41 do_flush_shadow_used_ring_split(struct virtio_net *dev,
42                         struct vhost_virtqueue *vq,
43                         uint16_t to, uint16_t from, uint16_t size)
44 {
45         rte_memcpy(&vq->used->ring[to],
46                         &vq->shadow_used_split[from],
47                         size * sizeof(struct vring_used_elem));
48         vhost_log_cache_used_vring(dev, vq,
49                         offsetof(struct vring_used, ring[to]),
50                         size * sizeof(struct vring_used_elem));
51 }
52
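/*
 * Copy the shadow used entries into the split ring's used ring, wrapping
 * at the ring boundary, then publish the new used index after a write
 * barrier.
 */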
53 static __rte_always_inline void
54 flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
55 {
56         uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
57
58         if (used_idx + vq->shadow_used_idx <= vq->size) {
59                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
60                                           vq->shadow_used_idx);
61         } else {
62                 uint16_t size;
63
64                 /* first chunk: used ring interval [used_idx, vq->size) */
65                 size = vq->size - used_idx;
66                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);
67
68                 /* wrap around: remaining interval [0, shadow_used_idx - size) */
69                 do_flush_shadow_used_ring_split(dev, vq, 0, size,
70                                           vq->shadow_used_idx - size);
71         }
72         vq->last_used_idx += vq->shadow_used_idx;
73
74         rte_smp_wmb();
75
76         vhost_log_cache_sync(dev, vq);
77
78         *(volatile uint16_t *)&vq->used->idx += vq->shadow_used_idx;
79         vq->shadow_used_idx = 0;
80         vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
81                 sizeof(vq->used->idx));
82 }
83
84 static __rte_always_inline void
85 update_shadow_used_ring_split(struct vhost_virtqueue *vq,
86                          uint16_t desc_idx, uint32_t len)
87 {
88         uint16_t i = vq->shadow_used_idx++;
89
90         vq->shadow_used_split[i].id  = desc_idx;
91         vq->shadow_used_split[i].len = len;
92 }
93
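/*
 * Write the shadowed enqueue completions back into the packed ring.
 * Descriptor ids and lengths are stored first; flags follow after a
 * write barrier, and the head descriptor's flags are written last so
 * the guest never observes a partially updated chain.
 */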
94 static __rte_always_inline void
95 vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
96                                   struct vhost_virtqueue *vq)
97 {
98         int i;
99         uint16_t used_idx = vq->last_used_idx;
100         uint16_t head_idx = vq->last_used_idx;
101         uint16_t head_flags = 0;
102
103         /* Split loop in two to save memory barriers */
104         for (i = 0; i < vq->shadow_used_idx; i++) {
105                 vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
106                 vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;
107
108                 used_idx += vq->shadow_used_packed[i].count;
109                 if (used_idx >= vq->size)
110                         used_idx -= vq->size;
111         }
112
113         rte_smp_wmb();
114
115         for (i = 0; i < vq->shadow_used_idx; i++) {
116                 uint16_t flags;
117
118                 if (vq->shadow_used_packed[i].len)
119                         flags = VRING_DESC_F_WRITE;
120                 else
121                         flags = 0;
122
123                 if (vq->used_wrap_counter) {
124                         flags |= VRING_DESC_F_USED;
125                         flags |= VRING_DESC_F_AVAIL;
126                 } else {
127                         flags &= ~VRING_DESC_F_USED;
128                         flags &= ~VRING_DESC_F_AVAIL;
129                 }
130
131                 if (i > 0) {
132                         vq->desc_packed[vq->last_used_idx].flags = flags;
133
134                         vhost_log_cache_used_vring(dev, vq,
135                                         vq->last_used_idx *
136                                         sizeof(struct vring_packed_desc),
137                                         sizeof(struct vring_packed_desc));
138                 } else {
139                         head_idx = vq->last_used_idx;
140                         head_flags = flags;
141                 }
142
143                 vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count);
144         }
145
146         vq->desc_packed[head_idx].flags = head_flags;
147
148         vhost_log_cache_used_vring(dev, vq,
149                                 head_idx *
150                                 sizeof(struct vring_packed_desc),
151                                 sizeof(struct vring_packed_desc));
152
153         vq->shadow_used_idx = 0;
154         vhost_log_cache_sync(dev, vq);
155 }
156
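/*
 * Flush a single shadowed dequeue completion: write the buffer id of
 * the head descriptor, then its flags after a write barrier, and log
 * the descriptor update for live migration.
 */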
157 static __rte_always_inline void
158 vhost_flush_dequeue_shadow_packed(struct virtio_net *dev,
159                                   struct vhost_virtqueue *vq)
160 {
161         struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];
162
163         vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
164         rte_smp_wmb();
165         vq->desc_packed[vq->shadow_last_used_idx].flags = used_elem->flags;
166
167         vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
168                                    sizeof(struct vring_packed_desc),
169                                    sizeof(struct vring_packed_desc));
170         vq->shadow_used_idx = 0;
171         vhost_log_cache_sync(dev, vq);
172 }
173
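/*
 * Batched variant of the enqueue flush: mark PACKED_BATCH_SIZE
 * consecutive descriptors as used in one go (ids and lengths first,
 * flags after a write barrier) and log the whole range at once.
 */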
174 static __rte_always_inline void
175 vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
176                                  struct vhost_virtqueue *vq,
177                                  uint64_t *lens,
178                                  uint16_t *ids)
179 {
180         uint16_t i;
181         uint16_t flags;
182
183         flags = PACKED_DESC_ENQUEUE_USED_FLAG(vq->used_wrap_counter);
184
185         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
186                 vq->desc_packed[vq->last_used_idx + i].id = ids[i];
187                 vq->desc_packed[vq->last_used_idx + i].len = lens[i];
188         }
189
190         rte_smp_wmb();
191
192         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
193                 vq->desc_packed[vq->last_used_idx + i].flags = flags;
194
195         vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
196                                    sizeof(struct vring_packed_desc),
197                                    sizeof(struct vring_packed_desc) *
198                                    PACKED_BATCH_SIZE);
199         vhost_log_cache_sync(dev, vq);
200
201         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
202 }
203
204 static __rte_always_inline void
205 flush_shadow_used_ring_packed(struct virtio_net *dev,
206                         struct vhost_virtqueue *vq)
207 {
208         int i;
209         uint16_t used_idx = vq->last_used_idx;
210         uint16_t head_idx = vq->last_used_idx;
211         uint16_t head_flags = 0;
212
213         /* Split loop in two to save memory barriers */
214         for (i = 0; i < vq->shadow_used_idx; i++) {
215                 vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
216                 vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;
217
218                 used_idx += vq->shadow_used_packed[i].count;
219                 if (used_idx >= vq->size)
220                         used_idx -= vq->size;
221         }
222
223         for (i = 0; i < vq->shadow_used_idx; i++) {
224                 uint16_t flags;
225
226                 if (vq->shadow_used_packed[i].len)
227                         flags = VRING_DESC_F_WRITE;
228                 else
229                         flags = 0;
230
231                 if (vq->used_wrap_counter) {
232                         flags |= VRING_DESC_F_USED;
233                         flags |= VRING_DESC_F_AVAIL;
234                 } else {
235                         flags &= ~VRING_DESC_F_USED;
236                         flags &= ~VRING_DESC_F_AVAIL;
237                 }
238
239                 if (i > 0) {
240                         vq->desc_packed[vq->last_used_idx].flags = flags;
241
242                         vhost_log_cache_used_vring(dev, vq,
243                                         vq->last_used_idx *
244                                         sizeof(struct vring_packed_desc),
245                                         sizeof(struct vring_packed_desc));
246                 } else {
247                         head_idx = vq->last_used_idx;
248                         head_flags = flags;
249                 }
250
251                 vq_inc_last_used_packed(vq, vq->shadow_used_packed[i].count);
252         }
253
254         __atomic_store_n(&vq->desc_packed[head_idx].flags, head_flags,
255                          __ATOMIC_RELEASE);
256
257         vhost_log_cache_used_vring(dev, vq,
258                                 head_idx *
259                                 sizeof(struct vring_packed_desc),
260                                 sizeof(struct vring_packed_desc));
261
262         vq->shadow_used_idx = 0;
263         vhost_log_cache_sync(dev, vq);
264 }
265
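/*
 * Shadow a batch of dequeue completions. The first completion of a new
 * shadow window is kept in shadow_used_packed[0] for a deferred flush;
 * the remaining descriptors in the batch are marked used immediately.
 */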
266 static __rte_always_inline void
267 vhost_shadow_dequeue_batch_packed(struct virtio_net *dev,
268                                   struct vhost_virtqueue *vq,
269                                   uint16_t *ids)
270 {
271         uint16_t flags;
272         uint16_t i;
273         uint16_t begin;
274
275         flags = PACKED_DESC_DEQUEUE_USED_FLAG(vq->used_wrap_counter);
276
277         if (!vq->shadow_used_idx) {
278                 vq->shadow_last_used_idx = vq->last_used_idx;
279                 vq->shadow_used_packed[0].id  = ids[0];
280                 vq->shadow_used_packed[0].len = 0;
281                 vq->shadow_used_packed[0].count = 1;
282                 vq->shadow_used_packed[0].flags = flags;
283                 vq->shadow_used_idx++;
284                 begin = 1;
285         } else
286                 begin = 0;
287
288         vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE) {
289                 vq->desc_packed[vq->last_used_idx + i].id = ids[i];
290                 vq->desc_packed[vq->last_used_idx + i].len = 0;
291         }
292
293         rte_smp_wmb();
294         vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE)
295                 vq->desc_packed[vq->last_used_idx + i].flags = flags;
296
297         vhost_log_cache_used_vring(dev, vq, vq->last_used_idx *
298                                    sizeof(struct vring_packed_desc),
299                                    sizeof(struct vring_packed_desc) *
300                                    PACKED_BATCH_SIZE);
301         vhost_log_cache_sync(dev, vq);
302
303         vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
304 }
305
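/*
 * Shadow a single dequeue completion. If no completion is pending, the
 * head entry is kept in the shadow ring for a deferred flush; otherwise
 * the descriptor is marked used in place.
 */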
306 static __rte_always_inline void
307 vhost_shadow_dequeue_single_packed(struct vhost_virtqueue *vq,
308                                    uint16_t buf_id,
309                                    uint16_t count)
310 {
311         uint16_t flags;
312
313         flags = vq->desc_packed[vq->last_used_idx].flags;
314         if (vq->used_wrap_counter) {
315                 flags |= VRING_DESC_F_USED;
316                 flags |= VRING_DESC_F_AVAIL;
317         } else {
318                 flags &= ~VRING_DESC_F_USED;
319                 flags &= ~VRING_DESC_F_AVAIL;
320         }
321
322         if (!vq->shadow_used_idx) {
323                 vq->shadow_last_used_idx = vq->last_used_idx;
324
325                 vq->shadow_used_packed[0].id  = buf_id;
326                 vq->shadow_used_packed[0].len = 0;
327                 vq->shadow_used_packed[0].flags = flags;
328                 vq->shadow_used_idx++;
329         } else {
330                 vq->desc_packed[vq->last_used_idx].id = buf_id;
331                 vq->desc_packed[vq->last_used_idx].len = 0;
332                 vq->desc_packed[vq->last_used_idx].flags = flags;
333         }
334
335         vq_inc_last_used_packed(vq, count);
336 }
337
338 static __rte_always_inline void
339 update_shadow_used_ring_packed(struct vhost_virtqueue *vq,
340                          uint16_t desc_idx, uint32_t len, uint16_t count)
341 {
342         uint16_t i = vq->shadow_used_idx++;
343
344         vq->shadow_used_packed[i].id  = desc_idx;
345         vq->shadow_used_packed[i].len = len;
346         vq->shadow_used_packed[i].count = count;
347 }
348
349 static inline void
350 do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
351 {
352         struct batch_copy_elem *elem = vq->batch_copy_elems;
353         uint16_t count = vq->batch_copy_nb_elems;
354         int i;
355
356         for (i = 0; i < count; i++) {
357                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
358                 vhost_log_cache_write_iova(dev, vq, elem[i].log_addr,
359                                            elem[i].len);
360                 PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
361         }
362
363         vq->batch_copy_nb_elems = 0;
364 }
365
366 static inline void
367 do_data_copy_dequeue(struct vhost_virtqueue *vq)
368 {
369         struct batch_copy_elem *elem = vq->batch_copy_elems;
370         uint16_t count = vq->batch_copy_nb_elems;
371         int i;
372
373         for (i = 0; i < count; i++)
374                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
375
376         vq->batch_copy_nb_elems = 0;
377 }
378
379 static __rte_always_inline void
380 vhost_shadow_enqueue_single_packed(struct virtio_net *dev,
381                                    struct vhost_virtqueue *vq,
382                                    uint32_t len[],
383                                    uint16_t id[],
384                                    uint16_t count[],
385                                    uint16_t num_buffers)
386 {
387         uint16_t i;
388         for (i = 0; i < num_buffers; i++) {
389                 /* track the offset within a batch so shadow flushes stay batch-aligned */
390                 if (!vq->shadow_used_idx)
391                         vq->shadow_aligned_idx = vq->last_used_idx &
392                                 PACKED_BATCH_MASK;
393                 vq->shadow_used_packed[vq->shadow_used_idx].id  = id[i];
394                 vq->shadow_used_packed[vq->shadow_used_idx].len = len[i];
395                 vq->shadow_used_packed[vq->shadow_used_idx].count = count[i];
396                 vq->shadow_aligned_idx += count[i];
397                 vq->shadow_used_idx++;
398         }
399
400         if (vq->shadow_aligned_idx >= PACKED_BATCH_SIZE) {
401                 do_data_copy_enqueue(dev, vq);
402                 vhost_flush_enqueue_shadow_packed(dev, vq);
403         }
404 }
405
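/*
 * Defer flushing the shadowed dequeue completion until the used index
 * has advanced to within one burst of wrapping the ring, so that
 * used-ring writes and guest notifications stay batched.
 */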
406 static __rte_unused void
407 vhost_flush_dequeue_packed(struct virtio_net *dev,
408                            struct vhost_virtqueue *vq)
409 {
410         int shadow_count;
411         if (!vq->shadow_used_idx)
412                 return;
413
414         shadow_count = vq->last_used_idx - vq->shadow_last_used_idx;
415         if (shadow_count <= 0)
416                 shadow_count += vq->size;
417
418         if ((uint32_t)shadow_count >= (vq->size - MAX_PKT_BURST)) {
419                 do_data_copy_dequeue(vq);
420                 vhost_flush_dequeue_shadow_packed(dev, vq);
421                 vhost_vring_call_packed(dev, vq);
422         }
423 }
424
425 /* skip the write when the value is already set, to lessen cache pressure */
426 #define ASSIGN_UNLESS_EQUAL(var, val) do {      \
427         if ((var) != (val))                     \
428                 (var) = (val);                  \
429 } while (0)
430
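/*
 * Translate the mbuf Tx offload flags into the virtio-net header read
 * by the guest: L4 checksum requests map to VIRTIO_NET_HDR_F_NEEDS_CSUM
 * plus csum_start/csum_offset, and TSO/UFO requests map to the gso_*
 * fields.
 */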
431 static __rte_always_inline void
432 virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
433 {
434         uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;
435
436         if (m_buf->ol_flags & PKT_TX_TCP_SEG)
437                 csum_l4 |= PKT_TX_TCP_CKSUM;
438
439         if (csum_l4) {
440                 net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
441                 net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
442
443                 switch (csum_l4) {
444                 case PKT_TX_TCP_CKSUM:
445                         net_hdr->csum_offset = (offsetof(struct rte_tcp_hdr,
446                                                 cksum));
447                         break;
448                 case PKT_TX_UDP_CKSUM:
449                         net_hdr->csum_offset = (offsetof(struct rte_udp_hdr,
450                                                 dgram_cksum));
451                         break;
452                 case PKT_TX_SCTP_CKSUM:
453                         net_hdr->csum_offset = (offsetof(struct rte_sctp_hdr,
454                                                 cksum));
455                         break;
456                 }
457         } else {
458                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
459                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
460                 ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
461         }
462
463         /* IP cksum offload cannot be requested via the virtio-net header, so compute it here */
464         if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
465                 struct rte_ipv4_hdr *ipv4_hdr;
466
467                 ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
468                                                    m_buf->l2_len);
469                 ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
470         }
471
472         if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
473                 if (m_buf->ol_flags & PKT_TX_IPV4)
474                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
475                 else
476                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
477                 net_hdr->gso_size = m_buf->tso_segsz;
478                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
479                                         + m_buf->l4_len;
480         } else if (m_buf->ol_flags & PKT_TX_UDP_SEG) {
481                 net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
482                 net_hdr->gso_size = m_buf->tso_segsz;
483                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
484                         m_buf->l4_len;
485         } else {
486                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
487                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
488                 ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
489         }
490 }
491
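/*
 * Map one descriptor's guest IOVA range into host virtual addresses.
 * A single descriptor may be split across several buf_vec entries when
 * the range is not contiguous in the host address space.
 */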
492 static __rte_always_inline int
493 map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
494                 struct buf_vector *buf_vec, uint16_t *vec_idx,
495                 uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
496 {
497         uint16_t vec_id = *vec_idx;
498
499         while (desc_len) {
500                 uint64_t desc_addr;
501                 uint64_t desc_chunck_len = desc_len;
502
503                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
504                         return -1;
505
506                 desc_addr = vhost_iova_to_vva(dev, vq,
507                                 desc_iova,
508                                 &desc_chunck_len,
509                                 perm);
510                 if (unlikely(!desc_addr))
511                         return -1;
512
513                 rte_prefetch0((void *)(uintptr_t)desc_addr);
514
515                 buf_vec[vec_id].buf_iova = desc_iova;
516                 buf_vec[vec_id].buf_addr = desc_addr;
517                 buf_vec[vec_id].buf_len  = desc_chunck_len;
518
519                 desc_len -= desc_chunck_len;
520                 desc_iova += desc_chunck_len;
521                 vec_id++;
522         }
523         *vec_idx = vec_id;
524
525         return 0;
526 }
527
528 static __rte_always_inline int
529 fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
530                          uint32_t avail_idx, uint16_t *vec_idx,
531                          struct buf_vector *buf_vec, uint16_t *desc_chain_head,
532                          uint32_t *desc_chain_len, uint8_t perm)
533 {
534         uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
535         uint16_t vec_id = *vec_idx;
536         uint32_t len    = 0;
537         uint64_t dlen;
538         uint32_t nr_descs = vq->size;
539         uint32_t cnt    = 0;
540         struct vring_desc *descs = vq->desc;
541         struct vring_desc *idesc = NULL;
542
543         if (unlikely(idx >= vq->size))
544                 return -1;
545
546         *desc_chain_head = idx;
547
548         if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
549                 dlen = vq->desc[idx].len;
550                 nr_descs = dlen / sizeof(struct vring_desc);
551                 if (unlikely(nr_descs > vq->size))
552                         return -1;
553
554                 descs = (struct vring_desc *)(uintptr_t)
555                         vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
556                                                 &dlen,
557                                                 VHOST_ACCESS_RO);
558                 if (unlikely(!descs))
559                         return -1;
560
561                 if (unlikely(dlen < vq->desc[idx].len)) {
562                         /*
563                          * The indirect desc table is not contiguous
564                          * in the process VA space, so we have to copy it.
565                          */
566                         idesc = vhost_alloc_copy_ind_table(dev, vq,
567                                         vq->desc[idx].addr, vq->desc[idx].len);
568                         if (unlikely(!idesc))
569                                 return -1;
570
571                         descs = idesc;
572                 }
573
574                 idx = 0;
575         }
576
577         while (1) {
578                 if (unlikely(idx >= nr_descs || cnt++ >= nr_descs)) {
579                         free_ind_table(idesc);
580                         return -1;
581                 }
582
583                 len += descs[idx].len;
584
585                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
586                                                 descs[idx].addr, descs[idx].len,
587                                                 perm))) {
588                         free_ind_table(idesc);
589                         return -1;
590                 }
591
592                 if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
593                         break;
594
595                 idx = descs[idx].next;
596         }
597
598         *desc_chain_len = len;
599         *vec_idx = vec_id;
600
601         if (unlikely(!!idesc))
602                 free_ind_table(idesc);
603
604         return 0;
605 }
606
607 /*
608  * Returns -1 on failure, 0 on success
609  */
610 static inline int
611 reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
612                                 uint32_t size, struct buf_vector *buf_vec,
613                                 uint16_t *num_buffers, uint16_t avail_head,
614                                 uint16_t *nr_vec)
615 {
616         uint16_t cur_idx;
617         uint16_t vec_idx = 0;
618         uint16_t max_tries, tries = 0;
619
620         uint16_t head_idx = 0;
621         uint32_t len = 0;
622
623         *num_buffers = 0;
624         cur_idx  = vq->last_avail_idx;
625
626         if (rxvq_is_mergeable(dev))
627                 max_tries = vq->size - 1;
628         else
629                 max_tries = 1;
630
631         while (size > 0) {
632                 if (unlikely(cur_idx == avail_head))
633                         return -1;
634                 /*
635                  * If we have tried all available ring items and still
636                  * cannot get enough buffers, something abnormal has
637                  * happened.
638                  */
639                 if (unlikely(++tries > max_tries))
640                         return -1;
641
642                 if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
643                                                 &vec_idx, buf_vec,
644                                                 &head_idx, &len,
645                                                 VHOST_ACCESS_RW) < 0))
646                         return -1;
647                 len = RTE_MIN(len, size);
648                 update_shadow_used_ring_split(vq, head_idx, len);
649                 size -= len;
650
651                 cur_idx++;
652                 *num_buffers += 1;
653         }
654
655         *nr_vec = vec_idx;
656
657         return 0;
658 }
659
660 static __rte_always_inline int
661 fill_vec_buf_packed_indirect(struct virtio_net *dev,
662                         struct vhost_virtqueue *vq,
663                         struct vring_packed_desc *desc, uint16_t *vec_idx,
664                         struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
665 {
666         uint16_t i;
667         uint32_t nr_descs;
668         uint16_t vec_id = *vec_idx;
669         uint64_t dlen;
670         struct vring_packed_desc *descs, *idescs = NULL;
671
672         dlen = desc->len;
673         descs = (struct vring_packed_desc *)(uintptr_t)
674                 vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
675         if (unlikely(!descs))
676                 return -1;
677
678         if (unlikely(dlen < desc->len)) {
679                 /*
680                  * The indirect desc table is not contiguous
681                  * in the process VA space, so we have to copy it.
682                  */
683                 idescs = vhost_alloc_copy_ind_table(dev,
684                                 vq, desc->addr, desc->len);
685                 if (unlikely(!idescs))
686                         return -1;
687
688                 descs = idescs;
689         }
690
691         nr_descs = desc->len / sizeof(struct vring_packed_desc);
692         if (unlikely(nr_descs >= vq->size)) {
693                 free_ind_table(idescs);
694                 return -1;
695         }
696
697         for (i = 0; i < nr_descs; i++) {
698                 if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
699                         free_ind_table(idescs);
700                         return -1;
701                 }
702
703                 *len += descs[i].len;
704                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
705                                                 descs[i].addr, descs[i].len,
706                                                 perm)))
707                         return -1;
708         }
709         *vec_idx = vec_id;
710
711         if (unlikely(!!idescs))
712                 free_ind_table(idescs);
713
714         return 0;
715 }
716
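/*
 * Collect the buffers of one packed-ring descriptor chain, starting at
 * avail_idx, into buf_vec. Handles indirect descriptors and wrap-around
 * of the avail wrap counter; returns the buffer id and total length.
 */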
717 static __rte_always_inline int
718 fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
719                                 uint16_t avail_idx, uint16_t *desc_count,
720                                 struct buf_vector *buf_vec, uint16_t *vec_idx,
721                                 uint16_t *buf_id, uint32_t *len, uint8_t perm)
722 {
723         bool wrap_counter = vq->avail_wrap_counter;
724         struct vring_packed_desc *descs = vq->desc_packed;
725         uint16_t vec_id = *vec_idx;
726
727         if (avail_idx < vq->last_avail_idx)
728                 wrap_counter ^= 1;
729
730         /*
731          * Perform a load-acquire barrier in desc_is_avail to
732          * enforce the ordering between desc flags and desc
733          * content.
734          */
735         if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
736                 return -1;
737
738         *desc_count = 0;
739         *len = 0;
740
741         while (1) {
742                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
743                         return -1;
744
745                 if (unlikely(*desc_count >= vq->size))
746                         return -1;
747
748                 *desc_count += 1;
749                 *buf_id = descs[avail_idx].id;
750
751                 if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
752                         if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
753                                                         &descs[avail_idx],
754                                                         &vec_id, buf_vec,
755                                                         len, perm) < 0))
756                                 return -1;
757                 } else {
758                         *len += descs[avail_idx].len;
759
760                         if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
761                                                         descs[avail_idx].addr,
762                                                         descs[avail_idx].len,
763                                                         perm)))
764                                 return -1;
765                 }
766
767                 if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
768                         break;
769
770                 if (++avail_idx >= vq->size) {
771                         avail_idx -= vq->size;
772                         wrap_counter ^= 1;
773                 }
774         }
775
776         *vec_idx = vec_id;
777
778         return 0;
779 }
780
781 /*
782  * Returns -1 on failure, 0 on success
783  */
784 static inline int
785 reserve_avail_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
786                                 uint32_t size, struct buf_vector *buf_vec,
787                                 uint16_t *nr_vec, uint16_t *num_buffers,
788                                 uint16_t *nr_descs)
789 {
790         uint16_t avail_idx;
791         uint16_t vec_idx = 0;
792         uint16_t max_tries, tries = 0;
793
794         uint16_t buf_id = 0;
795         uint32_t len = 0;
796         uint16_t desc_count;
797
798         *num_buffers = 0;
799         avail_idx = vq->last_avail_idx;
800
801         if (rxvq_is_mergeable(dev))
802                 max_tries = vq->size - 1;
803         else
804                 max_tries = 1;
805
806         while (size > 0) {
807                 /*
808                  * If we have tried all available ring items and still
809                  * cannot get enough buffers, something abnormal has
810                  * happened.
811                  */
812                 if (unlikely(++tries > max_tries))
813                         return -1;
814
815                 if (unlikely(fill_vec_buf_packed(dev, vq,
816                                                 avail_idx, &desc_count,
817                                                 buf_vec, &vec_idx,
818                                                 &buf_id, &len,
819                                                 VHOST_ACCESS_RW) < 0))
820                         return -1;
821
822                 len = RTE_MIN(len, size);
823                 update_shadow_used_ring_packed(vq, buf_id, len, desc_count);
824                 size -= len;
825
826                 avail_idx += desc_count;
827                 if (avail_idx >= vq->size)
828                         avail_idx -= vq->size;
829
830                 *nr_descs += desc_count;
831                 *num_buffers += 1;
832         }
833
834         *nr_vec = vec_idx;
835
836         return 0;
837 }
838
839 static __rte_noinline void
840 copy_vnet_hdr_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
841                 struct buf_vector *buf_vec,
842                 struct virtio_net_hdr_mrg_rxbuf *hdr)
843 {
844         uint64_t len;
845         uint64_t remain = dev->vhost_hlen;
846         uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
847         uint64_t iova = buf_vec->buf_iova;
848
849         while (remain) {
850                 len = RTE_MIN(remain,
851                                 buf_vec->buf_len);
852                 dst = buf_vec->buf_addr;
853                 rte_memcpy((void *)(uintptr_t)dst,
854                                 (void *)(uintptr_t)src,
855                                 len);
856
857                 PRINT_PACKET(dev, (uintptr_t)dst,
858                                 (uint32_t)len, 0);
859                 vhost_log_cache_write_iova(dev, vq,
860                                 iova, len);
861
862                 remain -= len;
863                 iova += len;
864                 src += len;
865                 buf_vec++;
866         }
867 }
868
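/*
 * Copy one mbuf chain into the guest buffers described by buf_vec,
 * writing the virtio-net header first. Copies no larger than
 * MAX_BATCH_LEN are deferred to the batch_copy array and performed
 * later by do_data_copy_enqueue().
 */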
869 static __rte_always_inline int
870 copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
871                             struct rte_mbuf *m, struct buf_vector *buf_vec,
872                             uint16_t nr_vec, uint16_t num_buffers)
873 {
874         uint32_t vec_idx = 0;
875         uint32_t mbuf_offset, mbuf_avail;
876         uint32_t buf_offset, buf_avail;
877         uint64_t buf_addr, buf_iova, buf_len;
878         uint32_t cpy_len;
879         uint64_t hdr_addr;
880         struct rte_mbuf *hdr_mbuf;
881         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
882         struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
883         int error = 0;
884
885         if (unlikely(m == NULL)) {
886                 error = -1;
887                 goto out;
888         }
889
890         buf_addr = buf_vec[vec_idx].buf_addr;
891         buf_iova = buf_vec[vec_idx].buf_iova;
892         buf_len = buf_vec[vec_idx].buf_len;
893
894         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
895                 error = -1;
896                 goto out;
897         }
898
899         hdr_mbuf = m;
900         hdr_addr = buf_addr;
901         if (unlikely(buf_len < dev->vhost_hlen))
902                 hdr = &tmp_hdr;
903         else
904                 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
905
906         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
907                 dev->vid, num_buffers);
908
909         if (unlikely(buf_len < dev->vhost_hlen)) {
910                 buf_offset = dev->vhost_hlen - buf_len;
911                 vec_idx++;
912                 buf_addr = buf_vec[vec_idx].buf_addr;
913                 buf_iova = buf_vec[vec_idx].buf_iova;
914                 buf_len = buf_vec[vec_idx].buf_len;
915                 buf_avail = buf_len - buf_offset;
916         } else {
917                 buf_offset = dev->vhost_hlen;
918                 buf_avail = buf_len - dev->vhost_hlen;
919         }
920
921         mbuf_avail  = rte_pktmbuf_data_len(m);
922         mbuf_offset = 0;
923         while (mbuf_avail != 0 || m->next != NULL) {
924                 /* done with current buf, get the next one */
925                 if (buf_avail == 0) {
926                         vec_idx++;
927                         if (unlikely(vec_idx >= nr_vec)) {
928                                 error = -1;
929                                 goto out;
930                         }
931
932                         buf_addr = buf_vec[vec_idx].buf_addr;
933                         buf_iova = buf_vec[vec_idx].buf_iova;
934                         buf_len = buf_vec[vec_idx].buf_len;
935
936                         buf_offset = 0;
937                         buf_avail  = buf_len;
938                 }
939
940                 /* done with current mbuf, get the next one */
941                 if (mbuf_avail == 0) {
942                         m = m->next;
943
944                         mbuf_offset = 0;
945                         mbuf_avail  = rte_pktmbuf_data_len(m);
946                 }
947
948                 if (hdr_addr) {
949                         virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
950                         if (rxvq_is_mergeable(dev))
951                                 ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
952                                                 num_buffers);
953
954                         if (unlikely(hdr == &tmp_hdr)) {
955                                 copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
956                         } else {
957                                 PRINT_PACKET(dev, (uintptr_t)hdr_addr,
958                                                 dev->vhost_hlen, 0);
959                                 vhost_log_cache_write_iova(dev, vq,
960                                                 buf_vec[0].buf_iova,
961                                                 dev->vhost_hlen);
962                         }
963
964                         hdr_addr = 0;
965                 }
966
967                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
968
969                 if (likely(cpy_len > MAX_BATCH_LEN ||
970                                         vq->batch_copy_nb_elems >= vq->size)) {
971                         rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
972                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
973                                 cpy_len);
974                         vhost_log_cache_write_iova(dev, vq,
975                                                    buf_iova + buf_offset,
976                                                    cpy_len);
977                         PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
978                                 cpy_len, 0);
979                 } else {
980                         batch_copy[vq->batch_copy_nb_elems].dst =
981                                 (void *)((uintptr_t)(buf_addr + buf_offset));
982                         batch_copy[vq->batch_copy_nb_elems].src =
983                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
984                         batch_copy[vq->batch_copy_nb_elems].log_addr =
985                                 buf_iova + buf_offset;
986                         batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
987                         vq->batch_copy_nb_elems++;
988                 }
989
990                 mbuf_avail  -= cpy_len;
991                 mbuf_offset += cpy_len;
992                 buf_avail  -= cpy_len;
993                 buf_offset += cpy_len;
994         }
995
996 out:
997
998         return error;
999 }
1000
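/*
 * Enqueue a single mbuf into the packed ring, possibly spanning several
 * descriptor chains when mergeable Rx buffers are negotiated, then
 * record the used entries in the shadow ring.
 */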
1001 static __rte_always_inline int
1002 vhost_enqueue_single_packed(struct virtio_net *dev,
1003                             struct vhost_virtqueue *vq,
1004                             struct rte_mbuf *pkt,
1005                             struct buf_vector *buf_vec,
1006                             uint16_t *nr_descs)
1007 {
1008         uint16_t nr_vec = 0;
1009         uint16_t avail_idx = vq->last_avail_idx;
1010         uint16_t max_tries, tries = 0;
1011         uint16_t buf_id = 0;
1012         uint32_t len = 0;
1013         uint16_t desc_count;
1014         uint32_t size = pkt->pkt_len + dev->vhost_hlen;
1015         uint16_t num_buffers = 0;
1016         uint32_t buffer_len[vq->size];
1017         uint16_t buffer_buf_id[vq->size];
1018         uint16_t buffer_desc_count[vq->size];
1019
1020         if (rxvq_is_mergeable(dev))
1021                 max_tries = vq->size - 1;
1022         else
1023                 max_tries = 1;
1024
1025         while (size > 0) {
1026                 /*
1027                  * If we have tried all available ring items and still
1028                  * cannot get enough buffers, something abnormal has
1029                  * happened.
1030                  */
1031                 if (unlikely(++tries > max_tries))
1032                         return -1;
1033
1034                 if (unlikely(fill_vec_buf_packed(dev, vq,
1035                                                 avail_idx, &desc_count,
1036                                                 buf_vec, &nr_vec,
1037                                                 &buf_id, &len,
1038                                                 VHOST_ACCESS_RW) < 0))
1039                         return -1;
1040
1041                 len = RTE_MIN(len, size);
1042                 size -= len;
1043
1044                 buffer_len[num_buffers] = len;
1045                 buffer_buf_id[num_buffers] = buf_id;
1046                 buffer_desc_count[num_buffers] = desc_count;
1047                 num_buffers += 1;
1048
1049                 *nr_descs += desc_count;
1050                 avail_idx += desc_count;
1051                 if (avail_idx >= vq->size)
1052                         avail_idx -= vq->size;
1053         }
1054
1055         if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0)
1056                 return -1;
1057
1058         vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
1059                                            buffer_desc_count, num_buffers);
1060
1061         return 0;
1062 }
1063
1064 static __rte_noinline uint32_t
1065 virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
1066         struct rte_mbuf **pkts, uint32_t count)
1067 {
1068         uint32_t pkt_idx = 0;
1069         uint16_t num_buffers;
1070         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1071         uint16_t avail_head;
1072
1073         avail_head = *((volatile uint16_t *)&vq->avail->idx);
1074
1075         /*
1076          * The ordering between avail index and
1077          * desc reads needs to be enforced.
1078          */
1079         rte_smp_rmb();
1080
1081         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1082
1083         for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
1084                 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
1085                 uint16_t nr_vec = 0;
1086
1087                 if (unlikely(reserve_avail_buf_split(dev, vq,
1088                                                 pkt_len, buf_vec, &num_buffers,
1089                                                 avail_head, &nr_vec) < 0)) {
1090                         VHOST_LOG_DEBUG(VHOST_DATA,
1091                                 "(%d) failed to get enough desc from vring\n",
1092                                 dev->vid);
1093                         vq->shadow_used_idx -= num_buffers;
1094                         break;
1095                 }
1096
1097                 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
1098                         dev->vid, vq->last_avail_idx,
1099                         vq->last_avail_idx + num_buffers);
1100
1101                 if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
1102                                                 buf_vec, nr_vec,
1103                                                 num_buffers) < 0) {
1104                         vq->shadow_used_idx -= num_buffers;
1105                         break;
1106                 }
1107
1108                 vq->last_avail_idx += num_buffers;
1109         }
1110
1111         do_data_copy_enqueue(dev, vq);
1112
1113         if (likely(vq->shadow_used_idx)) {
1114                 flush_shadow_used_ring_split(dev, vq);
1115                 vhost_vring_call_split(dev, vq);
1116         }
1117
1118         return pkt_idx;
1119 }
1120
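/*
 * Fast path: place PACKED_BATCH_SIZE single-mbuf packets into
 * PACKED_BATCH_SIZE consecutive descriptors. Returns -1 to fall back to
 * the single-packet path whenever a packet is chained, a descriptor is
 * unavailable or too small, or the batch would wrap the ring.
 */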
1121 static __rte_unused int
1122 virtio_dev_rx_batch_packed(struct virtio_net *dev,
1123                            struct vhost_virtqueue *vq,
1124                            struct rte_mbuf **pkts)
1125 {
1126         bool wrap_counter = vq->avail_wrap_counter;
1127         struct vring_packed_desc *descs = vq->desc_packed;
1128         uint16_t avail_idx = vq->last_avail_idx;
1129         uint64_t desc_addrs[PACKED_BATCH_SIZE];
1130         struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
1131         uint32_t buf_offset = dev->vhost_hlen;
1132         uint64_t lens[PACKED_BATCH_SIZE];
1133         uint16_t ids[PACKED_BATCH_SIZE];
1134         uint16_t i;
1135
1136         if (unlikely(avail_idx & PACKED_BATCH_MASK))
1137                 return -1;
1138
1139         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
1140                 return -1;
1141
1142         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1143                 if (unlikely(pkts[i]->next != NULL))
1144                         return -1;
1145                 if (unlikely(!desc_is_avail(&descs[avail_idx + i],
1146                                             wrap_counter)))
1147                         return -1;
1148         }
1149
1150         rte_smp_rmb();
1151
1152         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1153                 lens[i] = descs[avail_idx + i].len;
1154
1155         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1156                 if (unlikely(pkts[i]->pkt_len > (lens[i] - buf_offset)))
1157                         return -1;
1158         }
1159
1160         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1161                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
1162                                                   descs[avail_idx + i].addr,
1163                                                   &lens[i],
1164                                                   VHOST_ACCESS_RW);
1165
1166         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1167                 if (unlikely(lens[i] != descs[avail_idx + i].len))
1168                         return -1;
1169         }
1170
1171         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1172                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
1173                 hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
1174                                         (uintptr_t)desc_addrs[i];
1175                 lens[i] = pkts[i]->pkt_len + dev->vhost_hlen;
1176         }
1177
1178         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1179                 virtio_enqueue_offload(pkts[i], &hdrs[i]->hdr);
1180
1181         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
1182
1183         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1184                 rte_memcpy((void *)(uintptr_t)(desc_addrs[i] + buf_offset),
1185                            rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
1186                            pkts[i]->pkt_len);
1187         }
1188
1189         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1190                 ids[i] = descs[avail_idx + i].id;
1191
1192         vhost_flush_enqueue_batch_packed(dev, vq, lens, ids);
1193
1194         return 0;
1195 }
1196
1197 static __rte_unused int16_t
1198 virtio_dev_rx_single_packed(struct virtio_net *dev,
1199                             struct vhost_virtqueue *vq,
1200                             struct rte_mbuf *pkt)
1201 {
1202         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1203         uint16_t nr_descs = 0;
1204
1205         rte_smp_rmb();
1206         if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
1207                                                  &nr_descs) < 0)) {
1208                 VHOST_LOG_DEBUG(VHOST_DATA,
1209                                 "(%d) failed to get enough desc from vring\n",
1210                                 dev->vid);
1211                 return -1;
1212         }
1213
1214         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
1215                         dev->vid, vq->last_avail_idx,
1216                         vq->last_avail_idx + nr_descs);
1217
1218         vq_inc_last_avail_packed(vq, nr_descs);
1219
1220         return 0;
1221 }
1222
1223 static __rte_noinline uint32_t
1224 virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
1225         struct rte_mbuf **pkts, uint32_t count)
1226 {
1227         uint32_t pkt_idx = 0;
1228         uint16_t num_buffers;
1229         struct buf_vector buf_vec[BUF_VECTOR_MAX];
1230
1231         for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
1232                 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
1233                 uint16_t nr_vec = 0;
1234                 uint16_t nr_descs = 0;
1235
1236                 if (unlikely(reserve_avail_buf_packed(dev, vq,
1237                                                 pkt_len, buf_vec, &nr_vec,
1238                                                 &num_buffers, &nr_descs) < 0)) {
1239                         VHOST_LOG_DEBUG(VHOST_DATA,
1240                                 "(%d) failed to get enough desc from vring\n",
1241                                 dev->vid);
1242                         vq->shadow_used_idx -= num_buffers;
1243                         break;
1244                 }
1245
1246                 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
1247                         dev->vid, vq->last_avail_idx,
1248                         vq->last_avail_idx + num_buffers);
1249
1250                 if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
1251                                                 buf_vec, nr_vec,
1252                                                 num_buffers) < 0) {
1253                         vq->shadow_used_idx -= num_buffers;
1254                         break;
1255                 }
1256
1257                 vq_inc_last_avail_packed(vq, nr_descs);
1258         }
1259
1260         do_data_copy_enqueue(dev, vq);
1261
1262         if (likely(vq->shadow_used_idx)) {
1263                 vhost_flush_enqueue_shadow_packed(dev, vq);
1264                 vhost_vring_call_packed(dev, vq);
1265         }
1266
1267         return pkt_idx;
1268 }
1269
1270 static __rte_always_inline uint32_t
1271 virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
1272         struct rte_mbuf **pkts, uint32_t count)
1273 {
1274         struct vhost_virtqueue *vq;
1275         uint32_t nb_tx = 0;
1276
1277         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
1278         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
1279                 RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
1280                         dev->vid, __func__, queue_id);
1281                 return 0;
1282         }
1283
1284         vq = dev->virtqueue[queue_id];
1285
1286         rte_spinlock_lock(&vq->access_lock);
1287
1288         if (unlikely(vq->enabled == 0))
1289                 goto out_access_unlock;
1290
1291         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1292                 vhost_user_iotlb_rd_lock(vq);
1293
1294         if (unlikely(vq->access_ok == 0))
1295                 if (unlikely(vring_translate(dev, vq) < 0))
1296                         goto out;
1297
1298         count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
1299         if (count == 0)
1300                 goto out;
1301
1302         if (vq_is_packed(dev))
1303                 nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
1304         else
1305                 nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);
1306
1307 out:
1308         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1309                 vhost_user_iotlb_rd_unlock(vq);
1310
1311 out_access_unlock:
1312         rte_spinlock_unlock(&vq->access_lock);
1313
1314         return nb_tx;
1315 }
1316
1317 uint16_t
1318 rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
1319         struct rte_mbuf **pkts, uint16_t count)
1320 {
1321         struct virtio_net *dev = get_device(vid);
1322
1323         if (!dev)
1324                 return 0;
1325
1326         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
1327                 RTE_LOG(ERR, VHOST_DATA,
1328                         "(%d) %s: built-in vhost net backend is disabled.\n",
1329                         dev->vid, __func__);
1330                 return 0;
1331         }
1332
1333         return virtio_dev_rx(dev, queue_id, pkts, count);
1334 }
1335
1336 static inline bool
1337 virtio_net_with_host_offload(struct virtio_net *dev)
1338 {
1339         if (dev->features &
1340                         ((1ULL << VIRTIO_NET_F_CSUM) |
1341                          (1ULL << VIRTIO_NET_F_HOST_ECN) |
1342                          (1ULL << VIRTIO_NET_F_HOST_TSO4) |
1343                          (1ULL << VIRTIO_NET_F_HOST_TSO6) |
1344                          (1ULL << VIRTIO_NET_F_HOST_UFO)))
1345                 return true;
1346
1347         return false;
1348 }
1349
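/*
 * Parse the Ethernet/VLAN and IP headers of a packet to find the L4
 * protocol and header, filling in m->l2_len, m->l3_len and the
 * IPv4/IPv6 ol_flags along the way.
 */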
1350 static void
1351 parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
1352 {
1353         struct rte_ipv4_hdr *ipv4_hdr;
1354         struct rte_ipv6_hdr *ipv6_hdr;
1355         void *l3_hdr = NULL;
1356         struct rte_ether_hdr *eth_hdr;
1357         uint16_t ethertype;
1358
1359         eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
1360
1361         m->l2_len = sizeof(struct rte_ether_hdr);
1362         ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
1363
1364         if (ethertype == RTE_ETHER_TYPE_VLAN) {
1365                 struct rte_vlan_hdr *vlan_hdr =
1366                         (struct rte_vlan_hdr *)(eth_hdr + 1);
1367
1368                 m->l2_len += sizeof(struct rte_vlan_hdr);
1369                 ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
1370         }
1371
1372         l3_hdr = (char *)eth_hdr + m->l2_len;
1373
1374         switch (ethertype) {
1375         case RTE_ETHER_TYPE_IPV4:
1376                 ipv4_hdr = l3_hdr;
1377                 *l4_proto = ipv4_hdr->next_proto_id;
1378                 m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
1379                 *l4_hdr = (char *)l3_hdr + m->l3_len;
1380                 m->ol_flags |= PKT_TX_IPV4;
1381                 break;
1382         case RTE_ETHER_TYPE_IPV6:
1383                 ipv6_hdr = l3_hdr;
1384                 *l4_proto = ipv6_hdr->proto;
1385                 m->l3_len = sizeof(struct rte_ipv6_hdr);
1386                 *l4_hdr = (char *)l3_hdr + m->l3_len;
1387                 m->ol_flags |= PKT_TX_IPV6;
1388                 break;
1389         default:
1390                 m->l3_len = 0;
1391                 *l4_proto = 0;
1392                 *l4_hdr = NULL;
1393                 break;
1394         }
1395 }
1396
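/*
 * Convert the virtio-net header written by the guest back into mbuf
 * offload flags: checksum requests become PKT_TX_*_CKSUM and GSO
 * requests become PKT_TX_TCP_SEG/PKT_TX_UDP_SEG with tso_segsz set.
 */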
1397 static __rte_always_inline void
1398 vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
1399 {
1400         uint16_t l4_proto = 0;
1401         void *l4_hdr = NULL;
1402         struct rte_tcp_hdr *tcp_hdr = NULL;
1403
1404         if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
1405                 return;
1406
1407         parse_ethernet(m, &l4_proto, &l4_hdr);
1408         if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1409                 if (hdr->csum_start == (m->l2_len + m->l3_len)) {
1410                         switch (hdr->csum_offset) {
1411                         case (offsetof(struct rte_tcp_hdr, cksum)):
1412                                 if (l4_proto == IPPROTO_TCP)
1413                                         m->ol_flags |= PKT_TX_TCP_CKSUM;
1414                                 break;
1415                         case (offsetof(struct rte_udp_hdr, dgram_cksum)):
1416                                 if (l4_proto == IPPROTO_UDP)
1417                                         m->ol_flags |= PKT_TX_UDP_CKSUM;
1418                                 break;
1419                         case (offsetof(struct rte_sctp_hdr, cksum)):
1420                                 if (l4_proto == IPPROTO_SCTP)
1421                                         m->ol_flags |= PKT_TX_SCTP_CKSUM;
1422                                 break;
1423                         default:
1424                                 break;
1425                         }
1426                 }
1427         }
1428
1429         if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1430                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1431                 case VIRTIO_NET_HDR_GSO_TCPV4:
1432                 case VIRTIO_NET_HDR_GSO_TCPV6:
1433                         tcp_hdr = l4_hdr;
1434                         m->ol_flags |= PKT_TX_TCP_SEG;
1435                         m->tso_segsz = hdr->gso_size;
1436                         m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
1437                         break;
1438                 case VIRTIO_NET_HDR_GSO_UDP:
1439                         m->ol_flags |= PKT_TX_UDP_SEG;
1440                         m->tso_segsz = hdr->gso_size;
1441                         m->l4_len = sizeof(struct rte_udp_hdr);
1442                         break;
1443                 default:
1444                         RTE_LOG(WARNING, VHOST_DATA,
1445                                 "unsupported gso type %u.\n", hdr->gso_type);
1446                         break;
1447                 }
1448         }
1449 }
1450
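/*
 * Gather the virtio-net header into 'hdr' when it is not contiguous in guest
 * memory, copying it piecewise from the first entries of 'buf_vec'.
 */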
1451 static __rte_noinline void
1452 copy_vnet_hdr_from_desc(struct virtio_net_hdr *hdr,
1453                 struct buf_vector *buf_vec)
1454 {
1455         uint64_t len;
1456         uint64_t remain = sizeof(struct virtio_net_hdr);
1457         uint64_t src;
1458         uint64_t dst = (uint64_t)(uintptr_t)hdr;
1459
1460         while (remain) {
1461                 len = RTE_MIN(remain, buf_vec->buf_len);
1462                 src = buf_vec->buf_addr;
1463                 rte_memcpy((void *)(uintptr_t)dst,
1464                                 (void *)(uintptr_t)src, len);
1465
1466                 remain -= len;
1467                 dst += len;
1468                 buf_vec++;
1469         }
1470 }
1471
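/*
 * Copy one descriptor chain (described by 'buf_vec'/'nr_vec') into the mbuf
 * 'm', allocating and chaining extra mbufs from 'mbuf_pool' as needed. Small
 * copies may be deferred to the batch copy array and performed later by
 * do_data_copy_dequeue(); in dequeue zero-copy mode, guest pages are
 * referenced directly instead of being copied. Offload information from the
 * virtio-net header, if any, is applied to 'm' at the end.
 */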
1472 static __rte_always_inline int
1473 copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
1474                   struct buf_vector *buf_vec, uint16_t nr_vec,
1475                   struct rte_mbuf *m, struct rte_mempool *mbuf_pool)
1476 {
1477         uint32_t buf_avail, buf_offset;
1478         uint64_t buf_addr, buf_iova, buf_len;
1479         uint32_t mbuf_avail, mbuf_offset;
1480         uint32_t cpy_len;
1481         struct rte_mbuf *cur = m, *prev = m;
1482         struct virtio_net_hdr tmp_hdr;
1483         struct virtio_net_hdr *hdr = NULL;
1484         /* A counter to avoid a dead loop on the desc chain */
1485         uint16_t vec_idx = 0;
1486         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
1487         int error = 0;
1488
1489         buf_addr = buf_vec[vec_idx].buf_addr;
1490         buf_iova = buf_vec[vec_idx].buf_iova;
1491         buf_len = buf_vec[vec_idx].buf_len;
1492
1493         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
1494                 error = -1;
1495                 goto out;
1496         }
1497
1498         if (virtio_net_with_host_offload(dev)) {
1499                 if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
1500                         /*
1501                          * No luck, the virtio-net header doesn't fit
1502                          * in a contiguous virtual area.
1503                          */
1504                         copy_vnet_hdr_from_desc(&tmp_hdr, buf_vec);
1505                         hdr = &tmp_hdr;
1506                 } else {
1507                         hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
1508                 }
1509         }
1510
1511         /*
1512          * A virtio driver normally uses at least 2 desc buffers
1513          * for Tx: the first for storing the header, and the others
1514          * for storing the data.
1515          */
1516         if (unlikely(buf_len < dev->vhost_hlen)) {
1517                 buf_offset = dev->vhost_hlen - buf_len;
1518                 vec_idx++;
1519                 buf_addr = buf_vec[vec_idx].buf_addr;
1520                 buf_iova = buf_vec[vec_idx].buf_iova;
1521                 buf_len = buf_vec[vec_idx].buf_len;
1522                 buf_avail  = buf_len - buf_offset;
1523         } else if (buf_len == dev->vhost_hlen) {
1524                 if (unlikely(++vec_idx >= nr_vec))
1525                         goto out;
1526                 buf_addr = buf_vec[vec_idx].buf_addr;
1527                 buf_iova = buf_vec[vec_idx].buf_iova;
1528                 buf_len = buf_vec[vec_idx].buf_len;
1529
1530                 buf_offset = 0;
1531                 buf_avail = buf_len;
1532         } else {
1533                 buf_offset = dev->vhost_hlen;
1534                 buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
1535         }
1536
1537         PRINT_PACKET(dev,
1538                         (uintptr_t)(buf_addr + buf_offset),
1539                         (uint32_t)buf_avail, 0);
1540
1541         mbuf_offset = 0;
1542         mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
1543         while (1) {
1544                 uint64_t hpa;
1545
1546                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
1547
1548                 /*
1549                  * A desc buf might span two host physical pages that are
1550                  * not contiguous. In that case (gpa_to_hpa returns 0), the
1551                  * data will be copied even though zero copy is enabled.
1552                  */
1553                 if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
1554                                         buf_iova + buf_offset, cpy_len)))) {
1555                         cur->data_len = cpy_len;
1556                         cur->data_off = 0;
1557                         cur->buf_addr =
1558                                 (void *)(uintptr_t)(buf_addr + buf_offset);
1559                         cur->buf_iova = hpa;
1560
1561                         /*
1562                          * In zero copy mode, one mbuf can only reference data
1563                          * for one desc buf, or part of one.
1564                          */
1565                         mbuf_avail = cpy_len;
1566                 } else {
1567                         if (likely(cpy_len > MAX_BATCH_LEN ||
1568                                    vq->batch_copy_nb_elems >= vq->size ||
1569                                    (hdr && cur == m))) {
1570                                 rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
1571                                                                    mbuf_offset),
1572                                            (void *)((uintptr_t)(buf_addr +
1573                                                            buf_offset)),
1574                                            cpy_len);
1575                         } else {
1576                                 batch_copy[vq->batch_copy_nb_elems].dst =
1577                                         rte_pktmbuf_mtod_offset(cur, void *,
1578                                                                 mbuf_offset);
1579                                 batch_copy[vq->batch_copy_nb_elems].src =
1580                                         (void *)((uintptr_t)(buf_addr +
1581                                                                 buf_offset));
1582                                 batch_copy[vq->batch_copy_nb_elems].len =
1583                                         cpy_len;
1584                                 vq->batch_copy_nb_elems++;
1585                         }
1586                 }
1587
1588                 mbuf_avail  -= cpy_len;
1589                 mbuf_offset += cpy_len;
1590                 buf_avail -= cpy_len;
1591                 buf_offset += cpy_len;
1592
1593                 /* This buf has reached its end, get the next one */
1594                 if (buf_avail == 0) {
1595                         if (++vec_idx >= nr_vec)
1596                                 break;
1597
1598                         buf_addr = buf_vec[vec_idx].buf_addr;
1599                         buf_iova = buf_vec[vec_idx].buf_iova;
1600                         buf_len = buf_vec[vec_idx].buf_len;
1601
1602                         buf_offset = 0;
1603                         buf_avail  = buf_len;
1604
1605                         PRINT_PACKET(dev, (uintptr_t)buf_addr,
1606                                         (uint32_t)buf_avail, 0);
1607                 }
1608
1609                 /*
1610                  * This mbuf has reached its end, allocate a new one
1611                  * to hold more data.
1612                  */
1613                 if (mbuf_avail == 0) {
1614                         cur = rte_pktmbuf_alloc(mbuf_pool);
1615                         if (unlikely(cur == NULL)) {
1616                                 RTE_LOG(ERR, VHOST_DATA, "Failed to "
1617                                         "allocate memory for mbuf.\n");
1618                                 error = -1;
1619                                 goto out;
1620                         }
1621                         if (unlikely(dev->dequeue_zero_copy))
1622                                 rte_mbuf_refcnt_update(cur, 1);
1623
1624                         prev->next = cur;
1625                         prev->data_len = mbuf_offset;
1626                         m->nb_segs += 1;
1627                         m->pkt_len += mbuf_offset;
1628                         prev = cur;
1629
1630                         mbuf_offset = 0;
1631                         mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
1632                 }
1633         }
1634
1635         prev->data_len = mbuf_offset;
1636         m->pkt_len    += mbuf_offset;
1637
1638         if (hdr)
1639                 vhost_dequeue_offload(hdr, m);
1640
1641 out:
1642
1643         return error;
1644 }
1645
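/*
 * Find a free zero-copy mbuf slot, scanning from last_zmbuf_idx to the end
 * of the array and then wrapping around once; returns NULL if every slot is
 * still in use.
 */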
1646 static __rte_always_inline struct zcopy_mbuf *
1647 get_zmbuf(struct vhost_virtqueue *vq)
1648 {
1649         uint16_t i;
1650         uint16_t last;
1651         int tries = 0;
1652
1653         /* search [last_zmbuf_idx, zmbuf_size) */
1654         i = vq->last_zmbuf_idx;
1655         last = vq->zmbuf_size;
1656
1657 again:
1658         for (; i < last; i++) {
1659                 if (vq->zmbufs[i].in_use == 0) {
1660                         vq->last_zmbuf_idx = i + 1;
1661                         vq->zmbufs[i].in_use = 1;
1662                         return &vq->zmbufs[i];
1663                 }
1664         }
1665
1666         tries++;
1667         if (tries == 1) {
1668                 /* search [0, last_zmbuf_idx) */
1669                 i = 0;
1670                 last = vq->last_zmbuf_idx;
1671                 goto again;
1672         }
1673
1674         return NULL;
1675 }
1676
1677 static void
1678 virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)
1679 {
1680         rte_free(opaque);
1681 }
1682
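/*
 * Allocate an external buffer large enough for 'size' bytes plus headroom,
 * place the shared info either in the mbuf's own data area or at the tail of
 * the new buffer, and attach the buffer to 'pkt'.
 */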
1683 static int
1684 virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
1685 {
1686         struct rte_mbuf_ext_shared_info *shinfo = NULL;
1687         uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
1688         uint16_t buf_len;
1689         rte_iova_t iova;
1690         void *buf;
1691
1692         /* Try to use the pkt's own buffer to store shinfo, to reduce the amount
1693          * of memory required; otherwise store shinfo in the newly allocated buffer.
1694          */
1695         if (rte_pktmbuf_tailroom(pkt) >= sizeof(*shinfo))
1696                 shinfo = rte_pktmbuf_mtod(pkt,
1697                                           struct rte_mbuf_ext_shared_info *);
1698         else {
1699                 total_len += sizeof(*shinfo) + sizeof(uintptr_t);
1700                 total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
1701         }
1702
1703         if (unlikely(total_len > UINT16_MAX))
1704                 return -ENOSPC;
1705
1706         buf_len = total_len;
1707         buf = rte_malloc(NULL, buf_len, RTE_CACHE_LINE_SIZE);
1708         if (unlikely(buf == NULL))
1709                 return -ENOMEM;
1710
1711         /* Initialize shinfo */
1712         if (shinfo) {
1713                 shinfo->free_cb = virtio_dev_extbuf_free;
1714                 shinfo->fcb_opaque = buf;
1715                 rte_mbuf_ext_refcnt_set(shinfo, 1);
1716         } else {
1717                 shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
1718                                               virtio_dev_extbuf_free, buf);
1719                 if (unlikely(shinfo == NULL)) {
1720                         rte_free(buf);
1721                         RTE_LOG(ERR, VHOST_DATA, "Failed to init shinfo\n");
1722                         return -1;
1723                 }
1724         }
1725
1726         iova = rte_malloc_virt2iova(buf);
1727         rte_pktmbuf_attach_extbuf(pkt, buf, iova, buf_len, shinfo);
1728         rte_pktmbuf_reset_headroom(pkt);
1729
1730         return 0;
1731 }
1732
1733 /*
1734  * Allocate a pktmbuf able to hold 'data_len' bytes, using an external buffer or chaining when needed.
1735  */
1736 static __rte_always_inline struct rte_mbuf *
1737 virtio_dev_pktmbuf_alloc(struct virtio_net *dev, struct rte_mempool *mp,
1738                          uint32_t data_len)
1739 {
1740         struct rte_mbuf *pkt = rte_pktmbuf_alloc(mp);
1741
1742         if (unlikely(pkt == NULL))
1743                 return NULL;
1744
1745         if (rte_pktmbuf_tailroom(pkt) >= data_len)
1746                 return pkt;
1747
1748         /* attach an external buffer if supported */
1749         if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
1750                 return pkt;
1751
1752         /* check if chained buffers are allowed */
1753         if (!dev->linearbuf)
1754                 return pkt;
1755
1756         /* Data doesn't fit into the buffer and the host supports
1757          * only linear buffers
1758          */
1759         rte_pktmbuf_free(pkt);
1760
1761         return NULL;
1762 }
1763
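/*
 * Dequeue path for split virtqueues: reclaim zero-copy mbufs the application
 * has released, then dequeue up to 'count' descriptor chains, allocating an
 * mbuf for each, copying (or zero-copy attaching) the data, and recording the
 * buffers in the shadow used ring, which is flushed and the guest notified at
 * the end.
 */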
1764 static __rte_noinline uint16_t
1765 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
1766         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
1767 {
1768         uint16_t i;
1769         uint16_t free_entries;
1770
1771         if (unlikely(dev->dequeue_zero_copy)) {
1772                 struct zcopy_mbuf *zmbuf, *next;
1773
1774                 for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
1775                      zmbuf != NULL; zmbuf = next) {
1776                         next = TAILQ_NEXT(zmbuf, next);
1777
1778                         if (mbuf_is_consumed(zmbuf->mbuf)) {
1779                                 update_shadow_used_ring_split(vq,
1780                                                 zmbuf->desc_idx, 0);
1781                                 TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
1782                                 restore_mbuf(zmbuf->mbuf);
1783                                 rte_pktmbuf_free(zmbuf->mbuf);
1784                                 put_zmbuf(zmbuf);
1785                                 vq->nr_zmbuf -= 1;
1786                         }
1787                 }
1788
1789                 if (likely(vq->shadow_used_idx)) {
1790                         flush_shadow_used_ring_split(dev, vq);
1791                         vhost_vring_call_split(dev, vq);
1792                 }
1793         }
1794
1795         free_entries = *((volatile uint16_t *)&vq->avail->idx) -
1796                         vq->last_avail_idx;
1797         if (free_entries == 0)
1798                 return 0;
1799
1800         /*
1801          * The ordering between avail index and
1802          * desc reads needs to be enforced.
1803          */
1804         rte_smp_rmb();
1805
1806         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1807
1808         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
1809
1810         count = RTE_MIN(count, MAX_PKT_BURST);
1811         count = RTE_MIN(count, free_entries);
1812         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
1813                         dev->vid, count);
1814
1815         for (i = 0; i < count; i++) {
1816                 struct buf_vector buf_vec[BUF_VECTOR_MAX];
1817                 uint16_t head_idx;
1818                 uint32_t buf_len;
1819                 uint16_t nr_vec = 0;
1820                 int err;
1821
1822                 if (unlikely(fill_vec_buf_split(dev, vq,
1823                                                 vq->last_avail_idx + i,
1824                                                 &nr_vec, buf_vec,
1825                                                 &head_idx, &buf_len,
1826                                                 VHOST_ACCESS_RO) < 0))
1827                         break;
1828
1829                 if (likely(dev->dequeue_zero_copy == 0))
1830                         update_shadow_used_ring_split(vq, head_idx, 0);
1831
1832                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
1833                 if (unlikely(pkts[i] == NULL))
1834                         break;
1835
1836                 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
1837                                 mbuf_pool);
1838                 if (unlikely(err)) {
1839                         rte_pktmbuf_free(pkts[i]);
1840                         break;
1841                 }
1842
1843                 if (unlikely(dev->dequeue_zero_copy)) {
1844                         struct zcopy_mbuf *zmbuf;
1845
1846                         zmbuf = get_zmbuf(vq);
1847                         if (!zmbuf) {
1848                                 rte_pktmbuf_free(pkts[i]);
1849                                 break;
1850                         }
1851                         zmbuf->mbuf = pkts[i];
1852                         zmbuf->desc_idx = head_idx;
1853
1854                         /*
1855                          * Pin the mbuf with an extra reference; we check
1856                          * later whether the mbuf has been consumed (i.e.
1857                          * we are the last user), in which case the used
1858                          * ring can be updated safely.
1859                          */
1860                         rte_mbuf_refcnt_update(pkts[i], 1);
1861
1862                         vq->nr_zmbuf += 1;
1863                         TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
1864                 }
1865         }
1866         vq->last_avail_idx += i;
1867
1868         if (likely(dev->dequeue_zero_copy == 0)) {
1869                 do_data_copy_dequeue(vq);
1870                 if (unlikely(i < count))
1871                         vq->shadow_used_idx = i;
1872                 if (likely(vq->shadow_used_idx)) {
1873                         flush_shadow_used_ring_split(dev, vq);
1874                         vhost_vring_call_split(dev, vq);
1875                 }
1876         }
1877
1878         return i;
1879 }
1880
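/*
 * Check whether the next PACKED_BATCH_SIZE descriptors can be dequeued as a
 * batch: they must all be available, single descriptors (no chains), translate
 * to host virtual addresses of the advertised length, and fit into freshly
 * allocated mbufs. On success the descriptor addresses and ids are returned
 * and any offload info from the headers is applied; otherwise the mbufs are
 * freed and -1 is returned.
 */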
1881 static __rte_always_inline int
1882 vhost_reserve_avail_batch_packed(struct virtio_net *dev,
1883                                  struct vhost_virtqueue *vq,
1884                                  struct rte_mempool *mbuf_pool,
1885                                  struct rte_mbuf **pkts,
1886                                  uint16_t avail_idx,
1887                                  uintptr_t *desc_addrs,
1888                                  uint16_t *ids)
1889 {
1890         bool wrap = vq->avail_wrap_counter;
1891         struct vring_packed_desc *descs = vq->desc_packed;
1892         struct virtio_net_hdr *hdr;
1893         uint64_t lens[PACKED_BATCH_SIZE];
1894         uint64_t buf_lens[PACKED_BATCH_SIZE];
1895         uint32_t buf_offset = dev->vhost_hlen;
1896         uint16_t flags, i;
1897
1898         if (unlikely(avail_idx & PACKED_BATCH_MASK))
1899                 return -1;
1900         if (unlikely((avail_idx + PACKED_BATCH_SIZE) > vq->size))
1901                 return -1;
1902
1903         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1904                 flags = descs[avail_idx + i].flags;
1905                 if (unlikely((wrap != !!(flags & VRING_DESC_F_AVAIL)) ||
1906                              (wrap == !!(flags & VRING_DESC_F_USED))  ||
1907                              (flags & PACKED_DESC_SINGLE_DEQUEUE_FLAG)))
1908                         return -1;
1909         }
1910
1911         rte_smp_rmb();
1912
1913         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1914                 lens[i] = descs[avail_idx + i].len;
1915
1916         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1917                 desc_addrs[i] = vhost_iova_to_vva(dev, vq,
1918                                                   descs[avail_idx + i].addr,
1919                                                   &lens[i], VHOST_ACCESS_RW);
1920         }
1921
1922         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1923                 if (unlikely((lens[i] != descs[avail_idx + i].len)))
1924                         return -1;
1925         }
1926
1927         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1928                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, lens[i]);
1929                 if (!pkts[i])
1930                         goto free_buf;
1931         }
1932
1933         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1934                 buf_lens[i] = pkts[i]->buf_len - pkts[i]->data_off;
1935
1936         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1937                 if (unlikely(buf_lens[i] < (lens[i] - buf_offset)))
1938                         goto free_buf;
1939         }
1940
1941         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1942                 pkts[i]->pkt_len = descs[avail_idx + i].len - buf_offset;
1943                 pkts[i]->data_len = pkts[i]->pkt_len;
1944                 ids[i] = descs[avail_idx + i].id;
1945         }
1946
1947         if (virtio_net_with_host_offload(dev)) {
1948                 vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
1949                         hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
1950                         vhost_dequeue_offload(hdr, pkts[i]);
1951                 }
1952         }
1953
1954         return 0;
1955
1956 free_buf:
1957         for (i = 0; i < PACKED_BATCH_SIZE; i++)
1958                 rte_pktmbuf_free(pkts[i]);
1959
1960         return -1;
1961 }
1962
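/*
 * Fast dequeue path: receive PACKED_BATCH_SIZE packets at once, copy each
 * payload into its mbuf, record the descriptor ids in the shadow used ring
 * and advance the avail index by a full batch.
 */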
1963 static __rte_unused int
1964 virtio_dev_tx_batch_packed(struct virtio_net *dev,
1965                            struct vhost_virtqueue *vq,
1966                            struct rte_mempool *mbuf_pool,
1967                            struct rte_mbuf **pkts)
1968 {
1969         uint16_t avail_idx = vq->last_avail_idx;
1970         uint32_t buf_offset = dev->vhost_hlen;
1971         uintptr_t desc_addrs[PACKED_BATCH_SIZE];
1972         uint16_t ids[PACKED_BATCH_SIZE];
1973         uint16_t i;
1974
1975         if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
1976                                              avail_idx, desc_addrs, ids))
1977                 return -1;
1978
1979         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1980                 rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
1981
1982         vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
1983                 rte_memcpy(rte_pktmbuf_mtod_offset(pkts[i], void *, 0),
1984                            (void *)(uintptr_t)(desc_addrs[i] + buf_offset),
1985                            pkts[i]->pkt_len);
1986
1987         vhost_shadow_dequeue_batch_packed(dev, vq, ids);
1988
1989         vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
1990
1991         return 0;
1992 }
1993
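/*
 * Dequeue a single descriptor chain from a packed virtqueue into a newly
 * allocated mbuf, returning the buffer id and the number of descriptors
 * consumed.
 */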
1994 static __rte_always_inline int
1995 vhost_dequeue_single_packed(struct virtio_net *dev,
1996                             struct vhost_virtqueue *vq,
1997                             struct rte_mempool *mbuf_pool,
1998                             struct rte_mbuf **pkts,
1999                             uint16_t *buf_id,
2000                             uint16_t *desc_count)
2001 {
2002         struct buf_vector buf_vec[BUF_VECTOR_MAX];
2003         uint32_t buf_len;
2004         uint16_t nr_vec = 0;
2005         int err;
2006
2007         if (unlikely(fill_vec_buf_packed(dev, vq,
2008                                          vq->last_avail_idx, desc_count,
2009                                          buf_vec, &nr_vec,
2010                                          buf_id, &buf_len,
2011                                          VHOST_ACCESS_RO) < 0))
2012                 return -1;
2013
2014         *pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
2015         if (unlikely(*pkts == NULL)) {
2016                 RTE_LOG(ERR, VHOST_DATA,
2017                         "Failed to allocate memory for mbuf.\n");
2018                 return -1;
2019         }
2020
2021         err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
2022                                 mbuf_pool);
2023         if (unlikely(err)) {
2024                 rte_pktmbuf_free(*pkts);
2025                 return -1;
2026         }
2027
2028         return 0;
2029 }
2030
2031 static __rte_unused int
2032 virtio_dev_tx_single_packed(struct virtio_net *dev,
2033                             struct vhost_virtqueue *vq,
2034                             struct rte_mempool *mbuf_pool,
2035                             struct rte_mbuf **pkts)
2036 {
2037
2038         uint16_t buf_id, desc_count;
2039
2040         if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
2041                                         &desc_count))
2042                 return -1;
2043
2044         vhost_shadow_dequeue_single_packed(vq, buf_id, desc_count);
2045
2046         vq_inc_last_avail_packed(vq, desc_count);
2047
2048         return 0;
2049 }
2050
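/*
 * Dequeue path for packed virtqueues, mirroring virtio_dev_tx_split():
 * reclaim consumed zero-copy mbufs, dequeue up to 'count' descriptor chains
 * one by one, then flush the shadow used ring and notify the guest.
 */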
2051 static __rte_noinline uint16_t
2052 virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
2053         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
2054 {
2055         uint16_t i;
2056
2057         if (unlikely(dev->dequeue_zero_copy)) {
2058                 struct zcopy_mbuf *zmbuf, *next;
2059
2060                 for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
2061                      zmbuf != NULL; zmbuf = next) {
2062                         next = TAILQ_NEXT(zmbuf, next);
2063
2064                         if (mbuf_is_consumed(zmbuf->mbuf)) {
2065                                 update_shadow_used_ring_packed(vq,
2066                                                 zmbuf->desc_idx,
2067                                                 0,
2068                                                 zmbuf->desc_count);
2069
2070                                 TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
2071                                 restore_mbuf(zmbuf->mbuf);
2072                                 rte_pktmbuf_free(zmbuf->mbuf);
2073                                 put_zmbuf(zmbuf);
2074                                 vq->nr_zmbuf -= 1;
2075                         }
2076                 }
2077
2078                 if (likely(vq->shadow_used_idx)) {
2079                         flush_shadow_used_ring_packed(dev, vq);
2080                         vhost_vring_call_packed(dev, vq);
2081                 }
2082         }
2083
2084         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
2085
2086         count = RTE_MIN(count, MAX_PKT_BURST);
2087         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
2088                         dev->vid, count);
2089
2090         for (i = 0; i < count; i++) {
2091                 struct buf_vector buf_vec[BUF_VECTOR_MAX];
2092                 uint16_t buf_id;
2093                 uint32_t buf_len;
2094                 uint16_t desc_count, nr_vec = 0;
2095                 int err;
2096
2097                 if (unlikely(fill_vec_buf_packed(dev, vq,
2098                                                 vq->last_avail_idx, &desc_count,
2099                                                 buf_vec, &nr_vec,
2100                                                 &buf_id, &buf_len,
2101                                                 VHOST_ACCESS_RO) < 0))
2102                         break;
2103
2104                 if (likely(dev->dequeue_zero_copy == 0))
2105                         update_shadow_used_ring_packed(vq, buf_id, 0,
2106                                         desc_count);
2107
2108                 pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
2109                 if (unlikely(pkts[i] == NULL))
2110                         break;
2111
2112                 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
2113                                 mbuf_pool);
2114                 if (unlikely(err)) {
2115                         rte_pktmbuf_free(pkts[i]);
2116                         break;
2117                 }
2118
2119                 if (unlikely(dev->dequeue_zero_copy)) {
2120                         struct zcopy_mbuf *zmbuf;
2121
2122                         zmbuf = get_zmbuf(vq);
2123                         if (!zmbuf) {
2124                                 rte_pktmbuf_free(pkts[i]);
2125                                 break;
2126                         }
2127                         zmbuf->mbuf = pkts[i];
2128                         zmbuf->desc_idx = buf_id;
2129                         zmbuf->desc_count = desc_count;
2130
2131                         /*
2132                          * Pin the mbuf with an extra reference; we check
2133                          * later whether the mbuf has been consumed (i.e.
2134                          * we are the last user), in which case the used
2135                          * ring can be updated safely.
2136                          */
2137                         rte_mbuf_refcnt_update(pkts[i], 1);
2138
2139                         vq->nr_zmbuf += 1;
2140                         TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
2141                 }
2142
2143                 vq_inc_last_avail_packed(vq, desc_count);
2144         }
2145
2146         if (likely(dev->dequeue_zero_copy == 0)) {
2147                 do_data_copy_dequeue(vq);
2148                 if (unlikely(i < count))
2149                         vq->shadow_used_idx = i;
2150                 if (likely(vq->shadow_used_idx)) {
2151                         flush_shadow_used_ring_packed(dev, vq);
2152                         vhost_vring_call_packed(dev, vq);
2153                 }
2154         }
2155
2156         return i;
2157 }
2158
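/*
 * Public dequeue API: receive up to 'count' packets the guest has made
 * available on virtqueue 'queue_id' (a guest Tx queue, i.e. an odd index)
 * into 'pkts', returning the number of packets actually dequeued. A RARP
 * packet may be injected at the head of the burst after a live migration.
 *
 * A minimal usage sketch (not part of this file; it assumes 'vid' is a
 * registered vhost device, 'mbuf_pool' a valid mempool, queue index 1 the
 * first guest Tx queue, and MAX_PKT_BURST the application's burst size):
 *
 *	struct rte_mbuf *pkts[MAX_PKT_BURST];
 *	uint16_t nb, i;
 *
 *	nb = rte_vhost_dequeue_burst(vid, 1, mbuf_pool, pkts, MAX_PKT_BURST);
 *	for (i = 0; i < nb; i++)
 *		rte_pktmbuf_free(pkts[i]);
 *
 * where each dequeued mbuf would normally be processed or forwarded before
 * being released.
 */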
2159 uint16_t
2160 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
2161         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
2162 {
2163         struct virtio_net *dev;
2164         struct rte_mbuf *rarp_mbuf = NULL;
2165         struct vhost_virtqueue *vq;
2166
2167         dev = get_device(vid);
2168         if (!dev)
2169                 return 0;
2170
2171         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
2172                 RTE_LOG(ERR, VHOST_DATA,
2173                         "(%d) %s: built-in vhost net backend is disabled.\n",
2174                         dev->vid, __func__);
2175                 return 0;
2176         }
2177
2178         if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
2179                 RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
2180                         dev->vid, __func__, queue_id);
2181                 return 0;
2182         }
2183
2184         vq = dev->virtqueue[queue_id];
2185
2186         if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
2187                 return 0;
2188
2189         if (unlikely(vq->enabled == 0)) {
2190                 count = 0;
2191                 goto out_access_unlock;
2192         }
2193
2194         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2195                 vhost_user_iotlb_rd_lock(vq);
2196
2197         if (unlikely(vq->access_ok == 0))
2198                 if (unlikely(vring_translate(dev, vq) < 0)) {
2199                         count = 0;
2200                         goto out;
2201                 }
2202
2203         /*
2204          * Construct a RARP broadcast packet, and inject it into the "pkts"
2205          * array, so it looks like the guest actually sent such a packet.
2206          *
2207          * Check user_send_rarp() for more information.
2208          *
2209          * broadcast_rarp shares a cacheline in the virtio_net structure
2210          * with some fields that are accessed during enqueue and
2211          * rte_atomic16_cmpset() causes a write if using cmpxchg. This could
2212          * result in false sharing between enqueue and dequeue.
2213          *
2214          * Prevent unnecessary false sharing by reading broadcast_rarp first
2215          * and only performing cmpset if the read indicates it is likely to
2216          * be set.
2217          */
2218         if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) &&
2219                         rte_atomic16_cmpset((volatile uint16_t *)
2220                                 &dev->broadcast_rarp.cnt, 1, 0))) {
2221
2222                 rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
2223                 if (rarp_mbuf == NULL) {
2224                         RTE_LOG(ERR, VHOST_DATA,
2225                                 "Failed to make RARP packet.\n");
2226                         count = 0;
2227                         goto out;
2228                 }
2229                 count -= 1;
2230         }
2231
2232         if (vq_is_packed(dev))
2233                 count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count);
2234         else
2235                 count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count);
2236
2237 out:
2238         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
2239                 vhost_user_iotlb_rd_unlock(vq);
2240
2241 out_access_unlock:
2242         rte_spinlock_unlock(&vq->access_lock);
2243
2244         if (unlikely(rarp_mbuf != NULL)) {
2245                 /*
2246                  * Inject it at the head of the "pkts" array, so that the
2247                  * switch's MAC learning table gets updated first.
2248                  */
2249                 memmove(&pkts[1], pkts, count * sizeof(struct rte_mbuf *));
2250                 pkts[0] = rarp_mbuf;
2251                 count += 1;
2252         }
2253
2254         return count;
2255 }