/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdbool.h>
#include <linux/virtio_net.h>

#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_vhost.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>
#include <rte_arp.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>

#include "iotlb.h"
#include "vhost.h"

#define MAX_PKT_BURST 32

#define MAX_BATCH_LEN 256

static __rte_always_inline bool
rxvq_is_mergeable(struct virtio_net *dev)
{
	return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
}

static bool
is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
{
	return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
}

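/*
 * The indirect descriptor table announced by the guest may not be
 * contiguous in the host process VA space (it can span IOTLB or memory
 * region boundaries). This helper allocates a shadow copy and fills it
 * chunk by chunk, translating each guest IOVA range as it goes.
 */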
static __rte_always_inline void *
alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t desc_addr, uint64_t desc_len)
{
	void *idesc;
	uint64_t src, dst;
	uint64_t len, remain = desc_len;

	idesc = rte_malloc(__func__, desc_len, 0);
	if (unlikely(!idesc))
		return NULL;

	dst = (uint64_t)(uintptr_t)idesc;

	while (remain) {
		len = remain;
		src = vhost_iova_to_vva(dev, vq, desc_addr, &len,
				VHOST_ACCESS_RO);
		if (unlikely(!src || !len)) {
			rte_free(idesc);
			return NULL;
		}

		rte_memcpy((void *)(uintptr_t)dst, (void *)(uintptr_t)src, len);

		remain -= len;
		dst += len;
		desc_addr += len;
	}

	return idesc;
}

static __rte_always_inline void
free_ind_table(void *idesc)
{
	rte_free(idesc);
}

static __rte_always_inline void
do_flush_shadow_used_ring_split(struct virtio_net *dev,
			struct vhost_virtqueue *vq,
			uint16_t to, uint16_t from, uint16_t size)
{
	rte_memcpy(&vq->used->ring[to],
			&vq->shadow_used_split[from],
			size * sizeof(struct vring_used_elem));
	vhost_log_cache_used_vring(dev, vq,
			offsetof(struct vring_used, ring[to]),
			size * sizeof(struct vring_used_elem));
}

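/*
 * Used-ring updates are first staged in a host-private shadow ring and
 * written back to the guest-visible used ring in one go. This batches
 * the (possibly logged) writes to the shared area; the copy is split in
 * two when the update wraps around the end of the ring.
 */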
static __rte_always_inline void
flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint16_t used_idx = vq->last_used_idx & (vq->size - 1);

	if (used_idx + vq->shadow_used_idx <= vq->size) {
		do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
					  vq->shadow_used_idx);
	} else {
		uint16_t size;

		/* update used ring interval [used_idx, vq->size) */
		size = vq->size - used_idx;
		do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);

		/* update the remaining interval [0, shadow_used_idx - size) */
		do_flush_shadow_used_ring_split(dev, vq, 0, size,
					  vq->shadow_used_idx - size);
	}
	vq->last_used_idx += vq->shadow_used_idx;

	rte_smp_wmb();

	vhost_log_cache_sync(dev, vq);

	*(volatile uint16_t *)&vq->used->idx += vq->shadow_used_idx;
	vq->shadow_used_idx = 0;
	vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
		sizeof(vq->used->idx));
}

static __rte_always_inline void
update_shadow_used_ring_split(struct vhost_virtqueue *vq,
			 uint16_t desc_idx, uint16_t len)
{
	uint16_t i = vq->shadow_used_idx++;

	vq->shadow_used_split[i].id  = desc_idx;
	vq->shadow_used_split[i].len = len;
}

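/*
 * For packed rings, the id/len fields are written back first; only
 * after a write barrier are the descriptor flags updated. This ordering
 * guarantees the guest never sees a descriptor marked as used before
 * its id and len fields are valid.
 */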
static __rte_always_inline void
flush_shadow_used_ring_packed(struct virtio_net *dev,
			struct vhost_virtqueue *vq)
{
	int i;
	uint16_t used_idx = vq->last_used_idx;

	/* Split loop in two to save memory barriers */
	for (i = 0; i < vq->shadow_used_idx; i++) {
		vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
		vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;

		used_idx += vq->shadow_used_packed[i].count;
		if (used_idx >= vq->size)
			used_idx -= vq->size;
	}

	rte_smp_wmb();

	for (i = 0; i < vq->shadow_used_idx; i++) {
		uint16_t flags;

		if (vq->shadow_used_packed[i].len)
			flags = VRING_DESC_F_WRITE;
		else
			flags = 0;

		if (vq->used_wrap_counter) {
			flags |= VRING_DESC_F_USED;
			flags |= VRING_DESC_F_AVAIL;
		} else {
			flags &= ~VRING_DESC_F_USED;
			flags &= ~VRING_DESC_F_AVAIL;
		}

		vq->desc_packed[vq->last_used_idx].flags = flags;

		vhost_log_cache_used_vring(dev, vq,
					vq->last_used_idx *
					sizeof(struct vring_packed_desc),
					sizeof(struct vring_packed_desc));

		vq->last_used_idx += vq->shadow_used_packed[i].count;
		if (vq->last_used_idx >= vq->size) {
			vq->used_wrap_counter ^= 1;
			vq->last_used_idx -= vq->size;
		}
	}

	rte_smp_wmb();
	vq->shadow_used_idx = 0;
	vhost_log_cache_sync(dev, vq);
}

static __rte_always_inline void
update_shadow_used_ring_packed(struct vhost_virtqueue *vq,
			 uint16_t desc_idx, uint16_t len, uint16_t count)
{
	uint16_t i = vq->shadow_used_idx++;

	vq->shadow_used_packed[i].id  = desc_idx;
	vq->shadow_used_packed[i].len = len;
	vq->shadow_used_packed[i].count = count;
}

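/*
 * Small copies (up to MAX_BATCH_LEN bytes) are deferred into
 * vq->batch_copy_elems by the copy routines and executed back to back
 * here, which keeps the hot enqueue/dequeue loops tight and improves
 * copy locality.
 */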
static inline void
do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	struct batch_copy_elem *elem = vq->batch_copy_elems;
	uint16_t count = vq->batch_copy_nb_elems;
	int i;

	for (i = 0; i < count; i++) {
		rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
		vhost_log_cache_write(dev, vq, elem[i].log_addr, elem[i].len);
		PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
	}

	vq->batch_copy_nb_elems = 0;
}

static inline void
do_data_copy_dequeue(struct vhost_virtqueue *vq)
{
	struct batch_copy_elem *elem = vq->batch_copy_elems;
	uint16_t count = vq->batch_copy_nb_elems;
	int i;

	for (i = 0; i < count; i++)
		rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);

	vq->batch_copy_nb_elems = 0;
}

/* avoid write operation when possible, to lessen cache issues */
#define ASSIGN_UNLESS_EQUAL(var, val) do {	\
	if ((var) != (val))			\
		(var) = (val);			\
} while (0)

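/*
 * Translate the offload requests carried in the mbuf (ol_flags,
 * l2/l3/l4 lengths, TSO segment size) into the virtio-net header the
 * guest will parse: checksum start/offset and GSO type/size.
 */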
static __rte_always_inline void
virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
{
	uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;

	if (m_buf->ol_flags & PKT_TX_TCP_SEG)
		csum_l4 |= PKT_TX_TCP_CKSUM;

	if (csum_l4) {
		net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;

		switch (csum_l4) {
		case PKT_TX_TCP_CKSUM:
			net_hdr->csum_offset = (offsetof(struct tcp_hdr,
						cksum));
			break;
		case PKT_TX_UDP_CKSUM:
			net_hdr->csum_offset = (offsetof(struct udp_hdr,
						dgram_cksum));
			break;
		case PKT_TX_SCTP_CKSUM:
			net_hdr->csum_offset = (offsetof(struct sctp_hdr,
						cksum));
			break;
		}
	} else {
		ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
		ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
		ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
	}

	/* IP cksum verification cannot be bypassed, so calculate it here */
	if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
		struct ipv4_hdr *ipv4_hdr;

		ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct ipv4_hdr *,
						   m_buf->l2_len);
		ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
	}

	if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
		if (m_buf->ol_flags & PKT_TX_IPV4)
			net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else
			net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		net_hdr->gso_size = m_buf->tso_segsz;
		net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
					+ m_buf->l4_len;
	} else if (m_buf->ol_flags & PKT_TX_UDP_SEG) {
		net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
		net_hdr->gso_size = m_buf->tso_segsz;
		net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
			m_buf->l4_len;
	} else {
		ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
		ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
		ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
	}
}

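/*
 * Map one guest descriptor into buf_vec entries. A single guest IOVA
 * range may translate to several host VA chunks, so the descriptor is
 * split across as many vector entries as needed.
 */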
static __rte_always_inline int
map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
		struct buf_vector *buf_vec, uint16_t *vec_idx,
		uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
{
	uint16_t vec_id = *vec_idx;

	while (desc_len) {
		uint64_t desc_addr;
		uint64_t desc_chunk_len = desc_len;

		if (unlikely(vec_id >= BUF_VECTOR_MAX))
			return -1;

		desc_addr = vhost_iova_to_vva(dev, vq,
				desc_iova,
				&desc_chunk_len,
				perm);
		if (unlikely(!desc_addr))
			return -1;

		buf_vec[vec_id].buf_iova = desc_iova;
		buf_vec[vec_id].buf_addr = desc_addr;
		buf_vec[vec_id].buf_len  = desc_chunk_len;

		desc_len -= desc_chunk_len;
		desc_iova += desc_chunk_len;
		vec_id++;
	}
	*vec_idx = vec_id;

	return 0;
}

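/*
 * Gather the descriptor chain starting at the given avail ring entry
 * into buf_vec, following VRING_DESC_F_NEXT links and dereferencing
 * indirect tables. Returns the chain head index and its total length.
 */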
static __rte_always_inline int
fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
			 uint32_t avail_idx, uint16_t *vec_idx,
			 struct buf_vector *buf_vec, uint16_t *desc_chain_head,
			 uint16_t *desc_chain_len, uint8_t perm)
{
	uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
	uint16_t vec_id = *vec_idx;
	uint32_t len    = 0;
	uint64_t dlen;
	struct vring_desc *descs = vq->desc;
	struct vring_desc *idesc = NULL;

	*desc_chain_head = idx;

	if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
		dlen = vq->desc[idx].len;
		descs = (struct vring_desc *)(uintptr_t)
			vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
						&dlen,
						VHOST_ACCESS_RO);
		if (unlikely(!descs))
			return -1;

		if (unlikely(dlen < vq->desc[idx].len)) {
			/*
			 * The indirect desc table is not contiguous
			 * in process VA space, so we have to copy it.
			 */
			idesc = alloc_copy_ind_table(dev, vq,
					vq->desc[idx].addr, vq->desc[idx].len);
			if (unlikely(!idesc))
				return -1;

			descs = idesc;
		}

		idx = 0;
	}

	while (1) {
		if (unlikely(idx >= vq->size)) {
			free_ind_table(idesc);
			return -1;
		}

		len += descs[idx].len;

		if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
						descs[idx].addr, descs[idx].len,
						perm))) {
			free_ind_table(idesc);
			return -1;
		}

		if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
			break;

		idx = descs[idx].next;
	}

	*desc_chain_len = len;
	*vec_idx = vec_id;

	if (unlikely(idesc))
		free_ind_table(idesc);

	return 0;
}

/*
 * Returns -1 on fail, 0 on success
 */
static inline int
reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
				uint32_t size, struct buf_vector *buf_vec,
				uint16_t *num_buffers, uint16_t avail_head,
				uint16_t *nr_vec)
{
	uint16_t cur_idx;
	uint16_t vec_idx = 0;
	uint16_t max_tries, tries = 0;

	uint16_t head_idx = 0;
	uint16_t len = 0;

	*num_buffers = 0;
	cur_idx  = vq->last_avail_idx;

	if (rxvq_is_mergeable(dev))
		max_tries = vq->size - 1;
	else
		max_tries = 1;

	while (size > 0) {
		if (unlikely(cur_idx == avail_head))
			return -1;
		/*
		 * If we have tried all available ring items and still
		 * can't get enough buffers, something abnormal has
		 * happened.
		 */
		if (unlikely(++tries > max_tries))
			return -1;

		if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
						&vec_idx, buf_vec,
						&head_idx, &len,
						VHOST_ACCESS_RW) < 0))
			return -1;
		len = RTE_MIN(len, size);
		update_shadow_used_ring_split(vq, head_idx, len);
		size -= len;

		cur_idx++;
		*num_buffers += 1;
	}

	*nr_vec = vec_idx;

	return 0;
}

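/*
 * Packed-ring counterpart of the indirect table handling: translate
 * (and, if not host-contiguous, copy) the indirect table, then map
 * every descriptor it contains into buf_vec.
 */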
static __rte_always_inline int
fill_vec_buf_packed_indirect(struct virtio_net *dev,
			struct vhost_virtqueue *vq,
			struct vring_packed_desc *desc, uint16_t *vec_idx,
			struct buf_vector *buf_vec, uint16_t *len, uint8_t perm)
{
	uint16_t i;
	uint32_t nr_descs;
	uint16_t vec_id = *vec_idx;
	uint64_t dlen;
	struct vring_packed_desc *descs, *idescs = NULL;

	dlen = desc->len;
	descs = (struct vring_packed_desc *)(uintptr_t)
		vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
	if (unlikely(!descs))
		return -1;

	if (unlikely(dlen < desc->len)) {
		/*
		 * The indirect desc table is not contiguous
		 * in process VA space, so we have to copy it.
		 */
		idescs = alloc_copy_ind_table(dev, vq, desc->addr, desc->len);
		if (unlikely(!idescs))
			return -1;

		descs = idescs;
	}

	nr_descs = desc->len / sizeof(struct vring_packed_desc);
	if (unlikely(nr_descs >= vq->size)) {
		free_ind_table(idescs);
		return -1;
	}

	for (i = 0; i < nr_descs; i++) {
		if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
			free_ind_table(idescs);
			return -1;
		}

		*len += descs[i].len;
		if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
						descs[i].addr, descs[i].len,
						perm))) {
			/* don't leak the copied indirect table on error */
			free_ind_table(idescs);
			return -1;
		}
	}
	*vec_idx = vec_id;

	if (unlikely(idescs))
		free_ind_table(idescs);

	return 0;
}

static __rte_always_inline int
fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
				uint16_t avail_idx, uint16_t *desc_count,
				struct buf_vector *buf_vec, uint16_t *vec_idx,
				uint16_t *buf_id, uint16_t *len, uint8_t perm)
{
	bool wrap_counter = vq->avail_wrap_counter;
	struct vring_packed_desc *descs = vq->desc_packed;
	uint16_t vec_id = *vec_idx;

	if (avail_idx < vq->last_avail_idx)
		wrap_counter ^= 1;

	if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
		return -1;

	*desc_count = 0;

	while (1) {
		if (unlikely(vec_id >= BUF_VECTOR_MAX))
			return -1;

		*desc_count += 1;
		*buf_id = descs[avail_idx].id;

		if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
			if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
							&descs[avail_idx],
							&vec_id, buf_vec,
							len, perm) < 0))
				return -1;
		} else {
			*len += descs[avail_idx].len;

			if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
							descs[avail_idx].addr,
							descs[avail_idx].len,
							perm)))
				return -1;
		}

		if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
			break;

		if (++avail_idx >= vq->size) {
			avail_idx -= vq->size;
			wrap_counter ^= 1;
		}
	}

	*vec_idx = vec_id;

	return 0;
}

/*
 * Returns -1 on fail, 0 on success
 */
static inline int
reserve_avail_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
				uint32_t size, struct buf_vector *buf_vec,
				uint16_t *nr_vec, uint16_t *num_buffers,
				uint16_t *nr_descs)
{
	uint16_t avail_idx;
	uint16_t vec_idx = 0;
	uint16_t max_tries, tries = 0;

	uint16_t buf_id = 0;
	uint16_t len = 0;
	uint16_t desc_count;

	*num_buffers = 0;
	avail_idx = vq->last_avail_idx;

	if (rxvq_is_mergeable(dev))
		max_tries = vq->size - 1;
	else
		max_tries = 1;

	while (size > 0) {
		/*
		 * If we have tried all available ring items and still
		 * can't get enough buffers, something abnormal has
		 * happened.
		 */
		if (unlikely(++tries > max_tries))
			return -1;

		/*
		 * The host writes into the guest buffers on the enqueue
		 * path, so write permission is required here, not RO.
		 */
		if (unlikely(fill_vec_buf_packed(dev, vq,
						avail_idx, &desc_count,
						buf_vec, &vec_idx,
						&buf_id, &len,
						VHOST_ACCESS_RW) < 0))
			return -1;

		len = RTE_MIN(len, size);
		update_shadow_used_ring_packed(vq, buf_id, len, desc_count);
		size -= len;

		avail_idx += desc_count;
		if (avail_idx >= vq->size)
			avail_idx -= vq->size;

		*nr_descs += desc_count;
		*num_buffers += 1;
	}

	*nr_vec = vec_idx;

	return 0;
}

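/*
 * Enqueue path copy routine: write the virtio-net header (possibly
 * scattered across several guest buffers) and then copy the mbuf chain
 * into the guest buffers gathered in buf_vec.
 */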
static __rte_always_inline int
copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
			    struct rte_mbuf *m, struct buf_vector *buf_vec,
			    uint16_t nr_vec, uint16_t num_buffers)
{
	uint32_t vec_idx = 0;
	uint32_t mbuf_offset, mbuf_avail;
	uint32_t buf_offset, buf_avail;
	uint64_t buf_addr, buf_iova, buf_len;
	uint32_t cpy_len;
	uint64_t hdr_addr;
	struct rte_mbuf *hdr_mbuf;
	struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
	struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
	int error = 0;

	if (unlikely(m == NULL)) {
		error = -1;
		goto out;
	}

	buf_addr = buf_vec[vec_idx].buf_addr;
	buf_iova = buf_vec[vec_idx].buf_iova;
	buf_len = buf_vec[vec_idx].buf_len;

	if (nr_vec > 1)
		rte_prefetch0((void *)(uintptr_t)buf_vec[1].buf_addr);

	if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
		error = -1;
		goto out;
	}

	hdr_mbuf = m;
	hdr_addr = buf_addr;
	if (unlikely(buf_len < dev->vhost_hlen))
		hdr = &tmp_hdr;
	else
		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;

	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
		dev->vid, num_buffers);

	if (unlikely(buf_len < dev->vhost_hlen)) {
		buf_offset = dev->vhost_hlen - buf_len;
		vec_idx++;
		buf_addr = buf_vec[vec_idx].buf_addr;
		buf_iova = buf_vec[vec_idx].buf_iova;
		buf_len = buf_vec[vec_idx].buf_len;
		buf_avail = buf_len - buf_offset;
	} else {
		buf_offset = dev->vhost_hlen;
		buf_avail = buf_len - dev->vhost_hlen;
	}

	mbuf_avail  = rte_pktmbuf_data_len(m);
	mbuf_offset = 0;
	while (mbuf_avail != 0 || m->next != NULL) {
		/* done with current buf, get the next one */
		if (buf_avail == 0) {
			vec_idx++;
			if (unlikely(vec_idx >= nr_vec)) {
				error = -1;
				goto out;
			}

			buf_addr = buf_vec[vec_idx].buf_addr;
			buf_iova = buf_vec[vec_idx].buf_iova;
			buf_len = buf_vec[vec_idx].buf_len;

			/* Prefetch next buffer address. */
			if (vec_idx + 1 < nr_vec)
				rte_prefetch0((void *)(uintptr_t)
						buf_vec[vec_idx + 1].buf_addr);
			buf_offset = 0;
			buf_avail  = buf_len;
		}

		/* done with current mbuf, get the next one */
		if (mbuf_avail == 0) {
			m = m->next;

			mbuf_offset = 0;
			mbuf_avail  = rte_pktmbuf_data_len(m);
		}

		if (hdr_addr) {
			virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
			if (rxvq_is_mergeable(dev))
				ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
						num_buffers);

			if (unlikely(hdr == &tmp_hdr)) {
				uint64_t len;
				uint64_t remain = dev->vhost_hlen;
				uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
				uint64_t iova = buf_vec[0].buf_iova;
				uint16_t hdr_vec_idx = 0;

				while (remain) {
					len = RTE_MIN(remain,
						buf_vec[hdr_vec_idx].buf_len);
					dst = buf_vec[hdr_vec_idx].buf_addr;
					rte_memcpy((void *)(uintptr_t)dst,
							(void *)(uintptr_t)src,
							len);

					PRINT_PACKET(dev, (uintptr_t)dst,
							(uint32_t)len, 0);
					vhost_log_cache_write(dev, vq,
							iova, len);

					remain -= len;
					iova += len;
					src += len;
					hdr_vec_idx++;
				}
			} else {
				PRINT_PACKET(dev, (uintptr_t)hdr_addr,
						dev->vhost_hlen, 0);
				vhost_log_cache_write(dev, vq,
						buf_vec[0].buf_iova,
						dev->vhost_hlen);
			}

			hdr_addr = 0;
		}

		cpy_len = RTE_MIN(buf_avail, mbuf_avail);

		if (likely(cpy_len > MAX_BATCH_LEN ||
					vq->batch_copy_nb_elems >= vq->size)) {
			rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
				rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
				cpy_len);
			vhost_log_cache_write(dev, vq, buf_iova + buf_offset,
					cpy_len);
			PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
				cpy_len, 0);
		} else {
			batch_copy[vq->batch_copy_nb_elems].dst =
				(void *)((uintptr_t)(buf_addr + buf_offset));
			batch_copy[vq->batch_copy_nb_elems].src =
				rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
			batch_copy[vq->batch_copy_nb_elems].log_addr =
				buf_iova + buf_offset;
			batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
			vq->batch_copy_nb_elems++;
		}

		mbuf_avail  -= cpy_len;
		mbuf_offset += cpy_len;
		buf_avail  -= cpy_len;
		buf_offset += cpy_len;
	}

out:

	return error;
}

static __rte_always_inline uint32_t
virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
	struct rte_mbuf **pkts, uint32_t count)
{
	uint32_t pkt_idx = 0;
	uint16_t num_buffers;
	struct buf_vector buf_vec[BUF_VECTOR_MAX];
	uint16_t avail_head;

	rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
	avail_head = *((volatile uint16_t *)&vq->avail->idx);

	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
		uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
		uint16_t nr_vec = 0;

		if (unlikely(reserve_avail_buf_split(dev, vq,
						pkt_len, buf_vec, &num_buffers,
						avail_head, &nr_vec) < 0)) {
			VHOST_LOG_DEBUG(VHOST_DATA,
				"(%d) failed to get enough desc from vring\n",
				dev->vid);
			vq->shadow_used_idx -= num_buffers;
			break;
		}

		rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);

		VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
			dev->vid, vq->last_avail_idx,
			vq->last_avail_idx + num_buffers);

		if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
						buf_vec, nr_vec,
						num_buffers) < 0) {
			vq->shadow_used_idx -= num_buffers;
			break;
		}

		vq->last_avail_idx += num_buffers;
	}

	do_data_copy_enqueue(dev, vq);

	if (likely(vq->shadow_used_idx)) {
		flush_shadow_used_ring_split(dev, vq);
		vhost_vring_call_split(dev, vq);
	}

	return pkt_idx;
}

static __rte_always_inline uint32_t
virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
	struct rte_mbuf **pkts, uint32_t count)
{
	uint32_t pkt_idx = 0;
	uint16_t num_buffers;
	struct buf_vector buf_vec[BUF_VECTOR_MAX];

	for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
		uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
		uint16_t nr_vec = 0;
		uint16_t nr_descs = 0;

		if (unlikely(reserve_avail_buf_packed(dev, vq,
						pkt_len, buf_vec, &nr_vec,
						&num_buffers, &nr_descs) < 0)) {
			VHOST_LOG_DEBUG(VHOST_DATA,
				"(%d) failed to get enough desc from vring\n",
				dev->vid);
			vq->shadow_used_idx -= num_buffers;
			break;
		}

		rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);

		VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
			dev->vid, vq->last_avail_idx,
			vq->last_avail_idx + num_buffers);

		if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
						buf_vec, nr_vec,
						num_buffers) < 0) {
			vq->shadow_used_idx -= num_buffers;
			break;
		}

		vq->last_avail_idx += nr_descs;
		if (vq->last_avail_idx >= vq->size) {
			vq->last_avail_idx -= vq->size;
			vq->avail_wrap_counter ^= 1;
		}
	}

	do_data_copy_enqueue(dev, vq);

	if (likely(vq->shadow_used_idx)) {
		flush_shadow_used_ring_packed(dev, vq);
		vhost_vring_call_packed(dev, vq);
	}

	return pkt_idx;
}

static __rte_always_inline uint32_t
virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
	struct rte_mbuf **pkts, uint32_t count)
{
	struct vhost_virtqueue *vq;

	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
		RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
			dev->vid, __func__, queue_id);
		return 0;
	}

	vq = dev->virtqueue[queue_id];

	rte_spinlock_lock(&vq->access_lock);

	if (unlikely(vq->enabled == 0))
		goto out_access_unlock;

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_rd_lock(vq);

	if (unlikely(vq->access_ok == 0))
		if (unlikely(vring_translate(dev, vq) < 0))
			goto out;

	count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
	if (count == 0)
		goto out;

	if (vq_is_packed(dev))
		count = virtio_dev_rx_packed(dev, vq, pkts, count);
	else
		count = virtio_dev_rx_split(dev, vq, pkts, count);

out:
	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_rd_unlock(vq);

out_access_unlock:
	rte_spinlock_unlock(&vq->access_lock);

	return count;
}

uint16_t
rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
	struct rte_mbuf **pkts, uint16_t count)
{
	struct virtio_net *dev = get_device(vid);

	if (!dev)
		return 0;

	if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) %s: built-in vhost net backend is disabled.\n",
			dev->vid, __func__);
		return 0;
	}

	return virtio_dev_rx(dev, queue_id, pkts, count);
}

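/*
 * Usage note for rte_vhost_enqueue_burst() above, as a minimal sketch
 * (illustrative only, not part of this library): a forwarding core
 * typically feeds a burst of mbufs into the device's first RX queue
 * (queue index 0; enqueue queue indexes must be even) and frees
 * whatever the ring could not absorb. The "vid" and "port_id"
 * variables are assumed to come from the application's vhost
 * new_device callback and its ethdev setup.
 *
 *	struct rte_mbuf *pkts[MAX_PKT_BURST];
 *	uint16_t nb_rx, nb_tx;
 *
 *	nb_rx = rte_eth_rx_burst(port_id, 0, pkts, MAX_PKT_BURST);
 *	nb_tx = rte_vhost_enqueue_burst(vid, 0, pkts, nb_rx);
 *	while (unlikely(nb_tx < nb_rx))
 *		rte_pktmbuf_free(pkts[nb_tx++]);
 */
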
static inline bool
virtio_net_with_host_offload(struct virtio_net *dev)
{
	if (dev->features &
			((1ULL << VIRTIO_NET_F_CSUM) |
			 (1ULL << VIRTIO_NET_F_HOST_ECN) |
			 (1ULL << VIRTIO_NET_F_HOST_TSO4) |
			 (1ULL << VIRTIO_NET_F_HOST_TSO6) |
			 (1ULL << VIRTIO_NET_F_HOST_UFO)))
		return true;

	return false;
}

static void
parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
{
	struct ipv4_hdr *ipv4_hdr;
	struct ipv6_hdr *ipv6_hdr;
	void *l3_hdr = NULL;
	struct ether_hdr *eth_hdr;
	uint16_t ethertype;

	eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	m->l2_len = sizeof(struct ether_hdr);
	ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);

	if (ethertype == ETHER_TYPE_VLAN) {
		struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);

		m->l2_len += sizeof(struct vlan_hdr);
		ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
	}

	l3_hdr = (char *)eth_hdr + m->l2_len;

	switch (ethertype) {
	case ETHER_TYPE_IPv4:
		ipv4_hdr = l3_hdr;
		*l4_proto = ipv4_hdr->next_proto_id;
		m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
		*l4_hdr = (char *)l3_hdr + m->l3_len;
		m->ol_flags |= PKT_TX_IPV4;
		break;
	case ETHER_TYPE_IPv6:
		ipv6_hdr = l3_hdr;
		*l4_proto = ipv6_hdr->proto;
		m->l3_len = sizeof(struct ipv6_hdr);
		*l4_hdr = (char *)l3_hdr + m->l3_len;
		m->ol_flags |= PKT_TX_IPV6;
		break;
	default:
		m->l3_len = 0;
		*l4_proto = 0;
		*l4_hdr = NULL;
		break;
	}
}

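/*
 * Reverse of virtio_enqueue_offload: reconstruct the mbuf offload flags
 * from the virtio-net header provided by the guest, parsing the packet
 * headers to validate the checksum offset and to size l4_len for GSO.
 */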
static __rte_always_inline void
vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
{
	uint16_t l4_proto = 0;
	void *l4_hdr = NULL;
	struct tcp_hdr *tcp_hdr = NULL;

	if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
		return;

	parse_ethernet(m, &l4_proto, &l4_hdr);
	if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		if (hdr->csum_start == (m->l2_len + m->l3_len)) {
			switch (hdr->csum_offset) {
			case (offsetof(struct tcp_hdr, cksum)):
				if (l4_proto == IPPROTO_TCP)
					m->ol_flags |= PKT_TX_TCP_CKSUM;
				break;
			case (offsetof(struct udp_hdr, dgram_cksum)):
				if (l4_proto == IPPROTO_UDP)
					m->ol_flags |= PKT_TX_UDP_CKSUM;
				break;
			case (offsetof(struct sctp_hdr, cksum)):
				if (l4_proto == IPPROTO_SCTP)
					m->ol_flags |= PKT_TX_SCTP_CKSUM;
				break;
			default:
				break;
			}
		}
	}

	if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
		case VIRTIO_NET_HDR_GSO_TCPV6:
			tcp_hdr = l4_hdr;
			m->ol_flags |= PKT_TX_TCP_SEG;
			m->tso_segsz = hdr->gso_size;
			m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			m->ol_flags |= PKT_TX_UDP_SEG;
			m->tso_segsz = hdr->gso_size;
			m->l4_len = sizeof(struct udp_hdr);
			break;
		default:
			RTE_LOG(WARNING, VHOST_DATA,
				"unsupported gso type %u.\n", hdr->gso_type);
			break;
		}
	}
}

static __rte_always_inline void
put_zmbuf(struct zcopy_mbuf *zmbuf)
{
	zmbuf->in_use = 0;
}

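/*
 * Dequeue path copy routine: read the virtio-net header (copying it
 * into a stack buffer when it is not virtually contiguous), then copy
 * or, in zero-copy mode, directly reference the guest buffers from the
 * mbuf chain.
 */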
static __rte_always_inline int
copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
		  struct buf_vector *buf_vec, uint16_t nr_vec,
		  struct rte_mbuf *m, struct rte_mempool *mbuf_pool)
{
	uint32_t buf_avail, buf_offset;
	uint64_t buf_addr, buf_iova, buf_len;
	uint32_t mbuf_avail, mbuf_offset;
	uint32_t cpy_len;
	struct rte_mbuf *cur = m, *prev = m;
	struct virtio_net_hdr tmp_hdr;
	struct virtio_net_hdr *hdr = NULL;
	/* A counter to avoid a dead loop in the desc chain */
	uint16_t vec_idx = 0;
	struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
	int error = 0;

	buf_addr = buf_vec[vec_idx].buf_addr;
	buf_iova = buf_vec[vec_idx].buf_iova;
	buf_len = buf_vec[vec_idx].buf_len;

	if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
		error = -1;
		goto out;
	}

	if (likely(nr_vec > 1))
		rte_prefetch0((void *)(uintptr_t)buf_vec[1].buf_addr);

	if (virtio_net_with_host_offload(dev)) {
		if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
			uint64_t len;
			uint64_t remain = sizeof(struct virtio_net_hdr);
			uint64_t src;
			uint64_t dst = (uint64_t)(uintptr_t)&tmp_hdr;
			uint16_t hdr_vec_idx = 0;

			/*
			 * No luck, the virtio-net header doesn't fit
			 * in a contiguous virtual area.
			 */
			while (remain) {
				len = RTE_MIN(remain,
					buf_vec[hdr_vec_idx].buf_len);
				src = buf_vec[hdr_vec_idx].buf_addr;
				rte_memcpy((void *)(uintptr_t)dst,
						   (void *)(uintptr_t)src, len);

				remain -= len;
				dst += len;
				hdr_vec_idx++;
			}

			hdr = &tmp_hdr;
		} else {
			hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
			rte_prefetch0(hdr);
		}
	}

	/*
	 * A virtio driver normally uses at least 2 desc buffers
	 * for Tx: the first for storing the header, and others
	 * for storing the data.
	 */
	if (unlikely(buf_len < dev->vhost_hlen)) {
		buf_offset = dev->vhost_hlen - buf_len;
		vec_idx++;
		buf_addr = buf_vec[vec_idx].buf_addr;
		buf_iova = buf_vec[vec_idx].buf_iova;
		buf_len = buf_vec[vec_idx].buf_len;
		buf_avail  = buf_len - buf_offset;
	} else if (buf_len == dev->vhost_hlen) {
		if (unlikely(++vec_idx >= nr_vec))
			goto out;
		buf_addr = buf_vec[vec_idx].buf_addr;
		buf_iova = buf_vec[vec_idx].buf_iova;
		buf_len = buf_vec[vec_idx].buf_len;

		buf_offset = 0;
		buf_avail = buf_len;
	} else {
		buf_offset = dev->vhost_hlen;
		buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
	}

	rte_prefetch0((void *)(uintptr_t)
			(buf_addr + buf_offset));

	PRINT_PACKET(dev,
			(uintptr_t)(buf_addr + buf_offset),
			(uint32_t)buf_avail, 0);

	mbuf_offset = 0;
	mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
	while (1) {
		uint64_t hpa;

		cpy_len = RTE_MIN(buf_avail, mbuf_avail);

		/*
		 * A desc buf might span two host physical pages that are
		 * not contiguous. In such a case (gpa_to_hpa returns 0),
		 * the data will be copied even though zero copy is enabled.
		 */
		if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
					buf_iova + buf_offset, cpy_len)))) {
			cur->data_len = cpy_len;
			cur->data_off = 0;
			cur->buf_addr =
				(void *)(uintptr_t)(buf_addr + buf_offset);
			cur->buf_iova = hpa;

			/*
			 * In zero copy mode, one mbuf can only reference
			 * all or part of a single desc buf.
			 */
			mbuf_avail = cpy_len;
		} else {
			if (likely(cpy_len > MAX_BATCH_LEN ||
				   vq->batch_copy_nb_elems >= vq->size ||
				   (hdr && cur == m))) {
				rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
								   mbuf_offset),
					   (void *)((uintptr_t)(buf_addr +
							   buf_offset)),
					   cpy_len);
			} else {
				batch_copy[vq->batch_copy_nb_elems].dst =
					rte_pktmbuf_mtod_offset(cur, void *,
								mbuf_offset);
				batch_copy[vq->batch_copy_nb_elems].src =
					(void *)((uintptr_t)(buf_addr +
								buf_offset));
				batch_copy[vq->batch_copy_nb_elems].len =
					cpy_len;
				vq->batch_copy_nb_elems++;
			}
		}

		mbuf_avail  -= cpy_len;
		mbuf_offset += cpy_len;
		buf_avail -= cpy_len;
		buf_offset += cpy_len;

		/* This buf has reached its end, get the next one */
		if (buf_avail == 0) {
			if (++vec_idx >= nr_vec)
				break;

			buf_addr = buf_vec[vec_idx].buf_addr;
			buf_iova = buf_vec[vec_idx].buf_iova;
			buf_len = buf_vec[vec_idx].buf_len;

			/*
			 * Prefetch the desc n + 1 buffer while the
			 * desc n buffer is being processed.
			 */
			if (vec_idx + 1 < nr_vec)
				rte_prefetch0((void *)(uintptr_t)
						buf_vec[vec_idx + 1].buf_addr);

			buf_offset = 0;
			buf_avail  = buf_len;

			PRINT_PACKET(dev, (uintptr_t)buf_addr,
					(uint32_t)buf_avail, 0);
		}

		/*
		 * This mbuf has reached its end, get a new one
		 * to hold more data.
		 */
		if (mbuf_avail == 0) {
			cur = rte_pktmbuf_alloc(mbuf_pool);
			if (unlikely(cur == NULL)) {
				RTE_LOG(ERR, VHOST_DATA, "Failed to "
					"allocate memory for mbuf.\n");
				error = -1;
				goto out;
			}
			if (unlikely(dev->dequeue_zero_copy))
				rte_mbuf_refcnt_update(cur, 1);

			prev->next = cur;
			prev->data_len = mbuf_offset;
			m->nb_segs += 1;
			m->pkt_len += mbuf_offset;
			prev = cur;

			mbuf_offset = 0;
			mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
		}
	}

	prev->data_len = mbuf_offset;
	m->pkt_len    += mbuf_offset;

	if (hdr)
		vhost_dequeue_offload(hdr, m);

out:

	return error;
}

static __rte_always_inline struct zcopy_mbuf *
get_zmbuf(struct vhost_virtqueue *vq)
{
	uint16_t i;
	uint16_t last;
	int tries = 0;

	/* search [last_zmbuf_idx, zmbuf_size) */
	i = vq->last_zmbuf_idx;
	last = vq->zmbuf_size;

again:
	for (; i < last; i++) {
		if (vq->zmbufs[i].in_use == 0) {
			vq->last_zmbuf_idx = i + 1;
			vq->zmbufs[i].in_use = 1;
			return &vq->zmbufs[i];
		}
	}

	tries++;
	if (tries == 1) {
		/* search [0, last_zmbuf_idx) */
		i = 0;
		last = vq->last_zmbuf_idx;
		goto again;
	}

	return NULL;
}

static __rte_always_inline bool
mbuf_is_consumed(struct rte_mbuf *m)
{
	while (m) {
		if (rte_mbuf_refcnt_read(m) > 1)
			return false;
		m = m->next;
	}

	return true;
}

static __rte_always_inline void
restore_mbuf(struct rte_mbuf *m)
{
	uint32_t mbuf_size, priv_size;

	while (m) {
		priv_size = rte_pktmbuf_priv_size(m->pool);
		mbuf_size = sizeof(struct rte_mbuf) + priv_size;
		/* start of buffer is after mbuf structure and priv data */

		m->buf_addr = (char *)m + mbuf_size;
		m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
		m = m->next;
	}
}

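/*
 * Split-ring dequeue: reclaim consumed zero-copy mbufs first, then pull
 * up to "count" descriptor chains from the avail ring and turn each one
 * into an mbuf.
 */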
1336 static __rte_always_inline uint16_t
1337 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
1338         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
1339 {
1340         uint16_t i;
1341         uint16_t free_entries;
1342
1343         if (unlikely(dev->dequeue_zero_copy)) {
1344                 struct zcopy_mbuf *zmbuf, *next;
1345
1346                 for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
1347                      zmbuf != NULL; zmbuf = next) {
1348                         next = TAILQ_NEXT(zmbuf, next);
1349
1350                         if (mbuf_is_consumed(zmbuf->mbuf)) {
1351                                 update_shadow_used_ring_split(vq,
1352                                                 zmbuf->desc_idx, 0);
1353                                 TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
1354                                 restore_mbuf(zmbuf->mbuf);
1355                                 rte_pktmbuf_free(zmbuf->mbuf);
1356                                 put_zmbuf(zmbuf);
1357                                 vq->nr_zmbuf -= 1;
1358                         }
1359                 }
1360
1361                 flush_shadow_used_ring_split(dev, vq);
1362                 vhost_vring_call_split(dev, vq);
1363         }
1364
1365         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1366
1367         free_entries = *((volatile uint16_t *)&vq->avail->idx) -
1368                         vq->last_avail_idx;
1369         if (free_entries == 0)
1370                 return 0;
1371
1372         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
1373
1374         count = RTE_MIN(count, MAX_PKT_BURST);
1375         count = RTE_MIN(count, free_entries);
1376         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
1377                         dev->vid, count);
1378
1379         for (i = 0; i < count; i++) {
1380                 struct buf_vector buf_vec[BUF_VECTOR_MAX];
1381                 uint16_t head_idx, dummy_len;
1382                 uint16_t nr_vec = 0;
1383                 int err;
1384
1385                 if (unlikely(fill_vec_buf_split(dev, vq,
1386                                                 vq->last_avail_idx + i,
1387                                                 &nr_vec, buf_vec,
1388                                                 &head_idx, &dummy_len,
1389                                                 VHOST_ACCESS_RO) < 0))
1390                         break;
1391
1392                 if (likely(dev->dequeue_zero_copy == 0))
1393                         update_shadow_used_ring_split(vq, head_idx, 0);
1394
1395                 rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
1396
1397                 pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
1398                 if (unlikely(pkts[i] == NULL)) {
1399                         RTE_LOG(ERR, VHOST_DATA,
1400                                 "Failed to allocate memory for mbuf.\n");
1401                         break;
1402                 }
1403
1404                 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
1405                                 mbuf_pool);
1406                 if (unlikely(err)) {
1407                         rte_pktmbuf_free(pkts[i]);
1408                         break;
1409                 }
1410
1411                 if (unlikely(dev->dequeue_zero_copy)) {
1412                         struct zcopy_mbuf *zmbuf;
1413
1414                         zmbuf = get_zmbuf(vq);
1415                         if (!zmbuf) {
1416                                 rte_pktmbuf_free(pkts[i]);
1417                                 break;
1418                         }
1419                         zmbuf->mbuf = pkts[i];
1420                         zmbuf->desc_idx = head_idx;
1421
1422                         /*
1423                          * Pin the mbuf with an extra reference; we check
1424                          * later whether the mbuf has been freed by every
1425                          * other user, i.e. we hold the last reference.
1426                          * Only then can the used ring be updated safely.
1427                          */
1428                         rte_mbuf_refcnt_update(pkts[i], 1);
1429
1430                         vq->nr_zmbuf += 1;
1431                         TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
1432                 }
1433         }
1434         vq->last_avail_idx += i;
1435
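        /*
         * Copy-path epilogue: perform the batched copies, trim the shadow
         * used ring if the loop ended early (so descriptors from a failed
         * iteration are retried on the next call), then flush and notify.
         */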
1436         if (likely(dev->dequeue_zero_copy == 0)) {
1437                 do_data_copy_dequeue(vq);
1438                 if (unlikely(i < count))
1439                         vq->shadow_used_idx = i;
1440                 flush_shadow_used_ring_split(dev, vq);
1441                 vhost_vring_call_split(dev, vq);
1442         }
1443
1444         return i;
1445 }
1446
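/*
 * A minimal sketch of the "consumed" test used by the zero-copy reclaim
 * loops above.  The real helper, mbuf_is_consumed(), lives in vhost.h; this
 * version assumes the dequeue path pinned exactly one extra reference per
 * mbuf segment, and is an illustration rather than the library code.
 */
static __rte_always_inline bool
sketch_mbuf_is_consumed(struct rte_mbuf *m)
{
        /* Walk the whole segment chain: if any segment still holds more
         * than our pinned reference, another user is not done with it. */
        while (m) {
                if (rte_mbuf_refcnt_read(m) > 1)
                        return false;
                m = m->next;
        }
        return true;
}
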
1447 static __rte_always_inline uint16_t
1448 virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
1449         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
1450 {
1451         uint16_t i;
1452
1453         rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
1454
1455         if (unlikely(dev->dequeue_zero_copy)) {
1456                 struct zcopy_mbuf *zmbuf, *next;
1457
1458                 for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
1459                      zmbuf != NULL; zmbuf = next) {
1460                         next = TAILQ_NEXT(zmbuf, next);
1461
1462                         if (mbuf_is_consumed(zmbuf->mbuf)) {
1463                                 update_shadow_used_ring_packed(vq,
1464                                                 zmbuf->desc_idx,
1465                                                 0,
1466                                                 zmbuf->desc_count);
1467
1468                                 TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
1469                                 restore_mbuf(zmbuf->mbuf);
1470                                 rte_pktmbuf_free(zmbuf->mbuf);
1471                                 put_zmbuf(zmbuf);
1472                                 vq->nr_zmbuf -= 1;
1473                         }
1474                 }
1475
1476                 flush_shadow_used_ring_packed(dev, vq);
1477                 vhost_vring_call_packed(dev, vq);
1478         }
1479
1480         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
1481
1482         count = RTE_MIN(count, MAX_PKT_BURST);
1483         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
1484                         dev->vid, count);
1485
1486         for (i = 0; i < count; i++) {
1487                 struct buf_vector buf_vec[BUF_VECTOR_MAX];
1488                 uint16_t buf_id, dummy_len;
1489                 uint16_t desc_count, nr_vec = 0;
1490                 int err;
1491
1492                 if (unlikely(fill_vec_buf_packed(dev, vq,
1493                                                 vq->last_avail_idx, &desc_count,
1494                                                 buf_vec, &nr_vec,
1495                                                 &buf_id, &dummy_len,
1496                                                 VHOST_ACCESS_RO) < 0))
1497                         break;
1498
1499                 if (likely(dev->dequeue_zero_copy == 0))
1500                         update_shadow_used_ring_packed(vq, buf_id, 0,
1501                                         desc_count);
1502
1503                 rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
1504
1505                 pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
1506                 if (unlikely(pkts[i] == NULL)) {
1507                         RTE_LOG(ERR, VHOST_DATA,
1508                                 "Failed to allocate memory for mbuf.\n");
1509                         break;
1510                 }
1511
1512                 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
1513                                 mbuf_pool);
1514                 if (unlikely(err)) {
1515                         rte_pktmbuf_free(pkts[i]);
1516                         break;
1517                 }
1518
1519                 if (unlikely(dev->dequeue_zero_copy)) {
1520                         struct zcopy_mbuf *zmbuf;
1521
1522                         zmbuf = get_zmbuf(vq);
1523                         if (!zmbuf) {
1524                                 rte_pktmbuf_free(pkts[i]);
1525                                 break;
1526                         }
1527                         zmbuf->mbuf = pkts[i];
1528                         zmbuf->desc_idx = buf_id;
1529                         zmbuf->desc_count = desc_count;
1530
1531                         /*
1532                          * Pin the mbuf with an extra reference; we check
1533                          * later whether the mbuf has been freed by every
1534                          * other user, i.e. we hold the last reference.
1535                          * Only then can the used ring be updated safely.
1536                          */
1537                         rte_mbuf_refcnt_update(pkts[i], 1);
1538
1539                         vq->nr_zmbuf += 1;
1540                         TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
1541                 }
1542
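                /*
                 * Advance past all descriptors of this buffer; wrapping
                 * around the ring end flips the avail wrap counter, as the
                 * packed ring format requires.
                 */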
1543                 vq->last_avail_idx += desc_count;
1544                 if (vq->last_avail_idx >= vq->size) {
1545                         vq->last_avail_idx -= vq->size;
1546                         vq->avail_wrap_counter ^= 1;
1547                 }
1548         }
1549
1550         if (likely(dev->dequeue_zero_copy == 0)) {
1551                 do_data_copy_dequeue(vq);
1552                 if (unlikely(i < count))
1553                         vq->shadow_used_idx = i;
1554                 flush_shadow_used_ring_packed(dev, vq);
1555                 vhost_vring_call_packed(dev, vq);
1556         }
1557
1558         return i;
1559 }
1560
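/*
 * A minimal sketch of how the avail wrap counter maintained above is used
 * to test descriptor availability in a packed ring.  It assumes the
 * vring_packed_desc layout and the VRING_DESC_F_AVAIL/VRING_DESC_F_USED
 * flag bits from vhost.h, and omits the read barriers a real
 * implementation needs.
 */
static inline bool
sketch_desc_is_avail(const struct vring_packed_desc *desc, bool wrap_counter)
{
        /* A descriptor is available when its AVAIL bit matches the driver's
         * wrap counter while its USED bit does not; each ring wrap flips
         * the counter and thus inverts the match. */
        return wrap_counter == !!(desc->flags & VRING_DESC_F_AVAIL) &&
                wrap_counter != !!(desc->flags & VRING_DESC_F_USED);
}
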
1561 uint16_t
1562 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
1563         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
1564 {
1565         struct virtio_net *dev;
1566         struct rte_mbuf *rarp_mbuf = NULL;
1567         struct vhost_virtqueue *vq;
1568
1569         dev = get_device(vid);
1570         if (!dev)
1571                 return 0;
1572
1573         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
1574                 RTE_LOG(ERR, VHOST_DATA,
1575                         "(%d) %s: built-in vhost net backend is disabled.\n",
1576                         dev->vid, __func__);
1577                 return 0;
1578         }
1579
1580         if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
1581                 RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
1582                         dev->vid, __func__, queue_id);
1583                 return 0;
1584         }
1585
1586         vq = dev->virtqueue[queue_id];
1587
1588         if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
1589                 return 0;
1590
1591         if (unlikely(vq->enabled == 0)) {
1592                 count = 0;
1593                 goto out_access_unlock;
1594         }
1595
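        /*
         * When the guest uses an IOMMU, hold the IOTLB read lock so that
         * address translations stay valid for the whole dequeue.
         */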
1596         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1597                 vhost_user_iotlb_rd_lock(vq);
1598
1599         if (unlikely(vq->access_ok == 0))
1600                 if (unlikely(vring_translate(dev, vq) < 0)) {
1601                         count = 0;
1602                         goto out;
1603                 }
1604
1605         /*
1606          * Construct a RARP broadcast packet and inject it into the "pkts"
1607          * array, so that it looks like the guest actually sent it.
1608          *
1609          * Check user_send_rarp() for more information.
1610          *
1611          * broadcast_rarp shares a cacheline in the virtio_net structure
1612          * with some fields that are accessed during enqueue, and
1613          * rte_atomic16_cmpset() causes a write if it uses cmpxchg. This
1614          * could result in false sharing between enqueue and dequeue.
1615          *
1616          * Prevent unnecessary false sharing by reading broadcast_rarp first
1617          * and only performing the cmpset if the read indicates it is likely
1618          * to be set.
1619          */
1620         if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) &&
1621                         rte_atomic16_cmpset((volatile uint16_t *)
1622                                 &dev->broadcast_rarp.cnt, 1, 0))) {
1623
1624                 rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
1625                 if (rarp_mbuf == NULL) {
1626                         RTE_LOG(ERR, VHOST_DATA,
1627                                 "Failed to make RARP packet.\n");
1628                         count = 0;
1629                         goto out;
1630                 }
1631                 count -= 1; /* reserve pkts[0] for the RARP packet */
1632         }
1633
1634         if (vq_is_packed(dev))
1635                 count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count);
1636         else
1637                 count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count);
1638
1639 out:
1640         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1641                 vhost_user_iotlb_rd_unlock(vq);
1642
1643 out_access_unlock:
1644         rte_spinlock_unlock(&vq->access_lock);
1645
1646         if (unlikely(rarp_mbuf != NULL)) {
1647                 /*
1648                  * Inject it at the head of the "pkts" array, so that the
1649                  * switch's MAC learning table gets updated first.
1650                  */
1651                 memmove(&pkts[1], pkts, count * sizeof(struct rte_mbuf *));
1652                 pkts[0] = rarp_mbuf;
1653                 count += 1;
1654         }
1655
1656         return count;
1657 }
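
/*
 * A hedged usage sketch for rte_vhost_dequeue_burst(): drain one burst from
 * virtqueue 1 (a valid dequeue index, see is_valid_virt_queue_idx()) and
 * release the mbufs.  "vid" and "mbuf_pool" are assumed to come from the
 * application's vhost device registration and mempool setup.
 */
static void
sketch_drain_vring(int vid, struct rte_mempool *mbuf_pool)
{
        struct rte_mbuf *pkts[MAX_PKT_BURST];
        uint16_t i, nb;

        nb = rte_vhost_dequeue_burst(vid, 1, mbuf_pool, pkts, MAX_PKT_BURST);
        for (i = 0; i < nb; i++) {
                /* A real application would forward the packet here. */
                rte_pktmbuf_free(pkts[i]);
        }
}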