vhost: batch used descs chains write-back with packed ring
[dpdk.git] / lib / librte_vhost / virtio_net.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <stdint.h>
6 #include <stdbool.h>
7 #include <linux/virtio_net.h>
8
9 #include <rte_mbuf.h>
10 #include <rte_memcpy.h>
11 #include <rte_ether.h>
12 #include <rte_ip.h>
13 #include <rte_vhost.h>
14 #include <rte_tcp.h>
15 #include <rte_udp.h>
16 #include <rte_sctp.h>
17 #include <rte_arp.h>
18 #include <rte_spinlock.h>
19 #include <rte_malloc.h>
20
21 #include "iotlb.h"
22 #include "vhost.h"
23
24 #define MAX_PKT_BURST 32
25
26 #define MAX_BATCH_LEN 256
27
28 static  __rte_always_inline bool
29 rxvq_is_mergeable(struct virtio_net *dev)
30 {
31         return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
32 }
33
34 static bool
35 is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
36 {
37         return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
38 }
39
40 static __rte_always_inline void
41 do_flush_shadow_used_ring_split(struct virtio_net *dev,
42                         struct vhost_virtqueue *vq,
43                         uint16_t to, uint16_t from, uint16_t size)
44 {
45         rte_memcpy(&vq->used->ring[to],
46                         &vq->shadow_used_split[from],
47                         size * sizeof(struct vring_used_elem));
48         vhost_log_cache_used_vring(dev, vq,
49                         offsetof(struct vring_used, ring[to]),
50                         size * sizeof(struct vring_used_elem));
51 }
52
53 static __rte_always_inline void
54 flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
55 {
56         uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
57
58         if (used_idx + vq->shadow_used_idx <= vq->size) {
59                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
60                                           vq->shadow_used_idx);
61         } else {
62                 uint16_t size;
63
64                 /* update used ring interval [used_idx, vq->size] */
65                 size = vq->size - used_idx;
66                 do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);
67
68                 /* update the left half used ring interval [0, left_size] */
69                 do_flush_shadow_used_ring_split(dev, vq, 0, size,
70                                           vq->shadow_used_idx - size);
71         }
72         vq->last_used_idx += vq->shadow_used_idx;
73
74         rte_smp_wmb();
75
76         vhost_log_cache_sync(dev, vq);
77
78         *(volatile uint16_t *)&vq->used->idx += vq->shadow_used_idx;
79         vq->shadow_used_idx = 0;
80         vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
81                 sizeof(vq->used->idx));
82 }
83
84 static __rte_always_inline void
85 update_shadow_used_ring_split(struct vhost_virtqueue *vq,
86                          uint16_t desc_idx, uint32_t len)
87 {
88         uint16_t i = vq->shadow_used_idx++;
89
90         vq->shadow_used_split[i].id  = desc_idx;
91         vq->shadow_used_split[i].len = len;
92 }
93
94 static __rte_always_inline void
95 flush_shadow_used_ring_packed(struct virtio_net *dev,
96                         struct vhost_virtqueue *vq)
97 {
98         int i;
99         uint16_t used_idx = vq->last_used_idx;
100         uint16_t head_idx = vq->last_used_idx;
101         uint16_t head_flags = 0;
102
103         /* Split loop in two to save memory barriers */
104         for (i = 0; i < vq->shadow_used_idx; i++) {
105                 vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id;
106                 vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len;
107
108                 used_idx += vq->shadow_used_packed[i].count;
109                 if (used_idx >= vq->size)
110                         used_idx -= vq->size;
111         }
112
113         rte_smp_wmb();
114
115         for (i = 0; i < vq->shadow_used_idx; i++) {
116                 uint16_t flags;
117
118                 if (vq->shadow_used_packed[i].len)
119                         flags = VRING_DESC_F_WRITE;
120                 else
121                         flags = 0;
122
123                 if (vq->used_wrap_counter) {
124                         flags |= VRING_DESC_F_USED;
125                         flags |= VRING_DESC_F_AVAIL;
126                 } else {
127                         flags &= ~VRING_DESC_F_USED;
128                         flags &= ~VRING_DESC_F_AVAIL;
129                 }
130
131                 if (i > 0) {
132                         vq->desc_packed[vq->last_used_idx].flags = flags;
133
134                         vhost_log_cache_used_vring(dev, vq,
135                                         vq->last_used_idx *
136                                         sizeof(struct vring_packed_desc),
137                                         sizeof(struct vring_packed_desc));
138                 } else {
139                         head_idx = vq->last_used_idx;
140                         head_flags = flags;
141                 }
142
143                 vq->last_used_idx += vq->shadow_used_packed[i].count;
144                 if (vq->last_used_idx >= vq->size) {
145                         vq->used_wrap_counter ^= 1;
146                         vq->last_used_idx -= vq->size;
147                 }
148         }
149
150         vq->desc_packed[head_idx].flags = head_flags;
151
152         vhost_log_cache_used_vring(dev, vq,
153                                 head_idx *
154                                 sizeof(struct vring_packed_desc),
155                                 sizeof(struct vring_packed_desc));
156
157         vq->shadow_used_idx = 0;
158         vhost_log_cache_sync(dev, vq);
159 }
160
161 static __rte_always_inline void
162 update_shadow_used_ring_packed(struct vhost_virtqueue *vq,
163                          uint16_t desc_idx, uint32_t len, uint16_t count)
164 {
165         uint16_t i = vq->shadow_used_idx++;
166
167         vq->shadow_used_packed[i].id  = desc_idx;
168         vq->shadow_used_packed[i].len = len;
169         vq->shadow_used_packed[i].count = count;
170 }
171
172 static inline void
173 do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
174 {
175         struct batch_copy_elem *elem = vq->batch_copy_elems;
176         uint16_t count = vq->batch_copy_nb_elems;
177         int i;
178
179         for (i = 0; i < count; i++) {
180                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
181                 vhost_log_cache_write(dev, vq, elem[i].log_addr, elem[i].len);
182                 PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
183         }
184
185         vq->batch_copy_nb_elems = 0;
186 }
187
188 static inline void
189 do_data_copy_dequeue(struct vhost_virtqueue *vq)
190 {
191         struct batch_copy_elem *elem = vq->batch_copy_elems;
192         uint16_t count = vq->batch_copy_nb_elems;
193         int i;
194
195         for (i = 0; i < count; i++)
196                 rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
197
198         vq->batch_copy_nb_elems = 0;
199 }
200
201 /* avoid write operation when necessary, to lessen cache issues */
202 #define ASSIGN_UNLESS_EQUAL(var, val) do {      \
203         if ((var) != (val))                     \
204                 (var) = (val);                  \
205 } while (0)
206
207 static __rte_always_inline void
208 virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
209 {
210         uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;
211
212         if (m_buf->ol_flags & PKT_TX_TCP_SEG)
213                 csum_l4 |= PKT_TX_TCP_CKSUM;
214
215         if (csum_l4) {
216                 net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
217                 net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
218
219                 switch (csum_l4) {
220                 case PKT_TX_TCP_CKSUM:
221                         net_hdr->csum_offset = (offsetof(struct tcp_hdr,
222                                                 cksum));
223                         break;
224                 case PKT_TX_UDP_CKSUM:
225                         net_hdr->csum_offset = (offsetof(struct udp_hdr,
226                                                 dgram_cksum));
227                         break;
228                 case PKT_TX_SCTP_CKSUM:
229                         net_hdr->csum_offset = (offsetof(struct sctp_hdr,
230                                                 cksum));
231                         break;
232                 }
233         } else {
234                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
235                 ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
236                 ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
237         }
238
239         /* IP cksum verification cannot be bypassed, then calculate here */
240         if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
241                 struct ipv4_hdr *ipv4_hdr;
242
243                 ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct ipv4_hdr *,
244                                                    m_buf->l2_len);
245                 ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
246         }
247
248         if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
249                 if (m_buf->ol_flags & PKT_TX_IPV4)
250                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
251                 else
252                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
253                 net_hdr->gso_size = m_buf->tso_segsz;
254                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
255                                         + m_buf->l4_len;
256         } else if (m_buf->ol_flags & PKT_TX_UDP_SEG) {
257                 net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
258                 net_hdr->gso_size = m_buf->tso_segsz;
259                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
260                         m_buf->l4_len;
261         } else {
262                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
263                 ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
264                 ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
265         }
266 }
267
268 static __rte_always_inline int
269 map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
270                 struct buf_vector *buf_vec, uint16_t *vec_idx,
271                 uint64_t desc_iova, uint64_t desc_len, uint8_t perm)
272 {
273         uint16_t vec_id = *vec_idx;
274
275         while (desc_len) {
276                 uint64_t desc_addr;
277                 uint64_t desc_chunck_len = desc_len;
278
279                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
280                         return -1;
281
282                 desc_addr = vhost_iova_to_vva(dev, vq,
283                                 desc_iova,
284                                 &desc_chunck_len,
285                                 perm);
286                 if (unlikely(!desc_addr))
287                         return -1;
288
289                 buf_vec[vec_id].buf_iova = desc_iova;
290                 buf_vec[vec_id].buf_addr = desc_addr;
291                 buf_vec[vec_id].buf_len  = desc_chunck_len;
292
293                 desc_len -= desc_chunck_len;
294                 desc_iova += desc_chunck_len;
295                 vec_id++;
296         }
297         *vec_idx = vec_id;
298
299         return 0;
300 }
301
302 static __rte_always_inline int
303 fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
304                          uint32_t avail_idx, uint16_t *vec_idx,
305                          struct buf_vector *buf_vec, uint16_t *desc_chain_head,
306                          uint32_t *desc_chain_len, uint8_t perm)
307 {
308         uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
309         uint16_t vec_id = *vec_idx;
310         uint32_t len    = 0;
311         uint64_t dlen;
312         struct vring_desc *descs = vq->desc;
313         struct vring_desc *idesc = NULL;
314
315         *desc_chain_head = idx;
316
317         if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
318                 dlen = vq->desc[idx].len;
319                 descs = (struct vring_desc *)(uintptr_t)
320                         vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
321                                                 &dlen,
322                                                 VHOST_ACCESS_RO);
323                 if (unlikely(!descs))
324                         return -1;
325
326                 if (unlikely(dlen < vq->desc[idx].len)) {
327                         /*
328                          * The indirect desc table is not contiguous
329                          * in process VA space, we have to copy it.
330                          */
331                         idesc = alloc_copy_ind_table(dev, vq,
332                                         vq->desc[idx].addr, vq->desc[idx].len);
333                         if (unlikely(!idesc))
334                                 return -1;
335
336                         descs = idesc;
337                 }
338
339                 idx = 0;
340         }
341
342         while (1) {
343                 if (unlikely(idx >= vq->size)) {
344                         free_ind_table(idesc);
345                         return -1;
346                 }
347
348                 len += descs[idx].len;
349
350                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
351                                                 descs[idx].addr, descs[idx].len,
352                                                 perm))) {
353                         free_ind_table(idesc);
354                         return -1;
355                 }
356
357                 if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
358                         break;
359
360                 idx = descs[idx].next;
361         }
362
363         *desc_chain_len = len;
364         *vec_idx = vec_id;
365
366         if (unlikely(!!idesc))
367                 free_ind_table(idesc);
368
369         return 0;
370 }
371
372 /*
373  * Returns -1 on fail, 0 on success
374  */
375 static inline int
376 reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
377                                 uint32_t size, struct buf_vector *buf_vec,
378                                 uint16_t *num_buffers, uint16_t avail_head,
379                                 uint16_t *nr_vec)
380 {
381         uint16_t cur_idx;
382         uint16_t vec_idx = 0;
383         uint16_t max_tries, tries = 0;
384
385         uint16_t head_idx = 0;
386         uint32_t len = 0;
387
388         *num_buffers = 0;
389         cur_idx  = vq->last_avail_idx;
390
391         if (rxvq_is_mergeable(dev))
392                 max_tries = vq->size - 1;
393         else
394                 max_tries = 1;
395
396         while (size > 0) {
397                 if (unlikely(cur_idx == avail_head))
398                         return -1;
399                 /*
400                  * if we tried all available ring items, and still
401                  * can't get enough buf, it means something abnormal
402                  * happened.
403                  */
404                 if (unlikely(++tries > max_tries))
405                         return -1;
406
407                 if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
408                                                 &vec_idx, buf_vec,
409                                                 &head_idx, &len,
410                                                 VHOST_ACCESS_RW) < 0))
411                         return -1;
412                 len = RTE_MIN(len, size);
413                 update_shadow_used_ring_split(vq, head_idx, len);
414                 size -= len;
415
416                 cur_idx++;
417                 *num_buffers += 1;
418         }
419
420         *nr_vec = vec_idx;
421
422         return 0;
423 }
424
425 static __rte_always_inline int
426 fill_vec_buf_packed_indirect(struct virtio_net *dev,
427                         struct vhost_virtqueue *vq,
428                         struct vring_packed_desc *desc, uint16_t *vec_idx,
429                         struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
430 {
431         uint16_t i;
432         uint32_t nr_descs;
433         uint16_t vec_id = *vec_idx;
434         uint64_t dlen;
435         struct vring_packed_desc *descs, *idescs = NULL;
436
437         dlen = desc->len;
438         descs = (struct vring_packed_desc *)(uintptr_t)
439                 vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
440         if (unlikely(!descs))
441                 return -1;
442
443         if (unlikely(dlen < desc->len)) {
444                 /*
445                  * The indirect desc table is not contiguous
446                  * in process VA space, we have to copy it.
447                  */
448                 idescs = alloc_copy_ind_table(dev, vq, desc->addr, desc->len);
449                 if (unlikely(!idescs))
450                         return -1;
451
452                 descs = idescs;
453         }
454
455         nr_descs =  desc->len / sizeof(struct vring_packed_desc);
456         if (unlikely(nr_descs >= vq->size)) {
457                 free_ind_table(idescs);
458                 return -1;
459         }
460
461         for (i = 0; i < nr_descs; i++) {
462                 if (unlikely(vec_id >= BUF_VECTOR_MAX)) {
463                         free_ind_table(idescs);
464                         return -1;
465                 }
466
467                 *len += descs[i].len;
468                 if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
469                                                 descs[i].addr, descs[i].len,
470                                                 perm)))
471                         return -1;
472         }
473         *vec_idx = vec_id;
474
475         if (unlikely(!!idescs))
476                 free_ind_table(idescs);
477
478         return 0;
479 }
480
481 static __rte_always_inline int
482 fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
483                                 uint16_t avail_idx, uint16_t *desc_count,
484                                 struct buf_vector *buf_vec, uint16_t *vec_idx,
485                                 uint16_t *buf_id, uint32_t *len, uint8_t perm)
486 {
487         bool wrap_counter = vq->avail_wrap_counter;
488         struct vring_packed_desc *descs = vq->desc_packed;
489         uint16_t vec_id = *vec_idx;
490
491         if (avail_idx < vq->last_avail_idx)
492                 wrap_counter ^= 1;
493
494         if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter)))
495                 return -1;
496
497         /*
498          * The ordering between desc flags and desc
499          * content reads need to be enforced.
500          */
501         rte_smp_rmb();
502
503         *desc_count = 0;
504         *len = 0;
505
506         while (1) {
507                 if (unlikely(vec_id >= BUF_VECTOR_MAX))
508                         return -1;
509
510                 *desc_count += 1;
511                 *buf_id = descs[avail_idx].id;
512
513                 if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) {
514                         if (unlikely(fill_vec_buf_packed_indirect(dev, vq,
515                                                         &descs[avail_idx],
516                                                         &vec_id, buf_vec,
517                                                         len, perm) < 0))
518                                 return -1;
519                 } else {
520                         *len += descs[avail_idx].len;
521
522                         if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
523                                                         descs[avail_idx].addr,
524                                                         descs[avail_idx].len,
525                                                         perm)))
526                                 return -1;
527                 }
528
529                 if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0)
530                         break;
531
532                 if (++avail_idx >= vq->size) {
533                         avail_idx -= vq->size;
534                         wrap_counter ^= 1;
535                 }
536         }
537
538         *vec_idx = vec_id;
539
540         return 0;
541 }
542
543 /*
544  * Returns -1 on fail, 0 on success
545  */
546 static inline int
547 reserve_avail_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
548                                 uint32_t size, struct buf_vector *buf_vec,
549                                 uint16_t *nr_vec, uint16_t *num_buffers,
550                                 uint16_t *nr_descs)
551 {
552         uint16_t avail_idx;
553         uint16_t vec_idx = 0;
554         uint16_t max_tries, tries = 0;
555
556         uint16_t buf_id = 0;
557         uint32_t len = 0;
558         uint16_t desc_count;
559
560         *num_buffers = 0;
561         avail_idx = vq->last_avail_idx;
562
563         if (rxvq_is_mergeable(dev))
564                 max_tries = vq->size - 1;
565         else
566                 max_tries = 1;
567
568         while (size > 0) {
569                 /*
570                  * if we tried all available ring items, and still
571                  * can't get enough buf, it means something abnormal
572                  * happened.
573                  */
574                 if (unlikely(++tries > max_tries))
575                         return -1;
576
577                 if (unlikely(fill_vec_buf_packed(dev, vq,
578                                                 avail_idx, &desc_count,
579                                                 buf_vec, &vec_idx,
580                                                 &buf_id, &len,
581                                                 VHOST_ACCESS_RW) < 0))
582                         return -1;
583
584                 len = RTE_MIN(len, size);
585                 update_shadow_used_ring_packed(vq, buf_id, len, desc_count);
586                 size -= len;
587
588                 avail_idx += desc_count;
589                 if (avail_idx >= vq->size)
590                         avail_idx -= vq->size;
591
592                 *nr_descs += desc_count;
593                 *num_buffers += 1;
594         }
595
596         *nr_vec = vec_idx;
597
598         return 0;
599 }
600
601 static __rte_always_inline int
602 copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
603                             struct rte_mbuf *m, struct buf_vector *buf_vec,
604                             uint16_t nr_vec, uint16_t num_buffers)
605 {
606         uint32_t vec_idx = 0;
607         uint32_t mbuf_offset, mbuf_avail;
608         uint32_t buf_offset, buf_avail;
609         uint64_t buf_addr, buf_iova, buf_len;
610         uint32_t cpy_len;
611         uint64_t hdr_addr;
612         struct rte_mbuf *hdr_mbuf;
613         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
614         struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
615         int error = 0;
616
617         if (unlikely(m == NULL)) {
618                 error = -1;
619                 goto out;
620         }
621
622         buf_addr = buf_vec[vec_idx].buf_addr;
623         buf_iova = buf_vec[vec_idx].buf_iova;
624         buf_len = buf_vec[vec_idx].buf_len;
625
626         if (nr_vec > 1)
627                 rte_prefetch0((void *)(uintptr_t)buf_vec[1].buf_addr);
628
629         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
630                 error = -1;
631                 goto out;
632         }
633
634         hdr_mbuf = m;
635         hdr_addr = buf_addr;
636         if (unlikely(buf_len < dev->vhost_hlen))
637                 hdr = &tmp_hdr;
638         else
639                 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
640
641         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
642                 dev->vid, num_buffers);
643
644         if (unlikely(buf_len < dev->vhost_hlen)) {
645                 buf_offset = dev->vhost_hlen - buf_len;
646                 vec_idx++;
647                 buf_addr = buf_vec[vec_idx].buf_addr;
648                 buf_iova = buf_vec[vec_idx].buf_iova;
649                 buf_len = buf_vec[vec_idx].buf_len;
650                 buf_avail = buf_len - buf_offset;
651         } else {
652                 buf_offset = dev->vhost_hlen;
653                 buf_avail = buf_len - dev->vhost_hlen;
654         }
655
656         mbuf_avail  = rte_pktmbuf_data_len(m);
657         mbuf_offset = 0;
658         while (mbuf_avail != 0 || m->next != NULL) {
659                 /* done with current buf, get the next one */
660                 if (buf_avail == 0) {
661                         vec_idx++;
662                         if (unlikely(vec_idx >= nr_vec)) {
663                                 error = -1;
664                                 goto out;
665                         }
666
667                         buf_addr = buf_vec[vec_idx].buf_addr;
668                         buf_iova = buf_vec[vec_idx].buf_iova;
669                         buf_len = buf_vec[vec_idx].buf_len;
670
671                         /* Prefetch next buffer address. */
672                         if (vec_idx + 1 < nr_vec)
673                                 rte_prefetch0((void *)(uintptr_t)
674                                                 buf_vec[vec_idx + 1].buf_addr);
675                         buf_offset = 0;
676                         buf_avail  = buf_len;
677                 }
678
679                 /* done with current mbuf, get the next one */
680                 if (mbuf_avail == 0) {
681                         m = m->next;
682
683                         mbuf_offset = 0;
684                         mbuf_avail  = rte_pktmbuf_data_len(m);
685                 }
686
687                 if (hdr_addr) {
688                         virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
689                         if (rxvq_is_mergeable(dev))
690                                 ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
691                                                 num_buffers);
692
693                         if (unlikely(hdr == &tmp_hdr)) {
694                                 uint64_t len;
695                                 uint64_t remain = dev->vhost_hlen;
696                                 uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
697                                 uint64_t iova = buf_vec[0].buf_iova;
698                                 uint16_t hdr_vec_idx = 0;
699
700                                 while (remain) {
701                                         len = RTE_MIN(remain,
702                                                 buf_vec[hdr_vec_idx].buf_len);
703                                         dst = buf_vec[hdr_vec_idx].buf_addr;
704                                         rte_memcpy((void *)(uintptr_t)dst,
705                                                         (void *)(uintptr_t)src,
706                                                         len);
707
708                                         PRINT_PACKET(dev, (uintptr_t)dst,
709                                                         (uint32_t)len, 0);
710                                         vhost_log_cache_write(dev, vq,
711                                                         iova, len);
712
713                                         remain -= len;
714                                         iova += len;
715                                         src += len;
716                                         hdr_vec_idx++;
717                                 }
718                         } else {
719                                 PRINT_PACKET(dev, (uintptr_t)hdr_addr,
720                                                 dev->vhost_hlen, 0);
721                                 vhost_log_cache_write(dev, vq,
722                                                 buf_vec[0].buf_iova,
723                                                 dev->vhost_hlen);
724                         }
725
726                         hdr_addr = 0;
727                 }
728
729                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
730
731                 if (likely(cpy_len > MAX_BATCH_LEN ||
732                                         vq->batch_copy_nb_elems >= vq->size)) {
733                         rte_memcpy((void *)((uintptr_t)(buf_addr + buf_offset)),
734                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
735                                 cpy_len);
736                         vhost_log_cache_write(dev, vq, buf_iova + buf_offset,
737                                         cpy_len);
738                         PRINT_PACKET(dev, (uintptr_t)(buf_addr + buf_offset),
739                                 cpy_len, 0);
740                 } else {
741                         batch_copy[vq->batch_copy_nb_elems].dst =
742                                 (void *)((uintptr_t)(buf_addr + buf_offset));
743                         batch_copy[vq->batch_copy_nb_elems].src =
744                                 rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
745                         batch_copy[vq->batch_copy_nb_elems].log_addr =
746                                 buf_iova + buf_offset;
747                         batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
748                         vq->batch_copy_nb_elems++;
749                 }
750
751                 mbuf_avail  -= cpy_len;
752                 mbuf_offset += cpy_len;
753                 buf_avail  -= cpy_len;
754                 buf_offset += cpy_len;
755         }
756
757 out:
758
759         return error;
760 }
761
762 static __rte_always_inline uint32_t
763 virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
764         struct rte_mbuf **pkts, uint32_t count)
765 {
766         uint32_t pkt_idx = 0;
767         uint16_t num_buffers;
768         struct buf_vector buf_vec[BUF_VECTOR_MAX];
769         uint16_t avail_head;
770
771         avail_head = *((volatile uint16_t *)&vq->avail->idx);
772
773         /*
774          * The ordering between avail index and
775          * desc reads needs to be enforced.
776          */
777         rte_smp_rmb();
778
779         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
780
781         for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
782                 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
783                 uint16_t nr_vec = 0;
784
785                 if (unlikely(reserve_avail_buf_split(dev, vq,
786                                                 pkt_len, buf_vec, &num_buffers,
787                                                 avail_head, &nr_vec) < 0)) {
788                         VHOST_LOG_DEBUG(VHOST_DATA,
789                                 "(%d) failed to get enough desc from vring\n",
790                                 dev->vid);
791                         vq->shadow_used_idx -= num_buffers;
792                         break;
793                 }
794
795                 rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
796
797                 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
798                         dev->vid, vq->last_avail_idx,
799                         vq->last_avail_idx + num_buffers);
800
801                 if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
802                                                 buf_vec, nr_vec,
803                                                 num_buffers) < 0) {
804                         vq->shadow_used_idx -= num_buffers;
805                         break;
806                 }
807
808                 vq->last_avail_idx += num_buffers;
809         }
810
811         do_data_copy_enqueue(dev, vq);
812
813         if (likely(vq->shadow_used_idx)) {
814                 flush_shadow_used_ring_split(dev, vq);
815                 vhost_vring_call_split(dev, vq);
816         }
817
818         return pkt_idx;
819 }
820
821 static __rte_always_inline uint32_t
822 virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
823         struct rte_mbuf **pkts, uint32_t count)
824 {
825         uint32_t pkt_idx = 0;
826         uint16_t num_buffers;
827         struct buf_vector buf_vec[BUF_VECTOR_MAX];
828
829         for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
830                 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
831                 uint16_t nr_vec = 0;
832                 uint16_t nr_descs = 0;
833
834                 if (unlikely(reserve_avail_buf_packed(dev, vq,
835                                                 pkt_len, buf_vec, &nr_vec,
836                                                 &num_buffers, &nr_descs) < 0)) {
837                         VHOST_LOG_DEBUG(VHOST_DATA,
838                                 "(%d) failed to get enough desc from vring\n",
839                                 dev->vid);
840                         vq->shadow_used_idx -= num_buffers;
841                         break;
842                 }
843
844                 rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
845
846                 VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
847                         dev->vid, vq->last_avail_idx,
848                         vq->last_avail_idx + num_buffers);
849
850                 if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
851                                                 buf_vec, nr_vec,
852                                                 num_buffers) < 0) {
853                         vq->shadow_used_idx -= num_buffers;
854                         break;
855                 }
856
857                 vq->last_avail_idx += nr_descs;
858                 if (vq->last_avail_idx >= vq->size) {
859                         vq->last_avail_idx -= vq->size;
860                         vq->avail_wrap_counter ^= 1;
861                 }
862         }
863
864         do_data_copy_enqueue(dev, vq);
865
866         if (likely(vq->shadow_used_idx)) {
867                 flush_shadow_used_ring_packed(dev, vq);
868                 vhost_vring_call_packed(dev, vq);
869         }
870
871         return pkt_idx;
872 }
873
874 static __rte_always_inline uint32_t
875 virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
876         struct rte_mbuf **pkts, uint32_t count)
877 {
878         struct vhost_virtqueue *vq;
879         uint32_t nb_tx = 0;
880
881         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
882         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
883                 RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
884                         dev->vid, __func__, queue_id);
885                 return 0;
886         }
887
888         vq = dev->virtqueue[queue_id];
889
890         rte_spinlock_lock(&vq->access_lock);
891
892         if (unlikely(vq->enabled == 0))
893                 goto out_access_unlock;
894
895         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
896                 vhost_user_iotlb_rd_lock(vq);
897
898         if (unlikely(vq->access_ok == 0))
899                 if (unlikely(vring_translate(dev, vq) < 0))
900                         goto out;
901
902         count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
903         if (count == 0)
904                 goto out;
905
906         if (vq_is_packed(dev))
907                 nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
908         else
909                 nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);
910
911 out:
912         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
913                 vhost_user_iotlb_rd_unlock(vq);
914
915 out_access_unlock:
916         rte_spinlock_unlock(&vq->access_lock);
917
918         return nb_tx;
919 }
920
921 uint16_t
922 rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
923         struct rte_mbuf **pkts, uint16_t count)
924 {
925         struct virtio_net *dev = get_device(vid);
926
927         if (!dev)
928                 return 0;
929
930         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
931                 RTE_LOG(ERR, VHOST_DATA,
932                         "(%d) %s: built-in vhost net backend is disabled.\n",
933                         dev->vid, __func__);
934                 return 0;
935         }
936
937         return virtio_dev_rx(dev, queue_id, pkts, count);
938 }
939
940 static inline bool
941 virtio_net_with_host_offload(struct virtio_net *dev)
942 {
943         if (dev->features &
944                         ((1ULL << VIRTIO_NET_F_CSUM) |
945                          (1ULL << VIRTIO_NET_F_HOST_ECN) |
946                          (1ULL << VIRTIO_NET_F_HOST_TSO4) |
947                          (1ULL << VIRTIO_NET_F_HOST_TSO6) |
948                          (1ULL << VIRTIO_NET_F_HOST_UFO)))
949                 return true;
950
951         return false;
952 }
953
954 static void
955 parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
956 {
957         struct ipv4_hdr *ipv4_hdr;
958         struct ipv6_hdr *ipv6_hdr;
959         void *l3_hdr = NULL;
960         struct ether_hdr *eth_hdr;
961         uint16_t ethertype;
962
963         eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
964
965         m->l2_len = sizeof(struct ether_hdr);
966         ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
967
968         if (ethertype == ETHER_TYPE_VLAN) {
969                 struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
970
971                 m->l2_len += sizeof(struct vlan_hdr);
972                 ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
973         }
974
975         l3_hdr = (char *)eth_hdr + m->l2_len;
976
977         switch (ethertype) {
978         case ETHER_TYPE_IPv4:
979                 ipv4_hdr = l3_hdr;
980                 *l4_proto = ipv4_hdr->next_proto_id;
981                 m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
982                 *l4_hdr = (char *)l3_hdr + m->l3_len;
983                 m->ol_flags |= PKT_TX_IPV4;
984                 break;
985         case ETHER_TYPE_IPv6:
986                 ipv6_hdr = l3_hdr;
987                 *l4_proto = ipv6_hdr->proto;
988                 m->l3_len = sizeof(struct ipv6_hdr);
989                 *l4_hdr = (char *)l3_hdr + m->l3_len;
990                 m->ol_flags |= PKT_TX_IPV6;
991                 break;
992         default:
993                 m->l3_len = 0;
994                 *l4_proto = 0;
995                 *l4_hdr = NULL;
996                 break;
997         }
998 }
999
1000 static __rte_always_inline void
1001 vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
1002 {
1003         uint16_t l4_proto = 0;
1004         void *l4_hdr = NULL;
1005         struct tcp_hdr *tcp_hdr = NULL;
1006
1007         if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
1008                 return;
1009
1010         parse_ethernet(m, &l4_proto, &l4_hdr);
1011         if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1012                 if (hdr->csum_start == (m->l2_len + m->l3_len)) {
1013                         switch (hdr->csum_offset) {
1014                         case (offsetof(struct tcp_hdr, cksum)):
1015                                 if (l4_proto == IPPROTO_TCP)
1016                                         m->ol_flags |= PKT_TX_TCP_CKSUM;
1017                                 break;
1018                         case (offsetof(struct udp_hdr, dgram_cksum)):
1019                                 if (l4_proto == IPPROTO_UDP)
1020                                         m->ol_flags |= PKT_TX_UDP_CKSUM;
1021                                 break;
1022                         case (offsetof(struct sctp_hdr, cksum)):
1023                                 if (l4_proto == IPPROTO_SCTP)
1024                                         m->ol_flags |= PKT_TX_SCTP_CKSUM;
1025                                 break;
1026                         default:
1027                                 break;
1028                         }
1029                 }
1030         }
1031
1032         if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1033                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1034                 case VIRTIO_NET_HDR_GSO_TCPV4:
1035                 case VIRTIO_NET_HDR_GSO_TCPV6:
1036                         tcp_hdr = l4_hdr;
1037                         m->ol_flags |= PKT_TX_TCP_SEG;
1038                         m->tso_segsz = hdr->gso_size;
1039                         m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
1040                         break;
1041                 case VIRTIO_NET_HDR_GSO_UDP:
1042                         m->ol_flags |= PKT_TX_UDP_SEG;
1043                         m->tso_segsz = hdr->gso_size;
1044                         m->l4_len = sizeof(struct udp_hdr);
1045                         break;
1046                 default:
1047                         RTE_LOG(WARNING, VHOST_DATA,
1048                                 "unsupported gso type %u.\n", hdr->gso_type);
1049                         break;
1050                 }
1051         }
1052 }
1053
1054 static __rte_always_inline void
1055 put_zmbuf(struct zcopy_mbuf *zmbuf)
1056 {
1057         zmbuf->in_use = 0;
1058 }
1059
1060 static __rte_always_inline int
1061 copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
1062                   struct buf_vector *buf_vec, uint16_t nr_vec,
1063                   struct rte_mbuf *m, struct rte_mempool *mbuf_pool)
1064 {
1065         uint32_t buf_avail, buf_offset;
1066         uint64_t buf_addr, buf_iova, buf_len;
1067         uint32_t mbuf_avail, mbuf_offset;
1068         uint32_t cpy_len;
1069         struct rte_mbuf *cur = m, *prev = m;
1070         struct virtio_net_hdr tmp_hdr;
1071         struct virtio_net_hdr *hdr = NULL;
1072         /* A counter to avoid desc dead loop chain */
1073         uint16_t vec_idx = 0;
1074         struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
1075         int error = 0;
1076
1077         buf_addr = buf_vec[vec_idx].buf_addr;
1078         buf_iova = buf_vec[vec_idx].buf_iova;
1079         buf_len = buf_vec[vec_idx].buf_len;
1080
1081         if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
1082                 error = -1;
1083                 goto out;
1084         }
1085
1086         if (likely(nr_vec > 1))
1087                 rte_prefetch0((void *)(uintptr_t)buf_vec[1].buf_addr);
1088
1089         if (virtio_net_with_host_offload(dev)) {
1090                 if (unlikely(buf_len < sizeof(struct virtio_net_hdr))) {
1091                         uint64_t len;
1092                         uint64_t remain = sizeof(struct virtio_net_hdr);
1093                         uint64_t src;
1094                         uint64_t dst = (uint64_t)(uintptr_t)&tmp_hdr;
1095                         uint16_t hdr_vec_idx = 0;
1096
1097                         /*
1098                          * No luck, the virtio-net header doesn't fit
1099                          * in a contiguous virtual area.
1100                          */
1101                         while (remain) {
1102                                 len = RTE_MIN(remain,
1103                                         buf_vec[hdr_vec_idx].buf_len);
1104                                 src = buf_vec[hdr_vec_idx].buf_addr;
1105                                 rte_memcpy((void *)(uintptr_t)dst,
1106                                                    (void *)(uintptr_t)src, len);
1107
1108                                 remain -= len;
1109                                 dst += len;
1110                                 hdr_vec_idx++;
1111                         }
1112
1113                         hdr = &tmp_hdr;
1114                 } else {
1115                         hdr = (struct virtio_net_hdr *)((uintptr_t)buf_addr);
1116                         rte_prefetch0(hdr);
1117                 }
1118         }
1119
1120         /*
1121          * A virtio driver normally uses at least 2 desc buffers
1122          * for Tx: the first for storing the header, and others
1123          * for storing the data.
1124          */
1125         if (unlikely(buf_len < dev->vhost_hlen)) {
1126                 buf_offset = dev->vhost_hlen - buf_len;
1127                 vec_idx++;
1128                 buf_addr = buf_vec[vec_idx].buf_addr;
1129                 buf_iova = buf_vec[vec_idx].buf_iova;
1130                 buf_len = buf_vec[vec_idx].buf_len;
1131                 buf_avail  = buf_len - buf_offset;
1132         } else if (buf_len == dev->vhost_hlen) {
1133                 if (unlikely(++vec_idx >= nr_vec))
1134                         goto out;
1135                 buf_addr = buf_vec[vec_idx].buf_addr;
1136                 buf_iova = buf_vec[vec_idx].buf_iova;
1137                 buf_len = buf_vec[vec_idx].buf_len;
1138
1139                 buf_offset = 0;
1140                 buf_avail = buf_len;
1141         } else {
1142                 buf_offset = dev->vhost_hlen;
1143                 buf_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
1144         }
1145
1146         rte_prefetch0((void *)(uintptr_t)
1147                         (buf_addr + buf_offset));
1148
1149         PRINT_PACKET(dev,
1150                         (uintptr_t)(buf_addr + buf_offset),
1151                         (uint32_t)buf_avail, 0);
1152
1153         mbuf_offset = 0;
1154         mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
1155         while (1) {
1156                 uint64_t hpa;
1157
1158                 cpy_len = RTE_MIN(buf_avail, mbuf_avail);
1159
1160                 /*
1161                  * A desc buf might across two host physical pages that are
1162                  * not continuous. In such case (gpa_to_hpa returns 0), data
1163                  * will be copied even though zero copy is enabled.
1164                  */
1165                 if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
1166                                         buf_iova + buf_offset, cpy_len)))) {
1167                         cur->data_len = cpy_len;
1168                         cur->data_off = 0;
1169                         cur->buf_addr =
1170                                 (void *)(uintptr_t)(buf_addr + buf_offset);
1171                         cur->buf_iova = hpa;
1172
1173                         /*
1174                          * In zero copy mode, one mbuf can only reference data
1175                          * for one or partial of one desc buff.
1176                          */
1177                         mbuf_avail = cpy_len;
1178                 } else {
1179                         if (likely(cpy_len > MAX_BATCH_LEN ||
1180                                    vq->batch_copy_nb_elems >= vq->size ||
1181                                    (hdr && cur == m))) {
1182                                 rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
1183                                                                    mbuf_offset),
1184                                            (void *)((uintptr_t)(buf_addr +
1185                                                            buf_offset)),
1186                                            cpy_len);
1187                         } else {
1188                                 batch_copy[vq->batch_copy_nb_elems].dst =
1189                                         rte_pktmbuf_mtod_offset(cur, void *,
1190                                                                 mbuf_offset);
1191                                 batch_copy[vq->batch_copy_nb_elems].src =
1192                                         (void *)((uintptr_t)(buf_addr +
1193                                                                 buf_offset));
1194                                 batch_copy[vq->batch_copy_nb_elems].len =
1195                                         cpy_len;
1196                                 vq->batch_copy_nb_elems++;
1197                         }
1198                 }
1199
1200                 mbuf_avail  -= cpy_len;
1201                 mbuf_offset += cpy_len;
1202                 buf_avail -= cpy_len;
1203                 buf_offset += cpy_len;
1204
1205                 /* This buf reaches to its end, get the next one */
1206                 if (buf_avail == 0) {
1207                         if (++vec_idx >= nr_vec)
1208                                 break;
1209
1210                         buf_addr = buf_vec[vec_idx].buf_addr;
1211                         buf_iova = buf_vec[vec_idx].buf_iova;
1212                         buf_len = buf_vec[vec_idx].buf_len;
1213
1214                         /*
1215                          * Prefecth desc n + 1 buffer while
1216                          * desc n buffer is processed.
1217                          */
1218                         if (vec_idx + 1 < nr_vec)
1219                                 rte_prefetch0((void *)(uintptr_t)
1220                                                 buf_vec[vec_idx + 1].buf_addr);
1221
1222                         buf_offset = 0;
1223                         buf_avail  = buf_len;
1224
1225                         PRINT_PACKET(dev, (uintptr_t)buf_addr,
1226                                         (uint32_t)buf_avail, 0);
1227                 }
1228
1229                 /*
1230                  * This mbuf reaches to its end, get a new one
1231                  * to hold more data.
1232                  */
1233                 if (mbuf_avail == 0) {
1234                         cur = rte_pktmbuf_alloc(mbuf_pool);
1235                         if (unlikely(cur == NULL)) {
1236                                 RTE_LOG(ERR, VHOST_DATA, "Failed to "
1237                                         "allocate memory for mbuf.\n");
1238                                 error = -1;
1239                                 goto out;
1240                         }
1241                         if (unlikely(dev->dequeue_zero_copy))
1242                                 rte_mbuf_refcnt_update(cur, 1);
1243
1244                         prev->next = cur;
1245                         prev->data_len = mbuf_offset;
1246                         m->nb_segs += 1;
1247                         m->pkt_len += mbuf_offset;
1248                         prev = cur;
1249
1250                         mbuf_offset = 0;
1251                         mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
1252                 }
1253         }
1254
1255         prev->data_len = mbuf_offset;
1256         m->pkt_len    += mbuf_offset;
1257
1258         if (hdr)
1259                 vhost_dequeue_offload(hdr, m);
1260
1261 out:
1262
1263         return error;
1264 }
1265
1266 static __rte_always_inline struct zcopy_mbuf *
1267 get_zmbuf(struct vhost_virtqueue *vq)
1268 {
1269         uint16_t i;
1270         uint16_t last;
1271         int tries = 0;
1272
1273         /* search [last_zmbuf_idx, zmbuf_size) */
1274         i = vq->last_zmbuf_idx;
1275         last = vq->zmbuf_size;
1276
1277 again:
1278         for (; i < last; i++) {
1279                 if (vq->zmbufs[i].in_use == 0) {
1280                         vq->last_zmbuf_idx = i + 1;
1281                         vq->zmbufs[i].in_use = 1;
1282                         return &vq->zmbufs[i];
1283                 }
1284         }
1285
1286         tries++;
1287         if (tries == 1) {
1288                 /* search [0, last_zmbuf_idx) */
1289                 i = 0;
1290                 last = vq->last_zmbuf_idx;
1291                 goto again;
1292         }
1293
1294         return NULL;
1295 }
1296
1297 static __rte_always_inline bool
1298 mbuf_is_consumed(struct rte_mbuf *m)
1299 {
1300         while (m) {
1301                 if (rte_mbuf_refcnt_read(m) > 1)
1302                         return false;
1303                 m = m->next;
1304         }
1305
1306         return true;
1307 }
1308
1309 static __rte_always_inline void
1310 restore_mbuf(struct rte_mbuf *m)
1311 {
1312         uint32_t mbuf_size, priv_size;
1313
1314         while (m) {
1315                 priv_size = rte_pktmbuf_priv_size(m->pool);
1316                 mbuf_size = sizeof(struct rte_mbuf) + priv_size;
1317                 /* start of buffer is after mbuf structure and priv data */
1318
1319                 m->buf_addr = (char *)m + mbuf_size;
1320                 m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
1321                 m = m->next;
1322         }
1323 }
1324
1325 static __rte_always_inline uint16_t
1326 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
1327         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
1328 {
1329         uint16_t i;
1330         uint16_t free_entries;
1331
1332         if (unlikely(dev->dequeue_zero_copy)) {
1333                 struct zcopy_mbuf *zmbuf, *next;
1334
1335                 for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
1336                      zmbuf != NULL; zmbuf = next) {
1337                         next = TAILQ_NEXT(zmbuf, next);
1338
1339                         if (mbuf_is_consumed(zmbuf->mbuf)) {
1340                                 update_shadow_used_ring_split(vq,
1341                                                 zmbuf->desc_idx, 0);
1342                                 TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
1343                                 restore_mbuf(zmbuf->mbuf);
1344                                 rte_pktmbuf_free(zmbuf->mbuf);
1345                                 put_zmbuf(zmbuf);
1346                                 vq->nr_zmbuf -= 1;
1347                         }
1348                 }
1349
1350                 if (likely(vq->shadow_used_idx)) {
1351                         flush_shadow_used_ring_split(dev, vq);
1352                         vhost_vring_call_split(dev, vq);
1353                 }
1354         }
1355
1356         free_entries = *((volatile uint16_t *)&vq->avail->idx) -
1357                         vq->last_avail_idx;
1358         if (free_entries == 0)
1359                 return 0;
1360
1361         /*
1362          * The ordering between avail index and
1363          * desc reads needs to be enforced.
1364          */
1365         rte_smp_rmb();
1366
1367         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
1368
1369         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
1370
1371         count = RTE_MIN(count, MAX_PKT_BURST);
1372         count = RTE_MIN(count, free_entries);
1373         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
1374                         dev->vid, count);
1375
1376         for (i = 0; i < count; i++) {
1377                 struct buf_vector buf_vec[BUF_VECTOR_MAX];
1378                 uint16_t head_idx;
1379                 uint32_t dummy_len;
1380                 uint16_t nr_vec = 0;
1381                 int err;
1382
1383                 if (unlikely(fill_vec_buf_split(dev, vq,
1384                                                 vq->last_avail_idx + i,
1385                                                 &nr_vec, buf_vec,
1386                                                 &head_idx, &dummy_len,
1387                                                 VHOST_ACCESS_RO) < 0))
1388                         break;
1389
1390                 if (likely(dev->dequeue_zero_copy == 0))
1391                         update_shadow_used_ring_split(vq, head_idx, 0);
1392
1393                 rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
1394
1395                 pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
1396                 if (unlikely(pkts[i] == NULL)) {
1397                         RTE_LOG(ERR, VHOST_DATA,
1398                                 "Failed to allocate memory for mbuf.\n");
1399                         break;
1400                 }
1401
1402                 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
1403                                 mbuf_pool);
1404                 if (unlikely(err)) {
1405                         rte_pktmbuf_free(pkts[i]);
1406                         break;
1407                 }
1408
1409                 if (unlikely(dev->dequeue_zero_copy)) {
1410                         struct zcopy_mbuf *zmbuf;
1411
1412                         zmbuf = get_zmbuf(vq);
1413                         if (!zmbuf) {
1414                                 rte_pktmbuf_free(pkts[i]);
1415                                 break;
1416                         }
1417                         zmbuf->mbuf = pkts[i];
1418                         zmbuf->desc_idx = head_idx;
1419
1420                         /*
1421                          * Pin lock the mbuf; we will check later to see
1422                          * whether the mbuf is freed (when we are the last
1423                          * user) or not. If that's the case, we then could
1424                          * update the used ring safely.
1425                          */
1426                         rte_mbuf_refcnt_update(pkts[i], 1);
1427
1428                         vq->nr_zmbuf += 1;
1429                         TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
1430                 }
1431         }
1432         vq->last_avail_idx += i;
1433
1434         if (likely(dev->dequeue_zero_copy == 0)) {
1435                 do_data_copy_dequeue(vq);
1436                 if (unlikely(i < count))
1437                         vq->shadow_used_idx = i;
1438                 if (likely(vq->shadow_used_idx)) {
1439                         flush_shadow_used_ring_split(dev, vq);
1440                         vhost_vring_call_split(dev, vq);
1441                 }
1442         }
1443
1444         return i;
1445 }
1446
1447 static __rte_always_inline uint16_t
1448 virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
1449         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
1450 {
1451         uint16_t i;
1452
1453         if (unlikely(dev->dequeue_zero_copy)) {
1454                 struct zcopy_mbuf *zmbuf, *next;
1455
1456                 for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
1457                      zmbuf != NULL; zmbuf = next) {
1458                         next = TAILQ_NEXT(zmbuf, next);
1459
1460                         if (mbuf_is_consumed(zmbuf->mbuf)) {
1461                                 update_shadow_used_ring_packed(vq,
1462                                                 zmbuf->desc_idx,
1463                                                 0,
1464                                                 zmbuf->desc_count);
1465
1466                                 TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
1467                                 restore_mbuf(zmbuf->mbuf);
1468                                 rte_pktmbuf_free(zmbuf->mbuf);
1469                                 put_zmbuf(zmbuf);
1470                                 vq->nr_zmbuf -= 1;
1471                         }
1472                 }
1473
1474                 if (likely(vq->shadow_used_idx)) {
1475                         flush_shadow_used_ring_packed(dev, vq);
1476                         vhost_vring_call_packed(dev, vq);
1477                 }
1478         }
1479
1480         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
1481
1482         count = RTE_MIN(count, MAX_PKT_BURST);
1483         VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
1484                         dev->vid, count);
1485
1486         for (i = 0; i < count; i++) {
1487                 struct buf_vector buf_vec[BUF_VECTOR_MAX];
1488                 uint16_t buf_id;
1489                 uint32_t dummy_len;
1490                 uint16_t desc_count, nr_vec = 0;
1491                 int err;
1492
1493                 if (unlikely(fill_vec_buf_packed(dev, vq,
1494                                                 vq->last_avail_idx, &desc_count,
1495                                                 buf_vec, &nr_vec,
1496                                                 &buf_id, &dummy_len,
1497                                                 VHOST_ACCESS_RO) < 0))
1498                         break;
1499
1500                 if (likely(dev->dequeue_zero_copy == 0))
1501                         update_shadow_used_ring_packed(vq, buf_id, 0,
1502                                         desc_count);
1503
1504                 rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr);
1505
1506                 pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
1507                 if (unlikely(pkts[i] == NULL)) {
1508                         RTE_LOG(ERR, VHOST_DATA,
1509                                 "Failed to allocate memory for mbuf.\n");
1510                         break;
1511                 }
1512
1513                 err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
1514                                 mbuf_pool);
1515                 if (unlikely(err)) {
1516                         rte_pktmbuf_free(pkts[i]);
1517                         break;
1518                 }
1519
1520                 if (unlikely(dev->dequeue_zero_copy)) {
1521                         struct zcopy_mbuf *zmbuf;
1522
1523                         zmbuf = get_zmbuf(vq);
1524                         if (!zmbuf) {
1525                                 rte_pktmbuf_free(pkts[i]);
1526                                 break;
1527                         }
1528                         zmbuf->mbuf = pkts[i];
1529                         zmbuf->desc_idx = buf_id;
1530                         zmbuf->desc_count = desc_count;
1531
1532                         /*
1533                          * Pin lock the mbuf; we will check later to see
1534                          * whether the mbuf is freed (when we are the last
1535                          * user) or not. If that's the case, we then could
1536                          * update the used ring safely.
1537                          */
1538                         rte_mbuf_refcnt_update(pkts[i], 1);
1539
1540                         vq->nr_zmbuf += 1;
1541                         TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
1542                 }
1543
1544                 vq->last_avail_idx += desc_count;
1545                 if (vq->last_avail_idx >= vq->size) {
1546                         vq->last_avail_idx -= vq->size;
1547                         vq->avail_wrap_counter ^= 1;
1548                 }
1549         }
1550
1551         if (likely(dev->dequeue_zero_copy == 0)) {
1552                 do_data_copy_dequeue(vq);
1553                 if (unlikely(i < count))
1554                         vq->shadow_used_idx = i;
1555                 if (likely(vq->shadow_used_idx)) {
1556                         flush_shadow_used_ring_packed(dev, vq);
1557                         vhost_vring_call_packed(dev, vq);
1558                 }
1559         }
1560
1561         return i;
1562 }
1563
1564 uint16_t
1565 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
1566         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
1567 {
1568         struct virtio_net *dev;
1569         struct rte_mbuf *rarp_mbuf = NULL;
1570         struct vhost_virtqueue *vq;
1571
1572         dev = get_device(vid);
1573         if (!dev)
1574                 return 0;
1575
1576         if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
1577                 RTE_LOG(ERR, VHOST_DATA,
1578                         "(%d) %s: built-in vhost net backend is disabled.\n",
1579                         dev->vid, __func__);
1580                 return 0;
1581         }
1582
1583         if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
1584                 RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
1585                         dev->vid, __func__, queue_id);
1586                 return 0;
1587         }
1588
1589         vq = dev->virtqueue[queue_id];
1590
1591         if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
1592                 return 0;
1593
1594         if (unlikely(vq->enabled == 0)) {
1595                 count = 0;
1596                 goto out_access_unlock;
1597         }
1598
1599         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1600                 vhost_user_iotlb_rd_lock(vq);
1601
1602         if (unlikely(vq->access_ok == 0))
1603                 if (unlikely(vring_translate(dev, vq) < 0)) {
1604                         count = 0;
1605                         goto out;
1606                 }
1607
1608         /*
1609          * Construct a RARP broadcast packet, and inject it to the "pkts"
1610          * array, to looks like that guest actually send such packet.
1611          *
1612          * Check user_send_rarp() for more information.
1613          *
1614          * broadcast_rarp shares a cacheline in the virtio_net structure
1615          * with some fields that are accessed during enqueue and
1616          * rte_atomic16_cmpset() causes a write if using cmpxchg. This could
1617          * result in false sharing between enqueue and dequeue.
1618          *
1619          * Prevent unnecessary false sharing by reading broadcast_rarp first
1620          * and only performing cmpset if the read indicates it is likely to
1621          * be set.
1622          */
1623         if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) &&
1624                         rte_atomic16_cmpset((volatile uint16_t *)
1625                                 &dev->broadcast_rarp.cnt, 1, 0))) {
1626
1627                 rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
1628                 if (rarp_mbuf == NULL) {
1629                         RTE_LOG(ERR, VHOST_DATA,
1630                                 "Failed to make RARP packet.\n");
1631                         count = 0;
1632                         goto out;
1633                 }
1634                 count -= 1;
1635         }
1636
1637         if (vq_is_packed(dev))
1638                 count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count);
1639         else
1640                 count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count);
1641
1642 out:
1643         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1644                 vhost_user_iotlb_rd_unlock(vq);
1645
1646 out_access_unlock:
1647         rte_spinlock_unlock(&vq->access_lock);
1648
1649         if (unlikely(rarp_mbuf != NULL)) {
1650                 /*
1651                  * Inject it to the head of "pkts" array, so that switch's mac
1652                  * learning table will get updated first.
1653                  */
1654                 memmove(&pkts[1], pkts, count * sizeof(struct rte_mbuf *));
1655                 pkts[0] = rarp_mbuf;
1656                 count += 1;
1657         }
1658
1659         return count;
1660 }